################################################################################
### File: surveillance/NAMESPACE
################################################################################

## refer to all C routines by their name prefixed by C_
useDynLib(surveillance, .registration = TRUE, .fixes = "C_")
importFrom(Rcpp, evalCpp) # see vignette("Rcpp-package", package="Rcpp")
## although Rcpp is only used at the C level, we need to "ensure that Rcpp is
## loaded so any dynamic linking to its code can be resolved. (There may be
## none, but there could be, now or in future.)" (B. Ripley, 2013-09-08)

###############
### IMPORTS ###
###############

### Import all packages listed as Depends
### (for utils and polyCub: only selected methods are imported)

import(methods, grDevices, graphics, stats)

## sp classes & utilities (bbox, coordinates, dimensions, overlay, plot, ...)
## (we "Depend" on package sp since it defines essential data classes & methods)
import(sp)

## we define own methods for generating xtable()'s, which we want to be usable
import(xtable)

### required generics for own methods (that's why we "Depend" on these packages)

## importFrom(stats, coef, vcov, logLik, nobs, residuals, confint, AIC, extractAIC,
##            profile, simulate, update, terms, add1, drop1, predict, as.stepfun)
importFrom(utils, head, tail, toLatex)

### required functions from utils and stats

## importFrom(stats, pnorm, cov2cor, ks.test, formula, rnorm, runif, step, dist,
##            update.formula, terms.formula, rpois, rnbinom, setNames,
##            na.omit, as.formula, pnbinom, qnbinom, qnorm, sd, glm, optim,
##            poisson, ppois, qpois, predict.glm, summary.glm, quasipoisson,
##            glm.fit)
## and many more...
importFrom(utils, packageVersion, modifyList, capture.output, read.table, data,
           setTxtProgressBar, txtProgressBar, sessionInfo, head.matrix, str,
           flush.console, write.table, as.roman, tail.matrix, methods)

### sampling from mv. Gaussian for OSAIC weights (twinSIR) and iafplot (twinstim)
importFrom(MASS, mvrnorm)

### disProg-specific

importFrom(MASS, glm.nb) # for algo.glrnb
##importFrom(msm, msm, hmmPois, viterbi.msm) # for algo.hmm()
##importFrom(spc, xcusum.arl, xcusum.crit) # for find.kh()
## (packages msm and spc are now "suggested", not imported)

### hhh4-specific

importFrom(MASS, ginv, negative.binomial)
importFrom(Matrix, Matrix)
importClassesFrom(Matrix, ddiMatrix)
importMethodsFrom(Matrix, coerce, forceSymmetric)
## sparse matrix methods provide a significant speed-up in marFisher
importFrom(nlme, fixef, ranef)
export(fixef, ranef) # we define corresponding methods for "hhh4" models

### twinSIR-specific

# for use in computing OSAIC weights by simulation
#importFrom(quadprog, solve.QP) # moved to "Suggests"

### twinstim-specific

import(spatstat) # new umbrella package spatstat 2.0-0 included here to
                 # avoid nasty collisions with spatstat < 2 lying around
                 # FIXME: remove this when the spatstat transition has settled
importFrom(spatstat.geom, area.owin, as.im.function, diameter, diameter.owin,
           disc, edges, inside.owin, intersect.owin, is.polygonal, as.polygonal,
           bdist.points, owin, ppp, shift.owin, spatstat.options, vertices)
importFrom(spatstat.geom, marks)
export(marks) # we define an epidataCS-method
importFrom(spatstat.geom, multiplicity)
export(multiplicity) # we define a Spatial-method
importFrom(polyCub, polyCub, .polyCub.iso, polyCub.SV, polyCub.midpoint, xylist)
importFrom(MASS, kde2d, truehist)

###############
### EXPORTS ###
###############

### general exports

export(surveillance.options, reset.surveillance.options)
export(animate) # new
S3-generic export(R0) # new S3-generic export(intensityplot) # new S3-generic export(formatPval) # yapf -- yet another p-value formatter export(anscombe.residuals) export(magic.dim, primeFactors, bestCombination) # similar to n2mfrow export(isoWeekYear) #extract ISO 8601 date export(formatDate) #format.Date + %Q and %q formatting strings export(refvalIdxByDate) export(ks.plot.unif) export(checkResidualProcess) # for twinstim and twinSIR export(qlomax) # quantile function of the Lomax distribution export(plapply) export(clapply) export(fanplot) # spatial utilities export(discpoly) export(unionSpatialPolygons) export(inside.gpc.poly) S3method(scale, gpc.poly) # redefined method for gpc.poly in spatial_stuff.R S3method(diameter, gpc.poly) export(nbOrder) export(poly2adjmat) export(polyAtBorder) export(layout.labels) export(layout.scalebar) # randomly break tied event times or coordinates export(untie) # new S3-generic #export(untie.default, untie.matrix, untie.epidataCS) S3method(untie, default) S3method(untie, matrix) S3method(untie, epidataCS) # intersection of a polygonal and a circular domain export(intersectPolyCircle) S3method(intersectPolyCircle, owin) S3method(intersectPolyCircle, SpatialPolygons) S3method(intersectPolyCircle, gpc.poly) # little helper: multiplicity of points S3method(multiplicity, Spatial) # list coefficients by model component export(coeflist) S3method(coeflist, default) S3method(coeflist, twinstim) S3method(coeflist, simEpidataCS) S3method(coeflist, hhh4) # Spatio-temporal cluster detection export(stcd) # tests for space-time interaction export(knox) S3method(print, knox) S3method(plot, knox) S3method(xtable, knox) S3method(toLatex, knox) export(stKtest) S3method(plot, stKtest) # PIT histograms export(pit) export(pit.default) S3method(pit, default) S3method(pit, oneStepAhead) S3method(pit, hhh4) S3method(plot, pit) # calibration test for Poisson or NegBin predictions export(calibrationTest) S3method(calibrationTest, default) export(calibrationTest.default) export(dss, logs, rps, ses) # nses ### sts(BP|NC)-specific export(sts) exportClasses(sts, stsBP) export(linelist2sts) export(animate_nowcasts) # conversion of "sts" objects S3method(as.ts, sts) export(as.xts.sts) S3method(xts::as.xts, sts) # delayed registration S3method(as.data.frame, sts) # see ?Methods_for_S3 exportMethods(as.data.frame) export(tidy.sts) # more S4 generics, some with an equivalent S3-method, see ?Methods_for_S3 exportMethods(dim, dimnames, year, epochInYear, "[") S3method(plot, sts) exportMethods(plot) S3method(toLatex, sts) exportMethods(toLatex) S3method(aggregate, sts) exportMethods(aggregate) # methods for accessing/replacing slots of an sts object (cf. 
AllGeneric.R) exportMethods(epoch,observed,alarms,upperbound,population,control,multinomialTS,neighbourhood) exportMethods("epoch<-","observed<-","alarms<-","upperbound<-","population<-","control<-","multinomialTS<-","neighbourhood<-") # methods for accessing/replacing slots of an stsNC object exportMethods(reportingTriangle,delayCDF,score,predint) # plot variants export(stsplot_space) export(stsplot_time, stsplot_time1, stsplot_alarm) export(addFormattedXAxis, atChange, at2ndChange, atMedian) #for time axis formatting export(stsplot_spacetime) # old implementation of (animated) map S3method(animate, sts) # S3-method for an S4 class, see ?Methods_for_S3 export(autoplot.sts) S3method(ggplot2::autoplot, sts) # delayed registration # outbreak detection algorithms (sts-interfaces) export(wrap.algo, farrington, bayes, rki, cusum, glrpois, glrnb, outbreakP, boda) # FIXME: rogerson, hmm ?? export(earsC) export(farringtonFlexible) export(categoricalCUSUM, pairedbinCUSUM, pairedbinCUSUM.runlength) export(nowcast, backprojNP) export(bodaDelay) # sts creation functions export(sts_creation) export(sts_observation) ### disProg-specific export(create.disProg) S3method(print, disProg) S3method(plot, disProg) S3method(aggregate, disProg) export(sim.pointSource, sim.seasonalNoise) export(LRCUSUM.runlength, arlCusum, find.kh, findH, hValues, findK) export(estimateGLRNbHook) export(algo.compare, algo.quality, algo.summary) ## outbreak detection algorithms (old disProg implementations) export(algo.bayes, algo.bayes1, algo.bayes2, algo.bayes3, algo.bayesLatestTimepoint, algo.call, algo.cdc, algo.cdcLatestTimepoint, algo.cusum, algo.farrington, algo.glrnb, algo.glrpois, algo.hmm, algo.outbreakP, algo.rki, algo.rki1, algo.rki2, algo.rki3, algo.rkiLatestTimepoint, algo.rogerson, algo.twins) ## auxiliary functions for algo.farrington (FIXME: why export these internals?) 
export(algo.farrington.assign.weights, algo.farrington.fitGLM, algo.farrington.fitGLM.fast, algo.farrington.fitGLM.populationOffset, algo.farrington.threshold) S3method(plot, atwins) S3method(plot, survRes) S3method(print, algoQV) S3method(xtable, algoQV) ### conversion between old disProg and new sts classes export(disProg2sts) export(sts2disProg) ### twinSIR-specific export(cox) export(as.epidata) S3method(as.epidata, data.frame) export(as.epidata.data.frame) S3method(as.epidata, default) export(as.epidata.default) export(intersperse) export(twinSIR) export(stateplot) export(simEpidata) S3method(update, epidata) S3method("[", epidata) S3method(print, epidata) S3method(summary, epidata) S3method(print, summary.epidata) S3method(plot, epidata) S3method(animate, epidata) S3method(plot, summary.epidata) S3method(animate, summary.epidata) S3method(print, twinSIR) S3method(summary, twinSIR) S3method(print, summary.twinSIR) S3method(plot, twinSIR) S3method(intensityplot, twinSIR) export(intensityplot.twinSIR) # for convenience S3method(profile, twinSIR) S3method(plot, profile.twinSIR) S3method(vcov, twinSIR) S3method(logLik, twinSIR) S3method(AIC, twinSIR) S3method(extractAIC, twinSIR) S3method(simulate, twinSIR) export(simulate.twinSIR) # for convenience S3method(residuals, twinSIR) S3method(intensityplot, simEpidata) export(intensityplot.simEpidata) # for convenience ### twinstim-specific export(as.epidataCS) export(glm_epidataCS) export(twinstim) export(simEpidataCS) export(siaf, siaf.constant, siaf.step, siaf.gaussian, siaf.exponential, siaf.powerlaw, siaf.powerlaw1, siaf.powerlawL, siaf.student) export(tiaf, tiaf.constant, tiaf.step, tiaf.exponential) export(epidataCS2sts) export(epitest) S3method(coef, epitest) S3method(plot, epitest) export(getSourceDists) S3method(nobs, epidataCS) S3method("[", epidataCS) S3method(update, epidataCS) export(update.epidataCS) # for convenience export(permute.epidataCS) S3method(head, epidataCS) S3method(tail, epidataCS) S3method(print, epidataCS) S3method(subset, epidataCS) S3method(summary, epidataCS) S3method(print, summary.epidataCS) S3method(as.stepfun, epidataCS) S3method(animate, epidataCS) export(animate.epidataCS) # for convenience S3method(marks, epidataCS) export(marks.epidataCS) # for convenience since its a foreign generic S3method(plot, epidataCS) export(epidataCSplot_time, epidataCSplot_space) S3method(as.epidata, epidataCS) export(as.epidata.epidataCS) # for convenience S3method(print, twinstim) S3method(summary, twinstim) export(summary.twinstim) # for convenience S3method(print, summary.twinstim) S3method(toLatex, summary.twinstim) S3method(xtable, summary.twinstim) export(xtable.summary.twinstim) # for xtable.twinstim S3method(xtable, twinstim) S3method(plot, twinstim) export(iafplot) export(intensity.twinstim) S3method(intensityplot, twinstim) export(intensityplot.twinstim) # for convenience S3method(profile, twinstim) S3method(coef, summary.twinstim) S3method(vcov, twinstim) S3method(vcov, summary.twinstim) S3method(logLik, twinstim) S3method(extractAIC, twinstim) S3method(nobs, twinstim) S3method(simulate, twinstim) export(simulate.twinstim) # for convenience export(simEndemicEvents) S3method(R0, twinstim) export(simpleR0) S3method(residuals, twinstim) S3method(update, twinstim) export(update.twinstim) # for convenience S3method(terms, twinstim) S3method(all.equal, twinstim) export(stepComponent) S3method(terms, twinstim_stependemic) S3method(terms, twinstim_stepepidemic) S3method(update, twinstim_stependemic) S3method(update, 
twinstim_stepepidemic)
S3method(add1, twinstim)
S3method(add1, twinstim_stependemic)
S3method(add1, twinstim_stepepidemic)
S3method(drop1, twinstim)
S3method(drop1, twinstim_stependemic)
S3method(drop1, twinstim_stepepidemic)
S3method(residuals, simEpidataCS)
S3method(R0, simEpidataCS)
S3method(intensityplot, simEpidataCS)
export(intensityplot.simEpidataCS) # for convenience
S3method(print, simEpidataCSlist)
S3method("[[", simEpidataCSlist)
S3method(plot, simEpidataCSlist)

### hhh4-specific

## main functions
export(hhh4)
export(addSeason2formula)
export(makeControl)
export(zetaweights, W_powerlaw)
export(W_np)
export(getNEweights, coefW)
export(oneStepAhead)
export(scores)
export(permutationTest)

## S3-methods
S3method(print, hhh4)
S3method(summary, hhh4)
S3method(print, summary.hhh4)
S3method(nobs, hhh4)
S3method(logLik, hhh4)
S3method(formula, hhh4)
S3method(terms, hhh4)
S3method(coef, hhh4)
S3method(vcov, hhh4)
S3method(fixef, hhh4)
S3method(ranef, hhh4)
S3method(confint, hhh4)
S3method(residuals, hhh4)
S3method(predict, hhh4)
S3method(update, hhh4)
export(update.hhh4) # for add-on packages
S3method(all.equal, hhh4)
S3method(simulate, hhh4)
S3method(plot, hhh4)
export(plotHHH4_fitted, plotHHH4_fitted1, plotHHH4_season, getMaxEV_season,
       plotHHH4_maxEV, getMaxEV, plotHHH4_maps, plotHHH4_ri, plotHHH4_neweights)
S3method(quantile, oneStepAhead)
S3method(confint, oneStepAhead)
S3method(plot, oneStepAhead)
S3method(scores, default)
S3method(scores, hhh4)
S3method(scores, oneStepAhead)
S3method(calibrationTest, hhh4)
S3method(calibrationTest, oneStepAhead)

## methods for simulations from hhh4 fits
S3method(aggregate, hhh4sims)
S3method(plot, hhh4sims)
export(as.hhh4simslist)
S3method(as.hhh4simslist, hhh4sims)
S3method(as.hhh4simslist, list)
S3method(as.hhh4simslist, hhh4simslist)
S3method("[", hhh4simslist)
S3method("[[", hhh4simslist)
S3method(aggregate, hhh4simslist)
S3method(plot, hhh4simslist)
export(plotHHH4sims_size)
export(plotHHH4sims_time)
export(plotHHH4sims_fan)
S3method(scores, hhh4sims)
S3method(scores, hhh4simslist)

## internal functions for use by add-on packages
export(meanHHH, sizeHHH, decompose.hhh4)

################################################################################
### File: surveillance/demo/cost.R
################################################################################

## need a writable figs/ directory in getwd()
## -> switch to a temporary directory to save figures to
TMPDIR <- tempdir()
OWD <- setwd(TMPDIR)
dir.create("figs")

###################################################
### chunk number 1:
###################################################
library("surveillance")
options(width=70)
options("prompt"="R> ")
set.seed(1234)

opendevice <- function(horizontal=TRUE,width=7,height=4,...) {
  #Do it for postscript instead -- who uses postscript these days??
  args <- list(...)
args$file <- sub(".pdf",".eps",args$file) args$width <- width args$height <- height args$horizontal <- FALSE do.call("postscript",args) par(mar=c(4,4,2,2)) } ################################################### ### chunk number 2: K1 ################################################### data("ha") plot(aggregate(ha),main="Hepatitis A in Berlin 2001-2006") ################################################### ### chunk number 3: ################################################### opendevice(file="figs/002.pdf") data("ha") plot(aggregate(ha),main="Hepatitis A in Berlin 2001-2006") dev.off() ################################################### ### chunk number 4: ################################################### sps <- sim.pointSource(p = 0.99, r = 0.5, length = 400, A = 1, alpha = 1, beta = 0, phi = 0, frequency = 1, state = NULL, K = 1.7) plot(sps,xaxis.years=FALSE) ################################################### ### chunk number 5: ################################################### opendevice(file="figs/003.pdf") plot(sps,xaxis.years=FALSE,legend.opts=list(x="topleft")) dev.off() ################################################### ### chunk number 6: HAB662 eval=FALSE ################################################### ## ha.b662 <- algo.bayes(aggregate(ha), control = list(range = 209:290, b = 2, w = 6, alpha = 0.01)) ## plot(ha.b662, firstweek=1, startyear = 2005) ################################################### ### chunk number 7: ################################################### ha.b662 <- algo.bayes(aggregate(ha), control = list(range = 209:290, b = 2, w = 6, alpha = 0.01)) plot(ha.b662, firstweek=1, startyear = 2005) opendevice(file="figs/hab662.pdf") plot(ha.b662, firstweek=1, startyear = 2005,legend.opts=list(x="topleft",horiz=TRUE)) dev.off() ################################################### ### chunk number 8: FACDC eval=FALSE ################################################### ## cntrl <- list(range = 300:400, m = 1, w = 3, b = 5, alpha = 0.01) ## sps.cdc <- algo.cdc(sps, control = cntrl) ## sps.farrington <- algo.farrington(sps, control = cntrl) ################################################### ### chunk number 9: ################################################### cntrl <- list(range = 300:400, m = 1, w = 3, b = 5, alpha = 0.01) sps.cdc <- algo.cdc(sps, control = cntrl) sps.farrington <- algo.farrington(sps, control = cntrl) ################################################### ### chunk number 10: ################################################### opendevice(file="figs/farringtoncdc.pdf") par(mfcol = c(1, 2),cex=0.8) plot(sps.cdc, legend = NULL, xaxis.years=FALSE) plot(sps.farrington, legend = NULL, xaxis.years=FALSE) dev.off() ################################################### ### chunk number 11: CUSUM eval=FALSE ################################################### ## kh <- find.kh(ARLa=500,ARLr=7) ## ha.cusum <- algo.cusum(aggregate(ha),control=list(k=kh$k,h=kh$h,m="glm",trans="rossi",range=209:290)) ################################################### ### chunk number 12: ################################################### opendevice(file="figs/hacusum.pdf") kh <- find.kh(ARLa=500,ARLr=7) ha.cusum <- algo.cusum(aggregate(ha),control=list(k=kh$k,h=kh$h,m="glm",trans="rossi",range=209:290)) plot(ha.cusum,startyear=2005,legend.opts=list(x=30,y=5.5)) dev.off() #Extract coefficients beta <- coef(ha.cusum$control$m.glm) ################################################### ### chunk number 13: ################################################### print(algo.quality(ha.b662)) 
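###################################################
### added aside (not part of the original demo)
###################################################
## A sketch, assuming the xtable package is attached: the NAMESPACE above
## registers an xtable() method for the "algoQV" objects returned by
## algo.quality(), so the quality measures printed above can presumably
## also be exported as a LaTeX table.
library("xtable")
print(xtable(algo.quality(ha.b662)))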
################################################### ### chunk number 14: ################################################### #This chunk contains stuff the reader should not see, but which is necessary #for the visual block to work. control = list( list(funcName = "rki1"), list(funcName = "rki2"), list(funcName = "rki3"), list(funcName = "bayes1"), list(funcName = "bayes2"), list(funcName = "bayes3"), # list(funcName = "cdc",alpha=0.05,b=2,m=1), # list(funcName = "farrington",alpha=0.05,b=0,w=6), list(funcName = "farrington",alpha=0.05,b=1,w=6), list(funcName = "farrington",alpha=0.05,b=2,w=4)) control <- lapply(control,function(ctrl) {ctrl$range <- 300:400;return(ctrl)}) #Update range in each - cyclic continuation data("k1") range = (2*4*52) + 1:length(k1$observed) aparv.control <- lapply(control,function(cntrl) { cntrl$range=range;return(cntrl)}) #Auxiliary function to enlarge data enlargeData <- function(disProgObj, range = 1:156, times = 1){ disProgObj$observed <- c(rep(disProgObj$observed[range], times), disProgObj$observed) disProgObj$state <- c(rep(disProgObj$state[range], times), disProgObj$state) return(disProgObj) } #Outbreaks outbrks <- c("m1", "m2", "m3", "m4", "m5", "q1_nrwh", "q2", "s1", "s2", "s3", "k1", "n1", "n2", "h1_nrwrp") #Load and enlarge data. outbrks <- lapply(outbrks,function(name) { data(list=name) enlargeData(get(name),range=1:(4*52),times=2) }) #Apply function to one surv.one <- function(outbrk) { algo.compare(algo.call(outbrk,control=aparv.control)) } ################################################### ### chunk number 15: eval=FALSE ################################################### ## #Apply function to one ## surv.one <- function(outbrk) { ## algo.compare(algo.call(outbrk,control=aparv.control)) ## } ## ## algo.summary(lapply(outbrks, surv.one)) ## ################################################### ### chunk number 16: ALGOSUMMARY ################################################### res <- algo.summary(lapply(outbrks,surv.one)) ################################################### ### chunk number 17: ################################################### print(res,digits=3) ################################################### ### chunk number 18: eval=FALSE ################################################### ## setClass( "sts", representation(week = "numeric", ## freq = "numeric", ## start = "numeric", ## observed = "matrix", ## state = "matrix", ## alarm = "matrix", ## upperbound = "matrix", ## neighbourhood= "matrix", ## populationFrac= "matrix", ## map = "SpatialPolygonsDataFrame", ## control = "list")) ## ################################################### ### chunk number 19: HA eval=FALSE ################################################### ## shp <- system.file("shapes/berlin.shp",package="surveillance") ## ha <- disProg2sts(ha, map=maptools::readShapePoly(shp,IDvar="SNAME")) ## plot(ha,type=observed ~ 1 | unit) ## ################################################### ### chunk number 20: ################################################### opendevice(file="figs/ha-1unit.pdf",width=7,height=7) par(mar=c(0,0,0,0)) shp <- system.file("shapes/berlin.shp",package="surveillance") ha <- disProg2sts(ha, map=maptools::readShapePoly(shp,IDvar="SNAME")) plot(ha,type=observed ~ 1 | unit) dev.off() ################################################### ### chunk number 21: HA:MAP eval=FALSE ################################################### ## ha4 <- aggregate(ha[,c("pank","mitt","frkr","scho","chwi","neuk")],nfreq=13) ## ha4.cusum <- 
cusum(ha4,control=list(k=1.5,h=1.75,m="glm",trans="rossi",range=52:73))
## #ha4.b332 <- bayes(ha4,control=list(range=52:73,b=2,w=3,alpha=0.01/6))
## plot(ha4.cusum,type=observed ~ time | unit)

###################################################
### chunk number 22:
###################################################
opendevice(file="figs/ha-timeunit.pdf",width=7,height=5)
ha4 <- aggregate(ha[,c("pank","mitt","frkr","scho","chwi","neuk")],nfreq=13)
ha4.cusum <- cusum(ha4,control=list(k=1.5,h=1.75,m="glm",trans="rossi",range=52:73))
#ha4.b332 <- bayes(ha4,control=list(range=52:73,b=2,w=3,alpha=0.01/6))
plot(ha4.cusum,type=observed ~ time | unit)
dev.off()

## finally switch back to original working directory
message("Note: selected figures have been saved in ", getwd(), "/figs")
setwd(OWD)

################################################################################
### File: surveillance/demo/biosurvbook.R
################################################################################

######################################################################
# Demo of the code used in the book chapter
# Hoehle, M. and Mazick, A. (2010) Aberration detection in R
# illustrated by Danish mortality monitoring, Book chapter in
# T. Kass-Hout and X. Zhang (Eds.) Biosurveillance: A Health Protection
# Priority, CRC Press.
#
# The data read from CSV files in the chapter are available as data("momo")
# in the package. Courtesy of Statens Serum Institut for making
# the mortality data public.
#
# Author: Michael Hoehle
# Date: 13 Oct 2009
######################################################################

#Load surveillance package
library("surveillance")

#Load Danish mortality data (see book chapter for CSV reading)
data("momo")

#Create a plot of the data as in Figure 1 of the book chapter.
#Note: The year is determined by the ISO week, not the date
plot(momo[year(momo)>=2000,],ylab="No. of deaths",
     par.list=list(mar=c(4,2.2,2,1),cex.axis=1.5),
     type=observed ~ time | unit, col=c(gray(0.3),NA,NA),
     xaxis.tickFreq=list("%G"=atChange),xaxis.labelFormat="%G",
     xlab="time (weeks)")

par(mfrow=c(1,2),mar=c(4,4,2,1))
plot(momo,ylab="No. of deaths",xlab="time (weeks)",legend.opts=NULL,
     type=observed ~ time,col=c(gray(0.3),NA,NA),
     xaxis.tickFreq=list("%G"=atChange,"%m"=atChange),
     xaxis.labelFreq=list("%G"=atChange),xaxis.labelFormat="%G")
plot(momo[,"[0,1)"],xlab="time (weeks)",ylab="No. of deaths",legend.opts=NULL,
     col=c(gray(0.3),NA,NA),
     xaxis.tickFreq=list("%G"=atChange,"%m"=atChange),
     xaxis.labelFreq=list("%G"=atChange),xaxis.labelFormat="%G")
par(mfrow=c(1,1))

#Monitoring starts in week 40, 2007
phase2 <- which(epoch(momo) >= "2007-10-01")
s.far <- farrington(momo[,"[0,1)"],
                    control=list(range=phase2,alpha=0.01,b=5,w=4,
                                 powertrans="none"))

cntrlFar <- s.far@control
upper.ptnone <- s.far@upperbound
cntrlFar$powertrans <- "2/3"
upper.pt23 <- farrington(momo[,"[0,1)"],control=cntrlFar)@upperbound
cntrlFar$powertrans <- "1/2"
upper.pt12 <- farrington(momo[,"[0,1)"],control=cntrlFar)@upperbound

## plot(s.far,ylab="No. of deaths",xlab="time (weeks)",main="")
ymax <- max(s.far@upperbound, upper.pt12, upper.pt23)*1.2
#par(mar=c(4,4,1,1))
plot(s.far,legend.opts=NULL,ylab="No.
of deaths",main="",xlab="time (weeks)",ylim=c(0,ymax),col=c("darkgray",NA,gray(0.3)),lty=c(1,1,1),lwd=c(1,1,2),dx.upperbound=0,alarm.symbol=list(pch=24,col=1, cex=1)) lines(c(1:nrow(s.far)-0.5,nrow(s.far)+0.5),c(upper.pt12,upper.pt12[nrow(s.far)]),type="s",col="darkgray",lwd=2,lty=2) lines(c(1:nrow(s.far)-0.5,nrow(s.far)+0.5),c(upper.pt23,upper.pt23[nrow(s.far)]),type="s",col=gray(0.1),lwd=2,lty=3) legend(x="topright",c("none","1/2","2/3"),col=c(gray(0.3),"darkgray",gray(0.1)),lwd=2,lty=1:3,horiz=TRUE) #legend(x="topright",c("none","1/2","2/3",expression(hat(mu)[t[0]])),col=c(gray(0.3),"darkgray",gray(0.1),1),lwd=c(2,2,2,3),lty=c(1:3,1),horiz=TRUE) #Median of predictive distribution lines(c(1:nrow(s.far)-0.5,nrow(s.far)+0.5),c(s.far@control$pd[,2],s.far@control$pd[nrow(s.far),2]),type="s",col=1,lwd=3) text(nrow(s.far)+2,tail(observed(s.far),n=1),expression(hat(mu)[t[0]])) alarmDates <- epoch(s.far)[alarms(s.far) == 1] par(mar=c(4,4,2,2)) surv2 <- s.far surv2@observed <- 0*surv2@observed surv2@upperbound <- 0*surv2@observed plot(surv2,ylim=c(-0.05,1),ylab="Quantile",xlab="time (weeks)",legend.opts=NULL,main="",dx.upperbound=0,alarm.symbol=list(pch=24,col=1, cex=1)) lines(surv2@control$pd[,1], type="S") lines( c(1,nrow(surv2)+0.), rep( 1-s.far@control$alpha/2, 2),lty=2,col=1) s.far.all <- farrington(momo, control=list(range=phase2,alpha=0.01,b=5,w=4)) ## s.far.all <- farrington(momo, control=list(range=phase2,alpha=0.01,b=5,w=4)) ## plot(s.far.all,type = alarm ~ time,xlab="time (weeks)") par(mar=c(4,4,1,1)) plot(s.far.all,type = alarm ~ time,xlab="time (weeks)",main="",alarm.symbol=list(pch=24,col=1, cex=1.5),lvl=rep(1,nrow(s.far.all))) ####################################################################### #Negative binomial GLM modelling using the population size as covariate ####################################################################### phase1 <- which(year(momo) == 2002 & epochInYear(momo) == 40):(phase2[1]-1) momo.df <- as.data.frame(momo) m <- MASS::glm.nb( `observed.[75,85)` ~ 1 + epoch + sin(2*pi*epochInPeriod) + cos(2*pi*epochInPeriod) + `population.[75,85)`, data=momo.df[phase1,]) mu0 <- predict(m, newdata=momo.df[phase2,],type="response") ci <- confint(m) kappa <- 1.2 s.nb <- glrnb(momo[,"[75,85)"], control=list(range=phase2,alpha=1/m$theta,mu0=mu0,c.ARL=4.75,theta=log(kappa),ret="cases")) alarmDates <- epoch(s.nb)[alarms(s.nb) == 1] plot(s.nb,dx.upperbound=0,legend.opts=NULL,ylab="No. of deaths",main="",ylim=c(0,max(observed(s.nb))*1.1),xlab="time (weeks)",col=c("darkgray",NA,1),lwd=c(1,1,2),lty=c(1,1,1),alarm.symbol=list(pch=24,col=1, cex=1)) lines(mu0,lwd=2,col=1,lty=2) lines(exp(log(mu0) + log(kappa)),col=1,lty=3,lwd=3) legend(x=20,y=100,c(expression(mu[0,t]),expression(mu[1,t]),"NNBA"),col=c(1,1,1),lty=c(2,3,1),horiz=TRUE,bg="white",lwd=c(2,3,2)) set.seed(123) ###################################################################### # P(N_c <= 51|\tau=\infty) computation ###################################################################### #Number of simulations to perform. 
In book chapter this number is #1000, but for the sake of a speedy illustration this is drastically #reduced in this demonstration nSims <- 10 #1000 ###################################################################### # Simulate one run-length by first generating data from the negative # binomial model and then applying the LR NegBin CUSUM to it ###################################################################### simone.TAleq65 <- function(sts, g) { observed(sts)[phase2,] <- rnbinom(length(mu0), mu=mu0, size=m$theta) one <- glrnb(sts, control=modifyList(control(s.nb), list(c.ARL=g))) return(any(alarms(one) > 0)) } #Determine run-length using 1000 Monte Carlo samples g.grid <- seq(1,8,by=0.5) pMC <- sapply(g.grid, function(g) { mean(replicate(nSims, simone.TAleq65(momo[,"[75,85)"],g))) }) #Density for comparison in the negative binomial distribution dY <- function(y,mu,log=FALSE, alpha, ...) { dnbinom(y, mu=mu, size=1/alpha, log=log) } #nMax <- max(which( dY(0:1e4, mu=max(mu0),alpha=1/m$theta) >= 1e-20)) - 1 pMarkovChain <- sapply( g.grid, function(g) { TA <- LRCUSUM.runlength( mu=t(mu0), mu0=t(mu0), mu1=kappa*t(mu0), h=g, dfun = dY, n=rep(600,length(mu0)), alpha=1/m$theta) return(tail(TA$cdf,n=1)) }) par(mar=c(4,4,2,2)) matplot(g.grid, cbind(pMC,pMarkovChain),type="l",ylab=expression(P(T[A] <= 65 * "|" * tau * "=" * infinity)),xlab="g",col=1) prob <- 0.1 lines(range(g.grid),rep(prob,2),lty=3,lwd=2) axis(2,at=prob,las=1,cex.axis=0.7) legend(x="topright",c("Monte Carlo","Markov chain"), lty=1:2,col=1) m.01 <- MASS::glm.nb( `observed.[0,1)` ~ 1 + epoch + `population.[0,1)`+ sin(2*pi*epochInPeriod) + cos(2*pi*epochInPeriod), data=momo.df[phase1,]) mu0 <- predict(m.01, newdata=momo.df[phase2,],type="response") #Correct for past outbreaks #omega <- algo.farrington.assign.weights(residuals(m.01, type="deviance")) #m.01.refit <- glm.nb( `observed.[0,1)` ~ 1 + epoch + `population.[0,1)`+ sin(2*pi*epochInPeriod) + cos(2*pi*epochInPeriod), data=momo.df[phase1,],weights=omega) #mu0.refit <- predict(m.01.refit, newdata=momo.df[phase2,],type="response") #Results from the previous Farrington method mu0.far <- control(s.far)$pd[,2] ###################################################################### # Simulate one run-length by first generating data from the negative # binomial model and then applying the LR NegBin CUSUM to it ###################################################################### simone.TAleq65.far <- function(sts, alpha, mu0, size) { observed(sts)[phase2,] <- rnbinom(length(mu0), mu=mu0, size=size) res <- farrington(sts, control=modifyList(control(s.far), list(alpha=alpha))) return(any(as.logical(alarms(res)))) } #Determine run-length using 1000 Monte Carlo samples res.far <- replicate(nSims, simone.TAleq65.far(momo[,"[0,1)"],alpha=0.01,mu0=mu0.far,size=m.01$theta)) (pTA65.far <- mean(res.far)) #Run CUSUM kappa <- 1.2 s.nb.01 <- glrnb(momo[,"[0,1)"], control=list(range=phase2,alpha=1/m.01$theta,mu0=mu0.far,c.ARL=2.1,theta=log(kappa),ret="cases")) alarmDates <- epoch(s.nb.01)[alarms(s.nb.01) == 1] mu1 <- kappa*mu0.far #Show as usual plot(s.nb.01,dx.upperbound=0,legend.opts=NULL,ylab="No. 
of deaths",main="",xlab="time (weeks)",col=c("darkgray",NA,1),lwd=c(1,1,1),lty=c(1,1,1),ylim=c(0,max(s.nb.01@upperbound))*1.15,alarm.symbol=list(pch=24,col=1, cex=1)) lines(1:(nrow(s.far)+1)-0.5, c(mu0.far,tail(mu0.far,n=1)),lwd=3,col=1,lty=1,type="s") lines(1:(nrow(s.far)+1)-0.5, c(mu1,tail(mu1,n=1)),col=1,lty=3,lwd=3,type="s") legend(x="topright",c(expression(mu[0,t]),expression(mu[1,t]),"NNBA"),col=c(1,1,1),lty=c(1,3,1),horiz=TRUE,bg="white",lwd=c(3,3,1)) surveillance/demo/fluBYBW.R0000644000176200001440000001734213536426644015313 0ustar liggesusers################################################################################ ### Demo of hhh4() modelling of influenza in Southern Germany - data("fluBYBW") ### based on ### ### Paul, M. and Held, L. (2011): Predictive assessment of a non-linear random ### effects model for multivariate time series of infectious disease counts. ### Statistics in Medicine, 30, 1118-1136. ### ### RUNNING THE WHOLE SCRIPT TAKES ~20 MINUTES! ### ### Copyright (C) 2009-2012 Michaela Paul, 2012-2013,2016-2019 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ################################################################################ set.seed(1) # for reproducibility (affects initial values for ri() terms) library("surveillance") ## Weekly counts of influenza in 140 districts of Bavaria and Baden-Wuerttemberg data("fluBYBW") # data corrected in surveillance 1.6-0 # -> minor differences to original results in the paper ################################################## # Fit the models from the Paul & Held (2011) paper ################################################## ## generate formula for temporal and seasonal trends f.end <- addSeason2formula(f = ~ -1 + ri(type="iid", corr="all") + I((t-208)/100), S=3, period=52) ## settings for the optimizer opt <- list(stop = list(tol=1e-5, niter=200), regression = list(method="nlminb"), variance = list(method="nlminb")) ## models # A0 cntrl_A0 <- list(ar = list(f = ~ -1), end = list(f =f.end, offset = population(fluBYBW)), family = "NegBin1", optimizer = opt, verbose = 1) summary(res_A0 <- hhh4(fluBYBW,cntrl_A0)) # B0 cntrl_B0 <- list(ar = list(f = ~ 1), end = list(f =f.end, offset = population(fluBYBW)), family = "NegBin1", optimizer = opt, verbose=1) res_B0 <- hhh4(fluBYBW,cntrl_B0) # C0 cntrl_C0 <- list(ar = list(f = ~ -1 + ri(type="iid", corr="all")), end = list(f =f.end, offset = population(fluBYBW)), family = "NegBin1", optimizer = opt, verbose=1) res_C0 <- hhh4(fluBYBW,cntrl_C0) #A1 # weight matrix w_ji = 1/(No. 
neighbors of j) if j ~ i, and 0 otherwise wji <- neighbourhood(fluBYBW)/rowSums(neighbourhood(fluBYBW)) cntrl_A1 <- list(ar = list(f = ~ -1), ne = list(f = ~ 1, weights = wji), end = list(f =f.end, offset = population(fluBYBW)), family = "NegBin1", optimizer = opt, verbose=1) res_A1 <- hhh4(fluBYBW,cntrl_A1) # B1 cntrl_B1 <- list(ar = list(f = ~ 1), ne = list(f = ~ 1, weights = wji), end = list(f =f.end, offset = population(fluBYBW)), family = "NegBin1", optimizer = opt, verbose=1) res_B1 <- hhh4(fluBYBW,cntrl_B1) # C1 cntrl_C1 <- list(ar = list(f = ~ -1 + ri(type="iid", corr="all")), ne = list(f = ~ 1, weights = wji), end = list(f =f.end, offset = population(fluBYBW)), family = "NegBin1", optimizer = opt, verbose=1) res_C1 <- hhh4(fluBYBW,cntrl_C1) #A2 cntrl_A2 <- list(ar = list(f = ~ -1), ne = list(f = ~ -1 + ri(type="iid",corr="all"), weights=wji), end = list(f =f.end, offset = population(fluBYBW)), family = "NegBin1", optimizer = opt, verbose=1) res_A2 <- hhh4(fluBYBW,cntrl_A2) # B2 cntrl_B2 <- list(ar = list(f = ~ 1), ne = list(f = ~ -1 + ri(type="iid",corr="all"), weights =wji), end = list(f =f.end, offset = population(fluBYBW)), family = "NegBin1", optimizer = opt, verbose=1) res_B2 <- hhh4(fluBYBW,cntrl_B2) # C2 cntrl_C2 <- list(ar = list(f = ~ -1 + ri(type="iid", corr="all")), ne = list(f = ~ -1 + ri(type="iid",corr="all"), weights =wji), end = list(f =f.end, offset = population(fluBYBW)), family = "NegBin1", optimizer = opt, verbose=1, start=list(fixed=fixef(res_B0),random=c(rep(0,140), ranef(res_B0)), sd.corr=c(-.5,res_B0$Sigma.orig,0))) res_C2 <- hhh4(fluBYBW,cntrl_C2) # D cntrl_D <- list(ar = list(f = ~ 1), ne = list(f = ~ -1 + ri(type="iid"), weights = wji), end = list(f =addSeason2formula(f = ~ -1 + ri(type="car") + I((t-208)/100), S=3, period=52), offset = population(fluBYBW)), family = "NegBin1", optimizer = opt, verbose=1) res_D <- hhh4(fluBYBW,cntrl_D) ########################################################### ## Exemplary summary of model B2 ## (compare with Paul & Held, 2011, Table III and Figure 5) ########################################################### summary(res_B2, idx2Exp = 1:2, maxEV = TRUE) ## Note: as of surveillance 1.6-0, results differ slightly from the paper ## (see penalized log-likelihood), because a superfluous row of zeros ## has been removed from the fluBYBW data .idx <- c(113, 111, 46, 77) plot(res_B2, units = .idx, names = fluBYBW@map@data[.idx, "name"], legend = 2, legend.args = list(x = "topleft"), legend.observed = TRUE) ###################################################################### # Compare the predictive performance of the models by computing # one-step-ahead predictions to be assessed by proper scoring rules ###################################################################### ## do 1-step ahead predictions for the last two years tp <- nrow(fluBYBW)-2*52 ## for this demo: only calculate pseudo-predictions based on the final fit ## to avoid the time-consuming sequential refitting at each step. TYPE <- "final" ## use "rolling" for true one-step-ahead predictions => TAKES ~8 HOURS! 
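## Added aside (not run; a sketch only, not part of the original demo):
## with type = "rolling", oneStepAhead() sequentially refits the model at
## every time point to obtain genuine one-step-ahead forecasts, e.g.:
if (FALSE) { # disabled because of the considerable runtime noted above
  val_A0_rolling <- oneStepAhead(res_A0, tp = tp, type = "rolling")
}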
val_A0 <- oneStepAhead(res_A0, tp=tp, type=TYPE)
val_B0 <- oneStepAhead(res_B0, tp=tp, type=TYPE)
val_C0 <- oneStepAhead(res_C0, tp=tp, type=TYPE)
val_A1 <- oneStepAhead(res_A1, tp=tp, type=TYPE)
val_B1 <- oneStepAhead(res_B1, tp=tp, type=TYPE)
val_C1 <- oneStepAhead(res_C1, tp=tp, type=TYPE)
val_A2 <- oneStepAhead(res_A2, tp=tp, type=TYPE)
val_B2 <- oneStepAhead(res_B2, tp=tp, type=TYPE)
val_C2 <- oneStepAhead(res_C2, tp=tp, type=TYPE)
val_D  <- oneStepAhead(res_D,  tp=tp, type=TYPE)

## compute scores
vals <- ls(pattern="val_")
nam <- substring(vals,first=5,last=6)
whichScores <- c("logs", "rps", "ses")
scores_i <- vector(mode="list", length=length(vals))
meanScores <- NULL

for(i in seq_along(vals)){
  sc <- scores(get(vals[i]), which=whichScores, individual=TRUE, reverse=TRUE)
  ## reverse=TRUE => same permutation test results as in surveillance < 1.16.0
  scores_i[[i]] <- sc
  meanScores <- rbind(meanScores,colMeans(sc, dims=2))
}
names(scores_i) <- nam
rownames(meanScores) <- nam
print(meanScores)

## Note that the above use of "final" fitted values instead of "rolling"
## one-step-ahead predictions leads to different mean scores than reported
## in Paul & Held (2011, Table IV).

## assess statistical significance of score differences
compareWithBest <- function(best, whichModels, nPermut=9999, seed=1234){
  set.seed(seed)
  pVals <- NULL
  for(score in seq_along(whichScores)){
    p <- c()
    for(model in whichModels){
      p <- c(p, if(model==best) NA else
             permutationTest(scores_i[[model]][,,score],
                             scores_i[[best]][,,score],
                             plot=interactive(),nPermutation=nPermut,
                             verbose=TRUE)$pVal.permut)
    }
    pVals <- cbind(pVals,p)
  }
  return(pVals)
}

pVals_flu <- compareWithBest(best=9, whichModels=1:10,
                             nPermut=999, # reduced for this demo
                             seed=2059710987)
rownames(pVals_flu) <- nam
colnames(pVals_flu) <- whichScores
print(pVals_flu)

################################################################################
### File: surveillance/demo/v77i11.R
################################################################################

################################################################################
### Replication code from Meyer et al. (2017, JSS),
### illustrating the spatio-temporal endemic-epidemic modelling frameworks
### 'twinstim', 'twinSIR', and 'hhh4'. The full reference is:
###
### Meyer, Held, and Hoehle (2017):
### Spatio-Temporal Analysis of Epidemic Phenomena Using the R Package surveillance.
### Journal of Statistical Software, 77(11), 1-55.
### https://doi.org/10.18637/jss.v077.i11
###
### Changes to the original replication script are marked with a "##M" comment.
###
### Copyright (C) 2017-2019 Sebastian Meyer, Leonhard Held, Michael Hoehle
###
### This file is part of the R package "surveillance",
### free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
################################################################################

##M use old RNGversion to reproduce published simulation results in Section 3.4
RNGversion("3.3.3") # sampling has changed in R 3.6.0

################################################################################
## Section 3: Spatio-temporal point pattern of infective events
################################################################################

library("surveillance") # you should also have installed the suggested packages

## 3.2.
Data structure: 'epidataCS' data("imdepi", package = "surveillance") events <- SpatialPointsDataFrame( coords = coordinates(imdepi$events), data = marks(imdepi, coords = FALSE), proj4string = imdepi$events@proj4string # ETRS89 projection (+units = km) ) stgrid <- imdepi$stgrid[,-1] load(system.file("shapes", "districtsD.RData", package = "surveillance")) imdepi <- as.epidataCS(events = events, W = stateD, stgrid = stgrid, qmatrix = diag(2), nCircle2Poly = 16) summary(events) .stgrid.excerpt <- format(rbind(head(stgrid, 3), tail(stgrid, 3)), digits = 3) rbind(.stgrid.excerpt[1:3, ], "..." = "...", .stgrid.excerpt[4:6, ]) imdepi summary(imdepi) par(mar = c(5, 5, 1, 1), las = 1) plot(as.stepfun(imdepi), xlim = summary(imdepi)$timeRange, xaxs = "i", xlab = "Time [days]", ylab = "Current number of infectives", main = "") ## axis(1, at = 2557, labels = "T", font = 2, tcl = -0.3, mgp = c(3, 0.3, 0)) par(las = 1) plot(imdepi, "time", col = c("indianred", "darkblue"), ylim = c(0, 20)) par(mar = c(0, 0, 0, 0)) plot(imdepi, "space", lwd = 2, points.args = list(pch = c(1, 19), col = c("indianred", "darkblue"))) layout.scalebar(imdepi$W, scale = 100, labels = c("0", "100 km"), plot = TRUE) ## animation::saveHTML( ## animate(subset(imdepi, type == "B"), interval = c(0, 365), time.spacing = 7), ## nmax = Inf, interval = 0.2, loop = FALSE, ## title = "Animation of the first year of type B events") eventDists <- dist(coordinates(imdepi$events)) (minsep <- min(eventDists[eventDists > 0])) set.seed(321) imdepi_untied <- untie(imdepi, amount = list(s = minsep / 2)) imdepi_untied_infeps <- update(imdepi_untied, eps.s = Inf) imdsts <- epidataCS2sts(imdepi, freq = 12, start = c(2002, 1), tiles = districtsD) par(las = 1, lab = c(7, 7, 7), mar = c(5, 5, 1, 1)) plot(imdsts, type = observed ~ time) plot(imdsts, type = observed ~ unit, population = districtsD$POPULATION / 100000) ## 3.3. Modeling and inference (endemic <- addSeason2formula(~offset(log(popdensity)) + I(start / 365 - 3.5), period = 365, timevar = "start")) imdfit_endemic <- twinstim(endemic = endemic, epidemic = ~0, data = imdepi_untied, subset = !is.na(agegrp)) summary(imdfit_endemic) imdfit_Gaussian <- update(imdfit_endemic, epidemic = ~type + agegrp, siaf = siaf.gaussian(F.adaptive = TRUE), ##M set F.adaptive=TRUE for replication with surveillance >= 1.15.0 start = c("e.(Intercept)" = -12.5, "e.siaf.1" = 2.75), control.siaf = list(F = list(adapt = 0.25), Deriv = list(nGQ = 13)), cores = 2 * (.Platform$OS.type == "unix"), model = TRUE) print(xtable(imdfit_Gaussian, caption = "Estimated rate ratios (RR) and associated Wald confidence intervals (CI) for endemic (\\code{h.}) and epidemic (\\code{e.}) terms. 
This table was generated by \\code{xtable(imdfit\\_Gaussian)}.", label = "tab:imdfit_Gaussian"), sanitize.text.function = NULL, sanitize.colnames.function = NULL, sanitize.rownames.function = function(x) paste0("\\code{", x, "}")) R0_events <- R0(imdfit_Gaussian) tapply(R0_events, marks(imdepi_untied)[names(R0_events), "type"], mean) imdfit_powerlaw <- update(imdfit_Gaussian, data = imdepi_untied_infeps, siaf = siaf.powerlaw(), control.siaf = NULL, start = c("e.(Intercept)" = -6.2, "e.siaf.1" = 1.5, "e.siaf.2" = 0.9)) imdfit_step4 <- update(imdfit_Gaussian, data = imdepi_untied_infeps, siaf = siaf.step(exp(1:4 * log(100) / 5), maxRange = 100), control.siaf = NULL, start = c("e.(Intercept)" = -10, setNames(-2:-5, paste0("e.siaf.", 1:4)))) par(mar = c(5, 5, 1, 1)) set.seed(2) # Monte-Carlo confidence intervals plot(imdfit_Gaussian, "siaf", xlim = c(0, 42), ylim = c(0, 5e-5), lty = c(1, 3), xlab = expression("Distance " * x * " from host [km]")) plot(imdfit_powerlaw, "siaf", add = TRUE, col.estimate = 4, lty = c(2, 3)) plot(imdfit_step4, "siaf", add = TRUE, col.estimate = 3, lty = c(4, 3)) legend("topright", legend = c("Power law", "Step (df = 4)", "Gaussian"), col = c(4, 3, 2), lty = c(2, 4, 1), lwd = 3, bty = "n") AIC(imdfit_endemic, imdfit_Gaussian, imdfit_powerlaw, imdfit_step4) ## Example of AIC-based stepwise selection of the endemic model imdfit_endemic_sel <- stepComponent(imdfit_endemic, component = "endemic") ## -> none of the endemic predictors is removed from the model par(mar = c(5, 5, 1, 1), las = 1) intensity_endprop <- intensityplot(imdfit_powerlaw, aggregate = "time", which = "endemic proportion", plot = FALSE) intensity_total <- intensityplot(imdfit_powerlaw, aggregate = "time", which = "total", tgrid = 501, lwd = 2, xlab = "Time [days]", ylab = "Intensity") curve(intensity_endprop(x) * intensity_total(x), add = TRUE, col = 2, lwd = 2, n = 501) ## curve(intensity_endprop(x), add = TRUE, col = 2, lty = 2, n = 501) text(2500, 0.36, labels = "total", col = 1, pos = 2, font = 2) text(2500, 0.08, labels = "endemic", col = 2, pos = 2, font = 2) ## meanepiprop <- integrate(intensityplot(imdfit_powerlaw, which = "epidemic proportion"), ## 50, 2450, subdivisions = 2000, rel.tol = 1e-3)$value / 2400 for (.type in 1:2) { print(intensityplot(imdfit_powerlaw, aggregate = "space", which = "epidemic proportion", types = .type, tiles = districtsD, sgrid = 5000, col.regions = grey(seq(1,0,length.out = 10)), at = seq(0,1,by = 0.1))) grid::grid.text("Epidemic proportion", x = 1, rot = 90, vjust = -1) } par(mar = c(5, 5, 1, 1)) checkResidualProcess(imdfit_powerlaw) ## 3.4. 
Simulation imdsims <- simulate(imdfit_powerlaw, nsim = 30, seed = 1, t0 = 1826, T = 2555, data = imdepi_untied_infeps, tiles = districtsD) table(imdsims[[1]]$events$source > 0, exclude = NULL) .t0 <- imdsims[[1]]$timeRange[1] .cumoffset <- c(table(subset(imdepi, time < .t0)$events$type)) par(mar = c(5, 5, 1, 1), las = 1) plot(imdepi, ylim = c(0, 20), col = c("indianred", "darkblue"), subset = time < .t0, cumulative = list(maxat = 336), xlab = "Time [days]") for (i in seq_along(imdsims$eventsList)) plot(imdsims[[i]], add = TRUE, legend.types = FALSE, col = scales::alpha(c("indianred", "darkblue"), 0.5), subset = !is.na(source), # exclude events of the prehistory cumulative = list(offset = .cumoffset, maxat = 336, axis = FALSE), border = NA, density = 0) # no histogram for simulations plot(imdepi, add = TRUE, legend.types = FALSE, col = 1, subset = time >= .t0, cumulative = list(offset = .cumoffset, maxat = 336, axis = FALSE), border = NA, density = 0) # no histogram for the last year's data abline(v = .t0, lty = 2, lwd = 2) ################################################################################ ## Section 4: SIR event history of a fixed population ################################################################################ library("surveillance") # you should also have installed the suggested packages ## 4.2. Data structure: 'epidata' data("hagelloch", package = "surveillance") head(hagelloch.df, n = 5) hagelloch <- as.epidata(hagelloch.df, t0 = 0, tI.col = "tI", tR.col = "tR", id.col = "PN", coords.cols = c("x.loc", "y.loc"), f = list(household = function(u) u == 0, nothousehold = function(u) u > 0), w = list(c1 = function (CL.i, CL.j) CL.i == "1st class" & CL.j == CL.i, c2 = function (CL.i, CL.j) CL.i == "2nd class" & CL.j == CL.i), keep.cols = c("SEX", "AGE", "CL")) head(hagelloch, n = 5) par(mar = c(5, 5, 1, 1)) plot(hagelloch, xlab = "Time [days]") par(mar = c(5, 5, 1, 1)) hagelloch_coords <- summary(hagelloch)$coordinates plot(hagelloch_coords, xlab = "x [m]", ylab = "y [m]", pch = 15, asp = 1, cex = sqrt(multiplicity(hagelloch_coords))) legend(x = "topleft", pch = 15, legend = c(1, 4, 8), pt.cex = sqrt(c(1, 4, 8)), title = "Household size") ## 4.3. Modeling and inference hagellochFit <- twinSIR(~household + c1 + c2 + nothousehold, data = hagelloch) summary(hagellochFit) ##M Note: OSAIC is 1244.9 (with quadprog <= 1.5-7) or 1244.8 (with 1.5-8) exp(confint(hagellochFit, parm = "cox(logbaseline)")) prof <- profile(hagellochFit, list(c(match("c1", names(coef(hagellochFit))), NA, NA, 25), c(match("c2", names(coef(hagellochFit))), NA, NA, 25))) prof$ci.hl plot(prof) par(mar = c(5, 5, 1, 1)) plot(hagellochFit, which = "epidemic proportion", xlab = "time [days]") checkResidualProcess(hagellochFit, plot = 1) knots <- c(100, 200) fstep <- list( B1 = function(D) D > 0 & D < knots[1], B2 = function(D) D >= knots[1] & D < knots[2], B3 = function(D) D >= knots[2]) hagellochFit_fstep <- twinSIR( ~household + c1 + c2 + B1 + B2 + B3, data = update(hagelloch, f = fstep)) set.seed(1) AIC(hagellochFit, hagellochFit_fstep) ##M Note: OSAIC values slightly changed (abs. diff. < 0.2) with quadprog 1.5-8 ################################################################################ ## Section 5. Areal time series of counts ################################################################################ library("surveillance") # you should also have installed the suggested packages ## 5.2. 
Data structure: 'sts' ## extract components from measlesWeserEms to reconstruct data("measlesWeserEms", package = "surveillance") counts <- observed(measlesWeserEms) map <- measlesWeserEms@map populationFrac <- measlesWeserEms@populationFrac weserems_nbOrder <- nbOrder(poly2adjmat(map), maxlag = 10) measlesWeserEms <- sts(observed = counts, start = c(2001, 1), frequency = 52, neighbourhood = weserems_nbOrder, map = map, population = populationFrac) plot(measlesWeserEms, type = observed ~ time) plot(measlesWeserEms, type = observed ~ unit, population = measlesWeserEms@map$POPULATION / 100000, labels = list(font = 2), colorkey = list(space = "right"), sp.layout = layout.scalebar(measlesWeserEms@map, corner = c(0.05, 0.05), scale = 50, labels = c("0", "50 km"), height = 0.03)) plot(measlesWeserEms, units = which(colSums(observed(measlesWeserEms)) > 0)) ## animation::saveHTML( ## animate(measlesWeserEms, tps = 1:52, total.args = list()), ## title = "Evolution of the measles epidemic in the Weser-Ems region, 2001", ## ani.width = 500, ani.height = 600) ## ## to perform the following analysis using biweekly aggregated measles counts: ## measlesWeserEms <- aggregate(measlesWeserEms, by = "time", nfreq = 26) ## 5.3. Modeling and inference measlesModel_basic <- list( end = list(f = addSeason2formula(~1 + t, period = measlesWeserEms@freq), offset = population(measlesWeserEms)), ar = list(f = ~1), ne = list(f = ~1, weights = neighbourhood(measlesWeserEms) == 1), family = "NegBin1") measlesFit_basic <- hhh4(stsObj = measlesWeserEms, control = measlesModel_basic) summary(measlesFit_basic, idx2Exp = TRUE, amplitudeShift = TRUE, maxEV = TRUE) plot(measlesFit_basic, type = "season", components = "end", main = "") confint(measlesFit_basic, parm = "overdisp") AIC(measlesFit_basic, update(measlesFit_basic, family = "Poisson")) districts2plot <- which(colSums(observed(measlesWeserEms)) > 20) plot(measlesFit_basic, type = "fitted", units = districts2plot, hide0s = TRUE) Sprop <- matrix(1 - measlesWeserEms@map@data$vacc1.2004, nrow = nrow(measlesWeserEms), ncol = ncol(measlesWeserEms), byrow = TRUE) summary(Sprop[1, ]) Soptions <- c("unchanged", "Soffset", "Scovar") SmodelGrid <- expand.grid(end = Soptions, ar = Soptions) row.names(SmodelGrid) <- do.call("paste", c(SmodelGrid, list(sep = "|"))) measlesFits_vacc <- apply(X = SmodelGrid, MARGIN = 1, FUN = function (options) { updatecomp <- function (comp, option) switch(option, "unchanged" = list(), "Soffset" = list(offset = comp$offset * Sprop), "Scovar" = list(f = update(comp$f, ~. + log(Sprop)))) update(measlesFit_basic, end = updatecomp(measlesFit_basic$control$end, options[1]), ar = updatecomp(measlesFit_basic$control$ar, options[2]), data = list(Sprop = Sprop)) }) aics_vacc <- do.call(AIC, lapply(names(measlesFits_vacc), as.name), envir = as.environment(measlesFits_vacc)) aics_vacc[order(aics_vacc[, "AIC"]), ] measlesFit_vacc <- measlesFits_vacc[["Scovar|unchanged"]] coef(measlesFit_vacc, se = TRUE)["end.log(Sprop)", ] measlesFit_nepop <- update(measlesFit_vacc, ne = list(f = ~log(pop)), data = list(pop = population(measlesWeserEms))) measlesFit_powerlaw <- update(measlesFit_nepop, ne = list(weights = W_powerlaw(maxlag = 5))) measlesFit_np2 <- update(measlesFit_nepop, ne = list(weights = W_np(maxlag = 2))) library("lattice") trellis.par.set("reference.line", list(lwd = 3, col="gray")) trellis.par.set("fontsize", list(text = 14)) plot(measlesFit_powerlaw, type = "neweights", plotter = stripplot, panel = function (...) 
{panel.stripplot(...); panel.average(...)}, jitter.data = TRUE, xlab = expression(o[ji]), ylab = expression(w[ji])) ## non-normalized weights (power law and unconstrained second-order weight) local({ colPL <- "#0080ff" ogrid <- 1:5 par(mar = c(3.6, 4, 2.2, 2), mgp = c(2.1, 0.8, 0)) plot(ogrid, ogrid^-coef(measlesFit_powerlaw)["neweights.d"], col = colPL, xlab = "Adjacency order", ylab = "Non-normalized weight", type = "b", lwd = 2) matlines(t(sapply(ogrid, function (x) x^-confint(measlesFit_powerlaw, parm = "neweights.d"))), type = "l", lty = 2, col = colPL) w2 <- exp(c(coef(measlesFit_np2)["neweights.d"], confint(measlesFit_np2, parm = "neweights.d"))) lines(ogrid, c(1, w2[1], 0, 0, 0), type = "b", pch = 19, lwd = 2) arrows(x0 = 2, y0 = w2[2], y1 = w2[3], length = 0.1, angle = 90, code = 3, lty = 2) legend("topright", col = c(colPL, 1), pch = c(1, 19), lwd = 2, bty = "n", inset = 0.1, y.intersp = 1.5, legend = c("Power-law model", "Second-order model")) }) AIC(measlesFit_nepop, measlesFit_powerlaw, measlesFit_np2) measlesFit_ri <- update(measlesFit_powerlaw, end = list(f = update(formula(measlesFit_powerlaw)$end, ~. + ri() - 1)), ar = list(f = update(formula(measlesFit_powerlaw)$ar, ~. + ri() - 1)), ne = list(f = update(formula(measlesFit_powerlaw)$ne, ~. + ri() - 1))) summary(measlesFit_ri, amplitudeShift = TRUE, maxEV = TRUE) head(ranef(measlesFit_ri, tomatrix = TRUE), n = 3) stopifnot(ranef(measlesFit_ri) > -1.6, ranef(measlesFit_ri) < 1.6) for (comp in c("ar", "ne", "end")) { print(plot(measlesFit_ri, type = "ri", component = comp, col.regions = rev(cm.colors(100)), labels = list(cex = 0.6), at = seq(-1.6, 1.6, length.out = 15))) } plot(measlesFit_ri, type = "fitted", units = districts2plot, hide0s = TRUE) plot(measlesFit_ri, type = "maps", prop = TRUE, labels = list(font = 2, cex = 0.6)) tp <- c(65, 77) models2compare <- paste0("measlesFit_", c("basic", "powerlaw", "ri")) measlesPreds1 <- lapply(mget(models2compare), oneStepAhead, tp = tp, type = "final") stopifnot(all.equal(measlesPreds1$measlesFit_powerlaw$pred, fitted(measlesFit_powerlaw)[tp[1]:tp[2], ], check.attributes = FALSE)) stopifnot(identical( measlesFit_powerlaw$loglikelihood, -sum(scores(oneStepAhead(measlesFit_powerlaw, tp = 1, type = "final"), which = "logs", individual = TRUE)))) SCORES <- c("logs", "rps", "dss", "ses") measlesScores1 <- lapply(measlesPreds1, scores, which = SCORES, individual = TRUE, reverse = TRUE) ##M for replication with surveillance >= 1.16.0 t(sapply(measlesScores1, colMeans, dims = 2)) measlesPreds2 <- lapply(mget(models2compare), oneStepAhead, tp = tp, type = "rolling", which.start = "final", cores = 2 * (.Platform$OS.type == "unix")) measlesScores2 <- lapply(measlesPreds2, scores, which = SCORES, individual = TRUE, reverse = TRUE) ##M for replication with surveillance >= 1.16.0 t(sapply(measlesScores2, colMeans, dims = 2)) set.seed(321) sapply(SCORES, function (score) permutationTest( measlesScores2$measlesFit_ri[, , score], measlesScores2$measlesFit_basic[, , score])) calibrationTest(measlesPreds2[["measlesFit_ri"]], which = "rps") par(mfrow = sort(n2mfrow(length(measlesPreds2))), mar = c(4.5, 4.5, 3, 1)) for (m in models2compare) pit(measlesPreds2[[m]], plot = list(ylim = c(0, 1.25), main = m)) ## 5.4. 
Simulation

(y.start <- observed(measlesWeserEms)[52, ])

measlesSim <- simulate(measlesFit_ri, nsim = 100, seed = 1, subset = 53:104,
                       y.start = y.start)
summary(colSums(measlesSim, dims = 2))

par(las = 1, mar = c(5, 5, 1, 1))
plot(measlesSim, "time", ylim = c(0, 100))

################################################################################
### File: surveillance/demo/00Index
################################################################################

cost         Code from the first paper about the R package surveillance
             (Hoehle, 2007, Comput Stat) illustrating some methods for
             aberration detection
biosurvbook  Code from the book chapter on Danish mortality monitoring
             (Hoehle and Mazick, 2010)
fluBYBW      Code from Paul and Held (2011, Stat Med) to illustrate hhh4()
             model fitting and predictive model assessment with proper
             scoring rules: an application to weekly influenza counts in
             Southern Germany
v77i11       Replication code from Meyer et al. (2017, JSS), illustrating
             the spatio-temporal endemic-epidemic modelling frameworks
             'twinstim', 'twinSIR', and 'hhh4'

################################################################################
### Directory: surveillance/data/
### Binary .RData files; their compressed contents are not human-readable
### and are omitted here:
###   influMen.RData
###   momo.RData
###   k1.RData
###   measlesWeserEms.RData
###   fooepidata.RData
################################################################################
surveillance/data/

[binary .RData payloads omitted; the data directory contains:
influMen.RData, momo.RData, k1.RData, measlesWeserEms.RData,
fooepidata.RData, n1.RData, s2.RData, deleval.RData, fluBYBW.RData,
m3.RData, abattoir.RData, imdepifit.RData, imdepi.RData]
į,_8ؕ{^c .kM*/Ԙ\zxZ޵B(?HXp+Im- lYa֬֙^(8#&?Vmo8~0Ohp+W 85c .0S_kIQONn[AL( OSc˝\TȿR5*43sp,ly7Λ3c:NhҬ`ٟƧ\+{ ؖAT?FX~+[1* PΉ> PNŋ[v1l,޻~RH|`_ʲR =ptKCdbq[k p%BFa+>k T!\&uJ%R UNXٮ,ЉnbɛDQъoupH z<('K $9( _)"=Qh3 ] o[Q%+5TyN9_+Qu^م2={'"N^P:@n?ɩ[iw GV׬2W,+L P;͹^xU aF?C1p_iUŋ&ZyQ2nwj="@Iи!;nbE 5Kt=#]):Y_" 섞R: cﭓj9C?dZ!\ʁ1McZřbb"-)8U# Nt3[JG8mWu'`rzH"CP˰SȼEM8+3@<:ļ>}ş?p HqIX7kQGIa wgW0ZS9n1~X {p {5e-#ζu˓RȌ5$#;ytTpQ+3G! fmIQDq t+~4v7b/k?s mi TʝR?1}phV9ExQnk9F$?i}3pXӣ*Je+&{2 Pr.`USHSZHBDgp4" B(z&g #9r3"/AX,NWBy,2iȄZQmZ Թk^{LetdTMgeu$v0jfHS9bm˩BM{ўr4c}=96Uth{? T-U[7a 4NܐŞTvUGx){]x4JaCQ 3%ga=ഢpD tWoϟ>$h쾤~u$2ip6JCh)XX.?9%\l?x R4w [=E +!Q˱kjV?6 U+%VHƠzqlVVP]j2If% iCMp 9}رϞ=j4I@x{ 'C~i ׶ծ|jP v"va@Ig%kN{_-LrtKp"9,j8jͿ}ڸG)vLq]`Nz±%:$׭ @ UII'3[zo"9ƙ=kf|,XB㥔yKEwGli{[Tۣui=.7N1G@BMevfC{ E0>"E^J2ֺJϼ:¤]ZXKNCʄ#Gs<o7kqoTr"HHп)<0㭙B`L J^mzYKے1s}'^=[rqScmMĽ$_ii?,,У*)6I;LLuK4%Kyj[p4F@7+'axk/]^Q 5J] 'Q$3ˬD~>b): nKs_HTtDSIǸ0G!ƕ1ӜJp0|ÅX[I?Gta-KGRE@db%iD"!ZQ6+z@"9&+}"~ }+WܗPcҏBoĜ܏/U'Sg% |#H7#['\iKdn" |>Nu?(ZfSZq+ p;`(Z2: -D-Ljs/BLpr>u,*A/دA ecnW'a +obώ/,A /Ķ9 8a O+0SehA{]pK3 e[b;5cYgg[^`h E&= Դ7]\ pzlj3;7({Sa"i*W:׸RyƥM=@g8Crbq\vLJUPmC AÎ:!1 qdAG_ixT5 Ȣ*kqD _]9¶ qȤ7PAlr.#'1^d;&cf%a=?r쬓+㧋7tKlxӪyR_X d̙zU8 ;Ժ"?6/a손KζFm!śɍּl{q zQ߉x>jۨףBfݭ-3OajATD Ƅvn}\[*淎M$CUfݯ5oC[*lch[_ȕ%ZMgˎj )*:$% \G!?;m6`DR} Կc(2(_軴1;xֆAvKoKIy* sE3I|q﯁^YcB"3DÓ-I&N )Ddxs,g-q[n.f8O`P2.!]xϮ& J5xvK\NȢϬO![k@ ZY܃J%u `<~UQρ&wB݈r+gqՅ @5Q=? @r6FjTw @x=\Ì PcPv"eB$@2]™Vrj(o+rY?DuqEN3;qXM]NԏLإ`T.ARn*?8PJ΋P{WOq|GC-iʫ;o^a#`![:&~q910h4J=cVTJ_E^}i7mGKc{MAJtc}F<[S ڙ{ 6ɩDz(/qlU暕g*&/s/%cBWOu` 6ulA0wW K%Nn9; y *Gf|gWRPb!0iְJWy-=,Cҽ KsÀ)<1q#[=xS\\o]0ܠ |uE6T 9Vdlpi.> l7[;"6Ș\SN5t ' 04FV5c[H\IsOĥ[ @/د OܣWx V6kkNhi}ìLj|5g[ RT1p{fc*}i:) Z0`2}qN՗LTb3}nihrkQ9tNtU([BU_7es:穹W2ޝF M M^Q>9z> )'r*QEa[u R{AS]Dn ừ';K~a{{m`7p61} 7>SqsXIUq,A̗ uK'mbamŲ9sh[?N;v)ŻV1k(TJ ȸYC>{lՋQ q"1t"2nj٢la!5X_:fGIf; saV]ZZP}/ud(/ @~qNra·brDY&p&s|elL;.Tmss\b3l@m!YͩJPSAݣ;LF+_ OO=Сm}@V_~θ)D1a9F|DGAQVwQS%YB/> B XUojQf{סl;R 9t8/{vd+4J*pNP&N0-#d@X_C,ߩs%w}GuZd+$ӛ[%}yaxa$5^jRXZ#q'>~lJ7/qš7"d8Um9~6B ƅ ɷD9]ne)sTIetĻXA_n^k.l;}:`S=9av%/F3L|hh^e-HMa#1Jw@"އChլ ;}ZBFQ!dlkHGw+)ib 7* ۓ%Vۚ]| m4ơ/&[=*k?1 ~'عMx,jc;ߘ$~18 k/ ưO\:-,cdv,KZ 'VP kCC!&k7~} 7-[-l {`E *^(jyyxci7ԗFk˟KR,'df._0Lä3? D,+gO}`ugYs##_4wumqM:A<:Qc9:`e<#mӒ]vxEhŕb+WQ1G29\KT"eh]f^!\(`"UX?7~FsXʦs XQhyaQ=vҿr6w©b&F$ ~OB> K1}1Syt]R0B hP";jwmD*X!>UNx$hjP$>`lz6tMY(lk8{d&TUF49XrkȯP'1{J?-߁瞜kRF})NG{G]: !vw\ˎ ,YHР7+K>wB|T^4t%xu*W;Gm[MiS;BĄGn-WJ'ndgi]8/Vb:fLucYF!ˬ 7yY"aS#ЗE펎۹JsTjAъ>E70ԏK!0\->ύ1CQnղ~""9(YTix :LboAwʛ*]. 
($?B) i8s]ST FfΔ6M c;ljfÃث(ۿՇskjdNn U{Ųݿ{x+NY`ڐ{C|Ch(>CBuϣHe;^5ȍ#B0*fjK2؜e<1w1ȾH_X,?7o(:0c|ijv#뎈z#|9eV6S{M|`)Qn<$2ħ=J||BKf yx_qZX:/H!4< EӍ7#V@;t90,ު i5y2ĂݿqN)pB@&ҁ|bV|Nkt#6:hU\C!mVZvIbQ:K?]|俷zF6bమ5#Ϙt02uۄ@v %1BF(~ 75bi?WOn jL9cӜMI*uZ< ~ a:kYzQzl.˅r0ؼCm):NAMyv"U`l; Zʵy!%IU鳚Pkwh֥I-e9 %$s*:(@PJw\/x%͇@k`wfaV ^g_D&hDV}l{J<*5Z e[ikThU#)̷─C6By &)DL/v`U$B̥a6†mvIK\^ͳc(D֭r7c@oIm @3U9JT1WԽ;h1D7N4b y,Wz8M3/Gx[=.-;t% WAEZ6OQ^0% lQ*%߿_4< Gsi1_ ^݁ x;^< fv/!|_R/ `|-(qĮDwKuH2A0K{!uYM[u$R[8cT>y[3neԟ$`rOvKv v} /sjZ:>-: @Zcxl4\S|iLcӢp}MY=U`{TǼ[SeZѮsiW iWx#聠~{ o(9{oL< <5O#2 nP5HaE) ҇Bhv؁ y܌97 J7 mg,~rfn>Yδ{aB&4jeiXMGHS=fqE4Lv9Ю cM|oeiTbskU?_l ꫡ7>ݐ%J}zLvF1Cop'hD7^~]9Nv9Fg ZvK- ,FDͭebCX8ar2` *rD+6(XXqj 9 W} 4%?vё{F6 zƻ):k֬?mwj֍񢦱 |v|}Ae6]Z^5&=m*!َmM9$4 E(.ۘtk6عI1`(}esꀣvoZiC嘶G.ݚdNQR8 ɂ8171Q~OBU7{f^ "8ՇP dQ2Pɦ#puҖ΁a+t?YE 4Yôeܪ߿}±5T4i,4b킿>Q"R%e`Ydƛ.1:U#/ȕQ{- eپi66hH),@T%Ow _t\FMeh,?-у5F;O0g2 > ޗyYa~O+(`Ыfz`%J%wbTLUP5ާGKD'ϡDaY^P<ͮ;d]5^fGs[T-$[pWl}pG|)q'͛Wbk-"Vt0I| W=jȨ4lHa`Kcp1hFȜf 46S,|ݞBNftd@{D~.B-E^Tb1:؍0f-d!ۘT,f #Ƿ.Zq"w&hCP"s̶c>**ۯIK>A܄6/`_"cNM6}L9uDțܹ,U$%VGQQ$*.tZZ$N]^4Zo .fNpcGwItz@_8y<3Ywۓ?_?U"hӂI`6U\^ZΝCQF!N(.&u3Ѱ 56O-x2/̟N^<%$ۈmAr bF-l`;Q-6ypO\ɂa i=z 󺫪Q'T)kʡl `KBbJz֛C<(*"@h{hUu>6 e:Gy+zG`j|#@GC26?`yN5x1BYEG[U2B<(__`+6ͨhSˍm~ƃhz~LoTX9jk9 σe$#Rо2utyqcw=meE9$aboѶc˫aܒNJ"|\';ym95! +Pg ·-ͮO[ nP0vr,3_1r#q!!{{hχ3i6 .H{!|e"C̍%]! GCFtM@TKدB0pǥS#OLR+nsBB; Nk6νʖ:J EbLd&+} +j6Kj߲6i夢b#Dҁ|MH!}| y. ؼKFo;rIfuޏIΗ(+TPʢ ql@3-čN M@UZEGs攘c+P6ɏ6Zg;~mng"87#j `&%1AXb9% M`]D:Y)7w[ykO{(H=)Q^i%D/lg?i'Ժ:o\)"uԖ;\Q9y E!Ӯd*.C&v l[ .2Qd?rv{"zl6m~cpur?Okcچp#h]khs&@Cᡓ~!c:Q\TYX4K7&`MItڬÌӣvp04H|2̡<пلɯ$9EZYxEe6x!w,fyuMa ^yOTɘW6F\5L٘R@e#%WY/I l),V4ߩC-]Y˅T)\&e7S3ɖnj)#\0;MegOKDi9ҽXUcvA6\1pScDAA[ɂ<pyh,x$w/(\!"´˙N0WC?{Q~b@P|['>(7P(ϓ1؛~lA "|[dY}H 8Jfu"v:d3gtPK-Z/~(&1J#͓nQ:ƫVJ3ObM0U.5DleugφAÁ<3yA4Lc# ';·D6݄WYt(G@,TRsDJfV%ow]Ң& zDa96mĵqf9Cfu5H%@w!'v[*`* c8 t_o}~w菀֜S>mѷq}ֿJ.9Ã1Wֳ q&gA*PR0FiXq|$aW{t*>/;{ghJJh_/@ ]=s3#7xHT3n T]Yho #<dΉQm$V!7> L55t%n'0BDe2{cJO%7uMs^Yp"8@TBj_݁29u;Qf8L.E;"u&*DZ1R8;>mP}4oVzŒƕد%rjÒ[!jݟ+(0*) S'ԥ5n#bmCI{yac;GHMe@A3`JS i[1AOkCY#WR g1o+DZcJ1Ί[(7[]"?:v MnǟJp_"Y6OD,U>4\ $vdvN$O#jJqu͠XjHav.Z\NpQt/IIXB ΫMI!I9_jT˶GV;LǓ'LiGCG8*v\G?K಑Â˭mi{v*pw]<#΁R25?r7(,IYh X9 ׫ihZHD!'t%ތ"*DOڇ"!?57XԠlCslSa_DQ<:h MN ]VDM7SX!u2Ap0ئ*aG`Zc<ꀣ9wa87( 4,:՝IWP6K8,h1E'0-YU D1FwdXs"L# 3DfbXH¶Q "7EF$R!]^w߿E!*l!<:THw.Jvrl[Ä )qI}"@ʅemh-gt h1D z+%`gGq>\abϙg=_eǾ,h eshZH5PI5lg[W391̫$'}|'xKfO/DFVtn HE8i>i{+ɝ KS>m MStz !jBwXXROa lTT ^aMv kUNvt6I^22;"mntrcF[PE"ySFaOg"Ki.8rɌ˫Dz@iR#I;+E1։'㦺*Evj&hg}vaYÙ}$˵@sS;̳_G2"X[C :6ul\Ӥji yEUQSu_^ N] A~ce~x-ƏypȓO7CbFDaЯz 4%ÒBنpC|dbMl; ResmR2P#xȽIRK#YΨ(7/96hEH|ztX3zߔ(сcϪ'n5VC5MC/\1X6PoP|ǹDv;wB |j=T 0R+Yv5ʘ#{^&+NMbFg0sGB` &d'G˚n zPh.oGv=gPCt{^_k|^BXvh=PMl( *PƣV0.WP')ۓJu.JR!:}yE`W%) r%H )6NԹ<$yxq%BcC},i<j,;2eh61f'N]p)^$we?df $K}#7;=dJ36Ф?ڒ9v4(n`a.:5l C:07ɐR/X|2bg-=ڨ6QG2#vt{XXXETy:/Wʂ'lkVnG"8Y1Udg,P~q?_4^i-K?9\6UK :m\Aφ[ۡ3H}בmL2o83\$6xi#lMhE Ͽͷ>ZsbW)'Fާ}@ngN&Iͤ}pgho;ݷt4l-l_ϧ`ឡp ~y5H4ƈ P3ಌDݏ6j@29 &/oTY0-rۙ\:C4I,:qBUs/!b\(H/}N7$<RG6U)3NRw`H[xPzٺ\z]/YmRKXkla{^G9jdR%)ee41{fu o\)ْJ\9ol2VXXPzߪ˧5D'؎H[w0 \ilLE@T8l^qiY>ORּq$&TfIl1eG'( MB˂g~{L\hBؾ6ؽ\MTFuN`x>ߊj* ﮳ 1&d4eW%ve ON󙕿: !Cy+v_uήѧ̩Ӎ5UWhuWH?L9}K+MCcLúP) ǿ|. e[ŗQ#ژh)7$!/QN+a1k_DR 7b^|%5ކ ZܓC:% a l$4Wn&Bpց_̋HT O3e,aA Tp{ǰ-n!RhLYf50Eކ2 P4Vd\}i4<1Uxc|9)h2Zsoٮq"n'N5)fv*[DKDP`8F\Jyׄ{!d5shvy^03}{e *N3,829̊,Ʒ۟YN&tJ^V#' OkvnkFxK>m[Ds3(,%)ĕ}l)d4O8bIGر5#(勞U`jrl_j(Q b'Gi`Ap%'OIAoQYgsQ,3gP"DMRjvNJ.Qs\-c&sJcCBFuT剡"dZ~Ǎ8t{K`3uJjwT]O)*m}1Gr 6dlх X"Q*R滸֯>_v Rc8".!ڪ\ױn;pV/%C%w 8)r15n9׺,ZCC;nq V =>#:XYD0[X\Sq%G ٮ8dt6:3eFH ˫/,mJYs. 
%fbl/.nW[_z'&rRGC7N'4'Z!0r"R+f{wO=H0^L~RUhUr,eʫ-W$kҤd";{k""Gifii#[tH_KJ4Ğfy&K}^"w _F8 6o85PuUcfF]cH=ٲD h+k`]-Ҁb%NӬՒs$ۣAGTx-"xS³\Iov;+V_VRZ=wZ߮c/>Jv& qćEr[.kԘLsDz|y:R Ȩ5trs%y4E/O o96XSċ="n[L*&R f_'>I0-mU6Xk_kLPdoc`X)ZBf+#Oe-f[f.nz,Q8_иbaơxJrfM0K˻)Ђ%QbseCT7J *<%ᶚ _}ޗ{H#cZVUjaث 3y!vB+$'CVyBq>*vb|%_oRQďe>s d H W0!PTC;GjF\jQouO;Ŷwc6"#W1"?0wV\A+c0Ʉ|nѯ.N Q|`sLHFRE∏տeeY#[aKh@0*-`0[7% lSQg[b1tYRLFIUaj8 C|{ W6iD1U58 wUDN]\4Vv4eQ=-zN;uB7\ksofZq$'e.I\ Y(l{q~,dg .g8Q^;-I{ r[Dpi=l"-%bBǻDl Zt&IH¯Cao[r㗸?Òi*PB/ֵ; ABED4<(8,JڡaC -#HA&*|Cykq3k*$̨x`RF؂ݦ"w&g.AsFVQY9a!vÍ`q-p 1dHDհBhBE5m$N,&,b!Uy[I|[6pcK!!d?SXm>EȊIfQ莡օA*Yz]&3|,6ƑH'V({?G0S8hxԆorG|͚]r?*+"ɞ&!:ބ@W1ʜݾHgbH_Lp㪄@=n ֛Cm6l̐`kB75Xə5 ,X_[[CTh1ahxb}AI1 d.}.O6`-).QV9~Y~<%#c]a1(w+ue\Lߧх0U:ilk\|!d?_eAQP }2tL'Um̄iPwx<>ҩؖWYJ+oGПA{wgvs[i [D#3,i^7tQ9E :K qՓekfH=)sET?5ںKOE+*簃<'UBV.A=(j&͝`6>sr#+}.y`$dB-EԖbO(!١7]&QZ쎗B7/.zuHyZ`K`ZInRq|76L(s_vDJ;y+tLլPm[B%&vRǻfɂ>;I(G'F[㔷Co+F?4#6Ȕ4rG/ԊvNGN-Hl{ t_OXM(ƕȵNե:P Qؔ0Lʉ:>#,:) 5[?|+cNZbkr&l|J&F ,2PlNJm=NϻZ2?w6 <6LG6Ra8S+(()%ns홚Of!(Mqٓ]ɽ Z-Oܯ$ acdدEr]4T~>@]3(wXy=j8'"$wG|m#$_\#2g7fV%B#OYi/%Kf aƦba|WD}KJ&{TpG9up8CzN)|6 Ɓ"%zpr#4]Vx(vp0F,16zw}̬rr`0K_rԒ&֤G.5i:6~r"ߞ:af~^,.-Xwu݃"񉳻\No>U?ZE<&>P]3>}.́/Qc*B =Q JHYU(ُta26ɧH&3fhb]hd,Of=Я֞,pR  z j“!AVZ9xt<+NbWF9~5߇rNC* \]8~dTJ^1%̡<,ȔcD=6Lx9zr'_L#~&Ͼ2~gs 53@sp¹kkzNӼ-9J9=s杳Pl < aB-tE=no0*p)m#Ej'cC֮u3V{PX ܔC(v jTn>%R> ]osQѼ- dkvAM(g\ ?Ul_B;]/xy. E-V91jWf *^0S1:}9}T̍>ƽIuhȔkޙW Q.6\8 E%b!wZa"?ݲQ()EV!W~ڲ.Jn:ў6qT%j\C#eDTVJU//YnTRqLb+Z5(&jYQ20% {%_׉c i?h|/,Q%DAR(6>PG-F|*( ;%ʳM3t8GܔgR7SPĦyc}i'Kޅ!YLX\KY0[(F; & z-3[W-i=vOg fw{? 3`>~Z2sť!Nopӛyl40Jl%mS6Eϙ?3XǙYZR }B6t$=νk=΄ E[oWK6~kgˠ丷>yCq4%  r jBi62tHqǭ2t<%ޣ>_ xMm`j-lWEH֫#Urz_bH $鄴)p>|he:\:6ahcLڿՕT|Q6W ]lhZD17aw Yk.Z'*[`#eCTU0ƎGѻup!Xd\+7{ᘭjZOAԤy{(,J6 x5,Ӥ"x޾obJƻ-eDcISH=e9ikP{W -&`&5,!9Ecide{IO]FQ)y!@u7SҭeEj@ht~9 J*N}蟸 ɌxJC#1]VlX[jn &t6B}TwܥnɆs+줰,}:&p,: 0A AwdaUˈd#h}eO8i_%ȒcN{!9!wD[/Bڎ[32bPu(TYlBꘊ}nqu3o &c?a֕Ꙃ% b `Ě3 R%&W|qvby|!"-!!czP+U/v2O>$-4d%]6H W¯%ꢾs3ə y6|->=Cp r]kZqTf kӚ)hC2TYaPjzx^>Y//UxxA *2-yMnHI6{c{^ }g⋤uCo{*a)N+pbgT (j0k:4at¼`CD_OўW71KH` rl Z"P T3g9 r+y?oc t+W|KK)64Jፃ.րBh<֑}[.K"A쥰aKDmǻsC+T24{]H2Lx`b/CS`<{M,aK lb"x$wfR l*Y]K6C*δ՝9" bJKA| 28y]uJB}N_SӪ Ԯ)ťsQV?:_"te22 ¹Hp{9gytڗC" t* ba(nKkLՆi0 *wxìKvY<vE%\5F=kIJ&Ou >txoI<"T~4m#oDctq599d2kq o˲„b GL)604RXi/3xU8.`劧M5tuE }Rڕ0||eAXg|ٗMi(Y8HTe.E-"=|P4tlm kgUxN jYȎc\N \JPT7黥@*75S,mY]c9LY#G('. , 9Wpeg.vD!m`;Q8huܗkj[4~&9v&NLd4 ọn`uxԋU;66lpmYO J27xT]^b( i [1r?@ʍ1O8uma(oAUދV9}=lV5TX/hgy+$[SYJc=\|=< Ք.*Ox:/S۶ե=uJ-[|*CBn=m;p}!aiO.wd:v_b+SgZGKŅ|˱eFBu3ODQ;.sj ,DH7A8>DFx[(jd@eg$ܱ2hMטɤ`f~SD.BB(, pRߓ3H>g{ սq#vZw٣,|zd oCϞ7-VXTUmL,ab 9`pSoa/9x]CG4ȽK@oXP"b++5OmqJ]%Q/ Fhj`heF&OQSB,ե $S(aʊGً~[ kt޼ڞ,c 3!ZNA%$zq̓Zv+C`Rf(]4+ B5 J~TG.vꟂyU0\#x _OÑmMP; O3BK?2PK@Z9>կ">PqbbNf!ܴ:(a A D-cyOϺ>1\/غ% @@G;$~G'5"Rػ}кޯH\2k<5i9(/G(ɹI9 ,*cV3m(HC,cdtMC>?X? ya1n @ aYi@ɯOr~pW,uDvE=ZS9΄8O@2ec=Jdb$V %bE-Xf L4/{\zFt}J8KKY R)IKG5o8X"x# AĀpyKĜVmS?(t:8$Ojt~rvg =R2٦:PjJPb{"_b*Uҍi/VUp-Q,MP/{<тTE<-DZrkM&F/XFp~P),|&g"M3zĨs TPD(<(Kaoւ fË+(xSlQg]0t-b|@VTK:fMZ@}$(ے'8m"N_8`n`XZG1P֛Kk×=2cC/GHՖ*h6!T0V,_EN9Q)]% 9/ue xΏ0vG@/Ѽ5֞a߁\m$)P1]pIT۴N9 om( +kk>EW2h`Arʟ 4 L)m`auuIZjodM8p\U] ]ãC0.èM#[$&XMCB\ GNU#mb pSƁ7dI+RNm zR{ԤOS>zgEƞ@`?@SV HK/sEg5*/k=.^L̪MPRR< ,*4RwnuGkuݏS]T=i_U9ahO_ / ,aV=HX"scEJ9ECL s?1ɜ鞩OڞȼksTb`\w@Fa&p4] Ԁpw4nBpDB.8QFt-I]v&XI^< YKQ R-I*t-h䜭 .\f~緷mYu-AH1z:%Ulk.Ļ\:| !>+O!Y"~ƭr<`w@+7az@wAqalNF w4/ӄ>+͞juCc5. 
F:\wq[W!x%Nѭo2"q4,\AI@X\1KuI,:pZ+Y?疭e }Q3:I;3Bл3 zf**~Qymq FY$'ڢ[g/4BhG6r.NߙInސ\IfF2~P&P^g(sjm]kp:ګ1+FѕAH+ĊaU^C59./eᙱ H*BP57 dn79e6z@"/zdH᪸ҍl^ʼn Y{FTߝ :68(j1[hʩD`Ar:JxhIOm>19;&f]sMVG[;hV3(}S W1[ 89I{3c}RԹm5ƛ&-ykܟ+t< )pO.ʸ ]]s`)gztKRD]T#du{a\  `Arܑ6ԩߞbQ8W9Z+畒g ]Ӑ1ސ prUSŘewŅAh.la |[ϨAȘ]Men`#|ӪwMf' G_CnH:ӭ!hC׬I U"A{1&A8(DyXh;G1 ]Pק9ؽY3 .B=$PPú]N'sXf˝4!k)k1 Hz!V\,ۜ)ug,)5BP3> gG*Y/\38k&,ti*eY_C:y.Ow[=l~EaF*#|Zap++ ܆ZҁPA0u"q BepA7@VKHe)A^iv%%47WmwR tOw]G ("q}uUsdS83ou##i7tKq \ Sc#,pŗ [`Z ::@ I;A0^J%&3Usճ ZD߅nE~μ:*!& ڍ}4qfq~IXzQJAh^X.y|qA24,8?9:JbWbZ%X佗U°cW7yh͗ydnPߠ ;.ԇZj :yDjҕkh,9ݩ%;*+U빈:aֵ߭+^!1X)*}w> WN W}8'|g*^q=ȟ܃?MO7EER,/lRں۩aZ^ ꧶]ró~Ɂk N>\8-yh.6:(ILR> o[4| ǿ|˻F&5>Pq]ړ5q4VJzꄍt^O秴N8)?N`jY/HHs)faUix?„zDS) @|}l/R8r)T r22[f\UU=E_o!r㱵ib\k6ma4g#bi$S*c@QZ{-a+G`!b0\X/0;#JV"<=@ %O;!LrOTm7zW(Kݎ^z]=0,tPҊ67[}V&-j20J8>4KDAV#5YB(mz`Wzdp3&nخ!Ob)IG'Kd A3$7XZzO֞B2EP6ћg|TOKgRC-14p;(0:~W=QX}JG]UQ6K2^:"TlX=_LmҫZދ%M$n4rzWRQX->&o WQY܋,G_Bı`TBT9/N!˷]xr۰T8{ߛpυ`CF'Or57@;Poй[DR"#ň_=c&Z`?63u17L̯tV,:b:9R>"FpD{-u)\QF:= T_cs5qڬ,\rrUƔADj|N둋gM7,OK,6 _PWxgn2a'̩5C@qO{6>6_a^ M=ӚZaV .(f}Ȗk'Uhpuc,B2*R8neү~* 5cvkS9ٞcUafnq!K?MLNLBQ#}J!>cB3 耛U[G'cN0k8E+[4Ze-5{yHd+_[[2U!76zP W?=7WLU~?ݴ O7&} #/RPz+3(&vߧ8Nk6o$6٧0`d=J0.qK$yBARr^+D{8•*[Ru 1^ @"{ZJ \*֢Kt8VrN\cC/\/LE [Gl*PԌDcj37!-!֟zZXGGDO4 ЁNறbD4 sX<&\$} *AIRVUHL)lRt-ʍ*B,}B'287m~2r"r!6=bio)KqGs[ϫҤ'~髑[}d V_tt:SxF8.?vD}4J3x fOzl9IǢa rL nD@Nq"DN M#{mlo,Os4JCg^߀.nqc4F)/YV{]FBfC K!@7*#TY-C_J㠗:VVU3Dv?!1 4 ̸B63aLa{|oSj@J[3x@5P){Q4c#Ȃ@Py}HU&_xN5rU*b; ۘݲbZM%9@bf }i(54 VEKTD A0z(_]wkǐ|8XN[c#fq*8Q,+1f0VduaSQ|A?:竦}ԹTu\ ü?JyV"o)vS4#y&|LFjid/V/!z [4khahpUEҞQH6اa7ݤFHZK&<ů>E1(Xo_IaE24=5W[-ԧjQY8mY AzqaN{˒bFS<hO"F I,; ȖPE$?D<^W.$!"?U{%uC`O%Mqqm*"Q_Zs$k*iØ)iymM8ZWT*'%7 ^a < TBޛr)&bܔܿ(9$I~DsNeKݫ6OVve{h#(V.wia盵%{uye}$1+¯>.$&ǀ 8$g<"[\U.sH7} t6sw %=k>&LkzBZy۪rlBs߽B:Z Sd- %x8yexWO4]}FthAu!5˕6¬݋Hse:$0@s- ]uwԡAf~a~pbYAp/1˶i^3MʷD"aI, $8z!6kkeɽM :YfгI]<Vwj #+$² ڌ=魿0=8Eaġ]I6g/U}`Wد1Y}$p4j$TLz8T*qA `}lLԡ&JCVZngxrGvGƘ;۶s8h* (;ѩPΖ|MۮaQBtMsSgIk)7d~荳vsaމn7\.l7tq.*&VYh$z3WLsa1>ng}(IsD4г u ֨qPN&5QOx@G9w4sSOEawQ oσۨ\)K ĕ<˸PwۧU-Ѥm-٩HqO dpy [BY}rڳ1!u 8R^u24ʑ-A)%Kex^W~ T/SOy<k9C]plZ P٠b0GHK&05.^޳hȱVE-ƾj%7%BѣdGhca"?l2c>;cF|Ph= FQidR}Y˴M Li(C5˨g1Um<_u`ꤾPfLJC*Q;J)@m?uvb!B ULX ^:M]a7e= @xc H.z-i ?{D-\&O!$3nR_ۮ$qMH}h&heQ^"W9WtɽN15/.a$VfKe-᥍OD{'OӄˍB"jq.ת +Ժm񞥃cZ:'gLk-vکHFN|k*ݒ1%9NXk٪ܛYNfCJ4U6,׈^FEc|叒InZl A!'s91ɨC 1X¾Y'I-EiHD2qjr0٦89=_㕔Ň,t؏i."m3%GǂKN極 h2m m|̂PotPF`} v,S-ҁLPrpюvTqgIi 'ヷLJT.rbq@g^񦤴Le0 I9XKB}%Ӿ g5Dhnp|Fi;P뷕);kxF\]tV:۶õKOf |Ы&c\Ĉh'TDXlP9u)m| q3\7_ϡt$I.FUu-iWcUxn1dMږ u_i|UH~t;nOfVƶ"`Èo}mh X4v<b}'o0X5YN@$PlGo|Fd~)b'ZD-%,@aB)tRba:^$V0w9bp㮩r7)m% EKA($p3ś";~@{'%27 {.n̺jju^`cQٕ DxB;7~_M|&JZ4u._G:W Yr[ LG bՁIo-U|rNWTfŧz]K R*=wUp۳Ҫeziӯ9x> Iz?uW!HoXIn(5 !%I-fǖڹl]ę7(KY~q*1TnWb8?k\'$oq\o~}y_R3Dd J}f݅$:Ƶk}*NբuY8!vNӪͻ"m&~_[dyaF@n(^^˔[Kw; >w[IG;nS'rDAmr"{TS̆ N{tlA3T;2<* IbudzwH;a"vՊxZ.]/KU"7*ŷKwL/riNj20*?k`jE\)1Dt%O|ezZ|$Gk$oR?L0I #*z3?.Tg4bG'Xt8NBnRp(L%Nh ?l|[d}#~Yg386n8cN~{ sAn]H.j͕^ː|E yOu-!by% $Heh%& d(sMpXcbh!~7N9Z U(=xN&@{b܋+Y،T fqF5(GJ=@xa͒jhI;,/,MCuxXcx,CsAϧ2괁IbWߧ#@j**z++kx/,tYqKJx5->ZL%I"eiB4YG4mH?RSJ[ɶ/7.O0K4DL*!̶1 P]{{<{ө=m;PdڨܬFE!=8|i -o9Pv!5I+to71B;Us]! 
`*Zn*.bΘbaHaxgw:^0@wlJt,"gVV8ȥt9~^,kf d@T>E&v1Ss'AWp|e8̳יPI z]EV{0T?#7wDj$%_(c&-Ir͇D!4~]fvcD}Y^߫cw1$B o 'F/Š7".‡iMu{TK84VCO`װ o)ճ,%Dܾa4}P_5fªti2֤ gv龩<_=h;pEeT ❌z}Ǽ,s/ @G'9ڥY[.^tQeԪ^pNg/yr>}菕3\*й?'RdWܮOF?ěΚ0͌%#.t?zi5_ҏb4xp+ ^a 8F+..*獸\-l, *4_zk6ʥ X9iM.˨V b9H;v)Ec6i y7ߌ߬?]0Dō/19̾VN%Q_@)e ;H6idsraf3S(!!-m5)4?%V;g' %NҘxVDtHB9O% 泼::KHKY?Um["LŢ4>3F";; 2kЌya;z~,FI!-"As4u Qi=2DNPwJpbUpZ(rQ 18,O깃RM=gܬw Mo/NW2Itn>L.F9Yn!}5ai]@!Pq$е\'UD) '2LW;ńuדweҀ%RexUs7%>rӮ},Q+8{ #eyTM@(ۙq^V|x{p*?2 VGv(fi8lZPheC8 oڕ)RlSRWB-7i%eK _Ugiek {=_Vã˞S?*-DS1Cj0t^:XH 8L93'zeѻlO8cm9neCʘ{ UL>6}\8 2Si刷0P{/=~L'͝^UQ*ƞ9MGތER.9`|]:Z \P~\|9Ţ G*]921L!vT)T5 w@'$0kȉ2mFy* E(93&@Qյ*Dc}H.3BS\8 Ѥk5>sHdhn1|0^9vcW)];긌q)V: wW7 @~"# o , \r 94˸Xt z#ZMS M!^KnS0eULӧc!+6dw,Z|Dz; |_838%̎QYT{%1H"Dw|OAסrEoá!̋ck.e.ra(Wj*Vtb,Þ]d7d;|JR 2\($rGy)l.su =2T861c,e^ݔho}`f #G23M=,Pl ݐd Fr.V_ڄsY3_UF{|FGVt.KBz,)GӼ;}= 7eRn Fw#?'.cn;ݼ; &4 yrKù3D'Ig7ܶZ'a 9˃Ρ܃*ĘEq4ÇWb? '|>h=&hXkQkp NcFR"p$x=)C'tBB@T2 'zʹ{ŝFW̥pDδݍJrOF۽z4G/p:_J(Q;wVG H/*]:_SOT?;+p*삪.N)!4!𿋒wY7h$Ƀ[! g*т#8kn] Blz &琉u,8sBs$9.V0'J~jv^gsr zȘq?祝/ R2.;= >a2=HD[ >i!C+.isum,y9Ⱦ)lZe#)^++6ѿrݙ t/LA?"pHC%q{3ۍm#"H-ޘ]p 3/tD?}=0;G=RpAUIk)B& ¾ H5FLC+[5w@z&ݑl|Zc$ʇ3IʳXI) 'BϩpcN"/*x8,Ye\̂Nx,L?xjh%HN]z&wtB>=ҵXn49gRni9ZtTȱڞ@y#;PtR.YV%%@ U- $KY{|mm1"p@F6Ruv| i3wKUz{"G6n%2uXC%c6YgO<)ғ%)]Fb Y%q\~qIq?[="1Yg!yDE\ŸA]< hNO/_oQDVfy`}/A}?hFUr]R O CA,Vg>+B~F⑽ρ s:\('vI~bT՟uUMU;19b="  Z n]HG"cFG\¦Q,`DHIL;) Rc2Ć9~AsN6)Z$&u|W(a#9^[dGd;o_7WW9Z^Wc놲?X*pUr5n+b2|D^W, sw;rLp9c@&4ܘ晡(d_˙E QK\,37z}\8L$սta<FIN;7TFYa!ŮO.ТBσx+Y9v%XJkVVg,Ȣk B!xwﰄOsgP fXdCD?2"ʷzc+MHe t9}p-#{R)V be(F w`Ew*3U6yؤdq´ոK$2#Etኲf!ئ$G C;:)RU6wյEuW0^@R%8~5sxQqHQ:VJU?i;\)6wiw!spƂ(xu>g /9pc 1Q'LBM`Dʰ`<0a\ E8`Lˌ,s}_HUHE9~;~Asܑ8ȁ%mZ1 ]4~6WF-jw)FUg6Z.>F&CLZy= G;OƓqg/RvYeJIeܲƱXKqk ew:e@\}/6:k6{aR(|i%[~FAϠeAQku!@}!G'!pLJ3z0"Ǹ3>͉Y .Ē K{)K}Sc{蹔÷I x)1c=2 kHgdɽϧp`unK@+;ZZ<xh. nt@*> ي0ʹe4yǔk"ITC 8 g\sR*o|ec̏sQ5QC[.TrubXZW@+쭣98^3s3K/B#Ɵ@=E]lu%εŚrx#g#訍9L`CfYWR{ԁ0V\ƙ=xRy*::g|y> r' k"gQHZ؊ƚ9igXbS <3k>-Lnjp1:bB"=3 7QE kbC&t#%\bᄑzWV>ފ @u3[ HMk AKݺk >F׈ `n[EsI! 2^jp>5Vv\[k k^'䑭x+!@:J/N |G-*Q y4 ռ[QSșY}=AJFB:mBbr7"{a%f~F!Hǀ S?,pp)eFCQ u5CV+6": 'ifx6p6\9VZ2)=[r'Ւܶ-V$pҍ]NӯwgM !VLCZц]KwOLV$bb˱*PGK5WͰPHgf7qrҢSȗ^Kp[RC\A.P95,>d?` ^0X$L!c9}/VotJ܂|Kgk< "G: # ~0Ywla+X!#б&+o9K=f! R`ls6 iF_wZSZ[\1'۲\r7ّ&`iX|I8nJˡM(hĨBTf08m%Tv.꧂; JZyP2b61W^Fo7hmGn0Щz#ܫ@b-0 G1p I9Ӆw"eԑ/ m!j-lMYAi4~}8cڶn#]7z)@dͧ=$9V/C Bj&"jhJɺkCגcΦI{]pPzTYWaWv%dvfr֒YlҞۧm _$cVkb 4dF +uR(.FL!zbΕ9ƭ='y_gbT^^EHh0ɌTAzHpyHL2ɦ0 >tp< !R(1Ϳ`6ipB4Y};E O8} c0#)~KwNT;H}6'+YjW3VᄇsU˘3 i=>Rq|^-#.uE'+xYtsPiCn5VDHBD6Lژ! Rͅko[;YcdM4=QN_&L2'Us^ [2NO kx_~Ձ9 sиht>JUFWȜ#UJ#xy,:M~1=WOboEZNb=v3, I0ðQX$GԱ}`o ၙUl(t ߌ$^g&!H|nuᄌݱn꠲2& kO4%.3&]x.j4@}usmBk]Nx$ͼ7|q>$n`-z`U=ЏrVn\Hl)g}WnޑhEpQ))tߛNFpdD9@EL# Ɨp"Y ^sռ*IuE}QٛhC+@W,'jB'aQ΍aQ[$sRnlXĒhèH~$ű,3VU\a4Zt ;1*J}U6pTXFY ɂ[3 Z'[\b(׎c'tq?&VR}$6mZYܰOC!fMӯư7ꝼt;$x)AvJxFMǢ$^"Y-wlp_!;/`a|\cпC߀$H ,>?Bs,aJ=qӵ截#Vv<6=$VƊ6+EmMC-o\UWoT))90b(,y,f3bVG)2*"*:GsaRBdvP%Wv)X] D}u*Wdg1@Ye;1j@Ta@56E~>G:'K4. 
(\Wz&[j?e޹1(^_^l '!!Hhb673y./AHMO)?kY$st.cZL"BLpKRGH$jd vk\ݿ)`D[\ҫ\23Y[:~"Nu`[P"&O CWD}oY$a)bŊ1s>'EJ6%_"t- $bϸ؀ 7uIf [xPt`cQDX2#o0L (kQ4H4VWs".pm/a4;K7 q4U>ݎRГ  t3u;mi\9Z"` ouZuHc],d&w M7ʸ9Lh5y"`8QK7s^qvB*4m-`_1_n7]#l-z3|'4쮅g"F?b(aPዌ<8rl}u:QQ; Vr\\#\97,p4@ih-rӞ :da) ؀b+tm?OX Z ʁIhUJihN7%qNji7W1e794c⫿2M+ 9gXlQ@)R=C#{@ni |XnևTjC>khw)Z'^=#l|*'.%$|' 7.}`s76k9j[+bs5eBT.8/]*ePAэs(F88 =;mGvNӟiv#.s5r&%G&\kpIR"T Eطù8x[=;$ߜ|[gώPszSzdϗ__jC1ՇK5w__PɂPf[l履l՚jf_\qQK ލ^65eKx879ac 7lbD/gC~2y [Ι@*c{+qtKO0!49g*B#_ q;`sJzeV\W;yQFQʩ ݳs o=rsp^zjZ -4Gp h#Ac~?=V u䣢A 5lʋ=->ٛe`,6qco@F-]nG@N`g_vHѠ wծ :G*nhE(/>B.)!CiM Spv ھ=/mL~!#{:!O!mZЀd|arWf_}\]8Ќ<탹4ì0 OC4m }VeƂ0юۆ<  鏮Ӻ{nl2G/MW}Vʔ^e2Ek[*E 9$ï{>8 JWXy9N[k. (ùַ y"H-X-DW۴i;S͠q1d(>(4˘qFBL+j9N!;R IDz!F∺1' m訃l>,\Oy49p[ =q3gΊMA9>R|/lH!=ZDaD)J{g76Wf!pj^t  IHO4etO:E5 42<¤Rg4:T8#MFB{ ^uhśOg}rcXqʃ I:-s-d`s6t(DC`NV']/ Yd~kL:usr)9 $)Hl<{ٙm2φ 3hc([}'@p3̞̃&OC\nc|\7!C SZEED4(^0}ռܤW'P9?"[ͼalSҸ'cd>YoG_(m]ދ_ ,-)'0@  :"_U[nvkuYH%7*D2y!6' d>}F`#}͉l qCW(MA8vY ; Vս)` OɁȬ1Gߎmf~+ . -x;]"Ƽ`b P7I%i9jAAr-tU=@Dr4}k ;{Ь,DN9OQ3* óϛ;M,˴WJC.2ֺp}!qur>1ڝ!'f!7fr&xKʰǎ'Y(5>6 YzG&˥RSp4h_5r9#}M)B3a6(H׿J }@\m+n[I T;R?/M}l~Ѡ/@4FJ'~c  [>+^|Zyq&G-՜Zn,Pא_IW/bFGO!TiL'6P:`p 趎F]>5XorT9DXFDP9€`; ,=OѮ[h׿kT7>vvRJp=QtCP֮3^bR &kvָ5#^l:R0h ؋TT\9_z-:QiL*]po3Xq2: ך(/,3QFe E "{q_idnH)ܱEW!ۦ1/qiX&o}P٭}KD)<h͓Q{ؒߥ / RFZHRJ 9jfe[LBԨ5e_$}̕g=S:A̋;:~ť]`W1сsM m7{gOGu"Ǯx%Ae<1&zΕ( +@*VVNhqŖLk5v9+_%s3#j7ΙӜ,j]8rskZ3ג}ճv*Yqdʃ:s lҥAZ>uTQ#%uCv0y$b *?=P=ʵ O :#ȆdpŹnٸ2K"|ᅧpt͝*̎, JX9fZ%<+,I*K|jRB. 7ܽ"ߔUMT@Eޠ L!R_#dF"HX}L|ΥwTEg3c\1'ǵJ;ҒmV7;^VYovA{?`wO|U% TT$Rtw:ţzKr{ӎ 

&-U]6Չai[r>T(9 G3Am o*_7#0M],dhKM$j=ֶ 8{}9#6F4U/CԝJ &/O/47~P$GQ -\0F,X˥3d%Apb,9xGWX~FYx]19hy8q2l[JŐF0cz|J~c-D:d+|ՐH,]qnfN& 7 'Cvr] C^ Y6ml58Z(>NYE 1-T+*_-BġL{2#1?Ųi.Z5 ZLhEJSHi~+_5ѿK#+ &xwO 3qxj9oW\H%S1.ϖG%* b "â 8Cd|ǯY2ЦRI icBs9n c{iQoդZhŘo.\3(' >429!LSneca.HY260 7Kubh) Lg'[%n*Q1;|UG41'~c.UTksA3%:*׈-a:w:rqhB"M Ic 5u|G9"[R|5f:S"C|ɷc#D]JaP7-z'QtmՑ8ƇLI0A*#[W*_ UlZ]6)(96jr7_C\ev dlO4X^Q:S!l1O3&iwr]`忚D<#UwOJM15:´ ڹ\V_qt:5w08|,' UH6'*;map&Amyt? oiJ'`5=Ci׬;ϼ{Z`od#t/'uDnҠ鶑0+@)̆͠ Ŕ` O_a+zh@O(I!ޛwY׉'8[DDU~Nı=ffkkX=g1cN`%kQ;C k0t+@k0''8kcƱ4+f76$䤪8WQ,H5j.M])0`G@qM:?L(%H0D6 W>š'n=q ~iTg>WNIפvT[w4"*=? m;cP&~360Td${"6ċ$qJ%x!+^g}tM6ί? /io{,8[x? |:ٶL8X4݌6lV8ĺ3)|p&|ٹ.|;iVMw{<6};1mQF|9uщ%\ Hb͍(djv.[3]G%aX9UK+G>lwfwiK݃Ƃ  eF 9εŁw -{4yM^.Uc:-o$UN!ܭ/6 \=I{ί az7d{TO~a&.B/uNm] h4)ih`+ |7 y@,& 3u:[MiZ\RozΉAyV돸) ~R#OULsδIͻ*^^! d)  s泷ɆD䳜3(bzP-rAVl ؈ +w@ki k'=͞r]{K>mu'RPg\ &$J43\4ԅݜ $"Ir-uBH|X%) PR5럷KXxc& Ṷ. V<kPxW^,*C=+fS~ IUa Qrdv4v#U[rމ%7~F-_R6)jSA6ݐhlv˜hW.D56dyhi@GKKIMqXcKZD  1[S$S\fo,y4]C^}* m, 8ff}$nLWG%M5/B7SKuH5N(6:s)W9َ:Ga8 *Z*5n@LTrkӎȰH4% 䥊{谥9 $=rX1:L'b2V(PzuUz5 p! КdO] ęaATͳklYxZ-Uƭe0>gOUq hQKکYQk#MLPY gۻuק'e0PdaW<-f{7LJJYīrېTw/۶~NZEĀEFhIrl"5j̣I w,>T!ɂVC9G.zH,5#Վs3#Bܨ%Gb"7NfEdɟ^!>M㿪fTU! g=a_Ѡ--e| +UiBFAiN*Ivą>|ƠRfdn1tsҁb!Xe`^*ˎz?Q@ԚVn$QQ5ebki2-QG-'1\綬< dm e7"Ԏܐ{b釽kvM%ztBQ0׺?(0s_:h0aB$ZyFw`Lk%g4+Y5tSMHG F>֓m-zjE8gCi|\uŚi^̾|I%V\KM»çk)١' A I}aެ:c?Ŀuzm9FgLDDvimG'O#Fdq9Me$˅Z~k9 KMB&s +QJtC"wi%9ѭĖHl2_^\o<Ӽ 1D$̕.V apD,Ὄswv;{[)V=}a{e\}Wc@u#{rA %d:.t*5j ZiI &߷ā5 @ӗeANMxʒ%Ccd>L|Дn?P!gP]h~.XNEq=LI#EOB&SJ'=.Iώ d 4DIhwզieCcAos6ҟzU  2r}gMK G7f#wƬ)tħIq*m9Qe5J7?\Gqru8oבS'Y>+R[-XXnAOIt%AA"=Q] ;&+$WagD ;Pv- h)3R:ȹ:rfO}P/wkb*'tިT_dTڸn?V1T>R(osteTzǩ`߻+U ˔e>Sgg[D'U r`Ma'Sp ګQD=Eqmy6t-@8F?Anᗨ%t &.,at\qpB]+^DE>@FijZF_t׭b $X/ )E%>@}ѹWd@H:| e< SNԎ76Rpi^rK;[#U÷`}A)/"bhĐRTυK悡ErS.=B323eeڑ澠6=ӓg[[Ra 'Y nqŠM`U& [[D`)o,ѽ$O#JO_[^ӆ3xFql<[ YˮPtN1͍mZ-Q5NW oukDSosܓMt qHr!yDZ.DS^̩H(kI-Xܰ()s烶^vORS*ww|7e]f-oC3ǤU0?,dߵ['ӆNo MV߬X56^ m;plΰF?y Hu܏r9Iҳ8ޏX ?yGK|T@-{hҋ C/c,d}"L9aI Fq*@{=[cN?FI45w[h{&r(2'J # R>Ha*5iFa v΂F^J0P3?4꒏RI|,#seFdL:DRi㱔@BRf }T`#)pl y!$"@;j^/YtHjN͛]`(Tdg8Z>N/$Xps%mKo+JR9dc]ZGDK=P7՜f= 4[oYTG6Ñ^aj nw`M"o9ՔEsOQ:4i}:r0W8~Rz.鸟kC僷& ,{!%!XUݏx:= N{)-6-ߜ9h41AxufeeXs A< ac@ɲF G\ UT0Ʉ=IYeh?6<1ߟsu=5姫;*@]Z,4L3Im ȧ`jj7|:* #%/&hݣ5])ذ`El) 4peO 5 ЋTЯ'pMS\u1!ZMKx)%0s8]85(@(vDw8ƚ񙖝 `\IJg=hoxS+$#D4Z8>uՌPdťiciȷFΣc볳t"/8RQLTBA~>Z:;vt,qj!%\RÎLZ жˈs E 5×eq5 j}1qn]Ex̜\tm`A 1m|sM tg5ĥU*>2@! 䖅V-v(%t4iu2A֥_-/ZWStFh,Z-c<\ofšܡ@9Q ihg.79paޣIT2<g}@,n60 e sh. d,>Awf. \§Xpt䅷<#(UT^;\/Űt8 }?[HE"r0#JߢN2$;j2xOm+5.j)*M޿D`yS|3,H`(Q7ɨG6O~;M"_llY'WF2B bo S%Oe1"wzO+Rd 5c6m{g=,Grt `CiTڪ#A /lAjK&) t{ oO [U)O볢C3{OR&ɈjEA+II< 6T Ww \P4z@ ɠˇfuqV3JfUIaC`eq,d>e-&ںuO ~;0q\7@"qHr٤}x/^^І)p˪l@=l`oKw挈gptXo^?pߖAt ܨƎie!ہhE5Qd">) +D "eV e4Hߞ.{xך.'DkW_i8ʌ $}JVC'#vSH9o6R}#*pb + 'mg2-1sv|"7 @1#Zh#K`LXZ yկ>1tBvߥ]ҍ&@V:L \sF܈Ē%neQ' MrmԳq|E Dg\Rk6/Vw"ih o9g3y#'Wj>36 # q& rLFNz}KohY6ߨ\htNneB!h(U&S b+%=<8jYHJV cA AWFyk"S+z6$ )ԹcsYD$aasl+a39K afa09l z?sۯcPXטK^I$r3lͲ56舫Bmnr-C8" 2TJG.lOK 5ڦks杒B(kqCU2Lߘo"% b9l| @b0l'lt(:0k"N[ mךqc$EژuX *\D{3]AɊmdH הYgUs…1? 
B bw]f 'ώROH >{O>HV@YD YKAqrĬme^W]WL[.OmLqmj5,Fx-0MD~`AdǏ ('' W|FO ]e[Q3l Jcg!qkkR >6סּD>=U:S,Sެr"fv1%}MR(d/賂^VzΆj ԾT eI9YZ 4t+ݕlG `ʏZLqNB\h]L^K'DSBW+b}Z=?]G iye,bƛE"/q);؋RX=Wa\bF5̓xi`UEg\;'_ `+ߞ@ 2`%6Sd3cUu 0kȻ9ܶy F}j >6"282Yc ^ *PdXc*oiB%*,D5Cų.=J).3~Uw<;sP"]P%ץ<ӝNegUt=5Nb5S# k{*_BE}A_mG!4kMqf{1*$YMlqr#ڸG20z61 S<-Ido͌d>;|lh#6,铽`!òvgQ NA7]mյߩOPcı+ĎI~M&Km !nYO!'lrE$m͊s@5?h;TH]Go>,0'H6@0΁,lV Y !{ Bu'GJY,eh|jK*.U١Ъp;~3A~tp?p ~w8!#pQh ,Qw\0n̲" X]NʶƑLm[v`]!ג;8ZyӶ_ҽ囩kg;C |#ZVː֢}\:Nzճ[.,0E^~,Kh}YRW4S}|rč rM/%5}{4n+x*D=/ŝa2$s:sHӎ1G@('21%E{MnnUGdgu;RX&f$eni/F%ҍCi h/4AfE#qSrVDO$Lk>~,k{We2Յ5) g&(7 VCj7$1r/0D/k=zخȒo؇gz@FŮ;8hukT ("q𥮉P" 3VdXn"-6!wa˚RWv./3{Ճ|+ =.~iT2zQ@ Y<1i`_Q^C5zúJ->g5t[TK2%]ىQzÒ5<´]-n8e٨Bnw USEewT3x&p )p-qw«9,b]G>`y(t?jP9eyt@z9*U|J|IDM[s+NGuP[p9Pw7̮Onu~,xkzT*-`ĕ W'7qsIc-?!(&:'DE&51y5]H0 XeM[S:6 7Y,:JfR D^ViT*W$TI̵`8ehR&Q`@jl#LVD{AVE|NfP`e^6Ir Ybw?̋Wzg٫zK pLZtؘ2E{+tc0&Ia~Z[Ç>{/mv_DZe/\Щ;|mdXs,F3q]1GeFe> sЯwՆ4?4LҶ7c6Bl_zlM_ Z x: ~DGB%pM(zcm#.Beeak1cuXУVY^!#L4(3pI ׫? | .׵ODFBX"IGȲzCf)|Yʶ'.ME9?6 _`g5r!4ԍDNǕ61.q >~|-olsbj~0o}B%1 mBv Cwt,$HuڎoeԃFy y̸Sb~F9K[V#SfYL1D[tI~Z^R|B-8@7Diqccn+ʴMA`#JՈ=c@ 5ZTl9 tr׌M)7Ŵ܆$sQ;N'rU(-3# u&~-HQ \nwufX 2$5!ȚhX 9]PԱVbw Y`cS]1 k|Oo_z0i[)) B0y#yU;%N ,h9"%o? )/nevԪh29QE҇IS^n#g );λL5 <Ֆn+s{2L|:GͶ^@&wG3SJEAc&[LY0V?"Ӷo8Sr%M{ J5TƩ´1 }3L&o31ow)$4ț{" Pp4|1Q$hBh"p+"VhMuɟpC}o a~n;g_'23P :-*98=d;~}1bAZK:owMʯQl`gzCEv {F&!jCVVSx$:z}F: ˘Ԕ-)3Ooh}=pu;K~C}Q'߲C\X&aM+Њͧp?mclE٧(J<%Mi$HwW 2%s1vqsфtU4`͖9A{#OZb8pE_/G`enx09@_٥Ygr _$).C&$'jw%3GE1~ZmQXVw@l&tH;]@I' a mS D ;+/תԯj\*B=&y@ #۰ H/ FSJ oMUk4RɽX^Y rԬ)RQ 7G+{_{A/xo]j BbUΌ #,6,EâogU+2P诘|yHȈ#W+[Q||(y5B )46Eяa]岁r *G9MpXI !d }ׅ#/yr5v^^% 6{H{M$s~/RqZY"H:aq7LB{C rۤYDPůOECWI5(V\ U̙R$,> AՈQa'RLFg1t]eLKwM;6?\*Ae̠[xYoEmq3'Uτv෦; cAŌ$~ǭPKH7^WD!c'lb(o9%qV=aŵz%= C|o>P`,+gu ނkz*p+ Ap J hTPoY#coq!+ۻpC[OƝrY;CDzRdʛ`'ţm'T@,x-> \8}*5B飮nEEhL|Pk E1ű Atݍ 99jksrh$W]ZV f"+laOla"M; )hqq׊jiNS{"TEK UICf"P-A6 Q悔:\Yofw}&A֥ٖʐEd[ex!\ n*-sX^$?e- AgZTKg!NDfwE;UZPf6F"T]:ڡR{W#R1"xBq[0pFmiM6jU*BS-ً;a00OΜ;Uxv44Fx-,"@kοՈVP/Fv8#ϻСʲKzۈHD̏.pqrSb}hL.K\Y4sD>/-OpGbTw#R63=ߩ[ۃW$㊎S]7hS֗AI $z:ЦD 쫈uj3K_d sVOY?H+*+x_mDkv[eclq@$Q@On0|C?.TY% \4v!Y8$kR]Jֈ,tcbbrNt?P=!Gb#Dblۅ2}oqS'-ָMBƨTiGn<ԅkt U&0sD~.?d& 8l"A8y(whxci0<^kִ&Ԩ";;A5)_+_/̜"u2/Qqpf.no T87%~ 2a[\FMås9׉?aƵfvG] F n(uhD4Zj7+ 0=#Yr#beQZƎqY _d>/GIM zOr4cx I7T7StSRx5CeL(i>5=B9te6#.db?f}@=ϠBK݇@Oj_! ݃c )leխ}?:*Rei|Lo=Z.f__aX\2_ P>ygحUd/C-R0 1s;gFϙgQH찟- SOzM+_>b3O4FyGc'Ͽx7q&'o_ga&ueí\Uh ᗷhbV7=*1x'AWأƒж;Cӗ?TOkqnje%[2oMr2[OO: `Y_pk%q :EZ5F6Pdǵ!iA 0u5#@5;V @Iw|^~H*l (o7#bӎ|9\nysF#Q'\_7]aOH_BCZ42!h+|TDW~p*N~`r̹7ߕU*S֟*pp,dE!3U9& 1LF-zh" R 84,3UC'Jڌ\GMU{4dHf \A"*2ރ[(;!!F&~sT!.&f'V$)]K(u_Uy\#ֻ W69z[fdD A~Ro$х/P Aȏ6KݢSn#U Xw'Mu陿G]WّQT֑:_7e. 
(6- *.oE ̩ZKϾ۵Ȗ0y QS ';ac`iumow/qގʮtY@9^!PɋZ"XhK=:M$'8+Y 3&]hpHm'3-KCwe'C\ h3\GxU\< O*DP `W"(ol̊z8vM&0*Oŏ wJ,i% j\bmǾ ôw`-J3Bٽϧz3%ShZG}eO ؾ8291uZy7i* ؖ&i bNUu%.\/V\\匼3^NZh zR(J6=yvۯxb-x)W%ةr7:r)!_찬8FgϠhg%A^Ea5-]P@[>$(;;U =Ezj`2؁M}Hf\ry^F ÖݚL!E&0lC`IaK{!=;$xx7%Ug#厲d$.cw+ :ӧCV5 piے<>ca@ OwF2flE>ABR갔%DLo 7N2ʔZմ4`?E^ل}Y^g~Wq"7%`Ca)8>.NK3}"cM+ےCo9)V: sx-CZy;4IdQ* 5y 4HQfSiV)+ބ2qU|[z '~mNb=lԚ=M,enwUZКm˭!:۸)h%3_vA/e\ڪPO֔[Zy\ENP5z N]Bia+dh j8[ɹHn=v9Է}W+jL1g9v5Gy/1IUn=m}n/J|V%K'S_mg2jy]撎ħ{*Er}خtCFʕwv0 '"٥G|{6ƚJ^b1d( ًeu:iHdMSyO/-Y)RNjg|蓽F|?1p͈ʴO-XD,|i쏢H\(aL6s(I:FH'΃- V U u}ΰ:DK;}i0T37$#yD^Ơ'ڐHI(JzTu'p="n–7 v4 ]5J7+LPC5ΐ2htt ſ܂C4Zȫ?f?yl<(AH?MO6G?*3mJMkj*C9`eQb|9uQ!p铲[)/L6|sW?9 ]`D (wAlፊﭽ6Xn }V[`@!=_X`߅NͶtȚ| UAvhM$ 4vJ\-Rf2Y53l'h~itDxb?v ` Mq_ET6{Y[{©CSjuӤ 4>D9±iT $(ʁ)#nJHwživ[oie?Ā#rpuE0>ĄS$O[ ?Fxb85x#KO8+ lFbQdWs+R7Ov_8<9m٪1ڱ[]ϻ11J# 'yovAsoMaQ9o}`䇹h1CA%𓒨-9Ǭ-1)/?DGc~a#$Ef{U~AfӀW~8}N=$U|{h 0V00@]Ie_-E,:^%bTc=+ۭ*J}V^E*5d%_1w/KtPɆf_(.& *\dV@ wt+9=o-qX㍜Sҳf'Rp[[TULF,BNly tP7 eGVKw%nVF\lɬ F%hB'hTЃ61ۃPtH=U%WÙ-ʞEfZ\Ҋ4+i Tc:#y~ }WelKxCHG #~W޵ض5!oZgt-w@g3j?|,* FIԧ2>:Kzj;ˑS(U ~O{b]5-K]p![k`N`of ۑ_2E{z`O˜ T!7YLG@, ŗ8Оq ֻRr;,s E^KỰA>9!8D4RӶ\B=r~GcUި!}w9+&j|tU)zT+1\rtEQ}հv!tC$'}#*-YᮐLyOS"+ _HiG.W[sH}K]vs$alm&dzF|D{~A]$5LnfvSo*=^N,Dy |^?6nϭ@w3rNĥ{]Fit?`Lr#%'EB{g2?s7gUlCC,@0hW]сN͛d/D*_%% A;(#H.*r?cCLD U^&bI/ lq(~] y`8d epA2 %;<98h,])3l-hm}ܾÈekY 62cj`n|?m0}s:]!CVe< ter_+}p_ ĸq~G`UU/+NN}-f#ε| 詒Ѭ5AM\E$G\ mR7ç!x`!VxC1hO 8nI ?9U[ۣB*]_j~R|AUyWD8>oKDQu֩1\= BGvz6l=ȯL+zD 4uY>Q3矍qŽ #YTMrrvZp0Y)̊-2iO R?_}-2zlka #PBn'qf_#l֢9D}W~7#.Vns1FYok'87V!2 /̄ݜ]oVh)!EQ|(w/Mɦ~?ݢFHk1ǮPjY%cS eAyf)}C=3էZ[?"o]Ua8k$i5iVl~E9lI̐"<9\jᗀ:WnY-POj[X#W TK\L ǽH9(i;ߔ9u ` s$I(' D]baLίяqmAZ7 |^OPOC?ԡZx*"Ff|&0bUTKɗ&k1`cѴ8ۊd!ewҰPYٖC]_B27qʗZ[:*gk{[Ґ\}279x-Nx\j^Z6}2*5}\nBК yD*ZkLa\5vH4 &@ʸb#~#$LiEQ멗?k6ݘGkھDXs`*&.jP9A~^=4_ Ѱ\-T!>R7~`SFָD,zUk7ހ%)YA2͓lJϻҮ4T'3ׯ~4+I%%Pѳac3px/ )YTouYu\ݨve9n*RI^IB-/ 7/G#ᾗ៍1_O^]] 1L2tAFdk"(.rZy?NBѾx V΀C+y-R22O2][NuGnx 6ae#29M!ep7 6,\%t%AjMF&1 G{ Lw.P{+JTiш"d18,O? ,b@6e&"YvwCw)rӫ$rӳH`7N8PL&3 0[j-鳌[LV^U-K@sk}X+c~vV*u fWfM)ǩE7y{wuZ'hl?wqr$Qyҹ\|[_oi@$2hS/N.-|ӿydg&UnnaL H@|EJQn9ďBHk72Π1 υ ͱZidNRvq})zuBv|Bm_UGvHm"t/SRog23cU*~ ."I[KGxSX_>ES =qcOv)ySkfz6ݓ"-)͌ @g5=e'7ެdeqo>fy_.B~ɦAi fq?^@րg,R=~Z? `173j E$Z")ݑol돋me _,k7R$ґZ݊n2i;#fiWBK 30[i|EI \tZ+DG> L.eKd2IÍ?ayCdHf'nXV8lF-my P |0& $Nᔗ)ހPs˘뼦8^ ;pS8^2%yUJoAvx$K#.?E7])J^ Ώt/ Wˈl'F\w,ߙ7C df0|[>Ԛ#6x͛''nw V㽸+ز2r"-~#2G2̍:mXx{_q0><]G/# .4ˍ@c[趌V4&%U1oZ״e_g0yQ|5%rꫵN߇lC3s粫$U>J鬺Sҳqy{-Xa4Ul/rlKAb JX%u NA\ {s0OӌeD||I:쾦|£gzX&fA~/2} \%fu>&F]Kc_x$Q7gU!hU`ׇ>q')13ZJ'ɩD9I,YT[ zѣq!ѯN.`;VX h+tIߟwo q:LG 2G;=pQ>3bh녭у[]_2KR#0^uFl}^:OӫR5^f3oO뿆I|,feѹ7'"w>$dKt3w>t{ XUAT n2P^)8OJB9izmG26oi"|P&e!YNSUxdx1$ bm  wlb_y؞ilFOT#svլW9C x2w]W ORb` :S%T"#aCi4%INE2ϱ&5g,ߩ< _wL:j3&0_ 9v\i5cbYĻ(ڦ&DYW+ht4;x %. 
C(AUr;>hJEq,jGn)< Vfe*q' or XA qO 9 [h#Dd@'X(|s0au05ݾR4Ay$&e:NST M\Lu7rz P*%NuW'DX́I=cYdž Ql,5Pf ņB@u ~AIJQ pzN1gS3MNN?~8p=Vyf,] ZRxTJT&Ɛ rؐTؾf[ܰ$ p0Vh7h]Wx*F](uX-1i^yp Sq" N,97xt[[lqg~V7ANɕZI%5…ӰU]t7/\ os| Wz"#bmmn i]2,&4UX;ʬ™R\lŶ ;[Xth>ouAL͑1pK$՗T36'j4nIOWp`s 9$qZsS|GF1X/ {p7c:nC9w< 6vFwaC7W5e|'8q` IG/:uM=КL|~0pW.I{)vt[}3/tƥ"_/̧axKͳE$h^)051Nf/(+МXبb%V@g &uh@t?ٝ~dEw u$BA_ϕ圄@f 6h+n.,iz>)ZwO ;U @BK\ w+=قDS u$dF ľi|5TR@'xT2oL3JZ&N $~gSz>OxZ4knSmnulj8c }^[nI&e~\JNAkIsE:Pv m\~Rbh𾓹w6DiQ_Uo{es#E)%g܀ˁFQwb-:ĵX!;^_a?Կ6'`P7SYJG1%8 ysĩ?܌CF#PCMyE5iيp.=0w(Q#Q9Oˬ[ 榦HQܐ.p/u RM&/dN2*K{?I (%<;NkB !1H]ȗ$}lrf2E: ;GqY9QyE2 S) HM.?Q*15Ɣ2!@Dv:Ix*,BNUag Nj &/<&` Z[kV ^b=]ھ:CpuN]b8 ?}.R뎷Vx ?-n&c9YK [4r{=rUd5^\ #p ,߲q{~Oqߖ@F,)@=[7yYf/!Q{{­qfE" -$<;45DFwL AeݺMOZٸ 7wf19%GoeZ4E785'98v:jņ~+ӽ٫H> {֔'ZQM Э׬")<\g5Y}Zn#BjX6+s<ԕ[4M4sw0(΍il XV"/wrV]M)vӘpdqsFK8t=>3DHqNc# K@m%x+ܫqr  VvT@){ִV`J>H%Rኜ|MÚ ߍCmrVZRg[WtºZ2^D>f[)I'sDT*l~-`aėv e@ŻAPYKK񠏼˝t,; >\`hL'cGwd6U?ȉB zbf._e%4]V1Oz54XjI}>3_,b]ڠ4-oD5$\IX7_;t\.;7W1Y @?oT:$hͱ GȐ>)M|r/W"W; yy~ey#6^kX d 6p8 i8y&daOԶhn[#0rrvY_57duO˜')y< zMd;Zp3pQ'/+50#{=ɓ /0e;SjP 0@';.ՔHZxfz"[xdp|*m Q9~tX\%67\!QlNzˌM^h> 6p ,ɗu7 ܾenuJDb|?S>`QΒ@2]kG+Dݙ-EKd)B:Gdo yrC_oCLqrtU[BT"ڕp6r9vHakxI~WLc)OGPGd4衻\-8/!4L_&WVZ9Z&Dj "ò3 o|OQ36?A-8fSU&Y0)$Q/GO){RJ%Y FG:\lhTW#cfʍ/1Ê7H?` n/!S/mJS\ `טf|=LlB= t]B- ,)vlkMUǗ~ eM?CR4 6,O1\w'Y@r|#]kN}]߆yq綁8jI)Ւ5ISZ0}TtЍJǕ-F Vz?} SעHE b4N$b/yPeB-v08L5t`ȮLheWct+s,iPJ[j-tKyre`PLόdg{,1 =)d;<|U֧o& %Kgޡ@K 4 t[P >D˃e/o@Ztػ/ ɱEf` HbrlW'Nx*m⢷zv#:m)U4HJEX07+u!ψd ߺFwfd8[b/wy@ ʍqŠtA`HTld(8 ki!ot?r 4?զe>ԾbT=R:7L\4 0]1=c?XQYէD<@&"sY^ޅ;{|^LVx@Cuq{G Ef|B2U/( 5!h]!y*]1Cb䬧&Vb-fzd2.N`PAsl1O[GG-z LD\l {.[E"LW a@K.q@Ã6t:w)n[o\ZFXɩPn( JcB:Êǯh}jhn8=2WHYZa"Cn0EؗtuW Zqex&Q|2db.ahXm}턘x\=jEԥ8.#V|0(G!ʆkKEKǎ9+nVm, 愯fu%y|veW^4ڬԁ<*ǵ @OKx9Yۨy㰉`]t5]{qa𬞾C;f*$ |v`/M=gA3NvnbK=xυ'rݍi“d7caaKy\=U++DmALv86Koɖ0Ť\!5E~繨[*avHWbE:\q0`v'龍Š{a{w쬜O6XXX_@lzydM-3JRM,@?۵ !~X΁Sos Þ+ g}'! uurm]B/>  {2_&]{KtoџH[^ZTBb 2,I14Kѻ<섏cr^"B&V|Nf)[ z~5}3#٭@;ėDʇ9+p9/>k eag-ܝ$$59x ?~]x ˤo.pS/kIڟuYsx=]w޸a7֧P|_*2FQt#(Hי XA,n.Az+SX 9 yC4S  \ℷ&>VC@,|վ ig}Ū3nXs7rZZnsetu`5V$\RcejdJ ^(U*UFlyvC_R+-tUs:m4YMJ=[M۝q캁|ET(wuAr:_>I%#va缜=LsF9lQ^O>|P![ KiJk ]Sb%g;@vAQvC`7Aٺ;RE02XAP:;+}l| 7:<Nub6rY^n͉[azřct;o=g(O܋ [!4!nA27-xYG6fViO^Nz"|u ݢ9Bx-PyGn;KRz$XY|18/v>e% (׫cK:' eɃНK܏dj&(9 +(>{7iBUPߛccЬ lO?aqNϮ W\+ZgVf09_P;JAYG! ]z8Z+ohJ 8oQU_zz'ʹW ˭ELUJ`UQ % jU?*̩pQuD[ˌq:1An_x^LS3JVxKǺűe۫*a#wC3+f)k&~X.#+n˶/|I0mԖYG8ΉZi'1۳/]bY)en,*(ÃC4$nQ6:P \y٢g!)xQUo\5WUX?9H~*:ViAcv%E3)C9bMgW]*K pZC Kg_s9*==NUT:r{í xŜ 5f.yLlW)HIE@2ay-ڎ8il !MK)V})@ (fê3G=i>?sJ9[.oEi$Z¿dmiB;VKW%PRDŗȴŹ5k8^ʏ$H(ՑithU"X4v#i&jۇ_34J^wq!7~j6ΡT$z% 1*<_ H ڥ#(%veRBY:sA6^M[?Zjhf}?(SN23fYi!˿}vʼn/0C4 e iLo׸ଵG U#,J7zV]7 U'xGmG&=[CX-2S?+h:!5np2.{M{MmR,Pg,!>S.`%i/[Sd35#y1f{أ>MJUXz+D ج2PuX5qUDc VJ\X2ˠ_% X)S-1*R)k}fs x3䶹mNZ䮞EƝAJ"뵒7#oZxÕPvD mkT };Mrag`Or2 ɋX4%tNcjE!,cSgl?oz$6 Ꝅ.@If.P?~M> }p K6>PѨAY36-}?a+! %V8AT,-!ɮ)= 3nte-@"LQ]Lq @ţSE5='хf=E |3J f.A0# x6b5:x2narDn,Prk7`M; 3RBo4h}^F-\3c_ P^圸kӭa Rz+9yuY-*8\Tb{6vSs8;aB':n8 }ヲm:{>Og(wpDFy/e/A\&*\Xx\O"+~dG_FqZh|DZ3PW˲Ҵ0(dLpqlY&%?Vqg_=|ń߰POWԕVVgV+0U-Mk?dVM O0o "D:φ{>xkd+GJN$V"7$P3UGoXk&½>DODhSrҮltB%ݣtE ".;`Iw}[7:c"27fB8>YVq71{@7Z6MgJ;zѓf/'6L1Qm"RrFvB%ѭco28Z\W^E!Ofelr;ۼɌCv=),hurp@imңwkEC؇ )ya `,~"BH1b,Hn5АL{X.,cl(\hFN5$PUeָ{zӜD%?ъz@H: 'R]<ڙc3D﷿s&kj)cIB34ɖSi'("ˍ bͯ/(|/Uu)#AaэVQѹ=@q 4jFfEt]rcv  YΚ|GGkn! 
&S|luxII6C2lREsO%[{q3 KY| WR/-U[^K!:m " /ܽ>0#,0^H-c, l hia"1kz oK2rN "DPS:91q%1Jv wf]8:~PyN[EHd6|jjA⫾@&l5'$ pHgA~n`qDsSi&s̝6Er4bŶ[AgLf.~%RWc loCu?{;Tu12Tnv\J;;\a}`7C,tS 45`Vw=)F8.7Vw+5}KY4j;KY`HbK9z7(YQKa"f M_f1UL~?a82N լVRm31ʨMw&s.UIhRuޓEsQNO\FvTaj7ǡWs;~H7xGAIɳG`" N0\ m*s Yb2I I‘ 5'1-l'PX;464?j q +hc@O:;6O o>[{6 ^K̴pvW xO>תo<@} sC){:Ax'tsvuNIv3*oB/}2dńk-$%PJ03Jd漾a:iBW/d7FZ/ʀLi)2y}S8uꉐ9#%2kȻϤ0D/Dћ9Qj<ͦ^vPn :2po\d VDˋR4Q+҅&1./|<ڝ7j<5ZWh#Zc=_i%`̻fx Xo4Nt-v;G}6ubqG:dϥU}ĘBUAp&\JHm\q]5Hb#fyi7;qוlR肐u(5N## m%F SČ\ cj(wԗ҃бX9}$[2:GΟn^; 7U7;P4G`ftfmU9WIPNN|ʴ2gb?V(q F"ĄWjӟ/&jxo<[tX_܏^!/!8*a'$Ҹ'2cKrVf WKi1w7dmsnfKo[*?4}H*O8|EqnI9qo`?ʤ*=d诩aTb rڌ ܞC7G p5"8t:(nm)HW\6E"Zg50Ne"?. e)H:mkv R,eL`5̽lnO *..컓OXq`A~N(Us#.'C0͌730гoG!;g|h_R~h'i6>u-i+W.3pZo#n Er sƇU}҈?P_|ڶ9T1ǽ b~):J;A7ueH]"?xETÂ7Biʩq%˻JR\)=) ݲgsrF6-CD 6rUٝB,Ce4r`I,O|5C2F}xO (HZ.Q(3gHϽl_V/6ѫ0foAA,)巴#zأKp2j祑nLCǔ&e_fIWee>s8ܳQ`&D`u\"-ߌN` !G<)cysK[]H@V%UMfސۂ@ XUM[loiFmw&y]rпJF M3Zb8BF%N1 5 FnME/d3s9۬@nۧs*RC( U]3bj51 D?jyI35z&4.+$ܞsӟj߭IuDQ3҄QͤA][l8Pε2D[鯆ݹfL3YnS*4q)c 1% "aMev+ 5.Ug{m:񏫄"^.0t[ۜ,+c `|+#=ܡju+hI"Z?P/K oރ h2R;x5nPO}pY xF1)K,_1rE ]9xCS3 @<d0(,^mSddBo͓_WrI |9l'JSkO`D)o@Jqk|Q. *_d%!F}0LSPMK  ;/@I]w댅>0&K׮Ilܪj*pF3M}ЋPE'$2 R޷2F;~'¡Q!Hd oǁ?X¯GnFSM'֋tw"A]]0H}wXI(FFU˻J̬wήA DgM Xw"\3(R3Ⱦo(ȧAT\<|&f1#c "՛  hTEY33!;uiD{kJ2X $aԇ|!(^iz1ҩMiỺA⊡Wy/ NICEiĐ8]jvUF1Y bS#$D8QtypW5E'ª"sYџDy? sByȭR3Fʿ{l_qdnϩj2oڜIR+f  Kaˍ?G͌]HZudzUV h*E';U~O+e76ڄLb xB#$K+QoϜ,a)29]hi5z\6?fH2cs;VErT}! 7 4u.}/ ]kᕚ 5Ϋ`+N…dw{ qg{ޗ U(맑؂,"U&-1·r8f |k|juO\Om^|t',/5'<›1`Nm[LNE*I]KK/b0֩M>~! ͪJ Y%Y$y߰LwQG):ZSg^ ?_?R\_9 b[n9mO`nSdIiǁ1$$_̫wsKG;zE_K7Nۍ9KhQe-uVaY AvVaI!wMew˭~%>Us=U{2X8Qp/erڞz0k} F7~%099]goL67Bto0xEjA;bN*6DIP@!>(0@IhSIƞqk``܆^F }`:Ugs<~Qx;+b5_uKfartùd `(j\œ!@q*tƭ"iu_"PQߨJH[I݇l.ęv̋mrzh(G> BEIȆ@ΠGCFp;f\YFƠ F&KO+5/ ;N/}~:CB^I[162yImƂz_,T9:7Oڱ3[Vp~yYT]n޾_LjG9l\BOr^Q#)ȴFEĔ }M=@q92=D=T~~P {e)q&c' QD 06\8]I*@YDPKg0fFKu7nLW OcQk"$#n`V&n ATZvxV XOrz tKk^#Hv "dAxuDUm=)ssp~唩ܳk\X^]B'Tvaѩ=>o%AA+ 뮩 8[Dz[*>H݇H4,`+KWM9oϡ*"pL>QQ8@r6xSIJZO?:A*ѝ"D!qixi l{5_tfZA E)E5E{)+.ebI] ^794<fU==w縫V6!O&ody?:7{^JM|pRzHY=gT50^NNAaS1f\>^; Jo%{2I@b]t'w.=Zʜ.6spQrT3@&b(o<(v4S-Y$6n3&几9+4? 
uGo\gê?($z'8i荳q+]p(]8nLZ<#sxZ@>3C^u\ L juEimi7SK/Ězn{&u3\X4*TKp6#?}JT:02hа80uk^|v:6oķLUEBdz)_)&U"*">E.EbKI- mENQaP(O>8XN#oן&{'{uЩIYU}{`qp<̑^/Z _"rIUh8~{))}{9=$ݩ;KA O9 mqp1>81 %]jc"sNjJ"4c1'ֈ<)0WܥNo19-ۊe-dJ2K ط%u)ȉhM21եѭ|rs|bQˉ$S& nKB̞P_/ybϋZ4\q#)tc:ˣ} zk9/˴rh}C x^~rsHPJ9Hm3,+ҝѺb|Cj^==i_Ů 8b<%|9炷Z7/@|a|m9%ř;GH,2R1b.hBfXyDNP0e0 $1z5|z+c8 +>g(cw0d"SF-lwJDN~jP&N,!D"0jhڻs(YT2@Th6v @n<~NdLό :߄_X,U!rLK',a"@+#+iM*Zʒ$r)xϋcl8"}\ˆ7ټq1bcy[.$[|K*J.tډuG1Ayۯ\͗&h-DxM!@v# EVW5b'=dYPz|1k(ÊE߽nkNYK.~LBݞ$sds)8*tU8}ش_#vj`@ ]w.\vЙ4^ҿ{Mh0#.Qm VkpcZ KE1 lع & 3t1h4:(nID|޹0\VJ`wq=1'Fr7cUd?tQ2/tЇIr[z?w0$eT˾rr .X9ps0X j[85Fj3 (P!:<H Wtѻ /׬?vb\ D=8&gY)IS2F lmR04qǞ(cU؊Xa~j38ps\̮[D9?3Vo|ѬbLYP?ZT.y.M{V`.:7:[Yɦ0u[!vx}e t۱8{O:T9̍{ui_n=S[!N.#fq@lev#~%a.#M^A;35sl-%1l2o$t,D +LM;xD0Ca&cHAʱ~z;/cQK "ϞT%`I->|{ .Q]{r<f1͔ )a}*m]V7;@w<=Zu6J,D s:F|sDMQW0jFPx.( QHؘ1l];4F w Fy`]ԕ"6ٿB%2> 脼J',`~ԃ0W H& )XOͼ-jn5qBr"نe z(ߩ@o(t[Bۯ@rEl`^$ ReGRWZN~jǕ:H_]W[k ZpG29^~D7똔ᜎ:8*[\79sw(p)(+Ywڼ՞߽#FӼ) yD0遍~QL< DZj.PGF\SSMxtvwOm{92ZL\4LSܤ!"U jhі)Np%+~Z}_Dz{Ew@=O(蚟ȽBUu4;4{l36 y HE$vq=Ŵ AtuuٶR $O"$$IC*\!\#jL}<W 1 zEAKE .NYT]hVMG&&TXp4㙺G#Tnn/{+Uڐ GsU )8Sn0H_/al5Z9@d^iKT+.zB ?00v/EpR';ϩQdv%i _K@aH H-IfE?7IƭC&A.Ⱦia' ڦ-r̪u+qynt^b̹^D]VIX_] th0A86n{x)jgඛWp$(,]ә y6dHVҩc\+ _;W!K.*R |~qͻ#Q6U|CAnò_$}K"n50-o"KBUyC=:PqwEI(俬^aO0{Q_lvaW:Km @f⧁'3攑~&6hΔ`3x0er(0 6{6:vi<"#q1NF BTm u X )b4{^^<;lOv.g-?%tx~NE1Kެ';0+hw_]?3:cm `J)H!ۋ˻@D+@=|%%}BY:R5B]VE#n:yYJˋ(笵g-23QJCꬓ u;SD5~YcB5>p~#@*FOܴAwwkɆ|'g HgSÞwι OZ5Yv$dɅjܽ#EX\xfY,ȉ)aEXXv-pL-\Ra1\Li#{X&/ 2L~y Y{%R8ۂ>{ѨmG V7vI0{.X Z ae65\q .z=T$/ u糱1lnZ'5VzZ 퓁=ӐRshAq>}v."Y]mܫBdd^Oލ%ەc2Cx3M/K}f2gQ>jpP!}tcSb>:Hר-*g, BWukYz7%jHKi`q5ϗ$a48r0\ΆԓoTk&ǯHj" gQ_zIFwwpT#>S-EoϒFJ3f ,zD݄o3s҅EOm?V)@"-d&c@R;ۡJ.RW)a6a5! (JS-jsL5{ Xyvaw/.u4DQÌ!K@٦_t7uaOfs$9A t#4Έ)NDLt I}Q/d+s4~ Տ v铚&M"\T}Rx\{=[ίQlqq3] Ÿĸ*wP'JHZDZ4T_Q6_eLjY=pe9݃cVȓ8t,bGlOƜӃgOiDqn&4QA; Z9j;0L@\Z@͐YKo-NP2 sCκOt ,Esd΢c=M$Gd!ang=˛Pݒ+m|PRBX!uժc(qұaI%5aa,NNtOlϩȘCW+8"Dj$0h~7sHM@2x@ǘZCv:1˩vBwvTzT-J4RZv16 191b+ŀnZdj8%X0%ouiTB)3C\©x sy7Y:u;g9}ώ;H"^}_9#mUͅz}dMGr:vK W >|ޫm!ȷO3M-,/X}Y\vv4ƽo#WE^y[6X)jWc`ɕXSr 2*;R\o(E+z@磚|'SBM9#7}A4 Lx=@ w2/UaO4t F[C3gPSlsՉ`#.)5 0_mهZ[K ?l=z>lǨ6u/܋, ';kiKQ5mu"gN+Rcy eHJeM13)qYY齧BSrQiWQ)GxyYj2@&4cXw;ߌyr\^áX$}o{e5+K{ӃT}QBHVs^]Z#f% g^or5KHH{ml5WRa'jryKxZ߲mg&&GMsY5oqߩ7aN`Xj`F|'oΦ$\RFhNm߿5ߏ!Y]fGM=Fp p &ٓr@aצiEWz^.vg"3 @ Zr&9;"AmxG<~F=9I:8M_w’f{9xf?\H(t.1P&㩂"pҸJ9ߋ[oH2{=7ڂR:yliK2SY$b-Ƃ3<%m3=!tnxr vA&2&}\I8ʅO7f` Ah~l:g"va;*:Y߽ DMC!-t,ctm=.L~S a`xaE1{f KS O8oB7/I>@pp*%3ImNom 6m[n;wl>zcn7g5e>~%8_%/EF0\}3&rk!# ? VƭheA"9ן\53 q)xu _=v4 ^iUS- >[nFSH$'tdaL}QcS.ZRbDE =R}z(:{-F# +/N.T6>P%8wp<-2t!,;5{ѣ寍0i4?%YqEL{1aؓ8{Ui^e,K`RH(3+ks4ݜ'TOƭYN^+8;Rͻ^a_hUA5˒\aJOJTl|Yt@QwdO3)3G;Kntd@e~XC91ls^@ h K>jy1ֻS-b^[jAD\'( f܈z i+ƽyPi Lt9Ϫ[,  7x yCz96*w,OR0@Y Qz.+oH\V@׻O0"elGʶ9dI 6H:hpKd2غD-YB5 9/Mk\g㼉O18@X"qfT\ 8E ?u1\vR^\-sjķ=W^~*<,[>炝hsoS#SFPJ9uS@̒I% TEK 3"KDj1)S s.7%5B Qu7Jn Cc_.6TQ' o/.rc42Yۂ]pNH'οa 6.vCmԬ x GգWytOp6{j)w;-k,6ŕTN7:%l~\7O~*舡(daZشW)l@`rݢaTZבUjʌJI.v'w}ai&J!OAc\^`\-YW,*3^{p3SRVR|v.d)#}M~-Q YpN(@j/k@MĹv-$P?X@RƜ\tbhr9*_gS,fo.GP|rIM D7OCrP{ J4r2+vϯ v}fD-^@zst)+wֈ:TwXEzNorscr d凜?zEtȲ)X\1@\B-2R/6u+4@Lyw`n,8"tfvgx!l>C @΅ 9SC2 "U:OP9)66!)x€p:ƭ$hOk Tt&p j휴,wD0yI,!RZCXp2Ŝ{SLި<4dNԗW<Ǥ%YAj-Ws\1 K/%.U5"͝~RߣZ y>@rCu抑>UEǰO2٤Fɻ*7gp)stЉx1zIs$ӧ0i>H{a`x`e8o %QeƬiW9ymZȑY;I~`|Rsp AOZz3ݩZr5w *b8y͑q˯+eJeZ&,˷%#uO^{(JZ_Ůz`p4&9/f7:$,,ނy}iNV*o`Fe)en߸Y EupfW&OUA+N8joxt@|]#k9! BSLԷ5 <쫍D~sU&2ʴt&pײtA;^*Rl&O%\V8fXal`K fpd >j/଺z'^Z̸gIt!V|8?田qvݶE:ZÁڴxJ,'<',{'9 qݫ8^o#Jd bha,s$Ks xpd9I5ei͐B ]6 !#.fIm*ovE93/WٍigLK m'pv>Ls1GNȞN [;'P^ky#UU~M Ċ;#Mx0רhb=Z}d ?fXF+#锛}tIYd\"1ޚm[U9|%:'{nae {+\)%il*(Tks+F4 $][k3!Qd)E☏E7=Z9~7QE! 
)3j#D L!yWvwm79?&RK;ߨz=tƥ6:K|(̹:#!bAj.%Qp0#äaĞ346ܦŜG>ҁT$i2Ģ:=? @B^c!j5{-UW~,;GsƓFţsp5wh;OsKfo&'*HWKySlִ׽QjI3$mDZe%I5f*}Ff2dfO; 0^ +gθU1iK 6nCxCx[0{xAlT\YϠ>@Sn:vހ~Ltۚ-籓IHH|g4@^i,ri pFcp# 'E0 4I,1KW>QFhϓ dҌ18!3_BCiqT)+Μ$;x:ulI@7_~4ad{Uy @mp2 0ldg%O-DQvr|nl-ə-L kB˔O`D 0-zZo]X ?of<W} sV̠xց +D +َ&LfX z?]W.#s2>!P?\x}GJ\3;ؘmPC*!U/o:.CI=~Q4V/OYy/^MmX siثyʇt _gD`G{2j >B'kzvbn%S&,s:~yRjn 9%'F1im?J8PAӘ>T4\J-zǬ !(T+ ;.cuJ~nHj{GOEHr9Pԅ9c0V>?P5t"A3 @͚l3ߍ0#DG]\Y9f&wJX|le-Uҟ4#I-l%x$p:.:_bߌ5qVpe.eu 벃0f GJ]+᠜Xh\E(Pz*Nōay6oO,[UxmiǝWB֤V~"Ľ(&Y愤} ;hd4vdml$3Tp!%cqrۿtL/)Jgh,7 R-Sy/{͠QO>U:"x U)Ƀ<`-ȡ'!4.Cġi1gok즵?`_h6TMsI1ܐiEw9Wuo- L&AS)O{e"'"T՗Ω;VD!ׇ;ϥx=_LxW?I%%L)iaҭu,jTuUHL+l[>hρ O1PR$\;yB^-gW=#M/u1gYp tYv zElU7'FX`A (\)[`лpXcXy^Ieid")g.XFfm9?/+!w{YsW;_.ueaL>djd̠v - тKUűSV?9#l!3^>u\Pȧ4*~Bץ'¶syR5UXN s?%Zh¶+vl`m,KVj)fW)teiN;7Y-R3 ʡCH֔| prL7QzmE(V Cä@ x8iop1)d=dOlyg=1Go6kS΁ɺGC'l#qOhWµɓ vYf:&ҤEg3/\k6.T*҄~:sy zte+*CR}qﲄ8XN4 X{]r WIVWN%vД-}6궭#yI$4e5kgSi,n#ڮö>̪PaA;Ɍ)ΊԒ:~V+3 8kSJFYbs:+CEZYb҂z r&ka݂}sQΤIRV'0 5zݘiso ʂYfO^j(#snY4_C`ӟwRl)ǎsWoH+.P,Eqs Dj`N:Cmxrc|2g0W"—L̎/奮@%;O'\њܤ Z9֋t*_Ȃrj-׀ lU [,6rn:p!I cft `ShǗ>H*r BPYudA9jq$n$z &0>|P""?sg_ˎ$O7hg +xrW]] . i* K~Z4Ğ2:|QP5Xj۔#|("܈Y:whUލ֊cLw%> D`k5Ljv[f]_%@''\pv5X#tr/E}Z7v.sz^'|Qz/~I+ my ʫtR `PvviDy[ΓgZ b _. ds zv;u|q %Nv0򺚜ѺS/YJ 8.E=֒C*<2<-_ɍW.psajz3m[hS8)ƑţyZ*]qouֳOkVxF ]X\ΠK8Pҗ f).d[[o@}j"t\:b:!O4]jF4on./#t0 /uJ sD gq~s,;%>@݊K)G2/xy2E ߘB' r/WpӭtVR+:vw2ʬ7],4W|TRŇQ?ɚ4x$Ê=y6,9"Fv @%*Q4O9#AvNb? يSpG)6::ulE6傿zqϹ"9`f״'%!ftgƦ+)*eް%TBXOIE_wBjPT"2Dڀ#!2RXbO+Y5G13zTWkIP󆨶_gbWnQU'p: ϴwϤ+=I]ǁ:խJ(#=L%)6I!SOk~˜yl&'LZgw_ެ4샢Ľ0vIXѱAI~iE4m\B=5%|?Ɣ]՝rR _6*0*)ͷ23ic(@wpFjr.SEh㇠նxʦ;ae-*j@:ͧb KRA=BTia1x"C=j$l˼yE\6yza5]tE]O5[:oTa7|{BsICC)7We(z.jp_Uvk\qyø訩/'hGh=iei*ߥh1*K̝-f Xӧf6ݐOoF쫭 fUW mHN5i 'WM@!&{(liG:plc#sA*t33H,D#2Z*n_r#KD[Kn>^ie~o2mqXAlF^-CMlˠWp#`f,ҥ>5OoIyEؙ|O1:h.[Aw|_6j#c}b S`E}\kn)nEjt38`tm#O5#;NX؆;YYI~V0mJ#`ʃ>"Jzǝ^IɁ& [;@ ;~P-H3 V i]=; k(f7AQDw#3.}4샥D7[b C|ڻL},<ُ :+~i ;K >b+ߤju h c̅`bX9m +OeSopp݃~%vHMofy?/l3o Aaİߛ#QNYK ϯG&ѵf`m?k~Gcf!-2 &_HԺMj9M*b6OcKQODt7#NTQhݵ(ҋJe](qy*͛02*}'3߅q}r`jp'W+6y"P"t.>}J=+P}4[e5s: 2#)k꠵K9:knN1!dYGl0xʯw2sr z7vT ٳ\"tef̗Zrm0сaGsQaQWlg1Sdk%=lidm? n%h&(bW 4L>R%2CG<';B`i9|DJ:EkS?ߩAonX<3g-[H?4l):f)|M<`k5ho,E哖JbEqgD1)_I ɺw :93+xuR* }&F!P;C=H2(t'fY'P_Ƚ/yqqb[R;sA32t@|u vgxZ}6F@be bj}u;~JC~ۢzh!_d}@|"Hxm]j8WӾzy$!rRHE"tF30.-#tld6o\ٜ8-κZTQ{+VH=Zf_S!Ld::$ѳɟƍ?uwԝRC}4E)%M}.,oB5B_DEB+9]5c]PUzo~f`3,yQחM%m,^VcRKEFƄi(Q#W[fVhܪB!|!xu#=fŌ,ELhLYY}Fo6o1dJ]![NBQP@f/Io#_3H1/iq"YFfdx VU隯ZaN=i30 ]4ZлYm!֎,KI;l^а )rl)i^7q/Sh`K3)s{i^NeGEPF˕ҟ/Aa)>ߦ `evD_"IpmY|p.շá~ 6Z4%>~bAgҜȧ`KG LcV}}ʐ>Ö+}0CrU(_iۃ-mC}i)x ބɉ[+\e gė 0 k놚JLl8HT"im;A85愯$B=/{֦1:"Lfg"۰k;lw~^M2PޑG? @2+L(=kLœJk+ Jshsw𧟗Z`}cӌENO01}ƫqdSjBjpӨhdևN){v&Mx!']LSN.%4$BW"u{qz O ntlyj1vZ24?XV.X{? N.CbRi̔{>x2I':bHL(W[kS\D68\KkﰀOiLY\I Plz?,Is}gyZK_YU:\e|ڥG@u}$P!U:V` BH}{11@2yѻɐ8K0fYޜmKV~yG-&C\ '޴ &3"})J##,"GB[B&NHBR,~{3eO$x+?_ L_58fovu[wcGR}d&Xa?o̓pNw[LJR'O"{:!LJ:c1x+me%f7;Wzjaĕf3B$+wE$eJ'@Z)moPY꥓?lz6ΘW:I%T` DؽaCf̨SR-tPڽ(y+'͜%m,j QmND2&lY?8b;dIA3-8!!d7lj$?" 
bU.u2t(%uG9+]@J/-ZBz\m0[ l{Oz`+r!{7q}5Z:nXoi*E-[wZc>%>u?7%l:y3K;9BNCqV d_$)C 4:\ TܪJN*cq8sl@I7Rlq ,ܬFu4▬Uu ݄%=ewgckSJa^f i 8 Au~{~exMK儊5~u#S%&>Qn Z;/&K`)=?0DZK 1*4~ W,'=0>ApG ʗWYGFOSv(0H.K{ֽ\ "4a9.]DTթK#5 ݽՒTۂ9LGͣE@![赍xrRS`/.& YW U2N8iu %J)Sήm=F.#u^cQEGz\2CE T!#[#B{@36S4K/?߯nX R(zJF\Jk?̎c&&83o#ɾ] "NQa^B% Qq6hT)xݧCP͔H4.0K>S+GvTϣTvuGW9>/I# %^6!bu[(2w#E@o2yӮVF_𦄣KF5k Kx6 ~69 FDè` '6]ո Q^mmr{!`n;?2qsl6sCr O.fu>z9@C30˼> qߔBTqSTEwG!Wd2jU A>ˍGM\f 荠U$<̷@8HyӨv a\6CkNhe u֣_wV(ܾdqp^iF"'_} HY Б n DTAB NY.< te[b!6"CgHBb5RL~$鷸䳔0Ƹf~:Y[Q5|q[c=;9u '_uN걝aV3C5!lMO8S;4q^ +x A2l:!J:å]mcMݿsML3.8̹Jϭ!z6.#\Z!=1!S]j e >vKō.EBdvV TߤZ<"r.H2kϢ|Ls)P`hG+-sU֘j3ujogޫ'5)_Xzo8יvjOWaQO|mpQ 7z ++j3J(+4^ Um,~2t;۸+?nnv ("q[&QS.F#2a7^}ҚG-*ֆ3DDc=_ E|$$I!NbI NY1E7z>-`<BnZn{66̬9؝%8.3 - p2yXcܛmg#UTn |w"06" X\ \/U.FX< d-1oi(a^ n:qİ!mg*ve}?= |P|QwS)ojj"  6y-v^rV'B?_}ny"!$n٢t'>[v @`zu~" =q*;לO j+ m;r.z\:#LKsT<|Y@qtTWr͖&bOǕbL_3US+GLvphvau 2\cfEcDhmRi7xmxUaW@ld(3譱, v@@@w=)zЕVmLEs84E6ɡfS uAYuԹ+?++8< *..mf"Ƨp8 ns05>0iMs6HD E?lc8ODGaI.#å㾱x{ ^. t5`TBur\XXfvbFE٣~ywJZnbinIܟC ^$:}T׵͏)z:fQ 6.S j:Ġ{xЇxvuBt(z4+xd^\f3,0\ $SG#t>tp+<ynk q롏O= tůk +=[=oa$U7RUvKsF0_:v@|6@~慊&+ъkg=:6:/uݐyAKهV7f^3{*AwkQre \B9 JY$Il(W!kb$7]T.dUSڊ_MQW9m,Ж%"DcscKB&HX4 Wz+DuJJѩ#}xCP;ڣ1#9޾xpt#=1A^nxD.y%pxIx7'tզ]>İZe+9A*Ƥ# WwIw)&V mĤƛj^~mT8щNqPu5-S&U-^~ 6Kh;*#\}qć)]ȼCF# }~֦[%-P o[*1vT<^K^J(rb dޡkȔ{wN.r.d<=%I2y O5ɚ&t%:0+zԬ r.zD/{6Zl!PdTa = _~Kr9Z5ux4uI>k%Y"f.IV挘iLâ}kcd0(0ᩄŢqڤ >3whIl#2pqL|I]CWm<7 Jc|Y{idi  >}e8``Gl!L@-txdM. +JME<?s;(5"6OLU/Vڢ6O L: n6m`̗C@m5^,GPs a ۀ@f ( e~d˲i%*Wqu@'Tao)fc<4'ZUĘvg ?7P&֙9fCH"Ğnm7=+gBx7Bٌ=ܙDgla/`ߟƔSP.'ew'YoKy,zPDF`Nf+Xt<" >pFdDt' E}p{ l[ǡܨ "zJyT鄬P#?G!q:SZ 吚ny>?yA hǡMQ˭zҿX.AA{¤lH hwƕ'd vYmKev3G#8z,V3C n ~(!;p (rb9硃ŷ PK܁9N#s>svaJh[jW5?NU >_,uFX; !YˁU`~DpHjpcޠy$wNn jtbq)4Q/ ֺYZ@t/@F EV,41ފ JXD&GD",nO:8vY|$s~RbMiģpލN"$>I   BHyݳWDQ抸4D&$Gx9&f5ԏM s&D/ktƏE5aNJ3 Vb#-zSH@d xZțqzBMz*91::?%hps?2@ZR|&JNrS@DfE>;IJrti?־;OD0!3}^Ήh?Ҩi FkRt.,fjp8~ia02$ʜ%wtlͮ9ju FV3)[wM/՞" 4tp(U r & _CHmmAAWF ى0=K~p4࿝!²Y&T(.dS( d4f@4緼6[3ddgNYxJ rxm-F㊣|=ʫVE)LS%0tb`iɬz 'k  P%w:;7ǑW)B>JxKs ) ,O~4c]O@, C@v 5_mP|ܴ'Fz2UDՂCN.ߐ5'"@}:Vx(y0<.BX 4jÓءހX% :[Ь7߹0,rc"־8qrMdTG]Ff9B2P{z \Kyӓnԩ/My͹vu"I?YfXf?ԃ)#b>O1'Kk"^n2+G:SJ*c_9%(˺)(8~տ4!jx{ gW;$`Emf ќ&O|&05˽H$$IfW/5[xh2[Y0liQC=rvX``e1a%\ <wY\9]%ѫƛ-vF8xg i{Iʦwn_~l:bǂe62HzlGAaG.5&KHPÙ^ɼ>S n\407vRKHqZwR.:&I0Y\ Ov@fYW"s8+4̓gX& ;pz߫%+ݴ NIFv  }"LxLM <ΉZ QŮbY0 ]oԞ8z--5y*D[^x^[`d٣QV## 6X ʢcL^Tם84Tl: AvF(]Y=Y#\RAL.19g4Ӄ jb$rpĻl_k17E eQ^>6vZ'^)y.r/dFFDL4Q 29!0O Ӯ\!.Јl̿NB͛IF-<"r q̄1:=l4-dP6y;‘09%'+ agOfiWv#uE?!Σd ӷw(ld) 󲻛ʅB5먒BCz߬zh⇍ /we:EQMsq 2|W m my-a?j+8)7//Z)Κ)>q Xlֈk݆C8±LP[~ߥLܿ %%[J)S~H{ `rm+Iߦ =w<"4B^~'D|k_^aȩ7\o"&;9G-#X=[Q|ұTѺw$xQ ~\ԑ|n!*sjR8k@2b;kհO;,A Ψ?<} GK'Fȹr'H_Փ(*pn0?PB(Jj$jmWRbq+_XKI]^͙|m_[oz,y=:lEjX}k]VqqRB\99o5+ݢ\x7g(sCrQ(WI2ި iD`/]ĵ@TRŭwa-㣿mI3Dr?y5n}wu<-sx$ Z)CO6&gyL]HUa-O >] 4ݚ|ֲjiACƞQx4_݆sY=ӢR{rD gYB8Ҫ0? 
z{#a4ﲍLlk$Y${bTFڴݤ Wea9z͛\[auCvz8|dqoZ*=[%5BkgDtzH>%B*F7(J@{L#$ phฐýrBq֮CFxH9T 3qQʤ<id{KrB sAjC9n5$A@Ou"E,ױX;j]IBnfNɼC|xi-Z1G$`8{vI"J[Qs!METb &PIEM4۸9w=ŊnV CF~&%ƑNDJhd%@ H^]XPI_'̆YT8WNnǰmD:.λb1PP=<.J]!\o5tf?%)6tWxe"jQ>NQrS`U:WԄ_ 705LXJ.5) S^`Q~V&G7d9hwYF.P)?lۑ2/NHoĦ鼎cbܸϲη&.Jd Ӂ=H愱)rcS\y$S{ia~L0 7ljJDzq$3" 蹉i\`37e#ew zwRX-K῎<7Ec/Zf o;g8^x 2XUKكfUn2 GEnulğ'z6h|(AZ S,j)?CI]c(,lץzCj &dnc$.%]#Q`>R?bm;DBmcFͶaմi~~kJű7I @evIWi+ " \aw]V4amZj{o!]sLR[]^2-$8u'va-<C߼^\{cI[+ WBGQf:~Kk{ͯH韻 E?[QCt=A) O]ܨ+PmD]'uwրЗC2]<J.>*˶(ZKX&hMX ^pu'XW]P_wk珢%up|T> yl| UҌM ;`KK:č$ӏ D} _dt: k MDg`-i&ylVG?*w:U1M(;*8 zMZe1^5c!;nj2 ӷ'(+`uV!}4-B<""I$жU-KTiQuxq 0p3t-|H7NlR ""LWK|X@B;Ge4(LDep4дwPQS]}XCQCzm"bF0r[:,Mz`)PCջMYˆp'-H0,{Qwv-s*bTÅ&}weQl44ͥѻ z'Tx`~j?HJAO/W(+JOs, : XѻVӡEVpW1B!pJ'a̪cnJ4ʢqlƊc%4iHFm񁩰Ŕ1>͆?eV~% c9n >ΓP28u4ڒ~\Kw(] Z+e/coa=O"|a3㡧p0jN[HA-\Fp5~e,Kz uS V B7Ȕ2` wcԐTbN^7,+6 C7c@5KzOk,mҷ>3 !Kマ8y mkkӫ *5ȱ,l|CG{J/`,6:4d{啺gq5-xN~m>)8 dicOI͇ FZK;8晲L3HH/dhq%n%TForGbԿ) 0FzŲcdsSObNݠ?* 5K YKJtY$,+y"Ӗ4wmyi~`o{X ˉZOB/O$?+kxyKЬZ)CN\5uT/Xk]>O-AR3K%vT0C9Z7$遒 rgs[4Gnأ.8=iXh):1d3"22+* KvBw{ I' m?Ql> .%?~y-&OFO(>y)ןCPc=3́>Y-b49SPhnwΜ;ϵ B}^{'ϔ&h&衏 MIzy; CwRa.5qICvTș}`;\'$ p6@g a]ͦ_=;k{2q; 1'h%r4f:"(a9 UG^w-+9 eC0'E5.N 믮48k-.7[Ha(2OF\- gXwłNA14/e.$`G>ݝI.U?S[*jV.g*]P-拯 d ߣ֔`Fmǘ+旤FΚ,ûe,(@lY]rdx޸X6d^l_K ~/cOv~ЃXD5PB~:0ZO~jIPz?Ew?C qE0hӔn,䴩VEF&2<"ubjRۭ4p<<>Ag5y ?1*\ $lsU1UAgҪ'gC%M)`D7$"ćհCM}jXͪ3=i~_~q8L)vP0!HSY[=WsAeǕLzVI14hk".t[Ի"EQZؑOwU OFadLZkӝ !^h򇹼4jiǝC|4Ot.j t5}r$Ef!-%^9] CRAZpXS TˈEjPU7dʰ,Z;qN&vH|4hO>-c}_RNRsG.-ޖx7򁵻We[>1Yt'"@̨Azw"bF`<-8g ?o$>INҳFZQ7u2~G'ۮ|F$:a׮kLbڛK"5FfMvر1rHZo[ n~1IWkE3y%(_QW ηTf.oce"DY1IqE[@xohS  puoJ3rǠ]R7hWG5{kXXЀDZVKs3 cۖMd XĽ=џI `'.Րx8%[qN9k{k\(AK Z'!x[r Ndd I>0190k;?lv`T8'٬Շj5sAa?" bzw35ԄH87r9 MmNz sHBl+9 fl$A"h͒'GI&=oOB%fR] ;"g^Lװ(*.mp ?uhy_#?ʞ[ AE!|IˁNџ߶gv.n}r=/,]& zRLSFaniN8恂 1|wz8[ޝ#f-@"d6ezgVԏ/cO1mfJJEf]~ڗkR/ =sG oK geÒ ?.[38d}t޽ 3>]UNs]fe-Is*Apo\I#{?iсԔ@CfMY/՞ pHX+h嵌u5unyzEMP+`Κ@7JL%wKߪ;J3#O7<_[]͔y> 3o&ՠ)O\m-Vh=N}x6 :ح6EifS}q_RsQξX#qCjNR2'{a fsͱRth1NSi]}yIs멞_ȗ`[%Wn˧TD+(<5?en5V Dmyy`WYV_h_]ΕrƷ%tf4Ew AtS|^hYM;-QM午"#PLIsb44&?iƎtI (&r@jظ?$m~FL;БOZ02,AG ޹LhXN8c`( 743t 4@:ލ+@ }-] Р!J`Q5%bk868[ݙtiivj;%tPL,K8n`^[-;H8g_zq V*b5lؐpv`5=@PG yFIt*<BBs̉L,#X'ArRc*C=E0N%tIqB"Z h^ULBq!|ݭRO_HX84_ՉNIJO2fQ\>q (|vṚ;ӑoM&5$BД)45K)bRէF6ҚB1@ %]5A.;Y q&4T /jj}ҭiئ{MD+Au`趵M \|ےB5 Rls^j9 &kdL򜥘V+lM4QU8 B@UT/!z~"8$#E4)Y!"_tq@"9JhkܠÊ;rcfpF\ݻDIUQovi`G[xq LJx 2%L;υ eױsdt#`T.J6c/s<fDZsgZRVרЇ!l+Mf54#D2FsK8篈_wˑ4D1i, 8-1s nT5/CX0څpF3CGK = S!Ⱥv2qmfFa:jjt+NJF8۳Hvn7Ҟgr`&0`ef@5##xɒ(o,ڪ4Hdck{"Zο=hD2n+`B4MQᥒ \/Zo췠'nM瀜־ю`CwTN'bvDF h$Ec`[|tG}@3،FKB,쓓GpϬ5q1&#ZQsYqCEX7p}Iz9_c_Yhqh3TR ġi)}a|h*&I#gKyl{@x%ߧSF5o ;2uGA>]`r´GDW@&gΉLZNo=i<˭NW[k@zPit_ppUx^'ɦR2SCnǛopuut2=`BL8 N[j6f8Vs<4DAK( Yы?D;ټ*ukߨx-*ԛbizeV>~-WٿҐ"Ģl}P9 ~<~hR$&(QtymƠ%4k SiUY&4- Ru<4Yޮuؔ6F٨bQ%Og|A@)!A\oDTMV{TBVjǣ O1TXmړ,];i@2µ7x⇉Zq.p] oGt.;|QEʷ}&`\ǽlSH%/O% 1-86z6an18rwRa~n\1QBK|o~taJkB ZìtPcId4-M`WH fޞ6 Zc^3&Ç]!pe2o31 (SZbT9ΫBeqsׇ/ 9mu)!a&<]CMyDQ.߀ 4,P|Xz $pЬU_L L<5-/Av։Xd;e?Tylҭy>B7,&?e跎g(JaN\ U~g=v3I'Vw0F3`4Dz:y{.!UggC4h|1 UPpo<}64&n;'qu)Mv x^h*WRb.lbIl`:Iv)kiqqt}Q+C$Hp@FG1̿ATRf`%R ӡloZhrK)߱::p֧,")0rxص׏YhtSgsxn;|erQt9ܭ\*.9\<!8 d˻<(s)Y.tA ږ I'%5֭*$ |T9 Opg|g|iyEG5/ 6dX~2Uy[w,=$rz}_O흠S  w؆IoZD~i=<'"ua-B-FSEyMj*smNRWkDtۭrknx>l 4p?C#`)wcd(46uY6./wLD8? 
i{op)~GoZ7q}a⨉uBE`2ky@JkWy!b=vA3,TVˡ_^@mဖJ~ &[y2/3/f=#>q^Ϥ%@%Xϗgcn& pn̚)A}Kԫ [;( |=-dfAi?VjCEdc=S/AG6ӣkFw+ FT\i=J*N[& ϡEj9e>ϭ3%/XJIkv8(S,8N7~IYb= %8/\@YwCQA# &ŔFfu+jKH 5 1ti&^ү3ퟛx~y+žEf?ӄ  œ"-g'4?;Ҧjt{empVr+z`lj>1RD5L$uq[%sQ9e i] xo ZA16.E;ZdMhRUPO3 ަr *g+ȼD`i,jOZI'j ;b,={LVh9+U!P*?xc- bU?g z֏Q2N/67h5|›+'Gl @dnڠ(_ Bb sk Ҏ8VGC(t5S)/'UUkNsXVR擯z$*9XF%rw.sչ 8׶Qz8U69uy#Q0Y?/hnSd<ׁnX[3[>r"hD~q7⡨-g)0(RХFOGb!&) _;FMxE}Nٳ]Tn1*q}:в tuȄtz܃} F`GfnR4F& ~ڕ_$ϑ!˙P]cO]3-ᢺuP`A۷(RD51m>.w&bVhBIvq\X61|5ޏFɹR\ vp*ӥT3jG)Df !)n "mo3]_9ԛ ߇\ګng>M0 MMToJfvuϚ0ll¹hr|9+{k Sd rbn _B߀0:WxV2Nqeʪ\`/xbxXRqkЏ@Me>?3zcjԉhγbɼrfh(!JsYJB'D0fMt-x`}lE&"PwGRrB50pJ3DoQbH{,ڢR>nYM'Z!]+C0QT3 P{2~ى5H7z>oulĔ?hbFoDQwAr_帤LXRZ3**78J 69l#Y]>LKi"%46VOjF& aNdIО.[\eZ yn=߀o~Ÿ˚&&4i[4.'2I*41x6\w jAd4`'""Tidg8U`GK ;<[!qp\Ewjy[TVk 򠚴}LR ckCj%'[әy[帲E1Sah%Դq,~NUvbY}/{ɧlw9 AqbfB&'@L+Af-KdžՁM$D(ν r֫1ZMլs̄ZhbBd>~4u.(4F3447UN5LgJ>` kl!4)/$yH]Z* 9Ta,uV]:?D-q)QDmʟo0^_e;Q``RM&Dh\*|I< Fmʱ׃&@ /&@xdxc8ElE܁ )RekYtBr$*wW rV`}Ak5Pb| #`jtGf`8 `kb!k'~|\wi)F F ']𩌜_?v.jt#7Km^_LֱsʖWJr21IQF-s6X|8e(ÜBMs}r)L5{{'z}2BpV!?h$&ڮSLxVys} fgCvVKԾ=tVYPф6 &t7yJؼw}=:6~ܙ6\ !Y`U@}Ed1Fp5VnP*yp,+9e[%Q8r+7{GRt#qQCqlx &\?(Koc|&$XYeQ1 A4Q<1M02LJ'5Ƽ9L~tf/8HTAsEwoƸEsNjbIQpo}VK: ^P}Q3شN\29}<sȿKJåky\,O{Ӻp5I=?,WzԒdM贃P d:|T.{|q8<,`T LcS>FRXe,CycLh f܌:\jѵG1<:&@|s$,b9Ď%OԶ{Qm2P]F3\C9td%:FtO͝ &Fk٘[Ev'׸v^ļd!-GuZQm=zIh*\ 2?_}fTOgm9TeqV,!G}|Ws'9u7j]#dWAgeL6OS_9ėdW J:b{!ˤ'fA &0zLぃmR}$qFRm(K`rUlY`n>,'lD;!A\ :ba 쌗t xThՄ$2?Mѡ7LƺXqZ΂w3*Ә@l[iISg{ yބ|@7^+tX1Lt.]H:Qݰu.tO 4Y z3O T@&mɦjmlhn?Q' 6 tK^R-}<M t2Q\òz6ٴGH"(t( Q()6XAUKuF -vѷmfM/Kd)|bVri} AvC(hU+C`TPSI~{V2)RDX#PɆ G -mw]p[Y릁iǶ})ᕾ:pl j1$ΛџbĮ)S/7Bg+Kj*O WǷyc2 tP\vf/ij%>D a`VMaf%aS|LU^HȆ}$U`tɝzƒ?lR7ԋm(>`kN>%׃8jP\v#[/,l0BьML&p+^*ns!Y@8ãm:.:~=t7x Q]f{VjMژ %Bn.2B)S\LKVXnU|;[og`ɄQN'6pVз(N@shx4*)~?k՘WѰO r&ہAUox:%@80[2ݷŲ]7;wf+) ӈ<%!Ua&gfi߅.2ך,!xufWm.eơQrx*7!{}oE6A?1Dj&h_J~nfIrޡG' Ȱv%gb}ƛӳ"/ejY m+"LuŮ&G"NLw5.hoa\W6Vm[ڣLR0sjwq~_i]ШCc$~;DMK/۩6)zҢ,U/66oc6J{K'c-AP8:*H\_u{-;TTHV!S:7Xد˻ݬ HґD`FSͻwVLagoDbܞ+c?3} H=RD-Q|zog?7TV5Jr@I .e|#77Uu_Zs 7܏!9>՚m3p*x@c(1Vki[r.ZB}% D`8LiT;@9S`nԭBHh 6{]e"Xr^\WE\Јk"E^^c~cfiF@$A[T<}<z"&S>5k+a~/羀*<~2^|~Z+p[(I]Kj+ 9&S9w̵T9,W;h&cocmӻ@tE_]SmQ@'_(I7QFVԈ :6df+7"99?MMV u0i{:2hGQ. ɗ$ܚս]4UWf2 }Wythk<rf*Rf07s0\&]->#lKJ FK!PyJ!w¿qY`0 DhѾ`TGLi`Z M9wE·\>ycs"f^*ƢIqXt۶Z(73{7ߧs2mwԧ]$8"dp4uf9,b7! 
~y?m8ڙONm;2o 9<P\2BK].dԺ;$$rW 2 UZ| Zygdon9HY*U{I!:hFUԻ?_I*RbJE'ɳ5{6ppٴ!)hp L Ȃ-Xȵ#EzɢOߩ|@$S0)n^+F7Jhv0\ps<:3[`H5I8Jc_}o㏰`?WOdDADŞ*):O9RRn'I+FVGgޠƲ5| ]*ô%}qӭ:Of#cZ~4BA#b O}mZ&%P1a Y30NG)f"W}>z$ZYj#-u+&5o Djg\(2{g79ާJflB/[Nr'iH(vX0YXs,>365l<.vZb2/`D3-5:W$9*A (nJB sQSi^-ђvS=+ ڟ"o|(Lf3T)G3MdSgRf{b0Q}*qCwk Wr Am6'[ S}gyx_ပfH r)X!|f,Nd\G#DRU3П5C$e'4gjCl)qqvc+{/Ϊsc@㷎N6XOj!*ž:O_I>@zov26ڷߏfʜӴI3=K*ɣ;T8b?i_l IJ }7熶? qwL:qmU LǼꗥ>!L^R]/zYî1d]Щ'Bg.!^ J^UΚ_ _*4RǬ5Z .L\mbc2:DN#bj2#sPBbH0bWK3Tr# :̤?D fP[æe()R '' S<͊h|L2\m hfC c g7i!)>Ϲ%A>IqZ_#(],P{(J޹S0d/%L(>:юJȕHQ2;[T'I{o#Zm. OĶHg)$_8Y̓᜺qԤ9:D>4!RGj$+w`2%\;ze0rV'v.v;\In={?i0\yqJo w(<Kw%1dHU<0u߷ bAͩgEqhSOj6袯Zr})W%[.r,nќ.^`T r;ߨQCQYY'ڬP ޿m74h׌cZJn^+U#N2Spkw;0wx,썦EN4+Ei \$}Lrqxl;xi @D6% F6ʩpVdJ$~V)]6yیN,1ƝxGhYYY4Wh9X-E@ND u(@=g 5g "ZB>Lk\H*:+ʹw~X D}2 WԖ2_I>K?&:fbPiF:G,^i *Qׅ珽 j!ZWBf7D}P8W{ fRls>2|J!ӄv>޾v@l,yx)!=s\r!}12TFRp[$rRDy]!:[!t ˺l[D#-+{.`qi7< ÉF;ʧ }w44 Y,'VyMHhOxr}X%FW9й$^ ,-^㟒KGo˛ ™d)Ğ;.WDO{s2}hؐ`5ɲx61E3d^!W'w2R2؈rԇs]vAYSA%~Pؑ"..Psq\2 D uR E?2! J<_eJ Pb b{g.EKe6ڰ䧊表 7"XσSuH00İ$*8w3-TSKvpW=FH Q1mp޸%i[,ο+QH:HpSڃ8r$8sfwT]ծoN\H } BT W_ðqP fvbQe;N#}d2L6ѬE=@~̪i P)M4mAVquO卆k -:zEVIF^5ĝ_$v`c@b'М"jI$~hbLK? pςm+LСAշgBoIj.6, }2Wd,. #.ML!9:ݢW*pn=y*+ YɾW%`m3gJ0NLǏ@m3M(dNFrxJvFFR;=A7?9vi WWs|+WYƖ\4ADI`Z mO*۫$XK[F䠢*se1_T} w|o J?]eNlH =Q1%tq=mkvQp!靠c9M}XE,7MF!k2WFh}e_Y̎F [c8CΦ#,I/܁oarQFB`Dg  : >YoW f/Huq}סB2݇V4NZyH>A'3GW6oǫB]͏·HO'p$x>WFod{ P)[\D⎤qG篲6uD]_#n.#B&U1\3Qs*a+ۯ|;G'GiҾiZb43أ<S?N?)yN";ݫn}#QM Z9_6J7+FLhe(AYsՇOeki*N̂"5Xm*._Vie;i!`R,gԬ֣&dNӵ'M[ר0V [`Ėy7[իU\9Yx.k6Y8aH8etT2oLBa.E 4Z:3 ܡIbR% eOEZ7cZ*xkԛ1}WB+o;gTljO6슰^BM#ig#XbT! ߡG(YNF$gֽMj-RO9ΒKV&HIkk<@ eF]ژQ~ol}^F]zu]KO\A)n[(ϵ#\և(!c/suw.U"6s1$rFSo  6k(_ RqF"xJ6GX:}"y 0G*NW\2$KN,{J6m 5fd$H{xe6K5ǍAh 0: ]GSіPh<}InL3?r* 5~;^hz;h !0%`LK XЈpVk9 mK"%_̞[T .}>O„DSv#Fҕ|(l?i0T<6.N&ߎ~+|5a@`kl>E-Z7"wkR(WWחZ`?|֕>a>` Z.0m=Z=);;w75wW?` v|<@ԼQU_ŽM79ZˀS;nEډ;hԬoOA}[$Z?Csi^.d ?ol}I,~ǁFwLsZAZcG>>XԱzѰ[o/f/Úʱ}|L Ȯlv-}b };c~nEy*h{g}AV&~$]Qž{aq0ߥ d˭eTU\'.)x}W '#n;r*9PÛVbn[V;Q:-$v05XhF!$^eL#m n pZ CPVߣAx0.#(:/j8[e&8]f}KU0!|ͬlpv,YQBpCPnK0^f9y.-bz ] H{P-s%sٝ߅?y4Mq: #Yߋ<ݛt__^prS,>3DaaL$Dϔ_$T+@ Scp(WH FJ d0dݐPբԪ;Ca6@4n5e5 3IWZwwx̸A \i;e 2@kTv+J}mlP"Z=*P\xog/Z1DFĸFeqQF$EpQʂih:eQO`#ƐmW#܄Gh' ӛIM1kZmY8tF*'>:x5(bnÖC-Y4lL>l#ZDq@` z=zEy)k|9u:Ti?@0T RXP󀲃ҹTR~гHOv8B:.nfm;Yx zY MQ5LpB+n8LpYxq_>/]UwdCS>)ΐ)l$GؼڂxJLcR^YtWNf3=}cؓUo0bO+'H,I,5@Ƥ(ünx.#ʴbJ!cIw>A[E`F W#S>zt~7&!. -~$ѨG{ț{ 8NÂ#^OR6!UcTa}SLS7:Wn-Wس( cx<]AD Mdӻ7֜J@'jjTUJ ,Huf&b帜z L-G"pmo2mKN;:ʎC>_OKJ: 6:.OpTP;)w#RcQ$DT;mHğ&A3!NZR iK*lPS$GAP#z, l:EmW.O(&f0nޣ/2kIV˗VsB9ʘk+FiB%Rhָ+ЃpDLtR }Rzk_V~S;J!ļBQq$/._:IGɺHklRT۳xg$ ( o1^#o.ympNۂ;HS<e|}@HJ5,]ucԫZ[2)뇃_gEE3#їMүWE:ϔ>^$ if-W5٘CдOSOWBfY\f1HUH[E@#dKc(^űj2eE ˫o9)3c"+ 0a뗌ǧD]cĽIH40nѕ= Ȇ߅= Б) _R@fV)r TbPIhǶA[:5Vf& Ŵ8y.cor(9/{V0{+Z ƜSŃҫc R"GTmse*=BfڮP*dN@N)W5$==ow;X-;r[Ij\X\p%jo  ~vfp6ibБ]=vSe`nxOE% eG΋cuvu;럐a&"biB֢ /lh5=}_\P#){#-3}Vޓ(ǣDQЙ_}ZܴWzS_+ED˒iY\h>e8(ʞZ1үuYBM)H ^/S \'rN1,) jDzx-{fayT:@0nU79*B 񉍊P׎q hQQ?-N=ӻjd3lӧ=y{QU7_uxhocx7%G^=t-Phs})=vgVkp:E FO!(SGEHy@Yt2oU){ 3*IRdRFϩ1 hJN0'}eW#.F@5K7Ѐѯ6ſ.  
%*c5gJ &U|'1}yqG׾SYL; o }B7M嗓%b11z>"5{|7>sʹ&RwC\'Y՚30iF2vTTI`07ra@k='Ê@NɒK"87yMO/M[;#'4F"<}~z+KBfO7j'@vW!hݬ[= UΑu]QP7ԑgyKl p|oЫwLif=CZ"/OAMM~#&R ZԞWG5x=`cuth3>u{Qn kQnlFݵUe^qbuhp'ߤ|sEQfCVW)HXĤ@VWPL'Y|du$3׵H_9zSkQD/E !eRkHN1737=K-d%1 B@+!{-E%nwҿlF!pD29ʟcTvf~x~%Y{3c*-Vi";M8"c-S6J#/]"[`'%_ 2pczksP]c6q={5}]N ;ˡuЊ[xծR]3YTRM6$0aJ!Bpf# ;IG|;,sL2uЗɎ R9 ˠYd;;w_(p f~tZ`;-5tKՏkWHXԶKӓt,1Ғ\?\ڹvTr&CDlIߋ"r,!~pf!PJd2hd]{4ILK\NJy*B_It~뿐b^ uh(] ZM\JA1\G " j*ɰlJ ipٻP.~f+3R+qѨհǯ1WQ]kxAOA#e+KQ7:SMeJr6X!srl)͉&5w )ÞdkEE c[N:.;P==E3 W=(_sgKU62 tri0ǗeB0UH.'=ڞM CaV4B{_=b $U׼f]}@5'x5@ kzWp͛* k{pQvnt ӳu ?љ\b2G-+Mr>?_L?dž^Kŏݕ yb5/1_KA ߜHWo`dA[Jgؒ" Sl?]Ĉ;ήQmuLrXRTs>v#@BS$۪t!W);>Y=6 M G䗄tPLiY|qg@$.xj4/yUtSkdӋWLch K\6*enQEźWϼZV)Vwv]/r (6mD8fi ƶlA|8H<%8rV;Q\/Aoԙ͊ ~ˁ"#y>%YHf7I Uy7d;cəzF۱HbNG /i.44Y/ <̲7$W1w5VGGW 116hol7z-b9s9\e{SAO/F3s>7v9k(߰;ŝr$Poy0F-=Yg4jVUm sSv:cRgĽ=';nN?ԛcx6 ]w=阐2KCNzi+&;Q"({f,}hbC26}6qj D1 XqD@kh!4-ICGVOĴޝV\V˪*H%q8Hn'4x}ؒtUHCeXn"-#݂{ g:CA/. طfi?vYվAO5UԹǬnxg I:,/Sڙ$i 2&j 6吸CWp,tV;t E5рW|@Ji|?oЀG?.Тw<^esZ 8Fk?J[LV<#Ac!b+bd`ʲ^M{t3"~!f.6tPcRңEc4lɬGjLB+ 9>c9!hW#if\TYɁ4U^"|jej _C,^p!mD9\r7­u0%5{}" WA2OD *.!u3i>? @HqeUF/SMej4 =bKtuΑoh@@Rf6|T_禍41c8.yC6-EYٷ$JOIM񎟷.왉Ԃ,01ZBh\XYEE;fw٫mCOpn^9-xg:%( G"nٚ+d^aCJl$ @NɄ}o0'H5^=sZTm.Wo3Gr>۬ZQbS!MޚȜ,jͥ)kg:|hݕ6˫Ł{Rؒ d^i ŒjnXi 6yGKW#hF.וEϾP%ijRo{hg[2ҵ<J^l.g]rrHG,1IŲ&ǩ ^UQAP눃G+. v[%m O5@/Ĝr97W'ey6 Ě$K5%YH@J%XnSz/t]&WF Q=7TSMOmP\/'jmWzmYODp pXDCd#Rzi >0yX!ΘlN`9~R|[SN_7&:*f:q c|\?N.">? '#v!g;s,Kb!J^K0)]dvĻ6H\܀D.6nɌ0+mAI| c50xFz xIAdD?rJɃ0nJFwn#ʔDCd|$?E_{8õ(?u# a'e&mF3>Hg|N}Æ3=nDȭ\.ġra։4I4 v0tPs !Lv: Z$q%du-`r;ϯE { .6`s j }ʌqsB!WQEw!q9!Tmv@ .}&kIvוÙU*g㨋d-: Ȃ:Y>F '%"UQ>[DU躒eYImTݗdQB ;\|G嗿'0[W{F}Vw7l n/eu"uyY RU0y5S3!>HkUQF+] eKS>JgQa}=O-W-\:@tf6Æhek xh+Dαv5AdO6<pOTu;X̯i^qK-Cqi ECY oT@ e r/v>b8AYR&K&,b0Uư@ gqѡгtll {Rh1uV#;UBwg"vmy!ڊ0"uE&e`}J׌}Pʳ[f9uoڊLc3AR mlX0ze5|[#8&c]2=>Ox8o$fs!sc UX" th:i Kgr8 -E |鐝Ӆq%1eԛZ$]wUsr8[dV 'FKߌ8`9P6M*i +6mig M^2p8ERBO#Ne:Kqc4xlma7 .8~ w"@q-g +:}cE$A^swC(|:n}N|p8Fg#pIM$c2g;Q*RKNѳ,/?ٷrfk;b؋%8Y)I17_%ꜞ!:m FTaTոR>;3רW @(yxgvpvO@T+nq4(.lVՍx^gv#IEe*7ė#VQ-6^q@!.?%Pӈgf)GHZҵ?dWiDaΎqF2q1ʮM +J|g)B5Cޙj'') Hә9ArB?+,.(vE{"$\1XJBe?=q& ُ/ #µunOV# .w+$w,M3Tśÿfų5RC' 1Ю4';JnxG,'Yes<(cѾ JLU//"pa.7rfmlj8WdY141O%hj OF׈_&뜊ՂENѣxZ|#1Gq'BGgd藽,7nCy{!HZԛΐvO GwV- )hyUBoP,'+ύ"_Uk9Dua&Ck rO-/x;*0}4!my'C>Tܗr8?;L MNX畢 z .}ohRŤ+0h3s+y*Lv&gK֠ ccCt)\ǩour zFNd c忱QR۱{bg(C.;u0nAO**W0Uf .2rK˼txoM8_N9t7vI~ѧYڼ+&CxZ2ɸ`w唵 u(9=1,2)j i#EDDۜH4p_$IOR'9*8Khv':?%,z? in&#J^;sv[ Ch‹kL(buy@* G<\t#BX*F}U:7pSrHY&6H?g>&'*W}~"3M?s߱9d-$мj :o} Uik=*V/YcuL8/ƃRZ1o^+ N0(.Z<;Qnׯ[FF^5V˱|JAuwԥke3}N3m 01F G*MT^x׳a ߰ʲb͛1Ma[2#Jap5 Ω$D[\{- =KlR`C?m\#=P]"SqrQedԦa'Ht רbQ>d)}nNR ȳn(P6gFk$zٝF<X?kN폣 (uNά["jN HZS!"oXM6ndxd \&oY6DQS}=yky?DDJ;4m6vlrM%>a`6 /fc)f.:MFDx% Q~rW8i6Yj_u:mjrȧ/?EqҮfD,~oy5M8i;gD^;MK޵De,*j9ZbyGG+}TPf<}:j8ܼ-c*>`:A.r3B` ]+N̑ Xɥ)} b>OϧЄoNsq,72j>hD{LQ=Z 6mruyM2Td+oIp, K?B7,[ܺ7.{maL}}g@9nMBMɑEd}*^ஶf)NOC4)axj#S %VABf7g-“xxt8=/jRsK) eZpas kADR}≨9G.uuIO{ RTT I<6 k>)v"T8]0k: Yfz뺿 bpd~ZfW?ǔe(\ϽA:&g Ve2Mo@ wINaۦMw}Rp}n٣ ױObl%\^3^4t4nCB%/odc RN1t8g(N%sF~H `ׅ'YhfZi8Lq [y[bΧ:_X1eb3vQx6'4Z(xR/ٙG.P?ܜS$gCE w?V.Rc 7Dc? fFFunobβXSNzU?dc@~$ml.7*Z2FmiV5C)3GP^k%(”!1L> vuk vۉ̝+{8cb};G' 9gċ`hvD­39pl#U)Y#sF5%LuPI = p{Ub4~< `R.S 9\3>=@o*ǚsA{:gXk /gt휤bw -K\$j,3 m(ǓX-0h˜$!j:K4 #]+ ,2$LJ-襾( 40ޞkߤhLݖ[q)v.Zy3{@z;lLW(}>١nn',3P+VLREu00(%/ds|!X.>OY[0@]nۈӷ+[ F@ӚeV9tuP|u˴=%#>׼PYrnnA?8*j^U*œ J}ůb^2hC[#?)xr!OiT6pp!oDkyy?Rcov߻iاsxR䄀-{^is6c:6 (6b ϑJ^$ [ҷ᦮ 7 o%m_[-$! 
Iپq gG#e[Gl'}f}Hrw-lK^8+NIα8u#bbyƞgioW8OqT,{KjN ?>cC[HJ9/=_ q',P2Qv69@{2Awc>tD-aJ)r(SWѰ9qcNttT( wSRZRj[TRߝu{2$p Kg\-b%NLڦD1^@«.&ήMDFjACٚ5Ԭ^:PCo=5w.zQ}-psS&?Jd`y\hDPl X563g}oјώ ];>=)l#4[\c,`Wžs!dK|c:)Ԛ_n)MхfNVh|^rs-B,|<Y|N*WEK rnhir(s `pũ9lUBmވQ/ڙkb>+Q*ʨ3A*@l伢x2"?;3&_"r "Kb*GZkS.ksS{e %U8PH\N`'kauF)IAQ o{-$m&.'z@c%;aa-T ֝ K4 ~(LvǽtQŽ#e/pP`D©WDE ~ŏ5M1p22ϘpΤA$TZ^d)EܤdV"%Q=;?[rA+E=( "PH*rϹIVq-P0ΐU: {!H5 ꋟJ Bҽ4etk6w \o#.XyBLQkXd^&DAȇɾHU¸(iCCqTщ[k[jR]]@qX-3#8VV_،s=F Y]-e$uqM34WЍ$4~հ+ &`~Q +@6 *aVP;}k3XZ$QW2c^=ĶMggI&R/+z'N^L;fxjzorvuUf8i]Rr4K3q?!#^jJ5X(fZ"sA\iݨH@|-[(vY,$*=(+lRERΖ*{޳K) 3ؽ 9PB|II 8vCa&Άe!K JaI|Pߴk٩U:cqg PTt.IĚc-At0] ͛ia cdyR+#AA$f g!-t)wUּYxBӳۤidm y.}O:Z$a OX *}38Dro[./5UY?khs:ZjegCAT`Sd LsN0xwFWZ% 8PNDBܥMݥds$]sV^w]xwˆ)=JRߡNeU H`4GpA(Һ-XƘ}zN0bij C&[0Wv">DipqVs 7vs?B蝂Na؆0=[M#[. NNyUy FECV h"0rg8zɽ";7Y_Vr'ϔ藍u)?Ӯ:'a$2SL"Ke[HUV]Sv;si 4i]8-` 4{F oTArUe''a3U] VTޡ%($b×?MϤnyh sĎLWcqJO}k[cs)<:'f I3uAY7J[ƣ!3J5ŭK2Th}sMyȢXJ%lzi\ e eR9DթxvBnYM9Kj\vɕ)e?BCr/'E# $мa7 3∊RDD: +ny;ڨ79viQ+ )ۮN[:j"B@`FLtl\YBAa,`74s!1Չҝ)\U,ZW/YvTAD8u(0 ׸t̆{lVP% ;YWGl9R7LhsjpsH!stwj$mWW|wcZ/j0V0xiRNrAS VD!q1.hɅ"6Uq phZW9jQ”p 2` T,QLՖ~Tvؚ}y{Yp2Qoȓ{&2GV{ߞn.I0fC,ŦC+nl Iba$BgS9S؆i{!-}Z_vtrRR%&1ਣ̹0"aGz ooJw8ff:1?`Sp-SE9'@6|@шżSW &?s"a-}2>;;ZpN@-)J~I+(H yh;{1hH,a*feKsXrsUn?8[1AzW_6}p/4EJae-G@a,y%eC('ȜڥՑw(?aټh_ nI,RE x0 v ߀X P8F>x+fqXxd~|ϫ#,'ǍP띕rY{ ȩ @!^(8tYȲ8S[J _!i0t_,{$'1ZZ~b꩝;! 6PNP"7x#BPgE_,Tl{SԱk;ˮ fS5W S*LiEl9 elwf2 ЍGM?.mC4g1嗓 y\~s3eQWW\fAٚ86%E`] M^Ȭ>Q,I1Ya؞l]x*T*/gjM h {3\Pw"`87,~cE%.VAJ"wI1'^#W_F%!$𪾆[d )X=_d̖tvq2[\[EƒdάhddsqgoI3ܠ)kmł&(qph>(;ZֽVVWPTdrINY(K ed䶎nH֦Ugfm+JT1Ӛ TSz$u7y)WcIlą_sIFJWɂڇq( {a5N@IJZ>@L\Ҭ#E#~{ f0-Vv26)MZ[8? |hi6*,}MifH\C?Zacl{;[> Ócɽ%(#|_/0 xp6ڽ]|^J]oT'K%[(}eeD}uE߬ꑥXĪ!) ˼ayiV Ofi;(v}f "N'h:**>Q@j9F5aN-*jJr-s[s{=&nN+-" W5@q!"xKbP*n1ui2bָg-;O[::S/区M:.x͏N%jELlU Lݲ | 4tY2?G~;1arմ&j7i8aۍ CpiG~ O?]6y.~giKU R[7 R~r,ec)UV\И`+>u܏EbH ^R U#ȓNZﱻF_1~6-p=*``xwgUqCA٘2-/MA|TecZ &lvժ&cI 袽H>O+m^Т(2. ~:I3sK}ZŠX6#ՙ&+*4k7ˏb=[,khd FPp2+r|:跷.V~/mXy' 4? YV[>UAsc)$;ɸSknPuA^֯$o^8%d!m('!ȍ kP:FΥ)?n@VGc0D4rCF1$oHь֦iOfz[PzO#hsMx5p mC R-~ L=zУ"{&3#^eC$@hE@|ofAOY,I 5(yf0c!7j ,ΎX*Ǖ$yQ2rPao% F#k`^QBH+~~ٔj9LĘK1#E/`QaS`Lumgy}uӷ糕*:\bF\wm` \{ '(*/pP59^]q"LԇƇ9EAN97KX|},Nh7_iW5;0;*anKRyvۭc Rj 8B?eʓ)c7>]MA=mZrv﯍EMsM HkO}۵F!E‘'E6oNÒB (粎UwoF!95]^'C+ir?4U`&2CXdHWVi^Dq ' 2r A4zv0foحalIWlw0Zzl$ *NI)3-8BwהslMbēi6@{^'//64/WO8w(6.faFxpz j+üܙ}mN9hPXXgD$}yleK|s?T%.7}is(oT ;j"OG|.GڦI-S_oU~Iݦ31L~U B}|dC7 d4BR<W!Y +NV-+IF$Hé@S?>n5UIY׀ cdž$q$ok` g~΍|lH 9o*ׂ#5נn{&N*1 w7PbkIQVdYդaNGlUBspFMϫ=R'&Xm&f9{)xk#|x-(J GSq\ rL^7wA, uB1T_e:-zL%yVGSۚzHae]1;:  ڬE.h  1!f^(ZqVu9eu("=[?'Ws+L<%j98&Dܲ'B&}t &Rg7:#+If?e)< ɾzi!_IjP+,̌.5 9p]0[(c k: w\dEaͩw Yߢ/݀h#r/W3рb5IMVwSdI~nC(a@U;࠽rsӃ |QaKTJ^k@P6mbmo{4<" P.S' \P{9,Z4q*Gj2.[_7Aaӿb/ :? 
enj]F[׽C=a>Tǧc=6EK  5Vlq c,G^L!ZbS"`ȣєTg+t*{:MkQìm$LspwHCiKbc <$|w顭#=@jEևIVx&kH0脂u2]fFaG[V ԵrAc%>wz3دoDzPq8%ׁ<Xj[`'QcL!nǒcZ:Gi6-܏.*^m/͌du,Sa]^&Y4S"IA𺲴w5l T&yݴ r4]Da1/An1wpAHPUS cO&b6x1uXEG;d9QKqU]l{)(:B NS]J ?|>g3о@t\`'MR}KwB#{'hH`qDnvLY8fLJȌg9@OR%a6~Iܞ\bR$b;!&+\Fe̛wT$r,LW0a8V T`FsUE"hs]1Pw aOdo h-CvE LVP"/+ҌFQ W&7w7T%&ٍ$D&QcWz});r)+RHc{_>5m"P,tmE <Vip[Pt5Yz"/'J 9(0ui\+s { V|lUq.e ?rj&.ˀkg$D2IEԾ[ۦVc_8*Hf#U߂MiFh  "ϿlXnuY&om(*V?hLGQtslmǶzUO:~nBeEstrRGvkԒWJ2:sdG$MTψEpۇT5܅}$c[.)cE}U,Mh.:/۱f2и@1'{5t,?l8D#z7x=.LSEXov=Q)qϔ]s!m1aQ%R\ᔜ‡ɠLBЎoX3B9~FwS-tY2R{3JAF%aQs:n:bfLlnhC ͇-<{Ԫ2_q(P{(FqoLbdKsŇ=$t*,X08 ήɞ&t7݋ PH6XIjH\کcn)neG*U\TZ<ɿ Aρz$xa2K:E#4T+ umMc١J̘T^b4"?O]lsuq_X9Xm\[:61@~Tә!](}3GdU*e{zF'cYm:l,Фh9 Xk|")2c9V֦HG?)=#k}<@3RQ;y-SNkcڮwd)jk,I&v6MOp{Z`'@7:Y.ǐ CT\Hwԡ oݧ:xي)31 9P{?`IA.N å{lG }麥(Ɲȱأ& gW vNfI%u% g.X+*ʅ2RÆd@Sڬ :?9MTS mجmFl\yTi`"81H A)hsili;']Lm";~0 2\5ӎLw~ KꐯF1vQmB{ww,1KYHpJx!b7ZB~.iO" 2"^>e`_uRc,w{!s8BEО.&"Om$yKL}l("iM &ݲa "0"W',2/a Pc㭡63bv%N66;6ǽ=rR}M/IRkIqj!@lTe2FUPU M<2 pId#T:XwD˱ѵ&$ExkbA`Lox;%?*gt`ksl21v:2dHz,~q?$gC~\.l8vX$uIʨ{N\,쩝CQ<ی44MD$)?@s{y"P6ـz:7Jc=IeXB_kyr)4V(U <ޖyvN0k^C00L1ݵcgD0/VMHwͶX-bvvJ#뼴\a+ƥC)Q x{ .afֵlƄ,4Yc/d;K7Z4d۬&/}Ĩs)[{ 6^X޸w- %WFRz_5 ~ DE8H0T E&O%\o<ADj6[}!/:}{ሓy-&|R|,K?ԐjWNk̨*pD[ϭ /cEy:7>^(Ujj`PcL/DT(Ñ x5:&5s] h %J%R)bSg9Ё~iH~cNbtGtr+h2k?mjāвG)3A^ZraV7jFUdEjjlJ6~ȉW=rb֚8{hb$@ ylܲD{o;`WL4hrь̴k r,~MGOvjai |$! MBi_h@%,YI{9[<#0EVTYwa 8nL5'bPw_ȩ:E`$aWHVkDcze]h=$!9_Ksp0f^/Pm8;.SuPl@@ xTXb7F[j, _b3:##DЇ*yI唍]$| pd+B?dlH6qQ$dc\8?OȔ| N?o}s!TOb kU#K(hcM)(݄νL&j}t*?DN:Cس=UVtP6^{f'١N-TMϛݴ,X[T 6gԲy 3TKj'BOa۴a Y暃:4L%FgEJ3yD| )yWܪ5/P;Gʝ |HRAu'zESp9f#.H!Z"<*y(e4*#: y/q ð+"QR8(i|簝t Ps(Jj̃2b~ ?Y 0N%~`Z,  j^ On;]ܜm%E-*OG ъI̴PFvN&DAQZȠ3p*Z ^&l33h zQ8P:~D]# E4 ͔ٯ:4c:P b]>gOpy/vid#Ȃ)7SvhCY6TR#ضyNۃpn\ ͦ\2BtQLj`(<4)^7 Y$!Vw, i=|Nd 마!B&UVU6R0rKei'bxf"H7;;eüʹD5C)Z=y}_ۀ-'_9{9ѤR ` c3~ȒDY Pqcxă"Z5_a ?G 8Tw)_Ana֦q8`}8Ycn{܊6 2>q?$5&,$\AwZBI={/PUɣy CM#)a>_+DQخnc:.Za#)ϠZat=>Cxp`-!4zdC< 7 {_{Y}i}*k|Sز3RbD-^p ⋌,6; ͂\Dc%R=C2_v?j.[IKcNᶆ2mi<#O;6xcd= p茳 ;~g!i~ayn[ؔDOA-Ǩ]sNJr{z=6s'ԸbxQcэE!xb-aڮ_wQG (YoTkn|1@,-sz3N5[ OMrdJLzu Nn}!<,[ζj>lSPÃŚ7}JW;=Wc%AA>QLY<Y&WA3OIVoz@ ߊajB^)zO[k,9݊qh_aމ*{')]Z`:7u!不jxzZ^ygFbV__"2c54{~nc{[dW-M4":Dyk@w1i10 i6i;U54گ(tuBsX-=dhaіjo @H%eɞgoAUQ/S9(  iլ/P]t}XG-~R .q:t0J}߻>'6j~\ěb- T_WuRd+6#ؒ]R>[F03)5[L?:GUIr9rP=&0񴎛,fN,Mvx݂ 9D5\~.k{s/ ig3R'fJP& /ՖU _o0)1?2_j.k_ae kPt$ x%^?TNWgħ.D^2GV؉䀾E]y*h:4 8տ#ۆдu|<5Q> lf)"< hEwMrKJ 1skXɤXK2Wi^a|a÷H5LQK^2 ]n sU>iz;-ڍ7j$$E~,!ϋg@vn<1]cM˥^듏m_iIݮ{P6(uwc\ WڅzW!y$ٻNO)JDeڱHh&V+kcU:K➥^(NV=G afdmWרe"9\-,Uz?6c M{NG-7Qc؋?U0aKlg!zD m'+9z}hPTowӿ< +Qj#G\( S^LـCjIDJ6H$x_0s|I}5)Y/Ab]fT "cNk=\:!њ?J tA^H=VU;5PsǺM &E6W89x0JMUm9q:T-^մw x+HHf b~i~6 q˂{]q_]."3Z(fu ..rzq}4.YB'ge`bђa2uTy%ci@WOrz#DBS0/@3#ϢmPPnok͕ܵ*:\$YNI8_IOH@SF&z&aK[=!ϰySef9M8F*ƽ3hRN 1K y ʿR'_LGnGmUicw;% Eҳp"0^vadIfWp@AJ - A?od۴,G.߉bi Yy(. 
|j& 5󳥫dq߽$ėJ^$zGE76kQ,‘N$/.pA#SG LCa~n4O}G$Nnkocr*RD癄|iB7)A1yIsVTvzɬD#;g=тl((..rS6`kurYo t4%bZl§$NQyl3i !EGX[Ixhi O@@-:dd$Jv{J`-f0ȤUk_ͣNvUU|'iڡ8r@2Y_h |IՖF+RJF1sG\:U_&oәχ]`أQLDTQX 2^7rb>s&hO5"vB% 2G3xۙ "'#&ɤhrO6#7+˵ lxscY:w~ :!>DL}+b=ԺׂEUt=#M!!am+(C9Rq@K9RKːSݍJa%"#99 jX|=kPe׽nLGAT9weKxC!R"/x/ ob̼2U~9dtZp:yh5?Z $i3CtESw]zy S4'oVc: NƠ(2d hw~y_VT^K ~I<zȋ&BOsi20Rf,fCt֙}[^~jDfIiJgpV-3 Vl!P[&c.xd pv%LxJ ƑFDkuIzyI-a>2mvWKjg;uvA6Q*aDŽIU(ɯسU~)db4JQqN_nnWk;<|;s UϜFk;Cge?B%Ƨ*@SfLn Gpouna Mfj;  6~aZ1ezUq 3w{L_(SW;C3 k4 P#¯b^*Utf#S{X7mTÅØ-·~|OE0/ 7*o>TCS&5ƚ/~0hml"@cg9[D㯀]N%.C0P ,|Eܱ!^#a?]N>%5VJƣKso1^A[TSc0iumD璐"R(p~OH*~?D%"jvrGɈ E.s GJQ+Rponu"1c*JG0Fk_p;*]"740J׶'=goi=m򏕤u?\; la'ҫbҐIoW<#i TpD)g[ m߬W&?s?[vO=YRcafZm{iDv{}44uD.hi/WTn{s4:pJv61@J(7P@?҄\9#7_ǝI6&ԫaNɍ;ahnĂބ- buh~ _Wgߝ'E} CUkQ δ[%R bsBWݰM0gdt@!!V&4H=;8)6m'X 1!/zt0 4/b9\u'7ͦ(L`, qEExb#I=ơΘ1p<6ش `f5p2 0>ezʑ !էw{V+k`[ ˲S 9|vEER[۩U0[~.2DօJkH49b6+ 磊PהRX%$ `}+Jt,BRF2,q ,E(Se>0c`3}>}d ZmK$ n ϶@lSއ0f:e8)隹(j$%9(0.F$w3H1މ]j30Kzd9~]S!P֎:@Zd+WJrZ<Ɠ@N`zQ KI.~Gwag4>ǃ,^9N,ꓮ*0"GNP0L|?\[/Ձ^ܒlW&w,.U)_ 57@CH.|sHc%]\nIKGd&\D[&(3a94.fn{v~6U9jTl[([ٔ)%NlT U$Cj)8-dgM}0:e/"TM YhBk%ɷSBLi4/fVE XDF6+FK=ڢ卬l1Ӧ;,WCAs/hO@"+kWJ!QSe`GgƖnMԚzP30Cp@ f$NOx;[z [Ȗ6 b`lȻlSWlaiKOCZɼ?:| I4ک.+SfIQ!6gw2k 5q "7;E:{ ]et^ŋ6E SY]헨f#⚚~q}ix3zrhbX_!>" &uyy\9oEp!y-.z7ȿhXG ^ɛ]7h T !g +VU'i^ EB|ODcyIGukOwgݲVCK5`gL2wGԇfh1I<羧֒Q*)}!ixk&jT0gz!Rt `lA̔E(rh6+g~=`FLV^0fL`K3_eh[vVԕw8Cc azҦ9.`oot[֯{@\t!ҭ|Ё&1R7fA5_"#ʲOC}EgvyiOe7MU< "7RqU]ʞ(~ǁsxo?z@kd}(?ĔMv/HEJ Fw |g(t9' <#KGICj_qX*YxB_#(0Mf J=@0yqH2LŬVG-\b Áۏѷ\9.AmVoW?+g>MB: :w@l< dsT&0IH֚P'_^M*.{u)wM6gfqأF1 (6Pej*4u7=P9LL%'R tb5Qj$ɿ@d@*I3í/4~Wvfjz/ExׇY'a֫Ug<~a8 %]m%"D̙w79xT@ WRxȪ*fIjbrqkZ$I+`Hxů>g&&p1g2=^FTt SSj;},@q3Q]z'~Et Wp[`|-fw:m*6畅׈XݫuL24|;[WAc+SNj3/ `$ơ4]3jE1@,[#;!_B=Ij3PX缏l1*͆(~%-yU[Qw22ohdG|/}HXaz sys7ȿ襖JޫP|svZ<0bـ T0]-ϫJV` w];gluajNN+W-J7!3JtD]-h\V*C/*'͖,51?b\h=qFP^֭C9iSOzQ\AR_ +!@)s{u筋:S|(Tʸ@UkY3zT ;i &)gc&&g*Z=pD+UkQ\9&`76E"&zz]bO)a L41yƧ$,Xus]4xx:1Eym 'j!| +f_m| 2f HO}g:w*j(l>4 8RgrGCn&@_8&mK׈]-PZ1r>1 )%kO[ubͩև uԺZJlPb|kv+LXNC5 ṙt?~~=s87pN禱'BB>-vd{Kcq~8Ҧ[Q*ݶ%Kr몤-S`%D ARҳ4z}<zu*-RJPa# o.`p~U(Ț)H|!W/qɢw5q^cs;otHm*}|j*Ud/=acG|gFu-Ͽt[4>@~B,5G]T@D*A&2p ެw4Eqbp=U_H n[B ~>'Mqf ;H'ƃ) *(뙴?#qoO r?j^P٘,ZϹ)DlJL֢Kz֛{WH \~ A cR2fbWfzNc4(a2t?Y>0AȂ}G\cJL[)]͞׽S m.(AvX2[M}FXi" \Da`]KUo,.92nr[@ĽgR42ctNN]޾sT>b\(> }vj3qJu*z }MLZ\-k|coYWEi9g <ߐr)iV9i1"_)d6͊Vf%O&=L M@M/g׹lfWX`_yXI92~ҁ9y;ľq2z%#Xj>ѝZuHNRYˀFـ:KMP⒜gδT7jZ4! f㪎Xpbg!dn0frL5 (ƽP'튇E$7{@5)u &qĆ=2 nA%Q:>y gΛ`-9hiBL"uprIo*u]O; j,'DCQɈؕJ<"2JB+c*{>#0Q(bɦڑPvlf =I܋7AP4"9GN/*}n-ըJpP?,~ ޲v!ȡP^v] gռ9=qV&-5 QRKw3jraN- ^`pnnj >`R-7u]q/`جڵ ݈0 #MU'+CVE 7uϴăV$`w;s9KֈYu"UוY;ڭ#3_ %"Bf;?i vQ hnǎHuԭ|{aRi쨠r7opET&־VWDYk8K%* ? FԂ>kg@R\ɽۅ r5^;op$oL鲍ҵcFE}k[_1Woj1h^$Ct ;%R(rUu2(nf̟N9!)`B)3yzl}rڰ&K= 1gyh(fUaz>|Eb.M"7-Ց7(]9&bLj `B6VDb 4Lm[U$l,2377HԜTzJdm"l rH$+|X= {'|Q' OHH oaZ6.UL¬b+.,.ԥdUzT J%BMHi(1i@ʻJ +*icRiH'2E*s<2/ V NhZW ԟj7{֜wxGPI+Hε2;%P`Xuݗh˖ ulo|y9?dž^`qF.T"Vٓ8Vģ胊_ʒ[ciPqC6fpu?uF (0*UM%ըˮ@Z65Ɠxor,EF4#FПgZ=~j3u,hCF&][Ӏ:}9E:I0Q$ŧRYjb_ޱRP(έq^A/qkN6Tbo߉ВĠ> \5IMy]R Ұ3<}c/J'v+Ndou I0LZJM~*A*J. 
nv4ӥ(K'fII,R1c@{M~ vr?3%*T:Phzz45uVgRᒀ7Q7c޷5.%bHv% Q:_()_ 񃅢Wj@ŦcꜩqU[);|K@@zeXp6xΜ_Xg3$:+K1{ J~AQiirJvCDF{ag_u s6T[v>6-Lj1\T`7>+YW$r̝2+r&9hM/|aV$>N>eW!^I (d6j0X;IIn/}BztE.@${DM'0B$Y޲ 4vvsSZ{.n|p Bt HF QR+KT$#Ve Sog- lpwny<+rp^lS/FKo#Sr.1;!-i\@ب[z8t#quzjg KDMFgLrn8ݠ@l%HY G}f{ Q"r*?λ'y EL$gSkи=e-yVqtpi6T^gbəYgC-Eե{Y H585KD}use8̴J0b&2E/ O.7s,ѱ FI)K{QǚE@4۾ !U$/&a8}K^ k|E6@yZ U 3tۃ{EDZK\<$>HMk/$~XM9`2*#?33NL[-Ko/װ9l11bʕNfN4K.189$a64ӊreʃҪ1r*mJvFl3ߒzzUq.hBJMG+!bgQ.l]tP!!be9/= ^ߞJe SO|Fv 7iYU$qkqzqvWX|6yQ4ӣ^h(n3\<()wJܹ֞\@`(ߺ0Wks-?W?=~!d$g.(a#>dŖMx4yz\obg3B3?a;/Q͒O XѿopNk*\\q * z0{|7^"y EK 9$0DO/6CE$_J-ĕ+Ɏٖ~ci}2O.gpG2tɦ: wR/Ճ).A!XvX(:_1C3 FN(Kh%ܥ{y1E0Xo2&tvBh9BQu 0;-.˦S^ca!I[ND,1a״? ._ȷתTPdM9%NN_9L\k lDL4"C螋 @ l3U8)gkT鶈Ԯy.3| 1Ž,mQ?lgQQk*ٛ'ȜQdIIґH2..w{}2o'GtȨXNBDJKWe(n|>m+ ˺X#Q1PuKۡ7}mܛ`7ҿ)F^jV6ӟk!6߽a< tIv.s`9CSM9%k`I KȜȑz^nКdlAb!יgV'gM*jiJm e eȈ!LQ&"|tw%x! t?lCT8JE i5W[Kk,y)i,E&H,NYuOR.2xE`! NںG3;Uٮ,njue2P/?.FJ2*95zϱ:ޤJ9 88 >ӡ+TV0^&kbuR!!  &3`yf 4[ȓ"A5Y1JZX} KRCYQܤ8-d}@>0+Agʕv}!g0ߧN\ ݩ݈@X%ܛ$ 05LeOVđn2(m;Tي!%b]򇾰L>47"T2z B4 Oӈ.[k3 켈]Vz%yͯw~tUV\~ lP`ג٨z_v|Sg7/,z[Ϊ'@m>Yap.I>.,e'SUvZh> +o}q1 n ~a W% >.2n (Z6\= w3 6_֖ilA;$ӇFz6 ]ezŰ⌜ONk_$>&L&ZmfͤNXs5ץ[8]}cjŜୡv\z]^#=Nk2n|޾D>=:$[-HqN+vR4e_:&t݈iԣth?}_F,O;k**rAŖ?vCkp5g )*>ϰ%~Ɔˎr'^pZO*C|?[~<賺*嫢o A+WSN0_j:TtgadnM&u?MC9ӄN7 b` i46tK(mj:@(_\E"@Y+*,y 8\FJ; uZ'sƋߔǘ#|bpf/ű[g$c|ӶJ6t)b?,@F`s C/w7MP|W,M Zqo:џzω7lԇA:,7`㲼j1HA3 Yn["C5phR ϋVn0_:JSfg*ztF:Y)*!(_/ȡ7sӨ_Y1p{ 1n̲HouN:ږg ۂ.d_˩kB.{ݐ|7vF{=2¼"}?79ٔL>ё$%܄A QAs2FD,qQu8K^ R|]Gn#Q֬. )5.t8bȿ Ƶ+km箅Tg/C=wT>)9<7tmLi&85<ǿ@sAvYАFGFҙc@6|PM5χ=+GJ &Bcİ$*74#D^yl| *(%e#ظn C #"]ti?8=sw4|xwk'4B{(~j\lv]U'o\oP~y>qBRY^p+ "˧%sF`glO*gX"<%tf(qP\]-SAB6ͭ6F~BZ_dĤ> BsQlQDjU3ԁ4{`$g̻M"RHyGٍ2vh{eL쮚~Tp}ڞWI{]mcmܿ?kOm o\&B1k?*CWbh7!zzMY%wKVۈ4 l% 5!z'/^#Bw9vEty[ӣ0ޤWw}GY,1t˽yټ{ tz쪧w #s8`;Y}Ú֑&N:Ăq"uX{H)M-vv^#@o&vT͗QzI ]%*IMHnTPJ󜫌UoʟS$Jo%ߢ^ ZY*+{0oE d2$O^3W3| P!'^{PPZz ǕuJ=$[\ӗ6+0QM-T&'>.HZb)OxwuJ?T_4ˈЉpBWLU" ߫KES0͵++6QsG>{ꉷJD - 5QZ }ۍXjκy͆Y&~~Re]-@/h)763'xr,83L.R=8 4*Fl)% u!ޑqvwtY;x ˷M;lI YMr iR}]XZOQ&5iܽ&(DHX\mbE{:{i;j zŅ*)Q5kX'P2A !:PuD{Wܸ.4kc܌g""tr*m+״Gqq oh?^q.ޛnY|eV֠_~+L<'xeO~n8 ' bɛZu{0{y-aY*V?DI@,$У*P S7jPjCFA8eΟy_H_oǛgJt~"#!Dpx,eui aU|QOR7J4ۛ&1,CwQԹH6C+mKռ7@tSJq95"C||l:g*@.1MNK)w%fR 6Fez3ld׌18NqJ muMXQ.0~?.LfnyaV/u~2' "^\fPUpx:v0uy3x=6a@'O1` `R@M&Ah( K.:B ҰэSJ_S<j[?4|'rCLOˬڹ- 7V}`q1 G9 -1sWZVʫ@ ;CHWÔR_t54Hkd!cc6橽+zi(ٓ+ow&]c ̐Y>^zZJA@NeɩakU3h)Q o0>J%ʏqe5 <#&zsK߉# mfF lYl+B`L}>.]>5Uql{*9,Uŷ8R9zb5Q(T"Xd\XfIKc^P@ 39&w,{H&IkpIV&xq0K}M#zxipQnyx5ߥ)ۯuV'V-هlf>< of@2Y5 }rRV9mК^dUtX;w\{'n}|H_1\ *΍a~Z:6*m#P <q_5I*g}p岦_>! 
VτT3QΊ)+:Mo^($Jl85䋚 )QW %GS <~6n{2Iu)w<'[z"JK 8|5YLF.[ :RF'W\"ДBeM.8;ѣ\x2'j  `[g5Dvҗ;q7H*B8 C< cmYpz˂cKdݣY4΅9i2  x*Qrs01\];t=+)uD|ep=Rf&6i-a_V,ckUNy"ͼk*4'޿j- MnoP7#xvj/,qkr 6bNʲoyQlɗ.y0bgm)oeS))9HD#QCK-|PZ64`}8eX'GOiF*"hU8ot*F5')=н^Gse.sq'Y>jǖN6GL}Cory8T~V߱-}ӣ-k`IO}ޢ9tpGeKE8|Lt‡EaFls.GtKIjˣeNZDg C1zy1w` -.m-"s#L߃znF.miVOGztq?Ыچsʆﱜuȍa@9a0[9%f5.c_ zkfd(`!!Mq̋_".Rf&{"&4{T L#ݰeONBPN2-YқϫPf61,-l &a2@y-!AD^gKis܂[YgH\/;j1/DM}Ni}Q%;ȺQD]$pᆭ _=ߗc?KȤQ MY>"JvpPB{Cg2fpVF1-R?D-&3H un+M-=e-Y2wDX(X^E%WwU'"iIù$rꂥ> V-Bqu{X/Jjq,^'3ѵ60q^W@iС/Ϗ}i]9Q`e:&g{h<1ƑӇתgٱwS0xAf`Tw3$*"k(4{~E?}K@7Ŗug_,1 `HNJQЩ*X7Ôܚ ٯ >ŝC2M4$ֺF0fYZj?l_pj;ISdHs}C*j!ఖit X&\5w.o|*~RUº:&DeAAM.#W7xP *--7"Tˡn*a ,f}*\uDY;=JA_ϑqk.k2EƤI Nz%8@XN~-4^꨺BK5*LSZt1P!T(/׼FOG ۅ{XǹDj+voV;S3g :(X )f]1cL *'n n_{hBKvN85e)p`ԲU49raG>Y{禳zZ$Z=˯˫v|*NexC6I1y&yM ϯl s=9Rn@8o񑡲3)WK mEa 1E$ss۲Ř@Qhfd6/ڢ+4<QRȤ xZ-4meuuN3h+7ٽbj UVKa3-8-؋3+vMQA*:rhP !'3MQzfbgˡ"dW㿧輣1NoE5B50#cb E^oMΜқJgA_Q7rxtXL)ަԟ` McBs_ A1.U\il[kn ̐嶪Cgp;ljW[ʍ:XwN3<gX꩖v \4qa&EdGSd(3 aBjMoA:yx49x$:;^WX\0eR!6mp_DZ2z|atq?b >e4ߥeᱮ2m'hK^R?ua.Wj5|EݕiD&:Vb"7XtEqʝG=X_UA"8 L3pk[bGCK`W4K`H́LtBN?uN|T6p[zp\7T.|ѰĻ+x^kۧ:;g)_frtP]~9VZ;;[.utv )AB]}r7y qOYcյc.+MG|*4;5P#<$-1߆ Ve/vnN'j I:4Nv#YB&w`ŜƂszjX2\5 1OۙS;f0:2Qz{qmmԇ^i)Q]S;QHG~`՚r>/݀y*he)aLQgB;iLQ0P["-*8ZYODð9=92T\IM` 1kBT_w3]>'udQɅWD^z.nK˦DaKÀ "fąHq9ҧ2K PXoArQa枠bFnn59s<^ZtnkԌf:ߍ[ ֶ8;tvlCQ#T^Y)r~6`l'/V`1Rm7Te.󨺫@p~83щlF9DKy i頒 X:K7O-[ͻr;Ti!lvg7n~ +5./RՁT\J R"|OrN?:Rqp眰: <܏^=k\u#siRHM:kaʈ*Q:% UmQʳ<̝\3JDrq 1etR\7e(ʭ\ޠqbMY K 06=?6/d]՝~r:EH;s܂͏~n3'D<ǫc6p!D׌b;Vןk+ |ĕK PkZOtP`&_, :j47ŅR}4\'~34oHi xT%{[{)û;E9#ҪlTMC?ʟ;Lz#kE*~ۦ8::r$$:,_ǢAYT˅)'`{Rn* mkkՖSp4KPϨGOWEX,oZj_j5MYnX\O}/tYQЏUC6fFbDžPlPe*c LvUqCB761[ Sly747ΌȊeP8" cǘ, p_Yό$oLkc[07;}(As),F޼Z{ &g-go? m Jݰ7yiCZGiM#)iGQU;9U/ f*vyBb +|ݽc@y`D3x,# 90z6P.Nq"Q^G̢fvw6Qip2JŅ>B rarbuG0ln&P;7usmlsV Тlm]f&f$n'ȿ59E.vj!M? .%`2l6M[tJB59SFiП:my|1F\)cB\B֩ :a$~9ܓᖆJc`R*`dg ))ns*{o{AZ ϳMVFUO- h"?07ٳC|i洡^ReB;`ҳ[pڴuBS[p)v(v˂W}! F[e0P,iKR2$\ RO>iCdz[_q&Vjb}3~wA`JƩyѤh458A#_A#^TK/ǵ[Y춷 frԌm5& If?)8?uS`Ի| LnJhs2 AxSDUkܔIxGV/113NMi3U6م؄ww"6o8Al*R X,ChBas1a&[8jFl3J#]c!ժؓ)\RD|3TcQ&.T9;5fy HS?MFQc_7ܥUP]X\m){w$S{5YeJÆT!4ڟxNS[.MF|V[)y<\t.OPo ]όQ{yLu=E΍{|qٙ% ȭFy%>6կ^D7IniuI$rm?X5-C49lL)2b,tz9i04qg#qs$2Ft6td%Pr6]4k@o3tE%G?U%_l\Uy3YC^74\CJ TS w]Et;Ԕ WYeaߚ򊧏sx\"}>>gZ\U?m 5>G3&"iCTZ (U+?v-oK0rC|-ʼk 00&)л ޭ3;ri8WS[qb42NV #2}9~7p&2Vx*s$Uq < . nxVtIt/"eAn j~H3[)F# ujNlF2e =Ǐ ]z)8g˱,{DUȊ!~ k鱷!p%J2."'ofBsu.lZ}bUF,>jpPdY9рPDS=.fL?cdO DDa+ػ*#SxSuvn> GӐi21̪x8eɰ/bH0x5(,dr@7ͯ 2oڷ4^[s}ESgUЂNQPVI5RwNDݼt>%!NiNHMe@9B]7Eΰq;c Rl%|~m'ԇ@VFx&8s a+ի=_kr@R6}:[GZ wE;hb 'ʴ9s6g. N'L%g$ .Zoqdޢgq|P9FPNB[T6|\}"+Mx g3\iw Aӈ$+>}Yq؃B`zALmsIo9ak*d1Wk ]u b[^vuÚ| 9Cq~ynu;Izͼ#*nX\3>`GZzAĩsv pц}hL}0o9P5Yg <rE/RhM#d,UG˖< xLxBUϮWN/B?_,>b?kuq_pJiÈGCENaFOV~D-]Fqs6.v O=܃v /,1/?,~1`! _ d5O$faim (噶hsN.ZFWP79yc 6}WHc=MRR\ q_ҋ CY3P*[|g4R@I3BuȪ ^.yO_v+2 V%{'ndhWW: `C`9=|8m mnƲ4ex *e.R>xKGD gP/?`ˇ Z!/ni0:t7ޙD w%>Ko <Df2t ys,, #|f+{U=IUQc3ӊJfܙo/5{\\u l+-5"chQxd׋Gx+>a S;fV%eH8BqQ_N XSx%h@.EE5&,!%:qlgW(I/jr#2O 8h1hs`-8]uM18‹!(Ғmg Q*m~NXTȳ kvbʻ@=+EƹgSFC–,Ge*}(k^d J/{ ])l@7ZC~{rH3e&O?Z ?=̚D0 iB^-!WO﬍-d:KXIq(lմ8TҜ%(1ߊaޗrfN)3ֻ*Xh$FVH5kOWa. z6P+5,GqtFg#vlI>vTkB`KËHb0)ʝEa@z ef7eAnx-9?!H5ݦWnG|&>L&6"-uǝ.L-)ʳ*r|ijVk'mm Se Gj p`3EU<5PЄH`L|Z>-}"EsAw(Zʊ %[97fi)n zEƦ|]۶*#ln<Ŭ4$^бkv?[%pNʈxjJ*]%JxO*ËKW =5zmY"7{u N < %Q+CsH{@/d_? *!K*  rb=X-N7o;c t0d`YP^JU@+W>7x#NZR _n9KAD"u<`A[tÎ˲C\Dj" O2O,W$uVrձs)Tt"J8-\U9 OyP/xZEUyC3E`5bi Y )ӭ[( x*Yv9Dm Zn ʶZ=&+HξE" ƞAiZwl% Ad]~Y iw̺Њ>Uv? 
֏(Ih/8NmN& 9֟}4yC 1|&U)$8wK7҂n"ī@ڰk;5^e]ujfA'9hS :^/kR4cf8Wd,U;BZ{NQҖ{ oUz)bFIEPcrn[ӡ[x6#vQNQ8d /]gU͌ D_dh[BaɺT̲RWE_9_ zɑ&^0IʲOo{b]u%IKP~,ݖu&ѿz.]?=qoxMu}.CO62i`sPU|6-Ht)91^Le6KC~AAObAIDH@8i5I==^<殛j`2; ` % 6ދ@})\3x'KM$bͪLk8w͍u3z҂ '? wإ6ĭ*eۻ9Y\֊6t/k.-r=63,E{^Z\حΨ==](>RԟK8# wWLw1)(gθ /{ څ(F ES\!Xv~RH:Q^P^ F1 Tł H'ܸm)` :\i-s7,etMޝ,b&nJR&Pvoe1_(^MAl5UɛͽoxB/y,pB h4;Y|"2cˀ/-S|yg\vV.zc ѽ4HQ/x:FH@pai))^F&SOV>6ΏpA\O`[܅NXO3E^uoua& P័r4q"ȴhكF\pl KbUQGa|$=,hH[ũMj2L(x1ߡ5)o8r2 {?ϡJ9lNΤ> |Ad*&x]_1O-2Lk1%eoj;EcL@ ;avH`SxT 3z: .f<m@5t}N׉%XFtΣ~_WnY+1:tp4ʔOouܸr<$ls Y\T!K$KC5ܞAg9N,|}B*֎1:H󙡓~=dy{>3YGeBU59]?s=w4g뙮Mat1KcIC iʖ~n!X&MwV ]u7q*U["Q:NG}mh]4:T(cQ r ia 0N|Yɬ> J\XwE츋-YYrG+?$1J5?_l/Z6f-ac`ߜS*?:QMc 'C<{;nS)KN a(# މj{7T]{A}\D#TJ"*~Eǣ=2ꏭ.~Pi76V\WYVV2d,tշH8?)ˮ̆h;̞Z$%SGGsRT6Rhi*IZRžl|8$h9XSh4J*$wK|zrvcq J79m"ݾ+Lna,fD,+b#c4qpZEu _k |"dK_.(aju,SZQ@sv@n5YPRwc βG8Ԧa=꒢FS18y7҆ˈG_Y9lQ^b;U 1\xdE؂.NyB bkb^Br:;ݍ)s͖.PJwNMMIM`'5cp&+\pLEWsۆ[~ߺ*k<|) pQѣŞSБ*YT\q_d=/6t(PUQ|I%H\=|?b ](D$'پps:k,N ( lݦpL2n -Wxc85<Ч4Xz~ jB٫?a)R{`V A0)t{Z(e^C8-6"YĜ5b5SO[MWB[p}?Ḧx j(4:%3 <%/$ټU2j<$ '±F$i!SJ_hU;bY(;gh=c `] &AB@XZa1 \~kz2{@`jNؽh~Xnzxm%{ uVQ+68,9[RGrs>T' h'؜=c{1D321+eq:=U~Tid Qn' TpVޖSYa>kڞ 9LuV,&߲(.H@J27u?8ېi(OqZ.BTR.[_Hb7ڱ) kQnT2F #VP:Sem.9wTV([n T;?lǂCF!Cp$0>w,noWƻmD9`T:R4(SADCql^u]Ӂ _㒯565y%,|A4q+2|c"q:mGW[<<'OKָz,i>.,d?ַyF-GyzObw'{pta:cYIԣt[4;ehN޲'9vNmf SIt3mB)e mkg70vlPnuؐHBR7k 8 39Vk0Pw`4᪂]ҹ"[mHe0S!c2 Ì))/)q1E<l7Ϩ1h&jX|f`OPCO.!BOOHgyq4eD8dUȳߧWʪ />`l V)/0]Q58 _{6y>h|#+29Y.(wI:fots(hԓõG_WA8(w FbzZ.CN$a*nM;z4b}N:}'rɟ p?@/2JʘLCUIed1\rwB^Z9MV9 (Uh7"2,U 6y3#N&y Gf'x{|*Ѐzt~XI5әjT/c_Idpn91/mwY6cuVxYOb\B~*{_nw'zjv7 z VlQkC` JKKjg^+LM"@lV4IAz>_~}h)^ԹŚ5&30ZGzcA1,LG $+ȜrA;@U.IS>Xgۜ@ ~Q[@-@͐>>w{(`y}XjTrXJ--Ya pFrZ؋I%^ac 6;.#[^1>4M>H@%~ {&-/'8i?0NtV&xp\SxWUe54!ԠVe LrkTs@(0JܠLK0ȣH%[|?IUc6t1G6qLQNU5`nU9;oОGv83F;O{n&辬$gбfګx&6{lNm@3kaMN9s dTkŤDFw[={qf0M8ߴ&5a8Tk>X.927(OL.2%,jl؂ȹh`ZSQ"ƍ0I {yzfϵSH[Պ_C9ʵd)I$)unHj;a~8R{ C*I&y 0cJ^\3 $ޡnƆFVDAfgXN9jZ$1뗜N?gL#tzV78C{F*, lnfNzZX3WYș1yǧJx4.qǩWй؄$\C ޶L= [x)HϕԀ}S0goawIB noB45]a3E ՞4&$CA_ D1c;!&dv jAXan ;UPҟ33Td:MLJ9cªd۞՜G׽)k 81>(z?]LE@m'Wʲsn v˿'M-vJaq jb^<dDjtd[%ufZwGŞy A'MޣN)/ִ$v m)n͗$϶6 TQoYm=q?<>qeۑWEXI1x 7jF6Mu_&O2 BAmb1Kkeo<[͡vYu#f1 Ӝ֒xQdOzP${wthdhl,2Ɋ8l7[ "v߁&ğWx$W#&ϩ]h_?,aOs}atQo>øKJ9.c-sK.8b/h;",N`6 Q^=I/<|RY-w?[//8ަ(+tUمW&@j^:|镪*M)4]>A:\lj;Ś8V-! E !S Z^;17tƒq3(.,XfZ8NC^!RYq+qr[,ZتjWJU1 VI3A@9K.њwJY0nlX_D)vb6/JFXNSO=gx(t'(oa[/GN@iQ&b"е"2;i'|gb> c]"62==Xr0P >1>Mֲvw\j*ՍC)&hxռM;IV"ª"+n,* jUܴӾq3)eMږrs9nE3  e^S8a!ȟu-+Qfy+ɨTUUѝ]7&ۏU]M&j~-}!A̐%S>~̄x}U:ۀ;I R83PH0ư8XQ_a7L~䠍 L ~TdTτ޽<+%Qp Oo^ 窨yw8*^RVA`c;:,XqE o͖5StѮ]D~fJ`GϗXz oH]NZR-}=,Q?A(, fX8.%<[ CSIxOsK_<f}N#!+.~z\eI1m0B}@ڦXX ~Z#IC)9.T|6nMnߩ N^U7~́|`$.xUe58J2YѮfeo]P=uqA@;6>gQώh(DKQם 'O,ʭ|a>h@{]AkdjَL ?<@!&f㦘Pt(}!!chz EL.ŵm;ƞX86't^8sZF)dOdsqCnȦf ,]>j =$} VJws$, %̒ږqW1h%-\x!juyoҙzw.ΪTtǛ2(IN x{1ӛ] ]habv 0{a^؋B#2AJ駋ʝm21nM6y eǏ|SGϮFK$J6ĴZOm@a[$t?B{T-Ԣh'L65'puj 03,=hy Z<׻̾)b[ļ'SûIS8=pE'46鶬(劸 ][5&f _UjC$ -5+IEfۃRe jȓ{Ȅ a1bɛ'*9֦̇zK&#c$)u`҂Uv[nŻUmgA[%2T1b-ԨJiɓj8>`Pu$p7 L|Իm[&%*O[!ؒ;2Dž9Vc{(a$|:"\CA:gӦwmʤ0qmP"c{!/*lW`Yj]FXL Dt7'uAkW?4asDUBM?J3-DLm[ȯnx0 N4϶j%ʆ |IҹSp'qܦ·}/u FmXqա, .U5gZЖ!c MOjXV y!zgsTOuw79v*LLYǶu(or+"S+ 86xAV&VNL`uFŠ&*|SO=](3┌.(pc6#?KI]F w"§ݽ"7#^'i]CNDQZr Q2p+/I:~YNc'M`||=?2A5nlC9M \Ͳ֐mE]rjN+rN8%+8oi#LkzKj 䆢2`m |\qݑ.+;J;t`_sM#+ե=^V%EB6yD 8T3l/W`!N ]Ҽ}f˟Rx^CŸpJ@&.G>92-e~׆+M~^D)҄iZ5waU0W). 
i8O k*:M5QP!8dbx<6ԛ sd+hKx,*erPV\ZIkxA+,)SRZYs!rt8GzMȩrBhw9ݱA&j48" -nt@ V뮩ތڽ7]zJ-}*nh>INS3(ѦgA~<&єb13' W5`_+񵢸1b37DJ򶇨sdBEw~Ss-"G5 r7Z 2f-\dYb釅ZY1N k{_C[+e;Xxp(@-"z6=ٲMx87 T$$`PUtΗD#3<S ^ݍ!ٕO6ۈk?/Oz?)7\wġeE(  &Paȡ4yN 2ًC6p y־/lT+jkO(!A "怎N|kG$$&ods;We]?jri<# >О~x:kZB+ Vwcq?lR%d͆|2쇐)FVO*zQRoyWI;0f1%sגluO_/Q(?%'YK[/#^cO&ȤEtYc8{{qk.nQt*-UIߑTR!0Z "̠&A[mmP<[Н{NIuYɃae/t@$qR1]6:O=ert/ې8Z|Th Vڬ`Rvl9y1z FSou8ڼuΡnώ;9uDLٯeCTJG}@ȆeQ b҄ >U& 4s==oBwɌJ7FFDۘEt}aЩ6Z5rFaVJjKr&PijT7N [| ߠ#F!]⥸e R&MpOk e z|瘮ŜӘYqT q5zgV`$&U$s#.*Vb+%_.7#5b0S"EUS:*[䡵'Coja#8gVlTb{ uԾwcy>!ȹAɒ1R )aS,&l..%Uv ]BREp|[$;Up-}(:^e[B\s+mx1?I!W=WoH?mˋLDk/t=1]I@ןk<߈L>.w2<|6 :akp0 N0b VU%x=lG̷6 *fZXQWЈ0&>Yo]gllNa>t7*277/ʵBBF9,)0)h3B&߅jw,WpwN5,w)Rҳ'8Vy-7&AMiQpr zS?L~mYLQ}ު$ae4)!b:_4Jۋ?^ɐGELgn@NnL Kz)q\hp&mo㲡AdoLZN" 0gJ:wˮXt *ǐ-b7DI+E8Rxv{„tganY3W13sEv:O:y2SA/VZp\;O!1`CZ" 3cstKIT:QNNsɵB4SLq@#)Ӟ,q^uc#ћ7nόƖhx$Z;h5'1.21lU4K\-` -O>H?zIkqm$ǿ> P {R@Y8Qghm)*N<x+ÿ\ds( F]+~tJ?<N%\zۢTgTr]} #P;uZ|3&6?TMw"`5XmFZCr㯌 _PA.<fH螦'm0*:0O)'bh -08tll؊VlVlvXr5?. RuPRi>m55yM?)V8cEվ嵱wvFW^2;Yu<˶27bpI)m83Uu/9ʐ<ਠ940"׀~z!͚ջ(#PpNfcSO QJejMWS6 b\m0CVE %"Ӕn'K=\齈BPN.ݍ|Yv#'M <#ΥsG~vqw' .ڪ;à;ܐa&K{&3c <Y\Z*-)>E*uQLF?!Pya!!_KhV?GIizui:T^ӛWn+3~ZW3[N2~{%Z]ߵl,dCV‹D4J3gS Ulp]崒Ǝ 1v+%;F304 ;(Nk8nW#/6J)3@zST׈lg e Vp%Q? "B?oyzKĉ[{+E ]SYFA~U2TxYC#g* V2wd-~7Sfx5%|i{Mb(myP7[K_ RY RLmWn6_ml_4pE%3I:?A[U)ش`x "Ts<S5zkŜ `YXJ1(Y:'] !_"۵z/b?:gz]yt;/@N7j7"yGnS&}V뵌Rt'|n N;/U^Fќo fˏMrI-:u,.͵7!!42i})L =%f´ Iٳ[oa-ہ:kb΢O>jf%mmJ>"Oį]ךZޭ5!hmA[{-,8UM+<~!Ag@rl2:WLVYhmu9Jy]ȋ<, E\Aɂk/5>HP54ͷ~ArPvIG>=k~5,2?dyi74YjHbt~ >)vw| 4Q VZJB\%go<}xUvG&XIs1YyDZlp7G ,rU~A} yˏDR|QRa_1w7Yh_! !Ɲ9O,zO)vroF%Fed4 5SFMS˾)]HPw *Qs^ĸDWwd-4嶇P8/}6HXЌ3ff,w8amɣǣ S^Ah ,,kP RGr$_]0abKԳQiEq\ Suud9?pGMkVtF?:q f'`=_B>DR`b=Jad=񜯡 6,J>\,H;`ozxg^ik1[;8Г( ΁:1Ck}kR,;|[SIM<mnʾr+3mFy~ą#a" efO*Rln6FGu"^B[8e읺ҵ}_5b135J{ݒ|8傗 ֏AJ<֫K rE HVT\&61u&igrc 9ƨGS4>땨n mHNl7sÑܼb㽵v;pAM7|P ԲDP<ҎS^KLu<ŪgҌ#s)VX4٘9eoh-Mýz*kOđ` 0,LI%UqYJ%d2P6SADVĖi~$I+Dۢ =DWZ.ǪrQyܨklG,"<9J$+gMeV1Kcw `wc3qtr'Vp/%f13Guyc!]U|Rn\ь\i|UudT,݂[#NOZ82FGFyD/8@#4p7ivPTR4fө8U]9|qUhC0k#?#"bQw9:5hd/F|a1`T6*3R~ߕܦyH I=2Wk69:ȑP;KjC>5=Jlq~kЫtwC`k E\dWzqp~[)?%1ܡ8AWVPxBDPNZdK*8˼E\XN 0gu gV-Mgk{0ݭ<"pMXׅ(Eg!!Xp,AdL~ho~HdFnis3W>囵auㅮ)'giVQ`M;T{p,ErΖ2Mٽry4Tj%h&Y>)Uu0@:Y@Tc\~t(8JKteJ͗.%qOUW&5pb,UUsֺ$#VWZ)?*^"  \2EJ#/ "+㦵W}4@XemSvKbWBFy\씭UE>lH&M~! Q)*mn٧XUQB%Ÿ0udPI#):o`(VeGUuܰ7d/MH{SESh=$oDm{: @k]ACpye NZ02v3tv]ɐ Qrk"ۜAw - ν˞`~k֔Ȏ`Z4i)õ_*{ +LZfFOHGwм#xp=fZK-缜#L~F8U{¦ q}:§)24N,@AM G{["awܜ}Ȑ[r+,:VWDf 3p "Ha~JtqM_KAZ0Q;=冶`t*]V+hX K_"T_>\o2PKE$0pl& 1H3 ΍QO|?m30I<1xyHks iG!F;+! KeY l<@c%W/m0ΖxY~yeӸ`jiDx.Ql\@3o0=xv/{9nk } f?wmKJ~OxJ[n"sDZhncZעJfE9oy]Hֹ FVx9XAcگ= L|px/\]9X/$t':bB7Ŀ^!!~(RX/qjŹ4:6SD] > !;8j8\74ʙq! t,q+X"Zj*¤wFh )U7#+|?Cv-tvN!|QiWBEͥM{w,aΪD\V\r`Θ [q["=H"m9WUݩ R+>7ˬW^,O fv6~lꯁ,r.)U[EFЗ.v*se,:(Vn$sG_>h޺wf'"岚1[Vq溜/{mϷxXYFҁ1P{V7d*@gI(rB:@%󷫙#ր) 7^"r:nkRߴNhY8_ }6|8PJrҘH>T;Gn$CU-'{hYJ@?G s.aIz(\"s {x\5MH6.:UBrg?D#Yu(V]fHQHٿP-Y*hyoWS뾒Oc#|^JmF Z 5J/+?7DNȢaY-&zsF ųv 8*۰~q`O 6I(LP8fe 9 3$tkdΌ?_Ju'bF7+ZbWrF%D2\n{x[w)rO{E=M5m OlT9gژHVC9.B o9bUmsx6ϕz#u<ދ!*"~WT_ ; /ucC+K)aѸ<eX2?KHlRTop7Pe!Lv| tvژPc.>eCb &'nRV7ڙ$4ϐF:?T {]/UЦY^3OD.Dj!}ܷg(>mE8{T<5X[+A)VCZL5檩"a5Ue虴k |I.ijCd{wIґ%H3tKg(\!> (YOjdeDF5V-'#0 1c=y$d >kzszF(4_8N*N u]g#̰̊xl vu@ۖ|>O>[ϝ:ӕ7`3Ԭ\ݓM.)Cvsm/J V_>Kjs޶uP/i?%|!vEq\ bk-P嘬R]Q{hA LlDT#5 S_4k*{An粫wR9}IAS +̜HU2#JWպb#GֈgEjrtlv˂e=rBj$)tviT1}<17Æ_K^ĦڤDTwf5UsWѯ2zezIL*$yEKxmLqN&r_tWja_ 1^%`u p}h/h:<'ƮT?Ƚؗ"'{a9HP3y.L7rWBlIMVWCN9|^u_pV5}V7s2>FD%\]SvdgT57sl{f[1[* 'ֽC^^.Q=,m*  4ƞ(W=C,Fw Q:, ĥr,VzUvpO_/ LQPe/4d_>׫? 
vODSJ5BָfN3&cj5;{ª~t6 !U`s'FJ([v2UΊ׈M~ -2z4E1Ulã4p $)?ZJ)ύkֈjfnd6j-6gJ9- ʹ`':DI51ijr 2)B#9tϟOO)4.4dHd6FGG53~}.L"S-7tVcP~GZs\y/Bcg0ȓ-ʩh/♤oC n?4~?..\yR?LiiD8#Z9 By@3/a!ԲDJ'Nr&`xY*n$]q-yxꁏ)x7N¢JR_J}a) ܟJFM\{p?PSMԥ*CS'BBtuuК yZv Qw)ԑA]CB"iEwX6PĀxv!02*Gu;:`Op;9UZtqOZU~6;8!7t?sWe fa-O>'93mjw(\ާ lSv#`{D(=fЪLQ(X('Drhwx Ibw1֪zKe=8 JMcg~5XEMLe.``}7T+@is ̱]>C104 .3@gD?*0{^g785})n%GNė{VfQYmH{UIoQe~2Q:o d`Z$/b}_9X& ʼn'EinQ0/#Gd ѝb-ͼB3Ju8ljݐ^tm5gegDu+yPZb' XwAʴU^9>_qR D=g)؛JGڨ\XKqlsKއR$uRRu(x :PBs1&6|' $OپA& И'aPpw6KD{|N\ #LrD7!1L緧klsE XO_9EťD "q6#{ |dv f%4^@^< )R+@qW'S U58|MPƆe=YWE h+^B1V-i\h0R/{ Dɦ:\g<#H4K-#i00hUv<N ^LyP3)>ux@'۲Um}"" P,#x0h;!0`FUOt.zC|~5W r;YZ8B42ec  Y1zEGIG+3ܰ2$2gj; 5Wxn=x$6.+"=>)7s*)kyC yk7 s&tH!^^b)_o HP샅:e^#ZssyPj9$,PY+mSDiWyVM :%yeK'Q- ٲ D 0 کxyyu^bW6V[./%+lC1 9MS4!q?ndbWCS*] H6K3G ;[ܫ{Ef9^q퐵*%]ʧc|#R?H?aE@vʂ5mLluE67s*+1/^:ˡ5BQI%UEE Ϸ?(̭Od}#WG #LD:-`CEbR}X^1c=~}?IrK @2/Zb'<:DK3@-舆~?v47% x#ԗyq 8_+4iV|کg !LU#82'#ӄyC֎T|pv"aHbwZn$y,XC}!5a9(ˡ=l|2yٌB`4ID7<+ mb"KzW-۵a8!M]lHE(\l~өx n}N%X/($}i/<-@Ŵ1noz_#cHOq .@-|#1CWg)ӆQ ? y~1 t΋Q H/&m6i(rYAd/+ /QL6 GōnyChj/k88]so(FC Ň;lݑ_BBZ]kgy)8?sd]rlBˋ/% }aU0;fBԼ@.4Qmhn(*9,c2__` ~@qxpmdAky62j:r@񸇱ӈvزրb4!ttz9ݝ8s7|m0H4v8t}1D" lS=tokܻSNOMf_J:жvnŶI7]\`Lr q%%99_c9'0:]7yPה! ܦ#\=iWN'h os0+Lj'pWBܔnaO j0m9b|fټ0LS3x<+ YY[ZGSx3t>1bĮeIn4D xriXz-"L7]|m%vhAHOVx"5׭r\A[VQUKïZ` oj& yH_I C] jRK2Y Roҙ x5mzhTj;yT8SjFOㅒEFR\0,|>'ZZʴ@Dch$T)4BjySܭf$7g8sa^ h~ &[,%c=6;;8*}兦*l0X bfq[|""@}npc(oI ߲A@jv^_uzJ3:_^C>P"Ot` T#V/-ǡMAkG1uN\A+լe8n`LƊY~ =$G $B{/1p, SDa;8ۻo<ܽ~"BUcqNP* 4(_S0HZ嘢.q^x萵Bۙ-r=n`!tՓm!V;RT:NU0v17YA'#gk o"CDcu8:䁠ޔD‡ 95; f{Ƹ>dEdi7ۆIk迿Zu CwҊ8c P|zX"DX3R]f`ctgS)yXP&/:j1D͚3lƱ.ÔѺD(f;")| Ht Rtgc0-.zB:}C6fZv]9~(#ÂJXP"IH*NDpos;xn{=4Y&Om2#b Z5@A P _c;;یȽM/\d=LDGa?X0\MWsmy!3gK:_ep~0ܮ4djw}n;5.(AwR8͖쏛zSHMd}uy㻽(usRQ0 ůMpЫԣHWz*dx_4,;W-13T H+aѲΫ`ixm$< Wx"ڨzYdwI*s} MTUDbv}^fp[`v8MECS  UC= p} ;Yӌf ?V`5VVU`Lsnŝo#ÅFWya@|L.]ܼ3AP霰{& utbu!MR}*xWw:SʘECA09?bM@CH#ܣBbhNFŹ^.{r,<]j]w,HǿC$XDa'QQ/g^!V4htP\%}R & Qp76[MZi?B fw bz+'DWlegֶ9]ڌ/2=ʶXc[K^0mۅ2J*0nU.x$vdnVͻ:Lo=NhEL&+3]q<ޅ;fJ2>2h:&bbс/:uĠg#!eK,Zi/d]9O4W= _{-*Du6b YJcm zZ~ŀ>Z^??V'`:ךþNbo5o3.ɠmLUO0f2G*|aWt8.4?-1 H|e:%x2@"RNEALk 36U xKXWJ7hu8^~VhuqR=U;Z(Cɺc1Z1qxR]fD_L!Ο 1#Dؾ2D(/˧1G(5Wm~;m:[[!֊ʜ"`.7JX{9 5։f J N&Dt$=q/_*JٸK$A_~JSunH#ʹsbc~Ċ(BtS+iTHV3*Eqw54fM(eR!3D8b؉@c#.7hՊ 2v y)hgv+gz!%Ad .l8,pD3)IF&z{xTԧQya-_c7_ }#zޭXM}&QP nh0Jt!zg,؀vǨ,6c9H)Xi>#\e$P+%-:ؘs)NZR c*8vuf,KZ&r 9$p#_(˅ѕߊ Esεm`/z7/[kLS='{gׄ12/ddi?/sЌ`WU~�z1rQBf;q>F@IHX2YTӀdv.dٝɇ(oڤjSHQ1 ul dTcvjސsdD1r=XTƠCVQcR٦dBqR'cw\[; v'hEPh<:LXS|K|>rjdl xRkhE>\̿e3ԙ [" -{q dK^Yw?ekT+x;-:e]7gI7-hɅLbt6Y[,MMު%1 AWMIR*}4 ͡`} (ǐ ?*'Etpy ) [ {'֯ P<\:uҳi&Uk/[ ď٪mDŽl_jjTQNLݯm1#$ҾÉ!NDK TX7ILheͫ1Oa3q[}e+, 1NbI?lr4\yA F( rf"_88T87HoTl\G yߠ$?ϝnrNf<1Qm-ea L>:ZDM3gM @} &ǿYqi{TCUÍvӴUw'7qWъ$~DbEWb,J\Q$/Eh0'%UA}D7 p8oQ"-ߔc In>cۥu ?;5N_D.jP>L7xhY'%<>eNSÌC]Yh^n^,_XsT;DPSF6g#yR)j)J`\ 먶&Tśޫᄤ@Vi˲55C&M$Bo* g-b6ܳUxCx\w߅iöqؔ+}oFIhؑoN՜~fxIr$zIE6Ml \?+]ZM5Шt]]\`c7ཚF B4K@MUnen=B09 AJBq{pGG)fLP/ ܦ7 9F@bY }₀xn5l;_{|< eIR oJ-%z~[JD`bbH1mW~$TtAX\84Y @f'@ %˦ ӎ*a@mQWLa`;*ޛ;TYRg~Th3Dw`2Dp-'Cr+ \F@0 ڀ@ʘ%& TCڅ2pTCECkMS7K=P[4;=,'c1ǯ0$!!ݠd]NL?UR(|-FU愄IqJ eLȺ_-Ddw),v)5)I)*|hFw#Y qpy:[ÈQ`qa,0==ʻ> /vD&/#Fڰq'sL ,Kg>0rFP?ꚗyiNSin̾)F&5V}Dsr&􊕟,\b|1bl"CL`} r 1Nh0 Ⲟ.=(*f-YMS\[zEp v *+5瀠kXr)6Vo͋ aU{:urVK5)?|)yZcuOƭ>{"!߽AWT/%R_NgΥګE^᭜wa>Bc̝lenh+'PJ9r)޽NaϭGsy"սc *2$rLtbobOg ;IȍigzW&A4\S+ d嶋 ^=Bc}qUWp(H?L[ Hdc>N.xUtC=3ۉo? 
[binary .RData payloads omitted -- not recoverable as text; only the tar entry names survive. The data files of the surveillance package archived in this span are:]
surveillance/data/measles.weser.RData
surveillance/data/husO104Hosp.RData
surveillance/data/MMRcoverageDE.RData
surveillance/data/salmonella.agona.RData
surveillance/data/stsNewport.RData
surveillance/data/measlesDE.RData
surveillance/data/ha.sts.RData
surveillance/data/s3.RData
surveillance/data/campyDE.RData
surveillance/data/salmNewport.RData
surveillance/data/hagelloch.RData
surveillance/data/q2.RData
surveillance/data/rotaBB.RData
surveillance/data/ha.RData
z4=FS @H@^H2 ] dZ2WRȤR%DP*TR)  ]z sidq6)!`d܀tV06DB:9ƳA~ƈaTv<<ƹ /3<'+<ހ㮺뮺wwwwwww`݀{}k6(7dB{` @+fB3Qن!-A6?w-S?8Ж)qaIgAA"Dݥm/ydu[q_oe=pkL/Yb@n^!f)䍝8+ߚ֤tr _λJby8ջU5Mn@|ˢ1$G[ x} 2hˆ`$`D ւ DIt I^Gwwwqqq.9ss\V5]u]yRIX4VIX$TbI+$JI%brmm _xV*I1U$JI%bRI$TV*I+1\997mww*6]ҀWqj뙙UUUU@k)FUUUUv*ȈUY UUUUU[.-XX)JYb@ v໻wiiiZi9sUp`H ]ڸ߁P~?1<C3%"ʮpf4IK|, ]Ba-Z / ,5SFm}?੒56pbվ1ȑj s!=̆&n. C9l^la"(Hޕasurveillance/data/n2.RData0000644000176200001440000000047210636320360015115 0ustar liggesusers r0b```b`bad`b2Y#'H<ψY$ ļ@|"  $>Ѐ %Ę@p"F V @ @LPsXg`@MɭqV:f$145HldA3B qԀAg eF>^f03"g@2fͤcQ0 F(+J@ a H'(Cּb C\I9SRS`KKRB$t#saFa {Jfq@Q~: surveillance/data/h1_nrwrp.RData0000644000176200001440000000043510636320360016335 0ustar liggesusers r0b```b`bad`b2Y#'0+*/*```I1/_d@F ccˌfEӋF6Đ1AM6w2"ɀĊGW˄f':B=> . sC7T-{ aǀF(`X 0AJ5/17`7 ԢĒT(%(Ib:$ȞYPda o surveillance/data/m4.RData0000644000176200001440000000037710636320360015122 0ustar liggesusers r0b```b`bad`b2Y#'H<ׄY$ ļ@|xH<#:FꙡY3&@7] %EX -ʖby bvZN4Bb@r6r@w(c -/CQ0 F  0Ac(Cּb C@ r'$B9,iEHE%F&$ÌQ@)E@?Ÿ surveillance/data/salmAllOnset.RData0000644000176200001440000004155212630575752017215 0ustar liggesusers}yGq;dz:l˷|5Ooa6l [ K$s`pq!$@HHBv$9H$!@ïT|~O=[5=uuuU>u]vBZy|m,ô\ٲozǞ{C:lݵ!m{BHTg>py#E/%WD)o%|;; ?s $~ ~9W~ %Hao#N!˄?H&/^$% -'$?Lϴ O>J« JpG^OD§>Y7'X› _HR—o$|3~: ?3 M{ ? }_Fg [;/ _%'Y_$<$s_'7?g#A"϶ O>W>ᄏ" O!|:7~,_H—o |3'~:?6wEx/~>+ ?Hu_O[ U¿N'g  ?!焿Fo?6> P-“>*‡^G(>ɄO'pAxp9#|K?𕄯!|?Fx+mEx /$bi¯$j¯#s :(/ ?!U_#ׄA[MO 7F~2+":G>O&| 7%\>/ |15'|[?[ Ex;ᝄ~6 /&R?M_M#N]G?DÄ?Bw'  W1KM?.%#<_ IO#|&ፄ{K!|. _L2WHO%",; &l#O~C@M!EOWo#IŸ&y D 1?#&"JCxr3>!|JLg##|>_F &|'~$,;&|/IO~9C?L { 2_!ko#iŸ#@x~ Eo ?!oFx!BW^CPGf /0_` /0_`V/0_` /0_` /0 /0_` /,f /0_`Մ6f?`?`?`?`?`?`?`?`?`?`?`?`?`?`?`?`?`?0 300000000000000000|0!!!!!!!!!zf?d?d?d?d?d?d?d?d?d?d?d?|0!!!!!!!!!!!!!!!!! 322222222222_ ^>,v6+SϺrg}`q%NY/Xs<8g=bq3_>>O}+}-}/o}1g,>c]ýlh?cV8V(#ٳwo~?v޶ggo2_rH0. 4(5OKvܒ?,1ϖ/M2\eg%%bk=(WdmIّ#qR" '$wrQ wJ"ih{Z")&%H yBVFuċw&ϖ%YqKL)ɔiѴ<-zޗID0-O35':M9ms[mӒu|'he {yfKviu)wyggm珗&Qi~PW|j]#MjNȮ?!wre-΄ds" &3Nl$,cRVD{\d-[g.;ek&rEmx&tJ֐gQvSlbtȾ/W dkD?ƾdr4ȼx&yk-;)ʰjH:v.4M\mq‡dhwɌ&ΎG-zhK+MFK\2\\ew$d 3\䙋 gEF/Kgoe"Md}TI_!M ݹЗF[Bk[Y["޶2m-K&v$[.; Y[Gl-Qw[2Nsl̖ג59se.L\䙉_&Qy&r$;~8A&{&Is_c*}nc?!^ev[BoKւёu~;ӄB2 &$✐(#6)oi.:(@В`+$sZMBh;ƵU.T{DoqMgyF. -6Z+ZS:qGtّɆ;HuёcB3) Kȱ-h[l2-֣dD쏥o`+bWIGH2W+dϡɚ$ Ddyy[/~-k-;d[H[l-|xmY7>۲"זH ؒ&/ށ#6} .;Y\Й15˄8˴ˋmd"|/!G)GgOmb"\_ܒ=%t$jo#T&|%; *ӤAޒUgL +Md]e/&~1{fVEg> /GqkOb Wr?JJoWR33i&Jfq"kA%k=- AwWP5s Q5|?t2~?K$CddLQ.ɯ ~>sor=?̗=tsX' B5J/S?!B[]P&{` ܃O2]߇h+fe'X|.^@$Ω+^i|fed3-Tϐq<ʱnP >P6}v d{|0k{K$l &wIlN8T{̏`?o㼰À5(TşeǓ7T~/\]VLQב/ӲQgXeP<3?N@+*x#2n D oPx y~ aMDF>?HQIۿ!mp){g&53QNYI0?IL&3*syC&=x+Y?3j^J-7nIZ rx?3 ؎@kuL)oU O82CmS?S/Q6Lf{[;Q^=?- GR6hܵ\4'1ϖ;{6MKFm{kVݷW]3 7CH: )'KVgJG'kFiw˿O+z?{?c?m7&vN,&Fc>ۇ߇ˆSMiUySשVhX;ۮe|~jef}Kc2&=CZAi9ُ| :\+O9ۡ ewK嚻Kő`GR8 G23^- G>AvK, K- dvK1I8ǽ&N8ZisnG8:|Իc8xi'8p xiǹOdwybՓv4=Nqt=ҞCp}חq N sy|'_gl~' Gqld瓟9}a\vSPo8mp$9jrN!";E4{gSWgpF^0 -CYmf9Bi7Q[B!(k{8>up}p<f=ȺI^ ]ҴIZA˱өsTI|9\J v#?܎|;yFڡ#YVi8Bv.z:w2i?ב]w (CXkX%5PDp/8S!qQ8^xB\w}7.ByCIYw)W9x齙2y"e/|(׷~30y8&-T"T};0qr8bǔ8u$;Rl"?͇gg ^aǰއNΡ<^vyi !t">ŔV~s@⧙GO~ZFޛH7x]@~=O}`Q`WXkBM[)g77Iyf)K װQe2&󤯤Ol s o|\m1=?toa]w~.s,:Y z??|@OXw3!+{,%UamcB~nj;úXG}@Jd>r:z8%h'qT3l1샣qǏǰw؛ >W:&}X (x#'v\xz;縀2~Ͼ'>o/yx1/44f9S8GxaSCu`p?|4S.Ww}\wɞMlD#!{z_pP×N$[ P  !R8Lӿ*_U4y!58(&XE?x8:/np6\8tHzrguI{m/{?" X-[=VA?82+ip#WnCC=LҞC|oD1-hH_ǡOkN_*>͒:l!%8q7C<ԗA2guĥs^p^U`T oxx/y%6a|\.|+l$ꣲo;cLؾJ8 ~oW{>i?cR-P ]8 U/Zu5.Cph% >^83=l>{;<8: b='pz:沇TJz6v?9!EºB+,n?IU})IΒ?@? 
surveillance/man/0000755000176200001440000000000014030612531013514 5ustar liggesuserssurveillance/man/salmonella.agona.Rd0000644000176200001440000000126013122471774017232 0ustar liggesusers\name{salmonella.agona}
\alias{salmonella.agona}
\docType{data}
\title{Salmonella Agona cases in the UK 1990-1995}
\description{
  Reported number of cases of the Salmonella Agona serovar in the UK
  1990-1995. Note however that the counts do not correspond exactly to the
  ones used by Farrington et al. (1996).
}
\usage{data(salmonella.agona)}
\format{
  A \code{disProg} object with 312 observations starting from week 1 in 1990.
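  Use \code{\link{disProg2sts}} to convert it to an object of the newer
  \code{"sts"} class, e.g., \code{disProg2sts(salmonella.agona)}.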
}
\source{
  Farrington, C.P., Andrews, N.J., Beale, A.D. and Catchpole, M.A. (1996).
  A statistical algorithm for the early detection of outbreaks of
  infectious disease. J. R. Statist. Soc. A, 159, 547-563.
}
\keyword{datasets}
surveillance/man/hhh4_formula.Rd0000644000176200001440000000602313122471774016375 0ustar liggesusers\name{hhh4_formula}
\alias{fe}
\alias{ri}

\title{
  Specify Formulae in a Random Effects HHH Model
}

\description{
  The special functions \code{fe} and \code{ri} are used to specify
  unit-specific effects of covariates and random intercept terms,
  respectively, in the component formulae of \code{\link{hhh4}}.
}

\usage{
fe(x, unitSpecific = FALSE, which = NULL, initial = NULL)

ri(type = c("iid","car"), corr = c("none", "all"),
   initial.fe = 0, initial.var = -.5, initial.re = NULL)
}

\arguments{
  \item{x}{an expression like \code{sin(2*pi*t/52)} involving the time
    variable \code{t}, or just \code{1} for an intercept.
    In general this covariate expression might use any variables
    contained in the \code{control$data} argument of the parent
    \code{\link{hhh4}} call.}
  \item{unitSpecific}{logical indicating if the effect of \code{x} should
    be unit-specific. This is a convenient shortcut for
    \code{which = rep(TRUE, nUnits)}, where \code{nUnits} is the number
    of units (i.e., columns of the \code{"sts"} object).}
  \item{which}{vector of logicals indicating which unit(s) should get a
    unit-specific parameter. For units with a \code{FALSE} value, the
    effect term for \code{x} will be zero in the log-linear predictor.
    Note especially that setting a \code{FALSE} value for the intercept
    term of a unit, e.g.,
    \code{ar = list(f = ~-1 + fe(1, which=c(TRUE, FALSE)))}
    in a bivariate \code{hhh4} model, does \emph{not} mean that the
    (autoregressive) model component is omitted for this unit, but that
    \eqn{\log(\lambda_1) = \alpha_1} and \eqn{\log(\lambda_2) = 0},
    which is usually not of interest. At the moment, omitting an
    autoregressive effect for a specific unit is not possible.\cr
    If \code{which=NULL}, the parameter is assumed to be the same for all
    units.}
  \item{initial}{initial values (on internal scale!) for the fixed
    effects used for optimization. The default (\code{NULL}) means to use
    zeroes.}
  \item{type}{random intercepts either follow an IID or a CAR model.}
  \item{corr}{whether random effects in different components (such as
    \code{ar} and \code{end}) should be correlated or not.}
  \item{initial.fe}{initial value for the random intercept mean.}
  \item{initial.var}{initial values (on internal scale!) for the variance
    components used for optimization.}
  \item{initial.re}{initial values (on internal scale!) for the random
    effects used for optimization. With the default \code{NULL}, random
    numbers from a normal distribution with zero mean and variance 0.001
    are used.}
}

\seealso{
  \code{\link{addSeason2formula}};
  \code{hhh4} model specifications in \code{vignette("hhh4")},
  \code{vignette("hhh4_spacetime")} or on the help page of
  \code{\link{hhh4}}.
}

\note{
  These special functions are intended for use in component formulae of
  \code{hhh4} models and are not exported from the package namespace.

  If unit-specific fixed or random intercepts are specified, an overall
  intercept must be excluded (by \code{-1}) in the component formula.
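  For illustration, a model with unit-specific intercepts in the
  autoregressive component and zero-mean random intercepts in the endemic
  component could be specified as follows (a minimal sketch, assuming a
  multivariate \code{"sts"} object \code{stsObj}):
  \preformatted{
control <- list(
    ar  = list(f = ~ -1 + fe(1, unitSpecific = TRUE)),  # one intercept per unit
    end = list(f = ~ -1 + ri(type = "iid")),            # random intercepts
    family = "NegBin1"
)
# fit <- hhh4(stsObj, control)
  }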
}
\keyword{regression}
surveillance/man/find.kh.Rd0000644000176200001440000000247112375711212015336 0ustar liggesusers\name{find.kh}
\alias{find.kh}
\title{Determine the k and h values in a standard normal setting}
\description{
  Given a specification of the average run length in the (a)cceptance and
  (r)ejection setting, determine the k and h values in a standard normal
  setting.
}
\usage{
find.kh(ARLa = 500, ARLr = 7, sided = "one", method = "BFGS", verbose=FALSE)
}
\arguments{
  \item{ARLa}{average run length in the acceptance setting, a.k.a. the
    in-control state. Specifies the number of observations before a false
    alarm.}
  \item{ARLr}{average run length in the rejection state, a.k.a. the
    out-of-control state. Specifies the number of observations before an
    increase is detected (i.e., the detection delay).}
  \item{sided}{one-sided CUSUM scheme}
  \item{method}{Which method to use in the function \code{\link{optim}}.
    Standard choice is BFGS, but in some situations Nelder-Mead can be
    advantageous.}
  \item{verbose}{gives extra information about the root-finding process}
}
\value{
  Returns a list with reference value k and decision interval h.
}
\details{
  Functions from the \pkg{spc} package are used in a simple univariate
  root-finding problem.
}
\examples{
if (requireNamespace("spc")) {
    find.kh(ARLa=500,ARLr=7,sided="one")
    find.kh(ARLa=500,ARLr=3,sided="one")
}
}
\keyword{models}
surveillance/man/backprojNP.Rd0000644000176200001440000002574513324114314016052 0ustar liggesusers\encoding{latin1}
\name{backprojNP}
\alias{backprojNP}
%Internal functions
%\alias{backprojNP.fit}
%\alias{naninf2zero}
%\alias{em.step.becker}

\title{
  Non-parametric back-projection of incidence cases to exposure cases
  using a known incubation time as in Becker et al. (1991)
}

\description{
  The function is an implementation of the non-parametric back-projection
  of incidence cases to exposure cases described in Becker et al. (1991).
  The method back-projects exposure times from a univariate time series
  containing the number of symptom onsets per time unit. Here, the delay
  between exposure and symptom onset for an individual is seen as a
  realization of a random variable governed by a known probability mass
  function.
  The back-projection function calculates the expected number of exposures
  \eqn{\lambda_t}{lambda_t} for each time unit under the assumption of a
  Poisson distribution, but without any parametric assumption on how the
  \eqn{\lambda_t}{lambda_t} evolve in time.

  Furthermore, the function contains a bootstrap-based procedure, as given
  in Yip et al. (2011), which allows an indication of uncertainty in the
  estimated \eqn{\lambda_t}{lambda_t}. The procedure is equivalent to the
  suggestion in Becker and Marschner (1993). However, the present
  implementation in \code{backprojNP} allows only a univariate time
  series, i.e. simultaneous age groups as in Becker and Marschner (1993)
  are not possible.

  The method in Becker et al. (1991) was originally developed for the
  back-projection of AIDS incidence, but it is equally useful for
  analysing the epidemic curve in outbreak situations of a disease with
  long incubation time, e.g. in order to qualitatively investigate the
  effect of intervention measures.
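  In formula terms, the expected number of symptom onsets at time \eqn{t}
  is linked to the exposures through the convolution
  \deqn{E(Y_t) = \sum_{d=0}^{d_{max}} \lambda_{t-d} f(d),}
  where \eqn{f} denotes the probability mass function of the incubation
  time; this is also the relation evaluated for the fitted values in the
  example below.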
}

\usage{
backprojNP(sts, incu.pmf,
   control = list(k = 2,
                  eps = rep(0.005,2),
                  iter.max=rep(250,2),
                  Tmark = nrow(sts),
                  B = -1,
                  alpha = 0.05,
                  verbose = FALSE,
                  lambda0 = NULL,
                  eq3a.method = c("R","C"),
                  hookFun = function(stsbp) {}),
   \dots)
}

\arguments{
  \item{sts}{
    an object of class \code{"\linkS4class{sts}"} (or one that can be
    coerced to that class): contains the observed number of symptom
    onsets as a time series.
  }
  \item{incu.pmf}{Probability mass function (PMF) of the incubation
    time. The PMF is specified as a vector or matrix with the value of
    the PMF evaluated at \eqn{0,\ldots,d_{max}}{0,...,d_max}, i.e. note
    that the support includes zero. The value of
    \eqn{d_{max}}{d_max} is automatically calculated as
    \code{length(incu.pmf)-1} or \code{nrow(incu.pmf)-1}. Note that if
    the sts object has more than one column, then for the backprojection
    the incubation time is either recycled for all components or, if it
    is a matrix with the same number of columns as the sts object, the
    \eqn{k}{k}'th column of \code{incu.pmf} is used for the
    backprojection of the \eqn{k}{k}'th series.
  }
  \item{control}{A list with named arguments controlling the
    functionality of the non-parametric back-projection.
    \describe{
      \item{\code{k}}{An integer representing the smoothing parameter to
        use in the smoothing step of the EMS algorithm. Needs to be an
        even number.
      }
      \item{\code{eps}}{A vector of length two representing the
        convergence threshold \eqn{\epsilon}{epsilon} of the EMS
        algorithm, see Details for further information. The first value
        is the threshold to use in the \eqn{k=0}{k=0} loop, which forms
        the values for the parametric bootstrap. The second value is the
        threshold to use in the actual fit and bootstrap fitting using
        the specified \code{k}. If \code{eps} is only of length one,
        then this number is replicated twice.
      }
      \item{\code{Tmark}}{Numeric with \eqn{T'\leq T}. Upper time limit
        on which to base convergence, i.e. only the values
        \eqn{\lambda_1,\ldots,\lambda_{T'}} are monitored for
        convergence. See details.
      }
      \item{\code{iter.max}}{
        The maximum number of EM iterations to do before stopping.
      }
      \item{\code{B}}{
        Number of parametric bootstrap samples to perform from an initial
        k=0 fit. For each sample a back projection is performed. See
        Becker and Marschner (1993) for details.
      }
      \item{\code{alpha}}{(1-\eqn{\alpha}{alpha})*100\% confidence
        intervals are computed based on the percentile method.
      }
      \item{\code{verbose}}{(boolean). If \code{TRUE}, show extra
        progress and debug information.
      }
      \item{\code{lambda0}}{Start values for lambda. Vector needs to be
        of length \code{nrow(sts)}.
      }
      \item{\code{eq3a.method}}{A single character being either
        \code{"R"} or \code{"C"} depending on whether the three nested
        loops of equation 3a in Becker et al. (1991) are to be executed
        as safe R code (can be extremely slow, however the implementation
        is not optimized for speed) or as C code (can be more than 200
        times faster!). However, the C implementation is experimental and
        can hang R if, e.g., the time series does not go far enough back.
      }
      \item{\code{hookFun}}{
        Hook function called for each iteration of the EM algorithm. The
        function should take a single argument \code{stsbp} of class
        \code{"\linkS4class{stsBP}"}. It will have the \code{lambda} slot
        set to the current value of lambda. If no action is desired just
        leave the function body empty (default). Additional arguments are
        possible.
      }
    }
  }
  \item{\dots}{Additional arguments are sent to the hook function.
  }
}
\details{
  Becker et al.
  (1991) specify a non-parametric back-projection algorithm based on the
  Expectation-Maximization-Smoothing (EMS) algorithm.

  In the present implementation the algorithm iterates until
  \deqn{\frac{||\lambda^{(k+1)} - \lambda^{(k)}||}{||\lambda^{(k)}||} <
    \epsilon}
  This is a slight adaptation of the proposals in Becker et
  al. (1991). If \eqn{T} is the length of \eqn{\lambda} then one can
  avoid instability of the algorithm near the end by considering only
  the \eqn{\lambda}{lambda}'s with index \eqn{1,\ldots,T'}.

  See the references for further information.
}
\value{
  \code{backprojNP} returns an object of \code{"\linkS4class{stsBP}"}.
}
\references{
  Becker NG, Watson LF and Carlin JB (1991), A method for
  non-parametric back-projection and its application to AIDS data,
  Statistics in Medicine, 10:1527-1542.

  Becker NG and Marschner IC (1993), A method for estimating the
  age-specific relative risk of HIV infection from AIDS incidence data,
  Biometrika, 80(1):165-178.

  Yip PSF, Lam KF, Xu Y, Chau PH, Xu J, Chang W, Peng Y, Liu Z, Xie X and
  Lau HY (2011), Reconstruction of the Infection Curve for SARS Epidemic
  in Beijing, China Using a Back-Projection Method, Communications in
  Statistics - Simulation and Computation, 37(2):425-433.

  Werber D, King LA, M\enc{ü}{ue}ller L, Follin P, Buchholz U, Bernard H,
  Rosner BM, Ethelberg S, de Valk H and H\enc{ö}{oe}hle M (2013),
  Associations of Age and Sex on Clinical Outcome and Incubation Period
  of Shiga toxin-producing Escherichia coli O104:H4 Infections, 2011,
  American Journal of Epidemiology, 178(6):984-992.
}
\author{
  Michael \enc{Hö}{Hoe}hle with help by Daniel \enc{Sabanés Bové}{Sabanes
  Bove} for the \pkg{Rcpp} interface
}
\note{
  The method is still experimental. A proper plot routine for
  \code{stsBP} objects is currently missing.
}
\examples{
#Generate an artificial outbreak of size n starting at time t0 and being of length l
n <- 1e3 ; t0 <- 23 ; l <- 10

#PMF of the incubation time is an interval-censored gamma distribution
#with mean 15 truncated at 25.
dmax <- 25
inc.pmf <- c(0,(pgamma(1:dmax,15,1.4) - pgamma(0:(dmax-1),15,1.4))/pgamma(dmax,15,1.4))
#Function to sample from the incubation time
rincu <- function(n) {
  sample(0:dmax, size=n, replace=TRUE, prob=inc.pmf)
}
#Sample time of exposure and length of incubation time
set.seed(123)
exposureTimes <- t0 + sample(x=0:(l-1),size=n,replace=TRUE)
symptomTimes <- exposureTimes + rincu(n)

#Time series of exposure (truth) and symptom onset (observed)
X <- table( factor(exposureTimes,levels=1:(max(symptomTimes)+dmax)))
Y <- table( factor(symptomTimes,levels=1:(max(symptomTimes)+dmax)))
#Convert Y to an sts object
Ysts <- sts(Y)

#Plot the outbreak
plot(Ysts, xaxis.labelFormat=NULL, legend=NULL)
#Add true number of exposures to the plot
lines(1:length(Y)+0.2,X,col="red",type="h",lty=2)

#Helper function to show the EM step
plotIt <- function(cur.sts) {
  plot(cur.sts,xaxis.labelFormat=NULL, legend=NULL,ylim=c(0,140))
}

#Call non-parametric back-projection function with hook function but
#without bootstrapped confidence intervals
bpnp.control <- list(k=0,eps=rep(0.005,2),iter.max=rep(250,2),B=-1,hookFun=plotIt,verbose=TRUE)

#Fast C version (use argument: eq3a.method="C")!
sts.bp <- backprojNP(Ysts, incu.pmf=inc.pmf,
    control=modifyList(bpnp.control,list(eq3a.method="C")), ylim=c(0,max(X,Y)))

#Show result
plot(sts.bp,xaxis.labelFormat=NULL,legend=NULL,lwd=c(1,1,2),lty=c(1,1,1),main="")
lines(1:length(Y)+0.2,X,col="red",type="h",lty=2)

#Do the convolution for the expectation
mu <- matrix(0,ncol=ncol(sts.bp),nrow=nrow(sts.bp))
#Loop over all series
for (j in 1:ncol(sts.bp)) {
  #Loop over all time points
  for (t in 1:nrow(sts.bp)) {
    #Convolution, note support of inc.pmf starts at zero (move idx by 1)
    i <- seq_len(t)
    mu[t,j] <- sum(inc.pmf[t-i+1] * upperbound(sts.bp)[i,j],na.rm=TRUE)
  }
}
#Show the fit
lines(1:nrow(sts.bp)-0.5,mu[,1],col="green",type="s",lwd=3)

#Non-parametric back-projection including bootstrap CIs. B=10 is only
#used for illustration in the documentation example
#In practice use a realistic value of B=1000 or more.
bpnp.control2 <- modifyList(bpnp.control, list(hookFun=NULL,k=2,B=10,eq3a.method="C"))
\dontrun{
bpnp.control2 <- modifyList(bpnp.control, list(hookFun=NULL,k=2,B=1000,eq3a.method="C"))
}
sts.bp2 <- backprojNP(Ysts, incu.pmf=inc.pmf, control=bpnp.control2)

######################################################################
# Plot the result. This is currently a manual routine.
# ToDo: Need to specify a plot method for stsBP objects which also
#       shows the CI.
#
# Parameters:
#  stsBP - object of class stsBP which is to be plotted.
######################################################################
plot.stsBP <- function(stsBP) {
  maxy <- max(observed(stsBP),upperbound(stsBP),stsBP@ci,na.rm=TRUE)
  plot(upperbound(stsBP),type="n",ylim=c(0,maxy), ylab="Cases",xlab="time")
  if (!all(is.na(stsBP@ci))) {
    polygon( c(1:nrow(stsBP),rev(1:nrow(stsBP))),
             c(stsBP@ci[2,,1],rev(stsBP@ci[1,,1])),col="lightgray")
  }
  lines(upperbound(stsBP),type="l",lwd=2)
  legend(x="topright",c(expression(lambda[t])),lty=c(1),col=c(1),fill=c(NA),border=c(NA),lwd=c(2))

  invisible()
}

#Plot the result of k=0 and add truth for comparison. No CIs available
plot.stsBP(sts.bp)
lines(1:length(Y),X,col=2,type="h")
#Same for k=2
plot.stsBP(sts.bp2)
lines(1:length(Y),X,col=2,type="h")
}
\keyword{models}
\keyword{optimize}
surveillance/man/momo.Rd0000644000176200001440000000321414004512307014753 0ustar liggesusers\name{momo}
\alias{momo}
\docType{data}
\encoding{latin1}
\title{Danish 1994-2008 all-cause mortality data for eight age groups}
\description{
  Weekly number of deaths from all causes in Denmark, 1994-2008, in each
  of the eight age groups <1, 1-4, 5-14, 15-44, 45-64, 65-74, 75-84 and
  85 years.
}
\usage{data(momo)}
\details{
  The object of class \code{"\linkS4class{sts}"} contains the weekly
  number of deaths from all causes in Denmark, 1994-2008, for each of the
  eight age groups <1, 1-4, 5-14, 15-44, 45-64, 65-74, 75-84 and 85
  years.

  A special feature of such EuroMOMO data is that weeks are handled as
  defined by the ISO 8601 standard, which is supported by the
  \code{"sts"} class.

  The \code{population} slot of the \code{momo} object contains the
  population size in each of the eight age groups. These are yearly data
  obtained from the StatBank Denmark.

  The aim of the EuroMOMO project is to develop and strengthen real-time
  monitoring of mortality across Europe; this will enhance the management
  of serious public health risks such as pandemic influenza, heat waves
  and cold snaps. For further details see the homepage of the EuroMOMO
  project at \url{https://www.euromomo.eu/}.
}
\source{
  Department of Epidemiology, Statens Serum Institute, Copenhagen, Denmark

  StatBank Denmark, Statistics Denmark, \url{https://www.statistikbanken.dk/}
}
\examples{
data("momo")
plot(momo)
}
\references{
  H\enc{ö}{oe}hle, M. and Mazick, A. (2009) Aberration detection in R
  illustrated by Danish mortality monitoring. Book chapter to appear in
  T. Kass-Hout and X. Zhang (Eds.) Biosurveillance: A Health Protection
  Priority, CRC Press.
}
\keyword{datasets}
surveillance/man/hhh4_methods.Rd0000644000176200001440000001447313507340445016374 0ustar liggesusers\encoding{latin1}
\name{hhh4_methods}
\alias{print.hhh4}
\alias{summary.hhh4}
\alias{nobs.hhh4}
\alias{formula.hhh4}
\alias{logLik.hhh4}
\alias{coef.hhh4}
\alias{vcov.hhh4}
\alias{fixef.hhh4}
\alias{ranef.hhh4}
\alias{coeflist.hhh4}
\alias{confint.hhh4}
\alias{residuals.hhh4}
%% internal methods without need for documentation
%\alias{print.summary.hhh4}
%\alias{terms.hhh4}

\title{
Print, Summary and other Standard Methods for \code{"hhh4"} Objects
}

\description{
  Besides \code{print} and \code{summary} methods there are also some
  standard extraction methods defined for objects of class \code{"hhh4"}
  resulting from a call to \code{\link{hhh4}}.
  The implementation is illustrated in Meyer et al. (2017, Section 5),
  see \code{vignette("hhh4_spacetime")}.
}

\usage{
\method{print}{hhh4}(x, digits = max(3, getOption("digits") - 3), ...)
\method{summary}{hhh4}(object, maxEV = FALSE, ...)

\method{coef}{hhh4}(object, se = FALSE, reparamPsi = TRUE,
     idx2Exp = NULL, amplitudeShift = FALSE, ...)
\method{fixef}{hhh4}(object, ...)
\method{ranef}{hhh4}(object, tomatrix = FALSE, intercept = FALSE, ...)
\method{coeflist}{hhh4}(x, ...)

\method{formula}{hhh4}(x, ...)
\method{nobs}{hhh4}(object, ...)
\method{logLik}{hhh4}(object, ...)

\method{vcov}{hhh4}(object, reparamPsi = TRUE,
     idx2Exp = NULL, amplitudeShift = FALSE, ...)
\method{confint}{hhh4}(object, parm, level = 0.95,
        reparamPsi = TRUE, idx2Exp = NULL, amplitudeShift = FALSE, ...)

\method{residuals}{hhh4}(object, type = c("deviance", "response"), ...)
}

\arguments{
  \item{x, object}{an object of class \code{"hhh4"}.}
  \item{digits}{the number of significant digits to use when printing.}
  \item{maxEV}{logical indicating if the summary should contain the
    (range of the) dominant eigenvalue as a measure of the importance of
    the epidemic components.
By default, the value is not calculated as this may take some seconds depending on the number of time points and units in \code{object$stsObj}.} \item{\dots}{ For the \code{print}, \code{summary}, \code{fixef}, \code{ranef}, and \code{coeflist} methods: arguments passed to \code{coef}.\cr For the remaining methods: unused (argument of the generic). } \item{reparamPsi}{ logical. If \code{TRUE} (default), the overdispersion parameter from the negative binomial distribution is transformed from internal scale (-log) to standard scale, where zero corresponds to a Poisson distribution. } \item{se}{logical switch indicating if standard errors are required} \item{idx2Exp}{integer vector selecting the parameters which should be returned on exp-scale. Alternatively, \code{idx2Exp = TRUE} will exp-transform all parameters except for those associated with \code{log()} covariates or already affected by \code{reparamPsi} or \code{amplitudeShift}.} \item{amplitudeShift}{logical switch indicating whether the parameters for sine/cosine terms modelling seasonal patterns (see \code{\link{addSeason2formula}}) should be transformed to an amplitude/shift formulation.} \item{tomatrix}{logical. If \code{FALSE} (default), the vector of all random effects is returned (as used internally). However, for random intercepts of \code{type="car"}, the number of parameters is one less than the number of regions and the individual parameters are not obviously linked to specific regions. Setting \code{tomatrix} to \code{TRUE} returns a more useful representation of random effects in a matrix with as many rows as there are regions and as many columns as there are random effects. Here, any CAR-effects are transformed to region-specific effects.} \item{intercept}{logical. If \code{FALSE} (default), the returned random effects represent zero-mean deviations around the corresponding global intercepts of the \emph{log}-linear predictors. Setting \code{intercept=TRUE} adds these global intercepts to the result (and implies \code{tomatrix=TRUE}).} \item{parm}{a vector of numbers or names, specifying which parameters are to be given confidence intervals. If missing, all parameters are considered.} \item{level}{the confidence level required.} \item{type}{the type of residuals which should be returned. The alternatives are \code{"deviance"} (default) and \code{"response"}.} } \value{ The \code{\link{coef}}-method returns all estimated (regression) parameters from a \code{\link{hhh4}} model. If the model includes random effects, those can be extracted with \code{ranef}, whereas \code{fixef} returns the fixed parameters. The \code{coeflist}-method extracts the model coefficients in a list (by parameter group). The \code{\link{formula}}-method returns the formulae used for the three log-linear predictors in a list with elements \code{"ar"}, \code{"ne"}, and \code{"end"}. The \code{\link{nobs}}-method returns the number of observations used for model fitting. The \code{\link{logLik}}-method returns an object of class \code{"logLik"} with \code{"df"} and \code{"nobs"} attributes. For a random effects model, the value of the \emph{penalized} log-likelihood at the MLE is returned, but degrees of freedom are not available (\code{NA_real_}). As a consequence, \code{\link{AIC}} and \code{\link{BIC}} are only well defined for models without random effects; otherwise these functions return \code{NA_real_}. The \code{\link{vcov}}-method returns the estimated variance-covariance matrix of the \emph{regression} parameters. 
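  These extraction methods can be combined; for instance, exp-transformed
  coefficients together with Wald confidence intervals could be obtained
  as follows (a sketch, assuming a fitted \code{"hhh4"} model \code{fit}):
  \preformatted{
# rate ratios with confidence intervals (sketch)
cbind(Estimate = coef(fit, idx2Exp = TRUE),
      confint(fit, idx2Exp = TRUE))
  }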
  The estimated variance-covariance matrix of random effects is available
  as \code{object$Sigma}.

  The \code{\link{confint}}-method returns Wald-type confidence intervals
  (assuming asymptotic normality).

  The \code{\link{residuals}}-method extracts raw (\code{"response"}) or
  scaled (\code{"deviance"}) residuals from the model fit similar to
  \code{\link{residuals.glm}} for Poisson or NegBin GLM's.
}

\seealso{
  the \code{\link[=plot.hhh4]{plot}} and \code{\link[=update.hhh4]{update}}
  methods for fitted \code{"hhh4"} models.
}

\author{
  Michaela Paul and Sebastian Meyer
}

\references{
  Meyer, S., Held, L. and \enc{Hö}{Hoe}hle, M. (2017):
  Spatio-temporal analysis of epidemic phenomena using the \R package
  \pkg{surveillance}.
  \emph{Journal of Statistical Software}, \bold{77} (11), 1-55.
  \doi{10.18637/jss.v077.i11}
}

\keyword{methods}
\keyword{print}
surveillance/man/stsplot.Rd0000644000176200001440000000702313507411303015515 0ustar liggesusers\name{stsplot}
\docType{methods}
\alias{plot.sts}
\alias{plot,sts,missing-method}
\alias{plot,stsNC,missing-method}
\alias{stsplot} % for convenience

\title{Plot-Methods for Surveillance Time-Series Objects}

\description{
  This page gives an overview of plot types for objects of class
  \code{"sts"}.
}

\usage{
\S4method{plot}{sts,missing}(x, type = observed ~ time | unit, \dots)
}

\arguments{
  \item{x}{an object of class \code{"\linkS4class{sts}"}.}
  \item{type}{see Details.}
  \item{\dots}{arguments passed to the \code{type}-specific plot
    function.}
}

\details{
  There are various types of plots which can be produced from an
  \code{"sts"} object. The \code{type} argument specifies the desired
  plot as a formula, which defaults to \code{observed ~ time | unit},
  i.e., plot the time series of each unit separately. Arguments to
  specific plot functions can be passed as further arguments (\dots).
  The following list describes the plot variants:

  \describe{
    \item{\code{observed ~ time | unit}}{The default type shows
      \code{ncol(x)} plots, each containing the time series of one
      observational unit. The actual plotting per unit is done by the
      function \code{\link{stsplot_time1}}, called sequentially from
      \code{\link{stsplot_time}}.\cr
      A \CRANpkg{ggplot2}-based alternative for this type of plot is
      provided through an \code{\link[=autoplot.sts]{autoplot}}-method
      for \code{"sts"} objects.
    }

    \item{\code{observed ~ time}}{The observations in \code{x} are
      first \code{\link[=aggregate.sts]{aggregated}} over units
      and the resulting univariate time-series is plotted via the
      function \code{\link{stsplot_time}}.}

    \item{\code{alarm ~ time}}{Generates a so-called alarm plot for a
      multivariate \code{sts} object. For each time point and each
      series it is shown whether there is an alarm. In case of
      hierarchical surveillance the user can pass an additional
      argument \code{lvl}, which is a vector of the same length as rows
      in \code{x} specifying for each time series its level.
    }

    \item{\code{observed ~ unit}}{
      produces a map of counts (or incidence) per region aggregated over
      time. See \code{\link{stsplot_space}} for optional arguments,
      details and examples.
    }

    \item{\code{observed ~ 1 | unit}}{old version of the map plot, which
      supports shading regions with an alarm. The plotting is done by the
      function \code{\link{stsplot_spacetime}}.
      Use \code{type=observed~unit} for the new implementation as
      function \code{\link{stsplot_space}} (without alarm support,
      though).
    }

    \item{\code{observed ~ 1 | unit * time}}{old version for animated
      maps via the \code{\link{stsplot_spacetime}} function. Each of the
      \code{nrow(x)} frames contains the number of counts per region for
      the current row in the \code{observed} matrix. It is possible to
      redirect the output into files, e.g. to generate an animated GIF.
      NOTE: the new
Each of the \code{nrow(x)} frames contains the number of counts per region for the current row in the \code{observed} matrix. It is possible to redirect the output into files, e.g. to generate an animated GIF. NOTE: the new \code{\link{animate.sts}} method supersedes this plot \code{type}! } } } \value{ \code{NULL} (invisibly). The methods are called for their side-effects. } \seealso{ the documentation of the individual plot types \code{\link{stsplot_time}}, \code{\link{stsplot_space}}, \code{\link{stsplot_spacetime}} (obsolete), as well as the \code{animate}-method \code{\link{animate.sts}}. \code{\link{plot.survRes}} is the old implementation. } \keyword{ts} \keyword{spatial} \keyword{hplot} \keyword{methods} surveillance/man/isScalar.Rd0000644000176200001440000000101512143464746015561 0ustar liggesusers\name{isScalar} \alias{isScalar} \title{ Checks if the Argument is Scalar } \description{ The simple helper function \code{isScalar} just checks if its argument is a scalar, i.e. a numeric vector of length 1. It is implemented as \code{length(x) == 1L && is.vector(x, mode = "numeric")}. } \usage{ isScalar(x) } \arguments{ \item{x}{an \code{R} object.} } \value{ A length-one logical vector. } %% \examples{ %% isScalar(TRUE) # FALSE %% isScalar(1:10) # FALSE %% isScalar(pi) # TRUE %% } \keyword{internal} surveillance/man/poly2adjmat.Rd0000644000176200001440000000334713174104255016247 0ustar liggesusers\name{poly2adjmat} \alias{poly2adjmat} \title{ Derive Adjacency Structure of \code{"SpatialPolygons"} } \description{ Wrapping around functionality of the \pkg{spdep} package, this function computes the symmetric, binary (0/1), adjacency matrix from a \code{"\linkS4class{SpatialPolygons}"} object. It essentially applies \code{\link[spdep]{nb2mat}(\link[spdep]{poly2nb}(SpP, ...), style="B", zero.policy=zero.policy)}. } \usage{ poly2adjmat(SpP, ..., zero.policy = TRUE) } \arguments{ \item{SpP}{an object inheriting from \code{"\linkS4class{SpatialPolygons}"}.} \item{\dots}{arguments passed to \code{\link[spdep]{poly2nb}}. Its \code{snap} argument might be particularly useful to handle maps with sliver polygons.} \item{zero.policy}{logical indicating if islands are allowed, see \code{\link[spdep]{nb2mat}}.} } \value{ a symmetric numeric indicator matrix of size \code{length(SpP)}^2 representing polygon adjacencies. } \author{ (of this wrapper) Sebastian Meyer } \seealso{ \code{\link[spdep]{poly2nb}} in package \pkg{spdep} } \examples{ if (requireNamespace("spdep")) { ## generate adjacency matrix for districts of Bayern and Baden-Wuerttemberg data("fluBYBW") adjmat <- poly2adjmat(fluBYBW@map) ## same as already stored in the neighbourhood slot (in different order) stopifnot(all.equal(adjmat, neighbourhood(fluBYBW)[rownames(adjmat),colnames(adjmat)])) ## a visual check of the district-specific number of neighbours plot(fluBYBW@map) text(coordinates(fluBYBW@map), labels=rowSums(adjmat==1), font=2, col=2) ## the neighbourhood graph can be plotted with spdep plot(spdep::mat2listw(adjmat), coordinates(fluBYBW@map)) } } \keyword{spatial} \keyword{graphs} surveillance/man/formatDate.Rd0000644000176200001440000000162313430615173016103 0ustar liggesusers\name{formatDate} \alias{formatDate} \title{ Convert Dates to Character (Including Quarter Strings) } \description{ An extension of \code{\link{format.Date}} with additional formatting strings for quarters. Used by \code{\link{linelist2sts}}. 
}
\usage{
formatDate(x, format)
}
\arguments{
  \item{x}{a \code{"\link{Date}"} object.}
  \item{format}{
    a character string, see \code{\link{strftime}} for possible
    specifications. Further to these base formats, \code{formatDate}
    implements:
    \describe{
      \item{\code{"\%Q"}}{the quarter as a numeric}
      \item{\code{"\%OQ"}}{the quarter as a roman numeral}
      \item{\code{"\%q"}}{the day within the quarter}
    }
  }
}
\value{
  a character vector representing the input date(s) \code{x}
  following the \code{format} specification.
}
\seealso{
  \code{\link{strftime}}
}
\examples{
formatDate(Sys.Date(), "\%G/\%OQ/\%q")
}
\keyword{chron}
surveillance/man/untie.Rd0000644000176200001440000001075613266056545015146 0ustar liggesusers\name{untie}
\alias{untie}
\alias{untie.epidataCS}
\alias{untie.matrix}
\alias{untie.default}
\title{
  Randomly Break Ties in Data
}
\description{
  This is a generic function intended to randomly break tied data in a
  way similar to what \code{\link{jitter}} does: tie-breaking is
  performed by shifting \emph{all} data points by a random amount.
  The \pkg{surveillance} package defines methods for matrices,
  \code{"epidataCS"}, and a default method for numeric vectors.
}
\usage{
untie(x, amount, ...)

\method{untie}{epidataCS}(x, amount = list(t=NULL, s=NULL),
      minsep = list(t=0, s=0), direction = "left", keep.sources = FALSE,
      ..., verbose = FALSE)
\method{untie}{matrix}(x, amount = NULL, minsep = 0,
      constraint = NULL, giveup = 1000, ...)
\method{untie}{default}(x, amount = NULL, minsep = 0,
      direction = c("symmetric", "left", "right"), sort = NULL,
      giveup = 1000, ...)
}
\arguments{
  \item{x}{
    the data to be untied.
  }
  \item{amount}{
    upper bound for the random amount by which data are shifted.
    \code{NULL} means to use a data-driven default, which equals
    the minimum separation of the data points for the non-symmetric
    default method and its half for the symmetric default method and
    the \code{matrix} method.
  }
  \item{minsep}{minimum separation of jittered points. Can only be
    obeyed if much smaller than \code{amount} (also depending on the
    number of points). \code{minsep>0} is currently only implemented for
    the spatial (matrix) method.}
  \item{keep.sources}{
    logical (\code{FALSE}).
    If \code{TRUE}, the original list of possible event sources in
    \code{x$events$.sources} will be preserved. For instance, events
    observed at the same time did by definition not trigger each other;
    however, after random tie-breaking one event will precede the other
    and be considered as a potential source of infection for the latter,
    although it could just as well be the other way round. Enabling
    \code{keep.sources} will use the \code{.sources} list from the
    original (tied) \code{"epidataCS"} object. Note, however, that an
    update is forced within \code{twinstim} if a subset of the data is
    selected for model fitting or if a different \code{qmatrix} is
    supplied.
  }
  \item{constraint}{
    an object of class \code{"\linkS4class{SpatialPolygons}"}
    representing the domain which the points of the matrix should belong
    to -- before and after jittering.
  }
  \item{giveup}{number of attempts after which the
    algorithm should stop trying to generate new points.}
  \item{direction}{
    one of \code{"symmetric"} (default), \code{"left"}, or
    \code{"right"}, indicating in which direction vector elements should
    be shifted.
  }
  \item{sort}{
    logical indicating if the jittered vector should be sorted. Defaults
    to doing so if the original vector was already sorted.
  }
  \item{\dots}{
    For the \code{"epidataCS"}-method: arguments passed to the
    \code{matrix}- or \code{default}-method (\code{giveup}).
    Unused in other methods.
  }
  \item{verbose}{logical passed to \code{\link{as.epidataCS}}.}
}
\details{
  For numeric vectors (default method), the jittered version is the
  same as for \code{\link{jitter}(x, amount=amount)}, if
  \code{direction="symmetric"} (and \code{amount} is non-\code{NULL}),
  and otherwise uses
  \code{x} \dQuote{+-} \code{runif(length(x), 0, amount)}.

  For matrices, a vector uniformly drawn from the disc with radius
  \code{amount} is added to each point (row).

  For \code{"epidataCS"}, \code{amount} is a list stating the amounts
  for the temporal and/or spatial dimension, respectively. It then
  uses the specific methods with arguments \code{constraint=x$W},
  \code{direction}, and \code{sort=TRUE}. Note that this implements a
  simplistic approach of tie-breaking where all events are assumed to
  be subject to the same amounts of censoring, and the default amounts
  may not be sensible choices.
}
\value{
  the untied (jittered) data.
}
\author{
  Sebastian Meyer
}
\seealso{
  \code{\link{jitter}}
}
\examples{
# vector example
set.seed(123)
untie(c(rep(1,3), rep(1.2, 4), rep(3,3)), direction="left", sort=FALSE)

# spatial example
data(imdepi)
coords <- coordinates(imdepi$events)
table(duplicated(coords))
plot(coords, cex=sqrt(multiplicity(coords)))
set.seed(1)
coords_untied <- untie(coords)
stopifnot(!anyDuplicated(coords_untied))
points(coords_untied, col=2)  # shifted by very small amount in this case
}
\keyword{utilities}
\keyword{manip}
\keyword{dplot}
surveillance/man/algo.rki.Rd0000644000176200001440000001000113165505075015516 0ustar liggesusers\name{algo.rki}
\alias{algo.rkiLatestTimepoint}
\alias{algo.rki}
\alias{algo.rki1}
\alias{algo.rki2}
\alias{algo.rki3}
\encoding{latin1}
\title{The system used at the RKI}
\description{
  Evaluation of timepoints with the detection algorithms used by the RKI
}
\usage{
algo.rkiLatestTimepoint(disProgObj, timePoint = NULL,
                        control = list(b = 2, w = 4, actY = FALSE))
algo.rki(disProgObj, control = list(range = range, b = 2, w = 4, actY = FALSE))
algo.rki1(disProgObj, control = list(range = range))
algo.rki2(disProgObj, control = list(range = range))
algo.rki3(disProgObj, control = list(range = range))
}
\arguments{
  \item{disProgObj}{object of class disProg (including the observed and
    the state chain).}
  \item{timePoint}{time point which should be evaluated in
    \code{algo.rkiLatestTimepoint}. The default is to use the latest
    timepoint.}
  \item{control}{control object: \code{range} determines the desired
    timepoints which should be evaluated, \code{b} describes the number
    of years to go back for the reference values, \code{w} is the half
    window width for the reference values around the appropriate
    timepoint and \code{actY} is a logical deciding whether the current
    year of \code{timePoint} should also contribute \code{w} reference
    values from the past. For instance, with \code{b = 2},
    \code{w = 4} and \code{actY = FALSE}, a total of
    \code{(2*w+1)*b} = 18 reference values is used. By default \code{b},
    \code{w} and \code{actY} are set for the RKI 3 system.
  }
}
\value{
  \code{algo.rkiLatestTimepoint} returns a list of class \code{survRes}
  (surveillance result), which includes the alarm value (alarm = 1, no
  alarm = 0) for recognizing an outbreak, the threshold value for
  recognizing the alarm and the input object of class disProg.
  \code{algo.rki} gives a list of class \code{survRes} which includes the
  vector of alarm values for every timepoint in \code{range}, the vector
  of threshold values for every timepoint in \code{range} for the system
  specified by \code{b}, \code{w} and \code{actY}, the range and the
  input object of class disProg. \code{algo.rki1} returns the same for
  the RKI 1 system, \code{algo.rki2} for the RKI 2 system and
  \code{algo.rki3} for the RKI 3 system.
}
\details{
  Using the reference values to calculate an upper limit (threshold), an
  alarm is given if the observed value is bigger than the computed
  threshold. \code{algo.rki} calls \code{algo.rkiLatestTimepoint} for the
  values specified in \code{range} and for the system specified in
  \code{control}. \code{algo.rki1} calls \code{algo.rkiLatestTimepoint}
  for the values specified in \code{range} for the RKI 1 system.
  \code{algo.rki2} calls \code{algo.rkiLatestTimepoint} for the values
  specified in \code{range} for the RKI 2 system. \code{algo.rki3} calls
  \code{algo.rkiLatestTimepoint} for the values specified in \code{range}
  for the RKI 3 system.
  \itemize{
    \item \code{"RKI 1"} reference values from 6 weeks ago
    \item \code{"RKI 2"} reference values from 6 weeks ago and 13 weeks
      from the previous year (symmetrical around the comparable week).
    \item \code{"RKI 3"} 18 reference values. 9 from the previous year
      and 9 from two years ago (also symmetrical around the comparable
      week).
  }
}
\seealso{
  \code{\link{algo.bayesLatestTimepoint}} and \code{\link{algo.bayes}}
  for the Bayes system.
}
\author{M. \enc{Hö}{Hoe}hle, A. Riebler, Christian Lang}
\examples{
# Create a test object
disProgObj <- sim.pointSource(p = 0.99, r = 0.5, length = 208, A = 1,
                              alpha = 1, beta = 0, phi = 0,
                              frequency = 1, state = NULL, K = 1.7)

# Test week 200 to 208 for outbreaks with a self-defined rki
algo.rki(disProgObj, control = list(range = 200:208, b = 1, w = 5, actY = TRUE))
# The same for rki 1 to rki 3
algo.rki1(disProgObj, control = list(range = 200:208))
algo.rki2(disProgObj, control = list(range = 200:208))
algo.rki3(disProgObj, control = list(range = 200:208))

# Test for rki 1 the latest timepoint
algo.rkiLatestTimepoint(disProgObj)
}
\keyword{classif}
surveillance/man/knox.Rd0000644000176200001440000001545014004512307014770 0ustar liggesusers\encoding{latin1}
\name{knox}
\alias{knox}
\alias{plot.knox}
\alias{toLatex.knox}
\title{
  Knox Test for Space-Time Interaction
}
\description{
  Given temporal and spatial distances as well as corresponding critical
  thresholds defining what \dQuote{close} means, the function \code{knox}
  performs the Knox (1963, 1964) test for space-time interaction. The
  corresponding p-value can be calculated either by the Poisson
  approximation or by a Monte Carlo permutation approach (Mantel, 1967)
  with support for parallel computation via \code{\link{plapply}}.
  There is a simple \code{plot}-method showing a \code{\link{truehist}}
  of the simulated null distribution together with the expected and
  observed values. This implementation of the Knox test is due to Meyer
  et al. (2016).
}
\usage{
knox(dt, ds, eps.t, eps.s, simulate.p.value = TRUE, B = 999, ...)

\method{plot}{knox}(x, ...)
}
\arguments{
  \item{dt,ds}{
    numeric vectors containing temporal and spatial distances,
    respectively. Logical vectors indicating temporal/spatial closeness
    may also be supplied, in which case \code{eps.t}/\code{eps.s} is
    ignored.
    To test for space-time interaction in a single point pattern of
    \eqn{n} events, these vectors should be of length \eqn{n*(n-1)/2}
    and contain the pairwise event distances (e.g., the lower triangle
    of the distance matrix, such as in \code{"\link{dist}"} objects).
    Note that there is no special handling of matrix input, i.e., if
    \code{dt} or \code{ds} are matrices, all elements are used (but a
    warning is given if a symmetric matrix is detected).
  }
  \item{eps.t,eps.s}{
    Critical distances defining closeness in time and space,
    respectively. Distances lower than or equal to the critical distance
    are considered \dQuote{close}.
  }
  \item{simulate.p.value}{
    logical indicating if a Monte Carlo permutation test should be
    performed (as per default). Do not forget to set the
    \code{\link{.Random.seed}} via an extra \code{.seed} argument if
    reproducibility is required (see the \dots arguments below).
    If \code{simulate.p.value = FALSE}, the Poisson approximation is
    used (but see the note below).
  }
  \item{B}{
    number of permutations for the Monte Carlo approach.
  }
  \item{\dots}{
    arguments configuring \code{\link{plapply}}:
    \code{.parallel}, \code{.seed}, and \code{.verbose}.
    By default, no parallelization is performed (\code{.parallel = 1}),
    and a progress bar is shown (\code{.verbose = TRUE}).\cr
    For the \code{plot}-method, further arguments passed to
    \code{\link{truehist}}.
  }
  \item{x}{
    an object of class \code{"knox"} as returned by the \code{knox}
    test.
  }
}
\note{
  The Poisson approximation works well if the proportions of close pairs
  in both time and space are small (Kulldorff and Hjalmars, 1999),
  otherwise the Monte Carlo permutation approach is recommended.
}
\value{
  an object of class \code{"knox"} (inheriting from \code{"htest"}),
  which is a list with the following components:
  \item{method}{a character string indicating the type of test
    performed, and whether the Poisson approximation or Monte Carlo
    simulation was used.}
  \item{data.name}{a character string giving the supplied \code{dt} and
    \code{ds} arguments.}
  \item{statistic}{the number of close pairs.}
  \item{parameter}{if \code{simulate.p.value = TRUE}, the number
    \code{B} of permutations, otherwise the \code{lambda} parameter of
    the Poisson distribution, i.e., the same as \code{null.value}.}
  \item{p.value}{the p-value for the test. In case
    \code{simulate.p.value = TRUE}, the p-value from the Poisson
    approximation is still attached as an attribute \code{"Poisson"}.}
  \item{alternative}{the character string \code{"greater"} (this is a
    one-sided test).}
  \item{null.value}{the expected number of close pairs in the absence of
    space-time interaction.}
  \item{table}{the contingency table of \code{dt <= eps.t} and
    \code{ds <= eps.s}.}

  The \code{plot}-method invisibly returns \code{NULL}.

  A \code{toLatex}-method exists, which generates LaTeX code for the
  contingency table associated with the Knox test.
}
\author{
  Sebastian Meyer
}
\seealso{
  The function \code{mantel.randtest} in package \pkg{ade4} implements
  Mantel's (1967) space-time interaction test, i.e., using the Pearson
  correlation between the spatial and temporal distances of all event
  pairs as the test statistic, and assessing statistical significance
  using a Monte Carlo permutation approach as with
  \code{simulate.p.value} here in the \code{knox} function.
  To combine information from different scales \code{eps.t} and
  \code{eps.s} while also handling edge effects, the space-time
  K-function test available via \code{\link{stKtest}} can be used.
  Function \code{\link{epitest}} tests epidemicity in a
  \code{"\link{twinstim}"} point process model.
}
\references{
  Knox, G. (1963): Detection of low intensity epidemicity: application
  to cleft lip and palate.
  \emph{British Journal of Preventive & Social Medicine}, \bold{17},
  121-127.

  Knox, E. G. (1964): The detection of space-time interactions.
  \emph{Journal of the Royal Statistical Society. Series C (Applied
  Statistics)}, \bold{13}, 25-30.

  Kulldorff, M. and Hjalmars, U. (1999): The Knox method and other tests
  for space-time interaction. \emph{Biometrics}, \bold{55}, 544-552.

  Mantel, N. (1967): The detection of disease clustering and a
  generalized regression approach. \emph{Cancer Research}, \bold{27},
  209-220.

  Meyer, S., Warnke, I., R\enc{ö}{oe}ssler, W. and Held, L. (2016):
  Model-based testing for space-time interaction using point processes:
  An application to psychiatric hospital admissions in an urban area.
  \emph{Spatial and Spatio-temporal Epidemiology}, \bold{17}, 15-25.
  \doi{10.1016/j.sste.2016.03.002}.
  Eprint: \url{https://arxiv.org/abs/1512.09052}.
}
\examples{
data("imdepi")
imdepiB <- subset(imdepi, type == "B")

## Perform the Knox test using the Poisson approximation
knoxtest <- knox(
    dt = dist(imdepiB$events$time), eps.t = 30,
    ds = dist(coordinates(imdepiB$events)), eps.s = 50,
    simulate.p.value = FALSE
)
knoxtest
## The Poisson approximation works well for these data since
## the proportion of close pairs is rather small (204/56280).

## contingency table in LaTeX
toLatex(knoxtest)

if (surveillance.options("allExamples")) {
## Obtain the p-value via a Monte Carlo permutation test,
## where the permutations can be computed in parallel
## (using forking on Unix-alikes and a cluster on Windows, see ?plapply)
knoxtestMC <- knox(
    dt = dist(imdepiB$events$time), eps.t = 30,
    ds = dist(coordinates(imdepiB$events)), eps.s = 50,
    simulate.p.value = TRUE, B = 999,
    .parallel = 2, .seed = 1, .verbose = FALSE
)
knoxtestMC
plot(knoxtestMC)
}
}
\keyword{htest}
surveillance/man/estimateGLRNbHook.Rd0000644000176200001440000000131013122471774017274 0ustar liggesusers\name{estimateGLRNbHook}
\alias{estimateGLRNbHook}
\encoding{latin1}
\title{Hook function for in-control mean estimation}
\description{
  Estimation routine for the in-control mean of \code{\link{algo.glrnb}}.
  In \R < 2.14.0 and \pkg{surveillance} < 1.4 (i.e., without a package
  namespace) users could customize this function simply by defining a
  modified version in their workspace. This is no longer supported.
}
\usage{
estimateGLRNbHook()
}
\value{
  A list with elements
  \item{\code{mod}}{resulting model of a call of \code{glm.nb}}
  \item{\code{range}}{vector of the same length as \code{range}
    containing the predicted values}
}
\seealso{
  \code{\link{algo.glrnb}}
}
\author{M. Hoehle}
\keyword{internal}
surveillance/man/hhh4_internals.Rd0000644000176200001440000000336713117734037016731 0ustar liggesusers\name{hhh4_internals}
\alias{meanHHH}
\alias{sizeHHH}
\alias{decompose.hhh4}
\title{
  Internal Functions Dealing with \code{hhh4} Models
}
\description{
  The functions documented here are considered \emph{internal},
  i.e., not intended to be called by the user. They are used by
  add-on packages dealing with \code{\link{hhh4}} models.
}
\usage{
meanHHH(theta, model, subset = model$subset, total.only = FALSE)
sizeHHH(theta, model, subset = model$subset)
decompose.hhh4(x, coefs = x$coefficients, ...)
} \arguments{ \item{theta,coefs}{numeric vector of model parameters.} \item{model}{the model terms as returned by the \code{\link{terms}}-method for \code{"hhh4"} objects.} \item{subset}{vector of time points for which to compute the component means. Defaults to the fitted time range. For \code{sizeHHH}, \code{subset=NULL} means to return the vector of dispersion parameters.} \item{total.only}{logical. Should only the total mean (epidemic + endemic) be returned in a \code{length(subset)} x nUnit matrix? Otherwise, a list of such matrices is returned, giving the values of the various model components separately (as well as the total).} \item{x}{a fitted \code{hhh4} model.} \item{\dots}{unused.} } \details{ \code{meanHHH} computes the components of the mean, returned as \code{length(subset)} x nUnit matrices. \code{sizeHHH} computes the model dispersion in the \code{\link{dnbinom}} (\code{mu}, \code{size}) parametrization (it returns \code{NULL} in the Poisson case). \code{decompose.hhh4} decomposes the fitted mean (extracted via \code{meanHHH}) into an array with dimensions \eqn{(t, i, j)}, where the first \eqn{j} index is \code{"endemic"}. } \author{ Michaela Paul and Sebastian Meyer } \keyword{internal} surveillance/man/sts_tidy.Rd0000644000176200001440000000204313751225541015656 0ustar liggesusers\name{tidy.sts} \alias{tidy.sts} \title{ Convert an \code{"sts"} Object to a Data Frame in Long (Tidy) Format } \description{ The resulting data frame will have a row for each time point and observational unit, and columns corresponding to the slots of the \code{"\linkS4class{sts}"} object (except for \code{populationFrac}, which is named \code{population}). Some time variables are added for convenience: \code{year}, \code{epochInYear}, \code{epochInPeriod}, \code{date} (the latter gives \code{NA} dates if \code{epoch(x, as.Date=TRUE)} fails, i.e., for non-standard \code{x@freq} if not \code{x@epochAsDate}). } \usage{ tidy.sts(x, ...) } \arguments{ \item{x}{an object of class \code{"\linkS4class{sts}"}.} \item{\dots}{unused.} } \author{ Sebastian Meyer } \seealso{ \code{\link{as.data.frame.sts}} } \examples{ data("momo") momodat <- tidy.sts(momo) head(momodat) ## tidy.sts(stsObj) is the same as as.data.frame(stsObj, tidy = TRUE) stopifnot(identical(as.data.frame(momo, tidy = TRUE), momodat)) } \keyword{manip} surveillance/man/anscombe.residuals.Rd0000644000176200001440000000101512665561746017600 0ustar liggesusers\name{anscombe.residuals} \alias{anscombe.residuals} \title{Compute Anscombe Residuals} \description{ Compute Anscombe residuals from a fitted \code{\link{glm}}, which makes them approximately standard normally distributed. } \usage{ anscombe.residuals(m, phi) } \arguments{ \item{m}{a fitted \code{"glm"} object} \item{phi}{the current estimated overdispersion} } \value{The standardized Anscombe residuals of \code{m}} \references{McCullagh & Nelder, Generalized Linear Models, 1989} \keyword{regression} surveillance/man/surveillance-defunct.Rd0000644000176200001440000000603413627474351020141 0ustar liggesusers\name{surveillance-defunct} \alias{surveillance-defunct} \title{Defunct Functions in Package \pkg{surveillance}} \alias{compMatrix.writeTable} \alias{correct53to52} \alias{enlargeData} \alias{makePlot} \alias{readData} \alias{test} \alias{testSim} \alias{toFileDisProg} \alias{algo.hhh} \alias{algo.hhh.grid} \alias{create.grid} \description{ The functions listed here are no longer part of \pkg{surveillance}.
} \usage{ ## Defunct in surveillance 1.18.0 algo.hhh(disProgObj, control=list( lambda=TRUE, neighbours=FALSE, linear=FALSE, nseason=0, negbin=c("none", "single", "multiple"), proportion=c("none", "single", "multiple"), lag.range=NULL ), thetastart=NULL, verbose=TRUE) algo.hhh.grid(disProgObj, control=list( lambda=TRUE, neighbours=FALSE, linear=FALSE, nseason=0, negbin=c("none", "single", "multiple"), proportion=c("none", "single", "multiple"), lag.range=NULL ), thetastartMatrix, maxTime=1800, verbose=FALSE) create.grid(disProgObj, control, params=list( epidemic=c(0.1, 0.9, 5), endemic=c(-0.5, 0.5, 3), negbin=c(0.3, 12, 10) )) ## Defunct in surveillance 1.17.0 compMatrix.writeTable(compMatrix) correct53to52(disProgObj, firstweek = 1) enlargeData(disProgObj, range = 1:156, times = 1) makePlot(outputpath, data = "k1", method = "rki1", name, disease, range = 157:339) readData(abb, week53to52=TRUE, sysPath=TRUE) test(data = c("k1", "m5"), range = 157:339) testSim(p = 0.99, r = 0.01, length = 400, A = 1, alpha = 1, beta = 0, phi = 0, frequency = 1, state = NULL, K, range = 200:400) toFileDisProg(disProgObj, toFile) } \details{ \code{algo.hhh} was an early implementation of the HHH regression framework for multivariate time series of infectious disease counts. An improved and considerably extended implementation is provided by the \code{\link{hhh4}} function since 2012. The trivial function \code{compMatrix.writeTable} is no longer used (it did nothing more than generate an \code{\link{xtable}} of its input). The ancient test function \code{makePlot} is no longer used, nor are functions \code{readData} (the datasets are still available via \code{data(\link{m1})} etc) and \code{correct53to52} / \code{enlargeData} (which both only worked for old \code{"disProg"} objects with non-matrix elements). \code{enlargeData} is still exemplified in the old \code{vignette("surveillance")}. \code{test} calls of outbreak detection algorithms applied to the old SurvStat datasets can be found in \code{vignette("surveillance")}, and \code{testSim} is provided as an example in \code{help("\link{sim.pointSource}")}. Functions related to the old \code{"\link[=create.disProg]{disProg}"} class are no longer needed. The effect of \code{toFileDisProg} could still be achieved via \code{\link{write.table}} of \code{as.data.frame(disProg2sts(disProgObj))[c("epoch", "observed", "state")]}. } \seealso{ \code{\link{Defunct}} } \keyword{internal} surveillance/man/twinSIR_profile.Rd0000644000176200001440000000453213433452530017075 0ustar liggesusers\encoding{latin1} \name{twinSIR_profile} \alias{profile.twinSIR} \alias{plot.profile.twinSIR} \title{ Profile Likelihood Computation and Confidence Intervals } \description{ Function to compute Wald and profile likelihood based confidence intervals. Computations might be cumbersome! There is a simple \code{plot}-method for the result. } \usage{ \method{profile}{twinSIR}(fitted, profile, alpha = 0.05, control = list(fnscale = -1, factr = 10, maxit = 100), ...) } \arguments{ \item{fitted}{ an object of class \code{"twinSIR"}. } \item{profile}{ a list with elements being numeric vectors of length 4. These vectors must have the form \code{c(index, lower, upper, gridsize)}. \describe{ \item{\code{index}:}{ index of the parameter to be profiled in the vector \code{coef(fitted)}. } \item{\code{lower, upper}:}{ lower/upper limit of the grid on which the profile log-likelihood is evaluated.
Can also be \code{NA} in which case \code{lower/upper} equals the lower/upper bound of the respective 99.7\% Wald confidence interval (+-3*se). } \item{\code{gridsize}:}{ grid size of the equally spaced grid between lower and upper. Can also be 0 in which case the profile log-likelihood for this parameter is not evaluated on a grid. } } } \item{alpha}{ \eqn{(1-\alpha) 100\%}{(1-alpha)*100\%} profile likelihood based confidence intervals are computed. If \code{alpha <= 0}, then no confidence intervals are computed. } \item{control}{ control object to use in \code{\link{optim}} for the profile log-likelihood computations. } \item{\dots}{ unused (argument of the generic). } } \value{ a list with profile log-likelihood evaluations on the grid and highest likelihood and Wald confidence intervals. The argument \code{profile} is also returned. The result has class \code{"profile.twinSIR"}, for which a simple (undocumented) \code{plot}-method is available. } \author{ Michael \enc{Höhle}{Hoehle} and Sebastian Meyer } \examples{ data("hagelloch") fit <- twinSIR(~ household, data = hagelloch) gridsize <- if (interactive()) 35 else 5 # for fast tests prof <- profile(fit, list(c(1, NA, NA, gridsize))) prof$ci.hl plot(prof) } \keyword{htest} \keyword{methods} \keyword{optimize} \keyword{dplot} surveillance/man/refvalIdxByDate.Rd0000644000176200001440000000211513122471774017034 0ustar liggesusers\name{refvalIdxByDate} \alias{refvalIdxByDate} \title{Compute indices of reference value using Date class} \description{ The reference values are formed based on computations of \code{seq} for Date class arguments. } \usage{ refvalIdxByDate(t0, b, w, epochStr, epochs) } \arguments{ \item{t0}{A Date object describing the time point} \item{b}{Number of years to go back in time} \item{w}{Half width of window to include reference values for} \item{epochStr}{One of \code{"1 month"}, \code{"1 week"} or \code{"1 day"}} \item{epochs}{Vector containing the epoch value of the sts/disProg object} } \details{ Using the Date class the reference values are formed as follows: Starting from \code{t0}, go back \eqn{i = 1, \ldots, b} years in time. For each year, go \code{w} epochs back and include from here to \code{w} epochs after \code{t0}. In case of weeks we always go back to the closest Monday of this date. In case of months we also go back in time to the closest 1st of the month. } \value{ a vector of indices into \code{epochs} matching the reference values } \keyword{chron} surveillance/man/linelist2sts.Rd0000644000176200001440000000722413430572401016453 0ustar liggesusers\encoding{latin1} \name{linelist2sts} \alias{linelist2sts} \title{ Convert Dates of Individual Case Reports into a Time Series of Counts } \description{ The function is used to convert an individual line list of cases to an aggregated time series of counts based on event date information of the cases. } \usage{ linelist2sts(linelist,dateCol, aggregate.by=c("1 day", "7 day", "1 week", "1 month", "3 month", "1 year"), dRange=NULL, epochInPeriodStr=switch(aggregate.by, "1 day"="1", "7 day"="\%u", "1 week"="\%u", "1 month"="\%d","3 month"="\%q","1 year"="\%j"), startYearFormat=switch(aggregate.by,"1 day"="\%Y", "7 day"="\%G", "1 week"="\%G","1 month"="\%Y","3 month"="\%Y","1 year"="\%Y"), startEpochFormat=switch(aggregate.by,"1 day"="\%j", "7 day"="\%V", "1 week"="\%V", "1 month"="\%m", "3 month"="\%Q", "1 year"="1") ) } \arguments{ \item{linelist}{ A \code{data.frame} containing the line list of cases.
} \item{dateCol}{A character string stating the column name in \code{linelist} which contains the event occurrence information (as a vector of \code{Date}s) which is to be temporally aggregated. } \item{aggregate.by}{Temporal aggregation level given as a string, see the \code{by} variable of the \code{\link{seq.Date}} function for further details. } \item{dRange}{A vector containing the minimum and maximum date for doing the aggregation. If not specified these dates are extracted automatically by taking \code{range(linelist[,dateCol])} and adjusting these according to \code{aggregate.by} (e.g. always first of a month). } \item{epochInPeriodStr}{\code{strptime} compatible format string to use for determining how a date is placed within the epoch. This is, e.g., used to move the \code{dRange} epochs to the beginning of the period. Example: In case of weekly aggregation the "\%u" determines which day within the week (Monday is day 1) we have. See \code{\link{strptime}} for further details. } \item{startYearFormat}{\code{strptime} compatible format string to use for determining how the \code{start} entry of the \code{sts} object is generated. Usually the provided defaults are sufficient.} \item{startEpochFormat}{\code{strptime} compatible format string to use for determining how the \code{start} entry of the \code{sts} object is generated. Usually the provided defaults are sufficient.} } \details{ The date range is automatically extended such that the starting and ending dates are always the first epoch within the period, i.e. for aggregation by week it is moved to Mondays. This is controlled by the \code{epochInPeriodStr} parameter. Please note that the formatting strings are implemented by the \code{\link{formatDate}} function, which uses \code{\link{strptime}} formatting strings as well as formatting of quarters via "\%Q", "\%OQ" and "\%q". } \value{ The function returns an object of class \code{"\linkS4class{sts}"}. The \code{freq} slot might not be appropriate. } \author{ Michael \enc{Höhle}{Hoehle} } \seealso{ \code{\link{seq.Date}}, \code{\link{strptime}}, \code{\link{formatDate}} } \examples{ #Load O104 outbreak data data("husO104Hosp") #Convert line list to an sts object sts <- linelist2sts(husO104Hosp, dateCol="dHosp", aggregate.by="1 day") #Check that the number of cases is correct all.equal(sum(observed(sts)),nrow(husO104Hosp)) #Plot the result plot(sts,xaxis.tickFreq=list("\%d"=atChange,"\%m"=atChange), xaxis.labelFreq=list("\%d"=at2ndChange), xaxis.labelFormat="\%d \%b", xlab="",las=2,cex.axis=0.8) } \keyword{models} \keyword{optimize} surveillance/man/bodaDelay.Rd0000644000176200001440000001510613433744455015703 0ustar liggesusers\encoding{latin1} \name{bodaDelay} \alias{bodaDelay} \title{Bayesian Outbreak Detection in the Presence of Reporting Delays} \usage{ bodaDelay(sts, control = list( range = NULL, b = 5, w = 3, mc.munu = 100, mc.y = 10, pastAberrations = TRUE, verbose = FALSE, alpha = 0.05, trend = TRUE, limit54 = c(5,4), inferenceMethod = c("asym","INLA"), quantileMethod = c("MC","MM"), noPeriods = 1, pastWeeksNotIncluded = NULL, delay = FALSE)) } \arguments{ \item{sts}{sts-object to be analysed. Needs to have a reporting triangle.} \item{control}{list of control arguments: \describe{ \item{\code{b}}{How many years back in time to include when forming the base counts.} \item{\code{w}}{Window's half-size, i.e. number of weeks to include before and after the current week in each year.} \item{\code{range}}{Specifies the index of all timepoints which should be tested.
If \code{range} is \code{NULL}, all possible timepoints are used.} \item{\code{pastAberrations}}{Boolean indicating whether to include an effect for past outbreaks in a second fit of the model. This option only makes sense if \code{inferenceMethod} is \code{INLA}, as it is not supported by the other inference method.} \item{\code{verbose}}{Boolean specifying whether to show extra debugging information.} \item{\code{alpha}}{An approximate (one-sided) \eqn{(1-\alpha)\cdot 100\%} prediction interval is calculated, unlike the original method where it was a two-sided interval. The upper limit of this interval, i.e., the \eqn{(1-\alpha)\cdot 100\%} quantile, serves as an upper bound.} \item{\code{trend}}{Boolean indicating whether a trend should be included} \item{\code{noPeriods}}{Number of levels in the factor allowing the use of more baseline data. If equal to 1, no factor variable is created and the set of reference values is defined as in Farrington et al. (1996).} \item{\code{inferenceMethod}}{Which inference method is used, as defined in Salmon et al. (2015). If one chooses \code{"INLA"} then inference is performed with INLA. If one chooses \code{"asym"} (default) then the asymptotic normal approximation of the posterior is used.} \item{\code{pastWeeksNotIncluded}}{Number of past weeks to ignore in the calculation. The default (\code{NULL}) means to use the value of \code{control$w}.} \item{\code{delay}}{Boolean indicating whether to take reporting delays into account.} \item{\code{mc.munu}}{Number of samples for the parameters of the negative binomial distribution for calculating a threshold} \item{\code{mc.y}}{Number of samples for observations when performing Monte Carlo to calculate a threshold} \item{\code{limit54}}{A vector \code{c(cases, period)}: no alarm is sounded if fewer than \code{cases} cases were reported in the past \code{period} time points (default: at least 5 cases in the last 4 weeks).} \item{\code{quantileMethod}}{Character, either \code{"MC"} (default) or \code{"MM"}. Indicates how to compute the quantile based on the posterior distribution (no matter the inference method): either by sampling \code{mc.munu} values from the posterior distribution of the parameters and then for each sampled parameters vector sampling \code{mc.y} response values so that one gets a vector of response values based on which one computes an empirical quantile (MC method, as explained in Salmon et al. 2015); or by sampling \code{mc.munu} from the posterior distribution of the parameters and then computing the quantile of the mixture distribution using bisectioning, which is faster.} } } } \description{ The function takes \code{range} values of the surveillance time series \code{sts} and for each time point uses a Bayesian model of the negative binomial family with log link inspired by the work of Noufaily et al. (2012) and of Manitz and \enc{Höhle}{Hoehle} (2014). It allows delay-corrected aberration detection as explained in Salmon et al. (2015). A \code{reportingTriangle} has to be provided in the \code{control} slot.
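For illustration, a delay-corrected analysis could be sketched as follows (a minimal sketch only, assuming \code{stsObj} is a hypothetical \code{"sts"} object whose \code{control} slot contains a suitable \code{reportingTriangle}):
\preformatted{
## minimal sketch, not a definitive analysis
surv <- bodaDelay(stsObj, control = list(
    range = 260:312,           # time points to monitor
    b = 4, w = 3,              # 4 years of history, 3-week window
    delay = TRUE,              # correct for reporting delays
    inferenceMethod = "asym"))
plot(surv)
}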
} \examples{ \dontrun{ data("stsNewport") salm.Normal <- list() salmDelayAsym <- list() for (week in 43:45){ listWeeks <- as.Date(row.names(stsNewport@control$reportingTriangle$n)) dateObs <- listWeeks[isoWeekYear(listWeeks)$ISOYear==2011 & isoWeekYear(listWeeks)$ISOWeek==week] stsC <- sts_observation(stsNewport, dateObservation=dateObs, cut=TRUE) inWeeks <- with(isoWeekYear(epoch(stsC)), ISOYear == 2011 & ISOWeek >= 40 & ISOWeek <= 48) rangeTest <- which(inWeeks) alpha <- 0.07 # Control slot for Noufaily method controlNoufaily <- list(range=rangeTest,noPeriods=10, b=4,w=3,weightsThreshold=2.58,pastWeeksNotIncluded=26, pThresholdTrend=1,thresholdMethod="nbPlugin",alpha=alpha*2, limit54=c(0,50)) # Control slot for the Proposed algorithm with D=0 correction controlNormal <- list(range = rangeTest, b = 4, w = 3, reweight = TRUE, mc.munu=10000, mc.y=100, verbose = FALSE, alpha = alpha, trend = TRUE, limit54=c(0,50), noPeriods = 10, pastWeeksNotIncluded = 26, delay=FALSE) # Control slot for the Proposed algorithm with D=10 correction controlDelayNorm <- list(range = rangeTest, b = 4, w = 3, reweight = FALSE, mc.munu=10000, mc.y=100, verbose = FALSE, alpha = alpha, trend = TRUE, limit54=c(0,50), noPeriods = 10, pastWeeksNotIncluded = 26, delay=TRUE,inferenceMethod="asym") set.seed(1) salm.Normal[[week]] <- farringtonFlexible(stsC, controlNoufaily) salmDelayAsym[[week]] <- bodaDelay(stsC, controlDelayNorm) } opar <- par(mfrow=c(2,3)) lapply(salmDelayAsym[c(43,44,45)],plot, legend=NULL, main="", ylim=c(0,35)) lapply(salm.Normal[c(43,44,45)],plot, legend=NULL, main="", ylim=c(0,35)) par(opar) } } \references{ Farrington, C.P., Andrews, N.J., Beale, A.D. and Catchpole, M.A. (1996): A statistical algorithm for the early detection of outbreaks of infectious disease. J. R. Statist. Soc. A, 159, 547-563. Noufaily, A., Enki, D.G., Farrington, C.P., Garthwaite, P., Andrews, N.J., Charlett, A. (2012): An improved algorithm for outbreak detection in multiple surveillance systems. Statistics in Medicine, 32 (7), 1206-1222. Salmon, M., Schumacher, D., Stark, K., \enc{Höhle}{Hoehle}, M. (2015): Bayesian outbreak detection in the presence of reporting delays. Biometrical Journal, 57 (6), 1051-1067. } surveillance/man/MMRcoverageDE.Rd0000644000176200001440000000361213122471774016402 0ustar liggesusers\name{MMRcoverageDE} \alias{MMRcoverageDE} \docType{data} \title{MMR coverage levels in the 16 states of Germany} \description{ Coverage levels at school entry for the first and second dose of the combined measles-mumps-rubella (MMR) vaccine in 2006, estimated from children presenting vaccination documents at school entry examinations. } \usage{data(MMRcoverageDE)} \format{ A \code{data.frame} containing 19 rows and 5 columns with variables \describe{ \item{state}{Names of states: the 16 federal states are followed by the total of Germany, as well as the total of West and East Germany.} \item{nOfexaminedChildren}{Number of children examined.} \item{withVaccDocument}{Percentage of children who presented vaccination documents.} \item{MMR1}{Percentage of children with vaccination documents, who received at least 1 dose of MMR vaccine.} \item{MMR2}{Percentage of children with vaccination documents, who received at least 2 doses of MMR vaccine.} } Coverage levels were derived from vaccination documents presented at medical examinations, which are conducted by local health authorities at school entry each year. Records include information about the receipt of 1st and 2nd doses of MMR, but no information about dates.
Note that information from children who did not present a vaccination document on the day of the medical examination is not included in the estimated coverage. } \source{ Robert Koch-Institut (2008) Zu den Impfquoten bei den Schuleingangsuntersuchungen in Deutschland 2006. \emph{Epidemiologisches Bulletin}, \bold{7}, 55-57. } \seealso{\code{\link{measlesDE}}} \references{ Herzog, S.A., Paul, M. and Held, L. (2011) Heterogeneity in vaccination coverage explains the size and occurrence of measles epidemics in German surveillance data. Epidemiology and Infection, \bold{139}, 505--515. } \keyword{datasets} surveillance/man/intersectPolyCircle.Rd0000644000176200001440000000320113777627613020005 0ustar liggesusers\name{intersectPolyCircle} \alias{intersectPolyCircle} \alias{intersectPolyCircle.owin} \alias{intersectPolyCircle.SpatialPolygons} \alias{intersectPolyCircle.gpc.poly} \title{ Intersection of a Polygonal and a Circular Domain } \description{ This is a unifying wrapper around functionality of various packages dealing with spatial data. It computes the intersection of a circular domain and a polygonal domain (whose class defines the specific method). } \usage{ intersectPolyCircle(object, center, radius, ...) \method{intersectPolyCircle}{owin}(object, center, radius, npoly = 32, ...) \method{intersectPolyCircle}{SpatialPolygons}(object, center, radius, npoly = 32, ...) \method{intersectPolyCircle}{gpc.poly}(object, center, radius, npoly = 32, useGEOS = FALSE, ...) } \arguments{ \item{object}{a polygonal domain of one of the supported classes.} \item{center,radius,npoly}{see \code{\link{discpoly}}.} \item{useGEOS}{logical indicating if package \pkg{rgeos} (\code{\link[rgeos]{gIntersection}}) should be used instead of package \pkg{gpclib}. The latter (default) requires explicit acceptance of \pkg{gpclib}'s restricted license via \code{\link{surveillance.options}(gpclib=TRUE)}.} \item{\dots}{potential further arguments (from the generic).} } \value{ a polygonal domain of the same class as the input \code{object}. } \author{ Sebastian Meyer } \seealso{ \code{\link{discpoly}} to generate a polygonal approximation to a disc } \examples{ library("spatstat.geom") plot(letterR) plot(intersectPolyCircle(letterR, c(3,2), 1), add=TRUE, col=2, lwd=3) } \keyword{spatial} \keyword{manip} surveillance/man/sts-class.Rd0000644000176200001440000002711514026677433015736 0ustar liggesusers\name{sts-class} \docType{class} \alias{sts} \alias{sts-class} % methods to access and replace slots \alias{alarms,sts-method} \alias{alarms<-,sts-method} \alias{upperbound,sts-method} \alias{upperbound<-,sts-method} \alias{control,sts-method} \alias{control<-,sts-method} \alias{epoch,sts-method} \alias{epoch<-,sts-method} \alias{observed,sts-method} \alias{observed<-,sts-method} \alias{population,sts-method} \alias{population<-,sts-method} \alias{multinomialTS,sts-method} \alias{multinomialTS<-,sts-method} \alias{neighbourhood,sts-method} \alias{neighbourhood<-,sts-method} % other access methods \alias{dim,sts-method} \alias{dimnames,sts-method} \alias{year} \alias{year,sts-method} \alias{epochInYear} \alias{epochInYear,sts-method} % conversion methods \alias{as.data.frame.sts} \alias{as.data.frame,sts-method} \alias{as.ts.sts} \alias{coerce,sts,ts-method} \alias{coerce,ts,sts-method} \alias{as.xts.sts} \encoding{latin1} \title{Class \code{"sts"} -- surveillance time series} \description{ This is a lightweight S4 class to implement (multivariate) time series of counts, typically from public health surveillance.
For areal time series, the class can also capture the spatial layout of the regions where the data originate. The constructor function \code{sts} can be used to set up an \code{"sts"} object. Conversion of simple time-series objects (of class \code{"\link{ts}"}) is also possible. The slots of the \code{"sts"} class and available methods are described below. } \usage{ sts(observed, start = c(2000, 1), frequency = 52, epoch = NULL, population = NULL, ...) } \arguments{ \item{observed}{a vector (for a single time series) or matrix (one time series per column) of counts. A purely numeric data frame will also do (transformed via \code{as.matrix}). This argument sets the \code{observed} slot, which is the core element of the resulting \code{"sts"} object. It determines the dimensions and colnames for several other slots. The columns (\dQuote{units}) typically correspond to different regions, diseases, or age groups.} \item{start,frequency}{basic characteristics of the time series data just like for simple \code{"\link{ts}"} objects. The (historical) default values correspond to weekly data starting in the first week of 2000. The \code{epoch} and \code{epochInYear} methods use the ISO 8601 specification when converting between week numbers and dates, see \code{\link{isoWeekYear}}.} \item{epoch}{observation times, either as an integer sequence (default) or as a \code{Date} vector (in which case \code{epochAsDate} is automatically set to \code{TRUE}).} \item{population}{a vector of length the number of columns in \code{observed} or a matrix of the same dimension as \code{observed}. Especially for multivariate time series, the population numbers (or fractions) underlying the counts in each unit are relevant for visualization and statistical inference. The \code{population} argument is an alias for the corresponding slot \code{populationFrac}. The default \code{NULL} value sets equal population fractions across all units.} \item{\dots}{further named arguments with names corresponding to slot names (see the list below). For instance, in the public health surveillance context, the \code{state} slot is used to indicate outbreaks (default: \code{FALSE} for all observations). For areal time series data, the \code{map} and \code{neighbourhood} slots are used to store the spatial structure of the observation region.} } \section{Slots}{ \describe{ \item{\code{epoch}:}{a numeric vector specifying the time of observation, typically a week index. Depending on the \code{freq} slot, it could also index days or months. Furthermore, if \code{epochAsDate=TRUE} then \code{epoch} is the integer representation of \code{\link{Date}}s giving the exact date of the observation.} \item{\code{freq}:}{For weekly data \code{freq} corresponds to 52, for monthly data to 12.} \item{\code{start}:}{vector of length two denoting the year and the sample number (week, month, etc.) of the first observation.} \item{\code{observed}:}{A matrix of size \code{length(epoch)} times the number of regions containing the weekly/monthly number of counts in each region.
The colnames of the matrix should match the ID values of the shapes in the \code{map} slot.} \item{\code{state}:}{Matrix with the same dimension as \code{observed} containing Booleans indicating whether there was an outbreak in the region at the specific time point.} \item{\code{alarm}:}{Matrix with the same dimension as \code{observed} specifying whether an outbreak detection algorithm declared a specific time point in the region as having an alarm.} \item{\code{upperbound}:}{Matrix with upper bound values.} \item{\code{neighbourhood}:}{Symmetric matrix of size \eqn{(number of regions)^2} describing the neighbourhood structure. It may either be a binary adjacency matrix or contain neighbourhood orders (see the Examples for how to infer the latter from the \code{map}).} \item{\code{populationFrac}:}{A \code{matrix} of population fractions or absolute numbers (see \code{multinomialTS} below) with dimensions \code{dim(observed)}.} \item{\code{map}:}{Object of class \code{SpatialPolygonsDataFrame} providing a shape of the areas which are monitored.} \item{\code{control}:}{Object of class \code{list}; this is a rather free data type to be returned by the surveillance algorithms.} \item{\code{epochAsDate}:}{a Boolean indicating if the \code{epoch} slot corresponds to \code{Date}s.} \item{\code{multinomialTS}:}{a Boolean stating whether to interpret the object as \code{observed} out of \code{population}, i.e. a multinomial interpretation instead of a count interpretation.} } } \section{Methods}{ \subsection{Extraction of slots}{ There is an extraction (and replacement) method for almost every slot. The name of the method corresponds to the slot name, with two exceptions: the \code{populationFrac} slot is addressed by a \code{population} method, and the \code{alarm} slot is addressed by an \code{alarms} method. \describe{ \item{epoch}{\code{signature(x = "sts")}: extract the \code{epoch} slot. If the \code{sts} object is indexed by dates (\code{epochAsDate} = TRUE), the returned vector is of class \code{Date}, otherwise numeric (usually the integer sequence \code{1:nrow(x)}).\cr By explicitly requesting \code{epoch(x, as.Date = TRUE)}, dates can also be extracted if the \code{sts} object is not internally indexed by dates but has a standard frequency of 12 (monthly) or 52 (weekly). The transformation is based on \code{start} and \code{freq} and will return the first day of each month (\code{freq=12}) and the Monday of each week (\code{freq=52}), respectively.} \item{observed}{\code{signature(x = "sts")}: extract the \code{observed} slot.} \item{alarms}{\code{signature(x = "sts")}: extract the \code{alarm} slot.} \item{upperbound}{\code{signature(x = "sts")}: extract the \code{upperbound} slot.} \item{neighbourhood}{\code{signature(x = "sts")}: extract the \code{neighbourhood} slot.} \item{population}{\code{signature(x = "sts")}: extract the \code{populationFrac} slot.} \item{control}{\code{signature(x = "sts")}: extract the \code{control} slot.} \item{multinomialTS}{\code{signature(x = "sts")}: extract the \code{multinomialTS} slot.} } } \subsection{Other extraction methods}{ \describe{ \item{dim}{\code{signature(x = "sts")}: extract matrix dimensions of \code{observed}. This method also enables \code{nrow(x)} and \code{ncol(x)}.} \item{dimnames}{\code{signature(x = "sts")}: extract the \code{\link{dimnames}} of the \code{observed} matrix.
This method also enables \code{rownames(x)} and \code{colnames(x)}.} \item{year}{\code{signature(x = "sts")}: extract the corresponding year of each observation.} \item{epochInYear}{\code{signature(x = "sts")}: extract the epoch number within the year.} \item{[}{\code{signature(x = "sts")}: subset rows (time points) and/or columns (units), see \code{help("\link{[,sts-method}")}.} } } \subsection{Transformation methods}{ \describe{ \item{aggregate}{\code{signature(x = "sts")}: see \code{\link{aggregate.sts}}.} \item{as.data.frame}{\code{signature(x = "sts")}: the default \code{as.data.frame} call will collect the following slots into a data frame: \code{observed}, \code{epoch}, \code{state}, \code{alarm}, \code{upperbound}, and \code{populationFrac}. Additional columns will be created for \code{freq} (potentially varying by year for weekly or daily data if \code{x@epochAsDate} is \code{TRUE}) and \code{epochInPeriod} (the epoch fraction within the current year).\cr Calling the \code{as.data.frame} method with the argument \code{tidy = TRUE} will return \code{\link{tidy.sts}(x)}, which reshapes multivariate \code{sts} objects to the \dQuote{long} format (one row per epoch and observational unit). The tidy format is particularly useful for standard regression models and customized plotting.} \item{coerce}{\code{signature(from="sts", to="ts")} and \code{signature(from="ts", to="sts")}, to be called via \code{as(stsObj, "ts")} (or \code{as.ts(stsObj)}) and \code{as(tsObj, "sts")}, respectively.} \item{as.xts}{convert to the \CRANpkg{xts} package format.} } } \subsection{Visualization methods}{ \describe{ \item{plot}{\code{signature(x = "sts", y = "missing")}: entry point to a collection of plot variants. The \code{type} of plot is specified using a formula, see \code{\link{plot.sts}} for details.} \item{autoplot}{a \CRANpkg{ggplot2} variant of the standard time-series-type plot, see \code{\link{autoplot.sts}}.} \item{animate}{see \code{\link{animate.sts}}.} \item{toLatex}{see \code{\link{toLatex.sts}}.} } } } \author{Michael \enc{Hhle}{Hoehle} and Sebastian Meyer} \examples{ showClass("sts") ## A typical dataset with weekly counts of measles from several districts data("measlesWeserEms") measlesWeserEms ## reconstruct data("measlesWeserEms") from its components counts <- observed(measlesWeserEms) map <- measlesWeserEms@map populationFrac <- population(measlesWeserEms) weserems_nbOrder <- neighbourhood(measlesWeserEms) ## orders of adjacency can also be determined from the map if (requireNamespace("spdep")) { stopifnot(identical(weserems_nbOrder, nbOrder(poly2adjmat(map), maxlag = 10))) } mymeasles <- sts(counts, start = c(2001, 1), frequency = 52, population = populationFrac, neighbourhood = weserems_nbOrder, map = map) stopifnot(identical(mymeasles, measlesWeserEms)) ## convert ts/mts object to sts z <- ts(matrix(rpois(300,10), 100, 3), start = c(1961, 1), frequency = 12) z.sts <- as(z, "sts") plot(z.sts) ## conversion of "sts" objects to the quasi-standard "xts" class if (requireNamespace("xts")) { z.xts <- as.xts.sts(z.sts) plot(z.xts) } } \keyword{classes} \keyword{methods} surveillance/man/hhh4_predict.Rd0000644000176200001440000000216013241246036016355 0ustar liggesusers\name{hhh4_predict} \alias{predict.hhh4} \title{Predictions from a \code{hhh4} Model} \description{ Get fitted (component) means from a \code{\link{hhh4}} model. 
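For illustration, assuming \code{fit} is a hypothetical fitted \code{"hhh4"} object, the overall and component-specific means could be extracted as sketched below:
\preformatted{
mu  <- predict(fit)                    # total mean (time x unit matrix)
end <- predict(fit, type = "endemic")  # endemic component only
epi <- predict(fit, type = "epidemic") # epidemic component only
stopifnot(all.equal(mu, end + epi))    # components sum to the total
}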
} \usage{ \method{predict}{hhh4}(object, newSubset=object$control$subset, type="response", \dots) } \arguments{ \item{object}{fitted \code{\link{hhh4}} model (class \code{"hhh4"}).} \item{newSubset}{subset of time points for which to return the predictions. Defaults to the subset used for fitting the model, and must be a subset of \code{1:nrow(object$stsObj)}.} \item{type}{the type of prediction required. The default (\code{"response"} or, equivalently, \code{"mean"}) is on the scale of the response variable (mean = endemic plus epidemic components). The alternatives are: \code{"endemic"}, \code{"epidemic"}, \code{"epi.own"} (i.e. the autoregressive part), and \code{"epi.neighbours"} (i.e. the spatio-temporal part).} \item{\dots}{unused (argument of the generic).} } \value{ matrix of fitted means for each time point (of \code{newSubset}) and region. } \author{Michaela Paul and Sebastian Meyer} \keyword{methods} \keyword{models} surveillance/man/permutationTest.Rd0000644000176200001440000000511313671631746017235 0ustar liggesusers\name{permutationTest} \alias{permutationTest} \title{Monte Carlo Permutation Test for Paired Individual Scores} \description{ The difference between mean \code{\link{scores}} from model 1 and mean \code{\link{scores}} from model 2 is used as the test statistic. Under the null hypothesis of no difference, the actually observed difference between mean scores should not be notably different from the distribution of the test statistic under permutation. As the computation of all possible permutations is only feasible for small datasets, a random sample of permutations is used to obtain the null distribution. The resulting p-value thus depends on the \code{\link{.Random.seed}}. } \usage{ permutationTest(score1, score2, nPermutation = 9999, plot = FALSE, verbose = FALSE) } \arguments{ \item{score1, score2}{ numeric vectors of scores from models 1 and 2, respectively. } \item{nPermutation}{ number of Monte Carlo replicates. } \item{plot}{ logical indicating if a \code{\link{truehist}} of the \code{nPermutation} permutation test statistics should be plotted with a vertical line marking the observed difference of the means. To customize the histogram, \code{plot} can also be a list of arguments for \code{truehist} replacing internal defaults. } \item{verbose}{ logical indicating if the results should be printed in one line. } } \details{ For each permutation, we first randomly assign the membership of the n individual scores to either model 1 or 2 with probability 0.5. We then compute the respective difference in mean for model 1 and 2 in this permuted set of scores. The Monte Carlo p-value is then given by (1 + #{permuted differences larger than observed difference (in absolute value)}) / (1 + \code{nPermutation}). } \value{ a list of the following elements: \item{diffObs}{observed difference in mean scores, i.e., \code{mean(score1) - mean(score2)}} \item{pVal.permut}{p-value of the permutation test} \item{pVal.t}{p-value of the corresponding \code{\link{t.test}(score1, score2, paired=TRUE)}} } \author{ Michaela Paul with contributions by Sebastian Meyer } \references{ Paul, M. and Held, L. (2011): Predictive assessment of a non-linear random effects model for multivariate time series of infectious disease counts. \emph{Statistics in Medicine}, \bold{30} (10), 1118-1136. \doi{10.1002/sim.4177} } \seealso{ Package \CRANpkg{coin} for a comprehensive permutation test framework. 
} \examples{ permutationTest(rnorm(50, 1.5), rnorm(50, 1), plot = TRUE) } \keyword{htest} surveillance/man/wrap.algo.Rd0000644000176200001440000000513713433500440015704 0ustar liggesusers\name{wrap.algo} \alias{wrap.algo} \alias{farrington} \alias{bayes} \alias{rki} \alias{cusum} \alias{glrpois} \alias{glrnb} \alias{outbreakP} %% FIXME: hmm and rogerson are currently undocumented and unexported %\alias{hmm} %\alias{rogerson} \encoding{latin1} \title{Multivariate Surveillance through independent univariate algorithms} \description{ This function takes an \code{sts} object and applies a univariate surveillance algorithm to the time series of each observational unit. } \usage{ %This is the main function wrap.algo(sts, algo, control,control.hook=function(k, control) return(control),verbose=TRUE,...) %Derived functions fixing the control object and the "algo" argument farrington(sts, control=list(range=NULL, b=5, w=3, reweight=TRUE, verbose=FALSE, alpha=0.05),...) bayes(sts, control = list(range = range, b = 0, w = 6, actY = TRUE,alpha=0.05),...) rki(sts, control = list(range = range, b = 2, w = 4, actY = FALSE),...) cusum(sts, control = list(range=range, k=1.04, h=2.26, m=NULL, trans="standard",alpha=NULL),...) glrpois(sts, control = list(range=range,c.ARL=5, S=1,beta=NULL, Mtilde=1, M=-1, change="intercept",theta=NULL),...) glrnb(sts, control = list(range=range,c.ARL=5, mu0=NULL, alpha=0, Mtilde=1, M=-1, change="intercept", theta=NULL,dir=c("inc","dec"), ret=c("cases","value")),...) outbreakP(sts, control=list(range = range, k=100, ret=c("cases","value"),maxUpperboundCases=1e5),...) } \arguments{ \item{sts}{Object of class \code{sts}} \item{algo}{Character string giving the function name of the algorithm to call, e.g. \code{"algo.farrington"}. Calling is done using \code{do.call}.} \item{control}{Control object as list. Depends on each algorithm.} \item{control.hook}{This is a function for handling multivariate objects: a function of the unit index \code{k} and the current control object, which returns the appropriate control object for region \code{k}.} \item{verbose}{Boolean, if \code{TRUE} then textual information about the process is given} \item{...}{Additional arguments sent to the \code{algo} function.} } \value{ An \code{sts} object with the \code{alarm}, \code{upperbound}, etc. slots filled with the results of the independent univariate surveillance algorithm. } \seealso{ \code{\link{algo.rki}}, \code{\link{algo.farrington}}, \code{\link{algo.cusum}}, \code{\link{algo.glrpois}}, \code{\link{algo.glrnb}}, \code{\link{algo.outbreakP}} for the exact form of the \code{control} object. } \author{M. \enc{Höhle}{Hoehle}} \keyword{classif} surveillance/man/stsBP-class.Rd0000644000176200001440000000202013122471774016150 0ustar liggesusers\name{stsBP-class} \docType{class} \alias{stsBP-class} \alias{coerce,sts,stsBP-method} \encoding{latin1} \title{Class "stsBP" -- a class inheriting from class \code{sts} which allows the user to store the results of back-projecting or nowcasting surveillance time series} \description{ A class inheriting from class \code{sts}, but with additional slots to store the result and associated confidence intervals from back projection of an \code{sts} object. } \section{Slots}{ The slots are as for \code{"\linkS4class{sts}"}. However, two additional slots exist.
\describe{ \item{\code{ci}:}{An array containing the upper and lower limit of the confidence interval.} \item{\code{lambda}:}{Back projection component} } } \section{Methods}{ The methods are the same as for \code{"\linkS4class{sts}"}. \itemize{ \item{\code{signature(from = "sts", to = "stsBP")}}{ Convert an object of class \code{sts} to class \code{stsBP}. } } } \author{M. \enc{Höhle}{Hoehle}} \keyword{classes} surveillance/man/twinSIR_simulation.Rd0000644000176200001440000003470413557773757017645 0ustar liggesusers\encoding{latin1} \name{twinSIR_simulation} \alias{simEpidata} \alias{simulate.twinSIR} \title{ Simulation of Epidemic Data } \description{ This function simulates the infection (and removal) times of an epidemic. Besides the classical SIR type of epidemic, SI, SIRS and SIS epidemics are also supported. Simulation works via the conditional intensity of infection of an individual, given some (time varying) endemic covariates and/or some distance functions (epidemic components) as well as the fixed positions of the individuals. The lengths of the infectious and removed periods are generated following a pre-specified function (can be deterministic). The \code{\link{simulate}} method for objects of class \code{"\link{twinSIR}"} simulates new epidemic data using the model and the parameter estimates of the fitted object. } \usage{ simEpidata(formula, data, id.col, I0.col, coords.cols, subset, beta, h0, f = list(), w = list(), alpha, infPeriod, remPeriod = function(ids) rep(Inf, length(ids)), end = Inf, trace = FALSE, .allocate = NULL) \method{simulate}{twinSIR}(object, nsim = 1, seed = 1, infPeriod = NULL, remPeriod = NULL, end = diff(range(object$intervals)), trace = FALSE, .allocate = NULL, data = object$data, ...) } \arguments{ \item{formula}{ an object of class \code{"\link{formula}"} (or one that can be coerced to that class): a symbolic description of the intensity model to be estimated. The details of model specification are given under Details. } \item{data}{ a data.frame containing the variables in \code{formula} and the variables specified by \code{id.col}, \code{I0.col} and \code{coords.cols} (see below). It represents the \dQuote{history} of the endemic covariates to use for the simulation. The form is similar to and can be an object of class \code{"\link{epidata}"}. The simulation period is split up into \emph{consecutive} intervals of constant endemic covariates. The data frame consists of a block of N (number of individuals) rows for each of those time intervals (all rows in a block share the same start and stop values... therefore the name \dQuote{block}), where there is one row per individual in the block. Each row describes the (fixed) state of the endemic covariates of the individual during the time interval given by the start and stop columns (specified through the lhs of \code{formula}). For the \code{simulate} method of class \code{"twinSIR"} this should be the object of class \code{"\link{epidata}"} used for the fit. This is a part of the return value of the function \code{twinSIR}, if called with argument \code{keep.data} set to \code{TRUE}. } \item{id.col}{ only if \code{data} does not inherit from \code{epidata}: single index of the \code{id} column in \code{data}. Can be numeric (by column number) or character (by column name).\cr The \code{id} column identifies the individuals in the data frame. It will be converted to a factor variable and its levels also serve to identify individuals as argument to the \code{infPeriod} function.
} \item{I0.col}{ only if \code{data} does not inherit from \code{epidata}: single index of the \code{I0} column in \code{data}. Can be numeric (by column number), character (by column name) or \code{NULL}.\cr The \code{I0} column indicates if an individual is initially infectious, i.e. it is already infectious at the beginning of the first time block. Setting \code{I0.col = NULL} is short for \dQuote{there are no initially infectious individuals}. Otherwise, the variable must be logical or in 0/1-coding. As this variable is constant over time the initially infectious individuals are derived from the first time block only. } \item{coords.cols}{ only if \code{data} does not inherit from \code{epidata}: index\emph{es} of the \code{coords} column\emph{s} in \code{data}. Can be a numeric (by column number), a character (by column name) vector or \code{NULL}.\cr These columns contain the coordinates of the individuals. It must be emphasized that the functions in this package currently assume \emph{fixed positions} of the individuals during the whole epidemic. Thus, an individual has the same coordinates in every block. For simplicity, the coordinates are derived from the first time block only. The epidemic covariates are calculated based on the Euclidean distance between the individuals, see \code{f}. } \item{subset}{ an optional vector specifying a subset of the covariate history to be used in the simulation. } \item{beta}{ numeric vector of length equal to the number of endemic (\code{cox}) terms on the rhs of \code{formula}. It contains the effects of the endemic predictor (excluding the log-baseline \code{h0}, see below) in the same order as in the formula. } \item{h0}{ \emph{either} a single number to specify a constant baseline hazard (equal to \code{exp(h0)}) \emph{or} a list of functions named \code{exact} and \code{upper}. In the latter case, \code{h0$exact} is the true log-baseline hazard function and \code{h0$upper} is a \emph{piecewise constant upper bound} for \code{h0$exact}. The function \code{h0$upper} must inherit from \code{\link{stepfun}} with \code{right=FALSE}. Theoretically, the intensity function is left-continuous, thus \code{right=TRUE} would be adequate, but in the implementation, when we evaluate the intensity at the \code{\link{knots}} (change points) of \code{h0$upper} we need its value for the subsequent interval. } \item{f, w}{ see \code{\link{as.epidata}}. } \item{alpha}{ a named numeric vector of coefficients for the epidemic covariates generated by \code{f} and \code{w}. The names are matched against \code{names(f)} and \code{names(w)}. Remember that \code{alpha >= 0}. } \item{infPeriod}{ a function generating lengths of infectious periods. It should take one parameter (e.g. \code{ids}), which is a character vector of id's of individuals, and return appropriate infectious periods for those individuals. Therefore, the value of the function should be of length \code{length(ids)}. For example, for independent and identically distributed infectious periods following \eqn{Exp(1)}, the generating function is \code{function(ids) rexp(length(ids), rate=1)}. For a constant infectious period of length c, it is sufficient to set \code{function (x) {c}}.\cr For the \code{simulate} method of class \code{"twinSIR"} only, this can also be \code{NULL} (the default), which means that the observed infectious periods of infected individuals are re-used when simulating a new epidemic and individuals with missing infectious periods (i.e.
infection and recovery were not observed) are assigned the mean observed infectious period. Note that it is even possible to simulate an SI-epidemic by setting \code{infPeriod = function (x) {Inf}}. In other words: once an individual has become infected, it spreads the disease forever, i.e. it will never be removed. } \item{remPeriod}{ a function generating lengths of removal periods. By default, once an individual has been removed, it will stay in this state forever (\code{Inf}). Therefore, it will not become at-risk (S) again and re-infections are not possible. Alternatively, always returning 0 as length of the removal period corresponds to an SIS epidemic. Any other values correspond to SIRS. Note that \code{end} should be set to a finite value in these cases. } \item{end}{ a single positive numeric value specifying the time point at which the simulation should be forced to end. By default, this is \code{Inf}, i.e. the simulation continues until there is no susceptible individual left.\cr For the \code{simulate} method of class \code{"twinSIR"} the default is to have equal simulation and observation periods. } \item{trace}{ logical (or integer) indicating if (or how often) the sets of susceptible and infected individuals as well as the rejection indicator (of the rejection sampling step) should be \code{cat}ed. Defaults to \code{FALSE}. } \item{.allocate}{ number of blocks to initially allocate for the event history (i.e. \code{.allocate*N} rows). By default (\code{NULL}), this number is set to \code{max(500, ceiling(nBlocks/100)*100)}, i.e. 500 but at least the number of blocks in \code{data} (rounded to the next multiple of 100). Each time the simulated epidemic exceeds the allocated space, the event history will be enlarged by \code{.allocate} blocks. } \item{object}{ an object of class \code{"twinSIR"}. This must contain the original \code{data} used for the fit (see \code{data}). } \item{nsim}{ number of epidemics to simulate. Defaults to 1. } \item{seed}{ an integer that will be used in the call to \code{\link{set.seed}} before simulating the epidemics. } \item{\dots}{ unused (argument of the generic). } } \details{ A model is specified through the \code{formula}, which has the form \code{cbind(start, stop) ~ cox(endemicVar1) * cox(endemicVar2)}, i.e. the right hand side has the usual form as in \code{\link{lm}}, but all variables are marked as being endemic by the special function \code{\link{cox}}. The effects of those predictor terms are specified by \code{beta}. The left hand side of the formula denotes the start and stop columns in \code{data}. This can be omitted if \code{data} inherits from class \code{"epidata"}, in which case \code{cbind(start, stop)} will be used. The epidemic model component is specified by the arguments \code{f} and \code{w} (and the associated coefficients \code{alpha}). If the epidemic model component is empty and \code{infPeriod} always returns \code{Inf}, then one actually simulates from a pure Cox model. The simulation algorithm used is \emph{Ogata's modified thinning}. For details, see \enc{Höhle}{Hoehle} (2009), Section 4. } \value{ An object of class \code{"simEpidata"}, which is a \code{data.frame} with the columns \code{"id"}, \code{"start"}, \code{"stop"}, \code{"atRiskY"}, \code{"event"}, \code{"Revent"} and the coordinate columns (with the original names from \code{data}), which are all obligatory. These columns are followed by all the variables appearing on the rhs of the \code{formula}.
Last but not least, the generated columns with epidemic covariates corresponding to the functions in the lists \code{f} and \code{w} are appended. Note that objects of class \code{"simEpidata"} also inherit from class \code{"\link{epidata}"}, thus all \code{"\link{epidata}"} methods can be applied. The \code{data.frame} is given the additional \emph{attributes} \item{"eventTimes"}{ numeric vector of infection time points (sorted chronologically). } \item{"timeRange"}{ numeric vector of length 2: \code{c(min(start), max(stop))}. } \item{"coords.cols"}{ numeric vector containing the column indices of the coordinate columns in the resulting data frame. } \item{"f"}{ this equals the argument \code{f}. } \item{"w"}{ this equals the argument \code{w}. } \item{"config"}{ a list with elements \code{h0 = h0$exact}, \code{beta} and \code{alpha}. } \item{call}{the matched call.} \item{terms}{the \code{terms} object used.} If \code{nsim > 1} epidemics are simulated by the \code{simulate}-method for fitted \code{"twinSIR"} models, these are returned in a list. } \references{ \enc{Höhle}{Hoehle}, M. (2009), Additive-Multiplicative Regression Models for Spatio-Temporal Epidemics, \emph{Biometrical Journal}, 51(6):961-978. } \author{ Sebastian Meyer and Michael \enc{Höhle}{Hoehle} } \seealso{ The \code{\link{plot.epidata}} and \code{\link{animate.epidata}} methods for plotting and animating (simulated) epidemic data, respectively. The \code{\link{intensityplot.simEpidata}} method for plotting paths of infection intensities. Function \code{\link{twinSIR}} for fitting spatio-temporal epidemic intensity models to epidemic data. } \examples{ ## Generate a data frame containing a hypothetical population with 100 individuals set.seed(1234) n <- 100 pos <- matrix(rnorm(n*2), ncol=2, dimnames=list(NULL, c("x", "y"))) pop <- data.frame(id=1:n, x=pos[,1], y=pos[,2], gender=sample(0:1, n, replace=TRUE), I0col=c(rep(1,3),rep(0,n-3)), # 3 initially infectious start=rep(0,n), stop=rep(Inf,n)) ## Simulate an SIR epidemic in this population set.seed(123) infPeriods <- setNames(c(1:3/10, rexp(n-3, rate=1)), 1:n) epi <- simEpidata( cbind(start,stop) ~ cox(gender), data = pop, id = "id", I0.col = "I0col", coords.cols = c("x","y"), beta = c(-2), h0 = -1, alpha = c(B1=0.1), f = list(B1=function(u) u<=1), infPeriod = function(ids) infPeriods[ids], ##remPeriod = function(ids) rexp(length(ids), rate=0.1), end = 30 # -> SIRS ) ## extract event times by id head(summary(epi)$byID) ## Plot the numbers of susceptible, infectious and removed individuals plot(epi) ## load the 1861 Hagelloch measles epidemic data("hagelloch") summary(hagelloch) plot(hagelloch) ## fit a simplistic twinSIR model fit <- twinSIR(~ household, data = hagelloch) ## simulate a new epidemic from the above model ## with simulation period = observation period, re-using observed infPeriods sim1 <- simulate(fit, data = hagelloch) plot(sim1) ## check if we find similar parameters in the simulated epidemic fitsim1 <- update(fit, data = sim1) cbind(base = coef(fit), new = coef(fitsim1)) if (surveillance.options("allExamples")) { ## simulate only 10 days, using random infPeriods ~ Exp(0.1) sim2 <- simulate(fit, data = hagelloch, seed = 2, end = 10, infPeriod = function(ids) rexp(length(ids), rate = 0.1)) plot(sim2) ## simulate from a different model with manually specified parameters set.seed(321) simepi <- simEpidata(~ cox(AGE), data = hagelloch, beta = c(0.1), h0 = -4, alpha = c(household = 0.05), f = list(household = function(u) u == 0), infPeriod = function(ids)
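                               # i.i.d. exponential infectious periods
                               # with mean 8 time units: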
rexp(length(ids), rate=1/8)) plot(simepi) intensityplot(simepi) ## see if we correctly estimate the parameters fitsimepi <- twinSIR(~ cox(AGE) + household, data = simepi) cbind(true = c(0.05, -4, 0.1), est = coef(fitsimepi), confint(fitsimepi)) } } \keyword{datagen} \keyword{models} surveillance/man/multiplicity.Rd0000644000176200001440000000065013777627613016554 0ustar liggesusers\name{multiplicity} \alias{multiplicity} \docType{import} \title{Import from package \pkg{spatstat.geom}} \description{ The generic function \code{multiplicity} is imported from package \pkg{spatstat.geom}. See \code{\link[spatstat.geom:multiplicity]{spatstat.geom::multiplicity}} for \pkg{spatstat.geom}'s own methods, and \code{\link{multiplicity.Spatial}} for the added method for \code{\linkS4class{Spatial}} objects. } surveillance/man/epidata.Rd0000644000176200001440000003751513446347317015436 0ustar liggesusers\encoding{latin1} \name{epidata} \alias{as.epidata} \alias{as.epidata.data.frame} \alias{as.epidata.default} \alias{print.epidata} \alias{[.epidata} \alias{update.epidata} \alias{epidata} \title{ Continuous-Time SIR Event History of a Fixed Population } \description{ The function \code{as.epidata} is used to generate objects of class \code{"epidata"}. Objects of this class are specific data frames containing the event history of an epidemic together with some additional attributes. These objects are the basis for fitting spatio-temporal epidemic intensity models with the function \code{\link{twinSIR}}. Their implementation is illustrated in Meyer et al. (2017, Section 4), see \code{vignette("twinSIR")}. Note that the spatial information itself, i.e. the positions of the individuals, is assumed to be constant over time. Besides epidemics following the SIR compartmental model, data from SI, SIRS and SIS epidemics may also be supplied. } \usage{ as.epidata(data, ...) \method{as.epidata}{data.frame}(data, t0, tE.col, tI.col, tR.col, id.col, coords.cols, f = list(), w = list(), D = dist, max.time = NULL, keep.cols = TRUE, ...) \method{as.epidata}{default}(data, id.col, start.col, stop.col, atRiskY.col, event.col, Revent.col, coords.cols, f = list(), w = list(), D = dist, .latent = FALSE, ...) \method{print}{epidata}(x, ...) \method{[}{epidata}(x, i, j, drop) \method{update}{epidata}(object, f = list(), w = list(), D = dist, ...) } \arguments{ \item{data}{ For the \code{data.frame}-method, a data frame with as many rows as there are individuals in the population and time columns indicating when each individual became exposed (optional), infectious (mandatory, but can be \code{NA} for non-affected individuals) and removed (optional). Note that this data format does not allow for re-infection (SIRS) and time-varying covariates. The \code{data.frame}-method converts the individual-indexed data frame to the long event history start/stop format and then feeds it into the default method. If calling the generic function \code{as.epidata} on a \code{data.frame} and the \code{t0} argument is missing, the default method is called directly.\cr For the default method, \code{data} can be a \code{\link{matrix}} or a \code{\link{data.frame}}. It must contain the observed event history in a form similar to \code{Surv(, type="counting")} in package \pkg{survival}, with additional information (variables) along the process. Rows will be sorted automatically during conversion. The observation period is split up into \emph{consecutive} intervals of constant state, and thus constant infection intensities.
The data frame consists of a block of \eqn{N} (number of individuals) rows for each of those time intervals (all rows in a block have the same start and stop values\dots therefore the name \dQuote{block}), where there is one row per individual in the block. Each row describes the (fixed) state of the individual during the interval given by the start and stop columns \code{start.col} and \code{stop.col}.\cr Note that there may not be more than one event (infection or removal) in a single block. Thus, in a single block, only one entry in the \code{event.col} and \code{Revent.col} may be 1, all others are 0. This rule follows the point process characteristic that there are no concurrent events (infections or removals). } \item{t0,max.time}{ observation period. In the resulting \code{"epidata"}, the time scale will be relative to the start time \code{t0}. Individuals that have already been removed prior to \code{t0}, i.e., rows with \code{tR <= t0}, will be dropped. The end of the observation period (\code{max.time}) will by default (\code{NULL}, or if \code{NA}) coincide with the last observed event. } \item{tE.col, tI.col, tR.col}{ single numeric or character indexes of the time columns in \code{data}, which specify when the individuals became exposed, infectious and removed, respectively. \code{tE.col} and \code{tR.col} can be missing, corresponding to SIR, SEI, or SI data. \code{NA} entries mean that the respective event has not (yet) occurred. Note that \code{is.na(tE)} implies \code{is.na(tI)} and \code{is.na(tR)}, and \code{is.na(tI)} implies \code{is.na(tR)} (and this is checked for the provided data).\cr CAVE: Support for latent periods (\code{tE.col}) is experimental! \code{\link{twinSIR}} cannot handle them anyway. } \item{id.col}{ single numeric or character index of the \code{id} column in \code{data}. The \code{id} column identifies the individuals in the data frame. It is converted to a factor by calling \code{\link{factor}}, i.e., unused levels are dropped if it already was a factor. } \item{start.col}{ single index of the \code{start} column in \code{data}. Can be numeric (by column number) or character (by column name). The \code{start} column contains the (numeric) time points of the beginnings of the consecutive time intervals of the event history. The minimum value in this column, i.e. the start of the observation period, should be 0. } \item{stop.col}{ single index of the \code{stop} column in \code{data}. Can be numeric (by column number) or character (by column name). The \code{stop} column contains the (numeric) time points of the ends of the consecutive time intervals of the event history. The stop value must always be greater than the start value of a row. } \item{atRiskY.col}{ single index of the \code{atRiskY} column in \code{data}. Can be numeric (by column number) or character (by column name). The \code{atRiskY} column indicates if the individual was \dQuote{at-risk} of becoming infected during the time interval (start; stop]. This variable must be logical or in 0/1-coding. Individuals with \code{atRiskY == 0} in the first time interval (normally the rows with \code{start == 0}) are taken as \emph{initially infectious}. } \item{event.col}{ single index of the \code{event} column in \code{data}. Can be numeric (by column number) or character (by column name). The \code{event} column indicates if the individual became \emph{infected} at the \code{stop} time of the interval. This variable must be logical or in 0/1-coding.
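To make the block structure and the 0/1-coding described above concrete, a hypothetical event history for two individuals \dQuote{a} and \dQuote{b} (where \dQuote{b} gets infected at time 1) might start as follows -- a sketch for illustration only, not generated by the package:

## one block of N = 2 rows per time interval; the infection of "b" is
## marked by event = 1 at the stop time of the first block, after which
## "b" is no longer at risk
toyEH <- data.frame(
  id      = c("a", "b", "a", "b"),
  start   = c(0, 0, 1, 1),
  stop    = c(1, 1, 2, 2),
  atRiskY = c(1, 1, 1, 0),
  event   = c(0, 1, 0, 0),
  Revent  = c(0, 0, 0, 0)
)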
} \item{Revent.col}{ single index of the \code{Revent} column in \code{data}. Can be numeric (by column number) or character (by column name). The \code{Revent} column indicates if the individual was \emph{recovered} at the \code{stop} time of the interval. This variable must be logical or in 0/1-coding. } \item{coords.cols}{ index\emph{es} of the \code{coords} column\emph{s} in \code{data}. Can be numeric (by column number), character (by column name), or \code{NULL} (no coordinates, e.g., if \code{D} is a pre-specified distance matrix). These columns contain the individuals' coordinates, which determine the distance matrix for the distance-based components of the force of infection (see argument \code{f}). By default, Euclidean distance is used (see argument \code{D}).\cr Note that the functions related to \code{\link{twinSIR}} currently assume \emph{fixed positions} of the individuals during the whole epidemic. Thus, an individual has the same coordinates in every block. For simplicity, the coordinates are derived from the first time block only (normally the rows with \code{start == 0}).\cr The \code{\link[=animate.epidata]{animate}}-method requires coordinates. } \item{f}{ a \emph{named} list of \emph{vectorized} functions for a distance-based force of infection. The functions must operate elementwise on a (distance) matrix \code{D} so that \code{f[[m]](D)} results in a matrix. A simple example is \code{function(u) {u <= 1}}, which indicates if the Euclidean distance between the individuals is smaller than or equal to 1. The names of the functions determine the names of the epidemic variables in the resulting data frame. So, the names should not coincide with names of other covariates. The distance-based weights are computed as follows: Let \eqn{I(t)} denote the set of infectious individuals just before time \eqn{t}. Then, for individual \eqn{i} at time \eqn{t}, the \eqn{m}'th covariate has the value \eqn{\sum_{j \in I(t)} f_m(d_{ij})}{\sum_{j in I(t)} f[[m]](d[i,j])}, where \eqn{d_{ij}}{d[i,j]} denotes entries of the distance matrix (by default this is the Euclidean distance \eqn{||s_i - s_j||} between the individuals' coordinates, but see argument \code{D}). } \item{w}{ a \emph{named} list of \emph{vectorized} functions for extra covariate-based weights \eqn{w_{ij}}{w_ij} in the epidemic component. Each function operates on a single time-constant covariate in \code{data}, which is determined by the name of the first argument: The two function arguments should be named \code{varname.i} and \code{varname.j}, where \code{varname} is one of \code{names(data)}. Similar to the components in \code{f}, \code{length(w)} epidemic covariates will be generated in the resulting \code{"epidata"} named according to \code{names(w)}. So, the names should not coincide with names of other covariates. For individual \eqn{i} at time \eqn{t}, the \eqn{m}'th such covariate has the value \eqn{\sum_{j \in I(t)} w_m(z^{(m)}_i, z^{(m)}_j)}, where \eqn{z^{(m)}} denotes the variable in \code{data} associated with \code{w[[m]]}. } \item{D}{ either a function to calculate the distances between the individuals with locations taken from \code{coords.cols} (the default is Euclidean distance via the function \code{\link{dist}}) and the result converted to a matrix via \code{\link{as.matrix}}, or a pre-computed distance matrix with \code{dimnames} containing the individual ids (a classed \code{"\linkS4class{Matrix}"} is supported).
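As a rough sketch of the second possibility (hypothetical code reusing the \code{hagelloch} example data from below; the covariate name \code{near} and the distance threshold of 50 are made up for illustration):

## pre-compute the Euclidean distance matrix, labelled by the individual ids
data("hagelloch")
D <- as.matrix(dist(hagelloch.df[c("x.loc", "y.loc")]))
dimnames(D) <- list(as.character(hagelloch.df$PN), as.character(hagelloch.df$PN))
## supply it instead of coordinate-based distance computation
myEpiD <- update(hagelloch, f = list(near = function(u) u <= 50), D = D)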
} \item{keep.cols}{ logical indicating if all columns in \code{data} should be retained (and not only the obligatory \code{"epidata"} columns), in particular any additional columns with time-constant individual-specific covariates. Alternatively, \code{keep.cols} can be a numeric or character vector indexing columns of \code{data} to keep. } \item{.latent}{ (internal) logical indicating whether to allow for latent periods (EXPERIMENTAL). Otherwise (default), the function verifies that an event (i.e., switching to the I state) only happens when the respective individual is at risk (i.e., in the S state). } \item{x,object}{ an object of class \code{"epidata"}. } \item{\dots}{ arguments passed to \code{\link{print.data.frame}}. Currently unused in the \code{as.epidata}-methods. } \item{i,j,drop}{ arguments passed to \code{\link{[.data.frame}}. } } \details{ The \code{print} method for objects of class \code{"epidata"} simply prints the data frame with a small header containing the time range of the observed epidemic and the number of infected individuals. Usually, the data frames are quite long, so the summary method \code{\link{summary.epidata}} might be useful. Also, indexing/subsetting \code{"epidata"} works exactly as for \code{\link[=[.data.frame]{data.frame}}s, but there is a dedicated method, which ensures consistency of the resulting \code{"epidata"} or drops this class if necessary. The \code{update}-method can be used to add or replace distance-based (\code{f}) or covariate-based (\code{w}) epidemic variables in an existing \code{"epidata"} object. SIS epidemics are implemented as SIRS epidemics where the length of the removal period equals 0. This means that an individual that has an R-event will be at risk immediately afterwards, i.e. in the following time block. Therefore, data of SIS epidemics have to be provided in that form, containing \dQuote{pseudo-R-events}. } \note{ The column name \code{"BLOCK"} is a reserved name. This column will be added automatically at conversion and the resulting data frame will be sorted by this column and by id. Also the names \code{"id"}, \code{"start"}, \code{"stop"}, \code{"atRiskY"}, \code{"event"} and \code{"Revent"} are reserved for the respective columns only. } \value{ a \code{data.frame} with the columns \code{"BLOCK"}, \code{"id"}, \code{"start"}, \code{"stop"}, \code{"atRiskY"}, \code{"event"}, \code{"Revent"} and the coordinate columns (with the original names from \code{data}), which are all obligatory. These columns are followed by any remaining columns of the input \code{data}. Last but not least, the newly generated columns with epidemic variables corresponding to the functions in the list \code{f} are appended, if \code{length(f) > 0}. The \code{data.frame} is given the additional \emph{attributes} \item{"eventTimes"}{ numeric vector of infection time points (sorted chronologically). } \item{"timeRange"}{ numeric vector of length 2: \code{c(min(start), max(stop))}. } \item{"coords.cols"}{ numeric vector containing the column indices of the coordinate columns in the resulting data frame. } \item{"f"}{ this equals the argument \code{f}. } \item{"w"}{ this equals the argument \code{w}. } } \author{ Sebastian Meyer } \seealso{ The \code{\link{hagelloch}} data as an example. The \code{\link[=plot.epidata]{plot}} and the \code{\link[=summary.epidata]{summary}} method for class \code{"epidata"}. Furthermore, the function \code{\link{animate.epidata}} for the animation of epidemics.
Function \code{\link{twinSIR}} for fitting spatio-temporal epidemic intensity models to epidemic data. Function \code{\link{simEpidata}} for the simulation of epidemic data. } \references{ Meyer, S., Held, L. and \enc{Höhle}{Hoehle}, M. (2017): Spatio-temporal analysis of epidemic phenomena using the \R package \pkg{surveillance}. \emph{Journal of Statistical Software}, \bold{77} (11), 1-55. \doi{10.18637/jss.v077.i11} } \examples{ data("hagelloch") # see help("hagelloch") for a description head(hagelloch.df) ## convert the original data frame to an "epidata" event history myEpi <- as.epidata(hagelloch.df, t0 = 0, tI.col = "tI", tR.col = "tR", id.col = "PN", coords.cols = c("x.loc", "y.loc"), keep.cols = c("SEX", "AGE", "CL")) \dontshow{ ## test consistency with default method evHist <- as.data.frame(myEpi)[,-1] myEpi2 <- as.epidata( evHist, id.col = 1, start.col = "start", stop.col = "stop", atRiskY.col = "atRiskY", event.col = "event", Revent.col = "Revent", coords.cols = c("x.loc", "y.loc") ) stopifnot(identical(myEpi, myEpi2)) } str(myEpi) head(as.data.frame(myEpi)) # "epidata" has event history format summary(myEpi) # see 'summary.epidata' plot(myEpi) # see 'plot.epidata' and also 'animate.epidata' ## add distance- and covariate-based weights for the force of infection ## in a twinSIR model, see vignette("twinSIR") for a description myEpi <- update(myEpi, f = list( household = function(u) u == 0, nothousehold = function(u) u > 0 ), w = list( c1 = function (CL.i, CL.j) CL.i == "1st class" & CL.j == CL.i, c2 = function (CL.i, CL.j) CL.i == "2nd class" & CL.j == CL.i ) ) ## this is now identical to the prepared hagelloch "epidata" stopifnot(all.equal(myEpi, hagelloch)) \dontshow{ ## test with precomputed distance matrix D myEpi3 <- suppressWarnings( # from overwriting existing f columns update(hagelloch, f = attr(hagelloch, "f"), D = as.matrix(dist(hagelloch.df[c("x.loc", "y.loc")]))) ) stopifnot(identical(hagelloch, myEpi3)) } } \keyword{spatial} \keyword{classes} \keyword{manip} surveillance/man/multiplicity.Spatial.Rd0000644000176200001440000000306513777627613020153 0ustar liggesusers\name{multiplicity.Spatial} \alias{multiplicity.Spatial} \title{ Count Number of Instances of Points } \description{ The generic function \code{multiplicity} defined in \pkg{spatstat.geom} is intended to count the number of duplicates of each element of an object. \pkg{spatstat.geom} already offers methods for point patterns, matrices and data frames, and here we add a method for \code{Spatial} objects from the \pkg{sp} package. It is a wrapper for the default method, which effectively computes the distance matrix of the points, and then just counts the number of zeroes in each row. } \usage{ \method{multiplicity}{Spatial}(x) } \arguments{ \item{x}{ a \code{"\linkS4class{Spatial}"} object (we only need a \code{\link{coordinates}}-method), e.g. of class \code{"\linkS4class{SpatialPoints}"}. } } \value{ an integer vector containing the number of instances of each point of the object. } \seealso{ \code{\link[spatstat.geom]{multiplicity}} in package \pkg{spatstat.geom}. See the Examples of the \code{\link{hagelloch}} data for a specific use of \code{multiplicity}.
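Conceptually, the computation described above (distance matrix, then counting zero distances per row) could be sketched in plain \R as follows; this is an illustration of the idea, not the actual implementation, and the helper name \code{count_instances} is made up:

count_instances <- function(coords) {
  D <- as.matrix(dist(coords))  # pairwise Euclidean distances
  rowSums(D == 0)               # points at zero distance are identical
                                # (the diagonal counts the point itself)
}
## count_instances(coordinates(foo)) should agree with multiplicity(foo)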
} \examples{ foo <- SpatialPoints(matrix(c(1,2, 2,3, 1,2, 4,5), 4, 2, byrow=TRUE)) multiplicity(foo) # the following function determines the multiplicities in a matrix # or data frame and returns unique rows with appended multiplicity countunique <- function(x) unique(cbind(x, count=multiplicity(x))) countunique(coordinates(foo)) } \keyword{utilities} \keyword{spatial} surveillance/man/algo.cusum.Rd0000644000176200001440000001146313122471774016100 0ustar liggesusers\name{algo.cusum} \alias{algo.cusum} \title{CUSUM method} \encoding{latin1} \description{ Approximate one-sided CUSUM method for a Poisson variate based on the cumulative sum of the deviation between a reference value k and the transformed observed values. An alarm is raised if the cumulative sum equals or exceeds a prespecified decision boundary h. The function can handle time-varying expectations. } \usage{ algo.cusum(disProgObj, control = list(range = range, k = 1.04, h = 2.26, m = NULL, trans = "standard", alpha = NULL)) } \arguments{ \item{disProgObj}{object of class disProg (including the observed and the state chain)} \item{control}{control object: \describe{ \item{\code{range}}{determines the desired time points which should be evaluated} \item{\code{k}}{is the reference value} \item{\code{h}}{the decision boundary} \item{\code{m}}{how to determine the expected number of cases -- the following arguments are possible \describe{ \item{\code{numeric}}{a vector of values having the same length as \code{range}. If a single numeric value is specified then this value is replicated \code{length(range)} times.} \item{\code{NULL}}{A single value is estimated by taking the mean of all observations previous to the first \code{range} value.} \item{\code{"glm"}}{ A GLM of the form \deqn{\log(m_t) = \alpha + \beta t + \sum_{s=1}^S (\gamma_s \sin(\omega_s t) + \delta_s \cos(\omega_s t)),} where \eqn{\omega_s = \frac{2\pi}{52}s}{\omega_s = 2\pi/52 s} are the Fourier frequencies, is fitted. Then this model is used to predict the \code{range} values.} }} \item{\code{trans}}{one of the following transformations (warning: Anscombe and NegBin transformations are experimental) \describe{ \item{\code{rossi}}{standardized variables z3 as proposed by Rossi} \item{\code{standard}}{standardized variables z1 (based on asymptotic normality) - This is the default.} \item{\code{anscombe}}{Anscombe residuals -- experimental} \item{\code{anscombe2nd}}{ Anscombe residuals as in Pierce and Schafer (1986) based on 2nd order approximation of E(X) -- experimental} \item{\code{pearsonNegBin}}{compute Pearson residuals for NegBin -- experimental} \item{\code{anscombeNegBin}}{Anscombe residuals for NegBin -- experimental} \item{\code{none}}{ no transformation} } } \item{\code{alpha}}{parameter of the negative binomial distribution, such that the variance is \eqn{m + \alpha m^2} } } } } \value{ \code{algo.cusum} gives a list of class \code{"survRes"} which includes the vector of alarm values for every timepoint in \code{range} and the vector of cumulative sums for every timepoint in \code{range} for the system specified by \code{k} and \code{h}, the range and the input object of class \code{"disProg"}. The \code{upperbound} entry shows for each time instance the number of diseased individuals it would have taken for the CUSUM to signal. Once the CUSUM signals, no resetting is applied, i.e. signals occur until the CUSUM statistic again returns below the threshold.
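The underlying decision rule can be sketched in a few lines of \R (a conceptual illustration of the one-sided CUSUM recursion on already transformed values \code{z}, using the default \code{k} and \code{h} from above; the package function additionally takes care of the transformations and time-varying expectations):

cusum_sketch <- function(z, k = 1.04, h = 2.26) {
  S <- numeric(length(z))
  for (t in seq_along(z)) {
    S[t] <- max(0, (if (t > 1) S[t - 1] else 0) + z[t] - k)
  }
  list(cusum = S, alarm = S >= h)  # no resetting of S after an alarm
}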
In case \code{control$m="glm"} was used, the returned \code{control$m.glm} entry contains the fitted \code{"glm"} object. } \note{This implementation is experimental, but will not be developed further.} \author{M. Paul and M. \enc{Höhle}{Hoehle}} \examples{ # Xi ~ Po(5), i=1,...,500 disProgObj <- create.disProg(week=1:500, observed= rpois(500,lambda=5), state=rep(0,500)) # there should be no alarms as mean doesn't change res <- algo.cusum(disProgObj, control = list(range = 100:500,trans="anscombe")) plot(res) # simulated data disProgObj <- sim.pointSource(p = 1, r = 1, length = 250, A = 0, alpha = log(5), beta = 0, phi = 10, frequency = 10, state = NULL, K = 0) plot(disProgObj) # Test week 200 to 250 for outbreaks surv <- algo.cusum(disProgObj, control = list(range = 200:250)) plot(surv) } \references{ G. Rossi, L. Lampugnani and M. Marchi (1999), An approximate CUSUM procedure for surveillance of health events, Statistics in Medicine, 18, 2111--2122 D. A. Pierce and D. W. Schafer (1986), Residuals in Generalized Linear Models, Journal of the American Statistical Association, 81, 977--986 } \keyword{classif} surveillance/man/twinstim.Rd0000644000176200001440000006033714004512307015673 0ustar liggesusers\encoding{latin1} \name{twinstim} \alias{twinstim} \title{ Fit a Two-Component Spatio-Temporal Point Process Model } \description{ A \code{twinstim} model as described in Meyer et al. (2012) is fitted to marked spatio-temporal point process data. This constitutes a regression approach for conditional intensity function modelling. The implementation is illustrated in Meyer et al. (2017, Section 3), see \code{vignette("twinstim")}. } \usage{ twinstim(endemic, epidemic, siaf, tiaf, qmatrix = data$qmatrix, data, subset, t0 = data$stgrid$start[1], T = tail(data$stgrid$stop,1), na.action = na.fail, start = NULL, partial = FALSE, epilink = "log", control.siaf = list(F = list(), Deriv = list()), optim.args = list(), finetune = FALSE, model = FALSE, cumCIF = FALSE, cumCIF.pb = interactive(), cores = 1, verbose = TRUE) } \arguments{ \item{endemic}{ right-hand side formula for the exponential (Cox-like multiplicative) endemic component. May contain offsets (to be marked by the special function \code{offset}). If omitted or \code{~0} there will be no endemic component in the model. A type-specific endemic intercept can be requested by including the term \code{(1|type)} in the formula. } \item{epidemic}{ formula representing the epidemic model for the event-specific covariates (marks) determining infectivity. Offsets are not implemented here. If omitted or \code{~0} there will be no epidemic component in the model. } \item{siaf}{ spatial interaction function. Possible specifications are: \itemize{ \item \code{NULL} or missing, corresponding to \code{siaf.constant()}, i.e. spatially homogeneous infectivity independent of the distance from the host \item a list as returned by \code{\link{siaf}} or, more commonly, generated by a predefined interaction function such as \code{\link{siaf.gaussian}} as in Meyer et al. (2012) or \code{\link{siaf.powerlaw}} as in Meyer and Held (2014). The latter requires unique event locations, possibly after random tie-breaking (\code{\link{untie}}) or imputation of interval-censored locations. \code{\link{siaf.exponential}} is a simpler alternative. \item a numeric vector corresponding to the knots of a step function, i.e.
the same as \code{\link{siaf.step}(knots)} } If you run into \dQuote{false convergence} with a non-constant \code{siaf} specification, the numerical accuracy of the cubature methods is most likely too low (see the \code{control.siaf} argument). } \item{tiaf}{ temporal interaction function. Possible specifications are: \itemize{ \item \code{NULL} or missing, corresponding to \code{tiaf.constant()}, i.e. time-constant infectivity \item a list as returned by \code{\link{tiaf}} or by a predefined interaction function such as \code{\link{tiaf.exponential}} \item a numeric vector corresponding to the knots of a step function, i.e. the same as \code{\link{tiaf.step}(knots)} } } \item{qmatrix}{ square indicator matrix (0/1 or \code{FALSE}/\code{TRUE}) for possible transmission between the event types. The matrix will be internally converted to \code{logical}. Defaults to the \eqn{Q} matrix specified in \code{data}. } \item{data}{ an object of class \code{"\link{epidataCS}"}. } \item{subset}{ an optional vector evaluating to logical indicating a subset of \code{data$events} to keep. Missing values are taken as \code{FALSE}. The expression is evaluated in the context of the \code{data$events@data} \code{data.frame}, i.e. columns of this \code{data.frame} may be referenced directly by name. } \item{t0, T}{ events having occurred during (-Inf;t0] are regarded as part of the prehistory \eqn{H_0} of the process. Only events that occurred in the interval (t0; T] are considered in the likelihood. The time point \code{t0} (\code{T}) must be an element of \code{data$stgrid$start} (\code{data$stgrid$stop}). The default time range covers the whole spatio-temporal grid of endemic covariates. } \item{na.action}{ how to deal with missing values in \code{data$events}? Do not use \code{\link{na.pass}}. Missing values in the spatio-temporal grid \code{data$stgrid} are not accepted. } \item{start}{ a named vector of initial values for (a subset of) the parameters. The names must conform to the conventions of \code{twinstim} to be assigned to the correct model terms. For instance, \code{"h.(Intercept)"} = endemic intercept, \code{"h.I(start/365)"} = coefficient of a linear time trend in the endemic component, \code{"h.factorB"} = coefficient of the level B of the factor variable \code{factor} in the endemic predictor, \code{"e.(Intercept)"} = epidemic intercept, \code{"e.VAR"} = coefficient of the epidemic term \code{VAR}, \code{"e.siaf.2"} = second \code{siaf} parameter, \code{"e.tiaf.1"} = first \code{tiaf} parameter. Elements which don't match any of the model parameters are ignored. Alternatively, \code{start} may also be a named list with elements \code{"endemic"} or \code{"h"}, \code{"epidemic"} or \code{"e"}, \code{"siaf"} or \code{"e.siaf"}, and \code{"tiaf"} or \code{"e.tiaf"}, each of which containing a named numeric vector with the term labels as names (i.e. without the prefix \code{"h."}, \code{"e."}, etc). Thus, \code{start=list(endemic=c("(Intercept)"=-10))} is equivalent to \code{start=c("h.(Intercept)"=-10)}. } \item{partial}{ logical indicating if a partial likelihood similar to the approach by Diggle et al. (2010) should be used (default is \code{FALSE}). Note that the partial likelihood implementation is not well tested. } \item{epilink}{ a character string determining the link function to be used for the \code{epidemic} linear predictor of event marks. By default, the log-link is used. 
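As a small illustration of the \code{start} naming conventions described above (a sketch; the values are arbitrary):

## equivalent starting values for the endemic and epidemic intercepts
start1 <- c("h.(Intercept)" = -10, "e.(Intercept)" = -9)
start2 <- list(endemic = c("(Intercept)" = -10),
               epidemic = c("(Intercept)" = -9))
## either could be passed as twinstim(..., start = start1)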
The experimental alternative \code{epilink = "identity"} (for use by \code{\link{epitest}}) does not guarantee the force of infection to be positive. If this leads to a negative total intensity (endemic + epidemic), the point process is not well defined (the log-likelihood will be \code{\link{NaN}}). } \item{control.siaf}{ a list with elements \code{"F"} and \code{"Deriv"}, which are lists of extra arguments passed to the functions \code{siaf$F} and \code{siaf$Deriv}, respectively.\cr These arguments control the accuracy of the cubature routines from package \pkg{polyCub} involved in non-constant \code{siaf} specifications, e.g., the bandwidth of the midpoint rule \code{\link{polyCub.midpoint}}, the number of Gaussian quadrature points for \code{\link{polyCub.SV}}, or the relative tolerance of \code{\link{integrate}} in \code{\link{polyCub.iso}}.\cr For instance, \code{\link{siaf.gaussian}(F.adaptive = TRUE)} uses the midpoint-cubature \code{\link{polyCub.midpoint}} with an adaptive bandwidth of \code{eps=adapt*sd} to numerically integrate the kernel \eqn{f(\bold{s})}, and the default \code{adapt} value (0.1) can be overwritten by setting \code{control.siaf$F$adapt}. However, the default version \code{siaf.gaussian()} as well as \code{\link{siaf.powerlaw}()} and friends use \code{\link{polyCub.iso}} and thus accept control arguments for the standard \code{\link{integrate}} routine (such as \code{rel.tol}) via \code{control.siaf$F} and \code{control.siaf$Deriv}.\cr This argument list is ignored in the case \code{siaf=siaf.constant()} (which is the default if \code{siaf} is unspecified). } \item{optim.args}{ an argument list passed to \code{\link{optim}}, or \code{NULL}, in which case no optimization will be performed but the necessary functions will be returned in a list (similar to what is returned if \code{model = TRUE}). Initial values for the parameters may be given as list element \code{par} in the order \code{(endemic, epidemic, siaf, tiaf)}. If no initial values are provided, crude estimates will be used for the endemic intercept and the Gaussian kernel, -9 for the epidemic intercept, and zeroes for the remaining parameters. Any initial values given in the \code{start} argument take precedence over those in \code{par}. Note that \code{optim} receives the negative log-likelihood for minimization (thus, if used, \code{optim.args$control$fnscale} should be positive). The \code{hessian} argument defaults to \code{TRUE}, and in the \code{control} list, \code{trace}ing is enabled with \code{REPORT=1} by default. By setting \code{optim.args$control$trace = 0}, all output from the optimization routine is suppressed. For the \code{partial} likelihood, the analytic score function and the Fisher information are not implemented and the default is to use robust \code{method="Nelder-Mead"} optimization. There may be an extra component \code{fixed} in the \code{optim.args} list, which determines which parameters should stick to their initial values. This can be specified by a logical vector of the same length as the \code{par} component, by an integer vector indexing \code{par} or by a character vector following the \code{twinstim} naming conventions. Furthermore, if \code{isTRUE(fixed)}, then all parameters are fixed at their initial values and no optimization is performed. Importantly, the \code{method} argument in the \code{optim.args} list may also be \code{"nlminb"}, in which case the \code{\link{nlminb}} optimizer is used. This is also the default for full likelihood inference. 
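For concreteness, a hedged sketch of an \code{optim.args} specification following the rules above (parameter names follow the \code{twinstim} naming conventions; the choice of values is arbitrary):

optim.args = list(
  method = "nlminb",            # the default for full likelihood inference
  control = list(REPORT = 5),   # renamed to 'trace' for nlminb
  fixed = "e.siaf.1"            # hold this parameter at its initial value
)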
In this case, not only the score function but also the \emph{expected} Fisher information can be used during optimization (as estimated by what Martinussen and Scheike (2006, p. 64) call the \dQuote{optional variation process}, or see Rathbun (1996, equation (4.7))). In our experience this gives better convergence than \code{optim}'s methods. For \code{method="nlminb"}, the following parameters of the \code{optim.args$control} list may be named like for \code{optim} and are renamed appropriately: \code{maxit} (-> \code{iter.max}), \code{REPORT} (-> \code{trace}, default: 1), \code{abstol} (-> \code{abs.tol}), and \code{reltol} (-> \code{rel.tol}, default: \code{1e-6}). For \code{nlminb}, a logical \code{hessian} argument (default: \code{TRUE}) indicates if the negative \emph{expected} Fisher information matrix should be used as the Hessian during optimization (otherwise a numerical approximation is used). Similarly, \code{method="nlm"} should also work but is not recommended here. } \item{finetune}{ logical indicating if a second maximisation should be performed with robust Nelder-Mead \code{optim} using the resulting parameters from the first maximisation as starting point. This argument is only considered if \code{partial = FALSE} and the default is to not conduct a second maximisation (in most cases this does not improve upon the MLE). } \item{model}{ logical indicating if the model environment should be kept with the result, which is required for \code{\link[=intensityplot.twinstim]{intensityplot}}s and \code{\link[=R0.twinstim]{R0}(..., trimmed = FALSE)}. Specifically, if \code{model=TRUE}, the return value will have the evaluation environment set as its \code{\link{environment}}, and the returned \code{functions} element will contain the log-likelihood function (or partial log-likelihood function, if \code{partial = TRUE}), and optionally the score and the expected Fisher information functions (not for the partial likelihood, and only if \code{siaf} and \code{tiaf} provide the necessary derivatives).\cr Note that fitted objects with a model environment might consume quite a lot of memory since they contain the \code{data}. } \item{cumCIF}{ logical (default: \code{FALSE}) indicating whether to calculate the fitted cumulative ground intensity at event times. This is the residual process, see \code{\link{residuals.twinstim}}. } \item{cumCIF.pb}{ logical indicating if a progress bar should be shown during the calculation of \code{cumCIF}. Defaults to do so in an interactive \R session, and will be \code{FALSE} if \code{cores != 1}. } \item{cores}{ number of processes to use in parallel operation. By default \code{twinstim} runs in single-CPU mode. Currently, only the \pkg{multicore}-type of parallel computing via forking is supported, which is not available on Windows, see \code{\link[parallel]{mclapply}} in package \pkg{parallel}. Note that for a \pkg{memoise}d \code{\link{siaf.step}} kernel, \code{cores=1} is fixed internally since parallelization would slow down model fitting significantly. } \item{verbose}{ logical indicating if information should be printed during execution. Defaults to \code{TRUE}. } } \details{ The function performs maximum likelihood inference for the additive-multiplicative spatio-temporal intensity model described in Meyer et al. (2012). It uses \code{\link{nlminb}} as the default optimizer and returns an object of class \code{twinstim}. Such objects have \code{print}, \code{\link[=plot.twinstim]{plot}} and \code{\link[=summary.twinstim]{summary}} methods.
The output of the \code{summary} can be processed by the \code{\link[=toLatex.summary.twinstim]{toLatex}} function. Furthermore, the usual model fit methods such as \code{coef}, \code{vcov}, \code{logLik}, \code{\link[=residuals.twinstim]{residuals}}, and \code{update} are implemented. A specific add-on is the use of the functions \code{\link{R0}} and \code{\link[=simulate.twinstim]{simulate}}. } \value{ Returns an S3 object of class \code{"twinstim"}, which is a list with the following components: \item{coefficients}{vector containing the MLE.} \item{loglik}{value of the log-likelihood function at the MLE with a logical attribute \code{"partial"} indicating if the partial likelihood was used.} \item{counts}{number of log-likelihood and score evaluations during optimization.} \item{converged}{either \code{TRUE} (if the optimizer converged) or a character string containing a failure message.} \item{fisherinfo}{\emph{expected} Fisher information evaluated at the MLE. Only non-\code{NULL} for full likelihood inference (\code{partial = FALSE}) and if spatial and temporal interaction functions are provided with their derivatives.} \item{fisherinfo.observed}{observed Fisher information matrix evaluated at the value of the MLE. Obtained as the negative Hessian. Only non-\code{NULL} if \code{optim.args$method} is not \code{"nlminb"} and if it was requested by setting \code{hessian=TRUE} in \code{optim.args}.} \item{fitted}{fitted values of the conditional intensity function at the events.} \item{fittedComponents}{two-column matrix with columns \code{"h"} and \code{"e"} containing the fitted values of the endemic and epidemic components, respectively.\cr (Note that \code{rowSums(fittedComponents) == fitted}.)} \item{tau}{fitted cumulative ground intensities at the event times. Only non-\code{NULL} if \code{cumCIF = TRUE}. This is the \dQuote{residual process} of the model, see \code{\link{residuals.twinstim}}.} \item{R0}{estimated basic reproduction number for each event. This equals the spatio-temporal integral of the epidemic intensity over the observation domain (t0;T] x W for each event.} \item{npars}{vector describing the lengths of the 5 parameter subvectors: endemic intercept(s) \eqn{\beta_0(\kappa)}, endemic coefficients \eqn{\beta}, epidemic coefficients \eqn{\gamma}, parameters of the \code{siaf} kernel, and parameters of the \code{tiaf} kernel.} \item{qmatrix}{the \code{qmatrix} associated with the epidemic \code{data} as supplied in the model call.} \item{bbox}{the bounding box of \code{data$W}.} \item{timeRange}{the time range used for fitting: \code{c(t0,T)}.} \item{formula}{a list containing the four main parts of the model specification: \code{endemic}, \code{epidemic}, \code{siaf}, and \code{tiaf}.} \item{xlevels}{a record of the levels of the factors used in fitting.} \item{control.siaf}{see the \dQuote{Arguments} section above.} \item{optim.args}{input optimizer arguments used to determine the MLE.} \item{functions}{if \code{model=TRUE} this is a \code{list} with components \code{ll}, \code{sc} and \code{fi}, which are functions evaluating the log-likelihood, the score function and the expected Fisher information for a parameter vector \eqn{\theta}. The \code{environment} of these functions is the model environment, which is thus retained in the workspace if \code{model=TRUE}.
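For example (a sketch, assuming a model \code{m0} fitted with \code{model = TRUE} as in the examples below):

m0$functions$ll(coef(m0))  # log-likelihood at the MLE, equals logLik(m0)
m0$functions$sc(coef(m0))  # score function at the MLE, approximately zero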
Otherwise, the \code{functions} component is \code{NULL}.} \item{call}{the matched call.} \item{runtime}{the \code{\link{proc.time}}-queried time taken to fit the model, i.e., a named numeric vector of length 5 of class \code{"proc_time"}, with the number of \code{cores} set as additional attribute.} If \code{model=TRUE}, the model evaluation environment is assigned to this list and can thus be queried by calling \code{environment()} on the result. } \note{ \code{twinstim} makes use of the \pkg{memoise} package if it is available -- and that is highly recommended for non-constant \code{siaf} specifications to speed up calculations. Specifically, the necessary numerical integrations of the spatial interaction function will be cached such that they are only calculated once for every state of the \code{siaf} parameters during optimization. } \references{ Diggle, P. J., Kaimi, I. & Abellana, R. (2010): Partial-likelihood analysis of spatio-temporal point-process data. \emph{Biometrics}, \bold{66}, 347-354. Martinussen, T. and Scheike, T. H. (2006): Dynamic Regression Models for Survival Data. Springer. Meyer, S. (2010): Spatio-Temporal Infectious Disease Epidemiology based on Point Processes. Master's Thesis, Ludwig-Maximilians-Universit\enc{ä}{ae}t M\enc{ü}{ue}nchen.\cr Available as \url{https://epub.ub.uni-muenchen.de/11703/} Meyer, S., Elias, J. and H\enc{ö}{oe}hle, M. (2012): A space-time conditional intensity model for invasive meningococcal disease occurrence. \emph{Biometrics}, \bold{68}, 607-616. \doi{10.1111/j.1541-0420.2011.01684.x} Meyer, S. and Held, L. (2014): Power-law models for infectious disease spread. \emph{The Annals of Applied Statistics}, \bold{8} (3), 1612-1639. \doi{10.1214/14-AOAS743} Meyer, S., Held, L. and \enc{Höhle}{Hoehle}, M. (2017): Spatio-temporal analysis of epidemic phenomena using the \R package \pkg{surveillance}. \emph{Journal of Statistical Software}, \bold{77} (11), 1-55. \doi{10.18637/jss.v077.i11} Rathbun, S. L. (1996): Asymptotic properties of the maximum likelihood estimator for spatio-temporal point processes. \emph{Journal of Statistical Planning and Inference}, \bold{51}, 55-74. } \author{ Sebastian Meyer Contributions to this documentation by Michael H\enc{ö}{oe}hle and Mayeul Kauffmann. } \seealso{ \code{vignette("twinstim")}. There is a \code{\link{simulate.twinstim}} method, which simulates the point process based on the fitted \code{twinstim}. A discrete-space alternative is offered by the \code{\link{twinSIR}} modelling framework. } \examples{ # Load invasive meningococcal disease data data("imdepi") ### first, fit a simple endemic-only model m_noepi <- twinstim( endemic = addSeason2formula(~ offset(log(popdensity)) + I(start/365-3.5), S=1, period=365, timevar="start"), data = imdepi, subset = !is.na(agegrp) ) ## look at the model summary summary(m_noepi) ## there is no evidence for a type-dependent endemic intercept (LR test) m_noepi_type <- update(m_noepi, endemic = ~(1|type) + .) pchisq(2*c(logLik(m_noepi_type)-logLik(m_noepi)), df=1, lower.tail=FALSE) ### add an epidemic component with just the intercept, i.e.
### assuming uniform dispersal in time and space up to a distance of ### eps.s = 200 km and eps.t = 30 days (see summary(imdepi)) m0 <- update(m_noepi, epidemic=~1, model=TRUE) ## summarize the model fit s <- summary(m0, correlation = TRUE, symbolic.cor = TRUE) s # output the table of coefficients as LaTeX code toLatex(s, digits=2) # or, to report rate ratios xtable(s) ## the default confint-method can be used for Wald-CI's confint(m0, level=0.95) ## same "untrimmed" R0 for every event (simple epidemic intercept model) summary(R0(m0, trimmed=FALSE)) ## plot the path of the fitted total intensity plot(m0, "total intensity", tgrid=500) ## extract "residual process" integrating over space (takes some seconds) if (surveillance.options("allExamples")) { res <- residuals(m0) # if the model describes the true CIF well _in the temporal dimension_, # then this residual process should behave like a stationary Poisson # process with intensity 1 plot(res, type="l"); abline(h=c(0, length(res)), lty=2) # easier, with CI and serial correlation -> checkResidualProcess() checkResidualProcess(m0) } \dontrun{ ## NB: in contrast to nlminb(), optim's BFGS would miss the ## likelihood maximum wrt the epidemic intercept m0_BFGS <- update(m_noepi, epidemic=~1, optim.args = list(method="BFGS")) format(cbind(nlminb=coef(m0), BFGS=coef(m0_BFGS)), digits=3, scientific=FALSE) m0_BFGS$fisherinfo # singular Fisher information matrix here m0$fisherinfo logLik(m0_BFGS) logLik(m0) ## nlminb is more powerful since we make use of the analytical fisherinfo ## as estimated by the model during optimization, which optim cannot } ### an epidemic-only model? ## for a purely epidemic model, all events must have potential source events ## (otherwise the intensity at the observed event would be 0) ## let's focus on the C-type for this example imdepiC <- subset(imdepi, type == "C") table(summary(imdepiC)$nSources) ## 106 events have no prior, close events (in terms of eps.s and eps.t) try(twinstim(epidemic = ~1, data = imdepiC)) # detects this problem ## let's assume spatially unbounded interaction imdepiC_infeps <- update(imdepiC, eps.s = Inf) (s <- summary(imdepiC_infeps)) table(s$nSources) ## for 11 events, there is no prior event within eps.t = 30 days ## (which is certainly true for the first event) plot(s$counter, main = "Number of infectious individuals over time (eps.t = 30)") rug(imdepiC_infeps$events$time) rug(imdepiC_infeps$events$time[s$nSources == 0], col = 2, lwd = 3) ## An endemic component would catch such events (from unobserved sources), ## otherwise a longer infectious period would need to be assumed and ## for the first event to happen, a prehistory is required (e.g., t0 = 31). 
## As an example, we fit the data only until T = 638 (all events have ancestors) m_epi <- twinstim(epidemic = ~1, data = imdepiC_infeps, t0 = 31, T = 638) summary(m_epi) ### full model with interaction functions (time-consuming) if (surveillance.options("allExamples")) { ## estimate an exponential temporal decay of infectivity m1_tiaf <- update(m0, tiaf=tiaf.exponential()) plot(m1_tiaf, "tiaf", scaled=FALSE) ## estimate a step function for spatial interaction summary(sourceDists <- getSourceDists(imdepi, "space")) (knots <- quantile(sourceDists, c(5,10,20,40)/100)) m1_fstep <- update(m0, siaf=knots) plot(m1_fstep, "siaf", scaled=FALSE) rug(sourceDists, ticksize=0.02) ## estimate a continuously decreasing spatial interaction function, ## here we use the kernel of an isotropic bivariate Gaussian m1 <- update(m0, siaf = siaf.gaussian()) AIC(m_noepi, m0, m1_fstep, m1) summary(m1) # e.siaf.1 is log(sigma), no test for H0: log(sigma) = 0 exp(confint(m1, "e.siaf.1")) # a confidence interval for sigma plot(m1, "siaf", scaled=FALSE) ## alternative: siaf.powerlaw() with eps.s=Inf and untie()d data, ## see vignette("twinstim") ## add epidemic covariates m2 <- update(m1, epidemic = ~ 1 + type + agegrp) AIC(m1, m2) # further improvement summary(m2) ## look at estimated R0 values by event type tapply(R0(m2), imdepi$events@data[names(R0(m2)), "type"], summary) } } \keyword{models} \keyword{optimize} surveillance/man/plot.atwins.Rd0000644000176200001440000000467313122471774016302 0ustar liggesusers\name{plot.atwins} \alias{plot.atwins} \encoding{latin1} \title{Plot results of a twins model fit} \description{ Plot results of fitting a twins model using MCMC output. Plots similar to those in the Held et al. (2006) paper are generated. } \usage{ \method{plot}{atwins}(x, which=c(1,4,6,7), ask=TRUE, \dots) } \arguments{ \item{x}{An object of class \code{atwins}.} \item{which}{a vector containing the different plot types to show \describe{ \item{1}{A plot of the observed time series Z is shown together with posterior means for the number of endemic cases (X) and number of epidemic cases (Y).} \item{2}{This plot shows trace plots of the gamma parameters over all MCMC samples.} \item{3}{This shows a trace plot of psi, which controls the overdispersion in the model.} \item{4}{Autocorrelation functions for K and psi are shown in order to judge whether the MCMC sampler has converged.} \item{5}{Shows a plot of the posterior mean of the seasonal model nu[t] together with 95\% credibility intervals based on the quantiles of the posterior.} \item{6}{Histograms illustrating the posterior density for K and psi. The first one corresponds to Fig. 4(f) in the paper.} \item{7}{Histograms illustrating the predictive posterior density for the next observed number of cases Z[n+1]. Compare with Fig. 5 in the paper.} } } \item{ask}{Boolean indicating whether to ask for a newline before showing the next plot.} \item{\dots}{Additional control for the plots, which are currently ignored.} } \details{ For details see the plots in the paper. Basically, MCMC output is visualized. This function, together with \code{algo.twins}, is still experimental. } \value{This function does not return anything.} \references{Held, L., Hofmann, M., \enc{Höhle}{Hoehle}, M. and Schmid, V. (2006) A two-component model for counts of infectious diseases, Biostatistics, \bold{7}, pp. 422--437. } \author{M. Hofmann and M.
\enc{Höhle}{Hoehle}} \seealso{\link{algo.twins}} \examples{ \dontrun{ #Apparently, algo.twins can crash on some LINUX systems #thus for now the example section is commented #Load the data used in the Held et al. (2006) paper data("hepatitisA") #Fix seed - this is used for the MCMC samplers in twins set.seed(123) #Call algorithm and save result otwins <- algo.twins(hepatitisA) #This shows the entire output plot(otwins,which=c(1,2),ask=FALSE) } } \keyword{ts} \keyword{regression} surveillance/man/hhh4_W_utils.Rd0000644000176200001440000000211313117736473016362 0ustar liggesusers\name{hhh4_W_utils} \alias{getNEweights} \alias{coefW} \title{ Extract Neighbourhood Weights from a Fitted \code{hhh4} Model } \description{ The \code{getNEweights} function extracts the (fitted) weight matrix/array from a \code{"hhh4"} object, after scaling and normalization. The \code{coefW} function extracts the coefficients of parametric neighbourhood weights from a \code{hhh4} fit (or directly from a corresponding coefficient vector), i.e., coefficients whose names begin with \dQuote{neweights}. } \usage{ getNEweights(object, pars = coefW(object), scale = ne$scale, normalize = ne$normalize) coefW(object) } \arguments{ \item{object}{an object of class \code{"hhh4"}. \code{coefW} also works with the coefficient vector.} \item{pars}{coefficients for parametric neighbourhood weights, such as for models using \code{\link{W_powerlaw}}. Defaults to the corresponding point estimates in \code{object}.} \item{scale,normalize}{parameters of the \code{ne} component of \code{\link{hhh4}}.} } \author{ Sebastian Meyer } \keyword{utilities} surveillance/man/twinstim_iafplot.Rd0000644000176200001440000002205313100434734017405 0ustar liggesusers\encoding{latin1} \name{twinstim_iafplot} \alias{iafplot} \title{ Plot the Spatial or Temporal Interaction Function of a \code{twinstim} } \description{ The function plots the fitted temporal or (isotropic) spatial interaction function of a \code{twinstim} object. The implementation is illustrated in Meyer et al. (2017, Section 3), see \code{vignette("twinstim")}. } \usage{ iafplot(object, which = c("siaf", "tiaf"), types = NULL, scaled = c("intercept", "standardized", "no"), truncated = FALSE, log = "", conf.type = if (length(pars) > 1) "MC" else "parbounds", conf.level = 0.95, conf.B = 999, xgrid = 101, col.estimate = rainbow(length(types)), col.conf = col.estimate, alpha.B = 0.15, lwd = c(3,1), lty = c(1,2), verticals = FALSE, do.points = FALSE, add = FALSE, xlim = NULL, ylim = NULL, xlab = NULL, ylab = NULL, legend = !add && (length(types) > 1), ...) } \arguments{ \item{object}{ object of class \code{"twinstim"} containing the fitted model. } \item{which}{ argument indicating which of the two interaction functions to plot. Possible values are \code{"siaf"} (default) for the spatial interaction \eqn{f(x)} as a function of the distance \eqn{x}, and \code{"tiaf"} for the temporal interaction function \eqn{g(t)}. } \item{types}{ integer vector indicating for which event \code{types} the interaction function should be plotted in case of a marked \code{"twinstim"}. The default \code{types=NULL} checks if the interaction function is type-specific: if so, \code{types=1:nrow(object$qmatrix)} is used, otherwise \code{types=1}. } \item{scaled}{ character string determining if/how the interaction function should be scaled.
Possible choices are: \describe{ \item{"intercept":}{multiplication by the epidemic intercept.} \item{"standardized":}{division by the value at 0 distance such that the function starts at 1.} \item{"no":}{no scaling.} } The first one is the default and required for the comparison of estimated interaction functions from different models. For backward compatibility, \code{scaled} can also be a boolean, where \code{TRUE} refers to \code{"intercept"} scaling and \code{FALSE} to \code{"no"} scaling. } \item{truncated}{ logical indicating if the plotted interaction function should take the maximum range of interaction (\code{eps.t}/\code{eps.s}) into account, i.e., drop to zero at that point (if it is finite after all). If there is no common range of interaction, a \code{\link{rug}} indicating the various ranges will be added to the plot if \code{truncated=TRUE}. If \code{truncated} is a scalar, this value is used as the point \code{eps} where the function drops to 0. } \item{log}{a character string passed to \code{\link{plot.default}} indicating which axes should be logarithmic. If \code{add=TRUE}, \code{log} is set according to \code{par("xlog")} and \code{par("ylog")}.} \item{conf.type}{ type of confidence interval to produce.\cr If \code{conf.type="MC"} (or \code{"bootstrap"}), \code{conf.B} parameter vectors are sampled from the asymptotic (multivariate) normal distribution of the ML estimate of the interaction function parameters; the interaction function is then evaluated on the \code{xgrid} (i.e. temporal or spatial distances from the host) for each parameter realization to obtain a \code{conf.level} confidence interval at each point of the \code{xgrid} (or to plot the interaction functions of all Monte-Carlo samples if \code{conf.level=NA}). Note that the resulting plot is \code{\link{.Random.seed}}-dependent for the Monte-Carlo type of confidence interval.\cr If \code{conf.type="parbounds"}, the \code{conf.level} Wald confidence intervals for the interaction function parameters are calculated and the interaction function is evaluated on the \code{xgrid} (distances from the host) for all combinations of the bounds of the parameters and the point-wise extremes of those functions are plotted. This type of confidence interval is only valid in case of a single parameter, i.e. \code{scaled + nsiafpars == 1}, but could also be used as a rough indication if the Monte-Carlo approach takes too long. A warning is thrown if the \code{"parbounds"} type is used for multiple parameters.\cr If \code{conf.type="none"} or \code{NA} or \code{NULL}, no confidence interval will be calculated. } \item{conf.level}{ the confidence level required. For \code{conf.type = "MC"} it may also be specified as \code{NA}, in which case all \code{conf.B} sampled functions will be plotted with transparency value given by \code{alpha.B}. } \item{conf.B}{ number of samples for the \code{"MC"} (Monte Carlo) confidence interval. } \item{xgrid}{ either a numeric vector of x-values (distances from the host) where to evaluate \code{which}, or a scalar representing the desired number of evaluation points in the interval \code{c(0,xlim[2])}.\cr If the interaction function is a step function (\code{\link{siaf.step}} or \code{\link{tiaf.step}}), \code{xgrid} is ignored and internally set to \code{c(0, knots)}. } \item{col.estimate}{ vector of colours to use for the function point estimates of the different \code{types}. } \item{col.conf}{ vector of colours to use for the confidence intervals of the different \code{types}. 
} \item{alpha.B}{ alpha transparency value (as relative opacity) used for the \code{conf.B} sampled interaction functions in case \code{conf.level = NA}. } \item{lwd, lty}{ numeric vectors of length two specifying the line width and type of point estimates (first element) and confidence limits (second element), respectively. } \item{verticals,do.points}{graphical settings for step function kernels. These can be logical (as in \code{\link{plot.stepfun}}) or lists of graphical parameters.} \item{add}{ add to an existing plot? } \item{xlim, ylim}{ vectors of length two containing the x- and y-axis limit of the plot. The default y-axis range (\code{ylim=NULL}) is from 0 to the value of the (scaled) interaction function at \eqn{x = 0}. The default x-axis (\code{xlim=NULL}) starts at 0, and the upper limit is determined as follows (in decreasing order of precedence): \itemize{ \item If \code{xgrid} is a vector of evaluation points, \code{xlim[2]} is set to \code{max(xgrid)}. \item \code{eps.t}/\code{eps.s} if it is unique and finite. \item If the interaction function is a step function with \code{maxRange= 1) to be in the palette.} \item{use.color}{logical. Should the palette use colors? Otherwise grey levels are returned.} } \value{ A character vector of \code{ncolors} colors. } \examples{ barplot(rep(1,10), col = surveillance:::.hcl.colors(10), axes = FALSE) } \keyword{color} \keyword{dplot} \keyword{internal} surveillance/man/stsplot_spacetime.Rd0000644000176200001440000000654313276520503017563 0ustar liggesusers\encoding{latin1} \name{stsplot_spacetime} \alias{stsplot_spacetime} \title{ Map of Disease Incidence } \description{ For each period (row) or for the overall period of the \code{observed} matrix of the \code{"\linkS4class{sts}"} object, a map showing the counts by region is produced. It is possible to redirect the output into files, e.g., to generate an animated GIF. } \usage{ stsplot_spacetime(x, type, legend = NULL, opts.col = NULL, labels = TRUE, wait.ms = 250, cex.lab = 0.7, verbose = FALSE, dev.printer = NULL, ...) } \arguments{ \item{x}{ an object of class \code{"\linkS4class{sts}"}. } \item{type}{ a formula (see \code{\link{stsplot}}). For a map aggregated over time (no animation), use \code{observed ~ 1 | unit}, otherwise \code{observed ~ 1 | unit * time}. } \item{legend}{ An object of type \code{list} containing the following items used for coloring \describe{ \item{dx}{position increments in x direction} \item{dy}{position increments in y direction} \item{x}{position in x} \item{y}{position in y} \item{once}{\code{Boolean} - if \code{TRUE} then only shown once} } If \code{NULL} then a default legend is used. } \item{opts.col}{ A list containing the two elements \describe{ \item{ncolors}{Number of colors to use for plotting} \item{use.color}{\code{Boolean} if \code{TRUE} then colors will be used in the palette, otherwise grayscale} } } \item{labels}{\code{Boolean} whether to add labels } \item{wait.ms}{Number of milliseconds to wait between each plot } \item{cex.lab}{\code{cex} of the labels } \item{verbose}{\code{Boolean} whether to write out extra information } \item{dev.printer}{Either \code{NULL} (default), which means that plotting is only to the screen, or a list with elements \code{device}, \code{extension}, \code{width}, \code{height}, and \code{name} (with defaults \code{png}, \code{".png"}, \code{640}, \code{480}, and \code{"Rplot"}, respectively) to \code{\link{dev.print}} the plots to files (only works in interactive sessions).
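A full specification might look as follows (a sketch; the element names and defaults are those listed above):

dev.printer = list(device = png, extension = ".png",
                   width = 640, height = 480,
                   name = file.path(tempdir(), "Rplot"))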
This option is more or less obsolete since the \pkg{animation} package provides better features for output to files. } \item{\dots}{Extra arguments sent to the plot function. } } \author{ Michael H\enc{ö}{oe}hle } \note{ The \code{\link{animate.sts}} method provides a re-implementation and supersedes this function! } \seealso{ Other \code{\link{stsplot}} types, and \code{\link{animate.sts}} for the new implementation. } \examples{ data("ha.sts") print(ha.sts) ## map of total counts by district plot(ha.sts, type=observed ~ 1 | unit) ## only show a sub-period total for two selected districts plot(ha.sts[1:20,1:2], type=observed ~ 1 | unit) \dontrun{ # space-time animation plot(aggregate(ha.sts,nfreq=13), type= observed ~ 1 | unit * time) #print the frames to a png device #and do the animation without extra sleeping between frames imgname <- file.path(tempdir(), "berlin") plot(aggregate(ha.sts,nfreq=13), type = observed ~ 1 | unit * time, wait.ms=0, dev.printer=list(name=imgname)) #Use ImageMagick (you might have to adjust the path to 'convert') system(paste0("convert -delay 50 ", imgname, "*.png ", imgname, "-animated.gif")) } } \keyword{hplot} \keyword{dynamic} \keyword{spatial} surveillance/man/measlesDE.Rd0000644000176200001440000000250214026701226015651 0ustar liggesusers\name{measlesDE} \alias{measlesDE} \docType{data} \title{Measles in the 16 states of Germany} \description{ Weekly number of measles cases in the 16 states (Bundeslaender) of Germany for years 2005 to 2007. } \usage{data(measlesDE)} \format{ An \code{"\linkS4class{sts}"} object containing \eqn{156\times 16}{156 x 16} observations starting from week 1 in 2005. The \code{population} slot contains the population fractions of each state at 31.12.2006, obtained from the Federal Statistical Office of Germany. } \source{ Robert Koch-Institut: SurvStat: \url{https://survstat.rki.de/}; Queried on 14 October 2009. } \seealso{\code{\link{MMRcoverageDE}}} \examples{ data(measlesDE) plot(measlesDE) ## aggregate to bi-weekly intervals measles2w <- aggregate(measlesDE, nfreq = 26) plot(measles2w, type = observed ~ time) ## use a date index for nicer x-axis plotting epoch(measles2w) <- seq(as.Date("2005-01-03"), by = "2 weeks", length.out = nrow(measles2w)) plot(measles2w, type = observed ~ time) } \references{ Herzog, S. A., Paul, M. and Held, L. (2011): Heterogeneity in vaccination coverage explains the size and occurrence of measles epidemics in German surveillance data. \emph{Epidemiology and Infection}, \bold{139}, 505-515. \doi{10.1017/S0950268810001664} } \keyword{datasets} surveillance/man/ranef.Rd0000644000176200001440000000062112716552041015105 0ustar liggesusers\name{ranef} \alias{ranef} \alias{fixef} \docType{import} \title{Import from package \pkg{nlme}} \description{ The generic functions \code{ranef} and \code{fixef} are imported from package \pkg{nlme}. See \code{\link[nlme:ranef]{nlme::ranef}} for \pkg{nlme}'s own description, and \code{\link{ranef.hhh4}} or \code{\link{fixef.hhh4}} for the added methods for \code{"\link{hhh4}"} models. } surveillance/man/xtable.algoQV.Rd0000644000176200001440000000217013122471774016467 0ustar liggesusers\name{xtable.algoQV} \alias{xtable.algoQV} \title{Xtable quality value object} \description{Produce an \code{\link[xtable]{xtable}} of a single quality value object in a nicely formatted way} \usage{ \method{xtable}{algoQV}(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, ...)
}
\arguments{
\item{x}{Quality Values object generated with \code{algo.quality}}
\item{caption}{See \code{\link[xtable]{xtable}}}
\item{label}{See \code{\link[xtable]{xtable}}}
\item{align}{See \code{\link[xtable]{xtable}}}
\item{digits}{See \code{\link[xtable]{xtable}}}
\item{display}{See \code{\link[xtable]{xtable}}}
\item{...}{Further arguments (see \code{\link[xtable]{xtable}})}
}
\keyword{print}
\seealso{ \code{\link[xtable]{xtable}}}
\examples{
# Create a test object
disProgObj <- sim.pointSource(p = 0.99, r = 0.5, length = 200, A = 1,
                              alpha = 1, beta = 0, phi = 0,
                              frequency = 1, state = NULL, K = 1.7)

# Let this object be tested by rki1
survResObj <- algo.rki1(disProgObj, control = list(range = 50:200))

# Compute the quality values in a nicely formatted way
xtable(algo.quality(survResObj))
}
surveillance/man/plapply.Rd0000644000176200001440000000726612477533154015503 0ustar liggesusers\name{plapply}
\alias{plapply}
\title{Verbose and Parallel \code{lapply}}
\description{ Verbose and parallelized version of \code{lapply} wrapping around \code{\link[parallel]{mclapply}} and \code{\link[parallel]{parLapply}} in the base package \pkg{parallel}. This wrapper can take care of the \code{.Random.seed} and print progress information (not for cluster-based parallelization). With the default arguments it equals \code{lapply} enriched by a progress bar. }
\usage{ plapply(X, FUN, ..., .parallel = 1, .seed = NULL, .verbose = TRUE) }
\arguments{
\item{X,FUN,\dots}{see \code{\link{lapply}}.}
\item{.parallel}{ the number of processes to use in parallel operation, or a \code{"cluster"} object (see \code{\link[parallel]{makeCluster}}). If a number, \code{\link[parallel]{mclapply}} (forking) is used on Unix-alikes, whereas on Windows \code{\link[parallel]{parLapply}} is used on a newly created cluster of the specified size, which is stopped when exiting the function. By default (\code{.parallel = 1}), the basic \code{\link{lapply}} is used. }
\item{.seed}{ If set (non-\code{NULL}), results involving random number generation become reproducible. If using a cluster (see the \code{.parallel} argument), \code{\link[parallel]{clusterSetRNGStream}} is called with the specified \code{.seed} before running \code{parLapply}. Otherwise, \code{\link{set.seed}(.seed)} is called and the \code{\link{RNGkind}} is changed to \code{"L'Ecuyer-CMRG"} if \code{.parallel > 1} (see the section on random numbers in the documentation of \code{mcparallel} in package \pkg{parallel}).
% no link to mcparallel since it is not available on Windows (R-3.1.2)
If \code{.seed} is non-\code{NULL}, the original \code{\link{.Random.seed}} will be restored \code{on.exit} of the function. }
\item{.verbose}{ if and how progress information should be displayed, i.e., what to do on each exit of \code{FUN}. This is unsupported and ignored for cluster-based parallelization and primitive \code{FUN}ctions. The default (\code{TRUE}) will show a \code{\link{txtProgressBar}} (if \code{.parallel = 1} in an \code{\link{interactive}} \R session) or \code{cat(".")} (otherwise). Other choices for the dot are possible by specifying the desired symbol directly as the \code{.verbose} argument. Alternatively, \code{.verbose} may be any custom call or expression to be executed \code{\link{on.exit}} of \code{FUN} and may thus involve any objects from the local evaluation environment. }
}
\value{ a list of the results of calling \code{FUN} on each value of \code{X}.
} \author{ Sebastian Meyer } \seealso{ \code{\link[parallel]{mclapply}} and \code{\link[parallel]{parLapply}} } \examples{ ## example inspired by help("lapply") x <- list(a = 1:10, beta = exp(-3:3), logic = c(TRUE,FALSE,FALSE,TRUE)) ## if neither parallel nor verbose then this simply equals lapply() plapply(x, quantile, probs = 1:3/4, .verbose = FALSE) ## verbose lapply() -- not really useful for such fast computations res <- plapply(x, quantile, probs = 1:3/4, .verbose = TRUE) res <- plapply(x, quantile, probs = 1:3/4, .verbose = "|") res <- plapply(x, quantile, probs = 1:3/4, .verbose = quote(cat("length(x) =", length(x), "\n"))) ## setting the seed for reproducibility of results involving the RNG samp <- plapply(as.list(1:3), runif, .seed = 1) ## parallel lapply() res <- plapply(x, quantile, probs = 1:3/4, .parallel = 2) ## using a predefined cluster library("parallel") cl <- makeCluster(getOption("cl.cores", 2)) res <- plapply(x, quantile, probs = 1:3/4, .parallel = cl) stopCluster(cl) } \keyword{iteration} \keyword{list} surveillance/man/epidata_intersperse.Rd0000644000176200001440000000333213433306243020044 0ustar liggesusers\name{epidata_intersperse} \alias{intersperse} \title{ Impute Blocks for Extra Stops in \code{"epidata"} Objects } \description{ This function modifies an object inheriting from class \code{"epidata"} such that it features the specified stop time points. For this purpose, the time interval in the event history into which the new stop falls will be split up into two parts, one block for the time period until the new stop -- where no infection or removal occurs -- and the other block for the time period from the new stop to the end of the original interval.\cr Main application is to enable the use of \code{knots} in \code{twinSIR}, which are not existing stop time points in the \code{"epidata"} object. } \usage{ intersperse(epidata, stoptimes, verbose = FALSE) } \arguments{ \item{epidata}{ an object inheriting from class \code{"epidata"}. } \item{stoptimes}{ a numeric vector of time points inside the observation period of the \code{epidata}. } \item{verbose}{ logical indicating if a \code{\link{txtProgressBar}} should be shown while inserting blocks for extra \code{stoptimes}. } } \value{ an object of the same class as \code{epidata} with additional time blocks for any new \code{stoptimes}. } \author{ Sebastian Meyer } \seealso{ \code{\link{as.epidata.epidataCS}} where this function is used. } \examples{ data("hagelloch") subset(hagelloch, start < 25 & stop > 25 & id \%in\% 9:13, select = 1:7) # there is no "stop" time at 25, but we can add this extra stop nrow(hagelloch) moreStopsEpi <- intersperse(hagelloch, stoptimes = 25) nrow(moreStopsEpi) subset(moreStopsEpi, (stop == 25 | start == 25) & id \%in\% 9:13, select = 1:7) } \keyword{spatial} \keyword{manip} surveillance/man/stsAggregate.Rd0000644000176200001440000000320513507405136016434 0ustar liggesusers\name{aggregate-methods} \docType{methods} \alias{aggregate.sts} \alias{aggregate,sts-method} \title{Aggregate an \code{"sts"} Object Over Time or Across Units} \description{ Aggregate the matrix slots of an \code{"\linkS4class{sts}"} object. Either the time series is aggregated so a new sampling frequency of \code{nfreq} observations per year is obtained (i.e., as in \code{\link{aggregate.ts}}), or the aggregation is over all columns (units). } \usage{ \S4method{aggregate}{sts}(x, by = "time", nfreq = "all", ...) 
}
\arguments{
\item{x}{an object of class \code{"\linkS4class{sts}"}.}
\item{by}{a string being either \code{"time"} or \code{"unit"}.}
\item{nfreq}{new sampling frequency for \code{by="time"}. If \code{nfreq="all"} then all time points are summed.}
\item{\dots}{unused (argument of the generic).}
}
\value{ an object of class \code{"sts"}. }
\section{Warning}{ Aggregation over units fills the upperbound slot with \code{NA}s and the \code{map} slot is left as-is, but the object cannot be plotted by unit any longer. The \code{populationFrac} slot is aggregated just like \code{observed}. Population fractions are recomputed if and only if \code{x} is not a \code{multinomialTS} and already contains population fractions. This might not be intended, especially for aggregation over time. }
\examples{
data("ha.sts")
dim(ha.sts)
dim(aggregate(ha.sts, by = "unit"))
dim(aggregate(ha.sts, nfreq = 13))
\dontshow{
## population(ha.sts) are trivial fractions, aggregate() should keep them
stopifnot(population(aggregate(ha.sts)) == 1/ncol(ha.sts)) ## failed in surveillance <= 1.16.2
}
}
\keyword{methods}
surveillance/man/husO104Hosp.Rd0000644000176200001440000000535513234140561016014 0ustar liggesusers\encoding{latin1}
\name{husO104Hosp}
\alias{husO104Hosp}
\docType{data}
\title{Hospitalization date for HUS cases of the STEC outbreak in Germany, 2011}
\description{ Data contain the date of hospitalization for 630 hemolytic-uremic syndrome (HUS) cases during the large STEC outbreak in Germany, 2011. Note: Only HUS cases which ultimately had a hospitalization date available/reported are included in the data set. The total number of HUS cases during the outbreak was 855 -- see \enc{Höhle}{Hoehle} and an der Heiden (2014) as well as Frank et al. (2011) for details.

For each HUS case the attribute \code{dHosp} contains the date of hospitalization and the attribute \code{dReport} contains the date of first arrival of this hospitalization date at the Robert Koch Institute (RKI). As described in \enc{Höhle}{Hoehle} and an der Heiden (2014) the mechanisms of the delay were complicated and should be interpreted with care. For example, the case report could have arrived earlier, but without information about the hospitalization date.

The resulting reporting triangle corresponds to Fig. 1 of the Web appendix of \enc{Höhle}{Hoehle} and an der Heiden (2014). This means that the reports which arrived with a delay longer than 15 days are set to have arrived after 15 days. Altogether, this gives small discrepancies when compared with the results of the paper. However, as mentioned in the paper, longer delays were not very relevant for the nowcasting. }
\usage{data(husO104Hosp)}
\format{ A \code{data.frame} object. }
\source{ Data were collected during the outbreak as part of the mandatory reporting of notifiable diseases in Germany (Faensen et al., 2006). Here, reports are transmitted from the local health authorities via the state health authorities to the Robert Koch Institute, Berlin. The resulting reporting triangle corresponds to Fig. 1 of the Web appendix of \enc{Höhle}{Hoehle} and an der Heiden (2014). }
\references{
\enc{Höhle}{Hoehle} M and an der Heiden, M (2014). Bayesian Nowcasting during the STEC O104:H4 Outbreak in Germany, 2011. Biometrics, 70 (4), 993-1002. \doi{10.1111/biom.12194}

Frank C, Werber D, Cramer JP, Askar M, Faber M, an der Heiden M, Bernard H, Fruth A, Prager R, Spode A, Wadl M, Zoufaly A, Jordan S, Kemper MJ, Follin P, \enc{Müller}{Mueller} L, King LA, Rosner B, Buchholz U, Stark K, Krause G; HUS Investigation Team (2011).
Epidemic Profile of Shiga-Toxin Producing Escherichia coli O104:H4 Outbreak in Germany, N Engl J Med. 2011 Nov 10;365(19):1771-80.

Faensen D, Claus H, Benzler J, Ammon A, Pfoch T, Breuer T, Krause G (2006). SurvNet@RKI - a multistate electronic reporting system for communicable diseases, Euro Surveillance, 2006;11(4):100-103.
}
\keyword{datasets}
surveillance/man/salmAllOnset.Rd0000644000176200001440000000130313234140561016402 0ustar liggesusers\encoding{latin1}
\docType{data}
\name{salmAllOnset}
\alias{salmAllOnset}
\title{Salmonella cases in Germany 2001-2014 by date of symptom onset}
\format{An \code{"\linkS4class{sts}"} object}
\usage{ data(salmAllOnset) }
\description{ A dataset containing the reported number of cases of Salmonella in Germany 2001-2014 aggregated by date of disease onset. The slot \code{control} contains a matrix \code{reportingTriangle$n} with the reporting triangle as described in Salmon et al. (2015). }
\references{ Salmon, M., Schumacher, D., Stark, K., \enc{Höhle}{Hoehle}, M. (2015): Bayesian outbreak detection in the presence of reporting delays. Biometrical Journal, 57 (6), 1051-1067. }
\keyword{datasets}
surveillance/man/arlCusum.Rd0000644000176200001440000000350313122471774015614 0ustar liggesusers\name{arlCusum}
\alias{arlCusum}
\title{Calculation of Average Run Length for discrete CUSUM schemes}
\description{ Calculates the average run length (ARL) for an upward CUSUM scheme for discrete distributions (i.e. Poisson and binomial) using the Markov chain approach. }
\usage{ arlCusum(h=10, k=3, theta=2.4, distr=c("poisson","binomial"), W=NULL, digits=1, ...) }
\arguments{
\item{h}{ decision interval}
\item{k}{ reference value}
\item{theta}{distribution parameter for the cumulative distribution function (cdf) \eqn{F}, i.e. rate \eqn{\lambda} for Poisson variates or probability \eqn{p} for binomial variates}
\item{distr}{ \code{"poisson"} or \code{"binomial"} }
%ppois, pbinom
\item{W}{Winsorizing value \code{W} for a robust CUSUM; to get a nonrobust CUSUM set \code{W} > \code{k}+\code{h}. If \code{NULL}, a nonrobust CUSUM is used.}
\item{digits}{ \code{k} and \code{h} are rounded to \code{digits} decimal places }
\item{\dots}{ further arguments for the distribution function, i.e. number of trials \code{n} for the binomial cdf }
}
\value{ Returns a list with the ARL of the regular (zero-start) and the fast initial response (FIR) CUSUM scheme with reference value \code{k}, decision interval \code{h} for \eqn{X \sim F(\theta)}, where F is the Poisson or binomial CDF.
\item{ARL}{one-sided ARL of the regular (zero-start) CUSUM scheme}
\item{FIR.ARL}{one-sided ARL of the FIR CUSUM scheme with head start \eqn{\frac{\code{h}}{2}} }
}
\keyword{models}
\source{Based on the FORTRAN code of Hawkins, D. M. (1992). Evaluation of Average Run Lengths of Cumulative Sum Charts for an Arbitrary Data Distribution. Communications in Statistics - Simulation and Computation, 21(4), p. 1001-1020. }
surveillance/man/influMen.Rd0000644000176200001440000000113113174706302015564 0ustar liggesusers\name{influMen}
\alias{influMen}
\docType{data}
\title{Influenza and meningococcal infections in Germany, 2001-2006}
\description{ Weekly counts of new influenza and meningococcal infections in Germany 2001-2006. }
\usage{data(influMen)}
\format{ A \code{disProg} object containing \eqn{312\times 2}{312 x 2} observations starting from week 1 in 2001 to week 52 in 2006. }
\source{ Robert Koch-Institut: SurvStat: \url{https://survstat.rki.de/}. Queried on 25 July 2007.
} \examples{ data(influMen) plot(influMen, as.one=FALSE, same.scale=FALSE) } \keyword{datasets} surveillance/man/marks.Rd0000644000176200001440000000055513777627613015155 0ustar liggesusers\name{marks} \alias{marks} \docType{import} \title{Import from package \pkg{spatstat.geom}} \description{ The generic function \code{marks} is imported from package \pkg{spatstat.geom}. See \code{\link[spatstat.geom:marks]{spatstat.geom::marks}} for \pkg{spatstat.geom}'s own methods, and \code{\link{marks.epidataCS}} for the \code{"epidataCS"}-specific method. } surveillance/man/hhh4.Rd0000644000176200001440000005630513741600116014653 0ustar liggesusers\encoding{latin1} \name{hhh4} \alias{hhh4} \title{Fitting HHH Models with Random Effects and Neighbourhood Structure} \description{ Fits an autoregressive Poisson or negative binomial model to a univariate or multivariate time series of counts. The characteristic feature of \code{hhh4} models is the additive decomposition of the conditional mean into \emph{epidemic} and \emph{endemic} components (Held et al, 2005). Log-linear predictors of covariates and random intercepts are allowed in all components; see the Details below. A general introduction to the \code{hhh4} modelling approach and its implementation is given in the \code{vignette("hhh4")}. Meyer et al (2017, Section 5, available as \code{vignette("hhh4_spacetime")}) describe \code{hhh4} models for areal time series of infectious disease counts. } \usage{ hhh4(stsObj, control = list( ar = list(f = ~ -1, offset = 1, lag = 1), ne = list(f = ~ -1, offset = 1, lag = 1, weights = neighbourhood(stsObj) == 1, scale = NULL, normalize = FALSE), end = list(f = ~ 1, offset = 1), family = c("Poisson", "NegBin1", "NegBinM"), subset = 2:nrow(stsObj), optimizer = list(stop = list(tol=1e-5, niter=100), regression = list(method="nlminb"), variance = list(method="nlminb")), verbose = FALSE, start = list(fixed=NULL, random=NULL, sd.corr=NULL), data = list(t = stsObj@epoch - min(stsObj@epoch)), keep.terms = FALSE ), check.analyticals = FALSE) } \arguments{ \item{stsObj}{object of class \code{"\linkS4class{sts}"} containing the (multivariate) count data time series.} \item{control}{a list containing the model specification and control arguments: \describe{ \item{\code{ar}}{Model for the autoregressive component given as list with the following components: \describe{ \item{f = ~ -1}{a formula specifying \eqn{\log(\lambda_{it})}{log(\lambda_it)}} \item{offset = 1}{optional multiplicative offset, either 1 or a matrix of the same dimension as \code{observed(stsObj)}} \item{lag = 1}{a positive integer meaning autoregression on \eqn{y_{i,t-lag}}} } } \item{\code{ne}}{Model for the neighbour-driven component given as list with the following components: \describe{ \item{f = ~ -1}{a formula specifying \eqn{\log(\phi_{it})}{log(\phi_it)}} \item{offset = 1}{optional multiplicative offset, either 1 or a matrix of the same dimension as \code{observed(stsObj)}} \item{lag = 1}{a non-negative integer meaning dependency on \eqn{y_{j,t-lag}}} \item{weights = neighbourhood(stsObj) == 1}{ neighbourhood weights \eqn{w_{ji}}{w_ji}. The default corresponds to the original formulation by Held et al (2005), i.e., the spatio-temporal component incorporates an unweighted sum over the lagged cases of the first-order neighbours. See Paul et al (2008) and Meyer and Held (2014) for alternative specifications, e.g., \code{\link{W_powerlaw}}. 
Time-varying weights are possible by specifying an array of \code{dim()} \code{c(nUnits, nUnits, nTime)}, where \code{nUnits=ncol(stsObj)} and \code{nTime=nrow(stsObj)}.}
\item{scale = NULL}{ optional matrix of the same dimensions as \code{weights} (or a vector of length \code{ncol(stsObj)}) to scale the \code{weights} to \code{scale * weights}. }
\item{normalize = FALSE}{ logical indicating if the (scaled) \code{weights} should be normalized such that each row sums to 1. }
} }
\item{\code{end}}{Model for the endemic component given as list with the following components
\describe{
\item{f = ~ 1}{a formula specifying \eqn{\log(\nu_{it})}{log(\nu_it)}}
\item{offset = 1}{optional multiplicative offset \eqn{e_{it}}{e_it}, either 1 or a matrix of the same dimension as \code{observed(stsObj)}}
} }
\item{\code{family}}{Distributional family -- either \code{"Poisson"}, or the Negative Binomial distribution. For the latter, the overdispersion parameter can be assumed to be the same for all units (\code{"NegBin1"}), to vary freely over all units (\code{"NegBinM"}), or to be shared by some units (specified by a factor of length \code{ncol(stsObj)} such that its number of levels determines the number of overdispersion parameters). Note that \code{"NegBinM"} is equivalent to \code{factor(colnames(stsObj), levels = colnames(stsObj))}. }
\item{\code{subset}}{Typically \code{2:nrow(stsObj)} if the model contains autoregression}
\item{\code{optimizer}}{a list of three lists of control arguments. The \code{"stop"} list specifies two criteria for the outer optimization of regression and variance parameters: the relative \code{tol}erance for parameter change using the criterion \code{max(abs(x[i+1]-x[i])) / max(abs(x[i]))}, and the maximum number \code{niter} of outer iterations. Control arguments for the single optimizers are specified in the lists named \code{"regression"} and \code{"variance"}. \code{method="nlminb"} is the default optimizer for both (taking advantage of the analytical Fisher information matrices), however, the \code{method}s from \code{\link{optim}} may also be specified (as well as \code{"\link{nlm}"} but that one is not recommended here). Especially for the variance updates, Nelder-Mead optimization (\code{method="Nelder-Mead"}) is an attractive alternative. All other elements of these two lists are passed as \code{control} arguments to the chosen \code{method}, e.g., if \code{method="nlminb"} adding \code{iter.max=50} increases the maximum number of inner iterations from 20 (default) to 50. }
\item{\code{verbose}}{non-negative integer (usually in the range \code{0:3}) specifying the amount of tracing information to be output during optimization.}
\item{\code{start}}{a list of initial parameter values replacing initial values set via \code{\link{fe}} and \code{\link{ri}}. Since \pkg{surveillance} 1.8-2, named vectors are matched against the coefficient names in the model (where unmatched start values are silently ignored), and need not be complete, e.g., \code{start = list(fixed = c("-log(overdisp)" = 0.5))} (where the default initial value is 2) for a \code{family = "NegBin1"} model. In contrast, an unnamed start vector must specify the full set of parameters as used by the model.}
\item{\code{data}}{a named list of covariates that are to be included as fixed effects (see \code{\link{fe}}) in any of the 3 component formulae. By default, the time variable \code{t} is available and used for seasonal effects created by \code{\link{addSeason2formula}}.
In general, covariates in this list can be either vectors of length \code{nrow(stsObj)} interpreted as time-varying but common across all units, or matrices of the same dimension as the disease counts \code{observed(stsObj)}.} \item{\code{keep.terms}}{logical indicating if the terms object used in the fit is to be kept as part of the returned object. This is usually not necessary, since the terms object is reconstructed by the \code{\link{terms}}-method for class \code{"hhh4"} if necessary (based on \code{stsObj} and \code{control}, which are both part of the returned \code{"hhh4"} object).} } The auxiliary function \code{\link{makeControl}} might be useful to create such a list of control parameters. } \item{check.analyticals}{logical (or a subset of \code{c("numDeriv", "maxLik")}), indicating if (how) the implemented analytical score vector and Fisher information matrix should be checked against numerical derivatives at the parameter starting values, using the packages \pkg{numDeriv} and/or \pkg{maxLik}. If activated, \code{hhh4} will return a list containing the analytical and numerical derivatives for comparison (no ML estimation will be performed). This is mainly intended for internal use by the package developers.} } \value{ \code{hhh4} returns an object of class \code{"hhh4"}, which is a list containing the following components: \item{coefficients}{named vector with estimated (regression) parameters of the model} \item{se}{estimated standard errors (for regression parameters)} \item{cov}{covariance matrix (for regression parameters)} \item{Sigma}{estimated variance-covariance matrix of random effects} \item{Sigma.orig}{estimated variance parameters on internal scale used for optimization} \item{Sigma.cov}{inverse of marginal Fisher information (on internal scale), i.e., the asymptotic covariance matrix of \code{Sigma.orig}} \item{call}{ the matched call } \item{dim}{ vector with number of fixed and random effects in the model } \item{loglikelihood}{(penalized) loglikelihood evaluated at the MLE} \item{margll}{ (approximate) log marginal likelihood should the model contain random effects } \item{convergence}{logical. Did optimizer converge?} \item{fitted.values}{fitted mean values \eqn{\mu_{i,t}}{\mu_it}} \item{control}{control object of the fit} \item{terms}{the terms object used in the fit if \code{keep.terms = TRUE} and \code{NULL} otherwise} \item{stsObj}{ the supplied \code{stsObj} } \item{lags}{named integer vector of length two containing the lags used for the epidemic components \code{"ar"} and \code{"ne"}, respectively. The corresponding lag is \code{NA} if the component was not included in the model.} \item{nObs}{number of observations used for fitting the model} \item{nTime}{ number of time points used for fitting the model } \item{nUnit}{ number of units (e.g. areas) used for fitting the model} \item{runtime}{the \code{\link{proc.time}}-queried time taken to fit the model, i.e., a named numeric vector of length 5 of class \code{"proc_time"}} } \details{ An endemic-epidemic multivariate time-series model for infectious disease counts \eqn{Y_{it}}{Y_it} from units \eqn{i=1,\dots,I} during periods \eqn{t=1,\dots,T} was proposed by Held et al (2005) and was later extended in a series of papers (Paul et al, 2008; Paul and Held, 2011; Held and Paul, 2012; Meyer and Held, 2014). 
In its most general formulation, this so-called \code{hhh4} (or HHH or \eqn{H^3} or triple-H) model assumes that, conditional on past observations, \eqn{Y_{it}}{Y_it} has a Poisson or negative binomial distribution with mean \deqn{\mu_{it} = \lambda_{it} y_{i,t-1} + \phi_{it} \sum_{j\neq i} w_{ji} y_{j,t-1} + e_{it} \nu_{it} }{% \mu_it = \lambda_it y_i,t-1 + \phi_it sum_(j != i) w_ji y_j,t-1 + e_it \nu_it } In the case of a negative binomial model, the conditional variance is \eqn{\mu_{it}(1+\psi_i\mu_{it})}{\mu_it(1+\psi_i*\mu_it)} with overdispersion parameters \eqn{\psi_i > 0} (possibly shared across different units, e.g., \eqn{\psi_i\equiv\psi}{\psi_i=\psi}). Univariate time series of counts \eqn{Y_t} are supported as well, in which case \code{hhh4} can be regarded as an extension of \code{\link[MASS]{glm.nb}} to account for autoregression. See the Examples below for a comparison of an endemic-only \code{hhh4} model with a corresponding \code{glm.nb}. The three unknown quantities of the mean \eqn{\mu_{it}}{\mu_it}, \itemize{ \item \eqn{\lambda_{it}}{\lambda_it} in the autoregressive (\code{ar}) component, \item \eqn{\phi_{it}}{\phi_it} in the neighbour-driven (\code{ne}) component, and \item \eqn{\nu_{it}}{\nu_it} in the endemic (\code{end}) component, } are log-linear predictors incorporating time-/unit-specific covariates. They may also contain unit-specific random intercepts as proposed by Paul and Held (2011). The endemic mean is usually modelled proportional to a unit-specific offset \eqn{e_{it}}{e_it} (e.g., population numbers or fractions); it is possible to include such multiplicative offsets in the epidemic components as well. The \eqn{w_{ji}}{w_ji} are transmission weights reflecting the flow of infections from unit \eqn{j} to unit \eqn{i}. If weights vary over time (prespecified as a 3-dimensional array \eqn{(w_{jit})}{(w_jit)}), the \code{ne} sum in the mean uses \eqn{w_{jit} y_{j,t-1}}{w_jit y_j,t-1}. In spatial \code{hhh4} applications, the \dQuote{units} refer to geographical regions and the weights could be derived from movement network data. Alternatively, the weights \eqn{w_{ji}}{w_ji} can be estimated parametrically as a function of adjacency order (Meyer and Held, 2014), see \code{\link{W_powerlaw}}. (Penalized) Likelihood inference for such \code{hhh4} models has been established by Paul and Held (2011) with extensions for parametric neighbourhood weights by Meyer and Held (2014). Supplied with the analytical score function and Fisher information, the function \code{hhh4} by default uses the quasi-Newton algorithm available through \code{\link{nlminb}} to maximize the log-likelihood. Convergence is usually fast even for a large number of parameters. If the model contains random effects, the penalized and marginal log-likelihoods are maximized alternately until convergence. } \seealso{ See the special functions \code{\link{fe}}, \code{\link{ri}} and the examples below for how to specify unit-specific effects. Further details on the modelling approach and illustrations of its implementation can be found in \code{vignette("hhh4")} and \code{vignette("hhh4_spacetime")}. 
} \author{Michaela Paul, Sebastian Meyer, Leonhard Held} \examples{ ###################### ## Univariate examples ###################### ### weekly counts of salmonella agona cases, UK, 1990-1995 data("salmonella.agona") ## convert old "disProg" to new "sts" data class salmonella <- disProg2sts(salmonella.agona) salmonella plot(salmonella) ## generate formula for an (endemic) time trend and seasonality f.end <- addSeason2formula(f = ~1 + t, S = 1, period = 52) f.end ## specify a simple autoregressive negative binomial model model1 <- list(ar = list(f = ~1), end = list(f = f.end), family = "NegBin1") ## fit this model to the data res <- hhh4(salmonella, model1) ## summarize the model fit summary(res, idx2Exp=1, amplitudeShift=TRUE, maxEV=TRUE) plot(res) plot(res, type = "season", components = "end") ### weekly counts of meningococcal infections, Germany, 2001-2006 data("influMen") fluMen <- disProg2sts(influMen) meningo <- fluMen[, "meningococcus"] meningo plot(meningo) ## again a simple autoregressive NegBin model with endemic seasonality meningoFit <- hhh4(stsObj = meningo, control = list( ar = list(f = ~1), end = list(f = addSeason2formula(f = ~1, S = 1, period = 52)), family = "NegBin1" )) summary(meningoFit, idx2Exp=TRUE, amplitudeShift=TRUE, maxEV=TRUE) plot(meningoFit) plot(meningoFit, type = "season", components = "end") ######################## ## Multivariate examples ######################## ### bivariate analysis of influenza and meningococcal infections ### (see Paul et al, 2008) plot(fluMen, same.scale = FALSE) ## Fit a negative binomial model with ## - autoregressive component: disease-specific intercepts ## - neighbour-driven component: only transmission from flu to men ## - endemic component: S=3 and S=1 sine/cosine pairs for flu and men, respectively ## - disease-specific overdispersion WfluMen <- neighbourhood(fluMen) WfluMen["meningococcus","influenza"] <- 0 WfluMen f.end_fluMen <- addSeason2formula(f = ~ -1 + fe(1, which = c(TRUE, TRUE)), S = c(3, 1), period = 52) f.end_fluMen fluMenFit <- hhh4(fluMen, control = list( ar = list(f = ~ -1 + fe(1, unitSpecific = TRUE)), ne = list(f = ~ 1, weights = WfluMen), end = list(f = f.end_fluMen), family = "NegBinM")) summary(fluMenFit, idx2Exp=1:3) plot(fluMenFit, type = "season", components = "end", unit = 1) plot(fluMenFit, type = "season", components = "end", unit = 2) \dontshow{ ## regression test for amplitude/shift transformation of sine-cosine pairs ## coefficients were wrongly matched in surveillance < 1.18.0 stopifnot(coef(fluMenFit, amplitudeShift = TRUE)["end.A(2 * pi * t/52).meningococcus"] == sqrt(sum(coef(fluMenFit)[paste0("end.", c("sin","cos"), "(2 * pi * t/52).meningococcus")]^2))) } ### weekly counts of measles, Weser-Ems region of Lower Saxony, Germany data("measlesWeserEms") measlesWeserEms plot(measlesWeserEms) # note the two districts with zero cases ## we could fit the same simple model as for the salmonella cases above model1 <- list( ar = list(f = ~1), end = list(f = addSeason2formula(~1 + t, period = 52)), family = "NegBin1" ) measlesFit <- hhh4(measlesWeserEms, model1) summary(measlesFit, idx2Exp=TRUE, amplitudeShift=TRUE, maxEV=TRUE) ## but we should probably at least use a population offset in the endemic ## component to reflect heterogeneous incidence levels of the districts, ## and account for spatial dependence (here just using first-order adjacency) measlesFit2 <- update(measlesFit, end = list(offset = population(measlesWeserEms)), ne = list(f = ~1, weights = neighbourhood(measlesWeserEms) == 1)) 
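## A possible extension (sketch, not from the original example set): since
## neighbourhood(measlesWeserEms) contains adjacency orders, the weights
## could instead be estimated parametrically as a power law of adjacency
## order (Meyer and Held, 2014); 'measlesFit2pl' is just an illustrative name.
measlesFit2pl <- update(measlesFit2,
    ne = list(weights = W_powerlaw(maxlag = max(neighbourhood(measlesWeserEms)),
                                   normalize = TRUE)))
AIC(measlesFit2, measlesFit2pl)  # compare with the fixed first-order weights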
summary(measlesFit2, idx2Exp=TRUE, amplitudeShift=TRUE, maxEV=TRUE)
plot(measlesFit2, units = NULL, hide0s = TRUE)

## 'measlesFit2' corresponds to the 'measlesFit_basic' model in
## vignette("hhh4_spacetime"). See there for further analyses,
## including vaccination coverage as a covariate,
## spatial power-law weights, and random intercepts.

\dontrun{
### last but not least, a more sophisticated (and time-consuming)
### analysis of weekly counts of influenza from 140 districts in
### Southern Germany (originally analysed by Paul and Held, 2011,
### and revisited by Held and Paul, 2012, and Meyer and Held, 2014)

data("fluBYBW")
plot(fluBYBW, type = observed ~ time)
plot(fluBYBW, type = observed ~ unit,
     ## mean yearly incidence per 100,000 inhabitants (8 years)
     population = fluBYBW@map$X31_12_01 / 100000 * 8)

## For the full set of models for data("fluBYBW") as analysed by
## Paul and Held (2011), including predictive model assessment
## using proper scoring rules, see the (computer-intensive)
## demo("fluBYBW") script:
demoscript <- system.file(file.path("demo", "fluBYBW.R"),
                          package = "surveillance")
demoscript
#file.show(demoscript)

## Here we fit the improved power-law model of Meyer and Held (2014)
## - autoregressive component: random intercepts + S = 1 sine/cosine pair
## - neighbour-driven component: random intercepts + S = 1 sine/cosine pair
##   + population gravity with normalized power-law weights
## - endemic component: random intercepts + trend + S = 3 sine/cosine pairs
## - random intercepts are iid but correlated between components
f.S1 <- addSeason2formula(
    ~-1 + ri(type="iid", corr="all"),
    S = 1, period = 52)
f.end.S3 <- addSeason2formula(
    ~-1 + ri(type="iid", corr="all") + I((t-208)/100),
    S = 3, period = 52)

## for power-law weights, we need adjacency orders, which can be
## computed from the binary adjacency indicator matrix
nbOrder1 <- neighbourhood(fluBYBW)
neighbourhood(fluBYBW) <- nbOrder(nbOrder1, 15)

## full model specification
fluModel <- list(
    ar = list(f = f.S1),
    ne = list(f = update.formula(f.S1, ~ . + log(pop)),
              weights = W_powerlaw(maxlag=max(neighbourhood(fluBYBW)),
                                   normalize = TRUE, log = TRUE)),
    end = list(f = f.end.S3, offset = population(fluBYBW)),
    family = "NegBin1", data = list(pop = population(fluBYBW)),
    optimizer = list(variance = list(method = "Nelder-Mead")),
    verbose = TRUE)

## CAVE: random effects considerably increase the runtime of model estimation
## (It is usually advantageous to first fit a model with simple intercepts
## to obtain reasonable start values for the other parameters.)
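## A sketch of this two-step strategy (illustrative object names, assuming
## modifyList() semantics to merge simplified formulae into 'fluModel'):
## fluModel0 <- modifyList(fluModel, list(ar = list(f = ~1),
##     ne = list(f = ~1 + log(pop)), end = list(f = ~1 + I((t-208)/100))))
## fluFit0 <- hhh4(fluBYBW, fluModel0)
## fluModel$start <- list(fixed = coef(fluFit0))  # unmatched names are ignored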
set.seed(1)  # because random intercepts are initialized randomly
fluFit <- hhh4(fluBYBW, fluModel)

summary(fluFit, idx2Exp = TRUE, amplitudeShift = TRUE)

plot(fluFit, type = "fitted", total = TRUE)
plot(fluFit, type = "season")
range(plot(fluFit, type = "maxEV"))
plot(fluFit, type = "maps", prop = TRUE)
gridExtra::grid.arrange(
    grobs = lapply(c("ar", "ne", "end"), function (comp)
        plot(fluFit, type = "ri", component = comp, main = comp,
             exp = TRUE, sub = "multiplicative effect")),
    nrow = 1, ncol = 3)
plot(fluFit, type = "neweights", xlab = "adjacency order")
}

########################################################################
## An endemic-only "hhh4" model can also be estimated using MASS::glm.nb
########################################################################

## weekly counts of measles, Weser-Ems region of Lower Saxony, Germany
data("measlesWeserEms")

## fit an endemic-only "hhh4" model
## with time covariates and a district-specific offset
hhh4fit <- hhh4(measlesWeserEms, control = list(
    end = list(f = addSeason2formula(~1 + t, period = measlesWeserEms@freq),
               offset = population(measlesWeserEms)),
    ar = list(f = ~-1), ne = list(f = ~-1), family = "NegBin1",
    subset = 1:nrow(measlesWeserEms)
))
summary(hhh4fit)

## fit the same model using MASS::glm.nb
measlesWeserEmsData <- as.data.frame(measlesWeserEms, tidy = TRUE)
measlesWeserEmsData$t <- c(hhh4fit$control$data$t)
glmnbfit <- MASS::glm.nb(
    update(formula(hhh4fit)$end, observed ~ . + offset(log(population))),
    data = measlesWeserEmsData
)
summary(glmnbfit)

## Note that the overdispersion parameter is parametrized inversely.
## The likelihood and point estimates are all the same.
## However, the variance estimates are different: in glm.nb, the parameters
## are estimated conditional on the overdispersion theta.

\dontshow{
stopifnot(
    all.equal(logLik(hhh4fit), logLik(glmnbfit)),
    all.equal(1/coef(hhh4fit)[["overdisp"]], glmnbfit$theta, tolerance = 1e-6),
    all.equal(coef(hhh4fit)[1:4], coef(glmnbfit),
              tolerance = 1e-6, check.attributes = FALSE),
    all.equal(c(residuals(hhh4fit)), residuals(glmnbfit),
              tolerance = 1e-6, check.attributes = FALSE)
)
}
}
\references{
Held, L., \enc{Höhle}{Hoehle}, M. and Hofmann, M. (2005): A statistical framework for the analysis of multivariate infectious disease surveillance counts. \emph{Statistical Modelling}, \bold{5} (3), 187-199. \doi{10.1191/1471082X05st098oa}

Paul, M., Held, L. and Toschke, A. M. (2008): Multivariate modelling of infectious disease surveillance data. \emph{Statistics in Medicine}, \bold{27} (29), 6250-6267. \doi{10.1002/sim.3440}

Paul, M. and Held, L. (2011): Predictive assessment of a non-linear random effects model for multivariate time series of infectious disease counts. \emph{Statistics in Medicine}, \bold{30} (10), 1118-1136. \doi{10.1002/sim.4177}

Held, L. and Paul, M. (2012): Modeling seasonality in space-time infectious disease surveillance data. \emph{Biometrical Journal}, \bold{54} (6), 824-843. \doi{10.1002/bimj.201200037}

Meyer, S. and Held, L. (2014): Power-law models for infectious disease spread. \emph{The Annals of Applied Statistics}, \bold{8} (3), 1612-1639. \doi{10.1214/14-AOAS743}

Meyer, S., Held, L. and \enc{Höhle}{Hoehle}, M. (2017): Spatio-temporal analysis of epidemic phenomena using the \R package \pkg{surveillance}. \emph{Journal of Statistical Software}, \bold{77} (11), 1-55.
\doi{10.18637/jss.v077.i11}
}
\keyword{ts} \keyword{regression}
surveillance/man/farringtonFlexible.Rd0000644000176200001440000002626213712020702017636 0ustar liggesusers\name{farringtonFlexible}
\alias{farringtonFlexible}
\encoding{latin1}
\title{Surveillance for Univariate Count Time Series Using an Improved Farrington Method}
\description{
%
The function takes \code{range} values of the surveillance time series \code{sts} and for each time point uses a Poisson GLM with overdispersion to predict an upper bound on the number of counts according to the procedure by Farrington et al. (1996) and by Noufaily et al. (2012). This bound is then compared to the observed number of counts. If the observation is above the bound, then an alarm is raised. The implementation is illustrated in Salmon et al. (2016).
%
}
\usage{
farringtonFlexible(sts, control = list(
    range = NULL, b = 5, w = 3,
    reweight = TRUE, weightsThreshold = 2.58,
    verbose = FALSE, glmWarnings = TRUE,
    alpha = 0.05, trend = TRUE, pThresholdTrend = 0.05,
    limit54 = c(5,4), powertrans = "2/3",
    fitFun = "algo.farrington.fitGLM.flexible",
    populationOffset = FALSE,
    noPeriods = 1, pastWeeksNotIncluded = NULL,
    thresholdMethod = "delta"))
}
\arguments{
\item{sts}{object of class \code{\linkS4class{sts}} (including the \code{observed} and the \code{state} time series)}
\item{control}{Control object given as a \code{list} containing the following components:
\describe{
\item{\code{range}}{Specifies the index of all timepoints which should be tested. If range is \code{NULL} all possible timepoints are used.}
\item{\code{b}}{How many years back in time to include when forming the base counts.}
\item{\code{w}}{Window's half-size, i.e. number of weeks to include before and after the current week in each year.}
\item{\code{reweight}}{Boolean specifying whether to perform the reweighting step.}
\item{\code{weightsThreshold}}{Defines the threshold for reweighting past outbreaks using the Anscombe residuals (1 in the original method, 2.58 advised in the improved method).}
\item{\code{verbose}}{Boolean specifying whether to show extra debugging information.}
\item{\code{glmWarnings}}{Boolean specifying whether to print warnings from the call to \code{glm}.}
\item{\code{alpha}}{An approximate (one-sided) \eqn{(1-\alpha)\cdot 100\%} prediction interval is calculated, unlike the original method where it was a two-sided interval. The upper limit of this interval, i.e. the \eqn{(1-\alpha)\cdot 100\%} quantile, serves as an upperbound.}
\item{\code{trend}}{Boolean indicating whether a trend should be included and kept in case the conditions in the Farrington et al. paper are met (see the results). If \code{FALSE} then no trend is fitted.}
\item{\code{pThresholdTrend}}{Threshold for deciding whether to keep trend in the model (0.05 in the original method, 1 advised in the improved method).}
\item{\code{limit54}}{Vector containing two numbers: \code{cases} and \code{period}. To avoid alarms in cases where the time series only has almost no cases in the specific week the algorithm uses the following heuristic criterion (see Section 3.8 of the Farrington paper) to protect against low counts: no alarm is sounded if fewer than \eqn{\code{cases}=5} reports were received in the past \eqn{\code{period}=4} weeks. \code{limit54=c(cases,period)} is a vector allowing the user to change these numbers.
Note: As of version 0.9-7 of the package the term "last" period of weeks includes the current week - otherwise no alarm is sounded for horribly large numbers if the four weeks before that are too low.}
\item{\code{powertrans}}{Power transformation to apply to the data if the threshold is to be computed with the method described in Farrington et al. (1996). Use either "2/3" for skewness correction (Default), "1/2" for variance stabilizing transformation or "none" for no transformation.}
\item{\code{fitFun}}{String containing the name of the fit function to be used for fitting the GLM. The only current option is "algo.farrington.fitGLM.flexible".}
\item{\code{populationOffset}}{Boolean specifying whether to include a population offset in the GLM. The slot \code{sts@population} gives the population vector.}
\item{\code{noPeriods}}{Number of levels in the factor allowing the use of more baseline data. If equal to 1 no factor variable is created, the set of reference values is defined as in Farrington et al (1996).}
\item{\code{pastWeeksNotIncluded}}{Number of past weeks to ignore in the calculation. The default (\code{NULL}) means to use the value of \code{control$w}. Setting \code{pastWeeksNotIncluded=26} might be preferable (Noufaily et al., 2012).}
\item{\code{thresholdMethod}}{Method to be used to derive the upperbound. Options are \code{"delta"} for the method described in Farrington et al. (1996), \code{"nbPlugin"} for the method described in Noufaily et al. (2012), and \code{"muan"} for the method extended from Noufaily et al. (2012).}
}
}
}
\details{
The following steps are performed according to the Farrington et al. (1996) paper.
\enumerate{
\item Fit of the initial model with intercept, time trend if \code{trend} is \code{TRUE}, seasonal factor variable if \code{noPeriods} is bigger than 1, and population offset if \code{populationOffset} is \code{TRUE}. Initial estimation of mean and overdispersion.
\item Calculation of the weights omega (correction for past outbreaks) if \code{reweight} is \code{TRUE}. The threshold for reweighting is defined in \code{control}.
\item Refitting of the model
\item Revised estimation of overdispersion
\item Omission of the trend, if it is not significant
\item Repetition of the whole procedure
\item Calculation of the threshold value using the model to compute a quantile of the predictive distribution. The method used depends on \code{thresholdMethod}, which can either be:
\describe{
\item{"delta"}{One assumes that the prediction error (or a transformation of the prediction error, depending on \code{powertrans}) is normally distributed. The threshold is deduced from a quantile of this normal distribution using the variance and estimate of the expected count given by the GLM, and the delta rule. The procedure takes into account both the estimation error (variance of the estimator of the expected count in the GLM) and the prediction error (variance of the prediction error). This is the suggestion in Farrington et al. (1996).}
\item{"nbPlugin"}{One assumes that the new count follows a negative binomial distribution parameterized by the expected count and the overdispersion estimated in the GLM. The threshold is deduced from a quantile of this discrete distribution. This process disregards the estimation error, though. This method was used in Noufaily et al.
(2012).}
\item{"muan"}{One also uses the assumption of the negative binomial sampling distribution but does not plug in the estimate of the expected count from the GLM; instead one uses a quantile from the asymptotic normal distribution of the expected count estimated in the GLM, in order to take into account both the estimation error and the prediction error. }
}
\item Computation of the exceedance score.
}
Warning: monthly data containing the last day of each month as date should be analysed with \code{epochAsDate=FALSE} in the \code{sts} object. Otherwise February makes it impossible to find some reference time points.
}
\value{
An object of class \code{sts} with the slots \code{upperbound} and \code{alarm} filled by appropriate output of the algorithm. The \code{control} slot of the input \code{sts} is amended with the following matrix elements, all with \code{length(range)} rows:
\describe{
\item{trend}{Booleans indicating whether a time trend was fitted for this time point.}
\item{trendVector}{coefficient of the time trend in the GLM for this time point. If no trend was fitted it is equal to NA.}
\item{pvalue}{probability of observing a value at least equal to the observation under the null hypothesis.}
\item{expected}{expectation of the predictive distribution for each timepoint. It is only reported if the conditions for raising an alarm are met (enough cases).}
\item{mu0Vector}{input for the negative binomial distribution to get the upperbound as a quantile (either a plug-in from the GLM or a quantile from the asymptotic normal distribution of the estimator)}
\item{phiVector}{overdispersion of the GLM at each timepoint.}
}
}
\keyword{classif}
\examples{
### DATA I/O ###
#Read Salmonella Agona data
data("salmonella.agona")
# Create the corresponding sts object from the old disProg object
salm <- disProg2sts(salmonella.agona)

### RUN THE ALGORITHMS WITH TWO DIFFERENT SETS OF OPTIONS ###
# Farrington with old options
control1 <- list(range=(260:312),
                 noPeriods=1,populationOffset=FALSE,
                 fitFun="algo.farrington.fitGLM.flexible",
                 b=4,w=3,weightsThreshold=1,
                 pastWeeksNotIncluded=3,
                 pThresholdTrend=0.05,trend=TRUE,
                 thresholdMethod="delta",alpha=0.1)
control2 <- list(range=(260:312),
                 noPeriods=10,populationOffset=FALSE,
                 fitFun="algo.farrington.fitGLM.flexible",
                 b=4,w=3,weightsThreshold=2.58,
                 pastWeeksNotIncluded=26,
                 pThresholdTrend=1,trend=TRUE,
                 thresholdMethod="delta",alpha=0.1)
salm1 <- farringtonFlexible(salm,control=control1)
salm2 <- farringtonFlexible(salm,control=control2)

### PLOT THE RESULTS ###
y.max <- max(upperbound(salm1),observed(salm1),upperbound(salm2),na.rm=TRUE)
plot(salm1, ylim=c(0,y.max), main='S. Agona in Germany', legend.opts=NULL)
lines(1:(nrow(salm1)+1)-0.5,
      c(upperbound(salm1),upperbound(salm1)[nrow(salm1)]),
      type="s",col='tomato4',lwd=2)
lines(1:(nrow(salm2)+1)-0.5,
      c(upperbound(salm2),upperbound(salm2)[nrow(salm2)]),
      type="s",col="blueviolet",lwd=2)
legend(0, 10, legend=c('Alarm','Upperbound with old options',
                       'Upperbound with new options'),
       pch=c(24,NA,NA),lty=c(NA,1,1),
       bg="white",lwd=c(2,2,2),col=c('red','tomato4',"blueviolet"))
}
\author{M. Salmon, M. \enc{Höhle}{Hoehle}}
\seealso{\code{\link{algo.farrington.fitGLM}},\code{\link{algo.farrington.threshold}}}
\keyword{classif}
\references{
Farrington, C.P., Andrews, N.J., Beale, A.D. and Catchpole, M.A. (1996): A statistical algorithm for the early detection of outbreaks of infectious disease. J. R. Statist. Soc. A, 159, 547-563.

Noufaily, A., Enki, D.G., Farrington, C.P., Garthwaite, P., Andrews, N.J., Charlett, A.
(2012): An improved algorithm for outbreak detection in multiple surveillance systems. Statistics in Medicine, 32 (7), 1206-1222.

Salmon, M., Schumacher, D. and \enc{Höhle}{Hoehle}, M. (2016): Monitoring count time series in \R: Aberration detection in public health surveillance. \emph{Journal of Statistical Software}, \bold{70} (10), 1-35. \doi{10.18637/jss.v070.i10}
}
surveillance/man/twinstim_plot.Rd0000644000176200001440000000260512011140620016712 0ustar liggesusers\name{twinstim_plot}
\alias{plot.twinstim}
\title{ Plot methods for fitted \code{twinstim}'s }
\description{ The fitted conditional intensity function from \code{\link{twinstim}} may be visualized in at least two ways: \code{\link{iafplot}} plots the fitted interaction functions (as a function of the distance from the host), and \code{\link{intensityplot.twinstim}} plots the fitted intensity either aggregated over space (evolution over time) or aggregated over time (spatial surface of the cumulated intensity). The \code{plot} method for class \code{"twinstim"} is just a wrapper for these two functions. }
\usage{ \method{plot}{twinstim}(x, which, ...) }
\arguments{
\item{x}{ an object of class \code{"twinstim"}. }
\item{which}{ character. Which characteristic of the conditional intensity should be plotted? Possible values are the ones allowed in the functions \code{\link{iafplot}} and \code{\link{intensityplot.twinstim}}, e.g. \code{"siaf"} or \code{"epidemic proportion"}. Partial matching is applied. }
\item{\dots}{ further arguments passed to \code{iafplot} or \code{intensityplot.twinstim}. }
}
\value{ See the documentation of the respective plot functions, \code{\link{iafplot}} or \code{\link{intensityplot.twinstim}}. }
\author{ Sebastian Meyer }
\examples{
# see the examples for iafplot() and intensityplot.twinstim()
}
\keyword{hplot}
surveillance/man/algo.glrnb.Rd0000644000176200001440000002316313165505075016047 0ustar liggesusers\name{algo.glrnb}
\alias{algo.glrnb}
\alias{algo.glrpois}
\encoding{latin1}
\title{Count Data Regression Charts}
\description{ Count data regression charts for the monitoring of surveillance time series as proposed by \enc{Höhle}{Hoehle} and Paul (2008). The implementation is described in Salmon et al. (2016). }
\usage{
algo.glrnb(disProgObj, control = list(range=range, c.ARL=5,
           mu0=NULL, alpha=0, Mtilde=1, M=-1, change="intercept",
           theta=NULL, dir=c("inc","dec"),
           ret=c("cases","value"), xMax=1e4))
algo.glrpois(disProgObj, control = list(range=range, c.ARL=5,
           mu0=NULL, Mtilde=1, M=-1, change="intercept",
           theta=NULL, dir=c("inc","dec"),
           ret=c("cases","value"), xMax=1e4))
}
\arguments{
\item{disProgObj}{object of class \code{disProg} to do surveillance for}
\item{control}{A list controlling the behaviour of the algorithm
\describe{
\item{\code{range}}{vector of indices in the observed vector to monitor (should be consecutive)}
\item{\code{mu0}}{A vector of in-control values of the mean of the Poisson / negative binomial distribution with the same length as \code{range}. If \code{NULL} the observed values in \code{1:(min(range)-1)} are used to estimate the beta vector through a generalized linear model. To fine-tune the model one can instead specify \code{mu0} as a list with two components:
\describe{
\item{\code{S}}{integer number of harmonics to include (typically 1 or 2)}
\item{\code{trend}}{A Boolean indicating whether to include a term \code{t} in the GLM model}
}
The fitting is controlled by the \code{estimateGLRNbHook} function. The in-control mean model is re-fitted after every alarm.
The fitted models can be found as a list \code{mod} in the \code{control} slot after the call.

Note: If a value for \code{alpha} is given, then the inverse of this value is used as fixed \code{theta} in a \code{\link[MASS]{negative.binomial}} \code{glm}. If \code{is.null(alpha)} then the parameter is estimated as well (using \code{\link[MASS]{glm.nb}}) -- see the description of this parameter for details. }
\item{\code{alpha}}{The (known) dispersion parameter of the negative binomial distribution, i.e. the parametrization of the negative binomial is such that the variance is \eqn{mean + alpha*mean^2}{mean + \alpha*mean^2}. Note: This parametrization is the inverse of the shape parametrization used in R -- for example in \code{dnbinom} and \code{glm.nb}. Hence, if \code{alpha=0} then the negative binomial distribution boils down to the Poisson distribution and a call of \code{algo.glrnb} is equivalent to a call to \code{algo.glrpois}. If \code{alpha=NULL} the parameter is calculated as part of the in-control estimation. However, the parameter is estimated only once from the first fit. Subsequent fittings are only for the parameters of the linear predictor with \code{alpha} fixed.}
\item{\code{c.ARL}}{threshold in the GLR test, i.e. \eqn{c_{\gamma}}{c_gamma}}
\item{\code{Mtilde}}{number of observations needed before we have a full rank; the typical setup for the "\code{intercept}" and "\code{epi}" charts is \code{Mtilde=1}}
\item{\code{M}}{number of time instances back in time in the window-limited approach, i.e. the last value considered is \eqn{\max(1,n-M)}. To always look back until the first observation use \code{M=-1}.}
\item{\code{change}}{a string specifying the type of the alternative. Currently the two choices are \code{intercept} and \code{epi}. See the SFB Discussion Paper 500 for details.}
\item{\code{theta}}{if \code{NULL} then the GLR scheme is used. If not \code{NULL} the prespecified value for \eqn{\kappa} or \eqn{\lambda} is used in a recursive LR scheme, which is faster. }
\item{\code{dir}}{a string specifying the direction of testing in the GLR scheme. With \code{"inc"} only increases in \eqn{x} are considered in the GLR-statistic, with \code{"dec"} decreases are regarded. }
\item{\code{ret}}{a string specifying the type of \code{upperbound}-statistic that is returned. With \code{"cases"} the number of cases that would have been necessary to produce an alarm or with \code{"value"} the GLR-statistic is computed (see below).}
\item{\code{xMax}}{Maximum value to try for x to see if this is the upperbound number of cases before sounding an alarm (Default: 1e4). This only applies for the GLR using the NegBin when \code{ret="cases"} -- see details.}
}
}
}
\value{
\code{algo.glrpois} simply calls \code{algo.glrnb} with \code{control$alpha} set to 0.

\code{algo.glrnb} returns a list of class \code{survRes} (surveillance result), which includes the alarm value for recognizing an outbreak (1 for alarm, 0 for no alarm), the threshold value for recognizing the alarm and the input object of class disProg. The \code{upperbound} slot of the object is filled with the current \eqn{GLR(n)} value or with the number of cases that are necessary to produce an alarm at any time point \eqn{<=n}. Both lead to the same alarm timepoints, but \code{"cases"} has an obvious interpretation.
}
\details{
This function implements the seasonal count data chart based on generalized likelihood ratio (GLR) as described in the \enc{Höhle}{Hoehle} and Paul (2008) paper.
A moving-window generalized likelihood ratio detector is used, i.e. the detector has the form
%
\deqn{N = \inf\left\{ n : \max_{1\leq k \leq n} \left[ \sum_{t=k}^n \log \left\{ \frac{f_{\theta_1}(x_t|z_t)}{f_{\theta_0}(x_t|z_t)} \right\} \right] \geq c_\gamma \right\} }{N = inf(... >= c_gamma)}
%
where instead of \eqn{1\leq k \leq n}{1<= k <= n} the GLR statistic is computed for all \eqn{k \in \{n-M, \ldots, n-\tilde{M}+1\}}{k \in \{n-M, \ldots, n-Mtilde+1\}}. To achieve the typical behaviour from \eqn{1\leq k\leq n}{1<= k <= n} use \code{Mtilde=1} and \code{M=-1}.

So \eqn{N} is the time point where the GLR statistic is above the threshold for the first time: an alarm is given and the surveillance is reset starting from time \eqn{N+1}. Note that the same \code{c.ARL} as before is used, but if \code{mu0} is different at \eqn{N+1,N+2,\ldots} compared to time \eqn{1,2,\ldots} the run length properties differ. Because the \code{c.ARL} to obtain a specific ARL can only be determined by Monte Carlo simulation, there is no good way to update \code{c.ARL} automatically at the moment. Also, FIR GLR-detectors might be worth considering.

In case \code{is.null(theta)} and \code{alpha>0} as well as \code{ret="cases"} then a brute-force search is conducted for each time point in range in order to determine the number of cases necessary before an alarm is sounded. In case no alarm was sounded so far by time \eqn{t}, the function increases \eqn{x[t]} until an alarm is sounded any time before time point \eqn{t}. If no alarm is sounded by \code{xMax}, a return value of 1e99 is given. Similarly, if an alarm was sounded by time \eqn{t} the function counts down instead. Note: This is slow experimental code!

Window-limited ``\code{intercept}'' charts have not been extensively tested and are at the moment not supported. As speed is not an issue here, this does not matter much. Therefore, a value of \code{M=-1} is always used in the intercept charts.
}
\author{M. \enc{Höhle}{Hoehle} with contributions by V.
Wimmer}
\examples{
##Simulate data and apply the algorithm
S <- 1 ; t <- 1:120 ; m <- length(t)
beta <- c(1.5,0.6,0.6)
omega <- 2*pi/52
#log mu_{0,t}
base <- beta[1] + beta[2] * cos(omega*t) + beta[3] * sin(omega*t)
#Generate example data with changepoint and tau=tau
tau <- 100
kappa <- 0.4
mu0 <- exp(base)
mu1 <- exp(base  + kappa)

## Poisson example
#Generate data
set.seed(42)
x <- rpois(length(t),mu0*(exp(kappa)^(t>=tau)))
s.ts <- create.disProg(week=1:length(t),observed=x,state=(t>=tau))
#Plot the data
plot(s.ts,legend=NULL,xaxis.years=FALSE)
#Run
cntrl = list(range=t,c.ARL=5, Mtilde=1, mu0=mu0,
             change="intercept",ret="value",dir="inc")
glr.ts <- algo.glrpois(s.ts,control=cntrl)
plot(glr.ts,xaxis.years=FALSE)
lr.ts  <- algo.glrpois(s.ts,control=c(cntrl,theta=0.4))
plot(lr.ts,xaxis.years=FALSE)

## NegBin example
#Generate data
set.seed(42)
alpha <- 0.2
x <- rnbinom(length(t),mu=mu0*(exp(kappa)^(t>=tau)),size=1/alpha)
s.ts <- create.disProg(week=1:length(t),observed=x,state=(t>=tau))
#Plot the data
plot(s.ts,legend=NULL,xaxis.years=FALSE)

#Run GLR based detection
cntrl = list(range=t,c.ARL=5, Mtilde=1, mu0=mu0, alpha=alpha,
             change="intercept",ret="value",dir="inc")
glr.ts <- algo.glrnb(s.ts,control=c(cntrl))
plot(glr.ts,xaxis.years=FALSE)

#CUSUM LR detection with backcalculated number of cases
cntrl2 = list(range=t,c.ARL=5, Mtilde=1, mu0=mu0, alpha=alpha,
              change="intercept",ret="cases",dir="inc",theta=1.2)
glr.ts2 <- algo.glrnb(s.ts,control=c(cntrl2))
plot(glr.ts2,xaxis.years=FALSE)
}
\keyword{classif}
\references{
\enc{Höhle}{Hoehle}, M. and Paul, M. (2008): Count data regression charts for the monitoring of surveillance time series. Computational Statistics and Data Analysis, 52 (9), 4357-4368.

Salmon, M., Schumacher, D. and \enc{Höhle}{Hoehle}, M. (2016): Monitoring count time series in \R: Aberration detection in public health surveillance. \emph{Journal of Statistical Software}, \bold{70} (10), 1-35. \doi{10.18637/jss.v070.i10}
}
}
\keyword{classif}
\references{
\enc{Höhle}{Hoehle}, M. and Paul, M. (2008): Count data regression charts for the monitoring of surveillance time series. \emph{Computational Statistics and Data Analysis}, \bold{52} (9), 4357-4368.

Salmon, M., Schumacher, D. and \enc{Höhle}{Hoehle}, M. (2016): Monitoring count time series in \R: Aberration detection in public health surveillance. \emph{Journal of Statistical Software}, \bold{70} (10), 1-35. \doi{10.18637/jss.v070.i10}
}
surveillance/man/earsC.Rd0000644000176200001440000002014713020537177015056 0ustar liggesusers\name{earsC}
\alias{earsC}
\encoding{latin1}
\title{Surveillance for a count data time series using the EARS C1, C2 or C3 method and its extensions}
\description{
The function takes \code{range} values of the surveillance time series \code{sts} and for each time point computes a threshold for the number of counts based on values from the recent past. This is then compared to the observed number of counts. If the observation is above a specific quantile of the prediction interval, then an alarm is raised. This method is especially useful for data without many historic values, since it only needs counts from the recent past.
}
\usage{
earsC(sts, control = list(range = NULL, method = "C1",
                          baseline = 7, minSigma = 0, alpha = 0.001))
}
\arguments{
\item{sts}{object of class sts (including the \code{observed} and the \code{state} time series), which is to be monitored.}
\item{control}{Control object
\describe{
\item{\code{range}}{Specifies the index in the \code{sts} object of all the timepoints which should be monitored. If \code{range} is \code{NULL} the maximum number of possible timepoints is used (this number depends on the method chosen):
\describe{
\item{C1}{all timepoints from the observation with index \code{baseline + 1} can be monitored,}
\item{C2}{timepoints from index \code{baseline + 3} can be monitored,}
\item{C3}{timepoints starting from the index \code{baseline + 5} can be monitored.}
}
}
\item{\code{method}}{String indicating which method to use: \cr
\describe{
\item{\code{"C1"}}{for EARS C1-MILD method (Default),}
\item{\code{"C2"}}{for EARS C2-MEDIUM method,}
\item{\code{"C3"}}{for EARS C3-HIGH method.}
}
See Details for further information about the methods.
}
\item{\code{baseline}}{how many time points to use for calculating the baseline, see Details.}
\item{\code{minSigma}}{By default 0. If \code{minSigma} is higher than 0, for C1 and C2, the quantity \code{zAlpha * minSigma} is then the alerting threshold if the baseline is zero. Howard Burkom suggests using a value of 0.5 or 1 for sparse data.}
\item{\code{alpha}}{An approximate (two-sided) \eqn{(1-\alpha)\cdot 100\%} prediction interval is calculated. By default, if \code{alpha} is \code{NULL}, the value 0.001 is assumed for C1 and C2 whereas 0.025 is assumed for C3. These different choices are the ones made at the CDC.}
}
}
}
\details{
The three methods differ in terms of the baseline used for the calculation of the expected value and in terms of the method for calculating the expected value:
\itemize{
\item in C1 and C2 the expected value is the moving average of counts over the sliding window of the baseline and the prediction interval depends on the standard deviation of the observed counts in this window. They can be considered as Shewhart control charts with a small sample used for calculations.
\item in C3 the expected value is based on the sum over 3 timepoints (assessed timepoint and the two previous timepoints) of the discrepancy between observations and predictions, predictions being calculated with the C2 method. This method has similarities with a CUSUM method because it adds discrepancies between predictions and observations over several timepoints, but is not a CUSUM (sum over 3 timepoints, not accumulation over a whole range), even if it sometimes is presented as such.
}
Here is what the function does for each method, see the literature sources for further details (a hand computation of the C1 threshold for a single timepoint is also sketched at the end of the Examples):
\enumerate{
\item For C1 the baseline are the \code{baseline} (default 7) timepoints before the assessed timepoint t, t-\code{baseline} to t-1. The expected value is the mean of the baseline. An approximate (two-sided) \eqn{(1-\alpha)\cdot 100\%} prediction interval is calculated based on the assumption that the difference between the expected value and the observed value divided by the standard deviation of counts over the sliding window, called \eqn{C_1(t)}, follows a standard normal distribution in the absence of outbreaks:
\deqn{C_1(t)= \frac{Y(t)-\bar{Y}_1(t)}{S_1(t)},}
where
\deqn{\bar{Y}_1(t)= \frac{1}{\code{baseline}} \sum_{i=t-\code{baseline}}^{t-1} Y(i)}
and
\deqn{ S^2_1(t)= \frac{1}{\code{baseline}-1} \sum_{i=t-\code{baseline}}^{t-1} [Y(i) - \bar{Y}_1(t)]^2.}
Then under the null hypothesis of no outbreak,
\deqn{C_1(t) \sim \mathcal{N}(0,1).}
An alarm is raised if
\deqn{C_1(t)\ge z_{1-\alpha}}
with \eqn{z_{1-\alpha}} the \eqn{(1-\alpha)^{th}} quantile of the standard normal distribution.
\cr The upperbound \eqn{U_1(t)} is then defined by:
\deqn{U_1(t)= \bar{Y}_1(t) + z_{1-\alpha}S_1(t).}
\item C2 is very similar to C1 apart from a 2-day lag in the baseline definition. In other words the baseline for C2 is \code{baseline} (Default: 7) timepoints with a 2-day lag before the monitored timepoint t, i.e. \eqn{(t-\code{baseline}-2)} to \eqn{t-3}. The expected value is the mean of the baseline. An approximate (two-sided) \eqn{(1-\alpha)\cdot 100\%} prediction interval is calculated based on the assumption that the difference between the expected value and the observed value divided by the standard deviation of counts over the sliding window, called \eqn{C_2(t)}, follows a standard normal distribution in the absence of outbreaks:
\deqn{C_2(t)= \frac{Y(t)-\bar{Y}_2(t)}{S_2(t)},}
where
\deqn{\bar{Y}_2(t)= \frac{1}{\code{baseline}} \sum_{i=t-\code{baseline}-2}^{t-3} Y(i)}
and
\deqn{ S^2_2(t)= \frac{1}{\code{baseline}-1} \sum_{i=t-\code{baseline}-2}^{t-3} [Y(i) - \bar{Y}_2(t)]^2.}
Then under the null hypothesis of no outbreak,
\deqn{C_2(t) \sim \mathcal{N}(0,1).}
An alarm is raised if
\deqn{C_2(t)\ge z_{1-\alpha},}
with \eqn{z_{1-\alpha}} the \eqn{(1-\alpha)^{th}} quantile of the standard normal distribution.
\cr The upperbound \eqn{U_{2}(t)} is then defined by:
\deqn{U_{2}(t)= \bar{Y}_{2}(t) + z_{1-\alpha}S_{2}(t).}
\item C3 is quite different from the two other methods, but it is based on C2. Indeed it uses \eqn{C_2(t)} from timepoint t and the two previous timepoints. This means the baseline consists of the timepoints \eqn{t-(\code{baseline}+4)} to \eqn{t-3}. The statistic \eqn{C_3(t)} is the sum of discrepancies between observations and predictions.
\deqn{C_3(t)= \sum_{i=t-2}^{t} \max(0,C_2(i)-1)}
Then under the null hypothesis of no outbreak,
\deqn{C_3(t) \sim \mathcal{N}(0,1).}
An alarm is raised if
\deqn{C_3(t)\ge z_{1-\alpha},}
with \eqn{z_{1-\alpha}} the \eqn{(1-\alpha)^{th}} quantile of the standard normal distribution.
\cr The upperbound \eqn{U_3(t)} is then defined by:
\deqn{U_3(t)= \bar{Y}_2(t) + S_2(t)\left(z_{1-\alpha}-\sum_{i=t-2}^{t-1} \max(0,C_2(i)-1)\right).}
}
}
\value{
An object of class \code{sts} with the slots \code{upperbound} and \code{alarm} filled by the chosen method.
}
\examples{
#Sim data and convert to sts object
disProgObj <- sim.pointSource(p = 0.99, r = 0.5, length = 208, A = 1,
                              alpha = 1, beta = 0, phi = 0,
                              frequency = 1, state = NULL, K = 1.7)
stsObj <- disProg2sts(disProgObj)

# Call earsC function and show result
res1 <- earsC(stsObj, control = list(range = 20:208, method="C1"))
plot(res1, legend.opts=list(horiz=TRUE, x="topright"))

# Compare C3 upperbounds depending on alpha
res3 <- earsC(stsObj, control = list(range = 20:208,method="C3",alpha = 0.001))
plot(upperbound(res3), type='l')
res3 <- earsC(stsObj, control = list(range = 20:208,method="C3"))
lines(upperbound(res3), col='red')
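
# Illustrative sketch (not the internal code): compute the C1 statistic
# and upperbound for one monitored timepoint by hand, using the formulas
# from the Details (baseline = the 7 counts preceding the timepoint).
t0 <- 30
y <- observed(stsObj)[(t0-7):(t0-1), 1]
C1 <- (observed(stsObj)[t0, 1] - mean(y)) / sd(y)
U1 <- mean(y) + qnorm(1 - 0.001) * sd(y)  # alarm if the count exceeds U1
c(C1 = C1, U1 = U1)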
}
\author{M. Salmon, H. Burkom}
\keyword{classif}
\source{
Fricker, R.D., Hegler, B.L., and Dunfee, D.A. (2008): Comparing syndromic surveillance detection methods: EARS versus a CUSUM-based methodology. \emph{Statistics in Medicine}, \bold{27}, 3407-3429.

Salmon, M., Schumacher, D. and \enc{Höhle}{Hoehle}, M. (2016): Monitoring count time series in \R: Aberration detection in public health surveillance. \emph{Journal of Statistical Software}, \bold{70} (10), 1-35. \doi{10.18637/jss.v070.i10}
}
surveillance/man/twinstim_profile.Rd0000644000176200001440000000704712677753025017424 0ustar liggesusers\encoding{latin1}
\name{twinstim_profile}
\alias{profile.twinstim}
\title{ Profile Likelihood Computation and Confidence Intervals for \code{twinstim} objects }
\description{
Function to compute estimated and profile likelihood based confidence intervals for \code{twinstim} objects. Computations might be cumbersome! WARNING: the implementation is not well tested, simply uses \code{optim} (ignoring optimizer settings from the original fit), and does not return the complete set of coefficients at each grid point.
}
\usage{
\method{profile}{twinstim}(fitted, profile, alpha = 0.05,
        control = list(fnscale = -1, maxit = 100, trace = 1),
        do.ltildeprofile = FALSE, ...)
}
\arguments{
\item{fitted}{ an object of class \code{"twinstim"}. }
\item{profile}{ a list with elements being numeric vectors of length 4. These vectors must have the form \code{c(index, lower, upper, gridsize)}.
\describe{
\item{\code{index}:}{ index of the parameter to be profiled in the vector \code{coef(fitted)}. }
\item{\code{lower, upper}:}{ lower/upper limit of the grid on which the profile log-likelihood is evaluated. Can also be \code{NA}, in which case \code{lower/upper} equals the lower/upper bound of the respective 99.7 \% Wald confidence interval (\eqn{\pm 3} standard errors). }
\item{\code{gridsize}:}{ grid size of the equally spaced grid between lower and upper. Can also be 0, in which case the profile log-likelihood for this parameter is not evaluated on a grid. }
}
}
\item{alpha}{ \eqn{(1-\alpha)\cdot 100\%}{(1-alpha)*100\%} profile likelihood based confidence intervals are computed. If \code{alpha <= 0}, then no confidence intervals are computed. Note that the confidence interval computation is currently not implemented. }
\item{control}{ control object to use in \code{\link{optim}} for the profile log-likelihood computations. It might be necessary to control \code{maxit} or \code{reltol} in order to obtain results in finite time. }
\item{do.ltildeprofile}{If \code{TRUE}, calculate the profile likelihood as well. This might take a while, since an optimisation for all other parameters has to be performed. Useful for likelihood based confidence intervals. Default: \code{FALSE}. }
\item{\dots}{ unused (argument of the generic). }
}
\value{
list with profile log-likelihood evaluations on the grid and -- not implemented yet -- highest likelihood and Wald confidence intervals. The argument \code{profile} is also returned.
}
\author{ Michael \enc{Höhle}{Hoehle} }
\examples{
# profiling takes a while
\dontrun{
#Load the twinstim model fitted to the IMD data
data("imdepi", "imdepifit")
# for profiling we need the model environment
imdepifit <- update(imdepifit, model=TRUE)

#Generate profiling object for a list of parameters for the new model
names <- c("h.(Intercept)","e.typeC")
coefList <- lapply(names, function(name) {
  c(pmatch(name,names(coef(imdepifit))),NA,NA,11)
})

#Profile object (necessary to specify a more loose convergence
#criterion).
#Speed things up by using do.ltildeprofile=FALSE (the default)
prof <- profile(imdepifit, coefList,
                control=list(reltol=0.1, REPORT=1), do.ltildeprofile=TRUE)

#Plot result for each variable
par(mfrow=c(1,2))
for (name in names) {
  with(as.data.frame(prof$lp[[name]]),
       matplot(grid,cbind(profile,estimated,wald),
               type="l",xlab=name,ylab="loglik"))
  legend(x="bottomleft",c("profile","estimated","wald"),lty=1:3,col=1:3)
}
}
}
\keyword{htest}
\keyword{methods}
\keyword{optimize}
\keyword{dplot}
surveillance/man/LRCUSUM.runlength.Rd0000644000176200001440000001452114004564174017157 0ustar liggesusers\name{LRCUSUM.runlength}
\alias{LRCUSUM.runlength}
%% \alias{outcomeFunStandard}
%% \alias{LLR.fun}
\encoding{latin1}
\title{Run length computation of a CUSUM detector}
\description{
Compute the run length for a count data or categorical CUSUM. The computations are based on a Markov representation of the likelihood ratio based CUSUM.
}
\usage{
LRCUSUM.runlength(mu, mu0, mu1, h, dfun, n, g=5, outcomeFun=NULL, ...)
}
\arguments{
\item{mu}{\eqn{k-1 \times T} matrix with true proportions, i.e. equal to \code{mu0} or \code{mu1} if one wants to compute e.g. \eqn{ARL_0} or \eqn{ARL_1}.}
\item{mu0}{\eqn{k-1 \times T} matrix with in-control proportions}
\item{mu1}{\eqn{k-1 \times T} matrix with out-of-control proportions}
\item{h}{The threshold h which is used for the CUSUM.}
\item{dfun}{The probability mass function or density used to compute the likelihood ratios of the CUSUM. In a negative binomial CUSUM this is \code{dnbinom}, in a binomial CUSUM \code{dbinom} and in a multinomial CUSUM \code{dmultinom}.}
\item{n}{Vector of length \eqn{T} containing the total number of experiments for each time point.}
\item{g}{The number of levels to cut the state space into when performing the Markov chain approximation. Sometimes also denoted \eqn{M}. Note that the quality of the approximation depends very much on \eqn{g}. If \eqn{T} is greater than, say, 50 it is necessary to increase the value of \eqn{g}.}
\item{outcomeFun}{A hook \code{function (k,n)} computing all possible outcome states for which the likelihood ratio is evaluated. If \code{NULL}, the internal default function \code{surveillance:::outcomeFunStandard} is used, which takes the Cartesian product of \code{0:n} for \code{k} components.}
\item{\dots}{Additional arguments to send to \code{dfun}.}
}
\details{
Brook and Evans (1972) formulated an approximate approach based on Markov chains to determine the PMF of the run length of a time-constant CUSUM detector. They describe the dynamics of the CUSUM statistic by a Markov chain with a discretized state space of size \eqn{g+2}. This is adapted to the time-varying case in \enc{Höhle}{Hoehle} (2010) and implemented in R using the \dots notation such that it works for a very large class of distributions. (A small illustration of the Markov chain construction is sketched at the end of the Examples.)
}
\seealso{\code{\link{categoricalCUSUM}}}
\value{A list with five components
\item{P}{An array of \eqn{g+2 \times g+2} transition matrices of the approximation Markov chain.}
\item{pmf}{Probability mass function (up to length \eqn{T}) of the run length variable.}
\item{cdf}{Cumulative distribution function (up to length \eqn{T}) of the run length variable.}
\item{arl}{If the model is time-homogeneous (i.e. if \eqn{T==1}) then the ARL is computed based on the stationary distribution of the Markov chain. See the equations in the reference for details. Note: If the model is not time-homogeneous then the function returns \code{NA} and the ARL has to be approximated manually from the output.
One could use \code{sum(1:length(pmf) * pmf)}, which is an approximation as it truncates to a finite support a sum that should run from 1 to infinity.
}
}
\references{
\enc{Höhle}{Hoehle}, M. (2010): Online change-point detection in categorical time series. In: T. Kneib and G. Tutz (Eds.), Statistical Modelling and Regression Structures - Festschrift in Honour of Ludwig Fahrmeir, Physica-Verlag, pp. 377-397. Preprint available as \url{https://staff.math.su.se/hoehle/pubs/hoehle2010-preprint.pdf}

\enc{Höhle}{Hoehle}, M. and Mazick, A. (2010): Aberration detection in R illustrated by Danish mortality monitoring. In: T. Kass-Hout and X. Zhang (Eds.), Biosurveillance: A Health Protection Priority, CRC Press. Preprint available as \url{https://staff.math.su.se/hoehle/pubs/hoehle_mazick2009-preprint.pdf}

Brook, D. and Evans, D. A. (1972): An approach to the probability distribution of cusum run length. \emph{Biometrika}, \bold{59}(3):539-549.
}
\examples{
######################################################
#Run length of a time constant negative binomial CUSUM
######################################################

#In-control and out of control parameters
mu0 <- 10
alpha <- 1/2
kappa <- 2

#Density for comparison in the negative binomial distribution
dY <- function(y,mu,log=FALSE, alpha, ...) {
  dnbinom(y, mu=mu, size=1/alpha, log=log)
}

#In this case "n" is the maximum value to investigate the LLR for
#It is assumed that beyond n the LLR is too unlikely to be worth
#computing.
LRCUSUM.runlength( mu=t(mu0), mu0=t(mu0), mu1=kappa*t(mu0), h=5,
  dfun = dY, n=rep(100,length(mu0)), alpha=alpha)

h.grid <- seq(3,6,by=0.3)
arls <- sapply(h.grid, function(h) {
  LRCUSUM.runlength( mu=t(mu0), mu0=t(mu0), mu1=kappa*t(mu0), h=h,
    dfun = dY, n=rep(100,length(mu0)), alpha=alpha,g=20)$arl
})
plot(h.grid, arls,type="l",xlab="threshold h",ylab=expression(ARL[0]))

if (surveillance.options("allExamples")) {
######################################################
#Run length of a time varying negative binomial CUSUM
######################################################

mu0 <- matrix(5*sin(2*pi/52 * 1:200) + 10,ncol=1)

rl <- LRCUSUM.runlength( mu=t(mu0), mu0=t(mu0), mu1=kappa*t(mu0), h=2,
  dfun = dY, n=rep(100,length(mu0)), alpha=alpha,g=20)

plot(1:length(mu0),rl$pmf,type="l",xlab="t",ylab="PMF")
plot(1:length(mu0),rl$cdf,type="l",xlab="t",ylab="CDF")
}

########################################################
# Further examples contain the binomial, beta-binomial
# and multinomial CUSUMs. Hopefully, these will be added
# in the future.
########################################################

#dfun function for the multinomial distribution (Note: Only k-1
#categories are specified).
dmult <- function(y, size,mu, log = FALSE) {
  return(dmultinom(c(y,size-sum(y)), size = size, prob=c(mu,1-sum(mu)), log = log))
}

#Example for the time-constant multinomial distribution
#with size 100 and in-control and out-of-control parameters as below.
n <- 100
pi0 <- as.matrix(c(0.5,0.3,0.2))
pi1 <- as.matrix(c(0.38,0.46,0.16))

#ARL_0
LRCUSUM.runlength(mu=pi0[1:2,,drop=FALSE],mu0=pi0[1:2,,drop=FALSE],mu1=pi1[1:2,,drop=FALSE],
  h=5,dfun=dmult, n=n, g=15)$arl
#ARL_1
LRCUSUM.runlength(mu=pi1[1:2,,drop=FALSE],mu0=pi0[1:2,,drop=FALSE],mu1=pi1[1:2,,drop=FALSE],
  h=5,dfun=dmult, n=n, g=15)$arl
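
#Illustrative sketch of the Brook & Evans (1972) idea underlying this
#function (assumptions: a time-constant CUSUM whose log-LR increments
#are roughly Gaussian with in-control mean 'delta'; g interior states,
#state g+1 is the absorbing alarm state). Not the internal code.
g <- 20 ; h <- 4 ; delta <- -0.5
breaks <- seq(0, h, length.out = g + 1)
mid <- (breaks[-1] + breaks[-(g + 1)]) / 2   #cell midpoints
P <- matrix(0, g + 1, g + 1)
for (i in 1:g) {
  P[i, 1] <- pnorm(breaks[2], mean = mid[i] + delta)  #reset/lowest cell
  for (j in 2:g)
    P[i, j] <- pnorm(breaks[j + 1], mean = mid[i] + delta) -
               pnorm(breaks[j], mean = mid[i] + delta)
  P[i, g + 1] <- 1 - pnorm(h, mean = mid[i] + delta)  #alarm
}
P[g + 1, g + 1] <- 1
#ARL from the empty CUSUM: first entry of solve(I-Q, 1), cf. Brook & Evans
Q <- P[1:g, 1:g]
solve(diag(g) - Q, rep(1, g))[1]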
}
\author{M. \enc{Höhle}{Hoehle}}
\keyword{regression}
surveillance/man/discpoly.Rd0000644000176200001440000000432413777627613015654 0ustar liggesusers\name{discpoly}
\alias{discpoly}
\title{Polygonal Approximation of a Disc/Circle}
\description{
Generates a polygon representing a disc/circle (in planar coordinates) as an object of one of three possible classes: \code{"\link[sp:Polygon-class]{Polygon}"}, \code{"\link[spatstat.geom]{owin}"}, or -- if \pkg{rgeos} (or \pkg{gpclib}) is available -- \code{"\link[rgeos:gpc.poly-class]{gpc.poly}"}.
}
\usage{
discpoly(center, radius, npoly = 64,
         class = c("Polygon", "owin", "gpc.poly"),
         hole = FALSE)
}
\arguments{
\item{center}{numeric vector of length 2 (center coordinates of the circle).}
\item{radius}{single numeric value (radius of the circle).}
\item{npoly}{single integer. Number of edges of the polygonal approximation.}
\item{class}{class of the resulting polygon (partial name matching applies). For \code{"owin"}, this is just a wrapper around \pkg{spatstat.geom}'s own \code{\link[spatstat.geom]{disc}} function.}
\item{hole}{logical. Does the resulting polygon represent a hole?}
}
\value{
A polygon of class \code{class} representing a circle/disc, approximated by \code{npoly} edges.\cr
If \code{class="gpc.poly"} but this formal class is not currently defined (because \pkg{rgeos} is not available), only the \code{pts} slot of a \code{"gpc.poly"} is returned, with a warning.
}
\author{
Sebastian Meyer\cr
This function is inspired by the \code{\link[spatstat.geom]{disc}} function from package \pkg{spatstat.geom}.
}
\examples{
## Construct circles with increasing accuracy and of different spatial classes
disc1 <- discpoly(c(0,0), 5, npoly=4, class = "owin")
disc2 <- discpoly(c(0,0), 5, npoly=16, class = "Polygon")

## Look at the results
print(disc1)
plot(disc1, axes=TRUE, main="", border=2)

print(disc2)
lines(disc2, col=3)

if (requireNamespace("rgeos")) { # for the "gpc.poly" class definition
  disc3 <- discpoly(c(0,0), 5, npoly=64, class = "gpc.poly")
  print(disc3)
  plot(disc3, add=TRUE, poly.args=list(border=4))
}

## if one only wants to _draw_ a circle without an object behind
symbols(0, 0, circles=5, inches=FALSE, add=TRUE, fg=5)
}
\seealso{
\link[spatstat.geom]{disc} in package \pkg{spatstat.geom}.
}
\keyword{datagen}
\keyword{spatial}
surveillance/man/hagelloch.Rd0000644000176200001440000001576414004512307015745 0ustar liggesusers\encoding{latin1}
\name{hagelloch}
\alias{hagelloch}
\alias{hagelloch.df}
\docType{data}
\keyword{datasets}
\title{1861 Measles Epidemic in the City of Hagelloch, Germany}
\description{
Data on the 188 cases in the measles outbreak among children in the German city of Hagelloch (near T\enc{ü}{ue}bingen) in 1861. The data were originally collected by Dr. Albert Pfeilsticker (1863) and augmented and re-analysed by Dr. Heike Oesterle (1992). This dataset is used to illustrate the \code{twinSIR} model class in \code{vignette("twinSIR")}.
}
\usage{
data("hagelloch")
}
\format{
Loading \code{data("hagelloch")} gives two objects: \code{hagelloch} and \code{hagelloch.df}. The latter is the original \code{data.frame} of 188 rows with individual information for each infected child. \code{hagelloch} has been generated from \code{hagelloch.df} via \code{\link{as.epidata}} (see the Examples below) to obtain an \code{"epidata"} object for use with \code{\link{twinSIR}}. It contains the entire SIR event history of the outbreak (but not all of the covariates).
The covariate information in \code{hagelloch.df} is as follows:
\describe{
\item{PN:}{patient number}
\item{NAME:}{patient name (as a factor)}
\item{FN:}{family index}
\item{HN:}{house number}
\item{AGE:}{age in years}
\item{SEX:}{gender of the individual (factor: male, female)}
\item{PRO:}{\code{Date} of prodromes}
\item{ERU:}{\code{Date} of rash}
\item{CL:}{class (factor: preschool, 1st class, 2nd class)}
\item{DEAD:}{\code{Date} of death (with missings)}
\item{IFTO:}{number of the patient who is the putative source of infection (0 = unknown)}
\item{SI:}{serial interval = number of days between dates of prodromes of infection source and infected person}
\item{C:}{complications (factor: no complications, bronchopneumonia, severe bronchitis, lobar pneumonia, pseudocroup, cerebral edema)}
\item{PR:}{duration of prodromes in days}
\item{CA:}{number of cases in family}
\item{NI:}{number of initial cases}
\item{GE:}{generation number of the case}
\item{TD:}{day of max. fever (days after rash)}
\item{TM:}{max. fever (degrees Celsius)}
\item{x.loc:}{x coordinate of house (in metres). Scaling in metres is obtained by multiplying the original coordinates by 2.5 (see details in Neal and Roberts (2004))}
\item{y.loc:}{y coordinate of house (in metres). See also the above description of \code{x.loc}.}
\item{tPRO:}{Time of prodromes (first symptoms) in days after the start of the epidemic (30 Oct 1861).}
\item{tERU:}{Time upon which the rash first appears.}
\item{tDEAD:}{Time of death, if available.}
\item{tR:}{Time at which the infectious period of the individual is assumed to end. This unknown time is calculated as
\deqn{tR_i = \min(tDEAD_i, tERU_i+d_0),}{tR[i] = min(tDEAD[i], tERU[i]+d0),}
where -- as in Section 3.1 of Neal and Roberts (2004) -- we use \eqn{d_0=3}{d0=3}.}
\item{tI:}{Time at which the individual is assumed to become infectious. Actually this time is unknown, but we use
\deqn{tI_i = tPRO_i - d_1,}{tI[i] = tPRO[i] - d1,}
where \eqn{d_1=1}{d1=1} as in Neal and Roberts (2004).}
}
The time variables describe the transitions of the individual in a Susceptible-Infectious-Recovered (SIR) model. Note that in order to avoid ties in the event times resulting from daily interval censoring, the times have been jittered uniformly within the respective day. The time point 0.5 would correspond to noon of 30 Oct 1861.

The \code{hagelloch} \code{"epidata"} object only retains some of the above covariates to save space. Apart from the usual \code{"epidata"} event columns, \code{hagelloch} contains a number of extra variables representing distance- and covariate-based weights for the force of infection:
\describe{
\item{household:}{the number of currently infectious children in the same household (including the child itself if it is currently infectious).}
\item{nothousehold:}{the number of currently infectious children outside the household.}
\item{c1, c2:}{the number of children infectious during the respective time block and being members of class 1 and 2, respectively; but the value is 0 if the individual of the row is not herself a member of the respective class.}
}
Such epidemic covariates can be computed by specifying suitable \code{f} and \code{w} arguments in \code{\link{as.epidata}} at conversion (see the code below), or at a later step via the \code{\link[=update.epidata]{update}}-method for \code{"epidata"}.
}
\source{
Thanks to Peter J. Neal, University of Manchester, for providing us with these data, which he in turn received from Niels Becker, Australian National University.
To cite the data, the main references are Pfeilsticker (1863) and Oesterle (1992).
}
\examples{
data("hagelloch")
head(hagelloch.df)   # original data documented in Oesterle (1992)
head(as.data.frame(hagelloch))   # "epidata" event history format

## How the "epidata" 'hagelloch' was created from 'hagelloch.df'
stopifnot(all.equal(hagelloch,
   as.epidata(hagelloch.df, t0 = 0, tI.col = "tI", tR.col = "tR",
              id.col = "PN", coords.cols = c("x.loc", "y.loc"),
              f = list(household    = function(u) u == 0,
                       nothousehold = function(u) u > 0),
              w = list(c1 = function (CL.i, CL.j) CL.i == "1st class" & CL.j == CL.i,
                       c2 = function (CL.i, CL.j) CL.i == "2nd class" & CL.j == CL.i),
              keep.cols = c("SEX", "AGE", "CL"))))

### Basic plots produced from hagelloch.df

# Show case locations as in Neal & Roberts (different scaling) using
# the data.frame (promoted to a SpatialPointsDataFrame)
coordinates(hagelloch.df) <- c("x.loc","y.loc")
plot(hagelloch.df, xlab="x [m]", ylab="y [m]", pch=15, axes=TRUE,
     cex=sqrt(multiplicity(hagelloch.df)))

# Epicurve
hist(as.numeric(hagelloch.df$tI), xlab="Time (days)", ylab="Cases", main="")

### "epidata" summary and plot methods
(s <- summary(hagelloch))
head(s$byID)
plot(s)

\dontrun{
# Show a dynamic illustration of the spread of the infection
animate(hagelloch, time.spacing=0.1, sleep=1/100,
        legend.opts=list(x="topleft"))
}
}
\references{
Pfeilsticker, A. (1863). Beitr\enc{ä}{ae}ge zur Pathologie der Masern mit besonderer Ber\enc{ü}{ue}cksichtigung der statistischen Verh\enc{ä}{ae}ltnisse, M.D. Thesis, Eberhard-Karls-Universit\enc{ä}{ae}t T\enc{ü}{ue}bingen. Available as \url{https://archive.org/details/beitrgezurpatho00pfeigoog}.

Oesterle, H. (1992). Statistische Reanalyse einer Masernepidemie 1861 in Hagelloch, M.D. Thesis, Eberhard-Karls-Universit\enc{ä}{ae}t T\enc{ü}{ue}bingen.

Neal, P. J. and Roberts, G. O. (2004). Statistical inference and model selection for the 1861 Hagelloch measles epidemic, \emph{Biostatistics}, 5(2):249-261.
}
\seealso{
data class: \code{\link{epidata}}

point process model: \code{\link{twinSIR}}

illustration with \code{hagelloch}: \code{vignette("twinSIR")}
}
surveillance/man/stcd.Rd0000644000176200001440000000733312014262005014743 0ustar liggesusers\name{stcd}
\alias{stcd}
\encoding{latin1}
\title{Spatio-temporal cluster detection}
\description{
Shiryaev-Roberts based prospective spatio-temporal cluster detection as in Assuncao & Correa (2009).
}
\usage{
stcd(x, y, t, radius, epsilon, areaA, areaAcapBk, threshold, cusum=FALSE)
}
\arguments{
\item{x}{Vector containing the spatial x coordinates of the events.}
\item{y}{Vector containing the spatial y coordinates of the events.}
\item{t}{Vector containing the time points of the events. It is assumed that the vector is sorted (early->last).}
\item{radius}{Radius of the cluster to detect.}
\item{epsilon}{Relative change of event-intensity within the cluster to detect. See the reference paper for an explicit definition.}
\item{areaA}{Area of the observation region A (single number) -- this argument is currently ignored!}
\item{areaAcapBk}{Area of A \ B(s_k,rho) for all k=1,\ldots,n (vector) -- this argument is currently ignored!}
\item{threshold}{Threshold limit for the alarm; it should be equal to the desired Average-Run-Length (ARL) of the detector.}
\item{cusum}{(logical) If \code{FALSE} (default) then the Shiryaev-Roberts detector is used as in the original article by Assuncao & Correa (2009), i.e.
\eqn{R_n = \sum_{k=1}^n \Lambda_{k,n}}, where \eqn{\Lambda_{k,n}} denotes the likelihood ratio between the in-control and the out-of-control model. If \code{TRUE}, the CUSUM test statistic is used instead. Here,
\deqn{R_n = \max_{1\leq k \leq n} \Lambda_{k,n}.}
Note that this has implications for which threshold will sound the alarm (the CUSUM threshold needs to be smaller).}
}
\details{
Shiryaev-Roberts based spatio-temporal cluster detection based on the work in Assuncao and Correa (2009). The implementation is based on C++ code originally written by Marcos Oliveira Prates, UFMG, Brazil and provided by Thais Correa, UFMG, Brazil during her research stay in Munich. This stay was financially supported by the Munich Center of Health Sciences.

Note that the vectors \code{x}, \code{y} and \code{t} need to be of the same length. Furthermore, the vector \code{t} needs to be sorted (to improve speed, the latter is not verified within the function).

The current implementation uses a call to a C++ function to perform the actual computations of the test statistic. The function is currently experimental -- data type and results may be subject to changes.
}
\value{A list with three components:
\item{R}{A vector of the same length as the input containing the value of the test statistic for each observation.}
\item{idxFA}{Index in the x,y,t vector causing a possible alarm. If no cluster was detected, then a value of \code{-1} is returned here.}
\item{idxCC}{Index in the x,y,t vector of the event containing the cluster. If no cluster was detected, then a value of \code{-1} is returned here.}
}
\references{
Assuncao, R. and Correa, T. (2009), Surveillance to detect emerging space-time clusters, Computational Statistics & Data Analysis, 53(8):2817-2830.
}
\examples{
if (require("splancs")) {
  # load the data from package "splancs"
  data(burkitt, package="splancs")
  # order the times
  burkitt <- burkitt[order(burkitt$t), ]

  #Parameters for the SR detection
  epsilon <- 0.5 # relative change within the cluster
  radius <- 20 # radius
  threshold <- 161 # threshold limit

  res <- stcd(x=burkitt$x, y=burkitt$y, t=burkitt$t,
              radius=radius, epsilon=epsilon,
              areaA=1, areaAcapBk=1, threshold=threshold)

  #Index of the event
  which.max(res$R >= threshold)
}
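
#A follow-up sketch (hypothetical, assuming the above example ran):
#highlight the event triggering the alarm and the cluster centre.
if (require("splancs") && exists("res") && res$idxFA > -1) {
  plot(burkitt$x, burkitt$y, pch = 20, xlab = "x", ylab = "y")
  points(burkitt$x[res$idxCC], burkitt$y[res$idxCC], col = 2, pch = 19, cex = 2)
  points(burkitt$x[res$idxFA], burkitt$y[res$idxFA], col = 4, pch = 4, cex = 2)
}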
}
\author{M. O. Prates, T. Correa and M. \enc{Höhle}{Hoehle}}
\keyword{cluster}
surveillance/man/calibration.Rd0000644000176200001440000000616313062247044016305 0ustar liggesusers\name{calibrationTest}
\alias{calibrationTest}
\alias{calibrationTest.default}
\title{ Calibration Tests for Poisson or Negative Binomial Predictions }
\description{
The implemented calibration tests for Poisson or negative binomial predictions of count data are based on proper scoring rules and described in detail in Wei and Held (2014). The following proper scoring rules are available: Dawid-Sebastiani score (\code{"dss"}), logarithmic score (\code{"logs"}), ranked probability score (\code{"rps"}).
}
\usage{
calibrationTest(x, ...)
\method{calibrationTest}{default}(x, mu, size = NULL,
                which = c("dss", "logs", "rps"),
                tolerance = 1e-4, method = 2, ...)
}
\arguments{
\item{x}{ the observed counts. All involved functions are vectorized and also accept matrices or arrays. }
\item{mu}{ the means of the predictive distributions for the observations \code{x}. }
\item{size}{ either \code{NULL} (default), indicating Poisson predictions with mean \code{mu}, or dispersion parameters of negative binomial forecasts for the observations \code{x}, parametrized as in \code{\link{dnbinom}} with variance \code{mu*(1+mu/size)}. }
\item{which}{ a character string indicating which proper scoring rule to apply. }
\item{tolerance}{ absolute tolerance for the null expectation and variance of \code{"logs"} and \code{"rps"}. For the latter, see the note below. Unused for \code{which = "dss"} (closed form). }
\item{method}{ selection of the \eqn{z}-statistic: \code{method = 2} refers to the alternative test statistic \eqn{Z_s^*} of Wei and Held (2014, Discussion), which has been recommended for low counts. \code{method = 1} corresponds to Equation 5 in Wei and Held (2014). }
\item{\dots}{ unused (argument of the generic). }
}
\value{
an object of class \code{"htest"}, which is a list with the following components:
\item{method}{a character string indicating the type of test performed (including \code{which} scoring rule).}
\item{data.name}{a character string naming the supplied \code{x} argument.}
\item{statistic}{the \eqn{z}-statistic of the test.}
\item{parameter}{the number of predictions underlying the test, i.e., \code{length(x)}.}
\item{p.value}{the p-value for the test.}
}
\note{
If the \CRANpkg{gsl} package is installed, its implementations of the Bessel and hypergeometric functions are used when calculating the null expectation and variance of the \code{rps}. These functions are faster and yield more accurate results (especially for larger \code{mu}).
}
\references{
Wei, W. and Held, L. (2014): Calibration tests for count data. \emph{Test}, \bold{23}, 787-805.
}
\author{ Sebastian Meyer and Wei Wei }
\examples{
mu <- c(0.1, 1, 3, 6, pi, 100)
size <- 0.1
set.seed(1)
y <- rnbinom(length(mu), mu = mu, size = size)
calibrationTest(y, mu = mu, size = size) # p = 0.99
calibrationTest(y, mu = mu, size = 1) # p = 4.3e-05
calibrationTest(y, mu = 1, size = size) # p = 0.6959
calibrationTest(y, mu = 1, size = size, which = "rps") # p = 0.1286
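
## Rough illustration (not the package's internal code): for Poisson
## predictions, the z-statistic of the DSS-based test standardizes the
## summed scores by their null expectation and variance; the formulas
## below hold for the Poisson case, cf. Wei and Held (2014).
x <- rpois(length(mu), lambda = mu)
s <- (x - mu)^2 / mu + log(mu)            # Dawid-Sebastiani scores
z <- sum(s - (1 + log(mu))) / sqrt(sum(2 + 1/mu))
2 * pnorm(-abs(z))                        # two-sided p-value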
}
\keyword{htest}
surveillance/man/nowcast.Rd0000644000176200001440000003455314024066511015477 0ustar liggesusers\encoding{latin1}
\name{nowcast}
\alias{nowcast}
%Internal functions
%\alias{dist.median}
%\alias{outside.ci}
%\alias{logS}
%\alias{RPS}
\title{ Adjust a univariate time series of counts for observed but-not-yet-reported events }
\description{
Nowcasting can help to obtain up-to-date information on trends during a situation where reports about events arrive with delay. For example in public health reporting, reports about important indicators (such as occurrence of cases) are prone to be delayed due to, for example, manual quality checking and reporting system hierarchies. Altogether, the delays are subject to a delay distribution, which may, or may not, vary over time.
}
\usage{
nowcast(now, when, data, dEventCol="dHospital", dReportCol="dReport",
        method=c("bayes.notrunc", "bayes.notrunc.bnb", "lawless",
                 "bayes.trunc", "unif", "bayes.trunc.ddcp"),
        aggregate.by="1 day", D=15, m=NULL,
        m.interpretation=c("hoehle_anderheiden2014", "lawless1994"),
        control=list(
            dRange=NULL, alpha=0.05, nSamples=1e3,
            N.tInf.prior=c("poisgamma","pois","unif"),
            N.tInf.max=300, gd.prior.kappa=0.1,
            ddcp=list(ddChangepoint=NULL,
                      cp_order=c("zero","one"),
                      Wextra=NULL,
                      logLambda=c("iidLogGa","tps","rw1","rw2"),
                      responseDistr=c("poisson", "negbin"),
                      mcmc=c(burnin=2500, sample=10000, thin=1,
                             adapt=1000, store.samples=FALSE)),
            score=FALSE, predPMF=FALSE))
}
\arguments{
\item{now}{ an object of class \code{Date} denoting the day at which to do the nowcast. This corresponds to \eqn{T} in the notation of \enc{Höhle}{Hoehle} and an der Heiden (2014). }
\item{when}{a vector of \code{Date} objects denoting the day(s) for which the projections are to be done. One needs to ensure that each element in \code{when} is smaller than or equal to \code{now}. }
\item{data}{A data frame with one row per case -- for each case one needs information on the day of the event (e.g. hospitalization) and the day of report of this event. }
\item{dEventCol}{The name of the column in \code{data} which contains the date of the event, e.g. hospitalization. Default: \code{"dHospital"}. }
\item{dReportCol}{Name of the column in \code{data} containing the date at which the report arrives at the respective register. Default: \code{"dReport"}. }
\item{method}{A vector of strings denoting the different methods for doing the nowcasting. Note that results of the first name in this list are officially returned by the function. However, it is possible to specify several methods here, e.g., in order to compare score evaluations. Details of the methods are described in \enc{Höhle}{Hoehle} and an der Heiden (2014).
\describe{
\item{\code{"unif"}}{}
\item{\code{"bayes.notrunc"}}{A Bayesian procedure ignoring truncation.}
\item{\code{"bayes.notrunc.bnb"}}{A fast Bayesian procedure ignoring truncation and which calculates the adjustment per-time (i.e. ignoring other delays) using the negative binomial.}
\item{\code{"lawless"}}{A discretized version of the Gaussian predictive distribution suggested in Lawless (1994).}
\item{\code{"bayes.trunc"}}{Bayesian method based on the generalized Dirichlet distribution, which is the conjugate prior-posterior for the delay distribution PMF under right-truncated sampling as shown in HadH (2014).}
\item{\code{"bayes.trunc.ddcp"}}{Fully Bayesian method allowing for change-points in the delay distribution, e.g., due to speed-ups in the reporting process. A discrete-survival model is used for the delay distribution. Details of the methods are described in HadH (2014). Note: This method requires that the JAGS program is installed on the system.}
}
}
\item{aggregate.by}{Time scale used for the temporal aggregation of the records in the data \code{data}. See \code{\link{linelist2sts}} and \code{\link{seq.Date}} for further information.}
\item{D}{Maximum possible or maximum relevant delay (unit: \code{aggregate.by}). Default: 15.}
\item{m}{Size of the moving window for the estimation of the delay distribution. Default: \code{NULL}, i.e. take all values at all times. Otherwise: a positive integer equal to or greater than \code{D} such that only values from a sliding window are used. The shape of the window depends on the value of \code{m.interpretation}.}
\item{m.interpretation}{This parameter controls the interpretation of the sliding window used to estimate the delay distribution. If \code{m.interpretation="hoehle_anderheiden2014"} (Default) then the sliding window is defined as a horizontal cut in the reporting triangle, i.e. the values for the delay estimation originate from reports occurring during \code{(now-m):now}. This means that the estimation of long delays is based on fewer observations than the estimation of the short delays, hence, the long delay estimates are subject to more variability. If for example \eqn{m=D} then the estimate for a delay of \eqn{d=D} is based on only one observation. The advantage of this choice is that one explicitly knows which time period all observations originate from. For details see Section 3 of \enc{Höhle}{Hoehle} and an der Heiden (2014).
Alternatively, when \code{m.interpretation="lawless1994"}, the cut in the reporting triangle is made such that each delay \code{d} is estimated based on the same number of observations (\eqn{m+1}). This means that in order to estimate the delay for \eqn{d} days, a sliding rectangle of length \eqn{m+1} is used, containing the reports which occurred during \code{(now-m-d):now}. See Fig. 2 in Lawless (1994) for details. Note: A warning is given if \code{method="lawless"} but \code{m.interpretation} is not \code{"lawless1994"}.}
\item{control}{A list with named arguments controlling the functionality of the nowcasting.
\describe{
\item{dRange}{Default: \code{NULL}. In this case the \code{dEventCol} column is used to extract the first and last available date in \code{data}.}
\item{alpha}{Equal tailed (1-\eqn{\alpha}{alpha})*100\% prediction intervals are calculated. Default: 0.05.}
\item{nSamples}{Number of PMF samples in the \code{bayes.*} procedures. Note: Entire vectors containing the PMF on the grid from 0 to \code{N.tInf.max} are drawn and then combined. The argument does not apply to the \code{bayes.trunc.ddcp} method.}
\item{N.tInf.prior}{Prior distribution of \eqn{N(t,\infty)}{N(t,Inf)}. Applies to all \code{bayes.*} methods except \code{bayes.trunc.ddcp}. See the Examples on how to control the distribution parameters.}
\item{N.tInf.max}{Limit of the support of \eqn{N(t,\infty)}{N(t,Inf)}. The value needs to be high enough such that at this limit only little of the predictive distribution is right-truncated. Default: 300.}
\item{gd.prior.kappa}{Concentration parameter for the Dirichlet prior for the delay distribution on \eqn{0,...,D}. Default: 0.1. Note: The procedure is quite sensitive to this parameter in case only few cases are available.}
\item{ddcp}{A list specifying the change point model for the delay distribution. This method should only be used if detailed information about changes in the delay distribution is available as, e.g., in the case of the STEC O104:H4 outbreak. The components are as follows:
\describe{
\item{\code{ddChangepoint}}{Vector of \code{Date} objects corresponding to the changepoints.}
\item{\code{cp_order}}{Either \code{"zero"} (Default) or \code{"one"}. This is the degree of the TPS spline for the baseline hazard, which is formed by the changepoints. Order zero corresponds to the dummy variables of the change-points being simply zero or one. In case a 1st order polynomial is chosen, this allows the delay distribution to change towards faster or slower reporting as time progresses (until the next change-point). The latter can be helpful in very dynamic epidemic situations where a lot of cases suddenly appear, overwhelming the surveillance system infrastructure.}
\item{\code{Wextra}}{An additional design matrix part to be joined onto the part originating from the change-points. Altogether, the column bind of these two quantities will be \eqn{W_{t,d}}. This allows one to include, e.g., day of the week effects or holidays.}
\item{\code{logLambda}}{Prior on the spline. One of \code{c("iidLogGa","tps","rw1","rw2")}.}
\item{\code{responseDistr}}{Response distribution of \eqn{n_{t,d}} in the reporting triangle. Default is \code{"poisson"}.
An experimental alternative is to use \code{"negbin"}.}
\item{\code{tau.gamma}}{}
\item{\code{eta.mu}}{Vector of coefficients describing the mean of the prior normal distribution of the regression effects in the discrete time survival model.}
\item{\code{eta.prec}}{A precision matrix for the regression effects in the discrete time survival model.}
\item{\code{mcmc}}{A named vector of length 5 containing burn-in (default: 2500), number of samples (10000), thinning (1) and adaptation (1000) for the three MCMC chains which are run. The values are passed on to \code{\link[runjags]{run.jags}}. The fifth argument, \code{store.samples}, indicates whether the output of the JAGS sampling should be included as part of the returned \code{stsNC} object. Warning: If \code{TRUE} (Default: \code{FALSE}), the size of the returned object might increase substantially.}
}
}
\item{score}{Compute scoring rules. Default: \code{FALSE}. The computed scores are found in the \code{SR} slot of the result.}
\item{predPMF}{Logical indicating whether to return the probability mass functions of the individual forecasts (Default: \code{FALSE}). The result can be found in the \code{control} slot of the return object.}
}
}
}
\details{
The methodological details of the nowcasting procedures are described in \enc{Höhle}{Hoehle}, M. and an der Heiden, M. (2014).
}
\value{
\code{nowcast} returns an object of \code{"\linkS4class{stsNC}"}. The \code{upperbound} slot contains the median of the method specified at the first position of the argument \code{method}. The slot \code{pi} (for prediction interval) contains the equal tailed (1-\eqn{\alpha}{alpha})*100\% prediction intervals, which are calculated based on the predictive distributions in slot \code{predPMF}. Furthermore, slot \code{truth} contains an \code{sts} object containing the true number of cases (if possible to compute; it is based on the data in \code{data}). Finally, slot \code{SR} contains the results for the proper scoring rules (requires the truth to be calculable).
}
\references{
\enc{Höhle}{Hoehle}, M. and an der Heiden, M. (2014): Bayesian nowcasting during the STEC O104:H4 outbreak in Germany, 2011. \emph{Biometrics} 70(4):993-1002. \doi{10.1111/biom.12194}.\cr A preprint is available as \url{https://staff.math.su.se/hoehle/pubs/hoehle_anderheiden2014-preprint.pdf}.

\enc{Günther}{Guenther}, F. and Bender, A. and Katz, K. and \enc{Küchenhoff}{Kuechenhoff}, H. and \enc{Höhle}{Hoehle}, M. (2020): Nowcasting the COVID-19 pandemic in Bavaria. \emph{Biometrical Journal}. \doi{10.1002/bimj.202000112}\cr Preprint available at \doi{10.1101/2020.06.26.20140210}.
}
\author{ Michael \enc{Höhle}{Hoehle} }
\note{
Note: The \code{bayes.trunc.ddcp} method uses the JAGS software together with the \R package \pkg{runjags} to handle the parallelization of the MCMC using the \code{"rjparallel"} method of \code{\link[runjags]{run.jags}}, which additionally requires the \pkg{rjags} package. You need to manually install JAGS on your computer for the package to work -- see \url{https://mcmc-jags.sourceforge.io/} and the documentation of \pkg{runjags} for details.

Note: The function is still under development and might change in the future. Unfortunately, little emphasis has so far been put on making the function easy to understand and use.
}
\examples{
data("husO104Hosp")

#Extract the reporting triangle at a specific day
t.repTriangle <- as.Date("2011-07-04")

#Use 'void' nowcasting procedure (we just want the reporting triangle)
nc <- nowcast(now=t.repTriangle,when=t.repTriangle,
              dEventCol="dHosp",dReportCol="dReport",data=husO104Hosp,
              D=15,method="unif")

#Show reporting triangle
reportingTriangle(nc)

#Perform Bayesian nowcasting assuming the delay distribution is stable over time
nc.control <- list(N.tInf.prior=structure("poisgamma",
                                          mean.lambda=50,var.lambda=3000),
                   nSamples=1e2)
t.repTriangle <- as.Date("2011-06-10")
when <- seq(t.repTriangle-3,length.out=10,by="-1 day")
nc <- nowcast(now=t.repTriangle,when=when,
              dEventCol="dHosp",dReportCol="dReport",data=husO104Hosp,
              D=15,method="bayes.trunc",control=nc.control)

#Show time series and posterior median forecast/nowcast
plot(nc,xaxis.tickFreq=list("\%d"=atChange,"\%m"=atChange),
     xaxis.labelFreq=list("\%d"=at2ndChange),xaxis.labelFormat="\%d-\%b",
     xlab="Time (days)",lty=c(1,1,1,1),lwd=c(1,1,2))
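
#Rough sketch (for illustration only, not the internal code): a reporting
#triangle like the one shown by reportingTriangle(nc) above can be
#tabulated directly from the line list (columns dHosp and dReport of
#husO104Hosp); this version ignores the right truncation at 'now'.
delay <- as.numeric(husO104Hosp$dReport - husO104Hosp$dHosp)
n.td <- table(dHosp = factor(husO104Hosp$dHosp),
              delay = factor(pmin(delay, 15), levels = 0:15))
head(n.td)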
\dontrun{
### Using runjags to do a Bayesian model with changepoint(s)
### -- this might take a while
nc.control.ddcp <- modifyList(nc.control,
                    list(gd.prior.kappa=0.1,
                         ddcp=list(ddChangepoint=as.Date(c("2011-05-23")),
                                   logLambda="tps",
                                   tau.gamma=1,
                                   mcmc=c(burnin=1000,sample=1000,thin=1,
                                          adapt=1000,store.samples=FALSE))))
nc.ddcp <- nowcast(now=t.repTriangle,when=when,
                   dEventCol="dHosp",dReportCol="dReport",
                   data=husO104Hosp, aggregate.by="1 day",
                   method="bayes.trunc.ddcp", D=15,
                   control=nc.control.ddcp)

plot(nc.ddcp,legend.opts=NULL,
     xaxis.tickFreq=list("\%d"=atChange,"\%m"=atChange),
     xaxis.labelFreq=list("\%d"=at2ndChange),xaxis.labelFormat="\%d-\%b",
     xlab="Time (days)",lty=c(1,1,1,1),lwd=c(1,1,2))

lambda <- attr(delayCDF(nc.ddcp)[["bayes.trunc.ddcp"]],"model")$lambda
showIdx <- seq(which( max(when) == epoch(nc.ddcp))) #seq(ncol(lambda))
matlines( showIdx,t(lambda)[showIdx,],col="gray",lwd=c(1,2,1),lty=c(2,1,2))
legend(x="topright",c(expression(lambda(t)),"95\% CI"),col="gray",lwd=c(2,1),lty=c(1,2))
}
}
\keyword{models}
surveillance/man/measles.weser.Rd0000644000176200001440000001102413536703542016573 0ustar liggesusers\encoding{latin1}
\name{measles.weser}
\alias{measles.weser}
\alias{measlesWeserEms}
\docType{data}
\keyword{datasets}
\title{Measles in the Weser-Ems region of Lower Saxony, Germany, 2001-2002}
\description{
Weekly counts of new measles cases for the 17 administrative districts (NUTS-3 level) of the \dQuote{Weser-Ems} region of Lower Saxony, Germany, during 2001 and 2002, as reported to the Robert Koch Institute according to the Infection Protection Act (\dQuote{Infektionsschutzgesetz}, \acronym{IfSG}).\cr
\code{data("measlesWeserEms")} is a corrected version of \code{data("measles.weser")} (see Format section below). These data are illustrated and analyzed in Meyer et al. (2017, Section 5), see \code{vignette("hhh4_spacetime")}.
}
\usage{
data("measles.weser")
data("measlesWeserEms")
}
\format{
\code{data("measles.weser")} is an object of the old \code{"disProg"} class, whereas \code{data("measlesWeserEms")} is of the new class \code{"\linkS4class{sts}"}.

Furthermore, the following updates have been applied for \code{data("measlesWeserEms")}:
\itemize{
\item it includes the two districts \dQuote{SK Delmenhorst} (03401) and \dQuote{SK Wilhelmshaven} (03405) with zero counts, which are ignored in \code{data("measles.weser")}.
\item it corrects the time lag error for year 2002 caused by a redundant pseudo-week \dQuote{0} with 0 counts only (the row \code{measles.weser$observed[53,]} is nonsense).
\item it has one more case attributed to \dQuote{LK Oldenburg} (03458) during 2001/W17, i.e., 2 cases instead of 1. This reflects the official data as of \dQuote{Jahrbuch 2005}, whereas \code{data("measles.weser")} is as of \dQuote{Jahrbuch 2004}.
\item it contains a map of the region (as a \code{"\linkS4class{SpatialPolygonsDataFrame}"}) with the following variables:
\describe{
\item{\code{GEN}}{district label.}
\item{\code{AREA}}{district area in m^2.}
\item{\code{POPULATION}}{number of inhabitants (as of 31/12/2003).}
\item{\code{vaccdoc.2004}}{proportion with a vaccination card among screened abecedarians (2004).}
\item{\code{vacc1.2004}}{proportion with at least one vaccination against measles among abecedarians presenting a vaccination card (2004).}
\item{\code{vacc2.2004}}{proportion of doubly vaccinated abecedarians among the ones presenting their vaccination card at school entry in the year 2004.}
}
\item it uses the correct format for the official district keys, i.e., 5 digits (initial 0).
\item its attached neighbourhood matrix is more general: a distance matrix (neighbourhood orders) instead of just an adjacency indicator matrix (special case \code{nbOrder == 1}).
\item population fractions represent data as of 31/12/2003 (\acronym{LSN}, 2004, document \dQuote{A I 2 - hj 2 / 2003}). There are only minor differences to the ones used for \code{data("measles.weser")}.
}
}
\source{
Measles counts were obtained from the public SurvStat database of the Robert Koch Institute: \url{https://survstat.rki.de/}.

A shapefile of Germany's districts as of 01/01/2009 was obtained from the German Federal Agency for Cartography and Geodesy (\url{https://gdz.bkg.bund.de/}). The map of the 17 districts of the \dQuote{Weser-Ems} region (\code{measlesWeserEms@map}) is a simplified subset of this shapefile using a 30\% reduction via the Douglas-Peucker reduction method as implemented at \url{https://MapShaper.org}.

Population numbers were obtained from the Federal Statistical Office of Lower Saxony (\acronym{LSN}): \url{https://www.statistik.niedersachsen.de/themenbereiche/bevoelkerung/}

Vaccination coverage was obtained from the public health department of Lower Saxony: Nieders\enc{ä}{ae}chsisches Landesgesundheitsamt (2005): Impfreport -- Durchimpfung von Kindern im Einschulungsalter in Niedersachsen im Erhebungsjahrgang 2004. Available online from \url{https://www.nlga.niedersachsen.de/gesundheitsberichterstattung/gesundheitsberichte/impfreport/}, also as an interactive version.
}
\references{
Meyer, S., Held, L. and \enc{Höhle}{Hoehle}, M. (2017): Spatio-temporal analysis of epidemic phenomena using the \R package \pkg{surveillance}. \emph{Journal of Statistical Software}, \bold{77} (11), 1-55.
\doi{10.18637/jss.v077.i11} } \examples{ ## old "disProg" object data("measles.weser") measles.weser plot(measles.weser, as.one=FALSE) ## new "sts" object (with corrections) data("measlesWeserEms") measlesWeserEms plot(measlesWeserEms) } surveillance/man/epidataCS_aggregate.Rd0000644000176200001440000001336713433544762017700 0ustar liggesusers\name{epidataCS_aggregate} \alias{epidataCS2sts} \alias{as.epidata.epidataCS} \title{Conversion (aggregation) of \code{"epidataCS"} to \code{"epidata"} or \code{"sts"}} \description{ Continuous-time continuous-space epidemic data stored in an object of class \code{"\link{epidataCS}"} can be aggregated in space or in space and time yielding an object of class \code{"\link{epidata}"} or \code{"\linkS4class{sts}"} for use of \code{\link{twinSIR}} or \code{\link{hhh4}} modelling, respectively. } \usage{ ## aggregation in space and time over 'stgrid' for use of 'hhh4' models epidataCS2sts(object, freq, start, neighbourhood, tiles = NULL, popcol.stgrid = NULL, popdensity = TRUE) ## aggregation in space for use of 'twinSIR' models \method{as.epidata}{epidataCS}(data, tileCentroids, eps = 0.001, ...) } \arguments{ \item{object, data}{an object of class \code{"\link{epidataCS}"}.} \item{freq,start}{ see the description of the \code{"\linkS4class{sts}"} class. The \code{start} specification should reflect the beginning of \code{object$stgrid}, i.e., the start of the first time interval. } \item{neighbourhood}{ binary adjacency or neighbourhood-order matrix of the regions (\code{tiles}). If missing but \code{tiles} is given, a binary adjacency matrix will be auto-generated from \code{tiles} using functionality of the \pkg{spdep} package (see \code{\link{poly2adjmat}}). Since the \code{"neighbourhood"} slot in \code{"\linkS4class{sts}"} is actually optional, \code{neighbourhood=NULL} also works. } \item{tiles}{ object inheriting from \code{"\linkS4class{SpatialPolygons}"} representing the regions in \code{object$stgrid} (column \code{"tile"}). It will become the \code{"map"} slot of the resulting \code{"sts"} object. Its \code{row.names} must match \code{levels(object$stgrid$tile)}. If \code{neighbourhood} is provided, \code{tiles} is optional (not required for \code{hhh4}, but for plots of the resulting \code{"sts"} object). } \item{popcol.stgrid}{ single character or numeric value indexing the column in \code{object$stgrid} which contains the population data (counts or densities, depending on the \code{popdensity} argument). This will become the \code{"populationFrac"} slot (optional).} \item{popdensity}{ logical indicating if the column referenced by \code{popcol.stgrid} contains population densities or absolute counts. } \item{tileCentroids}{ a coordinate matrix of the region centroids (i.e., the result of \code{coordinates(tiles)}). Its row names must match \code{levels(data$stgrid$tile)}. This will be the coordinates used for the \dQuote{population} (i.e., the \code{tiles} from \code{"\link{epidataCS}"}) in the discrete-space \code{\link{twinSIR}} modelling. } \item{eps}{ numeric scalar for breaking tied removal and infection times between different individuals (tiles), which might occur during conversion from \code{"epidataCS"} to \code{"epidata"}. Rather dumb, this is simply done by subtracting \code{eps} from each tied removal time. One should consider other ways of breaking the tied event times. 
}
\item{\dots}{unused (argument of the generic).}
}
\details{
Conversion to \code{"\linkS4class{sts}"} only makes sense if the time intervals (\code{BLOCK}s) of the \code{stgrid} are regularly spaced (to give \code{freq} intervals per year). Note that events of the prehistory (not covered by \code{stgrid}) are not included in the resulting \code{sts} object.

Some comments on the conversion to \code{"epidata"}: the conversion results in SIS epidemics only, i.e. the at-risk indicator is set to 1 immediately after recovery. A tile is considered infective if at least one individual within the tile is infective, otherwise it is susceptible. The lengths of the infectious periods are taken from \code{data$events$eps.t}. There will be no \code{f} columns in the resulting \code{"epidata"}. These must be generated by a subsequent call to \code{\link{as.epidata}} with the desired \code{f}.
}
\value{
\code{epidataCS2sts}: an object of class \code{"\linkS4class{sts}"} representing the multivariate time series of the number of cases aggregated over \code{stgrid}.

\code{as.epidata.epidataCS}: an object of class \code{"\link{epidata}"} representing an SIS epidemic in the form of a multivariate point process (one for each region/\code{tile}).
}
\author{ Sebastian Meyer }
\seealso{
\code{\link{epidata}} and \code{\link{twinSIR}};
\code{"\linkS4class{sts}"} and \code{\link{hhh4}}.
}
\examples{
data("imdepi")
load(system.file("shapes", "districtsD.RData", package="surveillance"))

## convert imdepi point pattern into multivariate time series
imdsts <- epidataCS2sts(imdepi, freq = 12, start = c(2002, 1),
                        neighbourhood = NULL, # not needed here
                        tiles = districtsD)

## check the overall number of events by district
stopifnot(all.equal(colSums(observed(imdsts)),
                    c(table(imdepi$events$tile))))

## compare plots of monthly number of cases
opar <- par(mfrow = c(2, 1))
plot(imdepi, "time")
plot(imdsts, type = observed ~ time)
par(opar)

if (surveillance.options("allExamples")) {
  ## plot number of cases by district
  plot(imdsts, type = observed ~ unit)
}

## also test conversion to an SIS event history ("epidata") of the "tiles"
if (requireNamespace("intervals")) {
  imdepi_short <- subset(imdepi, time < 50) # to reduce the runtime
  imdepi_short$stgrid <- subset(imdepi_short$stgrid, start < 50)
  imdepidata <- as.epidata(imdepi_short,
                           tileCentroids = coordinates(districtsD))
  summary(imdepidata)
}
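
## A sketch (assuming the spdep package is installed): auto-generate the
## binary adjacency matrix of the districts, which can be supplied as the
## 'neighbourhood' of the resulting "sts" object.
if (requireNamespace("spdep")) {
  adjmat <- poly2adjmat(districtsD)
  imdsts2 <- epidataCS2sts(imdepi, freq = 12, start = c(2002, 1),
                           neighbourhood = adjmat, tiles = districtsD)
  table(neighbourhood(imdsts2))
}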
}
}
\value{
  a list of coefficients
}
\author{
  Sebastian Meyer
}
\examples{
## the default method just 'split's the coefficient vector
coefs <- c(a = 1, b = 3, dispersion = 0.5)
npars <- c(regression = 2, variance = 1)
coeflist(coefs, npars)
}
\keyword{models}
\keyword{utilities}
surveillance/man/twinSIR.Rd0000644000176200001440000003611513433341250015353 0ustar liggesusers\encoding{latin1}
\name{twinSIR}
\alias{twinSIR}
\title{
  Fit an Additive-Multiplicative Intensity Model for SIR Data
}
\description{
  \code{twinSIR} is used to fit additive-multiplicative intensity models
  for epidemics as described in \enc{Höhle}{Hoehle} (2009). Estimation is
  driven by (penalized) maximum likelihood in the point process framework.
  Optimization (maximization) of the (penalized) likelihood function is
  performed by means of \code{\link{optim}}.
  The implementation is illustrated in Meyer et al. (2017, Section 4),
  see \code{vignette("twinSIR")}.
}
\usage{
twinSIR(formula, data, weights, subset,
        knots = NULL, nIntervals = 1, lambda.smooth = 0, penalty = 1,
        optim.args = list(), model = TRUE, keep.data = FALSE)
}
\arguments{
  \item{formula}{
    an object of class \code{"\link{formula}"} (or one that can be coerced
    to that class): a symbolic description of the intensity model to be
    estimated. The details of the model specification are given below.
  }
  \item{data}{
    an object inheriting from class \code{"\link{epidata}"}.
  }
  \item{weights}{
    an optional vector of weights to be used in the fitting process.
    Should be \code{NULL} (the default, i.e. all observations have unit
    weight) or a numeric vector.
  }
  \item{subset}{
    an optional vector specifying a subset of observations to be used in
    the fitting process. The subset \code{atRiskY == 1} is automatically
    chosen, because the likelihood only depends on those observations.
  }
  \item{knots}{
    numeric vector or \code{NULL} (the default). Specification of the
    knots, where we suppose a step of the log-baseline. With the current
    implementation, these must be existing \code{"stop"} time points in
    the selected \code{subset} of the \code{data}, which is always
    restricted to \code{atRiskY == 1} rows. The intervals of constant
    log-baseline hazard rate then are \eqn{(minTime;knots_1]},
    \eqn{(knots_1;knots_2]}, \ldots, \eqn{(knots_K;maxTime]}. By default,
    the \code{knots} are automatically chosen at the quantiles of the
    infection time points such that \code{nIntervals} intervals result.
    Non-NULL \code{knots} take precedence over \code{nIntervals}.
  }
  \item{nIntervals}{
    the number of intervals of constant log-baseline hazard. Defaults to
    1, which means an overall constant log-baseline hazard will be fitted.
  }
  \item{lambda.smooth}{
    numeric, the smoothing parameter \eqn{\lambda}. By default it is 0,
    which leads to unpenalized likelihood inference. In case
    \code{lambda.smooth=-1}, the automatic smoothing parameter selection
    based on a mixed model approach is used (cf. \enc{Höhle}{Hoehle},
    2009).
  }
  \item{penalty}{
    either a single number denoting the order of the difference used to
    penalize the log-baseline coefficients (defaults to 1), or a more
    specific penalty matrix \eqn{K} for the parameter sub-vector
    \eqn{\beta}. In case of non-equidistant knots -- usually the case when
    using quantile-based knot locations -- only a 1st-order differences
    penalty matrix as in Fahrmeir and Lang (2001) is implemented.
  }
  \item{optim.args}{
    a list with arguments passed to the \code{\link{optim}} function.
    Especially useful are the following ones:
    \describe{
      \item{\code{par}:}{
        to specify initial parameter values.
Those must be in the order \code{c(alpha, h0, beta)}, i.e. first
        the coefficients of the epidemic covariates in the same order as
        they appear in the \code{formula}, then the log-baseline levels in
        chronological order and finally the coefficients of the endemic
        covariates in the same order as they appear in the \code{cox}
        terms of the \code{formula}. The default is to start with 1's for
        \code{alpha} and 0's for \code{h0} and \code{beta}.
      }
      \item{\code{control}:}{
        for more detailed \code{trace}-ing (default: 1), another
        \code{REPORT}-ing frequency if \code{trace} is positive
        (default: 10), higher \code{maxit} (maximum number of iterations,
        default: 300) or another \code{factr} value (default: 1e7, a lower
        value means higher precision).
      }
      \item{\code{method}:}{
        the optimization algorithm defaults to \code{"L-BFGS-B"} (for
        box-constrained optimization), if there are any epidemic
        (non-\code{cox}) variables in the model, and to \code{"BFGS"}
        otherwise.
      }
      \item{\code{lower}:}{
        if \code{method = "L-BFGS-B"} this defines the lower bounds for
        the model coefficients. By default, all effects \eqn{\alpha} of
        epidemic variables are restricted to be non-negative. Normally,
        this is exactly what one would like to have, but there might be
        reasons for other lower bounds, see the Note below.
      }
      \item{\code{hessian}:}{
        An estimation of the Expected Fisher Information matrix is always
        part of the return value of the function. It might be interesting
        to see the Observed Fisher Information (= negative Hessian at the
        maximum), too. This will be additionally returned if
        \code{hessian = TRUE}.
      }
    }
  }
  \item{model}{
    logical indicating if the model frame, the \code{weights},
    \code{lambda.smooth}, the penalty matrix \eqn{K} and the list of used
    distance functions \code{f} (from \code{attributes(data)}) should be
    returned for further computation. This defaults to \code{TRUE} as this
    information is necessary e.g. in the \code{profile} and \code{plot}
    methods.
  }
  \item{keep.data}{
    logical indicating if the \code{"epidata"} object (\code{data}) should
    be part of the return value. This is only necessary for use of the
    \code{\link[=simulate.twinSIR]{simulate}}-method for \code{"twinSIR"}
    objects. The reason is that the \code{twinSIR} function only uses and
    stores the rows with \code{atRiskY == 1} in the \code{model}
    component, but for the simulation of new epidemic data one needs the
    whole data set with all individuals in every time block. The default
    value is \code{FALSE}, so if you intend to use
    \code{simulate.twinSIR}, you have to set this to \code{TRUE}.
  }
}
\details{
  A model is specified through the \code{formula}, which has the form
  \code{~ epidemicTerm1 + epidemicTerm2 + cox(endemicVar1) *
  cox(endemicVar2)}, i.e. the right hand side has the usual form as in
  \code{\link{lm}} with some variables marked as being endemic by the
  special function \code{\link{cox}}. The left hand side of the formula
  is empty and will be set internally to \code{cbind(start, stop, event)},
  which is similar to \code{Surv(start, stop, event, type="counting")} in
  package \pkg{survival}.

  Basically, the additive-multiplicative model for the infection intensity
  \eqn{\lambda_i(t)} for individual \eqn{i} is
  \deqn{\lambda_i(t) = Y_i(t) * (e_i(t) + h_i(t))}
  where
  \describe{
    \item{Y\_i(t)}{
      is the at-risk indicator, indicating if individual \eqn{i} is
      \dQuote{at risk} of becoming infected at time point \eqn{t}.
      This variable is part of the event history \code{data}.
} \item{e\_i(t)}{ is the epidemic component of the infection intensity, defined as \deqn{e_i(t) = \sum_{j \in I(t)} f(||s_i - s_j||)} where \eqn{I(t)} is the set of infectious individuals just before time point \eqn{t}, \eqn{s_i} is the coordinate vector of individual \eqn{i} and the function \eqn{f} is defined as \deqn{f(u) = \sum_{m=1}^p \alpha_m B_m(u)} with unknown transmission parameters \eqn{\alpha} and known distance functions \eqn{B_m}. This set of distance functions results in the set of epidemic variables normally calculated by the converter function \code{\link{as.epidata}}, considering the equality \deqn{e_i(t) = \sum_{m=1}^p \alpha_m x_{im}(t)} with \eqn{x_{im}(t) = \sum_{j \in I(t)} B_m(||s_i - s_j||)} being the \eqn{m}'th epidemic variable for individual \eqn{i}. } \item{h\_i(t)}{ is the endemic (\code{cox}) component of the infection intensity, defined as \deqn{h_i(t) = \exp(h_0(t) + z_i(t)' \beta)} where \eqn{h_0(t)} is the log-baseline hazard function, \eqn{z_i(t)} is the vector of endemic covariates of individual \eqn{i} and \eqn{\beta} is the vector of unknown coefficients. To fit the model, the log-baseline hazard function is approximated by a piecewise constant function with known knots, but unknown levels, which will be estimated. The approximation is specified by the arguments \code{knots} or \code{nIntervals}. } } If a big number of \code{knots} (or \code{nIntervals}) is chosen, the corresponding log-baseline parameters can be rendered identifiable by the use of penalized likelihood inference. At present, it is the job of the user to choose an adequate value of the smoothing parameter \code{lambda.smooth}. Alternatively, a data driven \code{lambda.smooth} smoothing parameter selection based on a mixed model representation of an equivalent truncated power spline is offered (see reference for further details). The following two steps are iterated until convergence: \enumerate{ \item Given fixed smoothing parameter, the penalized likelihood is optimized for the regression components using a L-BFGS-B approach \item Given fixed regression parameters, a Laplace approximation of the marginal likelihood for the smoothing parameter is numerically optimized. } Depending on the data, convergence might take a couple of iterations. Note also that it is unwise to include endemic covariates with huge values, as they affect the intensities on the exponential scale (after multiplication by the parameter vector \eqn{\beta}). With large covariate values, the \code{optim} method "L-BFGS-B" will likely terminate due to an infinite log-likelihood or score function in some iteration. } \value{ \code{twinSIR} returns an object of class \code{"twinSIR"}, which is a list containing the following components: \item{coefficients}{a named vector of coefficients.} \item{loglik}{the maximum of the (penalized) log-likelihood function.} \item{counts}{the number of log-likelihood and score function evaluations.} \item{converged}{logical indicating convergence of the optimization algorithm.} \item{fisherinfo.observed}{if requested, the negative Hessian from \code{optim}.} \item{fisherinfo}{an estimation of the Expected Fisher Information matrix.} \item{method}{the optimization algorithm used.} \item{intervals}{a numeric vector (\code{c(minTime, knots, maxTime)}) representing the consecutive intervals of constant log-baseline.} \item{nEvents}{a numeric vector containing the number of infections in each of the above \code{intervals}.} \item{model}{if requested, the model information used. 
This is a list with components \code{"survs"} (data.frame with the id,
    start, stop and event columns), \code{"X"} (matrix of the epidemic
    variables), \code{"Z"} (matrix of the endemic variables),
    \code{"weights"} (the specified \code{weights}),
    \code{"lambda.smooth"} (the specified \code{lambda.smooth}),
    \code{"K"} (the penalty matrix used), and \code{"f"} and \code{"w"}
    (the functions to generate the used epidemic covariates).
    Be aware that the model only contains those rows with
    \code{atRiskY == 1}!}
  \item{data}{if requested, the supplied \code{"epidata"} \code{data}.}
  \item{call}{the matched call.}
  \item{formula}{the specified \code{formula}.}
  \item{terms}{the \code{terms} object used.}
}
\references{
  \enc{Höhle}{Hoehle}, M. (2009),
  Additive-multiplicative regression models for spatio-temporal epidemics,
  \emph{Biometrical Journal}, \bold{51} (6), 961-978.

  Meyer, S., Held, L. and \enc{Höhle}{Hoehle}, M. (2017):
  Spatio-temporal analysis of epidemic phenomena using the \R package
  \pkg{surveillance}.
  \emph{Journal of Statistical Software}, \bold{77} (11), 1-55.
  \doi{10.18637/jss.v077.i11}
}
\author{
  Michael \enc{Höhle}{Hoehle} and Sebastian Meyer
}
\note{
  There are some restrictions to modelling the infection intensity
  without a baseline hazard rate, i.e. without an intercept in the
  \code{formula}. Reason: At some point, the optimization algorithm
  L-BFGS-B tries to set all transmission parameters \eqn{\alpha} to the
  boundary value 0 and to calculate the (penalized) score function with
  this set of parameters (all 0). The problem then is that the values of
  the infection intensities \eqn{\lambda_i(t)} are 0 for all \eqn{i} and
  \eqn{t} and especially at observed event times, which is impossible.
  Without a baseline, it is not allowed to have all alpha's set to 0,
  because then we would not observe any infections. Unfortunately,
  L-BFGS-B cannot consider this restriction. Thus, if one wants to fit a
  model without baseline hazard, the control parameter \code{lower} must
  be specified in \code{optim.args} so that some alpha is strictly
  positive, e.g. \code{optim.args = list(lower = c(0,0.001,0.001,0))},
  and the initial parameter vector \code{par} must not be the zero vector.
}
\seealso{
  \code{\link{as.epidata}} for the necessary data input structure,
  \code{\link{plot.twinSIR}} for plotting the path of the infection
  intensity, \code{\link{profile.twinSIR}} for profile likelihood
  estimation, and \code{\link{simulate.twinSIR}} for the simulation of
  epidemics following the fitted model. Furthermore, the standard
  extraction methods \code{\link[=vcov.twinSIR]{vcov}},
  \code{\link[=logLik.twinSIR]{logLik}}, \code{\link[=AIC.twinSIR]{AIC}}
  and \code{\link[=extractAIC.twinSIR]{extractAIC}} are implemented for
  objects of class \code{"twinSIR"}.
} \examples{ data("hagelloch") summary(hagelloch) # simple model with an overall constant baseline hazard rate fit1 <- twinSIR(~ household + cox(AGE), data = hagelloch) fit1 summary(fit1) # see also help("summary.twinSIR") plot(fit1) # see also help("plot.twinSIR") checkResidualProcess(fit1) # could be better # fit a piecewise constant baseline hazard rate with 3 intervals using # _un_penalized ML and estimated coefs from fit1 as starting values fit2 <- twinSIR(~ household, data = hagelloch, nIntervals = 3, optim.args = list(par = coef(fit1)[c(1,2,2,2)])) summary(fit2) # fit a piecewise constant baseline hazard rate with 7 intervals # using _penalized_ ML fit3 <- twinSIR(~ household, data = hagelloch, nIntervals = 7, lambda.smooth = 0.1, penalty = 1) summary(fit3) checkResidualProcess(fit3) # plot the estimated log-baseline levels plot(x=fit2$intervals, y=coef(fit2)[c(2,2:4)], type="S", ylim=c(-6, -1)) lines(x=fit3$intervals, y=coef(fit3)[c(2,2:8)], type="S", col=2) legend("right", legend=c("unpenalized 3", "penalized 7"), lty=1, col=1:2, bty="n") ## special use case: fit the model to a subset of the events only, ## while preserving epidemic contributions from the remainder ## (maybe some buffer area nodes) fit_subset <- twinSIR(~ household, data = hagelloch, subset = CL=="preschool") summary(fit_subset) \dontshow{ ## the eventTimes attribute was wrong in surveillance <= 1.15.0 stopifnot( length(residuals(fit_subset)) == sum(fit_subset$model$survs$event) ) } } \keyword{models} \keyword{optimize} surveillance/man/surveillance.options.Rd0000644000176200001440000000630613777627613020226 0ustar liggesusers\name{surveillance.options} \alias{surveillance.options} \alias{reset.surveillance.options} \title{Options of the \pkg{surveillance} Package} \description{ Query, set or reset options specific to the \pkg{surveillance} package, similar to what \code{\link{options}} does for global settings. } \usage{ surveillance.options(...) reset.surveillance.options() } \arguments{ \item{\dots}{ Either empty, or a sequence of option names (as strings), or a sequence of \code{name=value} pairs, or a named list of options. Available options are: \describe{ \item{gpclib:}{ Logical flag indicating whether \pkg{gpclib}, the General Polygon Clipping Library for \R, which has a restricted license (commercial use prohibited), may be used. This is no longer required since package \pkg{surveillance} has switched to alternatives such as \pkg{polyclip} and \pkg{rgeos} for generating \code{"epidataCS"} objects by \code{as.epidataCS} or \code{simEpidataCS}. However, for \code{\link{unionSpatialPolygons}} and \code{\link{intersectPolyCircle.gpc.poly}}, using \pkg{gpclib} is still an option (mainly for backwards compatibility). The default setting is \code{FALSE}. } \item{stsTickFactors:}{ A named vector containing tick sizes for the \code{"sts"} x-axis relative to \code{\link{par}("tcl")}. Each entry contains the size at \code{\link{strptime}} formatting strings. See the help on \code{\link{stsplot_time1}} for details. \describe{ \item{"\%d"}{} \item{"\%W"}{} \item{"\%V"}{} \item{"\%m"}{} \item{"\%Q"}{} \item{"\%Y"}{} \item{"\%G"}{} } } \item{colors:}{ A named list containing plotting color defaults. \describe{ \item{nowSymbol}{Color of the "now" symbol in \code{stsNC} plots. Default: \code{"springgreen4"}.} \item{piBars}{Color of the prediction interval bars in \code{stsNC} plots. Default: \code{"orange"}.} } } \item{allExamples:}{ Logical flag queried before running cumbersome computations in help file examples. 
For \code{interactive()} sessions, this option defaults to \code{TRUE}.
        Otherwise, long examples will only be run if the environment
        variable \env{_R_SURVEILLANCE_ALL_EXAMPLES_} is set (to any value
        different from \code{""}) when attaching the \pkg{surveillance}
        package. This is to avoid long computations during (daily) CRAN
        checks.
      }
    }
  }
}
\value{
  \code{reset.surveillance.options} reverts all options to their default
  values and (invisibly) returns these in a list.

  For \code{surveillance.options}, the following holds:
  \itemize{
    \item If no arguments are given, the current values of all package
      options are returned in a list.
    \item If one option name is given, the current value of this option
      is returned (\emph{not} in a list, just the value).
    \item If several option names are given, the current values of these
      options are returned in a list.
    \item If \code{name=value} pairs are given, the named options are set
      to the given values, and the \emph{previous} values of these
      options are returned in a list.
  }
}
\examples{
surveillance.options()
}
\keyword{environment}
surveillance/man/campyDE.Rd0000644000176200001440000000555614004512307015341 0ustar liggesusers\encoding{latin1}
\name{campyDE}
\alias{campyDE}
\docType{data}
\title{Campylobacteriosis and Absolute Humidity in Germany 2002-2011}
\description{
  Weekly number of reported campylobacteriosis cases in Germany,
  2002-2011, together with the corresponding absolute humidity (in g/m^3)
  that week. The absolute humidity was computed according to the procedure
  by Dengler (1997) using the means of representative weather station data
  from the German Climate Service.
}
\usage{
data(campyDE)
}
\format{
  A \code{data.frame} containing the following columns:
  \describe{
    \item{\code{date}}{\code{Date} instance containing the Monday of the
      reporting week.}
    \item{\code{case}}{Number of reported cases that week.}
    \item{\code{state}}{Boolean indicating whether there is external
      knowledge about an outbreak that week.}
    \item{\code{hum}}{Mean absolute humidity (in g/m^3) of that week as
      measured by a single representative weather station.}
    \item{\code{l1.hum}-\code{l5.hum}}{Lagged versions (lagged by 1-5
      weeks) of the \code{hum} covariate.}
    \item{newyears}{Boolean indicating whether the reporting week
      corresponds to the first two weeks of the year (TRUE) or not
      (FALSE). Note: The first week of a year is here defined as the first
      reporting week, which has its corresponding Monday within new year.}
    \item{christmas}{Boolean indicating whether the reporting week
      corresponds to the last two weeks of the year (TRUE) or not (FALSE).
      Note: These are the first two weeks before the \code{newyears}
      weeks.}
    \item{O104period}{Boolean indicating whether the reporting week
      corresponds to the W21-W30 period of increased gastroenteritis
      awareness during the O104:H4 STEC outbreak.}
  }
}
\source{
  The data on campylobacteriosis cases have been queried from the
  Survstat@RKI database of the German Robert Koch Institute
  (\url{https://survstat.rki.de/}).

  Data for the computation of absolute humidity were obtained from the
  German Climate Service (Deutscher Wetterdienst), Climate data of
  Germany, available at \url{https://www.dwd.de}.

  A complete data description and an analysis of the data can be found in
  Manitz and \enc{Höhle}{Hoehle} (2013).
}
\references{
  Manitz, J. and \enc{Höhle}{Hoehle}, M. (2013): Bayesian outbreak
  detection algorithm for monitoring reported cases of campylobacteriosis
  in Germany. Biometrical Journal, 55(4), 509-526.
}
\examples{
# Load the data
data("campyDE")

# O104 period is W21-W30 in 2011
stopifnot(all(campyDE$O104period == (
  (campyDE$date >= as.Date("2011-05-23")) &
  (campyDE$date < as.Date("2011-07-31"))
)))

# Make an sts object from the data.frame
cam.sts <- sts(epoch=campyDE$date, observed=campyDE$case,
               state=campyDE$state)

# Plot the result
plot(cam.sts)
}
\keyword{datasets}
surveillance/man/stsNewport.Rd0000644000176200001440000000126114026351202016173 0ustar liggesusers\encoding{latin1}
\name{stsNewport}
\alias{stsNewport}
\docType{data}
\title{Salmonella Newport cases in Germany 2001-2015}
\description{
  Reported number of cases of the Salmonella Newport serovar in Germany
  2001-2015, by date of disease onset. The slot \code{control} contains a
  matrix \code{reportingTriangle$n} with the reporting triangle as
  described in Salmon et al. (2015).
}
\usage{data(stsNewport)}
\format{
  An \code{sts} object.
}
\references{
  Salmon, M., Schumacher, D., Stark, K., \enc{Höhle}{Hoehle}, M. (2015):
  Bayesian outbreak detection in the presence of reporting delays.
  Biometrical Journal, 57 (6), 1051-1067.
}
\keyword{datasets}
surveillance/man/stsplot_space.Rd0000644000176200001440000001441513746501137016703 0ustar liggesusers\name{stsplot_space}
\alias{stsplot_space}
\title{
  Map of Disease Counts/Incidence accumulated over a Given Period
}
\description{
  This is the \code{plot} variant of \code{type=observed~unit} for
  \code{"\linkS4class{sts}"} objects, i.e.,
  \code{plot(stsObj, type=observed~unit, ...)} calls the function
  documented below. It produces an \code{\link{spplot}} where regions are
  color-coded according to disease incidence (either absolute counts or
  relative to population) over a given time period.
}
\usage{
stsplot_space(x, tps = NULL, map = x@map, population = NULL,
              main = NULL, labels = FALSE, at = 10, col.regions = NULL,
              colorkey = list(space = "bottom", labels = list(at=at)),
              total.args = NULL,
              gpar.missing = list(col = "darkgrey", lty = 2, lwd = 2),
              sp.layout = NULL,
              xlim = bbox(map)[1, ], ylim = bbox(map)[2, ], ...)
}
\arguments{
  \item{x}{
    an object of class \code{"\linkS4class{sts}"} or a matrix of counts,
    i.e., \code{observed(stsObj)}, where especially \code{colnames(x)}
    have to be contained in \code{row.names(map)}. If a matrix, the
    \code{map} object has to be provided explicitly. The possibility of
    specifying a matrix is, e.g., useful to plot mean counts of
    simulations from \code{\link{simulate.hhh4}}.
  }
  \item{tps}{
    a numeric vector of one or more time points. The unit-specific
    \emph{sum} over all time points \code{tps} is plotted. The default
    \code{tps=NULL} means accumulation over the whole time period
    \code{1:nrow(x)}.
  }
  \item{map}{
    an object inheriting from \code{"\linkS4class{SpatialPolygons}"}
    representing the \code{ncol(x)} regions. By default the \code{map}
    slot of \code{x} is queried (which might be empty and is not
    applicable if \code{x} is a matrix of counts).
  }
  \item{population}{
    if \code{NULL} (default), the map shows the region-specific numbers
    of cases accumulated over \code{tps}. For a disease incidence map,
    \code{population} can be specified in three ways:
    \itemize{
      \item a numeric vector of population numbers in the \code{ncol(x)}
        regions, used to divide the disease counts.
      \item a matrix of population counts of dimension \code{dim(x)}
        (such as \code{population(x)} in an \code{"sts"} object).
        This will produce the cumulative incidence over \code{tps}
        relative to the population at the first time point, i.e., only
        \code{population[tps[1],]} is used.
\item [if \code{is(x, "sts")}] a scalar specifying how \code{population(x)} should be scaled for use as the population matrix, i.e., \code{population(x)/population} is used. For instance, if \code{population(x)} contains raw population numbers, \code{population=1000} would produce the incidence per 1000 inhabitants. } } \item{main}{ a main title for the plot. If \code{NULL} and \code{x} is of class \code{"sts"}, the time range of \code{tps} is put as the main title. } \item{labels}{ determines if and how the regions of the \code{map} are labeled, see \code{\link{layout.labels}}. } \item{at}{ either a number of levels (default: 10) for the categorization (color-coding) of counts/incidence, or a numeric vector of specific break points, or a named list of a number of levels (\code{"n"}), a transformer (\code{"trafo"}) of class \code{"\link[scales]{trans}"} defined by package \pkg{scales}, and optional further arguments for \code{\link{pretty}}. The default is the square root transformation (\code{\link[scales]{sqrt_trans}}). Note that intervals given by \code{at} are closed on the left and open to the right; if manually specified break points do not cover the data range, further breaks are automatically added at 0 and the maximum (rounded up to 1 significant digit), respectively. } \item{col.regions}{ a vector of fill colors, sufficiently long to serve all levels (determined by \code{at}). \dQuote{Heat} colors are used by default (\code{NULL}). } \item{colorkey}{ a list describing the color key, see \code{\link[lattice]{levelplot}}. The default list elements will be updated by the provided list using \code{\link{modifyList}}. } \item{total.args}{ an optional list of arguments for \code{\link[grid]{grid.text}} to have the overall number/incidence of cases printed at an edge of the map. The default settings are \code{list(label="Overall: ", x=1, y=0)}, and \code{total.args=list()} will use all of them. } \item{gpar.missing}{list of graphical parameters for \code{\link{sp.polygons}} applied to the regions of \code{map}, which are not part of \code{x}. Such extra regions won't be plotted if \code{!is.list(gpar.missing)}.} \item{sp.layout}{ optional list of additional layout items, see \code{\link{spplot}}. } \item{xlim,ylim}{numeric vectors of length 2 specifying the axis limits.} \item{\dots}{ further arguments for \code{\link{spplot}}. } } \value{ a lattice plot of class \code{"\link[lattice:trellis.object]{trellis}"}, but see \code{\link{spplot}}. } \author{ Sebastian Meyer } \seealso{ the central \code{\link{stsplot}}-documentation for an overview of plot types, and \code{\link{animate.sts}} for animations of \code{"sts"} objects. 
} \examples{ data("measlesWeserEms") # default plot: total region-specific counts over all weeks plot(measlesWeserEms, type = observed~unit) stsplot_space(measlesWeserEms) # the same # compare with old implementation plot(measlesWeserEms, type = observed~1|unit) # cumulative incidence (per 100000 inhabitants), with region labels plot(measlesWeserEms, type=observed~unit, population=measlesWeserEms@map$POPULATION / 100000, labels=list(labels="GEN", cex=0.7, font=3), sub="cumulative incidence (per 100'000 inhabitants)") # incidence in a particular week, manual color breaks, display total plot(measlesWeserEms, type=observed~unit, tps=62, population=measlesWeserEms@map$POPULATION / 100000, at=c(0, 1, 5), total.args=list(x=0, label="Overall incidence: ")) # if we had only observed a subset of the regions plot(measlesWeserEms[,5:11], type = observed~unit, gpar.missing = list(col="gray", lty=4)) } \keyword{hplot} \keyword{spatial} surveillance/man/epidataCS_update.Rd0000644000176200001440000000444612320060306017207 0ustar liggesusers\name{epidataCS_update} \alias{update.epidataCS} \title{ Update method for \code{"epidataCS"} } \description{ The \code{\link{update}} method for the \code{"\link{epidataCS}"} class may be used to modify the hyperparameters \eqn{\epsilon} (\code{eps.t}) and \eqn{\delta} (\code{eps.s}), the indicator matrix \code{qmatrix} of possible ways of transmission between the event types, and the numerical accuracy \code{nCircle2Poly} of the polygonal representation of a circle. The update method will also update the auxiliary information contained in an \code{"epidataCS"} object accordingly, e.g., the vector of potential sources of each event, or the polygonal representation of the influence region. } \usage{ \method{update}{epidataCS}(object, eps.t, eps.s, qmatrix, nCircle2Poly, ...) } \arguments{ \item{object}{ an object of class \code{"epidataCS"}. } \item{eps.t}{ numeric vector of length the number of events in \code{object$events}. The event data column \code{eps.t} specifies the maximum temporal influence radius (e.g., length of infectious period, time to culling, etc.) of the events. } \item{eps.s}{ numeric vector of length the number of events in \code{object$events}. The event data column \code{eps.s} specifies the maximum spatial influence radius of the events. } \item{qmatrix}{ square indicator matrix (0/1 or TRUE/FALSE) for possible transmission between the event types. } \item{nCircle2Poly}{ accuracy (number of edges) of the polygonal approximation of a circle. } \item{\dots}{ unused (argument of the generic). } } \value{ The updated \code{"epidataCS"} object. } \author{ Sebastian Meyer } \seealso{ class \code{"\link{epidataCS}"}. } \examples{ data("imdepi") ## assume different interaction ranges and simplify polygons imdepi2 <- update(imdepi, eps.t = 20, eps.s = Inf, nCircle2Poly = 16) (s <- summary(imdepi)) (s2 <- summary(imdepi2)) ## The update reduced the number of infectives (along time) ## because the length of the infectious periods is reduced. It also ## changed the set of potential sources of transmission for each ## event, since the interaction is shorter in time but wider in space ## (eps.s=Inf means interaction over the whole observation region). 
}
\keyword{manip}
\keyword{utilities}
\keyword{methods}
surveillance/man/findH.Rd0000644000176200001440000000445313122471774015052 0ustar liggesusers\name{findH}
\alias{findH}
\alias{hValues}
\title{Find decision interval for given in-control ARL and reference value}
\description{
  Function to find a decision interval \code{h}* for given reference
  value \code{k} and desired ARL \eqn{\gamma} so that the average run
  length for a Poisson or Binomial CUSUM with in-control parameter
  \eqn{\theta_0}, reference value \code{k} and decision interval
  \code{h}* is approximately \eqn{\gamma}, i.e.
  \eqn{\Big| \frac{ARL(h^*) -\gamma}{\gamma} \Big| < \epsilon},
  or larger, i.e. \eqn{ARL(h^*) > \gamma }.
}
\usage{
findH(ARL0, theta0, s = 1, rel.tol = 0.03, roundK = TRUE,
      distr = c("poisson", "binomial"), digits = 1, FIR = FALSE, ...)

hValues(theta0, ARL0, rel.tol=0.02, s = 1, roundK = TRUE, digits = 1,
        distr = c("poisson", "binomial"), FIR = FALSE, ...)
}
\arguments{
  \item{ARL0}{ desired in-control ARL \eqn{\gamma} }
  \item{theta0}{in-control parameter \eqn{\theta_0}}
  \item{s}{change to detect, see details}
  \item{distr}{ \code{"poisson"} or \code{"binomial"} }
  \item{rel.tol}{relative tolerance, i.e. the search for \code{h}* is
    stopped if \eqn{\Big| \frac{ARL(h^*) -\gamma}{\gamma} \Big| < }
    \code{rel.tol} }
  \item{digits}{the reference value \code{k} and the decision interval
    \code{h} are rounded to \code{digits} decimal places}
  \item{roundK}{ passed to \code{findK} }
  \item{FIR}{if \code{TRUE}, the decision interval that leads to the
    desired ARL for a FIR CUSUM with head start \eqn{\frac{\code{h}}{2}}
    is returned }
  \item{\dots}{ further arguments for the distribution function, i.e.
    number of trials \code{n} for the binomial cdf }
}
\value{
  \code{findH} returns a vector and \code{hValues} returns a matrix with
  elements
  \item{theta0}{in-control parameter}
  \item{h}{decision interval}
  \item{k}{reference value}
  \item{ARL}{ARL for a CUSUM with parameters \code{k} and \code{h} }
  \item{rel.tol}{corresponds to
    \eqn{\Big| \frac{ARL(h) -\gamma}{\gamma} \Big|} }
}
\details{
  The out-of-control parameter used to determine the reference value
  \code{k} is specified as:
  \deqn{\theta_1 = \lambda_0 + s \sqrt{\lambda_0} }
  for a Poisson variate \eqn{X \sim Po(\lambda)}
  \deqn{\theta_1 = \frac{s \pi_0}{1+(s-1) \pi_0} }
  for a Binomial variate \eqn{X \sim Bin(n, \pi) }
}
\keyword{models}
surveillance/man/shadar.Rd0000644000176200001440000000114013174706302015251 0ustar liggesusers\name{shadar}
\alias{shadar}
\docType{data}
\title{Salmonella Hadar cases in Germany 2001-2006}
\description{
  Number of Salmonella Hadar cases in Germany 2001-2006. An increase is
  seen during 2006.
}
\usage{data(shadar)}
\format{
  A \code{disProg} object containing \eqn{295\times 1}{295 x 1}
  observations starting from week 1 in 2001 to week 35 in 2006.
}
\source{
  Robert Koch-Institut: SurvStat: \url{https://survstat.rki.de/};
  Queried in September 2006.

  Robert Koch Institut, Epidemiologisches Bulletin 31/2006.
}
\examples{
data(shadar)
plot(shadar)
}
\keyword{datasets}
surveillance/man/bestCombination.Rd0000644000176200001440000000102513122471774017136 0ustar liggesusers\name{bestCombination}
\alias{bestCombination}
\title{Partition of a number into two factors}
\description{
  Given a prime number factorization \code{x}, \code{bestCombination}
  partitions \code{x} into two groups, such that the product of the
  numbers in group one is as similar as possible to the product of the
  numbers of group two. This is useful in \code{\link{magic.dim}}.
}
\usage{
bestCombination(x)
}
\arguments{
  \item{x}{prime number factorization}
}
\value{a vector \code{c(prod(set1),prod(set2))}}
\keyword{dplot}
surveillance/man/algo.cdc.Rd0000644000176200001440000000627313165505075015475 0ustar liggesusers\name{algo.cdc}
\alias{algo.cdcLatestTimepoint}
\alias{algo.cdc}
\encoding{latin1}
\title{The CDC Algorithm}
\description{
  Surveillance using the CDC Algorithm
}
\usage{
algo.cdcLatestTimepoint(disProgObj, timePoint = NULL,
                        control = list(b = 5, m = 1, alpha = 0.025))
algo.cdc(disProgObj, control = list(range = range, b = 5, m = 1,
                                    alpha = 0.025))
}
\arguments{
  \item{disProgObj}{object of class disProg (including the observed and
    the state chain).}
  \item{timePoint}{time point which should be evaluated in
    \code{algo.cdcLatestTimepoint}. The default is to use the latest
    timepoint.}
  \item{control}{control object: \code{range} determines the desired
    timepoints which should be evaluated, \code{b} describes the number
    of years to go back for the reference values, \code{m} is the half
    window width for the reference values around the appropriate
    timepoint (see details). The standard definition is \code{b}=5 and
    \code{m}=1.}
}
\details{
  Using the reference values for calculating an upper limit, an alarm is
  given if the actual value is bigger than a computed threshold.
  \code{algo.cdc} calls \code{algo.cdcLatestTimepoint} for the values
  specified in \code{range} and for the system specified in
  \code{control}. The threshold is calculated from the predictive
  distribution, i.e.
  \deqn{mean(x) + z_{\alpha/2} \cdot sd(x) \cdot \sqrt{1+1/k},}
  which corresponds to Equation 8-1 in Farrington and Andrews (2003).
  Note that an aggregation into 4-week blocks occurs in
  \code{algo.cdcLatestTimepoint}, and \code{m} denotes the number of
  4-week blocks (months) to use as reference values. This function
  currently does the same for monthly data (not correct!)
}
\value{
  \code{algo.cdcLatestTimepoint} returns a list of class \code{survRes}
  (surveillance result), which includes the alarm value (alarm = 1, no
  alarm = 0) for recognizing an outbreak, the threshold value for
  recognizing the alarm and the input object of class disProg.

  \code{algo.cdc} gives a list of class \code{survRes} which includes the
  vector of alarm values for every timepoint in \code{range}, the vector
  of threshold values for every timepoint in \code{range} for the system
  specified by \code{b} and \code{m}, the range and the input object of
  class disProg.
}
\seealso{
  \code{\link{algo.rkiLatestTimepoint}},
  \code{\link{algo.bayesLatestTimepoint}} and \code{\link{algo.bayes}}
  for the Bayes system.
}
\author{M. \enc{Höhle}{Hoehle}}
\examples{
# Create a test object
disProgObj <- sim.pointSource(p = 0.99, r = 0.5, length = 500, A = 1,
                              alpha = 1, beta = 0, phi = 0,
                              frequency = 1, state = NULL, K = 1.7)

# Test weeks 400 to 500 for outbreaks with a self-defined CDC algorithm
algo.cdc(disProgObj, control = list(range = 400:500, alpha = 0.025))
}
\keyword{classif}
\references{
  Stroup, D., G. Williamson, J. Herndon, and J. Karon (1989). Detection
  of aberrations in the occurrence of notifiable diseases surveillance
  data. Statistics in Medicine 8, 323-329.

  Farrington, C. and N. Andrews (2003). Monitoring the Health of
  Populations, Chapter Outbreak Detection: Application to Infectious
  Disease Surveillance, pp. 203-231. Oxford University Press.
} surveillance/man/hhh4_validation.Rd0000644000176200001440000003517614005030003017052 0ustar liggesusers\name{hhh4_validation} \alias{oneStepAhead} \alias{quantile.oneStepAhead} \alias{confint.oneStepAhead} \alias{plot.oneStepAhead} \alias{scores.oneStepAhead} \alias{scores.hhh4} \alias{calibrationTest.oneStepAhead} \alias{calibrationTest.hhh4} \alias{pit.oneStepAhead} \alias{pit.hhh4} \title{Predictive Model Assessment for \code{hhh4} Models} \description{ The function \code{oneStepAhead} computes successive one-step-ahead predictions for a (random effects) HHH model fitted by \code{\link{hhh4}}. These can be inspected using the \code{quantile}, \code{confint} or \code{plot} methods. The associated \code{\link{scores}}-method computes a number of (strictly) proper scoring rules based on such one-step-ahead predictions; see Paul and Held (2011) for details. There are also \code{\link{calibrationTest}} and \code{\link{pit}} methods for \code{oneStepAhead} predictions. Scores, calibration tests and PIT histograms can also be computed for the fitted values of an \code{hhh4} model (i.e., in-sample/training data evaluation). } \usage{ oneStepAhead(result, tp, type = c("rolling", "first", "final"), which.start = c("current", "final"), keep.estimates = FALSE, verbose = TRUE, cores = 1) \method{quantile}{oneStepAhead}(x, probs = c(2.5, 10, 50, 90, 97.5)/100, ...) \method{confint}{oneStepAhead}(object, parm, level = 0.95, ...) \method{plot}{oneStepAhead}(x, unit = 1, probs = 1:99/100, start = NULL, means.args = NULL, ...) ## assessment of "oneStepAhead" predictions \method{scores}{oneStepAhead}(x, which = c("logs", "rps", "dss", "ses"), units = NULL, sign = FALSE, individual = FALSE, reverse = FALSE, ...) \method{calibrationTest}{oneStepAhead}(x, units = NULL, ...) \method{pit}{oneStepAhead}(x, units = NULL, ...) ## assessment of the "hhh4" model fit (in-sample predictions) \method{scores}{hhh4}(x, which = c("logs", "rps", "dss", "ses"), subset = x$control$subset, units = seq_len(x$nUnit), sign = FALSE, ...) \method{calibrationTest}{hhh4}(x, subset = x$control$subset, units = seq_len(x$nUnit), ...) \method{pit}{hhh4}(x, subset = x$control$subset, units = seq_len(x$nUnit), ...) } \arguments{ \item{result}{fitted \code{\link{hhh4}} model (class \code{"hhh4"}).} \item{tp}{ numeric vector of length 2 specifying the time range in which to compute one-step-ahead predictions (for the time points \code{tp[1]+1}, \ldots, \code{tp[2]+1}). If a single time index is specified, it is interpreted as \code{tp[1]}, and \code{tp[2]} is set to the penultimate time point of \code{result$control$subset}. } \item{type}{ The default \code{"rolling"} procedure sequentially refits the model up to each time point in \code{tp} and computes the one-step-ahead predictions for the respective next time point. The alternative \code{type}s are no true one-step-ahead predictions but much faster: \code{"first"} will refit the model for the first time point \code{tp[1]} only and use this specific fit to calculate all subsequent predictions, whereas \code{"final"} will just use \code{result} to calculate these. The latter case thus gives nothing else than a subset of \code{result$fitted.values} if the \code{tp}'s are part of the fitted subset \code{result$control$subset}. } \item{which.start}{ Which initial parameter values should be used when successively refitting the model to subsets of the data (up to time point \code{tp[1]}, up to \code{tp[1]+1}, ...) if \code{type="rolling"}? 
Default (\code{"current"}) is to use the parameter estimates from the previous time point, and \code{"final"} means to always use the estimates from \code{result} as initial values. Alternatively, \code{which.start} can be a list of \code{start} values as expected by \code{\link{hhh4}}, which then replace the corresponding estimates from \code{result} as initial values. This argument is ignored for \dQuote{non-rolling} \code{type}s. } \item{keep.estimates}{ logical indicating if parameter estimates and log-likelihoods from the successive fits should be returned. } \item{verbose}{ non-negative integer (usually in the range \code{0:3}) specifying the amount of tracing information to output. During \code{hhh4} model updates, the following verbosity is used: \code{0} if \code{cores > 1}, otherwise \code{verbose-1} if there is more than one time point to predict, otherwise \code{verbose}. } \item{cores}{the number of cores to use when computing the predictions for the set of time points \code{tp} in parallel (with \code{\link[parallel]{mclapply}}). Note that parallelization is not possible in the default setting \code{type="rolling"} and \code{which.start="current"} (use \code{which.start="final"} for this to work).} \item{object}{an object of class \code{"oneStepAhead"}.} \item{parm}{unused (argument of the generic).} \item{level}{required confidence level of the prediction interval.} \item{probs}{numeric vector of probabilities with values in [0,1].} \item{unit}{single integer or character selecting a unit for which to produce the plot.} \item{start}{ x-coordinate of the first prediction. If \code{start=NULL} (default), this is derived from \code{x}. } \item{means.args}{ if a list (of graphical parameters for \code{\link{lines}}), the point predictions (from \code{x$pred}) are added to the plot. } \item{x}{an object of class \code{"oneStepAhead"} or \code{"hhh4"}.} \item{which}{character vector determining which scores to compute. The package \pkg{surveillance} implements the following proper scoring rules: logarithmic score (\code{"logs"}), ranked probability score (\code{"rps"}), Dawid-Sebastiani score (\code{"dss"}), and squared error score (\code{"ses"}). The normalized SES (\code{"nses"}) is also available but it is improper and hence not computed by default.\cr It is possible to name own scoring rules in \code{which}. These must be functions of \code{(x, mu, size)}, vectorized in all arguments (time x unit matrices) except that \code{size} is \code{NULL} in case of a Poisson model. See the available scoring rules for guidance, e.g., \code{\link{dss}}. } \item{subset}{ subset of time points for which to calculate the scores (or test calibration, or produce the PIT histogram, respectively). Defaults to the subset used for fitting the model.} \item{units}{integer or character vector indexing the units for which to compute the scores (or the calibration test or the PIT histogram, respectively). By default, all units are considered.} \item{sign}{logical indicating if the function should also return \code{sign(x-mu)}, i.e., the sign of the difference between the observed counts and corresponding predictions. This does not really make sense when averaging over multiple \code{units} with \code{individual=FALSE}.} \item{individual}{logical indicating if the individual scores of the \code{units} should be returned. By default (\code{FALSE}), the individual scores are averaged over all \code{units}.} \item{reverse}{logical indicating if the rows (time points) should be reversed in the result. 
The long-standing but awkward default was to do so for the \code{oneStepAhead}-method. This has changed in version 1.16.0, so time points are no longer reversed by default.} \item{\dots}{Unused by the \code{quantile}, \code{confint} and \code{scores} methods.\cr The \code{plot}-method passes further arguments to the \code{\link{fanplot}} function, e.g., \code{fan.args}, \code{observed.args}, and \code{key.args} can be used to modify the plotting style.\cr For the \code{calibrationTest}-method, further arguments are passed to \code{\link{calibrationTest.default}}, e.g., \code{which} to select a scoring rule.\cr For the \code{pit}-methods, further arguments are passed to \code{\link{pit.default}}.} } \value{ \code{oneStepAhead} returns a list (of class \code{"oneStepAhead"}) with the following components: \item{pred}{one-step-ahead predictions in a matrix, where each row corresponds to one of the time points requested via the argument \code{tp}, and which has \code{ncol(result$stsObj)} unit-specific columns. The rownames indicate the predicted time points and the column names are identical to \code{colnames(result$stsObj)}.} \item{observed}{matrix with observed counts at the predicted time points. It has the same dimensions and names as \code{pred}.} \item{psi}{in case of a negative-binomial model, a matrix of the estimated overdispersion parameter(s) at each time point on the internal -log-scale (1 column if \code{"NegBin1"}, \code{ncol(observed)} columns if \code{"NegBinM"} or shared overdispersion). For a \code{"Poisson"} model, this component is \code{NULL}.} \item{allConverged}{logical indicating if all successive fits converged.} If \code{keep.estimates=TRUE}, there are the following additional elements: \item{coefficients}{matrix of estimated regression parameters from the successive fits.} \item{Sigma.orig}{matrix of estimated variance parameters from the successive fits.} \item{logliks}{matrix with columns \code{"loglikelihood"} and \code{"margll"} with their obvious meanings.} The \code{quantile}-method computes quantiles of the one-step-ahead forecasts. If there is only one unit, it returns a tp x prob matrix, otherwise a tp x unit x prob array. The \code{confint}-method is a convenient wrapper with \code{probs} set according to the required confidence level. The function \code{scores} computes the scoring rules specified in the argument \code{which}. If multiple \code{units} are selected and \code{individual=TRUE}, the result is an array of dimensions \code{c(nrow(pred),length(units),5+sign)} (up to \pkg{surveillance} 1.8-0, the first two dimensions were collapsed to give a matrix). Otherwise, the result is a matrix with \code{nrow(pred)} rows and \code{5+sign} columns. If there is only one predicted time point, the first dimension is dropped in both cases. The \code{\link{calibrationTest}}- and \code{\link{pit}}-methods are just convenient wrappers around the respective default methods. } \references{ Czado, C., Gneiting, T. and Held, L. (2009): Predictive model assessment for count data. \emph{Biometrics}, \bold{65} (4), 1254-1261. \doi{10.1111/j.1541-0420.2009.01191.x} Paul, M. and Held, L. (2011): Predictive assessment of a non-linear random effects model for multivariate time series of infectious disease counts. \emph{Statistics in Medicine}, \bold{30} (10), 1118-1136. 
\doi{10.1002/sim.4177} } \author{ Sebastian Meyer and Michaela Paul } \seealso{ \code{vignette("hhh4")} and \code{vignette("hhh4_spacetime")} } \examples{ ### univariate salmonella agona count time series data("salmonella.agona") ## convert from old "disProg" to new "sts" class salmonella <- disProg2sts(salmonella.agona) ## generate formula for temporal and seasonal trends f.end <- addSeason2formula(~1 + t, S=1, period=52) model <- list(ar = list(f = ~1), end = list(f = f.end), family = "NegBin1") ## fit the model result <- hhh4(salmonella, model) ## do sequential one-step-ahead predictions for the last 5 weeks pred <- oneStepAhead(result, nrow(salmonella)-5, type="rolling", which.start="final", verbose=FALSE) pred quantile(pred) confint(pred) ## simple plot of the 80% one-week-ahead prediction interval ## and point forecasts if (requireNamespace("fanplot")) plot(pred, probs = c(.1,.9), means.args = list()) \dontshow{ ## test equivalence of parallelized version if (.Platform$OS.type == "unix" && isTRUE(parallel::detectCores() > 1)) stopifnot(identical(pred, oneStepAhead(result, nrow(salmonella)-5, type="rolling", which.start="final", verbose=FALSE, cores=2))) } ## note: oneStepAhead(..., type="final") just means fitted values stopifnot(identical( unname(oneStepAhead(result, nrow(salmonella)-5, type="final", verbose=FALSE)$pred), unname(tail(fitted(result), 5)))) ## compute scores of the one-step-ahead predictions (sc <- scores(pred)) ## the above uses the scores-method for "oneStepAhead" predictions, ## which is a simple wrapper around the default method: scores(x = pred$observed, mu = pred$pred, size = exp(pred$psi)) ## scores with respect to the fitted values are similar (scFitted <- scores(result, subset = nrow(salmonella)-(4:0))) \dontshow{ ## test that scFitted is equivalent to scores(oneStepAhead(..., type = "final")) stopifnot(all.equal( scFitted, scores(oneStepAhead(result, nrow(salmonella)-5, type="final", verbose=FALSE)), check.attributes = FALSE)) } ## test if the one-step-ahead predictions are calibrated calibrationTest(pred) # p = 0.8746 ## the above uses the calibrationTest-method for "oneStepAhead" predictions, ## which is a simple wrapper around the default method: calibrationTest(x = pred$observed, mu = pred$pred, size = exp(pred$psi)) ## we can also test calibration of the fitted values ## using the calibrationTest-method for "hhh4" fits calibrationTest(result, subset = nrow(salmonella)-(4:0)) ## plot a (non-randomized) PIT histogram for the predictions pit(pred) ## the above uses the pit-method for "oneStepAhead" predictions, ## which is a simple wrapper around the default method: pit(x = pred$observed, pdistr = "pnbinom", mu = pred$pred, size = exp(pred$psi)) ### multivariate measles count time series ## (omitting oneStepAhead forecasts here to keep runtime low) data("measlesWeserEms") ## simple hhh4 model with random effects in the endemic component measlesModel <- list( end = list(f = addSeason2formula(~0 + ri(type="iid"))), ar = list(f = ~1), family = "NegBin1") measlesFit <- hhh4(measlesWeserEms, control = measlesModel) ## assess overall (in-sample) calibration of the model, i.e., ## if the observed counts are from the fitted NegBin distribution calibrationTest(measlesFit) # default is DSS (not suitable for low counts) calibrationTest(measlesFit, which = "rps") # p = 0.9322 calibrationTest(measlesFit, which = "logs") # p = 0.7238 ## to assess calibration in the second year for a specific district calibrationTest(measlesFit, subset = 53:104, units = "03452", which = 
"rps") pit(measlesFit, subset = 53:104, units = "03452") ### For a more sophisticated multivariate analysis of ### areal time series of influenza counts - data("fluBYBW") - ### see the (computer-intensive) demo("fluBYBW") script: demoscript <- system.file(file.path("demo", "fluBYBW.R"), package = "surveillance") demoscript #file.show(demoscript) } \keyword{univar} \keyword{htest} \keyword{dplot} \keyword{ts} surveillance/man/ks.plot.unif.Rd0000644000176200001440000000447214004512307016345 0ustar liggesusers\encoding{latin1} \name{ks.plot.unif} \alias{ks.plot.unif} \title{ Plot the ECDF of a uniform sample with Kolmogorov-Smirnov bounds } \description{ This plot function takes a univariate sample that should be tested for a U(0,1) distribution, plots its empirical cumulative distribution function (\code{\link{ecdf}}), and adds a confidence band by inverting the corresponding Kolmogorov-Smirnov test (\code{\link{ks.test}}). The uniform distribution is rejected if the ECDF is not completely inside the confidence band. } \usage{ ks.plot.unif(U, conf.level = 0.95, exact = NULL, col.conf = "gray", col.ref = "gray", xlab = expression(u[(i)]), ylab = "Cumulative distribution") } \arguments{ \item{U}{ numeric vector containing the sample. Missing values are (silently) ignored. } \item{conf.level}{ confidence level for the K-S-test (defaults to 0.95), can also be a vector of multiple levels. } \item{exact}{see \code{\link{ks.test}}.} \item{col.conf}{ colour of the confidence lines. } \item{col.ref}{ colour of the diagonal reference line. } \item{xlab, ylab}{ axis labels. } } \value{ \code{NULL} (invisibly). } \author{ Michael H\enc{}{oe}hle and Sebastian Meyer. The code contains segments originating from the source of the \link{ks.test} function \url{https://svn.R-project.org/R/trunk/src/library/stats/R/ks.test.R}, which is Copyright (C) 1995-2012 The R Core Team available under GPL-2 (or later) and C functionality from \url{https://svn.R-project.org/R/trunk/src/library/stats/src/ks.c}, which is copyright (C) 1999-2009 the R Core Team and available under GPL-2 (or later). Somewhat hidden in their \file{ks.c} file is a statement that part of their code is based on code published in Marsaglia et al. (2003). } \references{ George Marsaglia and Wai Wan Tsang and Jingbo Wang (2003): Evaluating Kolmogorov's distribution. \emph{Journal of Statistical Software}, \bold{8} (18). \doi{10.18637/jss.v008.i18} } \seealso{ \code{\link{ks.test}} for the Kolmogorov-Smirnov test, as well as \code{\link{checkResidualProcess}}, which makes use of this plot function. } \examples{ samp <- runif(99) ks.plot.unif(samp, conf.level=c(0.95, 0.99), exact=TRUE) ks.plot.unif(samp, conf.level=c(0.95, 0.99), exact=FALSE) } \keyword{hplot} \keyword{htest} surveillance/man/primeFactors.Rd0000644000176200001440000000043613122471774016461 0ustar liggesusers\name{primeFactors} \alias{primeFactors} \title{Prime Number Factorization} \description{ Computes the prime number factorization of an integer. } \usage{ primeFactors(x) } \arguments{ \item{x}{an integer} } \value{vector with prime number factorization of \code{x}} \keyword{math} surveillance/man/twinstim_simEndemicEvents.Rd0000644000176200001440000000356113165702123021215 0ustar liggesusers\name{twinstim_simEndemicEvents} \alias{simEndemicEvents} \title{ Quick Simulation from an Endemic-Only \code{twinstim} } \description{ In \emph{endemic-only} \code{\link{twinstim}} models, the conditional intensity is a piecewise constant function independent from the history of the process. 
This allows for a much more efficient simulation algorithm than via Ogata's modified thinning as in the general \code{\link{simulate.twinstim}} method. } \usage{ simEndemicEvents(object, tiles) } \arguments{ \item{object}{ an object of class \code{"\link{twinstim}"} (with the \code{model} component retained; otherwise try \code{object <- \link[=update.twinstim]{update}(object, model = TRUE)}). } \item{tiles}{ an object inheriting from \code{"\linkS4class{SpatialPolygons}"}, which represents the tiles of the original data's \code{stgrid} (see, e.g., \code{levels(environment(object)$gridTiles)}). } } \value{ a \code{\linkS4class{SpatialPointsDataFrame}} } \author{ Sebastian Meyer } \seealso{ the general simulation method \code{\link{simulate.twinstim}} } \examples{ data("imdepi", "imdepifit") load(system.file("shapes", "districtsD.RData", package="surveillance")) ## Fit an endemic-only twinstim() m_noepi <- update(imdepifit, epidemic = ~0, siaf = NULL, model = TRUE) ## Simulate events from the above endemic model set.seed(1) s1 <- simEndemicEvents(m_noepi, tiles = districtsD) class(s1) # just a "SpatialPointsDataFrame" summary(s1) plot(s1, col = s1$type, cex = 0.5); plot(imdepi$W, lwd = 2, add = TRUE) \dontrun{ ## the general simulation method takes several seconds s0 <- simulate(m_noepi, seed = 1, data = imdepi, tiles = districtsD) class(s0) # gives a full "simEpidataCS" with several methods applicable methods(class = "epidataCS") plot(s0, "time") plot(s0, "space", points.args = list(pch = 3), lwd = 2) } } \keyword{datagen} \keyword{models} surveillance/man/sts_animate.Rd0000644000176200001440000001232014004512307016311 0ustar liggesusers\name{sts_animate} \alias{animate.sts} \title{ Animated Maps and Time Series of Disease Counts or Incidence } \description{ The \code{animate}-method for \code{\linkS4class{sts}} objects supersedes the \code{\link{stsplot}} type \code{observed~1|unit*time} implemented by the function \code{\link{stsplot_spacetime}}. Maps generated by \code{\link{stsplot_space}} are sequentially plotted along time (optionally showing cumulative counts/incidence), with an optional time series chart below the map to track the epidemic curve. It is worth using functionality of the \pkg{animation} package (e.g., \code{\link[animation]{saveHTML}}) to directly export the animation into a useful format. See Meyer and Held (2014, Supplement A) for an example with the \code{\link{fluBYBW}} data. } \usage{ \method{animate}{sts}(object, tps = NULL, cumulative = FALSE, population = NULL, at = 10, ..., timeplot = list(pos = 1, size = 0.3, fill = TRUE), sleep = 0.5, verbose = interactive(), draw = TRUE) } \arguments{ \item{object}{ an object of class \code{"\linkS4class{sts}"} or a matrix of counts, i.e., \code{observed(stsObj)}, where especially \code{colnames(x)} have to be contained in \code{row.names(map)}. If a matrix, the \code{map} object has to be provided explicitly (as part of \code{\dots}). } \item{tps}{ a numeric vector of one or more time points at which to plot the map. The default \code{tps=NULL} means the whole time period \code{1:nrow(object)}. } \item{cumulative}{ logical specifying if the cumulative counts/incidence over time should be plotted. The cumulative incidence is relative to the population from the first time point \code{tps[1]} throughout the whole animation, while \code{cumulative=FALSE} computes the incidence from the current population numbers. } \item{population,at,\dots}{ arguments for \code{\link{stsplot_space}}. 
}
  \item{timeplot}{
    if a list and package \CRANpkg{gridExtra} is available, a time series
    chart of the counts along the selected time points \code{tps} will be
    plotted next to the map. The list elements determine both the
    positioning of this plot (\code{pos}, \code{size}, and \code{fill})
    and its appearance. The default \code{pos=1} and \code{size=0.3}
    arguments put the time series plot below the map, using 30\% of the
    total plot height. The logical value \code{fill} indicates whether to
    make the panel as big as possible (default: TRUE). An alternative to
    \code{fill=FALSE} is to manually specify an \code{aspect} (ratio)
    value in \code{timeplot}. Other list elements are arguments for the
    internal (and currently undocumented) function
    \code{stsplot_timeSimple}. For example, \code{inactive} and
    \code{active} are lists of graphical parameters (e.g., \code{col})
    determining the appearance of the bars (e.g., default color is grey
    when inactive and black when active), and the boolean \code{as.Date}
    determines whether dates should be put on the x-axis (instead of the
    \code{tps} indexes).
  }
  \item{sleep}{
    time to wait (\code{Sys.sleep}) between subsequent snapshots (only if
    \code{\link{dev.interactive}}), in seconds.
  }
  \item{verbose}{
    logical indicating if a \code{\link{txtProgressBar}} should be shown
    during generation of the animation -- which may take a while.
    Default is to do so in \code{\link{interactive}} sessions.
  }
  \item{draw}{
    logical indicating if the produced plots at each time point should
    be drawn directly (the default) or not.
    The setting \code{draw = FALSE} is useful if one would like to
    manually arrange the plots, which are always returned invisibly in a
    list of length \code{length(tps)}.
  }
}
\value{
  (invisibly) a list of the \code{length(tps)} sequential plot objects.
  These are of class \code{"gtable"} (from \CRANpkg{gtable}) if the
  \code{timeplot} is included, otherwise of class
  \code{"\link[lattice:trellis.object]{trellis}"}.
}
\references{
  Meyer, S. and Held, L. (2014):
  Power-law models for infectious disease spread.
  \emph{The Annals of Applied Statistics}, \bold{8} (3), 1612-1639.
  \doi{10.1214/14-AOAS743}.\cr
  Supplement A is available from \url{https://www.biostat.uzh.ch/static/powerlaw/}.
}
\author{
  Sebastian Meyer
}
\seealso{
  the other plot types documented in \code{\link{stsplot}} for static
  time series plots and maps.
}
\examples{
data("measlesWeserEms")

## animate the weekly counts of measles (during weeks 12-16 only, for speed)
if (require("animation")) {
    oldwd <- setwd(tempdir())  # to not clutter up the current working dir
    saveHTML(animate(measlesWeserEms, tps=12:16),
             title="Evolution of the measles epidemic in the Weser-Ems region",
             ani.width=500, ani.height=600)
    setwd(oldwd)
}

\dontrun{
## animate the weekly incidence of measles (per 100'000 inhabitants),
## and label the time series plot with dates in a specified format
animate(measlesWeserEms, tps=12:16,
        population = measlesWeserEms@map$POPULATION / 100000,
        timeplot = list(as.Date = TRUE,
                        scales = list(x = list(format = "\%G/\%V"))))
}
}
\keyword{hplot}
\keyword{dynamic}
\keyword{spatial}
surveillance/man/zetaweights.Rd0000644000176200001440000000300012316635114016341 0ustar liggesusers\name{zetaweights}
\alias{zetaweights}
\title{
  Power-Law Weights According to Neighbourhood Order
}
\description{
  Compute power-law weights with decay parameter \code{d}
  based on a matrix of neighbourhood orders \code{nbmat}
  (e.g., as obtained via \code{\link{nbOrder}}).
Without normalization and truncation, this is just \eqn{o^{-d}} (where \eqn{o} is a neighbourhood order). This function is mainly used internally for \code{\link{W_powerlaw}} weights in \code{\link{hhh4}} models. } \usage{ zetaweights(nbmat, d = 1, maxlag = max(nbmat), normalize = FALSE) } \arguments{ \item{nbmat}{numeric, symmetric matrix of neighbourhood orders.} \item{d}{single numeric decay parameter (default: 1). Should be positive.} \item{maxlag}{single numeric specifying an upper limit for the power law. For neighbourhood orders > \code{maxlag}, the resulting weight is 0. Defaults to no truncation.} \item{normalize}{Should the resulting weight matrix be normalized such that rows sum to 1?} } \value{ a numeric matrix with same dimensions and names as the input matrix. } \author{ Sebastian Meyer } \seealso{\code{\link{W_powerlaw}}} \examples{ nbmat <- matrix(c(0,1,2,2, 1,0,1,1, 2,1,0,2, 2,1,2,0), 4, 4, byrow=TRUE) zetaweights(nbmat, d=1, normalize=FALSE) # harmonic: o^-1 zetaweights(nbmat, d=1, normalize=TRUE) # rowSums=1 zetaweights(nbmat, maxlag=1, normalize=FALSE) # results in adjacency matrix } \keyword{spatial} \keyword{utilities} surveillance/man/glm_epidataCS.Rd0000644000176200001440000000570513165513254016520 0ustar liggesusers\name{glm_epidataCS} \alias{glm_epidataCS} \title{ Fit an Endemic-Only \code{twinstim} as a Poisson-\code{glm} } \description{ An endemic-only \code{\link{twinstim}} is equivalent to a Poisson regression model for the aggregated number of events, \eqn{Y_{[t][\bm{s}],k}}, by time-space-type cell. The rate of the corresponding Poisson distribution is \eqn{e_{[t][\bm{s}]} \cdot \lambda([t],[\bm{s}],k)}, where \eqn{e_{[t][\bm{s}]} = |[t]| |[\bm{s}]|} is a multiplicative offset. Thus, the \code{\link{glm}} function can be used to fit an endemic-only \code{twinstim}. However, wrapping in \code{glm} is usually slower. } \usage{ glm_epidataCS(formula, data, ...) } \arguments{ \item{formula}{ an endemic model formula without response, comprising variables of \code{data$stgrid} and possibly the variable \code{type} for a type-specific model. } \item{data}{ an object of class \code{"\link{epidataCS}"}. } \item{\dots}{ arguments passed to \code{\link{glm}}. Note that \code{family} and \code{offset} are fixed internally. 
} } \value{ a \code{\link{glm}} } \author{ Sebastian Meyer } \examples{ data("imdepi", "imdepifit") ## Fit an endemic-only twinstim() and an equivalent model wrapped in glm() fit_twinstim <- update(imdepifit, epidemic = ~0, siaf = NULL, subset = NULL, optim.args=list(control=list(trace=0)), verbose=FALSE) fit_glm <- glm_epidataCS(formula(fit_twinstim)$endemic, data = imdepi) ## Compare the coefficients cbind(twinstim = coef(fit_twinstim), glm = coef(fit_glm)) \dontshow{ stopifnot(all.equal(coef(fit_glm), coef(fit_twinstim), tolerance = 1e-6, check.attributes = FALSE)) if (surveillance.options("allExamples")) { ## also check type-specific model: stopifnot(all.equal( coef(glm_epidataCS(~0+type, imdepi)), coef(update(fit_twinstim, endemic=~(1|type))), tolerance = 1e-6, check.attributes = FALSE)) } } ### also compare to an equivalent endemic-only hhh4() fit ## first need to aggregate imdepi into an "sts" object load(system.file("shapes", "districtsD.RData", package="surveillance")) imdsts <- epidataCS2sts(imdepi, freq = 12, start = c(2002, 1), neighbourhood = NULL, tiles = districtsD, popcol.stgrid = "popdensity") ## determine the correct offset to get an equivalent model offset <- 2 * rep(with(subset(imdepi$stgrid, !duplicated(BLOCK)), stop - start), ncol(imdsts)) * sum(districtsD$POPULATION) * population(imdsts) ## fit the model using hhh4() fit_hhh4 <- hhh4(imdsts, control = list( end = list( f = addSeason2formula(~I(start/365-3.5), period=365, timevar="start"), offset = offset ), family = "Poisson", subset = 1:nrow(imdsts), data = list(start=with(subset(imdepi$stgrid, !duplicated(BLOCK)), start)))) summary(fit_hhh4) stopifnot(all.equal(coef(fit_hhh4), coef(fit_glm), check.attributes=FALSE)) } \keyword{models} surveillance/man/findK.Rd0000644000176200001440000000213113122471774015050 0ustar liggesusers\name{findK} \alias{findK} \title{Find Reference Value} \description{ Calculates the reference value \code{k} for a Poisson or binomial CUSUM designed to detect a shift from \eqn{\theta_0} to \eqn{\theta_1} } \usage{ findK(theta0, theta1, distr = c("poisson", "binomial"), roundK = FALSE, digits = 1, ...) } \arguments{ \item{theta0}{ in-control parameter } \item{theta1}{ out-of-control parameter } \item{distr}{ \code{"poisson"} or \code{"binomial"} } \item{digits}{ the reference value \code{k} is rounded to \code{digits} decimal places} \item{roundK}{ For discrete data and rational reference value there is only a limited set of possible values that the CUSUM can take (and therefore there is also only a limited set of ARLs). If \code{roundK=TRUE}, integer multiples of 0.5 are avoided when rounding the reference value \code{k}, % i.e. the CUSUM can take more values.} \item{\dots}{ further arguments for the distribution function, i.e. number of trials \code{n} for the binomial CDF.} } \value{ Returns reference value \code{k}. } \keyword{models} surveillance/man/sts_observation.Rd0000644000176200001440000000145513346465003017245 0ustar liggesusers\name{sts_observation} \alias{sts_observation} \title{Create an \code{sts} object with a given observation date} \usage{ sts_observation(sts, dateObservation, cut = TRUE) } \arguments{ \item{sts}{sts-object we want to set at a previous state. Needs to include a reporting triangle.} \item{dateObservation}{Date for which we want the state. 
Needs to be in the reporting triangle dates.}

\item{cut}{Boolean indicating whether to have 0 counts after the
observation date or to simply cut the sts-object}
}
\description{
Function for creating an \code{\linkS4class{sts}} object with a given
observation date.
}
\examples{
data("salmAllOnset")
salmAllOnsety2014m01d20 <- sts_observation(salmAllOnset,
  dateObservation="2014-01-20", cut=FALSE)
plot(salmAllOnset)
lines(salmAllOnsety2014m01d20@observed, t="h", col="red")
}
surveillance/man/algo.farrington.Rd0000644000176200001440000001252513433523371017113 0ustar liggesusers\name{algo.farrington}
\alias{algo.farrington}
\encoding{latin1}
\title{Surveillance for Count Time Series Using the Classic Farrington Method}
\description{
  The function takes \code{range} values of the surveillance time
  series \code{disProgObj} and for each time point uses a GLM to
  predict the number of counts according to the procedure by
  Farrington et al. (1996). This is then compared to the observed
  number of counts. If the observation is above a specific quantile of
  the prediction interval, then an alarm is raised.
}
\usage{
algo.farrington(disProgObj, control=list(
    range=NULL, b=5, w=3, reweight=TRUE, verbose=FALSE, plot=FALSE,
    alpha=0.05, trend=TRUE, limit54=c(5,4), powertrans="2/3",
    fitFun="algo.farrington.fitGLM.fast"))
}
\arguments{
\item{disProgObj}{object of class \code{disProg} (including the
    \code{observed} and the \code{state} time series).}
\item{control}{list of control parameters
    \describe{
    \item{\code{range}}{Specifies the index of all timepoints which
        should be tested. If range is \code{NULL} the maximum number
        of possible weeks is used (i.e. as many weeks as possible while
        still having enough reference values).}
    \item{\code{b}}{how many years back in time to include when
        forming the base counts.}
    \item{\code{w}}{window size, i.e. number of weeks to include
        before and after the current week}
    \item{\code{reweight}}{Boolean specifying whether to perform the
        reweighting step}
    \item{\code{trend}}{If \code{TRUE} a trend is included and kept in
        case the conditions documented in Farrington et al. (1996) are
        met (see the results). If \code{FALSE} then NO trend is fit.}
    \item{\code{verbose}}{Boolean indicating whether to show extra
        debugging information.}
    \item{\code{plot}}{Boolean specifying whether to show the final
        GLM model fit graphically (use History|Recording to see all
        pictures).}
    \item{\code{powertrans}}{Power transformation to apply to the
        data. Use either "2/3" for skewness correction (Default),
        "1/2" for variance stabilizing transformation or "none" for no
        transformation.}
    \item{\code{alpha}}{An approximate (two-sided) \eqn{(1-\alpha)}
        prediction interval is calculated.}
    \item{\code{limit54}}{To avoid alarms in cases where the time
        series only has about 0-2 cases the algorithm uses the
        following heuristic criterion (see Section 3.8 of the
        Farrington paper) to protect against low counts: no alarm is
        sounded if fewer than \eqn{cases=5} reports were received in
        the past \eqn{period=4} weeks. \code{limit54=c(cases,period)}
        is a vector allowing the user to change these numbers. Note:
        As of version 0.9-7 the term "last" period of weeks includes
        the current week - otherwise no alarm is sounded for horribly
        large numbers if the four weeks before that are too low.}
    \item{\code{fitFun}}{String containing the name of the fit
        function to be used for fitting the GLM. The options are
        \code{algo.farrington.fitGLM.fast} (default) and
        \code{algo.farrington.fitGLM} or
        \code{algo.farrington.fitGLM.populationOffset}.
See details of \code{\link{algo.farrington.fitGLM}} for more
        information.}
    }
  }
}
\details{
  The following steps are performed according to the Farrington
  et al. (1996) paper.
  \enumerate{
  \item fit of the initial model and initial estimation of mean and
    overdispersion.
  \item calculation of the weights omega (correction for past outbreaks)
  \item refitting of the model
  \item revised estimation of overdispersion
  \item rescaled model
  \item omission of the trend, if it is not significant
  \item repetition of the whole procedure
  \item calculation of the threshold value
  \item computation of exceedance score
  }
}
\value{
  An object of class \code{"survRes"}.
}
\examples{
#Read Salmonella Agona data
data("salmonella.agona")

#Do surveillance for the last 100 weeks.
n <- length(salmonella.agona$observed)
#Set control parameters.
control <- list(b=4,w=3,range=(n-100):n,reweight=TRUE, verbose=FALSE,alpha=0.01)
res <- algo.farrington(salmonella.agona,control=control)
#Plot the result.
plot(res,disease="Salmonella Agona",method="Farrington")

\dontrun{
#Generate Poisson counts and convert into an "sts" object
set.seed(123)
x <- rpois(520,lambda=1)
sts <- sts(observed=x, state=x*0, freq=52)

#Compare timing of the two possible fitters for algo.farrington (here using S4)
system.time( sts1 <- farrington(sts, control=list(range=312:520,
                       fitFun="algo.farrington.fitGLM.fast")))
system.time( sts2 <- farrington(sts, control=list(range=312:520,
                       fitFun="algo.farrington.fitGLM")))

#Check if results are the same
stopifnot(upperbound(sts1) == upperbound(sts2))
}
}
\author{M. \enc{Höhle}{Hoehle}}
\seealso{
  \code{\link{algo.farrington.fitGLM}},
  \code{\link{algo.farrington.threshold}}

  An improved Farrington algorithm is available as function
  \code{\link{farringtonFlexible}}.
}
\keyword{classif}
\references{
  Farrington, C.P., Andrews, N.J., Beale, A.D. and Catchpole, M.A.
  (1996): A statistical algorithm for the early detection of outbreaks
  of infectious disease. \emph{J. R. Statist. Soc. A}, \bold{159},
  547-563.
}
surveillance/man/algo.rogerson.Rd0000644000176200001440000001044613634201155016574 0ustar liggesusers\name{algo.rogerson}
\alias{algo.rogerson}
\title{Modified CUSUM method as proposed by Rogerson and Yamada (2004)}
\description{
  Modified Poisson CUSUM method that allows for a time-varying
  in-control parameter \eqn{\theta_{0,t}} as proposed by Rogerson and
  Yamada (2004). The same approach can be applied to binomial data if
  \code{distribution="binomial"} is specified.
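% A minimal sketch (Rd source comment, not run) of the time-varying
% CUSUM recursion given in the Details section below, assuming
% hypothetical vectors x (counts), kt (reference values k_t),
% ht (decision intervals h_t) and a scalar threshold h:
%   S <- 0; alarm <- logical(length(x))
%   for (t in seq_along(x)) {
%     S <- max(0, S + (h/ht[t]) * (x[t] - kt[t]))  # c_t = h/h_t
%     alarm[t] <- (S >= h)
%   }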
} \usage{ algo.rogerson(disProgObj, control = list(range = range, theta0t = NULL, ARL0 = NULL, s = NULL, hValues = NULL, distribution = c("poisson","binomial"), nt = NULL, FIR=FALSE, limit = NULL, digits = 1)) } \arguments{ \item{disProgObj}{object of class \code{disProg} that includes a matrix with the observed number of counts} \item{control}{ list with elements \describe{ \item{range}{vector of indices in the observed matrix of \code{disProgObj} to monitor} \item{theta0t}{matrix with in-control parameter, must be specified} \item{ARL0 }{ desired average run length \eqn{\gamma} } \item{s}{change to detect, see \code{\link{findH}} for further details} \item{hValues}{matrix with decision intervals \code{h} for a sequence of values \eqn{\theta_{0,t}} (in the range of \code{theta0t}) } \item{distribution}{\code{"poisson"} or \code{"binomial"} } \item{nt}{optional matrix with varying sample sizes for the binomial CUSUM} \item{FIR}{a FIR CUSUM with head start \eqn{\frac{\code{h}}{2}} is applied to the data if \code{TRUE}, otherwise no head start is used; see details } \item{limit}{numeric that determines the procedure after an alarm is given, see details} \item{digits}{the reference value and decision interval are rounded to \code{digits} decimal places. Defaults to 1 and should correspond to the number of digits used to compute \code{hValues} } } } } \details{ The CUSUM for a sequence of Poisson or binomial variates \eqn{x_t} is computed as \deqn{S_t = \max \{0, S_{t-1} + c_t (x_t- k_t)\} , \, t=1,2,\ldots ,} where \eqn{S_0=0} and \eqn{c_t=\frac{h}{h_t} }; \eqn{k_t} and \eqn{h_t} are time-varying reference values and decision intervals. An alarm is given at time \eqn{t} if \eqn{S_t \geq h}. If \code{FIR=TRUE}, the CUSUM starts with a head start value \eqn{S_0=\frac{\code{h}}{2}} at time \eqn{t=0}. After an alarm is given, the FIR CUSUM starts again at this head start value. The procedure after the CUSUM gives an alarm can be determined by \code{limit}. Suppose that the CUSUM signals at time \eqn{t}, i.e. \eqn{S_t \geq h}. For numeric values of \code{limit}, the CUSUM is bounded above after an alarm is given, % at time \eqn{t-1}, i.e. \eqn{S_{t}} is set to \eqn{ \min\{\code{limit} \cdot h,S_{t}\} }. %\deqn{S_{t} = \max \{0, S_{t-1} + c_t(x_t - k_t)\}. } Using \code{limit}=0 corresponds to resetting \eqn{S_t} to zero after an alarm as proposed in the original formulation of the CUSUM. If \code{FIR=TRUE}, \eqn{S_{t}} is reset to \eqn{ \frac{\code{h}}{2} } (i.e. \code{limit}=\eqn{\frac{\code{h}}{2} } ). If \code{limit=NULL}, no resetting occurs after an alarm is given. } \note{\code{algo.rogerson} is a univariate CUSUM method. If the data are available in several regions (i.e. \code{observed} is a matrix), multiple univariate CUSUMs are applied to each region. 
} \value{Returns an object of class \code{survRes} with elements \item{alarm}{indicates whether the CUSUM signaled at time \eqn{t} or not (1 = alarm, 0 = no alarm) } \item{upperbound}{CUSUM values \eqn{S_{t}} } \item{disProgObj}{\code{disProg} object } \item{control}{list with the alarm threshold \eqn{h} and the specified control object} } \examples{ # simulate data (seasonal Poisson) set.seed(123) t <- 1:300 lambda <- exp(-0.5 + 0.4 * sin(2*pi*t/52) + 0.6 * cos(2*pi*t/52)) data <- create.disProg(week = t, observed = rpois(length(lambda), lambda)) # determine a matrix with h values hVals <- hValues(theta0 = 10:150/100, ARL0=500, s = 1, distr = "poisson") # apply modified Poisson CUSUM res <- algo.rogerson(data, control=c(hVals, list(theta0t=lambda,range=1:300))) plot(res) } \references{ Rogerson, P. A. and Yamada, I. Approaches to Syndromic Surveillance When Data Consist of Small Regional Counts. Morbidity and Mortality Weekly Report, 2004, 53/Supplement, 79-85 } \seealso{\code{\link{hValues}}} \keyword{classif} surveillance/man/epidataCS_animate.Rd0000644000176200001440000001360213302745730017351 0ustar liggesusers\encoding{latin1} \name{epidataCS_animate} \alias{animate.epidataCS} \title{ Spatio-Temporal Animation of a Continuous-Time Continuous-Space Epidemic } \description{ Function for the animation of continuous-time continuous-space epidemic data, i.e. objects inheriting from class \code{"epidataCS"}. There are three types of animation, see argument \code{time.spacing}. Besides the on-screen plotting in the interactive \R session, it is possible and recommended to redirect the animation to an off-screen graphics device using the contributed \R package \pkg{animation}. For instance, the animation can be watched and navigated in a web browser via \code{\link[animation]{saveHTML}} (see Examples). } \usage{ \method{animate}{epidataCS}(object, interval = c(0,Inf), time.spacing = NULL, nmax = NULL, sleep = NULL, legend.opts = list(), timer.opts = list(), pch = 15:18, col.current = "red", col.I = "#C16E41", col.R = "#B3B3B3", col.influence = NULL, main = NULL, verbose = interactive(), ...) } \arguments{ \item{object}{ an object inheriting from class \code{"epidataCS"}. } \item{interval}{time range of the animation.} \item{time.spacing}{ time interval for the animation steps.\cr If \code{NULL} (the default), the events are plotted sequentially by producing a snapshot at every time point where an event occurred. Thus, it is just the \emph{ordering} of the events, which is shown.\cr To plot the appearance of events proportionally to the exact time line, \code{time.spacing} can be set to a numeric value indicating the period of time between consecutive snapshots. Then, for each time point in \code{seq(0, end, by = time.spacing)} the current state of the epidemic can be seen and an additional timer indicates the current time (see \code{timer.opts} below).\cr If \code{time.spacing = NA}, then the time spacing is automatically determined in such a way that \code{nmax} snapshots result. In this case, \code{nmax} must be given a finite value. } \item{nmax}{ maximum number of snapshots to generate. The default \code{NULL} means to take the value from \code{ani.options("nmax")} if the \pkg{animation} package is available, and no limitation (\code{Inf}) otherwise. 
}
  \item{sleep}{
    numeric scalar specifying the artificial pause in seconds between
    two time points (using \code{\link{Sys.sleep}}), or \code{NULL}
    (default), when this is taken from \code{ani.options("interval")}
    if the \pkg{animation} package is available, and set to 0.1
    otherwise. Note that \code{sleep} is ignored on non-interactive
    devices (see \code{\link{dev.interactive}}), e.g., if generating
    an animation inside \pkg{animation}'s
    \code{\link[animation]{saveHTML}}.
  }
  \item{pch, col}{
    vectors of length equal to the number of event types specifying
    the point symbols and colors for events to plot (in this order).
    The vectors are recycled if necessary.
  }
  \item{legend.opts}{
    either a list of arguments passed to the \code{\link{legend}}
    function or \code{NULL} (or \code{NA}), in which case no legend
    will be plotted. All necessary arguments have sensible defaults
    and need not be specified.
  }
  \item{timer.opts}{
    either a list of arguments passed to the \code{\link{legend}}
    function or \code{NULL} (or \code{NA}), in which case no timer
    will be plotted. All necessary arguments have sensible defaults
    and need not be specified, i.e.
    \describe{
      \item{\code{x}:}{\code{"bottomright"}}
      \item{\code{title}:}{\code{"time"}}
      \item{\code{box.lty}:}{\code{0}}
      \item{\code{adj}:}{\code{c(0.5,0.5)}}
      \item{\code{inset}:}{\code{0.01}}
      \item{\code{bg}:}{\code{"white"}}
    }
    Note that the argument \code{legend}, which is the current time of
    the animation, cannot be modified.
  }
  \item{col.current}{color of events when occurring (new).}
  \item{col.I}{color once infectious.}
  \item{col.R}{color once an event has \dQuote{recovered}. If
    \code{NA}, then recovered events will not be shown.}
  \item{col.influence}{color with which the influence region is drawn.
    Use \code{NULL} (default) if no influence regions should be drawn.}
  \item{main}{optional main title placed above the map.}
  \item{verbose}{logical specifying if a (textual) progress bar should
    be shown during snapshot generation. This is especially useful if
    the animation is produced within \code{\link[animation]{saveHTML}}
    or similar.}
  \item{\dots}{
    further graphical parameters passed to the plot-method of the
    \code{\link{SpatialPolygons-class}}.
  }
}
%\value{
%  invisibly returns \code{NULL}.
%}
\author{
  Sebastian Meyer with documentation contributions by Michael
  H\enc{ö}{oe}hle
}
\seealso{
  \code{\link{plot.epidataCS}} for plotting the numbers of events by
  time (aggregated over space) or the locations of the events in the
  observation region \code{W} (aggregated over time).

  The contributed \R package \pkg{animation}.
}
\examples{
data("imdepi")
imdepiB <- subset(imdepi, type == "B")

\dontrun{
# Animate the first year of type B with a step size of 7 days
animate(imdepiB, interval=c(0,365), time.spacing=7, nmax=Inf, sleep=0.1)

# Sequential animation of type B events during the first year
animate(imdepiB, interval=c(0,365), time.spacing=NULL, sleep=0.1)

# Animate the whole time range but with nmax=20 snapshots only
animate(imdepiB, time.spacing=NA, nmax=20, sleep=0.1)
}

# Such an animation can be saved in various ways using the tools of
# the animation package, e.g., saveHTML()
if (require("animation")) {
  oldwd <- setwd(tempdir())  # to not clutter up the current working dir
  saveHTML(animate(imdepiB, interval = c(0,365), time.spacing = 7),
           nmax = Inf, interval = 0.2, loop = FALSE,
           title = "Animation of the first year of type B events")
  setwd(oldwd)
}
}
\keyword{hplot}
\keyword{dynamic}
\keyword{spatial}
surveillance/man/algo.farrington.assign.weights.Rd0000644000176200001440000000123613122471774022050 0ustar liggesusers\name{algo.farrington.assign.weights}
\alias{algo.farrington.assign.weights}
\title{Assign weights to base counts}
\description{
  Weights are assigned according to the Anscombe residuals
}
\usage{
algo.farrington.assign.weights(s, weightsThreshold=1)
}
\arguments{
  \item{s}{Vector of standardized Anscombe residuals}
  \item{weightsThreshold}{A scalar indicating when observations are
    seen as outliers. In the original Farrington proposal the value was
    1 (default value), in the improved version this value is suggested
    to be 2.58.}
}
\value{Weights according to the residuals}
\seealso{\code{\link{anscombe.residuals}}}
\keyword{regression}
surveillance/man/addSeason2formula.Rd0000644000176200001440000000523613122471774017373 0ustar liggesusers\name{addSeason2formula}
\alias{addSeason2formula}
\title{
  Function that adds a sine-/cosine formula to an existing formula.
}
\description{
  This function helps to construct a \code{\link{formula}} object that
  can be used in a call to \code{\link{hhh4}} to model seasonal
  variation via a sum of sine and cosine terms.
}
\usage{
addSeason2formula(f = ~1, S = 1, period = 52, timevar = "t")
}
\arguments{
  \item{f}{
    formula that the seasonal terms should be added to,
    defaults to an intercept \code{~1}.
  }
  \item{S}{
    number of sine and cosine terms. If \code{S} is a vector,
    unit-specific seasonal terms are created.
  }
  \item{period}{
    period of the season, defaults to 52 for weekly data.
  }
  \item{timevar}{
    the time variable in the model. Defaults to \code{"t"}.
  }
}
\details{
  The function adds the seasonal terms
  \deqn{
    \sum_{s=1}^\code{S} \gamma_s \sin(\frac{2\pi s}{\code{period}} t)
    + \delta_s \cos(\frac{2\pi s}{\code{period}} t),
  }{
    sum_s gamma_s * sin(2*pi*s/period * t) + delta_s * cos(2*pi*s/period * t),
  }
  where \eqn{\gamma_s}{gamma_s} and \eqn{\delta_s}{delta_s} are the
  unknown parameters and \eqn{t}, \eqn{t = 1, 2, \ldots} denotes the
  time variable \code{timevar}, to an existing formula \code{f}.

  Note that the seasonal terms can also be expressed as
  \deqn{\gamma_{s} \sin(\frac{2\pi s}{\code{period}} t)
    + \delta_{s} \cos(\frac{2\pi s}{\code{period}} t)
    = A_s \sin(\frac{2\pi s}{\code{period}} t + \epsilon_s)}{%
    \gamma_s sin(2*pi*s/period * t) + \delta_s cos(2*pi*s/period * t)
    = A_s sin(2*pi*s/period * t + \epsilon_s)}
  with amplitude \eqn{A_s=\sqrt{\gamma_s^2 +\delta_s^2}}{A_s=sqrt{\gamma_s^2 +\delta_s^2}}
  and phase shift \eqn{\tan(\epsilon_s) = \delta_s / \gamma_s}.
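% In plain R (a sketch in this source comment, with hypothetical
% coefficient vectors gamma and delta taken from a fit), the above
% identities read:
%   A   <- sqrt(gamma^2 + delta^2)  # amplitudes A_s
%   eps <- atan2(delta, gamma)      # phase shifts epsilon_s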
The amplitude and phase shift can be obtained from a fitted \code{\link{hhh4}} model via \code{coef(..., amplitudeShift = TRUE)}, see \code{\link{coef.hhh4}}. } \value{ Returns a \code{\link{formula}} with the seasonal terms added and its environment set to \code{\link{.GlobalEnv}}. Note that to use the resulting formula in \code{\link{hhh4}}, a time variable named as specified by the argument \code{timevar} must be available. } \author{ M. Paul, with contributions by S. Meyer } \seealso{ \code{\link{hhh4}}, \code{\link{fe}}, \code{\link{ri}} } \examples{ # add 2 sine/cosine terms to a model with intercept and linear trend addSeason2formula(f = ~ 1 + t, S = 2) # the same for monthly data addSeason2formula(f = ~ 1 + t, S = 2, period = 12) # different number of seasons for a bivariate time series addSeason2formula(f = ~ 1, S = c(3, 1), period = 52) } surveillance/man/hhh4_simulate_scores.Rd0000644000176200001440000000674213671635730020150 0ustar liggesusers\name{hhh4_simulate_scores} \alias{scores.hhh4sims} \alias{scores.hhh4simslist} \title{ Proper Scoring Rules for Simulations from \code{hhh4} Models } \description{ Calculate proper scoring rules based on simulated predictive distributions. } \usage{ \method{scores}{hhh4sims}(x, which = "rps", units = NULL, ..., drop = TRUE) \method{scores}{hhh4simslist}(x, ...) } \arguments{ \item{x}{ an object of class \code{"hhh4sims"} (as resulting from the \code{\link[=simulate.hhh4]{simulate}}-method for \code{"\link{hhh4}"} models if \code{simplify = TRUE} was set), or an \code{"hhh4simslist"}, i.e., a list of such simulations potentially obtained from different model fits (using the same simulation period). } \item{which}{ a character vector indicating which proper scoring rules to compute. By default, only the ranked probability score (\code{"rps"}) is calculated. Other options include \code{"logs"} and \code{"dss"}. } \item{units}{ if non-\code{NULL}, an integer or character vector indexing the columns of \code{x} for which to compute the scores. } \item{drop}{ a logical indicating if univariate dimensions should be dropped (the default). } \item{\dots}{ unused (argument of the generic). } } \details{ This implementation can only compute \emph{univariate scores}, i.e., independently for each time point. The logarithmic score is badly estimated if the domain is large and there are not enough samples to cover the underlying distribution in enough detail (the score becomes infinite when an observed value does not occur in the samples). An alternative is to use kernel density estimation as implemented in the \R package \CRANpkg{scoringRules}. } \author{ Sebastian Meyer } \examples{ data("salmAllOnset") ## fit a hhh4 model to the first 13 years salmModel <- list(end = list(f = addSeason2formula(~1 + t)), ar = list(f = ~1), family = "NegBin1", subset = 2:678) salmFit <- hhh4(salmAllOnset, salmModel) ## simulate the next 20 weeks ahead (with very small 'nsim' for speed) salmSims <- simulate(salmFit, nsim = 500, seed = 3, subset = 678 + seq_len(20), y.start = observed(salmAllOnset)[678,]) if (requireNamespace("fanplot")) plot(salmSims, "fan") ### calculate scores at each time point ## using empirical distribution of simulated counts as forecast distribution scores(salmSims, which = c("rps", "logs", "dss")) ## observed count sometimes not covered by simulations -> infinite log-score ## => for a more detailed forecast, either considerably increase 'nsim', or: ## 1. 
use continuous density() of simulated counts as forecast distribution
fi <- apply(salmSims, 1, function (x) approxfun(density(x)))
logs_kde <- mapply(function (f, y) -log(f(y)),
                   f = fi, y = observed(attr(salmSims,"stsObserved")))
cbind("empirical" = scores(salmSims, "logs"), "density" = logs_kde)
## a similar KDE approach is implemented in scoringRules::logs_sample()

## 2. average conditional predictive NegBin's of simulated trajectories,
##    currently only implemented in HIDDA.forecasting::dhhh4sims()

\dontrun{
### produce a PIT histogram

## using empirical distribution of simulated counts as forecast distribution
pit(x = observed(attr(salmSims, "stsObserved")),
    pdistr = apply(salmSims, 1:2, ecdf))
## long-term forecast is badly calibrated (lower tail is unused, see fan above)
## we also get a warning for the same reason as infinite log-scores
}
}
\keyword{univar}
surveillance/man/algo.compare.Rd0000644000176200001440000000333513122471774016373 0ustar liggesusers\name{algo.compare}
\alias{algo.compare}
\title{Comparison of Specified Surveillance Systems using Quality Values}
\description{
  Comparison of specified surveillance algorithms using quality values.
}
\usage{
algo.compare(survResList)
}
\arguments{
  \item{survResList}{a list of survRes objects to compare via quality values.}
}
\value{
  Matrix with values from \code{\link{algo.quality}}, i.e. quality
  values for every surveillance algorithm found in \code{survResList}.
}
\seealso{\code{\link{algo.quality}}}
\examples{
# Create a test object
disProgObj <- sim.pointSource(p = 0.99, r = 0.5, length = 400,
                              A = 1, alpha = 1, beta = 0, phi = 0,
                              frequency = 1, state = NULL, K = 1.7)

# Test this object with the following methods for range = 200:400
range <- 200:400
survRes <- algo.call(disProgObj,
                     control = list(
                         list(funcName = "rki1", range = range),
                         list(funcName = "rki2", range = range),
                         list(funcName = "rki3", range = range),
                         list(funcName = "rki", range = range,
                              b = 3, w = 2, actY = FALSE),
                         list(funcName = "rki", range = range,
                              b = 2, w = 9, actY = TRUE),
                         list(funcName = "bayes1", range = range),
                         list(funcName = "bayes2", range = range),
                         list(funcName = "bayes3", range = range),
                         list(funcName = "bayes", name = "myBayes",
                              range = range, b = 1, w = 5, actY = TRUE,alpha=0.05)
                         ))
algo.compare(survRes)
}
\keyword{classif}
surveillance/man/sts_creation.Rd0000644000176200001440000000501613430612031016500 0ustar liggesusers\name{sts_creation}
\alias{sts_creation}
\title{Simulate Count Time Series with Outbreaks}
\usage{
sts_creation(theta, beta, gamma1, gamma2, m, overdispersion, dates,
             sizesOutbreak, datesOutbreak, delayMax, alpha, densityDelay)
}
\arguments{
\item{theta}{baseline frequency of reports}
\item{beta}{time trend}
\item{gamma1}{seasonality}
\item{gamma2}{seasonality}
\item{m}{seasonality}
\item{overdispersion}{\code{size} parameter of \code{\link{rnbinom}}
  for the parameterization with mean and dispersion}
\item{dates}{dates of the time series}
\item{sizesOutbreak}{sizes of all the outbreaks (vector)}
\item{datesOutbreak}{dates of all the outbreaks (vector)}
\item{delayMax}{maximal delay in time units}
\item{alpha}{alpha for getting the (1-alpha) quantile of the negative
  binomial distribution at each timepoint}
\item{densityDelay}{density distribution for the delay}
}
\description{
Function for simulating a time series and creating an
\code{\linkS4class{sts}} object.
As the counts are generated using a negative binomial distribution one also gets the (1-alpha) quantile for each timepoint (can be interpreted as an in-control upperbound for in-control values). The baseline and outbreaks are created as in Noufaily et al. (2012). } \examples{ set.seed(12345) # Time series parameters scenario4 <- c(1.6,0,0.4,0.5,2) theta <- 1.6 beta <- 0 gamma1 <-0.4 gamma2 <- 0.5 overdispersion <- 1 m <- 1 # Dates firstDate <- "2006-01-01" lengthT=350 dates <- as.Date(firstDate) + 7 * 0:(lengthT - 1) # Maximal delay in weeks D=10 # Dates and sizes of the outbreaks datesOutbreak <- as.Date(c("2008-03-30","2011-09-25")) sizesOutbreak <- c(2,5) # Delay distribution data("salmAllOnset") in2011 <- which(isoWeekYear(epoch(salmAllOnset))$ISOYear == 2011) rT2011 <- salmAllOnset@control$reportingTriangle$n[in2011,] densityDelay <- apply(rT2011,2,sum, na.rm=TRUE)/sum(rT2011, na.rm=TRUE) # alpha for the upperbound alpha <- 0.05 # Create the sts with the full time series stsSim <- sts_creation(theta=theta,beta=beta,gamma1=gamma1,gamma2=gamma2,m=m, overdispersion=overdispersion, dates=dates, sizesOutbreak=sizesOutbreak,datesOutbreak=datesOutbreak, delayMax=D,densityDelay=densityDelay, alpha=alpha) plot(stsSim) } \references{ Noufaily, A., Enki, D.G., Farrington, C.P., Garthwaite, P., Andrews, N.J., Charlett, A. (2012): An improved algorithm for outbreak detection in multiple surveillance systems. Statistics in Medicine, 32 (7), 1206-1222. } surveillance/man/twinSIR_intensityplot.Rd0000644000176200001440000001425213433343262020363 0ustar liggesusers\encoding{latin1} \name{twinSIR_intensityplot} \alias{plot.twinSIR} \alias{intensityplot.twinSIR} \alias{intensityplot.simEpidata} \title{ Plotting Paths of Infection Intensities for \code{twinSIR} Models } \description{ \code{\link{intensityplot}} methods to plot the evolution of the total infection intensity, its epidemic proportion or its endemic proportion over time. The default \code{plot} method for objects of class \code{"twinSIR"} is just a wrapper for the \code{intensityplot} method. The implementation is illustrated in Meyer et al. (2017, Section 4), see \code{vignette("twinSIR")}. } \usage{ \method{plot}{twinSIR}(x, which = c("epidemic proportion", "endemic proportion", "total intensity"), ...) \method{intensityplot}{twinSIR}(x, which = c("epidemic proportion", "endemic proportion", "total intensity"), aggregate = TRUE, theta = NULL, plot = TRUE, add = FALSE, rug.opts = list(), ...) \method{intensityplot}{simEpidata}(x, which = c("epidemic proportion", "endemic proportion", "total intensity"), aggregate = TRUE, theta = NULL, plot = TRUE, add = FALSE, rug.opts = list(), ...) } \arguments{ \item{x}{ an object of class \code{"\link{twinSIR}"} (fitted model) or \code{"\link{simEpidata}"} (simulated \code{twinSIR} epidemic), respectively. } \item{which}{ \code{"epidemic proportion"}, \code{"endemic proportion"}, or \code{"total intensity"}. Partial matching is applied. Determines whether to plot the path of the total intensity \eqn{\lambda(t)} or its epidemic or endemic proportions \eqn{\frac{e(t)}{\lambda(t)}}{e(t)/lambda(t)} or \eqn{\frac{h(t)}{\lambda(t)}}{h(t)/lambda(t)}. } \item{aggregate}{ logical. Determines whether lines for all individual infection intensities should be drawn (\code{FALSE}) or their sum only (\code{TRUE}, the default). } \item{theta}{ numeric vector of model coefficients. 
If \code{x} is of class \code{"twinSIR"}, then
    \code{theta = c(alpha, beta)}, where \code{beta} consists of the
    coefficients of the piecewise constant log-baseline function and
    the coefficients of the endemic (\code{cox}) predictor. If
    \code{x} is of class \code{"simEpidata"}, then
    \code{theta = c(alpha, 1, betarest)}, where 1 refers to the (true)
    log-baseline used in the simulation and \code{betarest} is the
    vector of the remaining coefficients of the endemic (\code{cox})
    predictor. The default (\code{NULL}) means that the fitted or true
    parameters, respectively, will be used.
  }
  \item{plot}{
    logical indicating if a plot is desired, defaults to \code{TRUE}.
    Otherwise, only the data of the plot will be returned. Especially
    with \code{aggregate = FALSE} and many individuals one might e.g.
    consider plotting a subset of the individual intensity paths only
    or do some further calculations/analysis of the infection
    intensities.
  }
  \item{add}{
    logical. If \code{TRUE}, paths are added to the current plot,
    using \code{lines}.
  }
  \item{rug.opts}{
    either a list of arguments passed to the function
    \code{\link{rug}}, or \code{NULL} (or \code{NA}), in which case no
    \code{rug} will be plotted. By default, the argument
    \code{ticksize} is set to 0.02 and \code{quiet} is set to
    \code{TRUE}. Note that the argument \code{x} of the \code{rug()}
    function, which contains the locations for the \code{rug}, is
    fixed internally and cannot be modified. The locations of the rug
    are the time points of infections.
  }
  \item{\dots}{
    For the \code{plot.twinSIR} method, arguments passed to
    \code{intensityplot.twinSIR}. For the \code{intensityplot}
    methods, further graphical parameters passed to the function
    \code{\link{matplot}}, e.g. \code{lty}, \code{lwd}, \code{col},
    \code{xlab}, \code{ylab} and \code{main}. Note that the
    \code{matplot} arguments \code{x}, \code{y}, \code{type} and
    \code{add} are implicit and cannot be specified here.
  }
}
\value{
  numeric matrix with the first column \code{"stop"} and as many rows
  as there are \code{"stop"} time points in the event history
  \code{x}. The other columns depend on the argument \code{aggregate}:
  if \code{TRUE}, there is only one other column named \code{which},
  which contains the values of \code{which} at the respective
  \code{"stop"} time points. Otherwise, if \code{aggregate = FALSE},
  there is one column for each individual, each of them containing the
  individual \code{which} at the respective \code{"stop"} time points.
}
\references{
  Meyer, S., Held, L. and \enc{Höhle}{Hoehle}, M. (2017):
  Spatio-temporal analysis of epidemic phenomena using the \R package
  \pkg{surveillance}.
  \emph{Journal of Statistical Software}, \bold{77} (11), 1-55.
  \doi{10.18637/jss.v077.i11}
}
\author{
  Sebastian Meyer
}
\seealso{
  \code{\link{twinSIR}} for a description of the intensity model, and
  \code{\link{simulate.twinSIR}} for the simulation of epidemic data
  according to a \code{twinSIR} specification.
} \examples{ data("hagelloch") plot(hagelloch) # a simplistic twinSIR model fit <- twinSIR(~ household, data = hagelloch) # overall total intensity plot(fit, which = "total") # overall epidemic proportion epi <- plot(fit, which = "epidemic", ylim = c(0, 1)) head(epi) # add overall endemic proportion = 1 - epidemic proportion ende <- plot(fit, which = "endemic", add = TRUE, col = 2) legend("topleft", legend = "endemic proportion", lty = 1, col = 2, bty = "n") # individual intensities tmp <- plot(fit, which = "total", aggregate = FALSE, col = rgb(0, 0, 0, alpha = 0.1), main = expression("Individual infection intensities " * lambda[i](t) == Y[i](t) \%.\% (e[i](t) + h[i](t)))) # return value: matrix of individual intensity paths str(tmp) # plot intensity path only for individuals 3 and 99 matplot(x = tmp[,1], y = tmp[,1+c(3,99)], type = "S", ylab = "Force of infection", xlab = "time", main = expression("Paths of the infection intensities " * lambda[3](t) * " and " * lambda[99](t))) legend("topright", legend = paste("Individual", c(3,99)), col = 1:2, lty = 1:2) } \keyword{hplot} \keyword{aplot} \keyword{dplot} \keyword{methods} surveillance/man/hhh4_W.Rd0000644000176200001440000001477214005021174015136 0ustar liggesusers\name{hhh4_W} \alias{W_powerlaw} \alias{W_np} \title{ Power-Law and Nonparametric Neighbourhood Weights for \code{hhh4}-Models } \description{ Set up power-law or nonparametric weights for the neighbourhood component of \code{\link{hhh4}}-models as proposed by Meyer and Held (2014). Without normalization, power-law weights are \eqn{w_{ji} = o_{ji}^{-d}}{w_ji = o_ji^-d} (if \eqn{o_{ji} > 0}{o_ji > 0}, otherwise \eqn{w_{ji} = 0}{w_ji = 0}), where \eqn{o_{ji}}{o_ji} (\eqn{=o_{ij}}{=o_ij}) is the adjacency order between regions \eqn{i} and \eqn{j}, and the decay parameter \eqn{d} is to be estimated. In the nonparametric formulation, unconstrained log-weights will be estimated for each of the adjacency orders \code{2:maxlag} (the first-order weight is fixed to 1 for identifiability). Both weight functions can be modified to include a 0-distance weight, which enables \code{hhh4} models without a separate autoregressive component. } \usage{ W_powerlaw(maxlag, normalize = TRUE, log = FALSE, initial = if (log) 0 else 1, from0 = FALSE) W_np(maxlag, truncate = TRUE, normalize = TRUE, initial = log(zetaweights(2:(maxlag+from0))), from0 = FALSE, to0 = truncate) } \arguments{ \item{maxlag}{a single integer specifying a limiting order of adjacency. If spatial dependence is not to be truncated at some high order, \code{maxlag} should be set to the maximum adjacency order in the network of regions. The smallest possible value for \code{maxlag} is 2 if \code{from0=FALSE} and 1 otherwise.} \item{truncate,to0}{\code{W_np} represents order-specific log-weights up to order \code{maxlag}. Higher orders are by default (\code{truncate=TRUE}) assumed to have zero weight (similar to \code{W_powerlaw}). Alternatively, \code{truncate=FALSE} requests that the weight at order \code{maxlag} should be carried forward to higher orders. \code{truncate} has previously been called \code{to0} (deprecated).} \item{normalize}{logical indicating if the weights should be normalized such that the rows of the weight matrix sum to 1 (default). 
Note that normalization does not work with islands, i.e.,
    regions without neighbours.}
  \item{log}{logical indicating if the decay parameter \eqn{d} should
    be estimated on the log-scale to ensure positivity.}
  \item{initial}{initial value of the parameter vector.}
  \item{from0}{logical indicating if these parametric weights should
    include the 0-distance (autoregressive) case. In the default
    setting (\code{from0 = FALSE}), adjacency order 0 has zero weight,
    which is suitable for \code{hhh4} models with a separate
    autoregressive component. With \code{from0 = TRUE} (Meyer and
    Held, 2017), the power law is based on \eqn{(o_{ji} + 1)}{(o_ji + 1)},
    and nonparametric weights are estimated for adjacency orders
    \code{1:maxlag}, respectively, where the 0-distance weight is
    \eqn{w_{jj} = 1}{w_jj = 1} (without normalization). Note that the
    corresponding \code{hhh4} model should then exclude a separate
    autoregressive component (\code{control$ar$f = ~ -1}).}
}
\value{
  a list which can be passed as a specification of parametric
  neighbourhood weights in the \code{control$ne$weights} argument of
  \code{\link{hhh4}}.
}
\details{
  \code{hhh4} will take adjacency orders from the \code{neighbourhood}
  slot of the \code{"sts"} object, so these must be prepared before
  fitting a model with parametric neighbourhood weights. The function
  \code{\link{nbOrder}} can be used to derive adjacency orders from a
  binary adjacency matrix.
}
\references{
  Meyer, S. and Held, L. (2014):
  Power-law models for infectious disease spread.
  \emph{The Annals of Applied Statistics}, \bold{8} (3), 1612-1639.
  \doi{10.1214/14-AOAS743}

  Meyer, S. and Held, L. (2017):
  Incorporating social contact data in spatio-temporal models for
  infectious disease spread.
  \emph{Biostatistics}, \bold{18} (2), 338-351.
  \doi{10.1093/biostatistics/kxw051}
}
\author{
  Sebastian Meyer
}
\seealso{
  \code{\link{nbOrder}} to determine adjacency orders from a binary
  adjacency matrix.

  \code{\link{getNEweights}} and \code{\link{coefW}} to extract the
  estimated neighbourhood weight matrix and coefficients from an
  \code{hhh4} model.
}
\examples{
data("measlesWeserEms")

## data contains adjacency orders as required for parametric weights
plot(measlesWeserEms, type = observed ~ unit, labels = TRUE)
neighbourhood(measlesWeserEms)[1:6,1:6]
max(neighbourhood(measlesWeserEms))  # max order is 5

## fit a power-law decay of spatial interaction
## in a hhh4 model with seasonality and random intercepts in the endemic part
measlesModel <- list(
    ar = list(f = ~ 1),
    ne = list(f = ~ 1, weights = W_powerlaw(maxlag=5)),
    end = list(f = addSeason2formula(~-1 + ri(), S=1, period=52)),
    family = "NegBin1")

## fit the model
set.seed(1)  # random intercepts are initialized randomly
measlesFit <- hhh4(measlesWeserEms, measlesModel)
summary(measlesFit)  # "neweights.d" is the decay parameter d
coefW(measlesFit)

## plot the spatio-temporal weights o_ji^-d / sum_k o_jk^-d
## as a function of adjacency order
plot(measlesFit, type = "neweights", xlab = "adjacency order")
## normalization => same distance does not necessarily mean same weight.
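## cross-check (a sketch): with normalize=TRUE, the fitted weights are the
## row-normalized o^-d, so they can be reconstructed from the stored
## adjacency orders via zetaweights() and the estimated decay parameter
W_manual <- zetaweights(neighbourhood(measlesWeserEms),
                        d = coefW(measlesFit), normalize = TRUE)
summary(as.vector(W_manual))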
## to extract the whole weight matrix W: getNEweights(measlesFit) ## visualize contributions of the three model components ## to the overall number of infections (aggregated over all districts) plot(measlesFit, total = TRUE) ## little contribution from neighbouring districts if (surveillance.options("allExamples")) { ## simpler model with autoregressive effects captured by the ne component measlesModel2 <- list( ne = list(f = ~ 1, weights = W_powerlaw(maxlag=5, from0=TRUE)), end = list(f = addSeason2formula(~-1 + ri(), S=1, period=52)), family = "NegBin1") measlesFit2 <- hhh4(measlesWeserEms, measlesModel2) ## omitting the separate AR component simplifies model extensions/selection ## and interpretation of covariate effects (only two predictors left) plot(measlesFit2, type = "neweights", exclude = NULL, xlab = "adjacency order") ## strong decay, again mostly within-district transmission ## (one could also try a purely autoregressive model) plot(measlesFit2, total = TRUE, legend.args = list(legend = c("epidemic", "endemic"))) ## almost the same RMSE as with separate AR and NE effects c(rmse1 = sqrt(mean(residuals(measlesFit, "response")^2)), rmse2 = sqrt(mean(residuals(measlesFit2, "response")^2))) } } \keyword{spatial} \keyword{models} \keyword{utilities} surveillance/man/create.disProg.Rd0000644000176200001440000000421613122471774016674 0ustar liggesusers\name{create.disProg} \alias{create.disProg} \alias{print.disProg} \title{Creating an object of class disProg} \description{ Creates an object of class \code{disProg} from a vector with the weeknumber (week) and matrices with the observed number of counts (observed) and the respective state chains (state), where each column represents an individual time series. The matrices neighbourhood and populationFrac provide information about neighbouring units and population proportions. } \usage{ create.disProg(week, observed, state, start=c(2001,1), freq=52, neighbourhood=NULL, populationFrac=NULL, epochAsDate=FALSE) } \arguments{ \item{week}{index in the matrix of observations, typically weeks} \item{observed}{matrix with parallel time series of counts where rows are time points and columns are the individual time series for unit/area \eqn{i, i=1,\ldots,m}} \item{state}{matrix with corresponding states} \item{start}{vector of length two denoting the year and the sample number (week, month, etc.) of the first observation} \item{freq}{sampling frequency per year, i.e. 52 for weekly data, 12 for monthly data, 13 if 52 weeks are aggregated into 4 week blocks.} \item{neighbourhood}{neighbourhood matrix \eqn{N} of dimension \eqn{m \times m} with elements \eqn{n_{ij}=1} if units \eqn{i} and \eqn{j} are adjacent and 0 otherwise } \item{populationFrac}{matrix with corresponding population proportions} \item{epochAsDate}{interpret the integers in \code{week} as Dates. Default is \code{FALSE}} } \value{object of class \code{disProg}} \author{M. 
Paul}
\examples{
# create a univariate disProg object
# read in salmonella.agona data
salmonella <- read.table(system.file("extdata/salmonella.agona.txt",
                                     package = "surveillance"), header = TRUE)
# look at data.frame
str(salmonella)

salmonellaDisProg <- create.disProg(week = 1:nrow(salmonella),
                                    observed = salmonella$observed,
                                    state = salmonella$state,
                                    start = c(1990, 1))
# look at disProg object
salmonellaDisProg
}
\keyword{datagen}
surveillance/man/algo.farrington.fitGLM.Rd0000644000176200001440000000560313122471774020237 0ustar liggesusers\name{algo.farrington.fitGLM}
\alias{algo.farrington.fitGLM}
\alias{algo.farrington.fitGLM.fast}
\alias{algo.farrington.fitGLM.populationOffset}
\title{Fit Poisson GLM of the Farrington procedure for a single time point}
\description{
  The function fits a Poisson regression model (GLM) with mean
  predictor
  \deqn{\log \mu_t = \alpha + \beta t}{log mu_t = alpha + beta * t}
  as specified by the Farrington procedure. If requested, Anscombe
  residuals are computed based on an initial fit and a 2nd fit is made
  using weights, where base counts suspected to be caused by earlier
  outbreaks are downweighted.
}
\usage{
algo.farrington.fitGLM(response, wtime, timeTrend = TRUE,
                       reweight = TRUE, ...)
algo.farrington.fitGLM.fast(response, wtime, timeTrend = TRUE,
                            reweight = TRUE, ...)
algo.farrington.fitGLM.populationOffset(response, wtime, population,
                                        timeTrend=TRUE,reweight=TRUE, ...)
}
\arguments{
  \item{response}{The vector of observed base counts}
  \item{wtime}{Vector of week numbers corresponding to \code{response}}
  \item{timeTrend}{Boolean whether to fit the \eqn{\beta t}{beta*t} term or not}
  \item{reweight}{Fit twice -- 2nd time with Anscombe residuals}
  \item{population}{Population size. Possibly used as offset, i.e. in
    \code{algo.farrington.fitGLM.populationOffset} the value
    \code{log(population)} is used as offset in the linear predictor
    of the GLM:
    \deqn{\log \mu_t = \log(\texttt{population}) + \alpha + \beta t}{
      log mu_t = log(population) + alpha + beta * t}
    This provides a way to adjust the Farrington procedure to the case
    of greatly varying populations. Note: This is an experimental
    implementation with methodology not covered by the original paper.
  }
  \item{\dots}{Used to catch additional arguments, currently not used.}
}
\details{
  Compute weights from an initial fit and rescale using Anscombe based
  residuals as described in the \code{\link{anscombe.residuals}}
  function.

  Note that \code{algo.farrington.fitGLM} uses the \code{glm} routine
  for fitting. A faster alternative is provided by
  \code{algo.farrington.fitGLM.fast} which uses the \code{glm.fit}
  function directly (thanks to Mikko Virtanen). This saves
  computational overhead and increases speed for 500 monitored time
  points by a factor of approximately two. However, some of the
  routine \code{glm} functions might not work on the output of this
  function. Which function is used for \code{algo.farrington} can be
  controlled by the \code{control$fitFun} argument.
}
\value{
  an object of class \code{"glm"} with additional fields \code{wtime},
  \code{response} and \code{phi}. If the \code{glm} returns without
  convergence, \code{NULL} is returned.
}
\seealso{\code{\link{anscombe.residuals}}, \code{\link{algo.farrington}}}
\keyword{regression}
surveillance/man/algo.call.Rd0000644000176200001440000000476313122471774015662 0ustar liggesusers\name{algo.call}
\alias{algo.call}
\title{Query Transmission to Specified Surveillance Algorithm}
\description{
  Transmission of an object of class disProg to the specified
  surveillance algorithm.
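% Dispatch sketch (Rd source comment; hypothetical code for one control
% entry 'ctrl', not the verbatim implementation):
%   fun <- get(paste0("algo.", ctrl$funcName))  # e.g., algo.rki1
%   res <- fun(disProgObj, control = ctrl)      # -> one survRes object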
}
\usage{
algo.call(disProgObj, control = list(
    list(funcName = "rki1", range = range),
    list(funcName = "rki", range = range, b = 2, w = 4, actY = TRUE),
    list(funcName = "rki", range = range, b = 2, w = 5, actY = TRUE)))
}
\arguments{
  \item{disProgObj}{object of class disProg, which includes the state
    chain and the observed counts}
  \item{control}{specifies which surveillance algorithm should be used
    with their parameters. The parameters \code{funcName} and
    \code{range} must be specified. Here, \code{funcName} is the
    appropriate method function (without '\code{algo.}') and
    \code{range} defines the timepoints to be evaluated by the actual
    system. If \code{control} includes \code{name}, this name is used
    as the name of the resulting survRes object.}
}
\value{
  a list of survRes objects generated by the specified surveillance
  algorithm
}
\seealso{\code{\link{algo.rki}}, \code{\link{algo.bayes}},
  \code{\link{algo.farrington}}}
\examples{
# Create a test object
disProg <- sim.pointSource(p = 0.99, r = 0.5, length = 400,
                           A = 1, alpha = 1, beta = 0, phi = 0,
                           frequency = 1, state = NULL, K = 1.7)

# Test this object with the following methods for range = 200:400
range <- 200:400
survRes <- algo.call(disProg,
                     control = list(
                         list(funcName = "rki1", range = range),
                         list(funcName = "rki2", range = range),
                         list(funcName = "rki3", range = range),
                         list(funcName = "rki", range = range,
                              b = 3, w = 2, actY = FALSE),
                         list(funcName = "rki", range = range,
                              b = 2, w = 9, actY = TRUE),
                         list(funcName = "bayes1", range = range),
                         list(funcName = "bayes2", range = range),
                         list(funcName = "bayes3", range = range),
                         list(funcName = "bayes", name = "myBayes",
                              range = range, b = 1, w = 5, actY = TRUE,alpha=0.05)
                         ))
# these are some of the resulting survRes objects
plot(survRes[["rki(6,6,0)"]])
survRes[["bayes(5,5,1)"]]
}
\keyword{classif}
surveillance/man/stK.Rd0000644000176200001440000001163414004512307014551 0ustar liggesusers\encoding{latin1}
\name{stK}
\alias{stKtest}
\alias{plot.stKtest}
\title{
  Diggle et al (1995) K-function test for space-time clustering
}
\description{
  The function \code{stKtest} wraps functions in package \pkg{splancs}
  to perform the K-function based Monte Carlo permutation test for
  space-time clustering (Diggle et al, 1995) for \code{"epidataCS"}.
  The implementation is due to Meyer et al. (2016).
}
\usage{
stKtest(object, eps.s = NULL, eps.t = NULL, B = 199,
        cores = 1, seed = NULL, poly = object$W)
\method{plot}{stKtest}(x, which = c("D", "R", "MC"),
     args.D = list(), args.D0 = args.D, args.R = list(),
     args.MC = list(), mfrow = sort(n2mfrow(length(which))), ...)
}
\arguments{
  \item{object}{an object of class \code{"epidataCS"}.}
  \item{eps.s, eps.t}{
    numeric vectors defining the spatial and temporal grids of critical
    distances over which to evaluate the test.
    The default (\code{NULL}) uses equidistant values from 0 to the
    smallest \code{eps.s}/\code{eps.t} value in \code{object$events},
    but not larger than half the observed spatial/temporal domain.
  }
  \item{B}{the number of permutations.}
  \item{cores}{
    the number of parallel processes over which to distribute the
    requested number of permutations.
  }
  \item{seed}{
    argument for \code{\link{set.seed}} to initialize the random number
    generator such that results become reproducible
    (also if \code{cores > 1}, see \code{\link{plapply}}).
  }
  \item{poly}{
    the polygonal observation region of the events (as an object
    handled by \code{\link{xylist}}). The default \code{object$W}
    might not work since package \pkg{splancs} does not support
    multi-polygons.
In this case, the \code{poly} argument can be used
    to specify a substitute.
  }
  \item{x}{an \code{"stKtest"}.}
  \item{which}{
    a character vector indicating which diagnostic plots to produce.
    The full set is \code{c("D", "D0", "R", "MC")}.
    The special value \code{which = "stdiagn"} means to call the
    associated \pkg{splancs} function \code{\link[splancs]{stdiagn}}.
  }
  \item{args.D,args.D0,args.R,args.MC}{
    argument lists for the plot functions \code{\link{persp}} (for
    \code{"D"} and \code{"D0"}), \code{\link{plot.default}}
    (\code{"R"}), and \code{\link[MASS]{truehist}} (\code{"MC"}),
    respectively, to modify the default settings.
    Ignored if \code{which = "stdiagn"}.
  }
  \item{mfrow}{
    \code{\link{par}}-setting to layout the plots. Ignored for
    \code{which = "stdiagn"} and if set to \code{NULL}.
  }
  \item{\dots}{ignored (argument of the generic).}
}
\value{
  an object of class \code{"stKtest"} (inheriting from \code{"htest"}),
  which is a list with the following components:
  \item{method}{a character string indicating the type of test performed.}
  \item{data.name}{a character string naming the supplied \code{object}.}
  \item{statistic}{the sum \eqn{U} of the standardized residuals \eqn{R(s,t)}.}
  \item{parameter}{the number \code{B} of permutations.}
  \item{p.value}{the p-value for the test.}
  \item{pts}{the coordinate matrix of the event locations (for
    \code{\link[splancs]{stdiagn}}).}
  \item{stK}{the estimated K-function as returned by
    \code{\link[splancs]{stkhat}}.}
  \item{seD}{the standard error of the estimated \eqn{D(s,t)} as
    returned by \code{\link[splancs]{stsecal}}.}
  \item{mctest}{the observed and permutation values of the test
    statistic as returned by \code{\link[splancs]{stmctest}}.}

  The \code{plot}-method invisibly returns \code{NULL}.
}
\references{
  Diggle, P. J., Chetwynd, A. G., H\enc{ä}{ae}ggkvist, R. and Morris,
  S. E. (1995): Second-order analysis of space-time clustering.
  \emph{Statistical Methods in Medical Research}, \bold{4}, 124-136.

  Meyer, S., Warnke, I., R\enc{ö}{oe}ssler, W. and Held, L. (2016):
  Model-based testing for space-time interaction using point
  processes: An application to psychiatric hospital admissions in an
  urban area.
  \emph{Spatial and Spatio-temporal Epidemiology}, \bold{17}, 15-25.
  \doi{10.1016/j.sste.2016.03.002}.
  Eprint: \url{https://arxiv.org/abs/1512.09052}.
}
\author{
  Sebastian Meyer
}
\seealso{
  the simple \code{\link{knox}} test and function \code{\link{epitest}}
  for testing \code{"\link{twinstim}"} models.
} \examples{ if (requireNamespace("splancs")) { data("imdepi") imdepiB <- subset(imdepi, type == "B") mainpoly <- coordinates(imdepiB$W@polygons[[1]]@Polygons[[5]]) if (surveillance.options("allExamples")) { SGRID <- c(0, 10, 25, 50, 75, 100, 150, 200) TGRID <- c(0, 7, 14, 21, 28) B <- 99 CORES <- 2 } else { # dummy settings for fast CRAN checks SGRID <- c(0, 50) TGRID <- c(0, 30) B <- 9 CORES <- 1 } imdBstKtest <- stKtest(imdepiB, eps.s = SGRID, eps.t = TGRID, B = B, cores = CORES, seed = 1, poly = list(mainpoly)) print(imdBstKtest) plot(imdBstKtest) } } \keyword{htest} surveillance/man/twinstim_intensity.Rd0000644000176200001440000002243313276476421020002 0ustar liggesusers\name{twinstim_intensity} \alias{intensityplot.twinstim} \alias{intensity.twinstim} \alias{intensityplot.simEpidataCS} \title{ Plotting Intensities of Infection over Time or Space } \description{ \code{\link{intensityplot}} method to plot the evolution of the total infection intensity, its epidemic proportion or its endemic proportion over time or space (integrated over the other dimension) of fitted \code{\link{twinstim}} models (or \code{\link{simEpidataCS}}). The \code{"simEpidataCS"}-method is just a wrapper around \code{intensityplot.twinstim}, which makes the \code{"simEpidataCS"} object \code{"twinstim"}-compatible, i.e., enriches it with the required model components and environment. The \code{intensity.twinstim} auxiliary function returns functions which calculate the endemic or epidemic intensity at a specific time point or location (integrated over the other dimension). } \usage{ \method{intensityplot}{twinstim}(x, which = c("epidemic proportion", "endemic proportion", "total intensity"), aggregate = c("time", "space"), types = 1:nrow(x$qmatrix), tiles, tiles.idcol = NULL, plot = TRUE, add = FALSE, tgrid = 101, rug.opts = list(), sgrid = 128, polygons.args = list(), points.args = list(), cex.fun = sqrt, ...) \method{intensityplot}{simEpidataCS}(x, ...) intensity.twinstim(x, aggregate = c("time", "space"), types = 1:nrow(x$qmatrix), tiles, tiles.idcol = NULL) } \arguments{ \item{x}{ an object of class \code{"twinstim"} or \code{"simEpidataCS"}, respectively. } \item{which}{ \code{"epidemic proportion"}, \code{"endemic proportion"}, or \code{"total intensity"}. Partial matching is applied. Determines whether to plot the path of the total intensity or its epidemic or endemic proportions over time or space (\code{which}) aggregated over the other dimension and \code{types}. } \item{aggregate}{ One of \code{"time"} or \code{"space"}. The former results in a plot of the evolution of \code{which} as a function of time (integrated over the observation region \eqn{\bold{W}}), whereas the latter produces a \code{spplot} of \code{which} over \eqn{\bold{W}} (spanned by \code{tiles}). In both cases, \code{which} is evaluated on a grid of values, given by \code{tgrid} or \code{sgrid}, respectively. } \item{types}{ event types to aggregate. By default, all types of events are aggregated, but one could also be interested in only one specific type or a subset of event types. } \item{tiles}{ object of class \code{\linkS4class{SpatialPolygons}} representing the decomposition of \eqn{\bold{W}} into different regions (as used in the corresponding \code{stgrid} of the \code{"\link{epidataCS}"}). This is only needed for \code{aggregate = "space"}.
} \item{tiles.idcol}{ either a column index for \code{tiles@data} (if \code{tiles} is a \code{\linkS4class{SpatialPolygonsDataFrame}}), or \code{NULL} (default), which refers to the \code{"ID"} slot of the polygons, i.e., \code{row.names(tiles)}. The ID's must correspond to the factor levels of \code{stgrid$tile} of the \code{"\link{epidataCS}"} on which \code{x} was fitted. } \item{plot}{ logical indicating if a plot is desired, which defaults to \code{TRUE}. Otherwise, a function will be returned, which takes a vector of time points (if \code{aggregate = "time"}) or a matrix of coordinates (if \code{aggregate = "space"}), and returns \code{which} on this grid. } \item{add}{ logical. If \code{TRUE} and \code{aggregate = "time"}, paths are added to the current plot, using \code{lines}. This does not work for \code{aggregate = "space"}. } \item{tgrid}{ either a numeric vector of time points when to evaluate \code{which}, or a scalar representing the desired number of evaluation points in the observation interval \eqn{[t_0, T]}. This argument is unused for \code{aggregate = "space"}. } \item{rug.opts}{ if a list, its elements are passed as arguments to the function \code{\link{rug}}, which will mark the time points of the events if \code{aggregate = "time"} (it is unused in the spatial case); otherwise (e.g., \code{NULL}), no \code{rug} will be produced. By default, the \code{rug} argument \code{ticksize} is set to 0.02 and \code{quiet} is set to \code{TRUE}. Note that the argument \code{x} of the \code{rug} function, which contains the locations for the \code{rug} is fixed internally and can not be modified. } \item{sgrid}{ either an object of class \code{"\linkS4class{SpatialPixels}"} (or coercible to that class) representing the locations where to evaluate \code{which}, or a scalar representing the total number of points of a grid constructed on the bounding box of \code{tiles} (using \code{\link[maptools]{Sobj_SpatialGrid}} from package \pkg{maptools}). \code{sgrid} is internally subsetted to contain only points inside \code{tiles}. This argument is unused for \code{aggregate = "time"}. } \item{polygons.args}{ if a list, its elements are passed as arguments to \code{\link{sp.polygons}}, which will add \code{tiles} to the plot if \code{aggregate = "space"} (it is unused for the temporal plot). By default, the fill \code{col}our of the tiles is set to \code{"darkgrey"}. } \item{points.args}{ if a list, its elements are passed as arguments to \code{\link{sp.points}}, which will add the event locations to the plot if \code{aggregate = "space"} (it is unused for the temporal plot). By default, the plot symbol is set to \code{pch=1}. The sizes of the points are determined as the product of the argument \code{cex} (default: 0.5) of this list and the sizes obtained from the function \code{cex.fun} which accounts for multiple events at the same location. } \item{cex.fun}{ function which takes a vector of counts of events at each unique location and returns a (vector of) \code{cex} value(s) for the sizes of the points at the event locations used in \code{points.args}. Defaults to the \code{sqrt()} function, which for the default circular \code{pch=1} means that the area of each point is proportional to the number of events at its location. } \item{\dots}{ further arguments passed to \code{plot} or \code{lines} (if \code{aggregate = "time"}), or to \code{\link{spplot}} (if \code{aggregate = "space"}).\cr For \code{intensityplot.simEpidataCS}, arguments passed to \code{intensityplot.twinstim}. 
} } \value{ If \code{plot = FALSE} or \code{aggregate = "time"}, a function is returned, which takes a vector of time points (if \code{aggregate = "time"}) or a matrix of coordinates (if \code{aggregate = "space"}), and returns \code{which} on this grid. \code{intensity.twinstim} returns a list containing such functions for the endemic and epidemic intensity (but these are not vectorized). If \code{plot = TRUE} and \code{aggregate = "space"}, the \code{\link[lattice]{trellis.object}} of the spatial plot is returned. } \author{ Sebastian Meyer } \seealso{ \code{\link{plot.twinstim}}, which calls \code{intensityplot.twinstim}. } \examples{ data("imdepi", "imdepifit") # for the intensityplot we need the model environment, which can be # easily added by the intelligent update method (no need to refit the model) imdepifit <- update(imdepifit, model=TRUE) ## path of the total intensity opar <- par(mfrow=c(2,1)) intensityplot(imdepifit, which="total intensity", aggregate="time", tgrid=500) plot(imdepi, "time", breaks=100) par(opar) ## time course of the epidemic proportion by event intensityplot(imdepifit, which="epidemic proportion", aggregate="time", tgrid=500, types=1) intensityplot(imdepifit, which="epidemic proportion", aggregate="time", tgrid=500, types=2, add=TRUE, col=2) legend("topright", legend=levels(imdepi$events$type), lty=1, col=1:2, title = "event type") ## endemic and total intensity in one plot intensity_endprop <- intensityplot(imdepifit, which="endemic proportion", aggregate="time", plot=FALSE) intensity_total <- intensityplot(imdepifit, which="total intensity", aggregate="time", tgrid=501, lwd=2) curve(intensity_endprop(x) * intensity_total(x), add=TRUE, col=2, lwd=2, n=501) text(2500, 0.36, labels="total", col=1, pos=2, font=2) text(2500, 0.08, labels="endemic", col=2, pos=2, font=2) ## spatial shape of the intensity (aggregated over time) if (surveillance.options("allExamples") && requireNamespace("maptools")) { ## load borders of Germany's districts load(system.file("shapes", "districtsD.RData", package="surveillance")) # total intensity (using a rather sparse 'sgrid' for speed) intensityplot(imdepifit, which="total intensity", aggregate="space", tiles=districtsD, sgrid=500) # epidemic proportion by type maps_epiprop <- lapply(1:2, function (type) { intensityplot(imdepifit, which="epidemic", aggregate="space", types=type, tiles=districtsD, sgrid=1000, at=seq(0,1,by=0.1), col.regions=rev(heat.colors(20))) }) plot(maps_epiprop[[1]], split=c(1,1,2,1), more=TRUE) plot(maps_epiprop[[2]], split=c(2,1,2,1)) } } \keyword{hplot} \keyword{aplot} \keyword{dplot} \keyword{methods} surveillance/man/scores.Rd0000644000176200001440000000673413166672062015331 0ustar liggesusers\name{scores} \alias{scores} \alias{scores.default} \alias{logs} \alias{rps} \alias{dss} \alias{ses} \title{ Proper Scoring Rules for Poisson or Negative Binomial Predictions } \description{ Proper scoring rules for Poisson or negative binomial predictions of count data are described in Czado et al. (2009). The following scores are implemented: logarithmic score (\code{logs}), ranked probability score (\code{rps}), Dawid-Sebastiani score (\code{dss}), squared error score (\code{ses}). } \usage{ scores(x, ...) \method{scores}{default}(x, mu, size = NULL, which = c("logs", "rps", "dss", "ses"), sign = FALSE, ...) logs(x, mu, size = NULL) rps(x, mu, size = NULL, k = 40, tolerance = sqrt(.Machine$double.eps)) dss(x, mu, size = NULL) ses(x, mu, size = NULL) } \arguments{ \item{x}{ the observed counts. 
All functions are vectorized and also accept matrices or arrays. Dimensions are preserved. } \item{mu}{ the means of the predictive distributions for the observations \code{x}. } \item{size}{ either \code{NULL} (default), indicating Poisson predictions with mean \code{mu}, or dispersion parameters of negative binomial forecasts for the observations \code{x}, parametrized as in \code{\link{dnbinom}} with variance \code{mu*(1+mu/size)}. } \item{which}{ a character vector specifying which scoring rules to apply. By default, all four proper scores are calculated. The normalized squared error score (\code{"nses"}) is also available but it is improper and hence not computed by default. } \item{sign}{ a logical indicating if the function should also return \code{sign(x-mu)}, i.e., the sign of the difference between the observed counts and corresponding predictions. } \item{\dots}{ unused (argument of the generic). } \item{k}{ scalar argument controlling the finite sum approximation for the \code{rps} with truncation at \code{ceiling(mu + k*sd)}. } \item{tolerance}{ absolute tolerance for the finite sum approximation employed in the \code{rps} calculation. A warning is produced if the approximation with \code{k} summands is insufficient for the specified \code{tolerance}. In this case, increase \code{k} for higher precision (or use a larger tolerance). } } \value{ The scoring functions return the individual scores for the predictions of the observations in \code{x} (maintaining their dimension attributes). The default \code{scores}-method applies the selected (\code{which}) scoring functions (and calculates \code{sign(x-mu)}) and returns the results in an array (via \code{\link{simplify2array}}), where the last dimension corresponds to the different scores. } \references{ Czado, C., Gneiting, T. and Held, L. (2009): Predictive model assessment for count data. \emph{Biometrics}, \bold{65} (4), 1254-1261. \doi{10.1111/j.1541-0420.2009.01191.x} } \seealso{ The R package \CRANpkg{scoringRules} implements the logarithmic score and the (continuous) ranked probability score for many distributions. } \author{ Sebastian Meyer and Michaela Paul } \examples{ mu <- c(0.1, 1, 3, 6, pi, 100) size <- 0.1 set.seed(1) y <- rnbinom(length(mu), mu = mu, size = size) scores(y, mu = mu, size = size) scores(y, mu = mu, size = 1) # ses ignores the variance scores(y, mu = 1, size = size) ## apply a specific scoring rule scores(y, mu = mu, size = size, which = "rps") rps(y, mu = mu, size = size) } \keyword{univar} surveillance/man/surveillance-package.Rd0000644000176200001440000001334014027037311020074 0ustar liggesusers%\RdOpts{stage=build} % Not yet: in R 2.12.0 - 4.0.5, \RdOpts{} had no effect (PR#18073) \encoding{latin1} \name{surveillance-package} \alias{surveillance-package} \alias{surveillance} \docType{package} \title{\pkg{surveillance}: \packageTitle{surveillance}} \description{ The \R package \pkg{surveillance} implements statistical methods for the retrospective modeling and prospective monitoring of epidemic phenomena in temporal and spatio-temporal contexts. Focus is on (routinely collected) public health surveillance data, but the methods just as well apply to data from environmetrics, econometrics or the social sciences. As many of the monitoring methods rely on statistical process control methodology, the package is also relevant to quality control and reliability engineering. } \details{ The package implements many typical outbreak detection procedures such as Stroup et al. (1989), Farrington et al. 
(1996), Rossi et al. (1999), Rogerson and Yamada (2001), a Bayesian approach (H\enc{ö}{oe}hle, 2007), negative binomial CUSUM methods (H\enc{ö}{oe}hle and Mazick, 2009), and a detector based on generalized likelihood ratios (H\enc{ö}{oe}hle and Paul, 2008), see \code{\link{wrap.algo}}. CUSUMs for the prospective change-point detection in binomial, beta-binomial and multinomial time series are also covered, based on generalized linear modeling; see \code{\link{categoricalCUSUM}}. This includes, e.g., paired comparison Bradley-Terry modeling described in H\enc{ö}{oe}hle (2010), or paired binary CUSUM (\code{\link{pairedbinCUSUM}}) described by Steiner et al. (1999). The package contains several real-world datasets and provides the ability to simulate outbreak data and to visualize the results of the monitoring in a temporal, spatial or spatio-temporal fashion. For time series data, the fundamental data structure of the package is the S4 class \code{\link{sts}}, wrapping observations, monitoring results and date handling for multivariate time series. A recent overview of the available monitoring procedures is given by Salmon et al. (2016). For the retrospective analysis of epidemic spread, the package provides three endemic-epidemic modeling frameworks with tools for visualization, likelihood inference, and simulation. The function \code{\link{hhh4}} offers inference methods for the (multivariate) count time series models of Held et al. (2005), Paul et al. (2008), Paul and Held (2011), Held and Paul (2012), and Meyer and Held (2014). See \code{vignette("hhh4")} for a general introduction and \code{vignette("hhh4_spacetime")} for a discussion and illustration of spatial \code{hhh4} models. Furthermore, the fully Bayesian approach for univariate time series of counts from Held et al. (2006) is implemented as function \code{\link{algo.twins}}. Self-exciting point processes are modeled through endemic-epidemic conditional intensity functions. \code{\link{twinSIR}} (H\enc{ö}{oe}hle, 2009) models the susceptible-infectious-recovered (SIR) event history of a fixed population, e.g., epidemics across farms or networks; see \code{vignette("twinSIR")} for an illustration. \code{\link{twinstim}} (Meyer et al., 2012) fits spatio-temporal point process models to point patterns of infective events, e.g., time-stamped geo-referenced surveillance data on infectious disease occurrence; see \code{vignette("twinstim")} for an illustration. A recent overview of the implemented space-time modeling frameworks for epidemic phenomena is given by Meyer et al. (2017). } %% Author information is dynamically extracted from the DESCRIPTION file \author{ \Sexpr[stage=build]{ pkgdir <- tools:::Rd_macros_package_dir() # support R CMD Rd2pdf pkg
desc <- tools:::.read_description(file.path(pkgdir, "DESCRIPTION")) aar <- unname(eval(parse(text=desc["Authors@R"]))) authors <- aar[grep("aut", aar$role)] paste0(format(authors, include = c("given", "family")), collapse = ", ") } Maintainer: \packageMaintainer{surveillance} } %% Dynamically extract contributors from the DESCRIPTION file %% and persons from inst/THANKS for acknowledgement: \section{Acknowledgements}{ Substantial contributions of code by: \Sexpr[stage=build]{ contributors <- aar[grepl("ctb", aar$role) & !sapply(aar$family, is.null)] paste0(format(contributors, include = c("given", "family")), collapse = ", ") }.
Furthermore, the authors would like to thank the following people for ideas, discussions, testing and feedback: \Sexpr[stage=build]{ find_inst_file <- function (file) { # support R CMD Rd2pdf in source package if(dir.exists(file.path(pkgdir, "inst"))) file.path(pkgdir, "inst", file) else file.path(pkgdir, file) } thanks <- readLines(find_inst_file("THANKS"), encoding="latin1") paste0(grep("^(#|[[:blank:]]*$)", thanks, invert=TRUE, value=TRUE), collapse = ", ") }. } \references{ \code{citation(package="surveillance")} gives the two main software references for the modeling (Meyer et al., 2017) and the monitoring (Salmon et al., 2016) functionalities: \Sexpr[stage=build,results=rd]{ paste0("\\\itemize{\n", paste0("\\\item ", tools::toRd( readCitationFile(find_inst_file("CITATION"), list(Encoding="latin1")) ), collapse = "\n\n"), "\n}") } Further references are listed in \code{surveillance:::REFERENCES}. If you use the \pkg{surveillance} package in your own work, please do cite the corresponding publications. } \seealso{ \url{https://surveillance.R-forge.R-project.org/} } \keyword{ package } \examples{ ## Additional documentation and illustrations of the methods are ## available in the form of package vignettes and demo scripts: vignette(package = "surveillance") demo(package = "surveillance") } surveillance/man/twinstim_siaf_simulatePC.Rd0000644000176200001440000000316513612576410021030 0ustar liggesusers\name{siaf.simulatePC} \alias{siaf.simulatePC} \title{ Simulation from an Isotropic Spatial Kernel via Polar Coordinates } \description{ To sample points from isotropic spatial kernels \eqn{f_2(s) = f(||s||)} such as \code{\link{siaf.powerlaw}} on a bounded domain (i.e., \eqn{||s|| < \code{ub}}), it is convenient to switch to polar coordinates \eqn{(r,\theta)}, which have a density proportional to \eqn{r f_2((r \cos(\theta), r \sin(\theta))) = r f(r)} (independent of the angle \eqn{\theta} due to isotropy). The angle is thus simply drawn uniformly in \eqn{[0,2\pi)}, and \eqn{r} can be sampled by the inversion method, where numeric root finding is used for the quantiles (since the quantile function is not available in closed form). } \usage{ siaf.simulatePC(intrfr) } \arguments{ \item{intrfr}{ a function computing the integral of \eqn{r f(r)} from 0 to \code{R} (first argument, not necessarily named \code{R}). Parameters of the function are passed as its second argument and a third argument is the event type. } } \value{ a function with arguments \code{(n, siafpars, type, ub)}, which samples \code{n} points from the spatial kernel \eqn{f_2(s)} within the disc of radius \code{ub}, where \code{siafpars} and \code{type} are passed as second and third argument to \code{intrfr}. The environment of the returned function will be the caller's environment. } \author{ Sebastian Meyer } \examples{ simfun <- siaf.powerlaw()$simulate ## is internally generated as siaf.simulatePC(intrfr.powerlaw) set.seed(1) simfun(n=10, siafpars=log(c(sigma=1, d=2)), ub=5) } \keyword{internal} surveillance/man/algo.farrington.threshold.Rd0000644000176200001440000000225513122471774021111 0ustar liggesusers\name{algo.farrington.threshold} \alias{algo.farrington.threshold} \title{Compute prediction interval for a new observation} \description{ Depending on the current transformation \eqn{h(y)= \{y, \sqrt{y}, y^{2/3}\}}, \deqn{V(h(y_0)-h(\mu_0))=V(h(y_0))+V(h(\mu_0))} is used to compute a prediction interval. 
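For instance, with the identity transformation \eqn{h(y) = y}, the bounds take the form (a sketch of the general construction, not necessarily the exact implementation) \deqn{\hat{\mu}_0 \pm z_{1-\alpha/2} \sqrt{V(y_0) + V(\hat{\mu}_0)},} where \eqn{z_{1-\alpha/2}} denotes the corresponding standard normal quantile; for the square-root and 2/3-power transformations, the bounds are computed on the transformed scale and back-transformed via \eqn{h^{-1}}.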
The prediction variance consists of a component due to the variance of a single observation and a component due to the uncertainty of the predicted mean. } \usage{ algo.farrington.threshold(pred,phi,alpha=0.01,skewness.transform="none",y) } \arguments{ \item{pred}{A GLM prediction object} \item{phi}{Current overdispersion parameter (superfluous?)} \item{alpha}{Quantile level in Gaussian based CI, i.e. an \eqn{(1-\alpha)\cdot 100\%} confidence interval is computed. } \item{skewness.transform}{Skewness correction, i.e. one of \code{"none"}, \code{"1/2"}, or \code{"2/3"}.} \item{y}{Observed number} } \value{ Vector of length four with lower and upper bounds of an \eqn{(1-\alpha)\cdot 100\%} confidence interval (first two elements) and the corresponding quantile of the observation \code{y} together with the median of the predictive distribution. } \keyword{regression} surveillance/man/aggregate.disProg.Rd0000644000176200001440000000117113122471774017354 0ustar liggesusers\name{aggregate.disProg} \alias{aggregate.disProg} \title{Aggregate the observed counts} \description{ Aggregates the observed counts for a multivariate \code{disProgObj} over the units. Future versions of \code{surveillance} will also allow for time aggregations etc. } \usage{ \method{aggregate}{disProg}(x,\dots) } \arguments{ \item{x}{Object of class \code{disProg}} \item{\dots}{not used at the moment} } \value{\item{x}{univariate \code{disProg} object with aggregated counts and respective states for each time point.} } \keyword{hplot} \examples{ data(ha) plot(aggregate(ha)) } surveillance/man/epidataCS.Rd0000644000176200001440000004301414006017101015636 0ustar liggesusers\encoding{latin1} \name{epidataCS} \alias{epidataCS} \alias{as.epidataCS} \alias{print.epidataCS} \alias{nobs.epidataCS} \alias{head.epidataCS} \alias{tail.epidataCS} \alias{[.epidataCS} \alias{subset.epidataCS} \alias{marks.epidataCS} \alias{summary.epidataCS} \alias{print.summary.epidataCS} \alias{as.stepfun.epidataCS} \alias{getSourceDists} \alias{coerce,epidataCS,SpatialPointsDataFrame-method} \title{ Continuous Space-Time Marked Point Patterns with Grid-Based Covariates } \description{ Data structure for \strong{c}ontinuous \strong{s}patio-temporal event data, e.g. individual case reports of an infectious disease. Apart from the actual \code{events}, the class simultaneously holds a spatio-temporal grid of endemic covariates (similar to disease mapping) and a representation of the observation region. The \code{"epidataCS"} class is the basis for fitting spatio-temporal endemic-epidemic intensity models with the function \code{\link{twinstim}} (Meyer et al., 2012). The implementation is described in Meyer et al. (2017, Section 3), see \code{vignette("twinstim")}. } \usage{ as.epidataCS(events, stgrid, W, qmatrix = diag(nTypes), nCircle2Poly = 32L, T = NULL, clipper = c("polyclip", "rgeos"), verbose = interactive()) \method{print}{epidataCS}(x, n = 6L, digits = getOption("digits"), ...) \method{nobs}{epidataCS}(object, ...) \method{head}{epidataCS}(x, n = 6L, ...) \method{tail}{epidataCS}(x, n = 6L, ...) \method{[}{epidataCS}(x, i, j, ..., drop = TRUE) \method{subset}{epidataCS}(x, subset, select, drop = TRUE, ...) \method{marks}{epidataCS}(x, coords = TRUE, ...) \method{summary}{epidataCS}(object, ...) \method{print}{summary.epidataCS}(x, ...) \method{as.stepfun}{epidataCS}(x, ...)
getSourceDists(object, dimension = c("space", "time")) } \arguments{ \item{events}{ a \code{"\linkS4class{SpatialPointsDataFrame}"} of cases with the following obligatory columns (in the \code{events@data} \code{data.frame}): \describe{ \item{time}{time point of event. Will be converted to a numeric variable by \code{as.numeric}. There should be no concurrent events (but see \code{\link{untie}} for an ex post adjustment) and there cannot be events beyond \code{stgrid} (i.e., \code{time<=T} is required). Events at or before time \eqn{t_0} = \code{min(stgrid$start)} are allowed and form the prehistory of the process.} \item{tile}{the spatial region (tile) where the event is located. This links to the tiles of \code{stgrid}.} \item{type}{optional type of event in a marked \code{twinstim} model. Will be converted to a factor variable dropping unused levels. If missing, all events will be attributed the single type \code{"1"}.} \item{eps.t}{maximum \emph{temporal} influence radius (e.g. length of infectious period, time to culling, etc.); must be positive and may be \code{Inf}.} \item{eps.s}{maximum \emph{spatial} influence radius (e.g. 100 [km]); must be positive and may be \code{Inf}. A compact influence region mainly has computational advantages, but might also be plausible for specific applications.} } The \code{data.frame} may contain columns with further marks of the events, e.g. sex, age of infected individuals, which may be used as epidemic covariates influencing infectiousness. Note that some auxiliary columns will be added at conversion whose names are reserved: \code{".obsInfLength"}, \code{".bdist"}, \code{".influenceRegion"}, and \code{".sources"}, as well as \code{"start"}, \code{"BLOCK"}, and all endemic covariates' names from \code{stgrid}. } \item{stgrid}{ a \code{\link{data.frame}} describing endemic covariates on a full spatio-temporal region x interval grid (e.g., district x week), which is a decomposition of the observation region \code{W} and period \eqn{[t_0, T]}. This means that for every combination of spatial region and time interval there must be exactly one row in this \code{data.frame}, that the union of the spatial tiles equals \code{W}, the union of the time intervals equals \eqn{[t_0, T]}, and that regions (and intervals) are non-overlapping. There are the following obligatory columns: \describe{ \item{tile}{ID of the spatial region (e.g., district ID). It will be converted to a factor variable (dropping unused levels if it already was one).} \item{start, stop}{columns describing the consecutive temporal intervals (converted to numeric variables by \code{as.numeric}). The \code{start} time of an interval must be equal to the \code{stop} time of the previous interval. The \code{stop} column may be missing, in which case it will be auto-generated from the set of \code{start} values and \code{T}.} \item{area}{area of the spatial region (\code{tile}). Be aware that the unit of this area (e.g., square km) must be consistent with the units of \code{W} and \code{events} (as specified in their \code{\link{proj4string}}s).} } The remaining columns are endemic covariates. Note that the column name \code{"BLOCK"} is reserved (a column which will be added automatically for indexing the time intervals of \code{stgrid}). } \item{W}{ an object of class \code{"\linkS4class{SpatialPolygons}"} representing the observation region. It must have the same \code{proj4string} as \code{events} and all events must be within \code{W}.
Prior simplification of \code{W} may considerably reduce the computational burden of likelihood evaluations in \code{\link{twinstim}} models with non-trivial spatial interaction functions (see the \dQuote{Note} section below). } \item{qmatrix}{ a square indicator matrix (0/1 or \code{FALSE}/\code{TRUE}) for possible transmission between the event types. The matrix will be internally converted to \code{logical}. Defaults to an independent spread of the event types, i.e. the identity matrix. } \item{nCircle2Poly}{ accuracy (number of edges) of the polygonal approximation of a circle, see \code{\link{discpoly}}. } \item{T}{ end of observation period (i.e. last \code{stop} time of \code{stgrid}). Must be specified if the start but not the stop times are supplied in \code{stgrid} (=> auto-generation of \code{stop} times). } \item{clipper}{polygon clipping engine to use for calculating the \code{.influenceRegion}s of events (see the Value section below). Default is the \CRANpkg{polyclip} package (called via \code{\link{intersect.owin}} from package \CRANpkg{spatstat.geom}). In \pkg{surveillance} <= 1.6-0, package \pkg{gpclib} was used, which has a restrictive license. This is no longer supported.} \item{verbose}{logical indicating if status messages should be printed during input checking and \code{"epidataCS"} generation. The default is to do so in interactive \R sessions.} \item{x}{an object of class \code{"epidataCS"} or \code{"summary.epidataCS"}, respectively.} \item{n}{a single integer. If positive, the first (\code{head}, \code{print}) / last (\code{tail}) \code{n} events are extracted. If negative, all but the \code{n} first/last events are extracted.} \item{digits}{minimum number of significant digits to be printed in values.} \item{i,j,drop}{ arguments passed to the \code{\link[=[,SpatialPointsDataFrame-method]{[-method}} for \code{SpatialPointsDataFrame}s for subsetting the \code{events} while retaining \code{stgrid} and \code{W}.\cr If \code{drop=TRUE} (the default), event types that completely disappear due to \code{i}-subsetting will be dropped, which reduces \code{qmatrix} and the factor levels of the \code{type} column.\cr By the \code{j} index, epidemic covariates can be removed from \code{events}.} \item{\dots}{unused (arguments of the generics) with a few exceptions: The \code{print} method for \code{"epidataCS"} passes \code{\dots} to the \code{\link{print.data.frame}} method, and the \code{print} method for \code{"summary.epidataCS"} passes additional arguments to \code{\link{print.table}}.} \item{subset, select}{arguments used to subset the \code{events} from an \code{"epidataCS"} object like in \code{\link{subset.data.frame}}.} \item{coords}{logical indicating if the data frame of event marks returned by \code{marks(x)} should have the event coordinates appended as last columns. This defaults to \code{TRUE}.} \item{object}{an object of class \code{"epidataCS"}.} \item{dimension}{the distances of all events to their potential source events can be computed in either the \code{"space"} or \code{"time"} dimension.} } \details{ The function \code{as.epidataCS} is used to generate objects of class \code{"epidataCS"}, which is the data structure required for \code{\link{twinstim}} models. The \code{[}-method for class \code{"epidataCS"} ensures that the subsetted object will be valid, for instance, it updates the auxiliary list of potential transmission paths stored in the object.
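For instance, a minimal sketch using the \code{\link{imdepi}} example data shipped with the package (see also the \dQuote{Examples} below):
\preformatted{
data("imdepi")
imdepi10 <- imdepi[1:10, ]  # subset of the first 10 events
summary(imdepi10)  # the auxiliary ".sources" list has been updated
}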
The \code{[}-method is used in \code{subset.epidataCS}, which is implemented similarly to \code{\link{subset.data.frame}}. The \code{print} method for \code{"epidataCS"} prints some metadata of the epidemic, e.g., the observation period, the dimensions of the spatio-temporal grid, the types of events, and the total number of events. By default, it also prints the first \code{n = 6} rows of the \code{events}. } \value{ An object of class \code{"epidataCS"} is a list containing the following components: \item{events}{a \code{"\linkS4class{SpatialPointsDataFrame}"} (see the description of the argument). The input \code{events} are checked for requirements and sorted chronologically. The columns are in the following order: obligatory event columns, event marks, the columns \code{BLOCK}, \code{start} and endemic covariates copied from \code{stgrid}, and finally, hidden auxiliary columns. The added auxiliary columns are: \describe{ \item{\code{.obsInfLength}}{observed length of the infectious period (possibly truncated at \code{T}), i.e., \code{pmin(T-time, eps.t)}.} \item{\code{.sources}}{a list of numeric vectors of potential sources of infection (w.r.t. the interaction ranges \code{eps.s} and \code{eps.t}) for each event. Row numbers are used as index.} \item{\code{.bdist}}{minimal distance of the event locations to the polygonal boundary \code{W}.} \item{\code{.influenceRegion}}{a list of influence regions represented by objects of the \pkg{spatstat.geom} class \code{"owin"}. For each event, this is the intersection of \code{W} with a (polygonal) circle of radius \code{eps.s} centered at the event's location, shifted such that the event location becomes the origin. The list has \code{nCircle2Poly} set as an attribute.} } } \item{stgrid}{a \code{data.frame} (see description of the argument). The spatio-temporal grid of endemic covariates is sorted by time interval (indexed by the added variable \code{BLOCK}) and region (\code{tile}). It is a full \code{BLOCK} x \code{tile} grid.} \item{W}{a \code{"\linkS4class{SpatialPolygons}"} object representing the observation region.} \item{qmatrix}{see the above description of the argument. The \code{\link{storage.mode}} of the indicator matrix is set to logical and the \code{dimnames} are set to the levels of the event types.} The \code{nobs}-method returns the number of events. The \code{head} and \code{tail} methods subset the epidemic data using the extraction method (\code{[}), i.e. they return an object of class \code{"epidataCS"}, which only contains (all but) the first/last \code{n} events. For the \code{"epidataCS"} class, the method of the generic function \code{\link[spatstat.geom]{marks}} defined by the \pkg{spatstat.geom} package returns a \code{data.frame} of the event marks (actually also including time and location of the events), disregarding endemic covariates and the auxiliary columns from the \code{events} component of the \code{"epidataCS"} object. The \code{summary} method (which in turn has a \code{print} method) returns a list of metadata, event data, the tables of tiles and types, a step function of the number of infectious individuals over time (\code{$counter}), i.e., the result of the \code{\link{as.stepfun}}-method for \code{"epidataCS"}, and the number of potential sources of transmission for each event (\code{$nSources}), which is based on the given maximum interaction ranges \code{eps.t} and \code{eps.s}.
} \note{ Since the observation region \code{W} defines the integration domain in the point process likelihood, the more detailed the polygons of \code{W} are, the longer it will take to fit a \code{\link{twinstim}}. You are advised to sacrifice some shape details for speed by reducing the polygon complexity, for example via the \command{mapshaper} JavaScript library wrapped by the R package \CRANpkg{rmapshaper}. Alternative tools are provided by the packages \CRANpkg{maptools} (\code{\link[maptools]{thinnedSpatialPoly}}) and \CRANpkg{spatstat.geom} (\code{\link[spatstat.geom]{simplify.owin}}). } \references{ Meyer, S., Elias, J. and H\enc{ö}{oe}hle, M. (2012): A space-time conditional intensity model for invasive meningococcal disease occurrence. \emph{Biometrics}, \bold{68}, 607-616. \doi{10.1111/j.1541-0420.2011.01684.x} Meyer, S., Held, L. and \enc{Höhle}{Hoehle}, M. (2017): Spatio-temporal analysis of epidemic phenomena using the \R package \pkg{surveillance}. \emph{Journal of Statistical Software}, \bold{77} (11), 1-55. \doi{10.18637/jss.v077.i11} } \author{ Sebastian Meyer Contributions to this documentation by Michael H\enc{ö}{oe}hle and Mayeul Kauffmann. } \seealso{ \code{vignette("twinstim")}. \code{\link{plot.epidataCS}} for plotting, and \code{\link{animate.epidataCS}} for the animation of such an epidemic. There is also an \code{\link[=update.epidataCS]{update}} method for the \code{"epidataCS"} class. To re-extract the \code{events} point pattern from \code{"epidataCS"}, use \code{as(object, "SpatialPointsDataFrame")}. It is possible to convert an \code{"epidataCS"} point pattern to an \code{"\link{epidata}"} object (\code{\link{as.epidata.epidataCS}}), or to aggregate the events into an \code{"\linkS4class{sts}"} object (\code{\link{epidataCS2sts}}).
} \examples{ ## load "imdepi" example data (which is an object of class "epidataCS") data("imdepi") ## print and summary print(imdepi, n=5, digits=2) print(s <- summary(imdepi)) plot(s$counter, # same as 'as.stepfun(imdepi)' xlab = "Time [days]", ylab="Number of infectious individuals", main=paste("Time course of the number of infectious individuals", "assuming an infectious period of 30 days", sep="\n")) plot(table(s$nSources), xlab="Number of \"close\" infective individuals", ylab="Number of events", main=paste("Distribution of the number of potential sources", "assuming an interaction range of 200 km and 30 days", sep="\n")) ## the summary object contains further information str(s) ## a histogram of the spatial distances to potential source events ## (i.e., to events of the previous eps.t=30 days within eps.s=200 km) sourceDists_space <- getSourceDists(imdepi, "space") hist(sourceDists_space); rug(sourceDists_space) ## internal structure of an "epidataCS"-object str(imdepi, max.level=4) ## see help("imdepi") for more info on the data set ## extraction methods subset the 'events' component ## (thereby taking care of the validity of the epidataCS object, ## for instance the hidden auxiliary column .sources) imdepi[101:200,] tail(imdepi, n=4) # reduce the epidemic to the last 4 events subset(imdepi, type=="B") # only consider event type B ## see help("plot.epidataCS") for convenient plot-methods for "epidataCS" ### ### reconstruct the "imdepi" object ### ## observation region load(system.file("shapes", "districtsD.RData", package="surveillance"), verbose = TRUE) summary(stateD) ## extract point pattern of events from the "imdepi" data data(imdepi) events <- marks(imdepi) # data frame with coordinate columns coordinates(events) <- c("x", "y") # promote to a "SpatialPointsDataFrame" #proj4string(events) <- proj4string(stateD) events@proj4string <- stateD@proj4string # exact copy (avoid CRS reformatting) ## or, much simpler, use the corresponding coerce-method \dontshow{ events@coords.nrs <- numeric(0L) stopifnot(all.equal(as(imdepi, "SpatialPointsDataFrame"), events)) } events <- as(imdepi, "SpatialPointsDataFrame") summary(events) ## plot observation region with events plot(stateD, axes=TRUE); title(xlab="x [km]", ylab="y [km]") points(events, pch=unclass(events$type), cex=0.5, col=unclass(events$type)) legend("topright", legend=levels(events$type), title="Type", pch=1:2, col=1:2) ## space-time grid with endemic covariates head(stgrid <- imdepi$stgrid[,-1]) ## reconstruct the "imdepi" object from its components myimdepi <- as.epidataCS(events = events, stgrid = stgrid, W = stateD, qmatrix = diag(2), nCircle2Poly = 16) \dontrun{ ## This reconstructed object is equal to 'imdepi' as long as the internal ## structures of the embedded classes ("owin", "SpatialPolygons", ...), and ## the calculation of the influence regions by "polyclip" have not changed: stopifnot(all.equal(imdepi, myimdepi)) } } \keyword{spatial} \keyword{classes} \keyword{manip} surveillance/man/twinstim_siaf.Rd0000644000176200001440000001253612665561746016721 0ustar liggesusers\name{twinstim_siaf} \alias{siaf} \title{ Spatial Interaction Function Objects } \description{ A spatial interaction function for use in \code{\link{twinstim}} can be constructed via the \code{siaf} function. It checks the supplied function elements, assigns defaults for missing arguments, and returns all checked arguments in a list. 
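For illustration, a minimal sketch of such a construction: an (unnormalized) isotropic Gaussian kernel with a single log-standard-deviation parameter, where the integration routine \code{F} is left unspecified so that the \code{\link[polyCub]{polyCub}}-based default applies:
\preformatted{
mysiaf <- siaf(
    f = function (s, pars)  # s is a two-column coordinate matrix
        exp(-rowSums(s^2) / (2*exp(2*pars))),
    npars = 1L  # the single parameter is log(sigma)
)
}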
However, for standard applications it is much easier to use one of the pre-defined spatial interaction functions, e.g., \code{\link{siaf.gaussian}}. } \usage{ siaf(f, F, Fcircle, effRange, deriv, Deriv, simulate, npars, validpars = NULL) } \arguments{ \item{f}{the spatial interaction function. It must accept two arguments, the first one being a (2-column) coordinate matrix, the second one a parameter vector. For marked \code{twinstim}, it must accept the type of the event (integer code) as its third argument (either a single type for all locations or separate types for each location).} \item{F}{function computing the integral of \eqn{f(s)} (passed as second argument) over a polygonal \code{"owin"} domain (first argument). The third and fourth argument are the parameter vector and the (\emph{single}) type, respectively. There may be additional arguments, which can then be specified in the \code{control.siaf$F} argument list of \code{twinstim}. If the \code{F} function is missing, a general default (\code{\link[polyCub]{polyCub}}) will be used, with extra arguments \code{method} (default: \code{"SV"}) and corresponding accuracy parameters.} \item{Fcircle}{optional function for fast calculation of the (two-dimensional) integral of \eqn{f(s)} over a circle with radius \code{r} (first argument). Further arguments are as for \code{f}. It must not be vectorized (will always be called with single radius and a single type). If this function is specified, integration of the \code{siaf} over the spatial influence region of an event will be faster if the region is actually circular. This is the case if the event is located at least a distance \code{eps.s} from the border of the observation region \code{W}, or if the distance to the border is larger than the effective integration range (if specified, see \code{effRange} below).} \item{effRange}{optional function returning the \dQuote{effective} range of \eqn{f(s)} for the given set of parameters (the first and only argument) such that the circle with radius \code{effRange} contains the numerically essential proportion of the integral mass. For the Gaussian kernel the default is \code{function (logsd) 6*exp(logsd)}. The return value must be a vector of length \code{nTypes} (effective range for each type). This function is only used if \code{Fcircle} is also specified.} \item{deriv}{optional derivative of \eqn{f(s)} \emph{with respect to the parameters}. It takes the same arguments as \code{f} but returns a matrix with as many rows as there were coordinates in the input and \code{npars} columns. This derivative is necessary for the calculation of the score function in \code{twinstim()}, which is advantageous for the numerical log-likelihood maximization.} \item{Deriv}{function computing the integral of \code{deriv} (passed as second argument) over a polygonal \code{"owin"} domain (first argument). The return value is thus a vector of length \code{npars}. The third argument is the parameter vector and the fourth argument is a (\emph{single}) type and must be named \code{type}. There may be additional arguments, which can then be specified in the \code{control.siaf$Deriv} argument list of \code{twinstim}. If the \code{Deriv} function is missing, a general default (\code{\link[polyCub]{polyCub}}) will be used, with extra arguments \code{method} (default: \code{"SV"}) and corresponding accuracy parameters.} \item{simulate}{optional function returning a sample drawn from the spatial kernel (only required for the simulation of \code{twinstim} models). 
Its first argument is the size of the sample to generate, next the parameter vector, an optional single event type, and an optional upper bound for the radius within which to simulate points. The function must return a two-column \emph{matrix} of the sampled locations. Note that the simulation method actually samples only one location at a time, thus it is sufficient to have a working \code{function(n=1, pars, type, ub)}. } \item{npars}{the number of parameters of the spatial interaction function \code{f} (i.e. the length of its second argument).} \item{validpars}{ optional function taking one argument, the parameter vector, indicating if it is valid. This approach to specify parameter constraints is rarely needed, because usual box-constrained parameters can be taken into account by using L-BFGS-B as the optimization method in \code{twinstim} (with arguments \code{lower} and \code{upper}), and positivity constraints by using log-parametrizations. This component is not necessary (and ignored) if \code{npars == 0}. } } \value{ list of checked arguments. } \author{ Sebastian Meyer } \seealso{ \code{\link{siaf.gaussian}} for a pre-defined spatial interaction function, and \code{\link{tiaf}} for the temporal interaction function. } \keyword{utilities} surveillance/man/fluBYBW.Rd0000644000176200001440000000447313174706302015275 0ustar liggesusers\name{fluBYBW} \alias{fluBYBW} \docType{data} \title{Influenza in Southern Germany} \description{ Weekly number of influenza A & B cases in the 140 districts of the two Southern German states Bavaria and Baden-Wuerttemberg, for the years 2001 to 2008. These surveillance data have been analyzed originally by Paul and Held (2011) and more recently by Meyer and Held (2014). } \usage{data(fluBYBW)} \format{ An \code{sts} object containing \eqn{416\times 140}{416 x 140} observations starting from week 1 in 2001. The \code{population} slot contains the population fractions of each district at 31.12.2001, obtained from the Federal Statistical Office of Germany. The \code{map} slot contains an object of class \code{"\linkS4class{SpatialPolygonsDataFrame}"}. } \source{ Robert Koch-Institut: SurvStat: \url{https://survstat.rki.de/}; Queried on 6 March 2009. } \note{ Prior to \pkg{surveillance} version 1.6-0, \code{data(fluBYBW)} contained a redundant last row (417) filled with zeroes only. } \examples{ data("fluBYBW") # Count time series plot plot(fluBYBW, type = observed ~ time) # Map of disease incidence (per 100000 inhabitants) for the year 2001 plot(fluBYBW, type = observed ~ unit, tps = 1:52, total.args = list(), population = fluBYBW@map$X31_12_01 / 100000) # the overall rate for 2001 shown in the bottom right corner is sum(observed(fluBYBW[1:52,])) / sum(fluBYBW@map$X31_12_01) * 100000 \dontrun{ # Generating an animation takes a while. # Here we take the first 20 weeks of 2001 (runtime: ~3 minutes). # The full animation is available in Supplement A of Meyer and Held (2014) if (require("animation")) { oldwd <- setwd(tempdir()) # to not clutter up the current working dir saveHTML(animate(fluBYBW, tps = 1:20), title="Evolution of influenza in Bayern and Baden-Wuerttemberg", ani.width=500, ani.height=600) setwd(oldwd) } } } \references{ Paul, M. and Held, L. (2011) Predictive assessment of a non-linear random effects model for multivariate time series of infectious disease counts. Statistics in Medicine, \bold{30}, 1118-1136. Meyer, S. and Held, L. (2014): Power-law models for infectious disease spread. 
\emph{The Annals of Applied Statistics}, \bold{8} (3), 1612-1639. \doi{10.1214/14-AOAS743} } \keyword{datasets} surveillance/man/polyAtBorder.Rd0000644000176200001440000000320712437341450016423 0ustar liggesusers\name{polyAtBorder} \alias{polyAtBorder} \title{Indicate Polygons at the Border} \description{ Determines which polygons of a \code{"\linkS4class{SpatialPolygons}"} object are at the border, i.e. have coordinates in common with the spatial union of all polygons (constructed using \code{\link{unionSpatialPolygons}}). } \usage{ polyAtBorder(SpP, snap = sqrt(.Machine$double.eps), method = "rgeos", ...) } \arguments{ \item{SpP}{ an object of class \code{"\linkS4class{SpatialPolygons}"}. } \item{snap}{ tolerance used to consider coordinates as identical. } \item{method}{method to use for \code{\link{unionSpatialPolygons}}. Defaults to \code{"rgeos"}, since \pkg{polyclip} uses integer arithmetic, which causes rounding errors usually requiring tuning of (i.e., increasing) the tolerance parameter \code{snap} (see example below).} \item{\dots}{further arguments passed to the chosen \code{method}.} } \value{ logical vector of the same length as \code{SpP} also inheriting its \code{row.names}. } \author{ Sebastian Meyer } \examples{ ## Load districts of Germany load(system.file("shapes", "districtsD.RData", package = "surveillance")) ## Determine districts at the border and check the result on the map if (requireNamespace("rgeos")) { atBorder <- polyAtBorder(districtsD, method = "rgeos") plot(districtsD, col = atBorder) } ## For method = "polyclip", a higher snapping tolerance is required ## to obtain the correct result if (requireNamespace("polyclip")) { atBorder <- polyAtBorder(districtsD, snap = 1e-6, method = "polyclip") plot(districtsD, col = atBorder) } } \keyword{spatial} surveillance/man/sts_ggplot.Rd0000644000176200001440000000476314026737231016214 0ustar liggesusers\name{sts_ggplot} \alias{autoplot.sts} \title{ Time-Series Plots for \code{"sts"} Objects Using \pkg{ggplot2} } \description{ A simple \CRANpkg{ggplot2} variant of \code{\link{stsplot_time}}, based on a \dQuote{tidy} version of the \code{"sts"} object via \code{\link{tidy.sts}}. It uses a date axis and thus only works for time series indexed by dates or with a standard frequency (daily, weekly, or monthly). } \usage{ autoplot.sts(object, population = FALSE, units = NULL, as.one = FALSE, scales = "fixed", width = NULL, ...) } \arguments{ \item{object}{an object of class \code{"\linkS4class{sts}"}.} \item{population}{logical indicating whether \code{observed(object)} should be divided by \code{population(object)}. The \code{population} argument can also be a scalar, which is used to scale the denominator \code{population(object)}, i.e., \code{observed(object)} is divided by \code{population(object) / population}. For instance, if \code{population(object)} contains raw population numbers, \code{population = 1000} could be used to plot the incidence per 1000 inhabitants.} \item{units}{optional integer or character vector to select the units (=columns of \code{object}) to plot. The default (\code{NULL}) is to plot all time series.} \item{as.one}{logical indicating if all time series should be plotted in one panel with \code{\link[ggplot2]{geom_line}}. By default, the time series are plotted in separate panels (using \code{\link[ggplot2]{geom_col}}).} \item{scales}{passed to \code{\link[ggplot2]{facet_wrap}} (for \code{as.one=FALSE}). 
By default, all panels use a common \code{ylim} (and \code{xlim}).} \item{width}{bar width, passed to \code{\link[ggplot2]{geom_col}}. Defaults to 7 for weekly time series.} \item{\dots}{unused (argument of the generic).} } \value{ a \code{"ggplot"} object. } \author{ Sebastian Meyer } \seealso{ \code{\link{stsplot_time}} for the traditional plots. } \examples{ ## compare traditional plot() with ggplot2-based autoplot.sts() if (requireNamespace("ggplot2")) { data("measlesDE") plot(measlesDE) autoplot.sts(measlesDE) } ## weekly incidence: population(measlesDE) gives population fractions, ## which we need to multiply by the total population if (surveillance.options("allExamples") && require("ggplot2")) { autoplot.sts(measlesDE, population = 1000000/82314906) + ylab("Weekly incidence [per 1'000'000 inhabitants]") } } \keyword{hplot} \keyword{ts} surveillance/man/hhh4_update.Rd0000644000176200001440000000663413162205245016216 0ustar liggesusers\name{hhh4_update} \alias{update.hhh4} \title{ \code{update} a fitted \code{"hhh4"} model } \description{ Re-fit a \code{"\link{hhh4}"} model with a modified \code{control} list. } \usage{ \method{update}{hhh4}(object, ..., S = NULL, subset.upper = NULL, use.estimates = object$convergence, evaluate = TRUE) } \arguments{ \item{object}{ a fitted \code{"hhh4"} model. Non-convergent fits can be updated as well. } \item{\dots}{ components modifying the original control list for \code{\link{hhh4}}. Modifications are performed by \code{\link{modifyList}(object$control, list(...))}. } \item{S}{ a named list of numeric vectors serving as argument for \code{\link{addSeason2formula}}, or \code{NULL} (meaning no modification of seasonal terms). This argument provides a convenient way of changing the number of harmonics in the \code{f}ormulae of the model components \code{"ar"}, \code{"ne"} and \code{"end"} (to be used as names of the list). Non-specified components are not touched. Updating the \code{i}'th component's \code{f}ormula works by first dropping all sine and cosine terms and then applying \code{addSeason2formula} with arguments \code{S = S[[i]]} and \code{period = object$stsObj@freq}. Note that this step of updating seasonality is processed after modification of the \code{control} list by the \code{\dots} arguments. } \item{subset.upper}{ if a scalar value, refit the model to the data up to the time index given by \code{subset.upper}. The lower time index remains unchanged, i.e., \code{control$subset[1]:subset.upper} is used as the new \code{subset}. This argument is used by \code{\link{oneStepAhead}}. } \item{use.estimates}{ logical specifying if \code{coef(object)} should be used as starting values for the new fit (which is the new default since \pkg{surveillance} 1.8-2, in case the original fit has converged). This works by matching names against the coefficients of the new model. Extra coefficients no longer in the model are silently ignored. Setting \code{use.estimates = FALSE} means to re-use the previous start specification \code{object$control$start}.\cr Note that coefficients can also receive initial values from an extra \code{start} argument in the update call (as in \code{\link{hhh4}}), which then takes precedence over \code{coef(object)}. } \item{evaluate}{ logical indicating if the updated model should be fitted directly (defaults to \code{TRUE}). Otherwise, the updated \code{control} list is returned. } } \value{ If \code{evaluate = TRUE} the re-fitted object, otherwise the updated \code{control} list for \code{\link{hhh4}}. 
} \author{ Sebastian Meyer } \seealso{ \code{\link{hhh4}} } \examples{ data("salmonella.agona") ## convert to sts class salmonella <- disProg2sts(salmonella.agona) ## fit a basic model fit0 <- hhh4(salmonella, list(ar = list(f = ~1), end = list(f = addSeason2formula(~t)))) ## update: Poisson -> NegBin1, component seasonality fit1 <- update(fit0, family = "NegBin1", S = list(end=2, ar=2)) ## compare fits AIC(fit0, fit1) opar <- par(mfrow=c(2,2)) plot(fit0, type="fitted", names="fit0", par.settings=NULL) plot(fit1, type="fitted", names="fit1", par.settings=NULL) plot(fit0, fit1, type="season", components=c("end", "ar"), par.settings=NULL) par(opar) } \keyword{models} \keyword{methods} surveillance/man/twinstim_step.Rd0000644000176200001440000000365713165517635016750 0ustar liggesusers\name{twinstim_step} \alias{stepComponent} \alias{add1.twinstim} \alias{drop1.twinstim} \title{ Stepwise Model Selection by AIC } \description{ \code{stepComponent} is a wrapper around \code{\link{step}} to select a \code{"\link{twinstim}"} component's model based on an information criterion in a stepwise algorithm. There are also stand-alone single-step methods of \code{\link{add1}} and \code{\link{drop1}}. } \usage{ stepComponent(object, component = c("endemic", "epidemic"), scope = list(upper = object$formula[[component]]), direction = "both", trace = 2, verbose = FALSE, ...) \method{add1}{twinstim}(object, scope, component = c("endemic", "epidemic"), trace = 2, ...) \method{drop1}{twinstim}(object, scope, component = c("endemic", "epidemic"), trace = 2, ...) } \arguments{ \item{object}{an object of class \code{"twinstim"}.} \item{component}{one of \code{"endemic"} or \code{"epidemic"} (partially matched), determining the model component where the algorithm should proceed.} \item{scope,direction,trace}{see \code{\link{step}} and \code{\link{add1}}, respectively.} \item{verbose}{see \code{\link{twinstim}}.} \item{\dots}{further arguments passed to \code{\link{step}}, \code{\link{add1.default}}, or \code{\link{drop1.default}}, respectively.} } \value{ See \code{\link{step}} and \code{\link{add1}}, respectively. } \author{ (of this wrapper around \code{\link{step}}) Sebastian Meyer } \seealso{ \code{\link{step}}, \code{\link{add1}}, \code{\link{drop1}} } \examples{ data("imdepi", "imdepifit") ## simple baseline model m0 <- update(imdepifit, epidemic=~1, siaf=NULL) ## AIC-based step-wise backward selection of the endemic component m0_step <- stepComponent(m0, "endemic", scope=list(lower=~I(start/365-3.5))) ## nothing is dropped from the model \dontshow{ m0_step$anova <- NULL stopifnot(identical(m0, m0_step)) } } \keyword{models} \keyword{methods} surveillance/man/algo.outbreakP.Rd0000644000176200001440000001215513757504671016710 0ustar liggesusers\encoding{latin1} \name{algo.outbreakP} \alias{algo.outbreakP} \alias{calc.outbreakP.statistic} \title{Semiparametric surveillance of outbreaks} \description{ Frisen and Andersson (2009) method for semiparametric surveillance of outbreaks } \usage{ algo.outbreakP(disProgObj, control = list(range = range, k=100, ret=c("cases","value"),maxUpperboundCases=1e5)) } \arguments{ \item{disProgObj}{object of class disProg (including the observed and the state chain).} \item{control}{A list controlling the behaviour of the algorithm \describe{ \item{\code{range}}{determines the desired time-points which should be monitored. Note that it is automatically assumed that ALL other values in \code{disProgObj} can be used for the estimation, i.e. 
for a specific value \code{i} in \code{range} all values from 1 to \code{i} are used for estimation.} \item{\code{k}}{The threshold value. Once the outbreak statistic is above this threshold \code{k}, an alarm is sounded.} \item{\code{ret}}{a string specifying the type of \code{upperbound}-statistic that is returned: with \code{"cases"} the number of cases that would have been necessary to produce an alarm (NNBA) is computed, with \code{"value"} the outbreakP-statistic itself (see below).} \item{\code{maxUpperboundCases}}{Upperbound when numerically searching for NNBA. Default is 1e5.} } } } \value{ \code{algo.outbreakP} gives a list of class \code{survRes} which includes the vector of alarm values and the vector of threshold values for every time-point in \code{range}. } \details{ A generalized likelihood ratio test based on the Poisson distribution is implemented where the means of the in-control and out-of-control states are computed by isotonic regression. \deqn{OutbreakP(s) = \prod_{t=1}^s \left( \frac{\hat{\mu}^{C1}(t)}{\hat{\mu}^D(t)} \right)^{x(t)}} where \eqn{\hat{\mu}^{C1}(t)} is the estimated mean obtained by uni-modal regression under the assumption of one change-point and \eqn{\hat{\mu}^D(t)} is the estimated result when there is no change-point (i.e. this is just the mean of all observations). Note that the contrasted hypotheses assume that all means are equal until the change-point, i.e. this detection method is especially suited for detecting a shift away from a relatively constant mean. Hence, it is less suited for detection in diseases with a strong seasonal endemic component; detecting the onset of influenza is an example where this method works particularly well. In case \code{control$ret == "cases"}, a brute-force numerical search for the number needed before alarm (NNBA) is performed. That is, given the past observations, what is the minimum count that would have caused an alarm? Note: Computing this might take a while because the search is done by sequentially increasing/decreasing the last observation by one for each time point in \code{control$range} and then calling the workhorse function of the algorithm again. The argument \code{control$maxUpperboundCases} controls the upper limit of this search (default is 1e5). Currently, even though the statistic has passed the threshold, the NNBA is still computed. After a few time instances, it typically happens that an alarm would result at a time point no matter the observed value; in this case the value of NNBA is set to \code{NA}. Furthermore, the first time point is always \code{NA}, unless \code{k<1}. } \source{ The code is an extended R port of the Java code by Marianne \enc{Frisén}{Frisen} and Linus \enc{Schiöler}{Schioeler} from the CASE project available under the GNU GPL License v3. See \url{https://case.folkhalsomyndigheten.se/} for further details on the CASE project. %% A manual on how to use an Excel implementation of the method %% is available at \url{http://economics.handels.gu.se/english/Units+and+Centra/statistical_research_unit/software}. An additional feature of the R code is that it contains a search for NNBA (see details). } \author{M. \enc{Höhle}{Hoehle} -- based on Java code by M. \enc{Frisén}{Frisen} and L. \enc{Schiöler}{Schioeler}} \references{ \enc{Frisén}{Frisen}, M., Andersson, E. and \enc{Schiöler}{Schioeler}, L., (2009), Robust outbreak surveillance of epidemics in Sweden, Statistics in Medicine, 28(3):476-493. \enc{Frisén}{Frisen}, M.
and Andersson, E., (2009) Semiparametric Surveillance of Monotonic Changes, Sequential Analysis 28(4):434-454. } \examples{ #Use data from outbreakP manual (http://www.hgu.gu.se/item.aspx?id=16857) y <- matrix(c(1,0,3,1,2,3,5,4,7,3,5,8,16,23,33,34,48),ncol=1) #Generate sts object with these observations mysts <- sts(y, alarm=y*0) #Run the algorithm and present results #Only the value of outbreakP statistic upperbound(outbreakP(mysts, control=list(range=1:length(y),k=100, ret="value"))) #Graphical illustration with number-needed-before-alarm (NNBA) upperbound. res <- outbreakP(mysts, control=list(range=1:length(y),k=100, ret="cases")) plot(res,dx.upperbound=0,lwd=c(1,1,3),legend.opts=list(legend=c("Infected", "NNBA","Outbreak","Alarm"),horiz=TRUE)) } \keyword{classif} surveillance/man/epidata_summary.Rd0000644000176200001440000000561613433274256017205 0ustar liggesusers\name{epidata_summary} \alias{summary.epidata} \alias{print.summary.epidata} \title{ Summarizing an Epidemic } \description{ The \code{\link{summary}} method for \code{\link{class}} \code{"\link{epidata}"} gives an overview of the epidemic. Its \code{\link{print}} method shows the type of the epidemic, the time range, the total number of individuals, the initially and never infected individuals and the size of the epidemic. An excerpt of the returned \code{counters} data frame is also printed (see the Value section below). } \usage{ \method{summary}{epidata}(object, ...) \method{print}{summary.epidata}(x, ...) } \arguments{ \item{object}{an object inheriting from class \code{"epidata"}.} \item{x}{an object inheriting from class \code{"summary.epidata"}, i.e. an object returned by the function \code{summary.epidata}.} \item{\dots}{unused (argument of the generic).} } \value{ A list with the following components: \item{type}{ character string. Compartmental type of the epidemic, i.e. one of "SIR", "SI", "SIS" or "SIRS". } \item{size}{ integer. Size of the epidemic, i.e. the number of initially susceptible individuals that became infected during the course of the epidemic. } \item{initiallyInfected}{ factor (with the same levels as the \code{id} column in the \code{"epidata"} object). Set of initially infected individuals. } \item{neverInfected}{ factor (with the same levels as the \code{id} column in the \code{"epidata"} object). Set of never infected individuals, i.e. individuals that were neither initially infected nor infected during the course of the epidemic. } \item{coordinates}{ numeric matrix of individual coordinates with as many rows as there are individuals and one column for each spatial dimension. The row names of the matrix are the \code{id}s of the individuals. } \item{byID}{ data frame with time points of infection and optionally removal and re-susceptibility (depending on the \code{type} of the epidemic) ordered by \code{id}. If an event was not observed, the corresponding entry is missing. } \item{counters}{ data frame containing all events (S, I and R) ordered by time. The columns are \code{time}, \code{type} (of event), the corresponding \code{id} and the three counters \code{nSusceptible}, \code{nInfectious} and \code{nRemoved}. The first row additionally shows the counters at the beginning of the epidemic, where the \code{type} and \code{id} columns contain missing values. } } \author{ Sebastian Meyer } \seealso{ \code{\link{as.epidata}} for generating objects of class \code{"epidata"}.
} \examples{ data("hagelloch") s <- summary(hagelloch) s # uses the print method for summary.epidata names(s) # components of the list 's' # positions of the individuals plot(s$coordinates) # events by id head(s$byID) } \keyword{methods} surveillance/man/clapply.Rd0000644000176200001440000000124613117527513015464 0ustar liggesusers\name{clapply} \alias{clapply} \title{ Conditional \code{lapply} } \description{ Use \code{\link{lapply}} if the input is a list and otherwise apply the function directly to the input \emph{and} wrap the result in a list. The function is implemented as \preformatted{ if (is.list(X)) lapply(X, FUN, ...) else list(FUN(X, ...)) } } \usage{ clapply(X, FUN, ...) } \arguments{ \item{X}{a list or a single \code{R} object on which to apply \code{FUN}.} \item{FUN}{the function to be applied to (each element of) \code{X}.} \item{\dots}{optional arguments to \code{FUN}.} } \value{ a list (of length 1 if \code{X} is not a list). } \keyword{iteration} \keyword{list} surveillance/man/meningo.age.Rd0000644000176200001440000000162313122471774016211 0ustar liggesusers\name{meningo.age} \alias{meningo.age} \docType{data} \title{Meningococcal infections in France 1985-1995} \description{ Monthly counts of meningococcal infections in France 1985-1995. Here, the data is split into 4 age groups (<1, 1-5, 5-20, >20). } \usage{data(meningo.age)} \format{ An object of class disProg with 156 observations in each one of 4 age groups. \describe{ \item{week}{Number of month} \item{observed}{Matrix with number of counts in the corresponding month and age group} \item{state}{Boolean whether there was an outbreak -- dummy not implemented} \item{neighbourhood}{Neighbourhood matrix, all age groups are adjacent} \item{populationFrac}{Population fractions} } } \source{ ?? } \examples{ data(meningo.age) plot(meningo.age, title="Meningococcal infections in France 1985-95") plot(meningo.age, as.one=FALSE) } \keyword{datasets} surveillance/man/surveillance-deprecated.Rd0000644000176200001440000000103213431363065020602 0ustar liggesusers\name{surveillance-deprecated} \title{Deprecated Functions in Package \pkg{surveillance}} \alias{surveillance-deprecated} \alias{qlomax} \description{ The functions listed here are provided for compatibility with older versions of \pkg{surveillance} only, and may be defunct as soon as of the next release. } \usage{ qlomax(p, scale, shape) } \arguments{ \item{p}{vector of probabilities.} \item{scale}{positive scale parameter.} \item{shape}{positive shape parameter.} } \seealso{ \code{\link{Deprecated}} } \keyword{misc} surveillance/man/twinstim_update.Rd0000644000176200001440000000504513165520251017234 0ustar liggesusers\name{twinstim_update} \alias{update.twinstim} \title{ \code{update}-method for \code{"twinstim"} } \description{ Update and (by default) re-fit a \code{"twinstim"}. This method is especially useful if one wants to add the \code{model} environment (which is required for some methods) to a fitted model object a posteriori. } \usage{ \method{update}{twinstim}(object, endemic, epidemic, control.siaf, optim.args, model, ..., use.estimates = TRUE, evaluate = TRUE) } \arguments{ \item{object}{a previous \code{"twinstim"} fit.} \item{endemic, epidemic}{changes to the formulae -- see \code{\link{update.formula}} and \code{\link{twinstim}}.} \item{control.siaf}{a list (see \code{\link{twinstim}}) to replace the given elements in the original \code{control.siaf} list. 
If \code{NULL}, the original list of control arguments is removed from the call, i.e., the defaults are used in \code{twinstim}.} \item{optim.args}{see \code{\link{twinstim}}. If a list, it will modify the original \code{optim.args} using \code{\link{modifyList}}.} \item{model}{see \code{\link{twinstim}}. If this is the only argument to update, re-fitting is cleverly circumvented. Enriching the fit by the model environment is, e.g., required for \code{\link{intensityplot.twinstim}}.} \item{\dots}{Additional arguments to the call, or arguments with changed values.\cr If \code{start} values are specified, they need to be in the same format as in the original call \code{object$call$start}, which is either a named list of named numeric vectors or a named numeric vector; see the argument description in \code{\link{twinstim}}.} \item{use.estimates}{logical indicating if the estimates of \code{object} should be used as initial values for the new fit (in the \code{start} argument of \code{twinstim}). Defaults to \code{TRUE}.} \item{evaluate}{If \code{TRUE} (default), evaluate the new call, else return the call.} } \value{ If \code{evaluate = TRUE} the re-fitted object, otherwise the updated call. } \author{ Sebastian Meyer Inspiration and some pieces of code originate from \code{\link{update.default}} by the R Core Team. } \seealso{ \code{\link{update.default}} } \examples{ data("imdepi", "imdepifit") ## add another epidemic covariate ## (but fix siaf-parameter so that this example runs quickly) imdepifit2 <- update(imdepifit, epidemic = ~. + log(popdensity), optim.args = list(fixed="e.siaf.1")) ## compare by AIC AIC(imdepifit, imdepifit2) } \keyword{models} \keyword{methods} surveillance/man/m1.Rd0000644000176200001440000000433613431363065014337 0ustar liggesusers\name{m1} \alias{m1} \alias{h1_nrwrp} \alias{k1} \alias{m2} \alias{m3} \alias{m4} \alias{m5} \alias{n1} \alias{n2} \alias{q1_nrwh} \alias{q2} \alias{s1} \alias{s2} \alias{s3} \docType{data} \encoding{latin1} \title{RKI SurvStat Data} \description{ 14 datasets for different diseases, covering the period from 2001 to the 3rd quarter of 2004 and including their defined outbreaks.
\itemize{ \item \code{m1} 'Masern' in the 'Landkreis Nordfriesland' (Germany, Schleswig-Holstein) \item \code{m2} 'Masern' in the 'Stadt- und Landkreis Coburg' (Germany, Bayern) \item \code{m3} 'Masern' in the 'Kreis Leer' (Germany, Niedersachsen) \item \code{m4} 'Masern' in the 'Stadt- und Landkreis Aachen' (Germany, Nordrhein-Westfalen) \item \code{m5} 'Masern' in the 'Stadt Verden' (Germany, Niedersachsen) \item \code{q1\_nrwh} 'Q-Fieber' in the 'Hochsauerlandkreis' (Germany, Westfalen) and in the 'Landkreis Waldeck-Frankenberg' (Germany, Hessen) \item \code{q2} 'Q-Fieber' in '\enc{München}{Muenchen}' (Germany, Bayern) \item \code{s1} 'Salmonella Oranienburg' in Germany \item \code{s2} 'Salmonella Agona' in 12 'Bundesl\enc{ä}{ae}ndern' of Germany \item \code{s3} 'Salmonella Anatum' in Germany \item \code{k1} 'Kryptosporidiose' in Germany, 'Baden-W\enc{ü}{ue}rttemberg' \item \code{n1} 'Norovirus' in 'Stadtkreis Berlin Mitte' (Germany, Berlin) \item \code{n2} 'Norovirus' in 'Torgau-Oschatz' (Germany, Sachsen) \item \code{h1\_nrwrp} 'Hepatitis A' in 'Oberbergischer Kreis, Olpe, Rhein-Sieg-kreis' (Germany, Nordrhein-Westfalen) and 'Siegenwittgenstein Altenkirchen' (Germany, Rheinland-Pfalz) } } \usage{data(m1)} \format{ \code{disProg} objects each containing 209 observations (weekly on 52 weeks) \describe{ \item{observed}{Number of counts in the corresponding week} \item{state}{Boolean whether there was an outbreak.} } } \source{ Robert Koch-Institut: SurvStat: \url{https://survstat.rki.de/}; m1 and m3 were queried on 10 November 2004. The rest during September 2004. } \examples{ data(k1) survResObj <- algo.rki1(k1, control=list(range=27:192)) plot(survResObj, "RKI 1", "k1", firstweek=27, startyear=2002) } \keyword{datasets} surveillance/man/stsNC-class.Rd0000644000176200001440000000421313122471774016155 0ustar liggesusers\name{stsNC-class} \docType{class} \alias{stsNC-class} %New stsNC specific methods \alias{reportingTriangle} \alias{reportingTriangle,stsNC-method} \alias{delayCDF} \alias{delayCDF,stsNC-method} \alias{score} \alias{score,stsNC-method} \alias{predint} \alias{predint,stsNC-method} %Coerce method to convert to sts object \alias{coerce,sts,stsNC-method} \encoding{latin1} \title{Class "stsNC" -- a class inheriting from class \code{sts} which allows the user to store the results of back-projecting surveillance time series} \description{ A class inheriting from class \code{sts}, but with additional slots to store the results of nowcasting. } \section{Slots}{ The slots are as for \code{"\linkS4class{sts}"}. However, a number of additional slots exist. \describe{ \item{\code{reportingTriangle}:}{An array containing the upper and lower limit of the confidence interval.} \item{\code{predPMF}:}{Predictive distribution for each nowcasted time point.} \item{\code{pi}:}{A prediction interval for each nowcasted time point. This is calculated based on \code{predPMF}.} \item{\code{truth}:}{An object of type \code{sts} containing the true number of cases.} \item{\code{delayCDF}:}{List with the CDF of the estimated delay distribution for each method.} \item{\code{SR}:}{Possible output of proper scoring rules} } } \section{Methods}{ The methods are the same as for \code{"\linkS4class{sts}"}. \itemize{ \item{\code{signature(from = "sts", to = "stsNC")}}{ Convert an object of class \code{sts} to class \code{stsNC}. } \item{reportingTriangle}{\code{signature(x = "stsNC")}: extract the \code{reportingTriangle} slot of an \code{stsNC} object.
} \item{delayCDF}{\code{signature(x = "stsNC")}: extract the \code{delayCDF} slot of an \code{stsNC} object. } \item{score}{\code{signature(x = "stsNC")}: extract the scoring rules result slot of an \code{stsNC} object. } \item{predint}{\code{signature(x = "stsNC")}: extract the prediction interval slot of an \code{stsNC} object. } } } \author{M. \enc{Höhle}{Hoehle}} \keyword{classes} surveillance/man/intensityplot.Rd0000644000176200001440000000130712061471523016737 0ustar liggesusers\name{intensityplot} \alias{intensityplot} \title{ Plot Paths of Point Process Intensities } \description{ Generic function for plotting paths of point process intensities. Methods currently defined in package \pkg{surveillance} are for classes \code{"twinSIR"} and \code{"simEpidata"} (temporal), as well as \code{"twinstim"} and \code{"simEpidataCS"} (spatio-temporal). } \usage{ intensityplot(x, ...) } \arguments{ \item{x}{ An object for which an \code{intensityplot} method is defined. } \item{\dots}{ Arguments passed to the corresponding method. } } \seealso{ The methods \code{\link{intensityplot.twinSIR}} and \code{\link{intensityplot.twinstim}}. } \keyword{hplot} surveillance/man/algo.twins.Rd0000644000176200001440000001270612665561746016115 0ustar liggesusers\encoding{latin1} \name{algo.twins} \alias{algo.twins} \title{Model fit based on a two-component epidemic model} \description{ Fits a negative binomial model (as described in Held et al. (2006)) to a univariate time series of counts. } \usage{ algo.twins(disProgObj, control=list(burnin=1000, filter=10, sampleSize=2500, noOfHarmonics=1, alpha_xi=10, beta_xi=10, psiRWSigma=0.25,alpha_psi=1, beta_psi=0.1, nu_trend=FALSE, logFile="twins.log")) } \arguments{ \item{disProgObj}{object of class \code{disProg}} \item{control}{control object: \describe{ \item{\code{burnin}}{Number of burn-in samples.} \item{\code{filter}}{Thinning parameter. If \code{filter = 10}, every 10th sample after the burn-in is returned.} \item{\code{sampleSize}}{Number of returned samples. Total number of samples = \code{burnin}+\code{filter}*\code{sampleSize}} \item{\code{noOfHarmonics}}{Number of harmonics to use in the modelling, i.e. \eqn{L}{L} in (2.2) of Held et al. (2006).} \item{\code{alpha_xi}}{Parameter \eqn{\alpha_{\xi}}{\alpha_\xi} of the hyperprior of the epidemic parameter \eqn{\lambda}{\lambda}} \item{\code{beta_xi}}{Parameter \eqn{\beta_{\xi}}{\beta_\xi} of the hyperprior of the epidemic parameter \eqn{\lambda}{\lambda}} \item{\code{psiRWSigma}}{Starting value for the tuning of the variance of the random walk proposal for the overdispersion parameter \eqn{\psi}{\psi}.} \item{\code{alpha_psi}}{Parameter \eqn{\alpha_{\psi}}{\alpha_\psi} of the prior of the overdispersion parameter \eqn{\psi}{\psi}} \item{\code{beta_psi}}{Parameter \eqn{\beta_{\psi}}{\beta_\psi} of the prior of the overdispersion parameter \eqn{\psi}{\psi}} \item{\code{nu_trend}}{Adjust for a linear trend in the endemic part? (default: \code{FALSE})} \item{\code{logFile}}{Base file name for the output files. The function writes three output files in the current working directory \code{getwd()}.
If \code{logFile = "twins.log"} the results are stored in the three files \file{twins.log}, \file{twins.log2} and \file{twins.log.acc}.\cr \file{twins.log} contains the returned samples of the parameters \eqn{\psi}{\psi}, \eqn{\gamma_{0}}{\gamma_0}, \eqn{\gamma_{1}}{\gamma_1}, \eqn{\gamma_{2}}{\gamma_2}, K, \eqn{\xi_{\lambda}}{\xi_\lambda}, \eqn{\lambda_{1},...,\lambda_{n}}{\lambda_1,...,\lambda_n}, the predictive distribution of the number of cases at time \eqn{n+1} and the deviance.\cr \file{twins.log2} contains the sample means of the variables \eqn{X_{t}, Y_{t}, \omega_{t}}{X_t, Y_t, \omega_t} and the relative frequency of a changepoint at time t for t=1,...,n and the relative frequency of a predicted changepoint at time n+1.\cr \file{twins.log.acc} contains the acceptance rates of \eqn{\psi}{\psi}, the changepoints and the endemic parameters \eqn{\gamma_{0}}{\gamma_0}, \eqn{\gamma_{1}}{\gamma_1}, \eqn{\gamma_{2}}{\gamma_2} in the third column and the variance of the random walk proposal for the update of the parameter \eqn{\psi}{\psi} in the second column.} } } } \details{Note that for the time being this function is not a surveillance algorithm, but only a modelling approach as described in the Held et al. (2006) paper. Note also that the function writes three logfiles in the current working directory \code{getwd()}: \file{twins.log}, \file{twins.log.acc} and \file{twins.log2}. Thus you need to have write permissions in the current working directory. Finally, inspection of the C++ code using valgrind shows some memory leaks when running the old underlying C++ program. As we are unable to fix this impurity at the present time, we have instead put the example code in a 'dontrun' environment. The example code, however, works fine -- the measure is thus more aimed at reducing the number of CRAN problems with the package. } \value{Returns an object of class \code{atwins} with elements \item{control}{specified control object} \item{disProgObj}{specified \code{disProg}-object} \item{logFile}{contains the returned samples of the parameters \eqn{\psi}{\psi}, \eqn{\gamma_{0}}{\gamma_0}, \eqn{\gamma_{1}}{\gamma_1}, \eqn{\gamma_{2}}{\gamma_2}, K, \eqn{\xi_{\lambda}}{\xi_\lambda}, \eqn{\lambda_{1},...,\lambda_{n}}{\lambda_1,...,\lambda_n}, the predictive distribution and the deviance.} \item{logFile2}{contains the sample means of the variables \eqn{X_{t}, Y_{t}, \omega_{t}}{X_t, Y_t, \omega_t} and the relative frequency of a changepoint at time t for t=1,...,n and the relative frequency of a predicted changepoint at time n+1.} } \references{ Held, L., Hofmann, M., \enc{Höhle}{Hoehle}, M. and Schmid, V. (2006): A two-component model for counts of infectious diseases. \emph{Biostatistics}, \bold{7}, pp. 422--437. } \author{ M. Hofmann, M. \enc{Höhle}{Hoehle} and D. \enc{Sabanés Bové}{Sabanes Bove} } \examples{ \dontrun{ # Load the data used in the Held et al.
(2006) paper data("hepatitisA") # Fix seed - this is used for the MCMC samplers in twins set.seed(123) # Call algorithm and save result (use short chain without filtering for speed) otwins <- algo.twins(hepatitisA, control=list(burnin=500, filter=1, sampleSize=1000)) # This shows the entire output (use ask=TRUE for pause between plots) plot(otwins, ask=FALSE) # Direct access to MCMC output hist(otwins$logFile$psi,xlab=expression(psi),main="") if (require("coda")) { print(summary(mcmc(otwins$logFile[,c("psi","xipsi","K")]))) } } } \keyword{ts} \keyword{regression} surveillance/man/algo.bayes.Rd0000644000176200001440000001216413165505075016047 0ustar liggesusers\name{algo.bayes} \alias{algo.bayes} \alias{algo.bayesLatestTimepoint} \alias{algo.bayes1} \alias{algo.bayes2} \alias{algo.bayes3} \encoding{latin1} \title{The Bayes System} \description{ Evaluation of timepoints with the Bayes subsystem 1, 2, 3 or a self-defined Bayes subsystem. } \usage{ algo.bayesLatestTimepoint(disProgObj, timePoint = NULL, control = list(b = 0, w = 6, actY = TRUE,alpha=0.05)) algo.bayes(disProgObj, control = list(range = range, b = 0, w = 6, actY = TRUE,alpha=0.05)) algo.bayes1(disProgObj, control = list(range = range)) algo.bayes2(disProgObj, control = list(range = range)) algo.bayes3(disProgObj, control = list(range = range)) } \arguments{ \item{disProgObj}{object of class disProg (including the observed and the state chain)} \item{timePoint}{time point which should be evaluated in \code{algo.bayesLatestTimepoint}. The default is to use the latest timepoint} \item{control}{control object: \code{range} determines the desired timepoints which should be evaluated, \code{b} describes the number of years to go back for the reference values, \code{w} is the half window width for the reference values around the appropriate timepoint and \code{actY} is a boolean to decide if the year of \code{timePoint} also contributes \code{w} reference values. The parameter \code{alpha} determines the \eqn{(1-\alpha)}-quantile to use in order to calculate the upper threshold. As default \code{b}, \code{w}, \code{actY} are set for the Bayes 1 system with \code{alpha}=0.05. } } \value{ \item{survRes}{ \code{algo.bayesLatestTimepoint} returns a list of class \code{survRes} (surveillance result), which includes the alarm value for recognizing an outbreak (1 for alarm, 0 for no alarm), the threshold value for recognizing the alarm and the input object of class disProg. \code{algo.bayes} gives a list of class \code{survRes} which includes the vector of alarm values for every timepoint in \code{range} and the vector of threshold values for every timepoint in \code{range} for the system specified by \code{b}, \code{w} and \code{actY}, the range and the input object of class disProg. \code{algo.bayes1} returns the same for the Bayes 1 system, \code{algo.bayes2} for the Bayes 2 system and \code{algo.bayes3} for the Bayes 3 system. } } \details{ Using the reference values the \eqn{(1-\alpha)\cdot 100\%}{(1-alpha)*100\%} quantile of the predictive posterior distribution is calculated as a threshold. An alarm is given if the actual value is greater than or equal to this threshold. It is possible to show using analytical computations that the predictive posterior in this case is the negative binomial distribution. Note: \code{algo.rki} or \code{algo.farrington} use two-sided prediction intervals -- if one wants to compare with these procedures it is necessary to use an alpha, which is half the one used for these procedures.
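For illustration, the threshold computation can be sketched in plain \R as follows (a minimal sketch only, assuming a Jeffreys-type Gamma(1/2) prior for the mean; \code{x} is a hypothetical vector of reference values, and the actual implementation may differ in details): \preformatted{
x <- c(3, 5, 2, 4, 6, 3)  # hypothetical reference values
alpha <- 0.05
n <- sum(!is.na(x))       # number of observed reference values
## the predictive posterior is negative binomial; its
## (1-alpha)-quantile serves as the upper threshold
qnbinom(1 - alpha, size = sum(x, na.rm = TRUE) + 0.5,
        prob = n / (n + 1))
} An observation at least as large as this quantile would trigger an alarm in such a setup.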
Note also that \code{algo.bayes} calls \code{algo.bayesLatestTimepoint} for the values specified in \code{range} and for the system specified in \code{control}. \code{algo.bayes1}, \code{algo.bayes2}, \code{algo.bayes3} call \code{algo.bayesLatestTimepoint} for the values specified in \code{range} for the Bayes 1 system, Bayes 2 system or Bayes 3 system. \itemize{ \item \code{"Bayes 1"} reference values from the preceding 6 weeks. Alpha is fixed at 0.05. \item \code{"Bayes 2"} reference values from 6 weeks ago and 13 weeks of the previous year (symmetrical around the same week as the current one in the previous year). Alpha is fixed at 0.05. \item \code{"Bayes 3"} 18 reference values. 9 from one year ago and 9 from two years ago (also symmetrical around the comparable week). Alpha is fixed at 0.05. } The procedure is now able to handle \code{NA}'s in the reference values: they are simply omitted in the summation and when counting the number of observed reference values. } \seealso{ \code{\link{algo.call}}, \code{\link{algo.rkiLatestTimepoint}} and \code{\link{algo.rki}} for the RKI system. } \author{M. \enc{Höhle}{Hoehle}, A. Riebler, C. Lang} \examples{ disProg <- sim.pointSource(p = 0.99, r = 0.5, length = 208, A = 1, alpha = 1, beta = 0, phi = 0, frequency = 1, state = NULL, K = 1.7) # Test for bayes 1 the latest timepoint algo.bayesLatestTimepoint(disProg) # Test week 200 to 208 for outbreaks with a self-defined bayes algo.bayes(disProg, control = list(range = 200:208, b = 1, w = 5, actY = TRUE,alpha=0.05)) # The same for bayes 1 to bayes 3 algo.bayes1(disProg, control = list(range = 200:208,alpha=0.05)) algo.bayes2(disProg, control = list(range = 200:208,alpha=0.05)) algo.bayes3(disProg, control = list(range = 200:208,alpha=0.05)) } \keyword{classif} \source{ Riebler, A. (2004), Empirischer Vergleich von statistischen Methoden zur Ausbruchserkennung bei Surveillance Daten, Bachelor's thesis. } surveillance/man/checkResidualProcess.Rd0000644000176200001440000000504013433452632020121 0ustar liggesusers\name{checkResidualProcess} \alias{checkResidualProcess} \title{ Check the residual process of a fitted \code{twinSIR} or \code{twinstim} } \description{ Transform the residual process (cf. the \code{\link[=residuals.twinstim]{residuals}} methods for classes \code{"twinSIR"} and \code{"twinstim"}) such that the transformed residuals should be uniformly distributed if the fitted model describes the true conditional intensity function well. Graphically check this using \code{\link{ks.plot.unif}}. The transformation for the residuals \code{tau} is \code{1 - exp(-diff(c(0,tau)))} (cf. Ogata, 1988). Another plot inspects the serial correlation between the transformed residuals (scatterplot of \eqn{u_i} vs. \eqn{u_{i+1}}). } \usage{ checkResidualProcess(object, plot = 1:2, mfrow = c(1,length(plot)), ...) } \arguments{ \item{object}{ an object of class \code{"\link{twinSIR}"} or \code{"\link{twinstim}"}. } \item{plot}{ logical (or integer index) vector indicating if (which) plots of the transformed residuals should be produced. The \code{plot} index 1 corresponds to a \code{\link{ks.plot.unif}} to check for deviations of the transformed residuals from the uniform distribution. The \code{plot} index 2 corresponds to a scatterplot of \eqn{u_i} vs. \eqn{u_{i+1}}. By default (\code{plot = 1:2}), both plots are produced. } \item{mfrow}{ see \code{\link{par}}. } \item{\dots}{ further arguments passed to \code{\link{ks.plot.unif}}.
} } \value{ A list (returned invisibly, if \code{plot = TRUE}) with the following components: \describe{ \item{tau}{the residual process obtained by \code{residuals(object)}.} \item{U}{the transformed residuals which should be distributed as U(0,1).} \item{ks}{the result of the \code{ks.test} for the uniform distribution of \code{U}.} } } \references{ Ogata, Y. (1988) Statistical models for earthquake occurrences and residual analysis for point processes. \emph{Journal of the American Statistical Association}, 83, 9-27 } \author{ Sebastian Meyer } \seealso{ \code{\link{ks.plot.unif}} and the \code{\link[=residuals.twinstim]{residuals}}-method for classes \code{"twinSIR"} and \code{"twinstim"}. } \examples{ data("hagelloch") fit <- twinSIR(~ household, data = hagelloch) # a simplistic model ## extract the "residual process", i.e., the fitted cumulative intensities residuals(fit) ## assess goodness of fit based on these residuals checkResidualProcess(fit) # could be better } \keyword{dplot} \keyword{htest} surveillance/man/epidata_animate.Rd0000644000176200001440000001713413433272510017123 0ustar liggesusers\name{epidata_animate} \alias{animate.epidata} \alias{animate.summary.epidata} \title{ Spatio-Temporal Animation of an Epidemic } \description{ Function for the animation of epidemic data, i.e. objects inheriting from class \code{"epidata"}. This only works with 1- or 2-dimensional coordinates and is not useful if some individuals share the same coordinates (overlapping). There are two types of animation, see argument \code{time.spacing}. Besides the direct plotting in the \R session, it is also possible to generate a sequence of graphics files to create animations outside \R. } \usage{ \method{animate}{summary.epidata}(object, main = "An animation of the epidemic", pch = 19, col = c(3, 2, gray(0.6)), time.spacing = NULL, sleep = quote(5/.nTimes), legend.opts = list(), timer.opts = list(), end = NULL, generate.snapshots = NULL, ...) \method{animate}{epidata}(object, ...) } \arguments{ \item{object}{ an object inheriting from class \code{"epidata"} or \code{"summary.epidata"}. In the former case, its summary is calculated and the function continues as in the latter case, passing all \code{...} arguments to the \code{summary.epidata} method. } \item{main}{ a main title for the plot, see also \code{\link{title}}. } \item{pch, col}{ vectors of length 3 specifying the point symbols and colors for susceptible, infectious and removed individuals (in this order). The vectors are recycled if necessary. By default, susceptible individuals are marked as filled green circles, infectious individuals as filled red circles and removed individuals as filled gray circles. Note that the symbols are iteratively drawn (overlayed) in the same plotting region as time proceeds. For information about the possible values of \code{pch} and \code{col}, see the help pages of \code{\link{points}} and \code{\link{par}}, respectively. } \item{time.spacing}{ time interval for the animation steps. If \code{NULL} (the default), the events are plotted one by one with pauses of \code{sleep} seconds. Thus, it is just the \emph{ordering} of the events, which is shown. To plot the appearance of events proportionally to the exact time line, \code{time.spacing} can be set to a numeric value indicating the period of time between consecutive plots. 
Then, for each time point in \code{seq(0, end, by = time.spacing)} the current state of the epidemic can be seen and an additional timer indicates the current time (see \code{timer.opts} below). The argument \code{sleep} will be the artificial pause in seconds between two of those time points. } \item{sleep}{ time in seconds to \code{\link{Sys.sleep}} before the next plotting event. By default, each artificial pause is of length \code{5/.nTimes} seconds, where \code{.nTimes} is the number of events (infections and removals) of the epidemic, which is evaluated in the function body. Thus, for \code{time.spacing = NULL} the animation has a duration of approximately 5 seconds. In the other case, \code{sleep} is the duration of the artificial pause between two time points. Note that \code{sleep} is ignored on non-interactive devices (see \code{\link{dev.interactive}}). } \item{legend.opts}{ either a list of arguments passed to the \code{\link{legend}} function or \code{NULL} (or \code{NA}), in which case no legend will be plotted. All necessary arguments have sensible defaults and need not be specified, i.e. \describe{ \item{\code{x}:}{\code{"topright"}} \item{\code{legend}:}{\code{c("susceptible", "infectious", "removed")}} \item{\code{pch}:}{same as argument \code{pch} of the main function} \item{\code{col}:}{same as argument \code{col} of the main function} } } \item{timer.opts}{ either a list of arguments passed to the \code{\link{legend}} function or \code{NULL} (or \code{NA}), in which case no timer will be plotted. All necessary arguments have sensible defaults and need not be specified, i.e. \describe{ \item{\code{x}:}{\code{"bottomright"}} \item{\code{title}:}{\code{"time"}} \item{\code{box.lty}:}{\code{0}} \item{\code{adj}:}{\code{c(0.5,0.5)}} \item{\code{inset}:}{\code{0.01}} \item{\code{bg}:}{\code{"white"}} } Note that the argument \code{legend}, which is the current time of the animation, cannot be modified. } \item{end}{ ending time of the animation in case of \code{time.spacing} not being \code{NULL}. By default (\code{NULL}), time stops after the last event. } \item{generate.snapshots}{ By default (\code{NULL}), the animation is not saved to image files but only shown on the on-screen device. In order to print to files, \code{time.spacing} must not be \code{NULL}, a screen device must be available, and there are two options:\cr If the framework of the \pkg{animation} package should be used, i.e. the \code{animate}-call is passed as the \code{expr} argument to one of the \code{save*} functions of the \pkg{animation} package, then set \code{generate.snapshots = img.name}, where \code{img.name} is the base name for the generated images (the same as passed to the \code{save*} function). The path and format (type, width, height) for the generated images is derived from \code{\link[animation]{ani.options}}. See the last example below.\cr Alternatively, \code{generate.snapshots} may be a list of arguments passed to the function \code{\link{dev.print}}, which then is executed at each time point of the grid defined by \code{time.spacing}. Essentially, this is used for saving the produced snapshots to files, e.g. \code{generate.snapshots = list(device=pdf, file=quote(paste("epidemic_",sprintf(form,tp),".pdf",sep="")))} will store the animation steps in pdf-files in the current working directory, where the file names each end with the time point represented by the corresponding plot.
Because the variables \code{tp} and \code{form} should only be evaluated inside the function, the \code{file} argument is \code{quote}d. Alternatively, the file name could also make use of the internal plot index \code{i}, e.g., use \code{file=quote(paste("epidemic",i,".pdf",sep=""))}. } \item{\dots}{ further graphical parameters passed to the basic call of \code{plot}, e.g. \code{las}, \code{cex.axis} (etc.) and \code{mgp}. } } %\value{ % invisibly returns \code{NULL}. %} \author{ Sebastian Meyer } \seealso{ \code{\link{summary.epidata}} for the data on which the plot is based. \code{\link{plot.epidata}} for plotting the evolution of an epidemic by the numbers of susceptible, infectious and removed individuals. The contributed \R package \pkg{animation}. } \examples{ data("hagelloch") (s <- summary(hagelloch)) # plot the ordering of the events only animate(s) # or: animate(hagelloch) # with timer (animate only up to t=10) animate(s, time.spacing=0.1, end=10, sleep=0.01, legend.opts=list(x="topleft")) # Such an animation can be saved in various ways using tools of # the animation package, e.g., saveHTML() if (interactive() && require("animation")) { oldwd <- setwd(tempdir()) # to not clutter up the current working dir saveHTML({ par(bg="white") # default "transparent" is grey in some browsers animate(s, time.spacing=1, sleep=0, legend.opts=list(x="topleft"), generate.snapshots="epiani") }, use.dev=FALSE, img.name="epiani", ani.width=600, interval=0.5) setwd(oldwd) } } \keyword{hplot} \keyword{dynamic} \keyword{spatial} surveillance/man/runifdisc.Rd0000644000176200001440000000166013777627613016014 0ustar liggesusers\name{runifdisc} \alias{runifdisc} \title{ Sample Points Uniformly on a Disc } \description{ Sample \code{n} points uniformly on a disc of radius \code{r} in two-dimensional Euclidean space via transformation to polar coordinates: the angle is sampled uniformly from \eqn{U(0,2\pi)}, and the radial distance is sampled as \eqn{\sqrt{U(0,r^2)}}. The sampled polar coordinates are then back-transformed to Cartesian coordinates. } \usage{ runifdisc(n, r = 1, buffer = 0) } \arguments{ \item{n}{ integer size of the sample. } \item{r}{ numeric radius of the disc (centered at (0,0)). } \item{buffer}{ radius of inner buffer zone without points. } } \value{ A two-column coordinate matrix of the sampled points. } \author{ Sebastian Meyer } \examples{ x <- surveillance:::runifdisc(1000, 3) plot(x) } \keyword{datagen} \keyword{distribution} \keyword{internal} % not exported to avoid clash with spatstat::runifdisc surveillance/man/all.equal.Rd0000644000176200001440000000202412670511517015671 0ustar liggesusers\name{all.equal} \alias{all.equal.twinstim} \alias{all.equal.hhh4} \title{ Test if Two Model Fits are (Nearly) Equal } \description{ Two model fits are compared using standard \code{\link{all.equal}}-methods after discarding certain elements considered irrelevant for the equality of the fits, e.g., the runtime and the call. } \usage{ \method{all.equal}{twinstim}(target, current, ..., ignore = NULL) \method{all.equal}{hhh4}(target, current, ..., ignore = NULL) } \arguments{ \item{target,current}{the model fits to be compared.} \item{\dots}{further arguments for standard \code{\link{all.equal}}-methods, e.g., the numerical \code{tolerance}.} \item{ignore}{an optional character vector of elements to ignore when comparing the two fitted objects.
The following elements are always ignored: \code{"runtime"} and \code{"call"}.} } \value{ Either \code{TRUE} or a character vector describing differences between the \code{target} and the \code{current} model fit. } \author{ Sebastian Meyer } \keyword{utilities} surveillance/man/animate.Rd0000644000176200001440000000111113167111527015424 0ustar liggesusers\name{animate} \alias{animate} \title{ Generic animation of spatio-temporal objects } \description{ Generic function for animation of \R objects. } \usage{ animate(object, ...) } \arguments{ \item{object}{The object to animate.} \item{\dots}{ Arguments to be passed to methods, such as graphical parameters or time interval options for the snapshots. } } \seealso{ The methods \code{\link{animate.epidata}}, \code{\link{animate.epidataCS}}, and \code{\link{animate.sts}} for the animation of surveillance data. } \keyword{hplot} \keyword{dynamic} \keyword{spatial} surveillance/man/ha.Rd0000644000176200001440000000242713174706302014410 0ustar liggesusers\name{ha} \alias{ha} \alias{ha.sts} \docType{data} \title{Hepatitis A in Berlin} \description{ Number of Hepatitis A cases among adult males (age > 18) in Berlin 2001-2006. An increase is seen during 2006. } \usage{ data("ha") data("ha.sts") } \format{ \code{ha} is a \code{disProg} object containing \eqn{290\times 12}{290 x 12} observations starting from week 1 in 2001 to week 30 in 2006. \code{ha.sts} is generated from \code{ha} by the converter function \code{\link{disProg2sts}} using a shape file of Berlin (see Examples). } \source{ Robert Koch-Institut: SurvStat: \url{https://survstat.rki.de/}; Queried on 25 August 2006. Robert Koch Institut, Epidemiologisches Bulletin 33/2006, p.290. } \examples{ ## deprecated "disProg" object data("ha") ha plot(aggregate(ha)) ## new-style "sts" object data("ha.sts") ha.sts plot(ha.sts, type = observed ~ unit, labels = TRUE) ## conversion of the old "disProg" object 'ha' to the new S4 class "sts" \dontrun{ shpfile <- system.file("shapes/berlin.shp", package="surveillance") ha.sts <- disProg2sts(ha, map = maptools::readShapePoly(shpfile,IDvar="SNAME")) ## in data("ha.sts"), German umlauts in 'ha.sts@map@data$BEZIRK' ## have been replaced for compatibility } } \keyword{datasets} surveillance/man/addFormattedXAxis.Rd0000644000176200001440000000637413234140561017374 0ustar liggesusers\encoding{latin1} \name{addFormattedXAxis} \alias{addFormattedXAxis} % helper functions for time axis formatting \alias{atChange} \alias{at2ndChange} \alias{atMedian} \title{ Formatted Time Axis for \code{"sts"} Objects } \description{ Add a nicely formatted x-axis to time series plots related to the \code{"\linkS4class{sts}"} class. This utility function is, e.g., used by \code{\link{stsplot_time1}} and \code{\link{plotHHH4_fitted1}}. } \usage{ addFormattedXAxis(x, epochsAsDate = FALSE, xaxis.tickFreq = list("\%Q"=atChange), xaxis.labelFreq = xaxis.tickFreq, xaxis.labelFormat = "\%G\n\n\%OQ", ...) } \arguments{ \item{x}{ an object of class \code{"\linkS4class{sts}"}. } \item{epochsAsDate}{ a logical indicating if the old (\code{FALSE}) or the new (\code{TRUE}) and more flexible implementation should be used. The \code{xaxis.*} arguments are only relevant for the new implementation \code{epochsAsDate = TRUE}. } \item{xaxis.labelFormat,xaxis.tickFreq,xaxis.labelFreq}{ see the details below. } \item{\dots}{ further arguments passed to \code{\link{axis}}.
} } \details{ The setting \code{epochsAsDate = TRUE} enables very flexible formatting of the x-axis and its annotations using the \code{xaxis.tickFreq}, \code{xaxis.labelFreq} and \code{xaxis.labelFormat} arguments. The first two are named lists of pairs, with the \emph{name} being a single \code{\link{strftime}} conversion specification and the value being a function which, based on this conversion, returns a subset of the rows of the \code{sts} object. The subsetting function has the following header: \code{function(x,xm1)}, where \code{x} is a vector containing the result of applying the conversion in \code{name} to the epochs of the \code{sts} object and \code{xm1} is the scalar result when applying the conversion to the natural element just before the first epoch. Please note that the input to the subsetting function is converted using \code{as.numeric} before calling the function. Hence, the conversion specification needs to result in a string convertible to integer. Three predefined subsetting functions exist: \code{atChange}, \code{at2ndChange} and \code{atMedian}, which are used to put a tick at each change (each 2nd change for \code{at2ndChange}) and at the median index among all indices sharing the same value, respectively: \preformatted{ atChange <- function(x,xm1) which(diff(c(xm1,x)) != 0) at2ndChange <- function(x,xm1) which(diff(c(xm1,x) \%/\% 2) != 0) atMedian <- function(x,xm1) tapply(seq_along(x), INDEX=x, quantile, prob=0.5, type=3) } By defining own functions here, one can obtain an arbitrary degree of flexibility. Finally, \code{xaxis.labelFormat} is a \code{\link{strftime}}-compatible formatting string; e.g., the default value is \code{"\%G\\n\\n\%OQ"}, which means ISO year and quarter (in roman letters) stacked on top of each other. } \value{ \code{NULL} (invisibly). The function is called for its side effects. } \author{ Michael H\enc{ö}{oe}hle with contributions by Sebastian Meyer } \seealso{ the examples in \code{\link{stsplot_time1}} and \code{\link{plotHHH4_fitted1}} } \keyword{aplot} surveillance/man/twinstim_simulation.Rd0000644000176200001440000004656314006026604020136 0ustar liggesusers\encoding{latin1} \name{twinstim_simulation} \alias{simEpidataCS} \alias{simulate.twinstim} \title{ Simulation of a Self-Exciting Spatio-Temporal Point Process } \description{ The function \code{simEpidataCS} simulates events of a self-exciting spatio-temporal point process of the \code{"\link{twinstim}"} class. Simulation works via Ogata's modified thinning of the conditional intensity as described in Meyer et al. (2012). Note that simulation is limited to the spatial and temporal range of \code{stgrid}. The \code{\link{simulate}} method for objects of class \code{"\link{twinstim}"} simulates new epidemic data using the model and the parameter estimates of the fitted object. } \usage{ simEpidataCS(endemic, epidemic, siaf, tiaf, qmatrix, rmarks, events, stgrid, tiles, beta0, beta, gamma, siafpars, tiafpars, epilink = "log", t0 = stgrid$start[1], T = tail(stgrid$stop,1), nEvents = 1e5, control.siaf = list(F=list(), Deriv=list()), W = NULL, trace = 5, nCircle2Poly = 32, gmax = NULL, .allocate = 500, .skipChecks = FALSE, .onlyEvents = FALSE) \method{simulate}{twinstim}(object, nsim = 1, seed = NULL, data, tiles, newcoef = NULL, rmarks = NULL, t0 = NULL, T = NULL, nEvents = 1e5, control.siaf = object$control.siaf, W = data$W, trace = FALSE, nCircle2Poly = NULL, gmax = NULL, .allocate = 500, simplify = TRUE, ...) } \arguments{ \item{endemic}{ see \code{\link{twinstim}}.
Note that type-specific endemic intercepts are specified by \code{beta0} here, not by the term \code{(1|type)}. } \item{epidemic}{ see \code{\link{twinstim}}. Marks appearing in this formula must be returned by the generating function \code{rmarks}. } \item{siaf}{ see \code{\link{twinstim}}. In addition to what is required for fitting with \code{twinstim}, the \code{siaf} specification must also contain the element \code{simulate}, a function which draws random locations following the spatial kernel \code{siaf$f}. The first argument of the function is the number of points to sample (say \code{n}), the second one is the vector of parameters \code{siafpars}, the third one is the type indicator (a character string matching a type name as specified by \code{dimnames(qmatrix)}). With the current implementation, only one location is sampled at a time, i.e. \code{n=1}. The \link[=siaf.constant]{predefined siaf's} all provide simulation. } \item{tiaf}{ e.g. what is returned by the generating function \code{\link{tiaf.constant}} or \code{\link{tiaf.exponential}}. See also \code{\link{twinstim}}. } \item{qmatrix}{ see \code{\link{epidataCS}}. Note that this square matrix and its \code{dimnames} determine the number and names of the different event types. In the simplest case, there is only a single type of event, i.e. \code{qmatrix = diag(1)}. } \item{rmarks}{ function of single time (1st argument) and location (2nd argument) returning a one-row \code{data.frame} of marks (named according to the variables in \code{epidemic}) for an event at this point. This must include the columns \code{eps.s} and \code{eps.t}, i.e. the values of the spatial and temporal interaction ranges at this point. Only \code{"numeric"} and \code{"factor"} columns are allowed. Ensure that factor variables are coded equally (same levels and level order) for each new sample. For the \code{simulate.twinstim} method, the default (\code{NULL}) means sampling from the empirical distribution function of the (non-missing) marks in \code{data} restricted to events in the simulation period (\code{t0};\code{T}]. If there are no events in this period, e.g., if simulating beyond the original observation period, \code{rmarks} will sample marks from all of \code{data$events}. } \item{events}{ \code{NULL} or missing (default) in case of an empty prehistory, or a \code{\link{SpatialPointsDataFrame}} containing events of the prehistory (-Inf;\code{t0}] of the process (required for the epidemic to start in case of no endemic component in the model). The \code{SpatialPointsDataFrame} must have the same \code{proj4string} as \code{tiles} and \code{W}. The attached \code{data.frame} (data slot) must contain the typical columns as described in \code{\link{as.epidataCS}} (\code{time}, \code{tile}, \code{eps.t}, \code{eps.s}, and, for type-specific models, \code{type}) and all marks appearing in the \code{epidemic} specification. Note that some column names are reserved (see \code{\link{as.epidataCS}}). Only events up to time \code{t0} are selected and taken as the prehistory. } \item{stgrid}{ see \code{\link{as.epidataCS}}. Simulation only works inside the spatial and temporal range of \code{stgrid}. } \item{tiles}{ object inheriting from \code{"\linkS4class{SpatialPolygons}"} with \code{row.names} matching the \code{tile} names in \code{stgrid} and having the same \code{proj4string} as \code{events} and \code{W}. This is necessary to sample the spatial location of events generated by the endemic component.
} \item{beta0,beta,gamma,siafpars,tiafpars}{ these are the parameter subvectors of the \code{twinstim}. \code{beta} and \code{gamma} must be given in the same order as they appear in \code{endemic} and \code{epidemic}, respectively. \code{beta0} is either a single endemic intercept or a vector of type-specific endemic intercepts in the same order as in \code{qmatrix}. } \item{epilink}{ a character string determining the link function to be used for the \code{epidemic} linear predictor of event marks. By default, the log-link is used. The experimental alternative is \code{epilink = "identity"}. Note that the identity link does not guarantee the force of infection to be positive. If this leads to a negative total intensity (endemic + epidemic), the point process is not well defined and simulation cannot proceed. } \item{t0}{ \code{events} having occurred during (-Inf;\code{t0}] are regarded as part of the prehistory \eqn{H_0} of the process. For \code{simEpidataCS}, by default and also if \code{t0=NULL}, the beginning of \code{stgrid} is used as \code{t0}. For the \code{simulate.twinstim} method, \code{NULL} means to use the fitted time range of the \code{"twinstim"} \code{object}. } \item{T, nEvents}{ simulate a maximum of \code{nEvents} events up to time \code{T}, then stop. For \code{simEpidataCS}, by default, and also if \code{T=NULL}, \code{T} equals the last stop time in \code{stgrid} (it cannot be greater) and \code{nEvents} is bounded above by 10000. For the \code{simulate.twinstim} method, \code{T=NULL} means to use the same time range as for the fitting of the \code{"twinstim"} \code{object}. } \item{W}{ see \code{\link{as.epidataCS}}. When simulating from \code{twinstim}-fits, \code{W} is by default taken from the original \code{data$W}. If specified as \code{NULL}, \code{W} is generated automatically via \code{\link{unionSpatialPolygons}(tiles)}. However, since the result of such a polygon operation should always be verified, it is recommended to do that in advance.\cr It is important that \code{W} and \code{tiles} cover the same region: on the one hand direct offspring is sampled in the spatial influence region of the parent event, i.e., in the intersection of \code{W} and a circle of radius equal to the \code{eps.s} of the parent event, after which the corresponding tile is determined by overlay with \code{tiles}. On the other hand endemic events are sampled from \code{tiles}. } \item{trace}{ logical (or integer) indicating if (or how often) the current simulation status should be \code{cat}ed. For the \code{simulate.twinstim} method, \code{trace} currently only applies to the first of the \code{nsim} simulations. } \item{.allocate}{ number of rows (events) to initially allocate for the event history; defaults to 500. Each time the simulated epidemic exceeds the allocated space, the event \code{data.frame} will be enlarged by \code{.allocate} rows. } \item{.skipChecks,.onlyEvents}{ these logical arguments are not meant to be set by the user. They are used by the \code{simulate}-method for \code{"twinstim"} objects. } \item{object}{ an object of class \code{"\link{twinstim}"}. } \item{nsim}{ number of epidemics (i.e. spatio-temporal point patterns inheriting from class \code{"epidataCS"}) to simulate. Defaults to 1 when the result is a simple object inheriting from class \code{"simEpidataCS"} (as if \code{simEpidataCS} would have been called directly). If \code{nsim > 1}, the result will be a list the structure of which depends on the argument \code{simplify}.
} \item{seed}{ an object specifying how the random number generator should be initialized for simulation (via \code{\link{set.seed}}). The initial state will also be stored as an attribute \code{"seed"} of the result. The original state of the \code{\link{.Random.seed}} will be restored at the end of the simulation. By default (\code{NULL}), neither initialization nor recovery will be done. This behaviour is copied from the \code{\link{simulate}.lm} method. } \item{data}{ an object of class \code{"epidataCS"}, usually the one to which the \code{"twinstim"} \code{object} was fitted. It carries the \code{stgrid} of the endemic component, but also \code{events} for use as the prehistory, and defaults for \code{rmarks} and \code{nCircle2Poly}. } \item{newcoef}{ an optional named numeric vector of (a subset of) parameters to replace the original point estimates in \code{coef(object)}. Elements which do not match any model parameter by name are silently ignored. The \code{newcoef}s may also be supplied in a list following the same conventions as for the \code{start} argument in \code{\link{twinstim}}. } \item{simplify}{ logical. It is strongly recommended to set \code{simplify = TRUE} (default) if \code{nsim} is large. This saves space and computation time, because for each simulated epidemic only the \code{events} component is saved. All other components, which do not vary between simulations, are only stored from the first run. In this case, the runtime of each simulation is stored as an attribute \code{"runtime"} to each simulated \code{events}. See also the \dQuote{Value} section below. } \item{control.siaf}{see \code{\link{twinstim}}.} \item{nCircle2Poly}{see \code{\link{as.epidataCS}}. For \code{simulate.twinstim}, \code{NULL} means to use the same value as for \code{data}.} \item{gmax}{ maximum value the temporal interaction function \code{tiaf$g} can attain. If \code{NULL}, then it is assumed as the maximum value of the type-specific values at 0, i.e. \code{max(tiaf$g(rep.int(0,nTypes), tiafpars, 1:nTypes))}. } \item{\dots}{unused (arguments of the generic).} } \value{ The function \code{simEpidataCS} returns a simulated epidemic of class \code{"simEpidataCS"}, which enhances the class \code{"epidataCS"} by the following additional components known from objects of class \code{"\link{twinstim}"}: \code{bbox}, \code{timeRange}, \code{formula}, \code{coefficients}, \code{npars}, \code{control.siaf}, \code{call}, \code{runtime}. It has corresponding \code{\link{coeflist}}, \code{\link[=residuals.simEpidataCS]{residuals}}, \code{\link[=R0.simEpidataCS]{R0}}, and \code{\link[=intensityplot.simEpidataCS]{intensityplot}} methods. The \code{simulate.twinstim} method has some additional \emph{attributes} set on its result: \code{call}, \code{seed}, and \code{runtime}. If \code{nsim > 1}, it returns an object of class \code{"simEpidataCSlist"}, the form of which depends on the value of \code{simplify} (which is stored as an attribute \code{simplified}): if \code{simplify = FALSE}, then the return value is just a list of sequential simulations, each of class \code{"simEpidataCS"}. However, if \code{simplify = TRUE}, then the sequential simulations share all components but the simulated \code{events}, i.e. the result is a list with the same components as a single object of class \code{"simEpidataCS"}, but with \code{events} replaced by an \code{eventsList} containing the \code{events} returned by each of the simulations. 
The \code{stgrid} component of the returned \code{"simEpidataCS"} will be truncated to the actual end of the simulation, which might be \eqn{<T} if the upper bound \code{nEvents} is reached during simulation. Consequently, individual simulations (for \code{nsim > 1}) may have different \code{stgrid} time ranges. In a \code{"simEpidataCSlist"}, the \code{stgrid} shared by all of the simulated epidemics is just the \code{stgrid} returned by the \emph{first} simulation. } \note{ The more detailed the polygons in \code{tiles} are, the slower the algorithm is. You are advised to sacrifice some shape details for speed by reducing the polygon complexity, for example via the \command{mapshaper} JavaScript library wrapped by the R package \CRANpkg{rmapshaper}. Alternative tools are provided by the packages \CRANpkg{maptools} (\code{\link[maptools]{thinnedSpatialPoly}}) and \CRANpkg{spatstat.geom} (\code{\link[spatstat.geom]{simplify.owin}}). } \references{ Douglas, D. H. and Peucker, T. K. (1973): Algorithms for the reduction of the number of points required to represent a digitized line or its caricature. \emph{Cartographica: The International Journal for Geographic Information and Geovisualization}, \bold{10}, 112-122. Meyer, S., Elias, J. and H\enc{ö}{oe}hle, M. (2012): A space-time conditional intensity model for invasive meningococcal disease occurrence. \emph{Biometrics}, \bold{68}, 607-616. \doi{10.1111/j.1541-0420.2011.01684.x} } \author{ Sebastian Meyer, with contributions by Michael H\enc{ö}{oe}hle } \seealso{ The function \code{\link{simEndemicEvents}} is a faster alternative for endemic-only models, only returning a \code{"\linkS4class{SpatialPointsDataFrame}"} of simulated events. The \code{\link{plot.epidataCS}} and \code{\link{animate.epidataCS}} methods for plotting and animating continuous-space epidemic data, respectively, also work for simulated epidemics (by inheritance), and \code{\link{twinstim}} can be used to fit spatio-temporal conditional intensity models also to simulated data. } \examples{ data("imdepi", "imdepifit") ## load borders of Germany's districts (originally obtained from ## the German Federal Agency for Cartography and Geodesy, ## https://gdz.bkg.bund.de/), simplified by the "modified Visvalingam" ## algorithm (level=6.6\%) using MapShaper.org (v.
0.1.17): load(system.file("shapes", "districtsD.RData", package="surveillance")) plot(districtsD) plot(stateD, add=TRUE, border=2, lwd=2) # 'stateD' was obtained as 'rgeos::gUnaryUnion(districtsD)' ## simulate 2 realizations (over a short period, for speed) ## considering events from data(imdepi) before t=31 as prehistory mysims <- simulate(imdepifit, nsim=2, seed=1, data=imdepi, tiles=districtsD, newcoef=c("e.typeC"=-1), t0=31, T=if (interactive()) 180 else 45, # for CRAN simplify=TRUE) \dontshow{ ## check construction and selection from "simEpidataCSlist" local({ mysim_from_list <- mysims[[1]] capture.output(mysim_single <- eval("[[<-"(attr(mysims, "call"), "nsim", 1))) mysim_from_list$runtime <- mysim_single$runtime <- NULL stopifnot(all.equal(mysim_single, mysim_from_list, check.attributes = FALSE)) }) ## check equivalence of Lambdag from simulation and residuals via twinstim stopifnot(all.equal( residuals(mysims[[1]]), suppressMessages(surveillance:::residuals.twinstim(surveillance:::as.twinstim.simEpidataCS(mysims[[1]]))) )) } ## plot both simulations using the plot-method for simEpidataCSlist's mysims plot(mysims, aggregate="time") ## extract the second realization -> object of class simEpidataCS mysim2 <- mysims[[2]] summary(mysim2) plot(mysim2, aggregate="space") if (surveillance.options("allExamples")) { ### compare the observed _cumulative_ number of cases during the ### first 90 days to 20 simulations from the fitted model ### (performing these simulations takes about 30 seconds) sims <- simulate(imdepifit, nsim=20, seed=1, data=imdepi, t0=0, T=90, tiles=districtsD, simplify=TRUE) ## extract cusums getcsums <- function (events) { tapply(events$time, events@data["type"], function (t) cumsum(table(t)), simplify=FALSE) } csums_observed <- getcsums(imdepi$events) csums_simulated <- lapply(sims$eventsList, getcsums) ## plot it plotcsums <- function (csums, ...) { mapply(function (csum, ...) lines(as.numeric(names(csum)), csum, ...), csums, ...) invisible() } plot(c(0,90), c(0,35), type="n", xlab="Time [days]", ylab="Cumulative number of cases") plotcsums(csums_observed, col=c(2,4), lwd=3) legend("topleft", legend=levels(imdepi$events$type), col=c(2,4), lwd=1) invisible(lapply(csums_simulated, plotcsums, col=adjustcolor(c(2,4), alpha=0.5))) } \dontrun{ ### Experimental code to generate 'nsim' simulations of 'nm2add' months ### beyond the observed time period: nm2add <- 24 nsim <- 5 ### With these settings, simulations will take about 30 seconds. ### The events still infective by the end of imdepi$stgrid will be used ### as the prehistory for the continued process. 
origT <- tail(imdepi$stgrid$stop, 1) ## create a time-extended version of imdepi imdepiext <- local({ ## first we have to expand stgrid (assuming constant "popdensity") g <- imdepi$stgrid g$stop <- g$BLOCK <- NULL gadd <- data.frame(start=rep(seq(origT, by=30, length.out=nm2add), each=nlevels(g$tile)), g[rep(seq_len(nlevels(g$tile)), nm2add), -1]) ## now create an "epidataCS" using this time-extended stgrid as.epidataCS(events=imdepi$events, # the replacement warnings are ok W=imdepi$W, qmatrix=imdepi$qmatrix, stgrid=rbind(g, gadd), T=max(gadd$start) + 30) }) newT <- tail(imdepiext$stgrid$stop, 1) ## simulate beyond the original period simsext <- simulate(imdepifit, nsim=nsim, seed=1, t0=origT, T=newT, data=imdepiext, tiles=districtsD, simplify=TRUE) ## Aside to understand the note from checking events and tiles: # marks(imdepi)["636",] # tile 09662 is attributed to this event, but: # plot(districtsD[c("09678","09662"),], border=1:2, lwd=2, axes=TRUE) # points(imdepi$events["636",]) ## this mismatch is due to polygon simplification ## plot the observed and simulated event numbers over time plot(imdepiext, breaks=c(unique(imdepi$stgrid$start),origT), cumulative=list(maxat=330)) for (i in seq_along(simsext$eventsList)) plot(simsext[[i]], add=TRUE, legend.types=FALSE, breaks=c(unique(simsext$stgrid$start),newT), subset=!is.na(source), # have to exclude the events of the prehistory cumulative=list(offset=c(table(imdepi$events$type)), maxat=330, axis=FALSE), border=NA, density=0) # no histogram abline(v=origT, lty=2, lwd=2) } } \keyword{datagen} \keyword{models} surveillance/man/print.algoQV.Rd0000644000176200001440000000136313122471774016347 0ustar liggesusers\name{print.algoQV} \alias{print.algoQV} \title{Print Quality Value Object} \description{Print a single quality value object in a nicely formatted way} \usage{ \method{print}{algoQV}(x,...) } \arguments{ \item{x}{Quality Values object generated with \code{quality}} \item{...}{Further arguments (not really used)} } \examples{ # Create a test object disProgObj <- sim.pointSource(p = 0.99, r = 0.5, length = 200, A = 1, alpha = 1, beta = 0, phi = 0, frequency = 1, state = NULL, K = 1.7) # Let this object be tested from rki1 survResObj <- algo.rki1(disProgObj, control = list(range = 50:200)) # Compute the quality values in a nice formatted way algo.quality(survResObj) } \keyword{print} surveillance/man/stsSlots.Rd0000644000176200001440000000126013507347337015661 0ustar liggesusers\name{stsSlot-generics} \docType{methods} \alias{alarms} \alias{alarms<-} \alias{upperbound} \alias{upperbound<-} \alias{control} \alias{control<-} \alias{epoch} \alias{epoch<-} \alias{observed} \alias{observed<-} \alias{population} \alias{population<-} \alias{multinomialTS} \alias{multinomialTS<-} \alias{neighbourhood} \alias{neighbourhood<-} \title{Generic Functions to Access \code{"sts"} Slots} \description{ For almost every slot of the \code{"sts"} class, package \pkg{surveillance} defines a generic function of the same name (and a replacement version) to extract (or set) the corresponding slot. See the \code{"\linkS4class{sts}"} class documentation. 
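As a minimal sketch of these accessor generics (using the \code{ha.sts} data shipped with the package; the assigned upper bound is an artificial placeholder value):
\preformatted{
data("ha.sts")
dim(observed(ha.sts))    # extract the matrix of counts
head(epoch(ha.sts))      # extract the epoch slot
upperbound(ha.sts) <- 0 * observed(ha.sts)  # replacement version sets a slot
}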
} \keyword{methods} surveillance/man/twinSIR_methods.Rd0000644000176200001440000001541713433460451017105 0ustar liggesusers\encoding{latin1} \name{twinSIR_methods} \alias{print.twinSIR} \alias{summary.twinSIR} \alias{AIC.twinSIR} \alias{extractAIC.twinSIR} \alias{vcov.twinSIR} \alias{logLik.twinSIR} \alias{print.summary.twinSIR} \title{ Print, Summary and Extraction Methods for \code{"twinSIR"} Objects } \description{ Besides \code{print} and \code{summary} methods there are also some standard extraction methods defined for objects of class \code{"twinSIR"}: \code{vcov}, \code{logLik} and especially \code{AIC} and \code{extractAIC}, which extract Akaike's Information Criterion. Note that special care is needed, when fitting models with parameter constraints such as the epidemic effects \eqn{\alpha} in \code{twinSIR} models. Parameter constraints reduce the average increase in the maximized loglikelihood - thus the penalty for constrained parameters should be smaller than the factor 2 used in the ordinary definition of AIC. To this end, these two methods offer the calculation of the so-called one-sided AIC (OSAIC). } \usage{ \method{print}{twinSIR}(x, digits = max(3, getOption("digits") - 3), ...) \method{summary}{twinSIR}(object, correlation = FALSE, symbolic.cor = FALSE, ...) \method{AIC}{twinSIR}(object, ..., k = 2, one.sided = NULL, nsim = 1e3) \method{extractAIC}{twinSIR}(fit, scale = 0, k = 2, one.sided = NULL, nsim = 1e3, ...) \method{vcov}{twinSIR}(object, ...) \method{logLik}{twinSIR}(object, ...) \method{print}{summary.twinSIR}(x, digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor, signif.stars = getOption("show.signif.stars"), ...) } \arguments{ \item{x, object, fit}{an object of class \code{"twinSIR"}.\cr For the \code{print} method of the \code{summary} method, an object of class \code{"summary.twinSIR"}.} \item{digits}{ integer, used for number formatting with \code{signif()}. Minimum number of significant digits to be printed in values. } \item{correlation}{ logical. if \code{TRUE}, the correlation matrix of the estimated parameters is returned and printed. } \item{symbolic.cor}{ logical. If \code{TRUE}, print the correlations in a symbolic form (see \code{symnum}) rather than as numbers. } \item{\dots}{ For the \code{summary} method: arguments passed to \code{\link{extractAIC.twinSIR}}.\cr For the \code{AIC} method, optionally more fitted model objects.\cr For the \code{print}, \code{extractAIC}, \code{vcov} and \code{logLik} methods: unused (argument of the generic). } \item{k}{ numeric specifying the "weight" of the \emph{penalty} to be used; in an unconstrained fit \code{k = 2} is the classical AIC. } \item{one.sided}{ logical or \code{NULL} (the default). Determines if the one-sided AIC should be calculated instead of using the classical penalty \code{k*edf}. The default value \code{NULL} chooses classical AIC in the case of an unconstrained fit and one-sided AIC in the case of constraints. The type of the fit can be seen in \code{object$method} (or \code{fit$method} respectively), where \code{"L-BFGS"} means constrained optimization. } \item{nsim}{ when there are more than two epidemic covariates in the fit, the weights in the OSAIC formula have to be determined by simulation. Default is to use 1000 samples. Note that package \pkg{quadprog} is additionally required in this case. } \item{scale}{unused (argument of the generic).} \item{signif.stars}{logical. 
If \code{TRUE}, \dQuote{significance stars} are printed for each coefficient.} } \details{ The \code{print} and \code{summary} methods allow the compact or comprehensive representation of the fitting results, respectively. The former only prints the original function call, the estimated coefficients and the maximum log-likelihood value. The latter prints the whole coefficient matrix with standard errors, z- and p-values (see \code{\link{printCoefmat}}), and additionally the number of infections per log-baseline \code{interval}, the (one-sided) AIC and the number of log-likelihood evaluations. They both append a big \dQuote{WARNING}, if the optimization algorithm did not converge. The estimated coefficients may be extracted by using the default \code{coef}-method from package \pkg{stats}. The two AIC functions differ only in that \code{AIC} can take more than one fitted model object and that \code{extractAIC} always returns the number of parameters in the model (\code{AIC} only does with more than one fitted model object). Concerning the choice of one-sided AIC: parameter constraints -- such as the non-negative constraints for the epidemic effects alpha in \code{twinSIR} models -- reduce the average increase in the maximized loglikelihood. Thus, the penalty for constrained parameters should be smaller than the factor 2 used in the ordinary definition of AIC. One-sided AIC (OSAIC) suggested by Hughes and King (2003) is such a proposal when \eqn{p} out of \eqn{k = p + q} parameters have non-negative constraints: \deqn{OSAIC = -2 l(\theta, \tau) + 2 \sum_{g=0}^p w(p,g) (k-p+g)}{% OSAIC = -2 l(theta, tau) + 2 sum_{g=0}^p w(p,g) (k-p+g)} where \eqn{w(p,g)} are \eqn{p}-specific weights. For more details see Section 5.2 in \enc{Hhle}{Hoehle} (2009). } \value{ The \code{print} methods return their first argument, invisibly, as they always should. The \code{vcov} and \code{logLik} methods return the estimated variance-covariance matrix of the parameters (here, the inverse of the estimate of the expected Fisher information matrix), and the maximum log-likelihood value of the model, respectively. The \code{summary} method returns a list containing some summary statistics of the fitted model, which is nicely printed by the corresponding \code{print} method. For the \code{\link{AIC}} and \code{\link{extractAIC}} methods, see the documentation of the corresponding generic functions. } \references{ Hughes A, King M (2003) Model selection using AIC in the presence of one-sided information. \emph{Journal of Statistical Planning and Inference} \strong{115}, pp. 397--411. \enc{Hhle}{Hoehle}, M. (2009), Additive-Multiplicative Regression Models for Spatio-Temporal Epidemics, Biometrical Journal, 51(6):961-978. } \author{ Michael \enc{Hhle}{Hoehle} and Sebastian Meyer } \examples{ data("hagelloch") # a simplistic twinSIR model fit <- twinSIR(~ household + cox(AGE), data = hagelloch) coef(fit) vcov(fit) logLik(fit) summary(fit, correlation = TRUE, symbolic.cor = TRUE) # AIC or OSAIC AIC(fit) AIC(fit, one.sided = FALSE) extractAIC(fit) extractAIC(fit, one.sided = FALSE) # comparing models via AIC fit2 <- update(fit, nIntervals = 2) AIC(fit, fit2) # the 2nd column should be named "OSAIC" here } \keyword{methods} \keyword{print} \keyword{htest} surveillance/man/formatPval.Rd0000644000176200001440000000152712536544321016135 0ustar liggesusers\name{formatPval} \alias{formatPval} \title{ Pretty p-Value Formatting } \description{ Just \acronym{yapf} -- yet another p-value formatter... 
It is a wrapper around \code{\link{format.pval}}, such that by default \code{eps = 1e-4}, \code{scientific = FALSE}, \code{digits = if (p<10*eps) 1 else 2}, and \code{nsmall = 2}. } \usage{ formatPval(pv, eps = 1e-4, scientific = FALSE, ...) } \arguments{ \item{pv}{a numeric vector (of p-values).} \item{eps}{a numerical tolerance, see \code{\link{format.pval}}.} \item{scientific}{see \code{\link{format}}.} \item{\dots}{further arguments passed to \code{\link{format.pval}} (but \code{digits} and \code{nsmall} are hard-coded internally).} } \value{ The character vector of formatted p-values. } \examples{ formatPval(c(0.9, 0.13567, 0.0432, 0.000546, 1e-8)) } \keyword{print} surveillance/man/algo.hmm.Rd0000644000176200001440000001573313122471774015533 0ustar liggesusers\encoding{latin1} \name{algo.hmm} \alias{algo.hmm} \title{Hidden Markov Model (HMM) method} \description{ This function implements on-line HMM detection of outbreaks based on the retrospective procedure described in Le Strat and Carret (1999). Using the function \code{\link[msm]{msm}} (from package \pkg{msm}) a specified HMM is estimated, the decoding problem, i.e. the most probable state configuration, is found by the Viterbi algorithm and the most probable state of the last observation is recorded. On-line detection is performed by sequentially repeating this procedure. Warning: This function can be very slow - a more efficient implementation would be nice! } \usage{ algo.hmm(disProgObj, control = list(range=range, Mtilde=-1, noStates=2, trend=TRUE, noHarmonics=1, covEffectEqual=FALSE, saveHMMs = FALSE, extraMSMargs=list())) } \arguments{ \item{disProgObj}{object of class disProg (including the observed and the state chain)} \item{control}{control object: \describe{ \item{\code{range}}{determines the desired time points which should be evaluated. Note that opposite to other surveillance methods an initial parameter estimation occurs in the HMM. Note that range should be high enough to allow for enough reference values for estimating the HMM} \item{\code{Mtilde}}{number of observations back in time to use for fitting the HMM (including the current observation). Reasonable values are a multiple of \code{disProgObj$freq}, the default is \code{Mtilde=-1}, which means to use all possible values - for long series this might take very long time!} \item{\code{noStates}}{number of hidden states in the HMM -- the typical choice is 2. The initial rates are set such that the \code{noStates}'th state is the one having the highest rate. In other words: this state is considered the outbreak state.} \item{\code{trend}}{Boolean stating whether a linear time trend exists, i.e. if \code{TRUE} (default) then \eqn{\beta_j \neq 0}{\beta != 0}} \item{\code{noHarmonics}}{number of harmonic waves to include in the linear predictor. Default is 1.} \item{\code{covEffectEqual}}{see details} \item{\code{saveHMMs}}{Boolean, if \code{TRUE} then the result of the fitted HMMs is saved. With this option the function can also be used to analyse data retrospectively. Default option is \code{FALSE}} \item{\code{extraMSMArgs}}{A named list with additional arguments to send to the \code{\link[msm:msm]{msm}} HMM fitting function. 
Note that the \code{msm} arguments \code{formula}, \code{data}, \code{qmatrix}, \code{hmodel}, \code{hcovariates} and \code{hconstraint} are automatically filled by \code{algo.hmm}, thus these should NOT be modified.} } } } \value{ \code{algo.hmm} gives a list of class \code{survRes} which includes the vector of alarm values for every timepoint in \code{range}. No \code{upperbound} can be specified; it is set equal to zero. The resulting object contains a slot \code{control$hmm}, which contains the \code{msm} object with the fitted HMM. } \details{ For each time point t the reference values are extracted. If the number of requested values is larger than the number of possible values, the latter is used. Now the following happens on these reference values: A \code{noStates}-state Hidden Markov Model (HMM) is used based on the Poisson distribution with linear predictor on the log-link scale. I.e. \deqn{Y_t | X_t = j \sim Po(\mu_t^j),}{Y_t|X_t = j ~ Po(\mu_t^j),} where \deqn{\log(\mu_t^j) = \alpha_j + \beta_j\cdot t + \sum_{i=1}^{nH} \gamma_j^i \cos(2i\pi/freq\cdot (t-1)) + \delta_j^i \sin(2i\pi/freq\cdot (t-1))}{%
log(mu_t^j) = alpha_j + beta_j t + \sum_{i=1}^{nH} gamma_j^i \cos(2*i*pi/freq * (t-1)) + delta_j^i sin(2*i*pi/freq * (t-1)) } and \eqn{nH=}\code{noHarmonics} and \eqn{freq=12,52} depending on the sampling frequency of the surveillance data. In the above, \eqn{t-1} is used because the first week is always saved as \code{t=1}, i.e. we want to ensure that the first observation corresponds to cos(0) and sin(0). If \code{covEffectEqual} then all covariate effect parameters are equal for the states, i.e. \eqn{\beta_j=\beta, \gamma_j^i=\gamma^i, \delta_j^i=\delta^i} for all \eqn{j=1,...,noStates}. In case more complicated HMM models are to be fitted, it is possible to modify the \code{msm} code used in this function. Using e.g. \code{AIC} one can select between different models (see the \pkg{msm} package for further details). Using the Viterbi algorithm the most probable state configuration is obtained for the reference values, and if the most probable configuration for the last reference value (i.e. time t) equals \code{control$noStates} then an alarm is given. Note: The HMM is re-fitted from scratch every time; sequential updating schemes of the HMM would increase speed considerably! A major advantage of the approach is that outbreaks in the reference values are handled automatically. } \seealso{\code{\link[msm:msm]{msm}}} \author{M. \enc{Höhle}{Hoehle}} \examples{ #Simulate outbreak data from HMM set.seed(123) counts <- sim.pointSource(p = 0.98, r = 0.8, length = 3*52, A = 1, alpha = 1, beta = 0, phi = 0, frequency = 1, state = NULL, K = 1.5) \dontrun{ #Do surveillance using a two state HMM without trend component and #the effect of the harmonics being the same in both states. A sliding #window of two years is used to fit the HMM surv <- algo.hmm(counts, control=list(range=(2*52):length(counts$observed), Mtilde=2*52, noStates=2, trend=FALSE, covEffectEqual=TRUE, extraMSMargs=list())) plot(surv,legend=list(x="topright")) } if (require("msm")) { #Retrospective use of the function, i.e. monitor only the last time point #but use option saveHMMs to store the output of the HMM fitting surv <- algo.hmm(counts, control=list(range=length(counts$observed), Mtilde=-1, noStates=2, trend=FALSE, covEffectEqual=TRUE, saveHMMs=TRUE)) #Compute most probable state using the Viterbi algorithm - 1 is "normal", 2 is "outbreak". viterbi.msm(surv$control$hmm[[1]])$fitted #How often correct?
tab <- cbind(truth=counts$state + 1 , hmm=viterbi.msm(surv$control$hmm[[1]])$fitted) table(tab[,1],tab[,2]) } } \references{ Y. Le Strat and F. Carrat, Monitoring Epidemiologic Surveillance Data using Hidden Markov Models (1999), Statistics in Medicine, 18, 3463--3478 I.L. MacDonald and W. Zucchini, Hidden Markov and Other Models for Discrete-valued Time Series, (1997), Chapman & Hall, Monographs on Statistics and applied Probability 70 } \keyword{classif} surveillance/man/hhh4_simulate.Rd0000644000176200001440000001224413377012440016552 0ustar liggesusers\name{hhh4_simulate} \alias{simulate.hhh4} \title{Simulate \code{"hhh4"} Count Time Series} \description{ Simulates a multivariate time series of counts based on the Poisson/Negative Binomial model as described in Paul and Held (2011). } \usage{ \method{simulate}{hhh4}(object, nsim = 1, seed = NULL, y.start = NULL, subset = 1:nrow(object$stsObj), coefs = coef(object), components = c("ar","ne","end"), simplify = nsim>1, ...) } \arguments{ \item{object}{ an object of class \code{"\link{hhh4}"}. } \item{nsim}{ number of time series to simulate. Defaults to \code{1}. } \item{seed}{ an object specifying how the random number generator should be initialized for simulation (via \code{\link{set.seed}}). The initial state will also be stored as an attribute \code{"seed"} of the result. The original state of the \code{\link{.Random.seed}} will be restored at the end of the simulation. By default (\code{NULL}), neither initialization nor recovery will be done. This behaviour is copied from the \code{\link{simulate}.lm} method. } \item{y.start}{ vector or matrix (with \code{ncol(object$stsObj)} columns) with starting counts for the epidemic components. If \code{NULL}, the observed means in the respective units of the data in \code{object} during \code{subset} are used. } \item{subset}{ time period in which to simulate data. Defaults to (and cannot exceed) the whole period defined by the underlying \code{"sts"} object. } \item{coefs}{ coefficients used for simulation from the model in \code{object}. Default is to use the fitted parameters. Note that the \code{coefs}-vector must be in the same order and scaling as \code{coef(object)}, which especially means \code{reparamPsi = TRUE} (as per default when using the \code{coef}-method to extract the parameters). The overdispersion parameter in \code{coefs} is the inverse of the dispersion parameter \code{size} in \code{\link{rnbinom}}. } \item{components}{ character vector indicating which components of the fitted model \code{object} should be active during simulation. For instance, a simulation with \code{components="end"} is solely based on the fitted endemic mean. } \item{simplify}{ logical indicating if only the simulated counts (\code{TRUE}) or the full \code{"\linkS4class{sts}"} object (\code{FALSE}) should be returned for every replicate. By default a full \code{"sts"} object is returned iff \code{nsim=1}. } \item{\dots}{unused (argument of the generic).} } \details{ Simulates data from a Poisson or a Negative Binomial model with mean \deqn{\mu_{it} = \lambda_{it} y_{i,t-1} + \phi_{it} \sum_{j \neq i} w_{ji} y_{j,t-1} + \nu_{it}}{% \mu_it = \lambda_it y_i,t-1 + \phi_it \sum_j w_ji y_j,t-1 + \nu_it} where \eqn{\lambda_{it}>0}, \eqn{\phi_{it}>0}, and \eqn{\nu_{it}>0} are parameters which are modelled parametrically. The function uses the model and parameter estimates of the fitted \code{object} to simulate the time series. 
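As a schematic illustration of the simulation recursion, consider the following hedged sketch for a single unit without a neighbourhood component; \code{lambda}, \code{nu} and \code{psi} are made-up parameter values, not extracted from a fit:
\preformatted{
lambda <- 0.6; nu <- 5; psi <- 0.1  # made-up ar/end/overdispersion parameters
y <- numeric(100); y[1] <- 10
for (t in 2:100)  # NegBin with mean mu_t and size 1/psi
  y[t] <- rnbinom(1, mu = lambda * y[t-1] + nu, size = 1/psi)
}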
With the argument \code{coefs} it is possible to simulate from the model as specified in \code{object}, but with different parameter values. } \value{ If \code{simplify=FALSE}: an object of class \code{"\linkS4class{sts}"} (\code{nsim = 1}) or a list of those (\code{nsim > 1}). If \code{simplify=TRUE}: an object of class \code{"hhh4sims"}, which is an array of dimension \code{c(length(subset), ncol(object$stsObj), nsim)}, where the third dimension is dropped if \code{nsim=1} (yielding a matrix). The originally observed counts during the simulation period, \code{object$stsObj[subset,]}, are attached for reference (used by the \code{plot}-methods) as an attribute \code{"stsObserved"}, and the initial condition \code{y.start} as attribute \code{"initial"}. } \references{ Paul, M. and Held, L. (2011) Predictive assessment of a non-linear random effects model for multivariate time series of infectious disease counts. Statistics in Medicine, \bold{30}, 1118--1136 } \author{ Michaela Paul and Sebastian Meyer } \seealso{ \code{\link{plot.hhh4sims}} and \code{\link{scores.hhh4sims}} } \examples{ data(influMen) # convert to sts class and extract meningococcal disease time series meningo <- disProg2sts(influMen)[,2] # fit model fit <- hhh4(meningo, control = list(ar = list(f = ~ 1), end = list(f = addSeason2formula(S = 1, period = 52)), family = "NegBin1")) plot(fit) # simulate from model simData <- simulate(fit, seed=1234) # plot simulated data plot(simData, main = "simulated data", xaxis.labelFormat=NULL) # consider a Poisson instead of a NegBin model coefs <- coef(fit) coefs["overdisp"] <- 0 simData2 <- simulate(fit, seed=123, coefs = coefs) plot(simData2, main = "simulated data: Poisson model", xaxis.labelFormat = NULL) # consider a model with higher autoregressive parameter coefs <- coef(fit) coefs[1] <- log(0.5) simData3 <- simulate(fit, seed=321, coefs = coefs) plot(simData3, main = "simulated data: lambda = 0.5", xaxis.labelFormat = NULL) } \keyword{datagen} surveillance/man/stsXtrct.Rd0000644000176200001440000000320213507411066015646 0ustar liggesusers\name{stsXtrct} \docType{methods} \title{Subsetting \code{"sts"} Objects} \alias{[,sts-method} % for convenience \alias{[,sts,ANY,ANY,ANY-method} \description{ The \code{[}-method extracts parts of an \code{"\linkS4class{sts}"} object using row (time) and column (unit) indices. } \usage{ \S4method{[}{sts}(x, i, j, ..., drop) } \arguments{ \item{x}{an object of class \code{"\linkS4class{sts}"}.} \item{i}{row index (integer or logical vector).} \item{j}{column index (character, integer, or logical vector).} \item{\dots,drop}{unused (arguments of the generic).\cr Dimensions are never dropped.} } \value{ an object of class \code{"sts"}. } \details{ Row indices are used to select a subset of the original time period. The \code{start} and \code{epoch} slots of the time series are adjusted accordingly. A warning is issued if an irregular integer sequence is used to extract rows, e.g., \code{x[c(1,2,4),]}, which could destroy the structure of the time series (\code{freq}). Column indices work as usual when indexing matrices, so may select units by name, position or a vector of booleans. When subsetting columns, population fractions are recomputed if and only if \code{x} is no \code{multinomialTS} and already contains population fractions. \code{NA} indices are not supported, negative indices are. Note that a \code{[<-} method (i.e., subassignment) is not implemented. 
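For instance, a short sketch of the indexing semantics described above (using the \code{ha.sts} data; the chosen indices are arbitrary):
\preformatted{
data("ha.sts")
ha.sts[1:26, c(1, 3)]  # first half-year of units 1 and 3
ha.sts[c(1, 2, 4), ]   # irregular row sequence -> warning about 'freq'
}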
} \examples{ data("ha.sts") haagg <- aggregate(ha.sts, nfreq=13) plot(haagg[, 3]) # Single series plot(haagg[1:30, 3]) # Somewhat shorter #Counts at time 20 plot(haagg[20, ], type = observed ~ unit) } \keyword{methods} surveillance/man/sim.seasonalNoise.Rd0000644000176200001440000000335113122471774017414 0ustar liggesusers\name{sim.seasonalNoise} \alias{sim.seasonalNoise} \encoding{latin1} \title{Generation of Background Noise for Simulated Timeseries} \description{Generation of a cyclic model of a Poisson distribution as background data for a simulated timevector. The mean of the Poisson distribution is modelled as: \deqn{\mu = \exp(A \sin( frequency \cdot \omega \cdot (t + \phi)) + \alpha + \beta * t + K * state)}{% mu = exp(A * sin( frequency * omega * (t + phi)) + alpha + beta * t + K * state)} } \usage{ sim.seasonalNoise(A = 1, alpha = 1, beta = 0, phi = 0, length, frequency = 1, state = NULL, K = 0) } \arguments{ \item{A}{amplitude (range of sinus), default = 1.} \item{alpha}{parameter to move along the y-axis (negative values not allowed) with alpha > = A, default = 1.} \item{beta}{regression coefficient, default = 0.} \item{phi}{factor to create seasonal moves (moves the curve along the x-axis), default = 0.} \item{length}{number of weeks to model.} \item{frequency}{factor to determine the oscillation-frequency, default = 1.} \item{state}{if a state chain is entered the outbreaks will be additional weighted by K.} \item{K}{additional weigth for an outbreak which influences the distribution parameter mu, default = 0.} } \value{ an object of class \code{seasonNoise} which includes the modelled timevector, the parameter \code{mu} and all input parameters. } \seealso{\code{\link{sim.pointSource}}} \author{M. \enc{Hhle}{Hoehle}, A. Riebler, C. Lang} \examples{ season <- sim.seasonalNoise(length = 300) plot(season$seasonalBackground,type = "l") # use a negative timetrend beta season <- sim.seasonalNoise(beta = -0.003, length = 300) plot(season$seasonalBackground,type = "l") } \keyword{datagen} surveillance/man/makeControl.Rd0000644000176200001440000000234113346465003016272 0ustar liggesusers\name{makeControl} \alias{makeControl} \title{Generate \code{control} Settings for an \code{hhh4} Model} \usage{ makeControl(f = list(~1), S = list(0, 0, 1), period = 52, offset = 1, ...) } \arguments{ \item{f, S, period}{ arguments for \code{\link{addSeason2formula}} defining each of the three model formulae in the order (\code{ar}, \code{ne}, \code{end}). Recycled if necessary within \code{\link{mapply}}. } \item{offset}{ multiplicative component offsets in the order (\code{ar}, \code{ne}, \code{end}). } \item{...}{ further elements for the \code{\link{hhh4}} control list. The \code{family} parameter is set to \code{"NegBin1"} by default. } } \value{ a list for use as the \code{control} argument in \code{\link{hhh4}}. 
} \description{ Generate \code{control} Settings for an \code{hhh4} Model } \examples{ makeControl() ## a simplistic model for the fluBYBW data ## (first-order transmission only, no district-specific intercepts) data("fluBYBW") mycontrol <- makeControl( f = list(~1, ~1, ~t), S = c(1, 1, 3), offset = list(population(fluBYBW)), # recycled -> in all components ne = list(normalize = TRUE), verbose = TRUE) str(mycontrol) \dontrun{fit <- hhh4(fluBYBW, mycontrol)} } \author{ Sebastian Meyer } surveillance/man/plot.survRes.Rd0000644000176200001440000000734313276250727016460 0ustar liggesusers\name{plot.survRes} \alias{plot.survRes} \alias{plot.survRes.one} \encoding{latin1} \title{Plot a survRes object} \description{ Plotting a (multivariate) \code{survRes} object. The internal function \code{plot.survRes.one} is used as a helper function to plot a univariate time series. } \usage{ \method{plot}{survRes}(x, method=x$control$name, disease=x$control$data, xaxis.years=TRUE,startyear = 2001, firstweek = 1, same.scale=TRUE, ...) plot.survRes.one(x, method=x$control$name, disease=x$control$data, domany=FALSE,ylim=NULL,xaxis.years=TRUE,startyear = 2001, firstweek = 1, xlab="time", ylab="No. infected", main=NULL, type="hhs", lty=c(1,1,2),col=c(1,1,4), outbreak.symbol = list(pch=3,col=3),alarm.symbol=list(pch=24,col=2), legend.opts=list(x="top", legend=c("Infected", "Upperbound", "Alarm", "Outbreak"), lty=NULL,col=NULL,pch=NULL), ...) } \arguments{ \item{x}{object of class \code{survRes}} \item{method}{surveillance method to be used in title} \item{disease}{name of disease in title} \item{xaxis.years}{Boolean indicating whether to show a year based x-axis for weekly data} \item{domany}{Boolean telling the function whether it is called for a multivariate (\code{TRUE}) or univariate (\code{FALSE}) \code{survRes} object. In case of \code{TRUE} no titles are drawn.} \item{ylim}{range of y axis} \item{startyear}{year to begin the axis labeling (the year where the oldest data come from)} \item{firstweek}{number of the first week of January in the first year (just for axis labeling reasons)} \item{xlab}{label of the x-axis} \item{ylab}{label of the y-axis} \item{main}{the title of the graphics is generated from the \code{method} and \code{disease} arguments if not specified otherwise} \item{same.scale}{plot all time series with the same \code{ylim}? Defaults to \code{true}}. \item{type}{line type of the observed counts (first two elements) and the upper bound (third element)} \item{lty}{vector of size 3 specifying the line type of the observed counts (left, right) and the upperbound line} \item{col}{vector with three elements: color of left bar and color of top bar, color of right bar, col of the upperbound line.} \item{outbreak.symbol}{list with entries \code{pch} and \code{col} specifying the plot symbol} \item{alarm.symbol}{list with entries \code{pch} and \code{col} specifying the plot symbol} \item{legend.opts}{a list containing the entries to be sent to the \code{\link{legend}} function. If no legend is requested use \code{legend.opts=NULL}. Otherwise, the following arguments are default \describe{ \item{\code{x}}{\code{top}} \item{\code{legend}}{The names infected and outbreak.} \item{\code{lty}}{If \code{NULL} the \code{lty} argument will be used} \item{\code{pch}}{If \code{NULL} the \code{pch} argument is used} \item{\code{col}}{If \code{NULL} the \code{col} argument is used} } Any further arguments to the \code{legend} function are just provided as additional elements of this list, e.g. 
\code{horiz=TRUE}. } \item{...}{arguments passed to \code{plot.survRes.one}. From there, further arguments are passed to \code{\link{matplot}}.} } \value{ none. A plot showing the number of infected, the threshold for recognizing an outbreak, the alarm status and the outbreak status is generated. } \author{M. \enc{Hhle}{Hoehle}} \examples{ data(ha) ctrl <- list(range = 209:290, b = 2, w = 6, alpha = 0.005) plot(algo.bayes(aggregate(ha), control = ctrl)) } \keyword{hplot} surveillance/man/epidata_plot.Rd0000644000176200001440000001467513671635730016505 0ustar liggesusers\name{epidata_plot} \alias{plot.epidata} \alias{plot.summary.epidata} \alias{stateplot} \title{ Plotting the Evolution of an Epidemic } \description{ Functions for plotting the evolution of epidemics. The \code{\link{plot}} methods for \code{\link{class}}es \code{"\link{epidata}"} and \code{"summary.epidata"} plots the numbers of susceptible, infectious and recovered (= removed) individuals by step functions along the time axis. The function \code{stateplot} shows individual state changes along the time axis. } \usage{ \method{plot}{summary.epidata}(x, lty = c(2, 1, 3), lwd = 2, col = c("#1B9E77", "#D95F02", "#7570B3"), col.hor = col, col.vert = col, xlab = "Time", ylab = "Number of individuals", xlim = NULL, ylim = NULL, legend.opts = list(), do.axis4 = NULL, panel.first = grid(), rug.opts = list(), which.rug = c("infections", "removals", "susceptibility", "all"), ...) \method{plot}{epidata}(x, ...) stateplot(x, id, ...) } \arguments{ \item{x}{ an object inheriting from class \code{"epidata"} or \code{"summary.epidata"}. In the former case, its summary is calculated and the function continues as in the latter case. The \code{plot} method for class \code{"epidata"} is a simple wrapper for \code{plot.summary.epidata} implemented as \code{plot(summary(x, ...))}. } \item{lty, lwd}{ vectors of length 3 containing the line types and widths, respectively, for the numbers of susceptible, infectious and removed individuals (in this order). By default, all lines have width 1 and the line types are dashed (susceptible), solid (infectious) and dotted (removed), respectively. To omit the drawing of a specific line, just set the corresponding entry in \code{lty} to 0. The vectors are recycled if necessary. For information about the different \code{lty} and \code{lwd} codes, see the help pages of \code{\link{par}}. } \item{col, col.hor, col.vert}{ vectors of length 3 containing the line colors for the numbers of susceptible, infectious and removed individuals (in this order). \code{col.hor} defines the color for the horizontal parts of the step function, whilst \code{col.vert} defines the color for its vertical parts. The argument \code{col} is just short for \code{col.hor = col} and \code{col.vert = col}. The default \code{col} vector corresponds to \code{brewer.pal("Dark2",n=3)} from the \CRANpkg{RColorBrewer} package. The vectors are recycled if necessary. For information about the possible values of \code{col}, see the help pages of \code{\link{par}}. } \item{xlab, ylab}{ axis labels, default to "Time" and "Number of individuals", respectively. } \item{xlim, ylim}{ the x and y limits of the plot in the form \code{c(xmin, xmax)} and \code{c(ymin, ymax)}, respectively. By default, these are chosen adequately to fit the time range of the epidemic and the number of individuals. } \item{legend.opts}{ if this is a list (of arguments for the \code{\link{legend}} function), a legend will be plotted. 
The defaults are as follows: \describe{ \item{\code{x}:}{\code{"topright"}} \item{\code{inset}:}{\code{c(0,0.02)}} \item{\code{legend}:}{\code{c("susceptible", "infectious", "removed")}} \item{\code{lty},\code{lwd},\code{col}:}{same as the arguments \code{lty}, \code{lwd}, and \code{col.hor} of the main function} \item{\code{bty}:}{\code{"n"}} } } \item{do.axis4}{ logical indicating if the final numbers of susceptible and removed individuals should be indicated on the right axis. The default \code{NULL} means \code{TRUE}, if \code{x} represents a SIR epidemic and \code{FALSE} otherwise, i.e. if the epidemic is SI, SIS or SIRS. } \item{panel.first}{ an expression to be evaluated after the plot axes are set up but before any plotting takes place. By default, a standard grid is drawn. } \item{rug.opts}{ either a list of arguments passed to the function \code{\link{rug}} or \code{NULL} (or \code{NA}), in which case no \code{rug} will be plotted. By default, the argument \code{ticksize} is set to 0.02, \code{col} is set to the color according to \code{which.rug} (black if this is \code{"all"}), and \code{quiet} is set to \code{TRUE}. Note that the argument \code{x}, which contains the locations for the \code{rug} is fixed internally and can not be modified. The argument \code{which.rug} (see below) determines the locations to mark. } \item{which.rug}{ By default, tick marks are drawn at the time points of infections. Alternatively, one can choose to mark only \code{"removals"}, \code{"susceptibilities"} (i.e. state change from R to S) or \code{"all"} events. } \item{id}{ single character string or factor of length 1 specifying the individual for which the \code{stateplot} should be established. } \item{\dots}{ For \code{plot.summary.epidata}: further graphical parameters passed to \code{plot}, \code{lines} and \code{axis}, e.g. \code{main}, \code{las}, \code{cex.axis} (etc.) and \code{mgp}.\cr For \code{plot.epidata}: arguments passed to \code{plot.summary.epidata}.\cr For \code{stateplot}: arguments passed to \code{\link{plot.stepfun}} or \code{\link{plot.function}} (if \code{id} had no events during the observation period). By default, \code{xlab="time"}, \code{ylab="state"}, \code{xlim=attr(x,"timeRange")}, \code{xaxs="i"} and \code{do.points=FALSE}. } } \value{ \code{plot.summary.epidata} (and \code{plot.epidata}) invisibly returns the matrix used for plotting, which contains the evolution of the three counters.\cr \code{stateplot} invisibly returns the function, which was plotted, typically of class \code{"stepfun"}, but maybe of class \code{"function"}, if no events have been observed for the individual in question (then the function always returns the initial state). The vertical axis of \code{stateplot} can range from 1 to 3, where 1 corresponds to \emph{S}usceptible, 2 to \emph{I}nfectious and 3 to \emph{R}emoved. } \author{ Sebastian Meyer } \seealso{ \code{\link{summary.epidata}} for the data, on which the plots are based. \code{\link{animate.epidata}} for the animation of epidemics. 
} \examples{ data("hagelloch") (s <- summary(hagelloch)) # rudimentary stateplot stateplot(s, id = "187") # evolution of the epidemic plot(s) } \keyword{hplot} \keyword{methods} \keyword{spatial} surveillance/man/boda.Rd0000644000176200001440000001265613432527626014741 0ustar liggesusers\encoding{latin1} \name{boda} \alias{boda} \title{Bayesian Outbreak Detection Algorithm (BODA)} \description{ The function takes \code{range} values of a univariate surveillance time series \code{sts} and for each time point uses a negative binomial regression model to compute the predictive posterior distribution for the current observation. The \eqn{(1-\alpha)\cdot 100\%}{(1-alpha)*100\%} quantile of this predictive distribution is then used as bound: If the actual observation is above the bound an alarm is raised. The Bayesian Outbreak Detection Algorithm (\code{boda}) is due to Manitz and \enc{Hhle}{Hoehle} (2013) and its implementation is illustrated in Salmon et al. (2016). However, \code{boda} should be considered as an experiment, see the Warning section below! } \usage{ boda(sts, control = list( range=NULL, X=NULL, trend=FALSE, season=FALSE, prior=c('iid','rw1','rw2'), alpha=0.05, mc.munu=100, mc.y=10, verbose=FALSE,multicore=TRUE, samplingMethod=c('joint','marginals'), quantileMethod=c("MC","MM") )) } \arguments{ \item{sts}{object of class sts (including the \code{observed} and the \code{state} time series)} \item{control}{Control object given as a \code{list} containing the following components: \describe{ \item{\code{range}}{Specifies the index of all timepoints which should be tested. If range is \code{NULL} all possible timepoints are used.} \item{\code{X}}{} \item{\code{trend}}{Boolean indicating whether a linear trend term should be included in the model for the expectation the log-scale} \item{\code{season}}{Boolean to indicate whether a cyclic spline should be included.} \item{\code{alpha}}{The threshold for declaring an observed count as an aberration is the \eqn{(1-\alpha)\cdot 100\%}{(1-alpha)*100\%} quantile of the predictive posterior.} \item{\code{mc.munu}}{} \item{\code{mc.y}}{Number of samples of \eqn{y}{y} to generate for each par of the mean and size parameter. A total of \eqn{mc.munu \times mc.y}{mc.munu*mc.y} samples are generated.} \item{\code{verbose}}{Argument sent to the inla call. When using ESS it might be necessary to force verbose mode for INLA to work.} \item{\code{multicore}}{Detect using \code{parallel::detectCores} how many logical cores are available and set INLA to use this number.} \item{\code{samplingMethod}}{Should one sample from the parameters joint distribution (joint) or from their respective marginal posterior distribution (marginals)?} \item{quantileMethod}{Character, either \code{MC} or \code{MM}. Indicates how to compute the quantile based on the posterior distribution (no matter the inference method): either by sampling \code{mc.munu} values from the posterior distribution of the parameters and then for each sampled parameters vector sampling \code{mc.y} response values so that one gets a vector of response values based on which one computes an empirical quantile (MC method, as explained in Manitz and \enc{Hhle}{Hoehle} 2013); or by sampling \code{mc.munu} from the posterior distribution of the parameters and then compute the quantile of the mixture distribution using bisectioning, which is faster.} } } } \note{ This function requires the \R package \pkg{INLA}, which is currently \emph{not} available from CRAN. 
It can be obtained from INLA's own repository via \code{install.packages("INLA", repos="https://inla.r-inla-download.org/R/stable")}. } \section{Warning}{ This function is currently experimental! It also heavily depends on the \pkg{INLA} package, so changes there might affect the operational ability of this function. Since the computations for the Bayesian GAM are quite involved, do not expect this function to be particularly fast. Future work could focus on improving the speed, e.g., one issue would be to make the inference work in a sequential fashion. } \keyword{classif} \examples{ \dontrun{ ## running this example takes a couple of minutes #Load the campylobacteriosis data for Germany data("campyDE") #Make an sts object from the data.frame cam.sts <- sts(epoch=campyDE$date, observed=campyDE$case, state=campyDE$state) #Define monitoring period # range <- which(epoch(cam.sts)>=as.Date("2007-01-01")) # range <- which(epoch(cam.sts)>=as.Date("2011-12-10")) range <- tail(1:nrow(cam.sts),n=2) control <- list(range=range, X=NULL, trend=TRUE, season=TRUE, prior='iid', alpha=0.025, mc.munu=100, mc.y=10, samplingMethod = "joint") #Apply the boda algorithm in its simplest form, i.e. the spline is #described by iid random effects and no extra covariates library("INLA") # needs to be attached cam.boda1 <- boda(cam.sts, control=control) plot(cam.boda1, xlab='time [weeks]', ylab='No. reported', dx.upperbound=0) } } \author{J. Manitz, M. \enc{Höhle}{Hoehle}, M. Salmon} \references{ Manitz, J. and \enc{Höhle}{Hoehle}, M. (2013): Bayesian outbreak detection algorithm for monitoring reported cases of campylobacteriosis in Germany. \emph{Biometrical Journal}, \bold{55} (4), 509-526. Salmon, M., Schumacher, D. and \enc{Höhle}{Hoehle}, M. (2016): Monitoring count time series in \R: Aberration detection in public health surveillance. \emph{Journal of Statistical Software}, \bold{70} (10), 1-35. \doi{10.18637/jss.v070.i10} }
} \item{key.args}{ if a list, a color key (in \code{\link[fanplot]{fan}()}'s \code{"boxfan"}-style) is added to the fan chart. The list may include positioning parameters \code{start} (the x-position) and \code{ylim} (the y-range of the color key), \code{space} to modify the width of the boxfan, and \code{rlab} to modify the labels. An alternative way of labeling the quantiles is via the argument \code{ln} in \code{fan.args}. } \item{xlim,ylim}{ axis ranges. } \item{log}{ a character string specifying which axes are to be logarithmic, e.g., \code{log="y"} (see \code{\link{plot.default}}). } \item{xlab,ylab}{ axis labels. } \item{add}{ logical indicating if the fan plot should be added to an existing plot. } \item{\dots}{ further arguments are passed to \code{\link{plot.default}}. For instance, \code{panel.first} could be used to initialize the plot with \code{\link{grid}(nx=NA, ny=NULL)} lines. } } \value{ \code{NULL} (invisibly), with the side effect of drawing a fan chart. } \author{ Sebastian Meyer } \seealso{ the underlying \code{\link[fanplot]{fan}} function in package \CRANpkg{fanplot}. The function is used in \code{\link{plot.oneStepAhead}} and \code{\link{plot.hhh4sims}}. } \examples{ ## artificial data example to illustrate the graphical options if (requireNamespace("fanplot")) { means <- c(18, 19, 20, 25, 26, 35, 34, 25, 19) y <- rlnorm(length(means), log(means), 0.5) quantiles <- sapply(1:99/100, qlnorm, log(means), seq(.5,.8,length.out=length(means))) ## default style with point predictions, color key and log-scale fanplot(quantiles = quantiles, probs = 1:99/100, means = means, observed = y, key.args = list(start = 1, space = .3), log = "y") ## with contour lines instead of a key, and different colors pal <- colorRampPalette(c("darkgreen", "gray93")) fanplot(quantiles = quantiles, probs = 1:99/100, observed = y, fan.args = list(fan.col = pal, ln = c(5,10,25,50,75,90,95)/100), observed.args = list(type = "b", pch = 19)) } } \keyword{hplot} \keyword{distribution} surveillance/man/rotaBB.Rd0000644000176200001440000000110613174706302015162 0ustar liggesusers\name{rotaBB} \alias{rotaBB} \docType{data} \title{Rotavirus cases in Brandenburg, Germany, during 2002-2013 stratified by 5 age categories} \description{ Monthly reported number of rotavirus infections in the federal state of Brandenburg stratified by five age categories (00-04, 05-09, 10-14, 15-69, 70+) during 2002-2013. } \usage{data(rotaBB)} \format{ A \code{sts} object. } \source{ The data were queried on 19 Feb 2014 from the Survstat@RKI database of the German Robert Koch Institute (\url{https://survstat.rki.de/}). } \keyword{datasets} surveillance/man/unionSpatialPolygons.Rd0000644000176200001440000000435012437341450020216 0ustar liggesusers\name{unionSpatialPolygons} \alias{unionSpatialPolygons} \title{ Compute the Unary Union of \code{"SpatialPolygons"} } \description{ Union all subpolygons of a \code{"\link[sp:SpatialPolygons-class]{SpatialPolygons}"} object. This is a wrapper for the polygon clipping engines implemented by packages \pkg{rgeos}, \pkg{polyclip}, or \pkg{gpclib}. } \usage{ unionSpatialPolygons(SpP, method = c("rgeos", "polyclip", "gpclib"), ...) } \arguments{ \item{SpP}{ an object of class \code{"\link[sp:SpatialPolygons-class]{SpatialPolygons}"}. For the \pkg{polyclip} \code{method} only, all polygon classes for which an \code{\link{xylist}}-method exists should work as input. } \item{method}{ polygon clipping machinery to use. 
Default is to simply call \code{\link[rgeos]{gUnaryUnion}} in package \pkg{rgeos}. For \code{method="polyclip"}, function \code{\link[polyclip]{polyclip}} from package \pkg{polyclip} is used, whereas \code{method="gpclib"} calls \code{\link[maptools]{unionSpatialPolygons}} in package \pkg{maptools} (and requires acceptance of \pkg{gpclib}'s restricted license via \code{\link{surveillance.options}(gpclib=TRUE)}). } \item{\dots}{further arguments passed to the chosen \code{method}.} } \value{ an object of class \code{"\link[sp:SpatialPolygons-class]{SpatialPolygons}"} representing the union of all subpolygons. } \author{ Sebastian Meyer } \seealso{ \code{\link[rgeos]{gUnaryUnion}} in package \pkg{rgeos}, \code{\link[polyclip]{polyclip}} in package \pkg{polyclip}, \code{\link[maptools]{unionSpatialPolygons}} in package \pkg{maptools} (for using \code{\link[gpclib:gpc.poly-class]{union}} of package \pkg{gpclib}). } \examples{ ## Load districts of Germany load(system.file("shapes", "districtsD.RData", package = "surveillance")) plot(districtsD, border = "gray") ## Union these districts using either "rgeos" or "polyclip" if (requireNamespace("rgeos")) { stateD <- unionSpatialPolygons(districtsD, method = "rgeos") plot(stateD, add = TRUE, border = 2, lwd = 2) } if (requireNamespace("polyclip")) { stateD_pc <- unionSpatialPolygons(districtsD, method = "polyclip") plot(stateD_pc, add = TRUE, border = 1, lwd = 2, lty = 2) } } \keyword{spatial} surveillance/man/plot.disProg.Rd0000644000176200001440000000741713276254152016414 0ustar liggesusers\name{plot.disProg} \alias{plot.disProg} \alias{plot.disProg.one} \encoding{latin1} \title{Plot Generation of the Observed and the Defined Outbreak States of a (Multivariate) Time Series} \description{ Plotting a (multivariate) \code{disProg} object. The internal function \code{plot.disProg.one} is used as a helper function to plot a univariate time series. } \usage{ \method{plot}{disProg}(x, title = "", xaxis.years=TRUE, startyear = x$start[1], firstweek = x$start[2], as.one=TRUE, same.scale=TRUE, ...) plot.disProg.one(x, title = "", xaxis.years=TRUE, quarters=TRUE, startyear = x$start[1], firstweek = x$start[2], ylim=NULL, xlab="time", ylab="No. infected",type="hh",lty=c(1,1),col=c(1,1), outbreak.symbol = list(pch=3, col=3), legend.opts=list(x="top", legend=c("Infected", "Outbreak"), lty=NULL,pch=NULL,col=NULL), ...) } \arguments{ \item{x}{object of class \code{disProg}} \item{title}{plot title} \item{xaxis.years}{if \code{TRUE}, the x axis is labeled using years} \item{quarters}{add quarters to the plot} \item{startyear}{year to begin the axis labeling (the year where the oldest data come from). This arguments will be obsolete in \code{sts}.} \item{firstweek}{number of the first week of January in the first year (just for axis labeling grounds)} \item{as.one}{if \code{TRUE} all individual time series are shown in one plot} \item{same.scale}{if \code{TRUE} all plots have same scale} \item{ylim}{range of y axis} \item{xlab}{label of the x-axis} \item{ylab}{label of the y-axis} \item{type}{line type of the observed counts (should be \code{hh})} \item{lty}{line type of the observed counts} \item{col}{color of the observed count lines} \item{outbreak.symbol}{list with entries \code{pch} and \code{col} specifying the plot symbol} \item{legend.opts}{a list containing the entries to be sent to the \code{\link{legend}} function. If no legend is requested use \code{legend.opts=NULL}. 
Otherwise, the following defaults are used: \describe{ \item{\code{x}}{\code{top}} \item{\code{legend}}{The names infected and outbreak} \item{\code{lty}}{If \code{NULL} the \code{lty} argument will be used} \item{\code{pch}}{If \code{NULL} the \code{pch} argument is used} \item{\code{col}}{If \code{NULL} the \code{col} argument is used} } Any further arguments to the \code{legend} function are just provided as additional elements of this list, e.g. \code{horiz=TRUE}. } \item{\dots}{arguments passed to \code{plot.disProg.one}. From there, further arguments are passed to \code{\link{matplot}}.} } \value{ a plot showing the number of infected and the defined alarm status for a time series (created by simulation or given as data), either in one single plot or in several plots, one for each individual time series. } \author{M. \enc{Höhle}{Hoehle} with contributions by A. Riebler and C. Lang} \examples{ # Plotting of simulated data disProgObj <- sim.pointSource(p = 0.99, r = 0.5, length = 208, A = 1, alpha = 1, beta = 0, phi = 0, frequency = 1, state = NULL, K = 5) # plot the simulated disease with the defined outbreaks plot(disProgObj) title <- "Number of Infected and Defined Outbreak Positions for Simulated Data" plot(disProgObj, title = title) plot(disProgObj, title = title, xaxis.years=TRUE, startyear = 1999, firstweek = 13) plot(disProgObj, title = title, xaxis.years=TRUE, startyear = 1999, firstweek = 14) # Plotting of measles data data(measles.weser) # one plot plot(measles.weser, title = "measles cases in the district Weser-Ems", xaxis.years=TRUE, startyear= 2001, firstweek=1) # plot cases for each "Kreis" plot(measles.weser, same.scale=TRUE, as.one=FALSE) } \keyword{hplot} surveillance/man/magic.dim.Rd0000644000176200001440000000137613276245673015662 0ustar liggesusers\name{magic.dim} \alias{magic.dim} \title{Compute Suitable k1 x k2 Layout for Plotting} \description{ For a given number \code{k}, \code{magic.dim} provides a vector containing two elements, the number of rows (k1) and columns (k2), respectively, which can be used to set the dimension of a single graphic device so that k1*k2 plots can be drawn by row (or by column) on the device. } \usage{ magic.dim(k) } \arguments{ \item{k}{an integer} } \value{numeric vector with two elements} \seealso{ \code{\link{primeFactors}} and \code{\link{bestCombination}} which are internally used to complete the task. \code{\link{n2mfrow}} is a similar function from package \pkg{grDevices}. } \keyword{dplot} \keyword{utilities} surveillance/man/twinstim_iaf.Rd0000644000176200001440000003441514004512307016510 0ustar liggesusers\encoding{latin1} \name{twinstim_iaf} \alias{siaf.constant} \alias{siaf.step} \alias{siaf.gaussian} \alias{siaf.exponential} \alias{siaf.powerlaw} \alias{siaf.powerlaw1} \alias{siaf.powerlawL} \alias{siaf.student} \alias{tiaf.constant} \alias{tiaf.step} \alias{tiaf.exponential} \title{ Temporal and Spatial Interaction Functions for \code{twinstim} } \description{ A \code{twinstim} model as described in Meyer et al. (2012) requires the specification of the spatial and temporal interaction functions (\eqn{f} and \eqn{g}, respectively), i.e. how infectivity decays with increasing spatial and temporal distance from the source of infection. Custom interaction functions can be specified as well (see \code{\link{siaf}} and \code{\link{tiaf}}, respectively), but the package already predefines some common dispersal kernels returned by the constructor functions documented here.
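For a first impression of how these constructors are used, consider the following hedged sketch (the model formulae are minimal placeholders and \code{myepi} stands for some \code{"epidataCS"} object):
\preformatted{
m <- twinstim(endemic = ~1, epidemic = ~1,
              siaf = siaf.powerlaw(),  # power-law spatial decay
              tiaf = tiaf.constant(),  # no temporal decay
              data = myepi)
}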
See Meyer and Held (2014) for various spatial interaction functions, and Meyer et al. (2017, Section 3, available as \code{vignette("twinstim")}) for an illustration of the implementation. } \usage{ # predefined spatial interaction functions siaf.constant() siaf.step(knots, maxRange = Inf, nTypes = 1, validpars = NULL) siaf.gaussian(nTypes = 1, logsd = TRUE, density = FALSE, F.adaptive = FALSE, F.method = "iso", effRangeMult = 6, validpars = NULL) siaf.exponential(nTypes = 1, validpars = NULL, engine = "C") siaf.powerlaw(nTypes = 1, validpars = NULL, engine = "C") siaf.powerlaw1(nTypes = 1, validpars = NULL, sigma = 1) siaf.powerlawL(nTypes = 1, validpars = NULL, engine = "C") siaf.student(nTypes = 1, validpars = NULL, engine = "C") # predefined temporal interaction functions tiaf.constant() tiaf.step(knots, maxRange = Inf, nTypes = 1, validpars = NULL) tiaf.exponential(nTypes = 1, validpars = NULL) } \arguments{ \item{knots}{numeric vector of distances at which the step function switches to a new height. The length of this vector determines the number of parameters to estimate. For identifiability, the step function has height 1 in the first interval \eqn{[0,knots_1)}. Note that the implementation is right-continuous, i.e., intervals are \eqn{[a,b)}.\cr An initial choice of knots could be based on quantiles of the observed distances between events and their potential source events. For instance, an identifiable spatial step function could be \code{siaf.step(quantile(\link{getSourceDists}(myepi, "space"), c(1,2,4)/10))}, where \code{myepi} is the \code{"epidataCS"} data to be modelled.} \item{maxRange}{a scalar larger than any of \code{knots}. Per default (\code{maxRange=Inf}), the step function never drops to 0 but keeps the last height for any distance larger than the last knot. However, this might not work in some cases, where the last parameter value would become very small and lead to numerical problems. It is then possible to truncate interaction at a distance \code{maxRange} (just like what the variables \code{eps.s} and \code{eps.t} do in the \code{"\link{epidataCS}"} object).} \item{nTypes}{ determines the number of parameters ((log-)scales or (log-)shapes) of the kernels. In a multitype epidemic, the different types may share the same spatial interaction function, in which case \code{nTypes=1}. Otherwise \code{nTypes} should equal the number of event types of the epidemic, in which case every type has its own (log-)scale or (log-)shape, respectively.\cr Currently, \code{nTypes > 1} is only implemented for \code{siaf.gaussian(F.adaptive = TRUE)}, \code{tiaf.step}, and \code{tiaf.exponential}. } \item{logsd,density}{ logicals affecting the parametrization of the Gaussian kernel. Settings different from the defaults are deprecated. The default is to use only the kernel of the bivariate, isotropic normal distribution (\code{density=FALSE}, see Details below), parametrized with the log-standard deviation (\code{logsd=TRUE}) to avoid constrained optimisation (L-BFGS-B) or \code{validpars}.\cr The power-law kernels always employ the log-scale for their scale and shape parameters. } \item{F.adaptive,F.method}{ If \code{F.adaptive = TRUE}, then an adaptive bandwidth of \code{adapt*sd} will be used in the midpoint-cubature (\code{\link[polyCub]{polyCub.midpoint}} in package \pkg{polyCub}) of the Gaussian interaction kernel, where \code{adapt} is an extra parameter of the returned \code{siaf$F} function and defaults to 0.1. 
It can be customized either by the \code{control.siaf$F} argument list of \code{twinstim}, or by a numeric specification of \code{F.adaptive} in the constructing call, e.g., \code{F.adaptive = 0.05} to achieve higher accuracy.\cr Otherwise, if \code{F.adaptive = FALSE}, the \code{F.method} argument determines which \code{\link[polyCub]{polyCub}} method to use in \code{siaf$F}. The accuracy (controlled via, e.g., \code{nGQ}, \code{rel.tol}, or \code{eps}, depending on the cubature method) can then be adjusted in \code{twinstim}'s \code{control.siaf$F} argument. } \item{effRangeMult}{ determines the effective range for numerical integration in terms of multiples of the standard deviation \eqn{\sigma} of the Gaussian kernel, i.e. with \code{effRangeMult=6} the \eqn{6 \sigma} region around the event is considered as the relevant integration domain instead of the whole observation region \code{W}. Setting \code{effRangeMult=NULL} will disable the integral approximation with an effective integration range. } \item{validpars}{ function taking one argument, the parameter vector, indicating if it is valid (see also \code{\link{siaf}}). If \code{logsd=FALSE} and one prefers not to use \code{method="L-BFGS-B"} for fitting the \code{twinstim}, then \code{validpars} could be set to \code{function (pars) pars > 0}. } \item{engine}{ character string specifying the implementation to use. Prior to \pkg{surveillance} 0.14.0, the \code{intrfr} functions for \code{\link{polyCub.iso}} were evaluated in \R (and this implementation is available via \code{engine = "R"}). The new C-implementation, \samp{LinkingTo} the newly exported \code{polyCub_iso} C-implementation in \pkg{polyCub} 0.6.0, is considerably faster. } \item{sigma}{ Fixed value of \eqn{\sigma} for the one-parameter power-law kernel. } } \details{ Evaluation of \code{twinstim}'s likelihood involves cubature of the spatial interaction function over polygonal domains. Various approaches have been compared by Meyer (2010, Section 3.2) and a new efficient method, which takes advantage of the assumed isotropy, has been proposed by Meyer and Held (2014, Supplement B, Section 2) for evaluation of the power-law kernels. These cubature methods are available in the dedicated \R package \pkg{polyCub} and used by the kernels implemented in \pkg{surveillance}. The readily available spatial interaction functions are defined as follows: \describe{ \item{\code{siaf.constant}:}{ \eqn{f(s) = 1} } \item{\code{siaf.step}:}{ \eqn{f(s) = \sum_{k=0}^K \exp(\alpha_k) I_k(||s||)},\cr where \eqn{\alpha_0 = 0}, and \eqn{\alpha_1, \dots, \alpha_K} are the parameters (heights) to estimate. \eqn{I_k(||s||)} indicates if distance \eqn{||s||} belongs to the \eqn{k}th interval according to \code{c(0,knots,maxRange)}, where \eqn{k=0} indicates the interval \code{c(0,knots[1])}.\cr Note that \code{siaf.step} makes use of the \pkg{memoise} package if it is available -- and that is highly recommended to speed up calculations. Specifically, the areas of the intersection of a polygonal domain (influence region) with the \dQuote{rings} of the two-dimensional step function will be cached such that they are only calculated once for every \code{polydomain} (in the first iteration of the \code{twinstim} optimization). They are used in the integration components \code{F} and \code{Deriv}. See Meyer and Held (2014) for a use case and further details. 
}
\item{\code{siaf.gaussian}:}{
  \eqn{f(s|\kappa) = \exp(-||s||^2/2/\sigma_\kappa^2)}\cr
  If \code{nTypes=1} (single-type epidemic or type-invariant
  \code{siaf} in multi-type epidemic), then
  \eqn{\sigma_\kappa = \sigma} for all types \eqn{\kappa}.
  If \code{density=TRUE} (deprecated), then the kernel formula above is
  additionally divided by \eqn{2 \pi \sigma_\kappa^2}, yielding the density of
  the bivariate, isotropic Gaussian distribution with zero mean and covariance
  matrix \eqn{\sigma_\kappa^2 I_2}.
  The standard deviation is optimized on the log-scale
  (\code{logsd = TRUE}, not doing so is deprecated).
}
\item{\code{siaf.exponential}:}{
  \eqn{f(s) = \exp(-||s||/\sigma)}\cr
  The scale parameter \eqn{\sigma} is estimated on the log-scale, i.e.,
  \eqn{\sigma = \exp(\tilde{\sigma})}, and \eqn{\tilde{\sigma}} is the actual
  model parameter.
}
\item{\code{siaf.powerlaw}:}{
  \eqn{f(s) = (||s|| + \sigma)^{-d}}\cr
  The parameters are optimized on the log-scale to ensure positivity, i.e.,
  \eqn{\sigma = \exp(\tilde{\sigma})} and \eqn{d = \exp(\tilde{d})},
  where \eqn{(\tilde{\sigma}, \tilde{d})} is the parameter vector.
  If a power-law kernel is not identifiable for the dataset at hand,
  the exponential kernel or a lagged power law are useful alternatives.
}
\item{\code{siaf.powerlaw1}:}{
  \eqn{f(s) = (||s|| + 1)^{-d}},\cr
  i.e., \code{siaf.powerlaw} with fixed \eqn{\sigma = 1}.
  A different fixed value for \eqn{\sigma} can be specified via the
  \code{sigma} argument of \code{siaf.powerlaw1}.
  The decay parameter \eqn{d} is estimated on the log-scale.
}
\item{\code{siaf.powerlawL}:}{
  \eqn{f(s) = (||s||/\sigma)^{-d}}, for \eqn{||s|| \ge \sigma}, and
  \eqn{f(s) = 1} otherwise,\cr
  which is a \emph{L}agged power-law kernel featuring uniform short-range
  dispersal (up to distance \eqn{\sigma}) and a power-law decay (Pareto-style)
  from distance \eqn{\sigma} onwards.
  The parameters are optimized on the log-scale to ensure positivity, i.e.
  \eqn{\sigma = \exp(\tilde{\sigma})} and \eqn{d = \exp(\tilde{d})}, where
  \eqn{(\tilde{\sigma}, \tilde{d})} is the parameter vector.
  However, there is a caveat associated with this kernel: Its derivative with
  respect to \eqn{\tilde{\sigma}} is mathematically undefined at the threshold
  \eqn{||s||=\sigma}. This local non-differentiability makes
  \code{twinstim}'s likelihood maximization sensitive with respect to
  parameter start values, and is likely to cause false convergence warnings
  by \code{\link{nlminb}}. Possible workarounds are to use the slow and
  robust \code{method="Nelder-Mead"}, or to just ignore the warning and
  verify the result using different sets of start values.
}
\item{\code{siaf.student}:}{
  \eqn{f(s) = (||s||^2 + \sigma^2)^{-d}},\cr
  which is a reparametrized \eqn{t}-kernel.
  For \eqn{d=1}, this is the kernel of the Cauchy density with scale
  \code{sigma}. In Geostatistics, a correlation function of this kind is
  known as the Cauchy model.\cr
  The parameters are optimized on the log-scale to ensure positivity, i.e.
  \eqn{\sigma = \exp(\tilde{\sigma})} and \eqn{d = \exp(\tilde{d})}, where
  \eqn{(\tilde{\sigma}, \tilde{d})} is the parameter vector.
}
}

The predefined temporal interaction functions are defined as follows:
\describe{
\item{\code{tiaf.constant}:}{
  \eqn{g(t) = 1}
}
\item{\code{tiaf.step}:}{
  \eqn{g(t) = \sum_{k=0}^K \exp(\alpha_k) I_k(t)},\cr
  where \eqn{\alpha_0 = 0}, and \eqn{\alpha_1, \dots, \alpha_K} are the
  parameters (heights) to estimate.
  \eqn{I_k(t)} indicates if \eqn{t} belongs to the \eqn{k}th interval
  according to \code{c(0,knots,maxRange)}, where \eqn{k=0} indicates the
  interval \code{c(0,knots[1])}.
}
\item{\code{tiaf.exponential}:}{
  \eqn{g(t|\kappa) = \exp(-\alpha_\kappa t)},\cr
  which is the kernel of the exponential distribution.
  If \code{nTypes=1} (single-type epidemic or type-invariant \code{tiaf} in
  multi-type epidemic), then \eqn{\alpha_\kappa = \alpha} for all types
  \eqn{\kappa}.
}
}
}
\value{
  The specification of an interaction function, which is a list.
  See \code{\link{siaf}} and \code{\link{tiaf}}, respectively, for a
  description of its components.
}
\references{
  Meyer, S. (2010):
  Spatio-Temporal Infectious Disease Epidemiology based on Point Processes.
  Master's Thesis, Ludwig-Maximilians-Universit\enc{ä}{ae}t
  M\enc{ü}{ue}nchen.\cr
  Available as \url{https://epub.ub.uni-muenchen.de/11703/}

  Meyer, S., Elias, J. and H\enc{ö}{oe}hle, M. (2012):
  A space-time conditional intensity model for invasive meningococcal
  disease occurrence. \emph{Biometrics}, \bold{68}, 607-616.
  \doi{10.1111/j.1541-0420.2011.01684.x}

  Meyer, S. and Held, L. (2014):
  Power-law models for infectious disease spread.
  \emph{The Annals of Applied Statistics}, \bold{8} (3), 1612-1639.
  \doi{10.1214/14-AOAS743}

  Meyer, S., Held, L. and \enc{Höhle}{Hoehle}, M. (2017):
  Spatio-temporal analysis of epidemic phenomena using the \R package
  \pkg{surveillance}.
  \emph{Journal of Statistical Software}, \bold{77} (11), 1-55.
  \doi{10.18637/jss.v077.i11}
}
\author{
  Sebastian Meyer
}
\seealso{
  \code{\link{twinstim}}, \code{\link{siaf}}, \code{\link{tiaf}},
  and package \pkg{polyCub} for the involved cubature methods.
}
\examples{
# constant temporal dispersal
tiaf.constant()
# step function kernel
tiaf.step(c(3,7), maxRange=14, nTypes=2)
# exponential temporal decay
tiaf.exponential()

# Type-dependent Gaussian spatial interaction function using an adaptive
# two-dimensional midpoint-rule to integrate it over polygonal domains
siaf.gaussian(2, F.adaptive=TRUE)

# Single-type Gaussian spatial interaction function (using polyCub.iso)
siaf.gaussian()

# Exponential kernel
siaf.exponential()

# Power-law kernel
siaf.powerlaw()

# Power-law kernel with fixed sigma = 1
siaf.powerlaw1()

# "lagged" power-law
siaf.powerlawL()

# (reparametrized) t-kernel
siaf.student()

# step function kernel
siaf.step(c(10,20,50), maxRange=100)
}
\keyword{models}
\keyword{utilities}
surveillance/man/salmNewport.Rd0000644000176200001440000000156313174706302016333 0ustar liggesusers\name{salmNewport}
\alias{salmNewport}
\docType{data}
\title{Salmonella Newport cases in Germany 2004-2013}
\description{
  Reported number of cases of the Salmonella Newport serovar in the 16
  German federal states 2004-2013.
}
\usage{data(salmNewport)}
\format{
  A \code{sts} object.
}
\source{
  The data were queried from the SurvStat@RKI database of the German
  Robert Koch Institute (\url{https://survstat.rki.de/}).
  A detailed description of the 2011 outbreak can be found in the publication

  Bayer, C., Bernard, H., Prager, R., Rabsch, W., Hiller, P., Malorny, B.,
  Pfefferkorn, B., Frank, C., de Jong, A., Friesema, I., Start, K.,
  Rosner, B.M. (2014), An outbreak of Salmonella Newport associated with
  mung bean sprouts in Germany and the Netherlands, October to November
  2011, Eurosurveillance 19(1):pii=20665.
} \keyword{datasets} surveillance/man/hhh4_simulate_plot.Rd0000644000176200001440000002114713230375405017613 0ustar liggesusers\name{hhh4_simulate_plot} \alias{plot.hhh4sims} \alias{aggregate.hhh4sims} \alias{as.hhh4simslist} \alias{plot.hhh4simslist} \alias{aggregate.hhh4simslist} \alias{plotHHH4sims_size} \alias{plotHHH4sims_time} \alias{plotHHH4sims_fan} \title{ Plot Simulations from \code{"hhh4"} Models } \description{ Arrays of simulated counts from \code{\link{simulate.hhh4}} can be visualized as final size boxplots, individual or average time series, or fan charts (using the \CRANpkg{fanplot} package). An \code{aggregate}-method is also available. } \usage{ \method{plot}{hhh4sims}(x, ...) \method{aggregate}{hhh4sims}(x, units = TRUE, time = FALSE, ..., drop = FALSE) as.hhh4simslist(x, ...) \method{plot}{hhh4simslist}(x, type = c("size", "time", "fan"), ..., groups = NULL, par.settings = list()) \method{aggregate}{hhh4simslist}(x, units = TRUE, time = FALSE, ..., drop = FALSE) plotHHH4sims_size(x, horizontal = TRUE, trafo = NULL, observed = TRUE, names = base::names(x), ...) plotHHH4sims_time(x, average = mean, individual = length(x) == 1, conf.level = if (individual) 0.95 else NULL, matplot.args = list(), initial.args = list(), legend = length(x) > 1, xlim = NULL, ylim = NULL, add = FALSE, ...) plotHHH4sims_fan(x, which = 1, fan.args = list(), observed.args = list(), initial.args = list(), means.args = NULL, key.args = NULL, xlim = NULL, ylim = NULL, add = FALSE, xaxis = list(), ...) } \arguments{ \item{x}{ an object of class \code{"hhh4sims"} (as resulting from the \code{\link[=simulate.hhh4]{simulate}}-method for \code{"\link{hhh4}"} models if \code{simplify = TRUE} was set), or an \code{"hhh4simslist"}, i.e., a list of such simulations potentially obtained from different model fits (using the same simulation period). } \item{type}{ a character string indicating the summary plot to produce. } \item{\dots}{ further arguments passed to methods. } \item{groups}{ an optional factor to produce stratified plots by groups of units. The special setting \code{groups = TRUE} is a convenient shortcut for one plot by unit. } \item{par.settings}{ a list of graphical parameters for \code{\link{par}}. Sensible defaults for \code{mfrow}, \code{mar} and \code{las} will be applied unless overridden or \code{!is.list(par.settings)}. } \item{horizontal}{ a logical indicating if the boxplots of the final size distributions should be horizontal (the default). } \item{trafo}{ an optional transformation function from the \pkg{scales} package, e.g., \code{\link[scales]{sqrt_trans}}. } \item{observed}{ a logical indicating if a line and axis value for the observed size of the epidemic should be added to the plot. Alternatively, a list with graphical parameters can be specified to modify the default values. } \item{names}{ a character vector of names for \code{x}. } \item{average}{ scalar-valued function to apply to the simulated counts at each time point. } \item{individual}{ a logical indicating if the individual simulations should be shown as well. } \item{conf.level}{ a scalar in (0,1), which determines the level of the pointwise quantiles obtained from the simulated counts at each time point. A value of \code{NULL} disables the confidence interval. } \item{matplot.args}{ a list of graphical parameters for \code{\link{matlines}}. } \item{initial.args}{ if a list (of graphical parameters for \code{\link{lines}}), a bar for the initial number of cases is added to the plot. 
} \item{legend}{ a logical, a character vector (providing names for \code{x}), or a list of parameters for \code{\link{legend}}. } \item{xlim,ylim}{ vectors of length 2 determining the axis limits. } \item{add}{ a logical indicating if the (mean) simulated time series or the fan chart, respectively, should be added to an existing plot. } \item{which}{ a single integer or a character string selecting the model in \code{x} for which to produce the fan chart. This is only relevant if \code{x} is a \code{"hhh4simslist"} of simulations from multiple models. Defaults to the first model. } \item{fan.args}{ a list of graphical parameters for the \code{\link[fanplot]{fan}}, e.g., to employ a different \code{\link{colorRampPalette}} as \code{fan.col}, or to enable contour lines via \code{ln}. } \item{observed.args}{ if a list (of graphical parameters for \code{\link{lines}}), the originally observed counts are added to the plot. } \item{means.args}{ if a list (of graphical parameters for \code{\link{lines}}), the point forecasts are added to the plot (by default as a white line within the fan). } \item{key.args}{ if a list, a color key (in \code{\link[fanplot]{fan}}'s \code{"boxfan"}-style) is added to the fan chart. The list may include positioning parameters \code{start} (the x-position) and \code{ylim} (the y-range of the color key), \code{space} to modify the width of the boxfan, and \code{rlab} to modify the labels. The color key is disabled by default. An alternative way of labeling the quantiles is via the argument \code{ln} in \code{fan.args}, see the Examples. } \item{xaxis}{ if a list of arguments for \code{\link{addFormattedXAxis}}, that function is used to draw the time axis, otherwise a default x-axis is drawn. } \item{units}{ a logical indicating aggregation over units. Can also be a factor (or something convertible to a factor using \code{\link{as.factor}}) to aggregate groups of units. } \item{time}{ a logical indicating if the counts should be summed over the whole simulation period. } \item{drop}{ a logical indicating if the unit dimension and the \code{"hhh4sims"} (or \code{"hhh4simslist"}) class should be dropped after aggregating over (groups of) units. 
}
}
\author{
  Sebastian Meyer
}
\examples{
### univariate example
data("salmAllOnset")

## fit a hhh4 model to the first 13 years
salmModel <- list(end = list(f = addSeason2formula(~1 + t)),
                  ar = list(f = ~1), family = "NegBin1", subset = 2:678)
salmFit <- hhh4(salmAllOnset, salmModel)

## simulate the next 20 weeks ahead
salmSims <- simulate(salmFit, nsim = 300, seed = 3, subset = 678 + seq_len(20),
                     y.start = observed(salmAllOnset)[678,])

## compare final size distribution to observed value
summary(aggregate(salmSims, time = TRUE))  # summary of simulated values
plot(salmSims, type = "size")

## individual and average simulated time series with a confidence interval
plot(salmSims, type = "time", main = "20-weeks-ahead simulation")

## fan chart based on the quantiles of the simulated counts at each time point
## point forecasts are represented by a white line within the fan
if (requireNamespace("fanplot")) {
    plot(salmSims, type = "fan", main = "20-weeks-ahead simulation",
         fan.args = list(ln = 1:9/10), means.args = list())
}


### multivariate example
data("measlesWeserEms")

## fit a hhh4 model to the first year
measlesModel <- list(
    end = list(f = addSeason2formula(~1), offset = population(measlesWeserEms)),
    ar = list(f = ~1),
    ne = list(f = ~1 + log(pop),
              weights = W_powerlaw(maxlag = 5, normalize = TRUE)),
    family = "NegBin1", subset = 2:52,
    data = list(pop = population(measlesWeserEms)))
measlesFit1 <- hhh4(measlesWeserEms, control = measlesModel)

## use a Poisson distribution instead (just for comparison)
measlesFit2 <- update(measlesFit1, family = "Poisson")

## simulate realizations from these models during the second year
measlesSims <- lapply(X = list(NegBin = measlesFit1, Poisson = measlesFit2),
                      FUN = simulate, nsim = 50, seed = 1, subset = 53:104,
                      y.start = observed(measlesWeserEms)[52,])

## final size of the first model
plot(measlesSims[[1]])

## stratified by groups of districts
mygroups <- factor(substr(colnames(measlesWeserEms), 4, 4))
apply(aggregate(measlesSims[[1]], time = TRUE, units = mygroups), 1, summary)
plot(measlesSims[[1]], groups = mygroups)

## a class and plot-method for a list of simulations from different models
measlesSims <- as.hhh4simslist(measlesSims)
plot(measlesSims)

## simulated time series
plot(measlesSims, type = "time", individual = TRUE, ylim = c(0, 80))

## fan charts
if (requireNamespace("fanplot")) {
    opar <- par(mfrow = c(2,1))
    plot(measlesSims, type = "fan", which = 1, ylim = c(0, 80), main = "NegBin",
         key.args = list())
    plot(measlesSims, type = "fan", which = 2, ylim = c(0, 80), main = "Poisson")
    par(opar)
}
}
\keyword{hplot}
surveillance/man/algo.summary.Rd0000644000176200001440000000347013122471774016432 0ustar liggesusers\name{algo.summary}
\alias{algo.summary}
\title{Summary Table Generation for Several Disease Chains}
\description{
  Summary table generation for several disease chains.
}
\usage{
algo.summary(compMatrices)
}
\arguments{
  \item{compMatrices}{list of matrices constructed by \code{algo.compare}.}
}
\value{
  a matrix combining the individual input matrices
}
\details{
  The overall lag is returned as the mean of all individual lags.
  The TP, FN, TN and FP values are summed up. \code{dist}, \code{sens} and
  \code{spec} are then recomputed on the basis of these summed TP, FN, TN
  and FP values.
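  For instance, the summary sensitivity is recomputed as \eqn{TP/(TP+FN)}
  and the summary specificity as \eqn{TN/(TN+FP)} from these summed counts
  (cf. \code{\link{algo.quality}}).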
}
\seealso{\code{\link{algo.compare}}, \code{\link{algo.quality}}}
\examples{
# Create a test object
disProgObj1 <- sim.pointSource(p = 0.99, r = 0.5, length = 400,
                               A = 1, alpha = 1, beta = 0, phi = 0,
                               frequency = 1, state = NULL, K = 1.7)
disProgObj2 <- sim.pointSource(p = 0.99, r = 0.5, length = 400,
                               A = 1, alpha = 1, beta = 0, phi = 0,
                               frequency = 1, state = NULL, K = 5)
disProgObj3 <- sim.pointSource(p = 0.99, r = 0.5, length = 400,
                               A = 1, alpha = 1, beta = 0, phi = 0,
                               frequency = 1, state = NULL, K = 17)

# Let each of these objects be tested by the following methods
# in range = 200:400
range <- 200:400
control <- list(list(funcName = "rki1", range = range),
                list(funcName = "rki2", range = range),
                list(funcName = "rki3", range = range))

compMatrix1 <- algo.compare(algo.call(disProgObj1, control=control))
compMatrix2 <- algo.compare(algo.call(disProgObj2, control=control))
compMatrix3 <- algo.compare(algo.call(disProgObj3, control=control))

algo.summary( list(a=compMatrix1, b=compMatrix2, c=compMatrix3) )
}
\keyword{print}
surveillance/man/pit.Rd0000644000176200001440000000547713446150274014613 0ustar liggesusers\name{pit}
\alias{pit}
\alias{pit.default}
\title{
  Non-Randomized Version of the PIT Histogram (for Count Data)
}
\description{
  A non-randomized version of the probability integral transform (PIT)
  histogram for count data; see Czado et al. (2009).
}
\usage{
pit(x, ...)
\method{pit}{default}(x, pdistr, J = 10, relative = TRUE, ..., plot = list())
}
\arguments{
  \item{x}{ numeric vector representing the observed counts. }
  \item{pdistr}{ either a list of predictive cumulative distribution functions
    for the observations \code{x}, or (the name of) a single predictive CDF
    used for all \code{x} (with potentially varying arguments \code{...}).
    It is checked that the predictive CDF returns 0 at \code{x=-1}.
    The name of its first argument can be different from \code{x}, e.g.,
    \code{pdistr="pnbinom"} is possible.\cr
    If \code{pdistr} is a single function and no additional \code{\dots}
    arguments are supplied, \code{pdistr} is assumed to be vectorized, i.e.,
    it is simply called as \code{pdistr(x)} and \code{pdistr(x-1)}.
    Otherwise, the predictive CDF is called sequentially and does not need to
    be vectorized. }
  \item{J}{ the number of bins of the histogram. }
  \item{relative}{ logical indicating if relative frequency or the density
    should be plotted. Due to a historical bug, \code{relative=TRUE}
    (the default) actually plots a density histogram while
    \code{relative=FALSE} plots relative frequencies. }
  \item{\dots}{ ignored if \code{pdistr} is a list. Otherwise, such additional
    arguments are used in sequential calls of \code{pdistr} via
    \code{\link{mapply}(pdistr, x, ...)}. }
  \item{plot}{ a list of arguments for \code{\link{plot.histogram}}.
    Otherwise, no plot will be produced. }
}
\value{
  an object of class \code{"pit"}, which inherits from class
  \code{"histogram"} (see \code{\link{hist}}).
  It is returned invisibly if a plot is produced.
}
\references{
  Czado, C., Gneiting, T. and Held, L. (2009):
  Predictive model assessment for count data.
  \emph{Biometrics}, \bold{65} (4), 1254-1261.
  \doi{10.1111/j.1541-0420.2009.01191.x}
}
\author{
  Michaela Paul and Sebastian Meyer
}
\examples{
## Simulation example of Czado et al. (2009, Section 2.4)
set.seed(100)
x <- rnbinom(200, mu = 5, size = 2)
pdistrs <- list("NB(5,0)"   = function (x) ppois(x, lambda=5),
                "NB(5,1/2)" = function (x) pnbinom(x, mu=5, size=2),
                "NB(5,1)"   = function (x) pnbinom(x, mu=5, size=1))

## Reproduce Figure 1
op <- par(mfrow = c(1,3))
for (i in seq_along(pdistrs)) {
    pit(x, pdistr = pdistrs[[i]], J = 10,
        plot = list(ylim = c(0,2.75), main = names(pdistrs)[i]))
    box()
}
par(op)

## Alternative call using ... arguments for pdistr (less efficient)
stopifnot(identical(pit(x, "pnbinom", mu = 5, size = 2, plot = FALSE),
                    pit(x, pdistrs[[2]], plot = FALSE)))
}
\keyword{dplot}
surveillance/man/toLatex.sts.Rd0000644000176200001440000000410513507405436016247 0ustar liggesusers\name{toLatex.sts}
\docType{methods}
\alias{toLatex.sts}
\alias{toLatex,sts-method}
\title{\code{toLatex}-Method for \code{"sts"} Objects}
\description{
  Convert \code{"\linkS4class{sts}"} objects to a character vector with
  LaTeX markup.
}
\usage{
\S4method{toLatex}{sts}(object, caption = "",label=" ", columnLabels = NULL,
        subset = NULL,
        alarmPrefix = "\\\\textbf{\\\\textcolor{red}{",
        alarmSuffix = "}}", ubColumnLabel = "UB", ...)
}
\arguments{
  \item{object}{an \code{"\linkS4class{sts}"} object.}
  \item{caption}{A caption for the table. Default is the empty string.}
  \item{label}{A label for the table. Default is the empty string.}
  \item{columnLabels}{A list of labels for each column of the resulting
    table. Default is \code{NULL}.}
  \item{subset}{A range of values which should be displayed. If \code{NULL},
    all data in the \code{sts} object will be displayed, else only the given
    subset. It therefore needs to be a numeric vector of indexes from 1 to
    \code{length(@observed)}.}
  \item{alarmPrefix}{A LaTeX-compatible prefix string wrapped around a table
    cell iff there is an alarm, i.e., \code{alarm = TRUE}.}
  \item{alarmSuffix}{A LaTeX-compatible suffix string wrapped around a table
    cell iff there is an alarm, i.e., \code{alarm[i,j] = TRUE}.}
  \item{ubColumnLabel}{The label of the upper bound column;
    default is \"UB\".}
  \item{\dots}{further arguments passed to \code{\link{print.xtable}}.}
}
\value{
  An object of class \code{\link[=toLatex]{"Latex"}}.
}
\examples{
# Create a test object
data("salmonella.agona")

# Create the corresponding sts object from the old disProg object
salm <- disProg2sts(salmonella.agona)

control <- list(range=(260:312),
                noPeriods=1,populationOffset=FALSE,
                fitFun="algo.farrington.fitGLM.flexible",
                b=4,w=3,weightsThreshold=1,
                pastWeeksNotIncluded=3,
                pThresholdTrend=0.05,trend=TRUE,
                thresholdMethod="delta",alpha=0.1)
salm <- farringtonFlexible(salm,control=control)
toLatex(salm)
}
\author{Dirk Schumacher}
\keyword{print}
surveillance/man/R0.Rd0000644000176200001440000002045313514363214014277 0ustar liggesusers\encoding{latin1}
\name{R0}
\alias{R0}
\alias{R0.twinstim}
\alias{R0.simEpidataCS}
\alias{simpleR0}
\title{Computes reproduction numbers from fitted models}
\description{
  The S3 generic function \code{R0} defined in package \pkg{surveillance}
  is intended to compute reproduction numbers from fitted epidemic models.
  The package currently defines a method for the \code{"\link{twinstim}"}
  class, which computes expected numbers of infections caused by infected
  individuals depending on the event type and marks attached to the
  individual, which contribute to the infection pressure in the epidemic
  predictor of that class.
  There is also a method for simulated \code{"epidataCS"}
  (just a wrapper for the \code{"twinstim"}-method).
}
\usage{
R0(object, ...)
\method{R0}{twinstim}(object, newevents, trimmed = TRUE, newcoef = NULL, ...) \method{R0}{simEpidataCS}(object, trimmed = TRUE, ...) simpleR0(object, eta = coef(object)[["e.(Intercept)"]], eps.s = NULL, eps.t = NULL, newcoef = NULL) } \arguments{ \item{object}{A fitted epidemic model object for which an \code{R0} method exists.} \item{newevents}{ an optional \code{data.frame} of events for which the reproduction numbers should be calculated. If omitted, it is calculated for the original events from the fit. In this case, if \code{trimmed = TRUE} (the default), the result is just \code{object$R0}; however, if \code{trimmed = FALSE}, the model environment is required, i.e. \code{object} must have been fitted with \code{model = TRUE}. For the \code{twinstim} method, \code{newevents} must at least contain the following columns: the event \code{time} (only for \code{trimmed = TRUE}) and \code{type} (only for multi-type epidemics), the maximum interaction ranges \code{eps.t} and \code{eps.s}, as well as columns for the marks and \code{stgrid} variables used in the epidemic component of the fitted \code{"twinstim"} \code{object} as stored in \code{formula(object)$epidemic}. For \code{trimmed} R0 values, \code{newevents} must additionally contain the components \code{.influenceRegion} and, if using the \code{Fcircle} trick in the \code{siaf} specification, also \code{.bdist} (cf. the hidden columns in the \code{events} component of class \code{"epidataCS"}). } \item{trimmed}{ logical indicating if the individual reproduction numbers should be calculated by integrating the epidemic intensities over the observation period and region only (\code{trimmed = TRUE}) or over the whole time-space domain R+ x R^2 (\code{trimmed = FALSE}). By default, if \code{newevents} is missing, the trimmed \code{R0} values stored in \code{object} are returned. Trimming means that events near the (spatial or temporal) edges of the observation domain have lower reproduction numbers (ceteris paribus) because events outside the observation domain are not observed. } \item{newcoef}{ the model parameters to use when calculating reproduction numbers. The default (\code{NULL}) is to use the MLE \code{coef(object)}. This argument mainly serves the construction of Monte Carlo confidence intervals by evaluating \code{R0} for parameter vectors sampled from the asymptotic multivariate normal distribution of the MLE, see Examples. } \item{\dots}{additional arguments passed to methods. Currently unused for the \code{twinstim} method.} \item{eta}{a value for the epidemic linear predictor, see details.} \item{eps.s,eps.t}{the spatial/temporal radius of interaction. If \code{NULL} (the default), the original value from the data is used if this is unique and an error is thrown otherwise.} } \details{ For the \code{"\link{twinstim}"} class, the individual-specific expected number \eqn{\mu_j} of infections caused by individual (event) \eqn{j} inside its theoretical (untrimmed) spatio-temporal range of interaction given by its \code{eps.t} (\eqn{\epsilon}) and \code{eps.s} (\eqn{\delta}) values is defined as follows (cf. Meyer et al, 2012): \deqn{\mu_j = e^{\eta_j} \cdot \int_{b(\bold{0},\delta)} f(\bold{s}) d\bold{s} \cdot \int_0^\epsilon g(t) dt .} Here, \eqn{b(\bold{0},\delta)} denotes the disc centred at (0,0)' with radius \eqn{\delta}, \eqn{\eta_j} is the epidemic linear predictor, \eqn{g(t)} is the temporal interaction function, and \eqn{f(\bold{s})} is the spatial interaction function. 
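  For illustration, with the constant kernels \code{siaf.constant()} and
  \code{tiaf.constant()}, the two integrals above reduce to the area of the
  disc and the length of the interaction period, respectively, so the expected
  number of infections simplifies to
  \deqn{\mu_j = e^{\eta_j} \cdot \pi \delta^2 \cdot \epsilon .}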
  For a type-specific \code{twinstim}, there is an additional factor for the
  number of event types which can be infected by the type of event \eqn{j}
  and the interaction functions may be type-specific as well.

  Alternatively to the equation above, the \code{trimmed} (observed)
  reproduction numbers are obtained by integrating over the observed
  infectious domains of the individuals, i.e. integrate \eqn{f} over the
  intersection of the influence region with the observation region \code{W}
  (i.e. over \eqn{\{ W \cap b(\bold{s}_j,\delta) \} - \bold{s}_j}) and
  \eqn{g} over the intersection of the observed infectious period with the
  observation period \eqn{(t_0;T]} (i.e. over
  \eqn{(0; \min(T-t_j,\epsilon)]}).

  The function \code{simpleR0} computes
  \deqn{\exp(\eta) \cdot \int_{b(\bold{0},\delta)} f(\bold{s}) d\bold{s}
    \cdot \int_0^{\epsilon} g(t) dt ,}
  where \eqn{\eta} defaults to \eqn{\gamma_0} disregarding any epidemic
  effects of types and marks. It is thus only suitable for simple epidemic
  \code{\link{twinstim}} models with \code{epidemic = ~1}, a diagonal (or
  secondary diagonal) \code{qmatrix}, and type-invariant interaction
  functions. \code{simpleR0} mainly exists for use by \code{\link{epitest}}.

  (Numerical) Integration is performed exactly as during the fitting of
  \code{object}, for instance \code{object$control.siaf} is queried if
  necessary.
}
\value{
  For the \code{R0} methods,
  a numeric vector of estimated reproduction numbers from the fitted model
  \code{object} corresponding to the rows of \code{newevents} (if supplied)
  or the original fitted events including events of the prehistory.

  For \code{simpleR0}, a single number (see details).
}
\references{
  Meyer, S., Elias, J. and H\enc{ö}{oe}hle, M. (2012):
  A space-time conditional intensity model for invasive meningococcal
  disease occurrence. \emph{Biometrics}, \bold{68}, 607-616.
  \doi{10.1111/j.1541-0420.2011.01684.x}
}
\author{Sebastian Meyer}
\examples{
## load the 'imdepi' data and a model fit
data("imdepi", "imdepifit")

## calculate individual and type-specific reproduction numbers
R0s <- R0(imdepifit)
tapply(R0s, imdepi$events@data[names(R0s), "type"], summary)

## untrimmed R0 for specific event settings
refevent <- data.frame(agegrp = "[0,3)", type = "B", eps.s = Inf, eps.t = 30)
setting2 <- data.frame(agegrp = "[3,19)", type = "C", eps.s = Inf, eps.t = 14)
newevents <- rbind("ref" = refevent, "event2" = setting2)
(R0_examples <- R0(imdepifit, newevents = newevents, trimmed = FALSE))
stopifnot(all.equal(R0_examples[["ref"]], simpleR0(imdepifit)))


### compute a Monte Carlo confidence interval

## use a simpler model with constant 'siaf' for speed
simplefit <- update(imdepifit, epidemic=~type, siaf=NULL, subset=NULL)

## we'd like to compute the mean R0's by event type
meanR0ByType <- function (newcoef) {
    R0events <- R0(simplefit, newcoef=newcoef)
    tapply(R0events, imdepi$events@data[names(R0events),"type"], mean)
}
(meansMLE <- meanR0ByType(newcoef=NULL))

## sample B times from asymptotic multivariate normal of the MLE
B <- 5  # CAVE: toy example! In practice this has to be much larger
set.seed(123)
parsamples <- MASS::mvrnorm(B, mu=coef(simplefit), Sigma=vcov(simplefit))

## for each sample compute the 'meanR0ByType'
meansMC <- apply(parsamples, 1, meanR0ByType)

## get the quantiles and print the result
cisMC <- apply(cbind(meansMLE, meansMC), 1, quantile, probs=c(0.025,0.975))
print(rbind(MLE=meansMLE, cisMC))


### R0 for a simple epidemic model
### without epidemic covariates, i.e., all individuals are equally infectious

mepi1 <- update(simplefit, epidemic = ~1, subset = type == "B",
                model = TRUE, verbose = FALSE)

## using the default spatial and temporal ranges of interaction
(R0B <- simpleR0(mepi1))  # eps.s=200, eps.t=30
stopifnot(identical(R0B, R0(mepi1, trimmed = FALSE)[[1]]))

## assuming smaller interaction ranges (but same infection intensity)
simpleR0(mepi1, eps.s = 50, eps.t = 15)
}
\keyword{methods}
\keyword{univar}
surveillance/man/hepatitisA.Rd0000644000176200001440000000101313174706302016101 0ustar liggesusers\name{hepatitisA}
\docType{data}
\alias{hepatitisA}
\title{Hepatitis A in Germany}
\description{
  Weekly number of reported hepatitis A infections in Germany 2001-2004.
}
\usage{data(hepatitisA)}
\format{
  A \code{disProg} object containing \eqn{208\times 1}{208 x 1}
  observations starting from week 1 in 2001 to week 52 in 2004.
}
\source{
  Robert Koch-Institut: SurvStat: \url{https://survstat.rki.de/};
  Queried on 11-01-2005.
}
\examples{
data(hepatitisA)
plot(hepatitisA)
}
\keyword{datasets}
surveillance/man/pairedbinCUSUM.Rd0000644000176200001440000001465613157045136016573 0ustar liggesusers\name{pairedbinCUSUM}
\alias{pairedbinCUSUM}
\alias{pairedbinCUSUM.runlength}
\alias{pairedbinCUSUM.LLRcompute}
\encoding{latin1}
\title{Paired binary CUSUM and its run-length computation}
\description{
  CUSUM for paired binary data as described in Steiner et al. (1999).
}
\usage{
pairedbinCUSUM(stsObj, control = list(range=NULL,theta0,theta1,
                                      h1,h2,h11,h22))

pairedbinCUSUM.runlength(p,w1,w2,h1,h2,h11,h22, sparse=FALSE)
}
\arguments{
  \item{stsObj}{Object of class \code{sts} containing the paired responses
    for each of the, say n, patients. The observed slot of \code{stsObj} is
    thus a \eqn{n \times 2}{n x 2} matrix.}
  \item{control}{Control object as a list containing several parameters.
    \itemize{
      \item{\code{range}}{Vector of indices in the observed slot to monitor.}
      \item{\code{theta0}}{In-control parameters of the paired binary CUSUM.}
      \item{\code{theta1}}{Out-of-control parameters of the paired binary
        CUSUM.}
      \item{\code{h1}}{Primary control limit (=threshold) of 1st CUSUM.}
      \item{\code{h2}}{Primary control limit (=threshold) of 2nd CUSUM.}
      \item{\code{h11}}{Secondary limit for 1st CUSUM.}
      \item{\code{h22}}{Secondary limit for 2nd CUSUM.}
    }
  }
  \item{p}{Vector giving the probability of the four different possible
    states, i.e. c((death=0,near-miss=0),(death=1,near-miss=0),
    (death=0,near-miss=1),(death=1,near-miss=1)).}
  \item{w1}{The parameters \code{w1} and \code{w2} are the sample weight
    vectors for the two CUSUMs, see eqn. (2) in the paper; \code{w1} is the
    weight vector of the first CUSUM.}
  \item{w2}{As for \code{w1}, but for the second CUSUM.}
  \item{h1}{decision barrier of the 1st individual CUSUM}
  \item{h2}{decision barrier of the 2nd individual CUSUM}
  \item{h11}{together with \code{h22} this makes up the joint decision
    barriers}
  \item{h22}{together with \code{h11} this makes up the joint decision
    barriers}
  \item{sparse}{Boolean indicating whether to use sparse matrix computations
    from the \code{Matrix} library (usually much faster!).
Default: \code{FALSE}.} } \details{ For details about the method see the Steiner et al. (1999) reference listed below. Basically, two individual CUSUMs are run each based on a logistic regression model. The combined CUSUM not only signals if one of its two individual CUSUMs signals, but also if the two CUSUMs simultaneously cross the secondary limits. } \seealso{\code{\link{categoricalCUSUM}}} \value{An \code{sts} object with \code{observed}, \code{alarm}, etc. slots trimmed to the \code{control$range} indices. } \references{ Steiner, S. H., Cook, R. J., and Farewell, V. T. (1999), Monitoring paired binary surgical outcomes using cumulative sum charts, Statistics in Medicine, 18, pp. 69--86. } \examples{ #Set in-control and out-of-control parameters as in paper theta0 <- c(-2.3,-4.5,2.5) theta1 <- c(-1.7,-2.9,2.5) #Small helper function to compute the paired-binary likelihood #of the length two vector yz when the true parameters are theta dPBin <- function(yz,theta) { exp(dbinom(yz[1],size=1,prob=plogis(theta[1]),log=TRUE) + dbinom(yz[2],size=1,prob=plogis(theta[2]+theta[3]*yz[1]),log=TRUE)) } #Likelihood ratio for all four possible configurations p <- c(dPBin(c(0,0), theta=theta0), dPBin(c(0,1), theta=theta0), dPBin(c(1,0), theta=theta0), dPBin(c(1,1), theta=theta0)) #Compute ARL using non-sparse matrix operations \dontrun{ pairedbinCUSUM.runlength(p,w1=c(-1,37,-9,29),w2=c(-1,7),h1=70,h2=32,h11=38,h22=17) } #Sparse computations don't work on all machines (e.g. the next line #might lead to an error. If it works this call can be considerably (!) faster #than the non-sparse call. \dontrun{ pairedbinCUSUM.runlength(p,w1=c(-1,37,-9,29),w2=c(-1,7),h1=70,h2=32, h11=38,h22=17,sparse=TRUE) } #Use paired binary CUSUM on the De Leval et al. (1994) arterial switch #operation data on 104 newborn babies data("deleval") #Switch between death and near misses observed(deleval) <- observed(deleval)[,c(2,1)] #Run paired-binary CUSUM without generating alarms. pb.surv <- pairedbinCUSUM(deleval,control=list(theta0=theta0, theta1=theta1,h1=Inf,h2=Inf,h11=Inf,h22=Inf)) plot(pb.surv, xaxis.labelFormat=NULL, ylab="CUSUM Statistic") ###################################################################### #Scale the plots so they become comparable to the plots in Steiner et #al. (1999). To this end a small helper function is defined. 
######################################################################

######################################################################
#Log LR for conditional specification of the paired model
######################################################################
LLR.pairedbin <- function(yz,theta0, theta1) {
  #In control
  alphay0 <- theta0[1] ; alphaz0 <- theta0[2] ; beta0 <- theta0[3]
  #Out of control
  alphay1 <- theta1[1] ; alphaz1 <- theta1[2] ; beta1 <- theta1[3]
  #Likelihood ratios
  llry <- (alphay1-alphay0)*yz[1]+log(1+exp(alphay0))-log(1+exp(alphay1))
  llrz <- (alphaz1-alphaz0)*yz[2]+log(1+exp(alphaz0+beta0*yz[1]))-
                                  log(1+exp(alphaz1+beta1*yz[1]))
  return(c(llry=llry,llrz=llrz))
}

val <- expand.grid(0:1,0:1)
table <- t(apply(val,1, LLR.pairedbin, theta0=theta0, theta1=theta1))
w1 <- min(abs(table[,1]))
w2 <- min(abs(table[,2]))
S <- upperbound(pb.surv) / cbind(rep(w1,nrow(observed(pb.surv))),w2)

#Show results
par(mfcol=c(2,1))
plot(1:nrow(deleval),S[,1],type="l",main="Near Miss",xlab="Patient No.",
     ylab="CUSUM Statistic")
lines(c(0,1e99), c(32,32),lty=2,col=2)
lines(c(0,1e99), c(17,17),lty=2,col=3)
plot(1:nrow(deleval),S[,2],type="l",main="Death",xlab="Patient No.",
     ylab="CUSUM Statistic")
lines(c(0,1e99), c(70,70),lty=2,col=2)
lines(c(0,1e99), c(38,38),lty=2,col=3)

######################################################################
# Run the CUSUM with thresholds as in Steiner et al. (1999).
# After each alarm the CUSUM statistic is set to zero and
# monitoring continues from this point. Triangles indicate alarm
# in the respective CUSUM (near-miss or death). If in both
# simultaneously then an alarm is caused by the secondary limits.
######################################################################
pb.surv2 <- pairedbinCUSUM(deleval,control=list(theta0=theta0,
             theta1=theta1,h1=70*w1,h2=32*w2,h11=38*w1,h22=17*w2))

plot(pb.surv2, xaxis.labelFormat=NULL)
}
\author{S. Steiner and M. \enc{Höhle}{Hoehle}}
\keyword{regression}
surveillance/man/hhh4_plot.Rd0000644000176200001440000004321714005021174015676 0ustar liggesusers\encoding{latin1}
\name{plot.hhh4}
\alias{plot.hhh4}
\alias{plotHHH4_fitted}
\alias{plotHHH4_fitted1}
\alias{plotHHH4_season}
\alias{getMaxEV_season}
\alias{plotHHH4_maxEV}
\alias{getMaxEV}
\alias{plotHHH4_maps}
\alias{plotHHH4_ri}
\alias{plotHHH4_neweights}
\title{Plots for Fitted \code{hhh4}-models}
\description{
  There are six \code{type}s of plots for fitted \code{\link{hhh4}} models:
  \itemize{
    \item Plot the \code{"fitted"} component means (of selected units)
    along time along with the observed counts.
    \item Plot the estimated \code{"season"}ality of the three components.
    \item Plot the time-course of the dominant eigenvalue \code{"maxEV"}.
    \item If the units of the corresponding multivariate
    \code{"\linkS4class{sts}"} object represent different regions,
    maps of the fitted mean components averaged over time (\code{"maps"}),
    or a map of estimated region-specific intercepts (\code{"ri"}) of a
    selected model component can be produced.
    \item Plot the (estimated) neighbourhood weights
    (\code{"neweights"}) as a function of neighbourhood order
    (shortest-path distance between regions), i.e., \code{w_ji ~ o_ji}.
  }
  Spatio-temporal \code{"hhh4"} models and these plots are illustrated in
  Meyer et al. (2017, Section 5), see \code{vignette("hhh4_spacetime")}.
}
\usage{
\method{plot}{hhh4}(x,
     type=c("fitted", "season", "maxEV", "maps", "ri", "neweights"), ...)
plotHHH4_fitted(x, units = 1, names = NULL, col = c("grey85", "blue", "orange"), pch = 19, pt.cex = 0.6, pt.col = 1, par.settings = list(), legend = TRUE, legend.args = list(), legend.observed = FALSE, decompose = NULL, total = FALSE, meanHHH = NULL, ...) plotHHH4_fitted1(x, unit = 1, main = NULL, col = c("grey85", "blue", "orange"), pch = 19, pt.cex = 0.6, pt.col = 1, border = col, start = x$stsObj@start, end = NULL, xaxis = NULL, xlim = NULL, ylim = NULL, xlab = "", ylab = "No. infected", hide0s = FALSE, decompose = NULL, total = FALSE, meanHHH = NULL) plotHHH4_season(..., components = NULL, intercept = FALSE, xlim = NULL, ylim = NULL, xlab = NULL, ylab = "", main = NULL, par.settings = list(), matplot.args = list(), legend = NULL, legend.args = list(), refline.args = list(), unit = 1) getMaxEV_season(x) plotHHH4_maxEV(..., matplot.args = list(), refline.args = list(), legend.args = list()) getMaxEV(x) plotHHH4_maps(x, which = c("mean", "endemic", "epi.own", "epi.neighbours"), prop = FALSE, main = which, zmax = NULL, col.regions = NULL, labels = FALSE, sp.layout = NULL, ..., map = x$stsObj@map, meanHHH = NULL) plotHHH4_ri(x, component, exp = FALSE, at = list(n = 10), col.regions = cm.colors(100), colorkey = TRUE, labels = FALSE, sp.layout = NULL, gpar.missing = list(col = "darkgrey", lty = 2, lwd = 2), ...) plotHHH4_neweights(x, plotter = boxplot, ..., exclude = 0, maxlag = Inf) } \arguments{ \item{x}{a fitted \code{\link{hhh4}} object.} \item{type}{type of plot: either \code{"fitted"} component means of selected \code{units} along time along with the observed counts, or \code{"season"}ality plots of the model components and the epidemic dominant eigenvalue (which may also be plotted along overall time by \code{type="maxEV"}, especially if the model contains time-varying neighbourhood weights or unit-specific epidemic effects), or \code{"maps"} of the fitted mean components averaged over time, or a map of estimated region-specific random intercepts (\code{"ri"}) of a specific model \code{component}. The latter two require \code{x$stsObj} to contain a map.} \item{\dots}{For \code{plotHHH4_season} and \code{plotHHH4_maxEV}, one or more \code{\link{hhh4}}-fits, or a single list of these. Otherwise further arguments passed on to other functions.\cr For the \code{plot}-method these go to the specific plot \code{type} function.\cr \code{plotHHH4_fitted} passes them to \code{plotHHH4_fitted1}, which is called sequentially for every unit in \code{units}.\cr \code{plotHHH4_maps} and \code{plotHHH4_ri} pass additional arguments to \code{\link{spplot}}, and \code{plotHHH4_neweights} to the \code{plotter}.} \item{units,unit}{integer or character vector specifying a single \code{unit} or possibly multiple \code{units} to plot. It indexes \code{colnames(x$stsObj)}.\cr In \code{plotHHH4_fitted}, \code{units=NULL} plots all units.\cr In the seasonality plot, selection of a unit is only relevant if the model contains unit-specific intercepts or seasonality terms.} \item{names,main}{main title(s) for the selected \code{unit}(\code{s}) / \code{components}. If \code{NULL} (default), \code{plotHHH4_fitted1} will use the appropriate element of \code{colnames(x$stsObj)}, whereas \code{plotHHH4_season} uses default titles.} \item{col,border}{length 3 vectors specifying the fill and border colors for the endemic, autoregressive, and spatio-temporal component polygons (in this order).} \item{pch,pt.cex,pt.col}{style specifications for the dots drawn to represent the observed counts. 
    \code{pch=NA} can be used to disable these dots.}
  \item{par.settings}{list of graphical parameters for \code{\link{par}}.
    Sensible defaults for \code{mfrow}, \code{mar} and \code{las} will be
    applied unless overridden or \code{!is.list(par.settings)}.}
  \item{legend}{Integer vector specifying in which of the
    \code{length(units)} frames the legend should be drawn. If a logical
    vector is supplied, \code{which(legend)} determines the frame selection,
    i.e., the default is to draw the legend in the first (upper left) frame
    only, and \code{legend=FALSE} results in no legend being drawn.}
  \item{legend.args}{list of arguments for \code{\link{legend}}, e.g., to
    modify the default positioning \code{list(x="topright", inset=0.02)}.}
  \item{legend.observed}{logical indicating if the legend should contain a
    line for the dots corresponding to observed counts.}
  \item{decompose}{if \code{TRUE} or (a permutation of)
    \code{colnames(x$stsObj)}, the fitted mean will be decomposed into the
    contributions from each single unit and the endemic part instead of the
    default endemic + AR + neighbours decomposition.}
  \item{total}{logical indicating if the fitted components should be summed
    over all units to be compared with the total observed counts at each
    time point. If \code{total=TRUE}, the \code{units}/\code{unit} argument
    is ignored.}
  \item{start,end}{time range to plot specified by vectors of length two
    in the form \code{c(year,number)}, see \code{"\linkS4class{sts}"}.}
  \item{xaxis}{if this is a list (of arguments for
    \code{\link{addFormattedXAxis}}), the time axis is nicely labelled
    similar to \code{\link{stsplot_time}}.
    Note that in this case, the time indexes \code{1:nrow(x$stsObj)} will
    be used as x-values in the plot, which is different from the
    long-standing default (\code{xaxis = NULL}) with a real time scale.}
  \item{xlim}{numeric vector of length 2 specifying the x-axis range.
    The default (\code{NULL}) is to plot the complete time range.}
  \item{ylim}{y-axis range.
    For \code{type="fitted"}, this defaults to
    \code{c(0,max(observed(x$stsObj)[,unit]))}.
    For \code{type="season"}, \code{ylim} must be a list of length
    \code{length(components)} specifying the range for every component
    plot, or a named list to customize only a subset of these. If only one
    \code{ylim} is specified, it will be recycled for all \code{components}
    plots.}
  \item{xlab,ylab}{axis labels. For \code{plotHHH4_season}, \code{ylab}
    specifies the y-axis labels for all \code{components} in a list
    (similar to \code{ylim}). If \code{NULL} or incomplete, default
    mathematical expressions are used. If a single name is supplied such as
    the default \code{ylab=""} (to omit y-axis labels), it is used for all
    \code{components}.}
  \item{hide0s}{logical indicating if dots for zero observed counts should
    be omitted. Especially useful if there are too many.}
  \item{meanHHH}{(internal) use different component means than those
    estimated and available from \code{x}.}
  \item{components}{character vector of component names, i.e., a subset of
    \code{c("ar", "ne", "end")}, for which to plot the estimated
    seasonality. If \code{NULL} (the default), only components which appear
    in any of the models in \code{\dots} are plotted.\cr
    A seasonality plot of the epidemic dominant eigenvalue is also
    available by including \code{"maxEV"} in \code{components}, but it only
    supports models without epidemic covariates/offsets.}
  \item{intercept}{logical indicating whether to include the global intercept.
For \code{plotHHH4_season}, the default (\code{FALSE}) means to plot seasonality as a multiplicative effect on the respective component. Multiplication by the intercept only makes sense if there are no further (non-centered) covariates/offsets in the component.} \item{exp}{logical indicating whether to \code{exp}-transform random effects to show multiplicative effects on the respective components. The default is \code{FALSE}.} \item{at}{a numeric vector of breaks for the color levels (see \code{\link[lattice]{levelplot}}), or a list specifying the number of breaks \code{n} (default: 10) and their \code{range} (default: range of the random effects, extended to be symmetric around 0, or around 1 if \code{exp=TRUE}). If \code{exp=TRUE}, the breaks are generated using \code{scales::\link[scales]{log_breaks}}.} \item{matplot.args}{list of line style specifications passed to \code{\link{matplot}}, e.g., \code{lty}, \code{lwd}, \code{col}.} \item{refline.args}{list of line style specifications (e.g., \code{lty} or \code{col}) passed to \code{\link{abline}} when drawing the reference line (\code{h=1}) in plots of seasonal effects (if \code{intercept=FALSE}) and of the dominant eigenvalue. The reference line is omitted if \code{refline.args} is not a list.} \item{which}{a character vector specifying the components of the mean for which to produce maps. By default, the overall mean and all three components are shown.} \item{prop}{a logical indicating whether the component maps should display proportions of the total mean instead of absolute numbers.} \item{zmax}{a numeric vector of length \code{length(which)} (recycled as necessary) specifying upper limits for the color keys of the maps, using a lower limit of 0. A missing element (\code{NA}) means to use a map-specific color key only covering the range of the values in that map (can be useful for \code{prop = TRUE}). The default \code{zmax = NULL} means to use the same scale for the component maps and a separate scale for the map showing the overall mean.} \item{col.regions}{a vector of colors used to encode the fitted component means (see \code{\link[lattice]{levelplot}}). For \code{plotHHH4_maps}, the length of this color vector also determines the number of levels, using 10 heat colors by default.} \item{colorkey}{a Boolean indicating whether to draw the color key. Alternatively, a list specifying how to draw it, see \code{\link[lattice]{levelplot}}.} \item{map}{an object inheriting from \code{"\linkS4class{SpatialPolygons}"} with \code{row.names} covering \code{colnames(x)}.} \item{component}{component for which to plot the estimated region-specific random intercepts. Must partially match one of \code{colnames(ranef(x, tomatrix=TRUE))}.} \item{labels}{determines if and how regions are labeled, see \code{\link{layout.labels}}.} \item{sp.layout}{optional list of additional layout items, see \code{\link{spplot}}.} \item{gpar.missing}{list of graphical parameters for \code{\link{sp.polygons}}, applied to regions with missing random intercepts, i.e., not included in the model. Such extra regions won't be plotted if \code{!is.list(gpar.missing)}.} \item{plotter}{the (name of a) function used to produce the plot of weights (a numeric vector) as a function of neighbourhood order (a factor variable). It is called as \code{plotter(Weight ~ Distance, ...)} and defaults to \code{\link{boxplot}}. 
    A useful alternative is, e.g., \code{\link{stripplot}} from package
    \pkg{lattice}.}
  \item{exclude}{vector of neighbourhood orders to be excluded from plotting
    (passed to \code{\link{factor}}). By default, the neighbourhood weight
    for order 0 is not shown, which is usually zero anyway.}
  \item{maxlag}{maximum order of neighbourhood to be assumed when computing
    the \code{\link{nbOrder}} matrix. This additional step is necessary iff
    \code{neighbourhood(x$stsObj)} only specifies a binary adjacency matrix.}
}
\value{
  \code{plotHHH4_fitted1} invisibly returns a matrix of the fitted component
  means for the selected \code{unit}, and \code{plotHHH4_fitted} returns
  these in a list for all \code{units}.\cr
  \code{plotHHH4_season} invisibly returns the plotted y-values, i.e. the
  multiplicative seasonality effect within each of \code{components}.
  Note that this will include the intercept, i.e. the point estimate of
  \eqn{exp(intercept + seasonality)} is plotted and returned.\cr
  \code{getMaxEV_season} returns a list with elements
  \code{"maxEV.season"} (as plotted by
  \code{plotHHH4_season(..., components="maxEV")}),
  \code{"maxEV.const"} and \code{"Lambda.const"} (the Lambda matrix and
  its dominant eigenvalue if time effects are ignored).\cr
  \code{plotHHH4_maxEV} (invisibly) and \code{getMaxEV} return the dominant
  eigenvalue of the \eqn{\Lambda_t} matrix for all time points \eqn{t} of
  \code{x$stsObj}.\cr
  \code{plotHHH4_maps} returns a \code{\link[lattice]{trellis.object}} if
  \code{length(which) == 1} (a single \code{\link{spplot}}), and otherwise
  uses \code{\link[gridExtra]{grid.arrange}} from the \pkg{gridExtra}
  package to arrange all \code{length(which)} \code{\link{spplot}}s on a
  single page.
  \code{plotHHH4_ri} returns the generated \code{\link{spplot}}, i.e., a
  \code{\link[lattice]{trellis.object}}.\cr
  \code{plotHHH4_neweights} eventually calls \code{plotter} and thus returns
  whatever is returned by that function.
}
\author{
  Sebastian Meyer
}
\references{
  Held, L. and Paul, M. (2012): Modeling seasonality in space-time
  infectious disease surveillance data.
  \emph{Biometrical Journal}, \bold{54}, 824-843.
  \doi{10.1002/bimj.201200037}

  Meyer, S., Held, L. and \enc{Höhle}{Hoehle}, M. (2017):
  Spatio-temporal analysis of epidemic phenomena using the \R package
  \pkg{surveillance}.
  \emph{Journal of Statistical Software}, \bold{77} (11), 1-55.
  \doi{10.18637/jss.v077.i11}
}
\seealso{
  other methods for \code{hhh4} fits, e.g., \code{\link{summary.hhh4}}.
}
\examples{
data("measlesWeserEms")

## fit a simple hhh4 model
measlesModel <- list(
    ar = list(f = ~ 1),
    end = list(f = addSeason2formula(~0 + ri(type="iid"), S=1, period=52),
               offset = population(measlesWeserEms)),
    family = "NegBin1"
    )
measlesFit <- hhh4(measlesWeserEms, measlesModel)

## fitted values for a single unit
plot(measlesFit, units=2)

## sum fitted components over all units
plot(measlesFit, total=TRUE)

## 'xaxis' option for a nicely formatted time axis
## default tick locations and labels:
plot(measlesFit, total=TRUE, xaxis=list(epochsAsDate=TRUE, line=1))
## an alternative with monthly ticks:
oopts <- surveillance.options(stsTickFactors = c("\%m"=0.75, "\%Y" = 1.5))
plot(measlesFit, total=TRUE, xaxis=list(epochsAsDate=TRUE,
     xaxis.tickFreq=list("\%m"=atChange, "\%Y"=atChange),
     xaxis.labelFreq=list("\%Y"=atMedian), xaxis.labelFormat="\%Y"))
surveillance.options(oopts)

## plot the multiplicative effect of seasonality
plot(measlesFit, type="season")

## dominant eigenvalue of the Lambda matrix (cf. Held and Paul, 2012)
getMaxEV(measlesFit)  # here simply constant and equal to exp(ar.1)
plot(measlesFit, type="maxEV")  # not very exciting

## fitted mean components/proportions by district, averaged over time
if (requireNamespace("gridExtra")) {
    plot(measlesFit, type="maps", labels=list(cex=0.6),
         which=c("endemic", "epi.own"), prop=TRUE, zmax=NA,
         main=c("endemic proportion", "autoregressive proportion"))
}

## estimated random intercepts of the endemic component
fixef(measlesFit)["end.ri(iid)"]     # global intercept (log-scale)
ranef(measlesFit, tomatrix = TRUE)   # zero-mean deviations
ranef(measlesFit, intercept = TRUE)  # sum of the above
exp(ranef(measlesFit))               # multiplicative effects
plot(measlesFit, type="ri", component="end",
     main="deviations around the endemic intercept (log-scale)")
if (requireNamespace("scales"))  # needed for logarithmic color breaks
    plot(measlesFit, type="ri", component="end", exp=TRUE,
         main="multiplicative effects", labels=list(font=3, labels="GEN"))

## neighbourhood weights as a function of neighbourhood order
plot(measlesFit, type="neweights")  # boring, model has no "ne" component

## fitted values for the 6 regions with most cases and some customization
bigunits <- tail(names(sort(colSums(observed(measlesWeserEms)))), 6)
plot(measlesFit, units=bigunits,
     names=measlesWeserEms@map@data[bigunits,"GEN"],
     legend=5, legend.args=list(x="top"), xlab="Time (weekly)",
     hide0s=TRUE, ylim=c(0,max(observed(measlesWeserEms)[,bigunits])),
     start=c(2002,1), end=c(2002,26), par.settings=list(xaxs="i"))
}
\keyword{hplot}
surveillance/man/stsplot_time.Rd0000644000176200001440000002053513755760615016551 0ustar liggesusers\encoding{latin1}
\name{stsplot_time}
\alias{stsplot_time}
\alias{stsplot_time1}
\alias{stsplot_alarm}
\title{
  Time-Series Plots for \code{"sts"} Objects
}
\description{
  These are the \code{plot} variants of \code{type=observed~time|unit},
  \code{type=observed~time}, and \code{type=alarm~time} for
  \code{"\linkS4class{sts}"} objects (see the central \code{"sts"}
  \code{\link[=plot,sts,missing-method]{plot}}-method for an overview of
  plot types).
}
\usage{
stsplot_time(x, units=NULL,
             as.one=FALSE, same.scale=TRUE, par.list=list(), ...)

stsplot_time1(x, k=1, ylim=NULL, axes=TRUE,
              xaxis.tickFreq=list("\%Q"=atChange),
              xaxis.labelFreq=xaxis.tickFreq, xaxis.labelFormat="\%G\n\n\%OQ",
              epochsAsDate=x@epochAsDate,
              xlab="time", ylab="No. infected", main=NULL,
              type="s", lty=c(1,1,2), col=c(NA,1,4), lwd=c(1,1,1),
              outbreak.symbol=list(pch=3, col=3, cex=1, lwd=1),
              alarm.symbol=list(pch=24, col=2, cex=1, lwd=1),
              legend.opts=list(), dx.upperbound=0L,
              hookFunc=function(){},
              .hookFuncInheritance=function() {}, ...)

stsplot_alarm(x, lvl=rep(1,nrow(x)), ylim=NULL,
              xaxis.tickFreq=list("\%Q"=atChange),
              xaxis.labelFreq=xaxis.tickFreq, xaxis.labelFormat="\%G\n\n\%OQ",
              epochsAsDate=x@epochAsDate, xlab="time", main=NULL,
              type="hhs", lty=c(1,1,2), col=c(1,1,4),
              outbreak.symbol=list(pch=3, col=3, cex=1, lwd=1),
              alarm.symbol=list(pch=24, col=2, cex=1, lwd=1),
              cex=1, cex.yaxis=1, ...)
}
\arguments{
  \item{x}{an object of class \code{"\linkS4class{sts}"}.}
  \item{units}{optional integer or character vector to select the units
    (=columns of \code{observed(x)}) to plot. The default is to plot all
    time series. If \code{as.one=FALSE}, \code{stsplot_time1} is called
    \code{for (k in units)} with \code{mfrow} splitting
    (see \code{par.list}).
Note that if there are too many \code{units}, the default \code{mfrow}
setting might lead to the error \dQuote{figure margins too large}
(meaning that the units do not fit onto a single page).}
\item{as.one}{logical indicating if all time series should be plotted
in a single frame (using \code{\link{matplot}}).}
\item{same.scale}{logical indicating if all time series should be
plotted with the same \code{ylim}. Default is to do so. Only relevant
for multivariate plots (\code{ncol(x) > 1}).}
\item{par.list}{a list of arguments delivered to a call of
\code{\link{par}} to set graphical parameters before plotting.
The \code{mfrow} splitting is handled per default.
Afterwards, the \code{par}ameters are reverted to their original values.
Use \code{par.list=NULL} to disable the internal \code{par} call.}
\item{k}{the unit to plot, i.e., an element of \code{1:ncol(x)}.}
\item{ylim}{the y limits of the plot(s). Ignored if
\code{same.scale=FALSE}.}
\item{axes}{a logical value indicating whether both axes should be
drawn on the plot.}
\item{xaxis.tickFreq,xaxis.labelFreq,xaxis.labelFormat}{see
\code{\link{addFormattedXAxis}}.}
\item{epochsAsDate}{Boolean indicating whether to treat the epochs as
Date objects (or to transform them to dates such that the new x-axis
formatting is applied). Default: Value of the \code{epochAsDate} slot
of \code{x}.}
\item{xlab}{a title for the x axis. See \code{plot.default}.}
\item{ylab}{a title for the y axis. See \code{plot.default}.}
\item{main}{an overall title for the plot: see 'title'.}
\item{type}{type of plot to do.}
\item{lty}{vector of length 3 specifying the line type for the three
lines in the plot -- see \code{col} argument.}
\item{col}{Vector of length 3 specifying the color to use in the plot.
The first color is the fill color of the polygons for the counts bars
(\code{NA} for unfilled), the 2nd element denotes their border color,
the 3rd element is the color of the \code{upperbound} plotting.}
\item{lwd}{Vector of length 3 specifying the line width of the three
elements to plot. See also the \code{col} argument.}
\item{alarm.symbol}{a list with entries \code{pch}, \code{col},
\code{cex} and \code{lwd} specifying the appearance of the alarm symbol
in the plot.}
\item{outbreak.symbol}{a list with entries \code{pch}, \code{col},
\code{cex} and \code{lwd} specifying the appearance of the outbreak
symbol in the plot.}
\item{legend.opts}{a list of arguments for \code{\link{legend}}.
If \code{\link{missing}(legend.opts)} (i.e., not explicitly specified),
the default legend will only be produced if \code{x} contains any
information on outbreaks, alarms, or upperbounds.
To disable the legend, use, e.g., \code{legend.opts=NULL}.
Otherwise, the following defaults are used:
\describe{
\item{\code{x}}{\code{"top"}}
\item{\code{legend}}{\code{c("Infected","Threshold","Outbreak","Alarm")}}
\item{\code{lty,pch,col}}{the corresponding graphical settings}
}
Any further arguments to the \code{legend} function are just provided
as additional elements of this list, e.g. \code{horiz=TRUE}.
}
\item{dx.upperbound}{horizontal change in the plotting of the
upperbound line. Sometimes it can be convenient to offset this line a
little for better visibility.}
\item{lvl}{A vector of length \code{ncol(x)}, which is used to specify
the hierarchy level for each time series in the sts object for alarm
plots.}
\item{cex}{A numerical value giving the amount by which plotting text
and symbols should be magnified relative to the default.
See \code{\link{par}} for details.}
\item{cex.yaxis}{The magnification to be used for y-axis annotation
relative to the current setting of \code{cex}.}
\item{hookFunc}{a function that is called after all the basic plotting
has been done, i.e., it is not possible to control formatting with this
function. See Examples.}
\item{.hookFuncInheritance}{a function which is altered by sub-classes'
plot methods. Do not alter this function manually.}
\item{...}{further arguments for the function \code{matplot}. If, e.g.,
\code{xlab} or \code{main} are provided, they overwrite the default
values.}
}
\details{
The time series plot relies on the work-horse \code{stsplot_time1}.
Its arguments are (almost) the same as those of
\code{\link{plot.survRes}}.
}
\value{
\code{NULL} (invisibly). The functions are called for their
side-effects.
}
\author{
Michael H\enc{ö}{oe}hle and Sebastian Meyer
}
\seealso{
There is an \code{\link[=autoplot.sts]{autoplot}}-method, which
implements \CRANpkg{ggplot2}-based time-series plots of \code{"sts"}
objects.

The \code{\link{stsplot}} help page gives an overview of other types of
plots for \code{"sts"} objects.
}
\examples{
data("ha.sts")
print(ha.sts)

plot(ha.sts, type=observed ~ time | unit)  # default multivariate type
plot(ha.sts, units=c("mitt", "pank"))      # selected units
plot(ha.sts, type=observed ~ time)         # aggregated over all districts

## Hook function example
hookFunc <- function() grid(NA,NULL,lwd=1)
plot(ha.sts, hookFunc=hookFunc)

## another multivariate time series example plotted "as.one"
data("measlesDE")
plot(measlesDE, units=1:2, as.one=TRUE, legend.opts=list(cex=0.8))

## more sophisticated plots are offered by package "xts"
if (requireNamespace("xts"))
    plot(as.xts.sts(measlesDE))

## Use ISO8601 date formatting (see ?strptime) and no legend
data("salmNewport")
plot(aggregate(salmNewport,by="unit"), xlab="Time (weeks)",
     xaxis.tickFreq=list("\%m"=atChange,"\%G"=atChange),
     xaxis.labelFreq=list("\%G"=atMedian),xaxis.labelFormat="\%G")

## Formatting now also works for daily data (illustrated by an
## artificial outbreak converted to an sts object by linelist2sts)
set.seed(123)
exposureTimes <- as.Date("2014-03-12") + sample(x=0:25,size=99,replace=TRUE)
sts <- linelist2sts(data.frame(exposure=exposureTimes),
                    dateCol="exposure",aggregate.by="1 day")
## Plot it with larger ticks for days than usual
surveillance.options("stsTickFactors"=c("\%d"=1, "\%W"=0.33, "\%V"=0.33,
                     "\%m"=1.75, "\%Q"=1.25, "\%Y"=1.5, "\%G"=1.5))
plot(sts,xaxis.tickFreq=list("\%d"=atChange,"\%m"=atChange),
     xaxis.labelFreq=list("\%d"=at2ndChange),xaxis.labelFormat="\%d-\%b",
     xlab="Time (days)")
}
\keyword{hplot}
\keyword{ts}
surveillance/man/epidataCS_permute.Rd0000644000176200001440000000270713263671176017420 0ustar liggesusers\name{epidataCS_permute}
\alias{permute.epidataCS}
\title{
Randomly Permute Time Points or Locations of \code{"epidataCS"}
}
\description{
Monte Carlo tests for space-time interaction (\code{\link{epitest}})
use the distribution of some test statistic under the null hypothesis
of no space-time interaction. For this purpose, the function
\code{permute.epidataCS} randomly permutes the time or space labels of
the events.
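
The null distribution of a test statistic can thus be approximated by
Monte Carlo simulation, schematically as follows (an illustrative
sketch only, using an ad hoc statistic derived from
\code{summary.epidataCS}; \code{\link{epitest}} implements this idea
properly for \code{"\link{twinstim}"} models):
\preformatted{data("imdepi")
stat <- function (x) mean(summary(x)$nSources)  # ad hoc test statistic
permstats <- replicate(99, stat(permute.epidataCS(imdepi, what = "time")))
(1 + sum(permstats >= stat(imdepi))) / (1 + 99)  # Monte Carlo p-value}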
} \usage{ permute.epidataCS(x, what = c("time", "space"), keep) } \arguments{ \item{x}{an object of class \code{"\link{epidataCS}"}.} \item{what}{character string determining what to permute: time points (default) or locations.} \item{keep}{optional logical expression to be evaluated in the context of \code{x$events@data}, determining for which events the time and location should be kept as is. For instance, to keep some \dQuote{prehistory} before time point 30 unchanged, use \code{keep = time <= 30}.} } \value{ the permuted \code{"\link{epidataCS}"} object. } \author{ Sebastian Meyer } \seealso{ \code{\link{epitest}} } \examples{ data("imdepi") set.seed(3) permepi <- permute.epidataCS(imdepi, what = "time", keep = time <= 30) print(imdepi, n = 8) print(permepi, n = 8) ## the first 6 events are kept (as are all row.names), ## the time labels of the remaining events are shuffled ## (and events then again sorted by time), ## the marginal temporal distribution is unchanged } \keyword{manip} surveillance/man/twinstim_tiaf.Rd0000644000176200001440000000535312265262002016674 0ustar liggesusers\name{twinstim_tiaf} \alias{tiaf} \title{ Temporal Interaction Function Objects } \description{ A temporal interaction function for use in \code{\link{twinstim}} can be constructed via the \code{tiaf} function. It checks the supplied function elements, assigns defaults for missing arguments, and returns all checked arguments in a list. However, for standard applications it is much easier to use one of the pre-defined temporal interaction functions, e.g., \code{\link{tiaf.exponential}}. } \usage{ tiaf(g, G, deriv, Deriv, npars, validpars = NULL) } \arguments{ \item{g}{the temporal interaction function. It must accept two arguments, the first one being a vector of time points, the second one a parameter vector. For marked \code{twinstim}, it must accept the type of the event (integer code) as its third argument (either a single type for all locations or separate types for each location).} \item{G}{a primitive of \eqn{g(t)} (with respect to time). It must accept the same arguments as \code{g}, for instance a \emph{vector} of time points (not just a single one).} \item{deriv}{optional derivative of \eqn{g(t)} \emph{with respect to the parameters}. It takes the same arguments as \code{g} but returns a matrix with as many rows as there were time points in the input and \code{npars} columns. This derivative is necessary for the calculation of the score function in \code{twinstim()}, which is advantageous for the numerical log-likelihood maximization.} \item{Deriv}{optional primitive of \code{deriv} (with respect to time). It must accept the same arguments as \code{deriv}, \code{g} and \code{G} and returns a matrix with as many rows as there were time points in the input and \code{npars} columns. The integrated derivative is necessary for the score function in \code{twinstim}.} \item{npars}{the number of parameters of the temporal interaction function \code{g} (i.e. the length of its second argument).} \item{validpars}{ optional function taking one argument, the parameter vector, indicating if it is valid. This approach to specify parameter constraints is rarely needed, because usual box-constrained parameters can be taken into account by using L-BFGS-B as the optimization method in \code{twinstim} (with arguments \code{lower} and \code{upper}), and positivity constraints by using log-parametrizations. This component is not necessary (and ignored) if \code{npars == 0}. } } \value{ list of checked arguments. 
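}
\examples{
## hand-roll an exponential decay g(t) = exp(-alpha*t) -- an
## illustrative sketch only of what the pre-defined tiaf.exponential()
## provides, using a log-parametrization to ensure alpha > 0
## (cf. the 'validpars' discussion above)
\dontrun{
g <- function (t, logalpha, types) exp(-exp(logalpha) * t)
## primitive of g with respect to time: G(t) = -exp(-alpha*t)/alpha
G <- function (t, logalpha, types) -exp(-logalpha) * exp(-exp(logalpha) * t)
myTiaf <- tiaf(g, G, npars = 1)
str(myTiaf)
}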
}
\author{
Sebastian Meyer
}
\seealso{
\code{\link{tiaf.exponential}} for a pre-defined temporal interaction
function, and \code{\link{siaf}} for the spatial interaction function.
}
\keyword{utilities}
surveillance/man/scale.gpc.poly.Rd0000644000176200001440000000133212060143477016634 0ustar liggesusers\name{scale.gpc.poly}
\alias{scale.gpc.poly}
\title{Centering and Scaling a \code{"gpc.poly"} Polygon}
\description{
This is a re-implementation of the corresponding method from package
\pkg{gpclib} to also allow centering.
}
\usage{
\method{scale}{gpc.poly}(x, center = c(0,0), scale = c(1,1))
}
\arguments{
\item{x}{an object of class \code{"gpc.poly"}.}
\item{center}{numeric vector of length 2 (x,y), which will be
subtracted from the respective coordinates of \code{x}.}
\item{scale}{numeric vector of length 2 (x,y), which serves as the
divisor for the respective coordinates of \code{x}.}
}
\value{
A \code{"gpc.poly"}, the shifted and/or scaled version of \code{x}.
}
\keyword{methods}
\keyword{manip}
surveillance/man/twinSIR_exData.Rd0000644000176200001440000000051313562264564016651 0ustar liggesusers\name{twinSIR_exData}
\alias{fooepidata}
\docType{data}
\title{
Toy Data for \code{twinSIR}
}
\description{
Toy \code{"\link{epidata}"} previously used to exemplify
\code{\link{twinSIR}} models. We now use the well-known
\code{\link{hagelloch}} dataset.
}
\usage{
data(fooepidata)
}
\keyword{datasets}
\keyword{internal}
surveillance/man/imdepifit.Rd0000644000176200001440000000214213165516007015765 0ustar liggesusers\name{imdepifit}
\alias{imdepifit}
\docType{data}
\title{
Example \code{twinstim} Fit for the \code{imdepi} Data
}
\description{
\code{data("imdepifit")} is a \code{\link{twinstim}} model fitted to
the \code{\link{imdepi}} data.
}
\usage{data("imdepifit")}
\format{
an object of class \code{"\link{twinstim}"}
}
\seealso{
common methods for \code{"twinstim"} fits, exemplified using
\code{imdepifit}, e.g., \code{\link{summary.twinstim}},
\code{\link{plot.twinstim}}, and \code{\link{simulate.twinstim}}
}
\examples{
data("imdepi", "imdepifit")

\dontrun{
## reproduce "imdepifit"
myimdepifit <- twinstim(
    endemic = addSeason2formula(~ offset(log(popdensity)) + I(start/365-3.5),
                                S = 1, period = 365, timevar = "start"),
    epidemic = ~ type + agegrp, siaf = siaf.gaussian(),
    data = imdepi, subset = !is.na(agegrp),
    optim.args = list(control = list(reltol = sqrt(.Machine$double.eps))),
    ## the historical default for reltol is 1e-6, which is rather large
    model = FALSE, cumCIF = FALSE
    )
stopifnot(all.equal(imdepifit, myimdepifit))
}
}
\keyword{datasets}
surveillance/man/categoricalCUSUM.Rd0000644000176200001440000001532713324034565017107 0ustar liggesusers\name{categoricalCUSUM}
\alias{categoricalCUSUM}
\alias{catcusum.LLRcompute}
\encoding{latin1}
\title{CUSUM detector for time-varying categorical time series}
\description{
Function to process an \code{sts} object by binomial, beta-binomial or
multinomial CUSUM as described by \enc{Höhle}{Hoehle} (2010). Logistic,
multinomial logistic, proportional odds or Bradley-Terry regression
models are used to specify in-control and out-of-control parameters.
The implementation is illustrated in Salmon et al. (2016).
}
\usage{
categoricalCUSUM(stsObj,control = list(range=NULL,h=5,pi0=NULL,
                 pi1=NULL, dfun=NULL, ret=c("cases","value")),...)
}
\arguments{
\item{stsObj}{Object of class \code{sts} containing the number of
counts in each of the \eqn{k} categories of the response variable.
Time varying number of counts \eqn{n_t} is found in slot
\code{populationFrac}.
}
\item{control}{Control object containing several items
\itemize{
\item{\code{range}}{Vector of length \eqn{t_{max}} with indices of the
\code{observed} slot to monitor.}
\item{\code{h}}{Threshold to use for the monitoring. Once the CUSUM
statistic is larger than or equal to \code{h}, an alarm is sounded.}
\item{\code{pi0}}{\eqn{(k-1) \times t_{max}} in-control probability
vector for all categories except the reference category.}
\item{\code{pi1}}{\eqn{(k-1) \times t_{max}} out-of-control probability
vector for all categories except the reference category.}
\item{\code{dfun}}{The probability mass function (PMF) or density used
to compute the likelihood ratios of the CUSUM. In a negative binomial
CUSUM this is \code{dnbinom}, in a binomial CUSUM \code{dbinom} and in
a multinomial CUSUM \code{dmultinom}. The function must be able to
handle the arguments \code{y}, \code{size}, \code{mu} and \code{log}.
As a consequence, in the case of, e.g., the beta-binomial distribution,
one has to write a small wrapper function.}
\item{\code{ret}}{Return the necessary proportion to sound an alarm in
the slot \code{upperbound} or just the value of the CUSUM statistic.
Thus, \code{ret} is one of the values in \code{c("cases","value")}.
Note: For the binomial PMF it is possible to compute this value
explicitly, which is much faster than the numeric search otherwise
conducted. If \code{dfun} just corresponds to \code{dbinom}, simply set
the attribute \code{isBinomialPMF} for the \code{dfun} object.}
}}
\item{\dots}{Additional arguments to send to \code{dfun}.}
}
\details{
The function allows the monitoring of categorical time series as
described by regression models for binomial, beta-binomial or
multinomial data. The latter includes, e.g., multinomial logistic
regression models, proportional odds models or Bradley-Terry models
for paired comparisons. See the \enc{Höhle}{Hoehle} (2010) reference
for further details about the methodology.

Once an alarm is found the CUSUM scheme is reset (to zero) and
monitoring continues from there.
}
\seealso{\code{\link{LRCUSUM.runlength}}}
\value{An \code{sts} object with \code{observed}, \code{alarm}, etc.
slots trimmed to the \code{control$range} indices.
}
\references{
\enc{Höhle}{Hoehle}, M. (2010):
Online Change-Point Detection in Categorical Time Series.
In: T. Kneib and G. Tutz (Eds.), Statistical Modelling and Regression
Structures, Physica-Verlag.

Salmon, M., Schumacher, D. and \enc{Höhle}{Hoehle}, M. (2016):
Monitoring count time series in \R: Aberration detection in public
health surveillance. \emph{Journal of Statistical Software},
\bold{70} (10), 1-35. \doi{10.18637/jss.v070.i10}
}
\examples{
if (require("gamlss")) {
###########################################################################
#Beta-binomial CUSUM for a small example containing the time-varying
#number of positive tests out of a time-varying number of total
#tests.
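#Sketch of the underlying LR-CUSUM recursion (cf. Hoehle, 2010): the
#statistic C_t = max(0, C_{t-1} + log(dfun(y_t; pi1)/dfun(y_t; pi0)))
#is compared against the threshold h at each monitored time point and
#is reset to zero once an alarm has been sounded.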
#######################################
#Load meat inspection data
data("abattoir")

#Use GAMLSS to fit beta-bin regression model
phase1 <- 1:(2*52)
phase2 <- (max(phase1)+1) : nrow(abattoir)

#Fit beta-binomial model using GAMLSS
abattoir.df <- as.data.frame(abattoir)

#Replace the observed and epoch column names to something more convenient
dict <- c("observed"="y", "epoch"="t", "population"="n")
replace <- dict[colnames(abattoir.df)]
colnames(abattoir.df)[!is.na(replace)] <- replace[!is.na(replace)]

m.bbin <- gamlss( cbind(y,n-y) ~ 1 + t +
                    sin(2*pi/52*t) + cos(2*pi/52*t) +
                    sin(4*pi/52*t) + cos(4*pi/52*t),
                  sigma.formula=~1,
                  family=BB(sigma.link="log"),
                  data=abattoir.df[phase1,c("n","y","t")])

#CUSUM parameters
R <- 2 #detect a doubling of the odds for a test being positive
h <- 4 #threshold of the cusum

#Compute in-control and out of control mean
pi0 <- predict(m.bbin,newdata=abattoir.df[phase2,c("n","y","t")],type="response")
pi1 <- plogis(qlogis(pi0)+log(R))

#Create matrix with in control and out of control proportions.
#Categories are D=1 and D=0, where the latter is the reference category
pi0m <- rbind(pi0, 1-pi0)
pi1m <- rbind(pi1, 1-pi1)

######################################################################
# Use the multinomial surveillance function. To this end it is necessary
# to create a new abattoir object containing counts and proportion for
# each of the k=2 categories. For binomial data this appears a bit
# redundant, but generalizes easier to k>2 categories.
######################################################################
abattoir2 <- sts(epoch=1:nrow(abattoir), start=c(2006,1), freq=52,
                 observed=cbind(abattoir@observed,
                                abattoir@populationFrac-abattoir@observed),
                 populationFrac=cbind(abattoir@populationFrac,abattoir@populationFrac),
                 state=matrix(0,nrow=nrow(abattoir),ncol=2),
                 multinomialTS=TRUE)

######################################################################
#Function to use as dfun in the categoricalCUSUM
#(just a wrapper to the dBB function). Note that from v 3.0-1 the
#first argument of dBB changed its name from "y" to "x"!
######################################################################
mydBB.cusum <- function(y, mu, sigma, size, log = FALSE) {
  return(dBB(y[1,], mu = mu[1,], sigma = sigma, bd = size, log = log))
}

#Create control object for multinom cusum and use the categoricalCUSUM
#method
control <- list(range=phase2,h=h,pi0=pi0m, pi1=pi1m, ret="cases",
                dfun=mydBB.cusum)
surv <- categoricalCUSUM(abattoir2, control=control,
                         sigma=exp(m.bbin$sigma.coef))

#Show results
plot(surv[,1],dx.upperbound=0)
lines(pi0,col="green")
lines(pi1,col="red")

#Index of the alarm
which.max(alarms(surv[,1]))
}
}
\author{M. \enc{Höhle}{Hoehle}}
\keyword{regression}
surveillance/man/layout.labels.Rd0000644000176200001440000001056312573360044016577 0ustar liggesusers\name{layout.labels}
\alias{layout.labels}
\alias{layout.scalebar}
\title{
Layout Items for \code{spplot}
}
\description{
Generate \code{sp.layout} items for use by \code{\link{spplot}} or plot
these items directly in the traditional graphics system.
Function \code{layout.labels} draws labels at the coordinates of the
spatial object, and \code{layout.scalebar} returns a labeled scale bar.
For \code{layout.labels}: \itemize{ \item a \code{FALSE} or \code{NULL} value omits labels (\code{NULL} is returned), \item \code{labels = TRUE} uses \code{row.names(obj)}, \item a character or numeric index for a column of \code{obj@data} which contains suitable labels, \item a vector of length \code{length(obj)} with labels, \item or a list of arguments for \code{\link[lattice]{panel.text}}, where the optional \code{labels} component follows the same rules as above. } For \code{layout.scalebar}, a character vector of length two giving the labels to be put above the left and right ends of the scale bar. } \item{corner}{ the location of the scale bar in the unit square, where \code{c(0,0)} refers to the bottom left corner. By default, the scale bar is placed in the top left corner (with a small buffer). } \item{scale}{ the width of the scale bar in the units of \code{\link{proj4string}(obj)}. If \code{identical(FALSE, \link{is.projected}(obj))} (i.e., \code{obj} has longlat coordinates), \code{scale} is interpreted in kilometres. } \item{height}{ the height of the scale bar, see \code{\link{layout.scale.bar}}. } \item{pos}{ a position specifier for the labels (see \code{\link{text}}). By default, the labels are plotted above the scale bar. } \item{\dots}{ further arguments for \code{\link[lattice]{panel.text}} (if \code{plot = FALSE}) or \code{\link{text}} (if \code{plot = TRUE}) to change the style of the labels, e.g., \code{cex}, \code{col}, and \code{font}. } \item{plot}{ logical indicating if the layout item should be plotted using the traditional graphics system. By default (\code{FALSE}), a list for subsequent use by \code{\link{spplot}} is returned. } } \value{ For \code{layout.labels}, a single \code{sp.layout} item, which is a list with first element \code{"panel.text"} and subsequent elements being arguments to that function based on the \code{labels} specification. For \code{layout.scalebar}, a list of \code{sp.layout} items comprising the polygonal scale bar and the labels. If these layout functions are called with \code{plot = TRUE}, the item is plotted directly using traditional graphics functions and \code{NULL} is returned. 
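
Schematically, a generated labels item is thus of the form (a sketch,
with placeholders in angle brackets):
\preformatted{list("panel.text", x = <x coords>, y = <y coords>, labels = <labels>, ...)}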
}
\author{
Sebastian Meyer
}
\examples{
## districts in the Regierungsbezirk Weser-Ems (longlat coordinates)
data("measlesWeserEms")
mapWE <- measlesWeserEms@map
li1 <- layout.labels(mapWE, labels = list(font=2, labels="GEN"))
li2 <- layout.scalebar(mapWE, corner = c(0.05, 0.05), scale = 20,
                       labels = c("0", "20 km"))
spplot(mapWE, zcol = "AREA", sp.layout = c(list(li1), li2),
       col.regions = rev(heat.colors(100)), scales = list(draw = TRUE))

## districts in Bavaria (projected coordinates)
load(system.file("shapes", "districtsD.RData", package = "surveillance"))
bavaria <- districtsD[substr(row.names(districtsD), 1, 2) == "09", ]
sb <- layout.scalebar(bavaria, corner = c(0.75,0.9), scale = 50,
                      labels = c("0", "50 km"), cex = 0.8)
spplot(bavaria, zcol = "POPULATION", sp.layout = sb,
       xlab = "x [km]", ylab = "y [km]", scales = list(draw = TRUE),
       col.regions = rev(heat.colors(100)))

## these layout functions also work in the traditional graphics system
par(mar = c(0,0,0,0))
plot(bavaria, col = "lavender")
layout.scalebar(bavaria, corner = c(0.75, 0.9), scale = 50,
                labels = c("0", "50 km"), plot = TRUE)
layout.labels(bavaria,
              labels = list(cex = 0.8, labels = substr(bavaria$GEN, 1, 3)),
              plot = TRUE)
}
\keyword{aplot}
\keyword{dplot}
surveillance/man/deleval.Rd0000644000176200001440000000272413122471774015441 0ustar liggesusers\name{deleval}
\alias{deleval}
\docType{data}
\title{Surgical Failures Data}
\description{
The dataset from Steiner et al. (1999) on the occurrence of near misses
and deaths during a series of 104 arterial switch operations -- useful
for illustrating the paired binary CUSUM.
}
\usage{data(deleval)}
\details{
Steiner et al. (1999) use data from de Leval et al. (1994) to
illustrate monitoring of failure rates of a surgical procedure for a
bivariate outcome. Over a period of six years an arterial switch
operation was performed on 104 newborn babies. Since the death rate
from this surgery was relatively low the idea of surgical "near miss"
was introduced. It is defined as the need to reinstitute
cardiopulmonary bypass after a trial period of weaning. The object of
class \code{sts} contains the recordings of near misses and deaths from
the surgery for the 104 newborn babies of the study.

The data could also be handled by a multinomial CUSUM model.
}
\seealso{\code{\link{pairedbinCUSUM}}}
\examples{
data("deleval")
plot(deleval, xaxis.labelFormat=NULL,ylab="Response",xlab="Patient number")
}
\references{
Steiner, S. H., Cook, R. J., and Farewell, V. T. (1999), Monitoring
paired binary surgical outcomes using cumulative sum charts, Statistics
in Medicine, 18, pp. 69--86.

De Leval, Marc R., Fran\enc{ç}{c}ois, K., Bull, C., Brawn, W. B. and
Spiegelhalter, D. (1994), Analysis of a cluster of surgical failures,
Journal of Thoracic and Cardiovascular Surgery, March, pp. 914--924.
}
\keyword{datasets}
surveillance/man/twinSIR_cox.Rd0000644000176200001440000000134312672347154016224 0ustar liggesusers\name{twinSIR_cox}
\alias{cox}
\title{
Identify Endemic Components in an Intensity Model
}
\description{
The special function \code{cox} marks terms in formulae of the
functions \code{\link{twinSIR}} and \code{\link{simEpidata}} as endemic
components, i.e. variables acting multiplicatively on the baseline
infection intensity.

An illustrative \code{twinSIR} call with two epidemic and two endemic
covariates is:
\code{twinSIR(~B1 + B2 + cox(vaccination) + cox(size), data=myEpidata)}.

Technically, this function is implemented as \code{function(x) {x}} and
defined as \dQuote{special} in \code{\link{terms.formula}}.
}
\seealso{
Usage in formulae of functions \code{\link{twinSIR}} and
\code{\link{simEpidata}}.
}
\keyword{internal}
surveillance/man/imdepi.Rd0000644000176200001440000001751214004763214015266 0ustar liggesusers\encoding{latin1}
\docType{data}
\name{imdepi}
\alias{imdepi}
\title{
Occurrence of Invasive Meningococcal Disease in Germany
}
\description{
\code{imdepi} contains data on the spatio-temporal location of 636
cases of invasive meningococcal disease (IMD) caused by the two most
common meningococcal finetypes in Germany, \samp{B:P1.7-2,4:F1-5} (of
serogroup B) and \samp{C:P1.5,2:F3-3} (of serogroup C).
}
\usage{
data("imdepi")
}
\format{
\code{imdepi} is an object of class \code{"\link{epidataCS}"} (a list
with components \code{events}, \code{stgrid}, \code{W} and
\code{qmatrix}).
}
\details{
The \code{imdepi} data is a simplified version of what has been
analyzed by Meyer et al. (2012). Simplification is with respect to the
temporal resolution of the \code{stgrid} (see below) to be used in
\code{\link{twinstim}}'s endemic model component. In what follows, we
describe the elements \code{events}, \code{stgrid}, \code{W}, and
\code{qmatrix} of \code{imdepi} in greater detail.

\code{imdepi$events} is a \code{"\linkS4class{SpatialPointsDataFrame}"}
object (ETRS89 projection, i.e. EPSG code 3035, with unit \sQuote{km})
containing 636 events, each with the following entries:
\describe{
\item{time:}{Time of the case occurrence measured in number of days
since origin. Note that a U(0,1)-distributed random number has been
subtracted from each of the original event times (days) to break ties
(using \code{\link{untie}(imdepi_tied, amount=list(t=1))}).}
\item{tile:}{Tile ID in the spatio-temporal grid (\code{stgrid}) of
endemic covariates, in which the event is contained. This corresponds
to one of the 413 districts of Germany.}
\item{type:}{Event type, a factor with levels \code{"B"} and
\code{"C"}.}
\item{eps.t:}{Maximum temporal interaction range for the event. Here
set to 30 days.}
\item{eps.s:}{Maximum spatial interaction range for the event. Here
set to 200 km.}
\item{sex:}{Sex of the case, i.e. a factor with levels \code{"female"}
and \code{"male"}. Note: for some cases this information is not
available (\code{NA}).}
\item{agegrp:}{Factor giving the age group of the case, i.e. 0-2, 3-18
or >=19. Note: for one case this information is not available
(\code{NA}).}
\item{BLOCK, start:}{Block ID and start time (in days since origin) of
the cell in the spatio-temporal endemic covariate grid, which the event
belongs to.}
\item{popdensity:}{Population density (per square km) at the location
of the event (corresponds to population density of the district where
the event is located).}
}
There are further auxiliary columns attached to the events' data, the
names of which begin with a . (dot): These are created during
conversion to the \code{"epidataCS"} class and are necessary for
fitting the data with \code{twinstim}, see the description of the
\code{"\link{epidataCS}"}-class. With \code{coordinates(imdepi$events)}
one obtains the (x,y) locations of the events.

The district identifier in \code{tile} is indexed according to the
German official municipality key
(\dQuote{Amtlicher Gemeindeschl\enc{ü}{ue}ssel}). See
\url{https://de.wikipedia.org/wiki/Amtlicher_Gemeindeschl\%C3\%BCssel}
for details.

The data component \code{stgrid} contains the spatio-temporal grid of
endemic covariate information.
In addition to the usual bookkeeping variables this includes:
\describe{
\item{area:}{Area of the district \code{tile} in square kilometers.}
\item{popdensity:}{Population density (inhabitants per square
kilometer) computed from DESTATIS (Federal Statistical Office)
information (Date: 31.12.2008) on communities level (LAU2) aggregated
to district level (NUTS3).}
}
We have not actually included any time-dependent covariates here; we
just established this grid with a (reduced, hence fast) temporal
resolution of \emph{monthly} intervals so that we can model endemic
time trends and seasonality (in this discretized time).

The entry \code{W} contains the observation window as a
\code{"\linkS4class{SpatialPolygons}"} object, in this case the
boundaries of Germany. It was obtained as
\code{stateD <- rgeos::gUnaryUnion(districtsD)}, where
\code{districtsD} represents Germany's districts as at 2009-01-01,
simplified by the \dQuote{modified Visvalingam} algorithm (level 6.6\%)
available at \url{https://MapShaper.org} (v. 0.1.17). The objects
\code{districtsD} and \code{stateD} are contained in
\code{system.file("shapes", "districtsD.RData", package="surveillance")}.

The entry \code{qmatrix} is a \eqn{2\times 2}{2 x 2} identity matrix
indicating that no transmission between the two finetypes can occur.
}
\source{
IMD case reports: German Reference Centre for Meningococci at the
Department of Hygiene and Microbiology,
Julius-Maximilians-Universit\enc{ä}{ae}t W\enc{ü}{ue}rzburg, Germany
(\url{https://www.hygiene.uni-wuerzburg.de/meningococcus/}).
Thanks to Dr. Johannes Elias and Prof. Dr. Ulrich Vogel for providing
the data.

Shapefile of Germany's districts as at 2009-01-01: German Federal
Agency for Cartography and Geodesy, Frankfurt am Main, Germany,
\url{https://gdz.bkg.bund.de/}.
%% "Copy, distribution and making available to the public - also in
%% parts - is allowed with reference."
}
\references{
Meyer, S., Elias, J. and H\enc{ö}{oe}hle, M. (2012):
A space-time conditional intensity model for invasive meningococcal
disease occurrence. \emph{Biometrics}, \bold{68}, 607-616.
\doi{10.1111/j.1541-0420.2011.01684.x}
}
\seealso{
the data class \code{"\link{epidataCS}"}, and function
\code{\link{twinstim}} for model fitting.
}
\examples{
data("imdepi")

# Basic information
print(imdepi, n=5, digits=2)

# What is an epidataCS-object?
str(imdepi, max.level=4)
names(imdepi$events@data)
# => events data.frame has hidden columns
sapply(imdepi$events@data, class)
# marks and print methods ignore these auxiliary columns

# look at the B type only
imdepiB <- subset(imdepi, type == "B")
#<- subsetting applies to the 'events' component
imdepiB

# select only the last 10 events
tail(imdepi, n=10)   # there is also a corresponding 'head' method

# Access event marks
str(marks(imdepi))

# there is an update-method which assures that the object remains valid
# when changing parameters like eps.s, eps.t or qmatrix
update(imdepi, eps.t = 20)

# Summary
s <- summary(imdepi)
s
str(s)

# Step function of number of infectives
plot(s$counter, xlab = "Time [days]",
     ylab = "Number of infectious individuals",
     main = "Time series of IMD assuming 30 days infectious period")

# distribution of number of potential sources of infection
opar <- par(mfrow=c(1,2), las=1)
for (type in c("B","C")) {
  plot(100*prop.table(table(s$nSources[s$eventTypes==type])),
       xlim=range(s$nSources),
       xlab = "Number of potential epidemic sources",
       ylab = "Proportion of events [\%]")
}
par(opar)

# a histogram of the number of events along time (using the
# plot-method for the epidataCS-class, see ?plot.epidataCS)
opar <- par(mfrow = c(2,1))
plot(imdepi, "time", subset = type == "B", main = "Finetype B")
plot(imdepi, "time", subset = type == "C", main = "Finetype C")
par(opar)

# Plot the spatial distribution of the events in W
plot(imdepi, "space", points.args = list(col=c("indianred", "darkblue")),
     axes = TRUE, lwd = 2)
title(xlab = "x [km]", ylab = "y [km]")

\dontrun{
# or manually (no legends, no account for tied locations)
plot(imdepi$W, lwd=2)
plot(imdepi$events, pch=c(3,4)[imdepi$events$type], cex=0.8,
     col=c("indianred", "darkblue")[imdepi$events$type], add=TRUE)
}

\dontrun{
# Show a dynamic illustration of the spatio-temporal dynamics of the
# spread during the first year of type B with a step size of 7 days
animate(imdepiB, interval=c(0,365), time.spacing=7, sleep=0.1)
}
}
\keyword{datasets}
surveillance/man/nbOrder.Rd0000644000176200001440000000360012407020210015376 0ustar liggesusers\name{nbOrder}
\alias{nbOrder}
\title{
Determine Neighbourhood Order Matrix from Binary Adjacency Matrix
}
\description{
Given a square binary adjacency matrix, the function \code{nbOrder}
determines the integer matrix of neighbourhood orders (shortest-path
distance) using the function \code{\link[spdep]{nblag}} from the
\pkg{spdep} package.
}
\usage{
nbOrder(neighbourhood, maxlag = 1)
}
\arguments{
\item{neighbourhood}{
a square, numeric or logical, and usually symmetric matrix with
finite entries (and usually zeros on the diagonal) which indicates
vertex adjacencies, i.e., first-order neighbourhood (interpreted as
\code{neighbourhood == 1}, \emph{not} \code{>0}).
}
\item{maxlag}{
positive scalar integer specifying an upper bound for the
neighbourhood order. The default (1) just returns the input
neighbourhood matrix (converted to binary integer mode).
\code{maxlag} is automatically trimmed to one less than the number of
regions (there cannot be higher orders) and then converted to integer,
thus, \code{maxlag = Inf} also works.
}
}
\value{
An integer matrix of neighbourhood orders, i.e., the shortest-path
distance matrix of the vertices. The \code{dimnames} of the input
\code{neighbourhood} matrix are preserved.
}
\note{
Upon completion, the function issues a \code{\link{message}} informing
about the range of maximum neighbourhood order by region.
} \author{ Sebastian Meyer } \seealso{ \code{\link[spdep]{nblag}} from the \pkg{spdep} package, on which this wrapper depends. } \examples{ ## generate adjacency matrix set.seed(1) n <- 6 adjmat <- matrix(0, n, n) adjmat[lower.tri(adjmat)] <- sample(0:1, n*(n-1)/2, replace=TRUE) adjmat <- adjmat + t(adjmat) adjmat ## determine neighbourhood order matrix if (requireNamespace("spdep")) { nbmat <- nbOrder(adjmat, maxlag=Inf) nbmat } } \keyword{spatial} \keyword{utilities} surveillance/man/twinstim_epitest.Rd0000644000176200001440000002131214004512307017416 0ustar liggesusers\encoding{latin1} \name{twinstim_epitest} \alias{epitest} \alias{coef.epitest} \alias{plot.epitest} \title{Permutation Test for Space-Time Interaction in \code{"twinstim"}} \description{ The function \code{epitest} takes a \code{"twinstim"} model and tests if the spatio-temporal interaction invoked by the epidemic model component is statistically significant. The test only works for simple epidemic models, where \code{epidemic = ~1} (no additional parameters for event-specific infectivity), and requires the non-canonical \code{epilink="identity"} (see \code{\link{twinstim}}). A permutation test is performed by default, which is only valid if the endemic intensity is space-time separable. The approach is described in detail in Meyer et al. (2016), where it is also compared to alternative global tests for clustering such as the \code{\link{knox}} test. } \usage{ epitest(model, data, tiles, method = "time", B = 199, eps.s = NULL, eps.t = NULL, fixed = NULL, verbose = TRUE, compress = FALSE, ...) \method{coef}{epitest}(object, which = c("m1", "m0"), ...) \method{plot}{epitest}(x, teststat = c("simpleR0", "D"), ...) } \arguments{ \item{model}{ a simple epidemic \code{"\link{twinstim}"} with \code{epidemic = ~1}, fitted using the non-canonical \code{epilink="identity"}. Note that the permutation test is only valid for models with a space-time separable endemic intensity, where covariates vary either in space or time but not both. } \item{data}{ an object of class \code{"\link{epidataCS}"}, the \code{data} to which the \code{model} was fitted. } \item{tiles}{ (only used by \code{method = "simulate"}) a \code{"\linkS4class{SpatialPolygons}"} representation of the \code{tile}s in \code{data$stgrid}. } \item{method}{ one of the following character strings specifying the test method: \describe{ \item{\code{"LRT"}:}{ a simple likelihood ratio test of the epidemic \code{model} against the corresponding endemic-only model, } \item{\code{"time"}/\code{"space"}:}{ a Monte Carlo permutation test where the null distribution is obtained by relabeling time points or locations, respectively (using \code{\link{permute.epidataCS}}). } \item{\code{"simulate"}:}{ obtain the null distribution of the test statistic by simulations from the endemic-only model (using \code{\link{simEndemicEvents}}). } } } \item{B}{ the number of permutations for the Monte Carlo approach. The default number is rather low; if computationally feasible, \code{B = 999} is more appropriate. Note that this determines the \dQuote{resolution} of the p-value: the smallest attainable p-value is \code{1/(B+1)}. } \item{eps.s,eps.t}{arguments for \code{\link{simpleR0}}.} \item{fixed}{ optional character vector naming parameters to fix at their original value when re-fitting the \code{model} on permuted data. The special value \code{fixed = TRUE} means to fix all epidemic parameters but the intercept. } \item{verbose}{ the amount of tracing in the range \code{0:3}. 
Set to 0 (or \code{FALSE}) for no output, 1 (or \code{TRUE}, the default) for a progress bar, 2 for the test statistics resulting from each permutation, and to 3 for additional tracing of the log-likelihood maximization in each permutation (not useful if parallelized). Tracing does not work if permutations are parallelized using clusters. See \code{\link{plapply}} for other choices. } \item{compress}{ logical indicating if the \code{nobs}-dependent elements \code{"fitted"}, \code{"fittedComponents"}, and \code{"R0"} should be dropped from the permutation-based model fits. Not keeping these elements saves a lot of memory especially with a large number of events. Note, however, that the returned \code{permfits} then no longer are fully valid \code{"twinstim"} objects (but most methods will still work). } \item{\dots}{further arguments for \code{\link{plapply}} to configure parallel operation, i.e., \code{.parallel} as well as \code{.seed} to make the results reproducible.\cr For the \code{plot}-method, further arguments passed to \code{\link{truehist}}.\cr Ignored by the \code{coef}-method. } \item{object,x}{ an object of class \code{"epitest"} as returned by \code{epitest}. } \item{which}{ a character string indicating either the full (\code{"m1"}, default) or the endemic-only (\code{"m0"}) model. } \item{teststat}{ a character string determining the test statistic to plot, either \code{"\link{simpleR0}"} or \code{"D"} (twice the log-likelihood difference of the models). } } \value{ a list (inheriting from \code{"htest"}) with the following components: \item{method}{a character string indicating the type of test performed.} \item{data.name}{a character string giving the supplied \code{data} and \code{model} arguments.} \item{statistic}{the observed test statistic.} \item{parameter}{the (effective) number of permutations used to calculate the p-value (only those with convergent fits are used).} \item{p.value}{the p-value for the test. For the \code{method}s involving resampling under the null (\code{method != "LRT"}), it is based on the subset of convergent fits only and the p-value from the simple LRT is attached as an attribute \code{"LRT"}.} In addition, if \code{method != "LRT"}, the result will have the following elements: \item{permfits}{the list of model fits (endemic-only and epidemic) from the \code{B} permutations.} \item{permstats}{a data frame with \code{B} rows and the columns \code{"l0"} (log-likelihood of the endemic-only model \code{m0}), \code{"l1"} (log-likelihood of the epidemic model \code{m1}), \code{"D"} (twice their difference), \code{"simpleR0"} (the results of \code{\link{simpleR0}(m1, eps.s, eps.t)}), and \code{"converged"} (a boolean indicator if both models converged).} The \code{plot}-method invisibly returns \code{NULL}. The \code{coef}-method returns the \code{B} x \code{length(coef(model))} matrix of parameter estimates. } \details{ This space-time interaction test is limited to models with \code{epidemic = ~1}, since covariate effects are not identifiable under the null hypothesis of no space-time interaction. Estimating a rich epidemic \code{model} based on permuted data will most likely result in singular convergence. A similar issue might arise when the model employs parametric interaction functions, in which case \code{fixed=TRUE} can be used. For further details see Meyer et al. (2016). The test statistic is the reproduction number \code{\link{simpleR0}}. 
A likelihood ratio test of the supplied epidemic model against the
corresponding endemic-only model is also available. By default, the
null distribution of the test statistic under no space-time interaction
is obtained by a Monte Carlo permutation approach (via
\code{\link{permute.epidataCS}}) and therefore relies on a space-time
separable endemic model component.

The \code{plot}-method shows a \code{\link{truehist}} of the simulated
null distribution together with the observed value. The
\code{coef}-method extracts the parameter estimates from the \code{B}
\code{permfits} (by default for the full model \code{which = "m1"}).
}
\references{
Meyer, S., Warnke, I., R\enc{ö}{oe}ssler, W. and Held, L. (2016):
Model-based testing for space-time interaction using point processes:
An application to psychiatric hospital admissions in an urban area.
\emph{Spatial and Spatio-temporal Epidemiology}, \bold{17}, 15-25.
\doi{10.1016/j.sste.2016.03.002}.
Eprint: \url{https://arxiv.org/abs/1512.09052}.
}
\author{
Sebastian Meyer
}
\seealso{
\code{\link{permute.epidataCS}}, \code{\link{knox}}
}
\examples{
data("imdepi", "imdepifit")

## test for space-time interaction of the B-cases
## assuming spatial interaction to be constant within 50 km
imdepiB50 <- update(subset(imdepi, type == "B"), eps.s = 50)
imdfitB50 <- update(imdepifit, data = imdepiB50, epidemic = ~1,
                    epilink = "identity", siaf = NULL,
                    start = c("e.(Intercept)" = 0))

## simple likelihood ratio test
epitest(imdfitB50, imdepiB50, method = "LRT")

## permutation test (only a few permutations for speed)
et <- epitest(imdfitB50, imdepiB50,
              B = 3 + 26*surveillance.options("allExamples"),
              verbose = 2 * (.Platform$OS.type == "unix"),
              .seed = 1, .parallel = 1 + surveillance.options("allExamples"))
et
plot(et)

## evidence against the null hypothesis of no space-time interaction
summary(coef(et, which = "m1"))
}
\keyword{htest}
surveillance/man/disProg2sts.Rd0000644000176200001440000000161512672030523016236 0ustar liggesusers\name{disProg2sts}
\alias{disProg2sts}
\alias{sts2disProg}
\title{Convert disProg object to sts and vice versa}
\description{
A small helper function to convert a \code{disProg} object to an object
of the S4 class \code{sts} and vice versa. In the future the \code{sts}
class should replace the \code{disProg} class, but for now this
function allows for conversion between the two formats.
}
\usage{
disProg2sts(disProgObj, map=NULL)
sts2disProg(sts)
}
\arguments{
\item{disProgObj}{an object of class \code{"disProg"}}
\item{map}{an optional \code{"SpatialPolygons"} object}
\item{sts}{an object of class \code{"sts"} to convert}
}
\value{
an object of class \code{"sts"} or \code{"disProg"}, respectively.
}
\seealso{
\code{\link{sts-class}}
}
\examples{
data(ha)
print(disProg2sts(ha))
class(sts2disProg(disProg2sts(ha)))
}
\keyword{utilities}
surveillance/man/salmHospitalized.Rd0000644000176200001440000000116313174706302017326 0ustar liggesusers\name{salmHospitalized}
\alias{salmHospitalized}
\docType{data}
\title{Hospitalized Salmonella cases in Germany 2004-2014}
\description{
Reported number of hospitalized cases of Salmonella in Germany from
2004 to early 2014. The corresponding total number of cases is
indicated in the slot \code{populationFrac}, and \code{multinomialTS}
is \code{TRUE}.
}
\usage{data(salmHospitalized)}
\format{
An \code{"\linkS4class{sts}"} object.
}
\source{
The data are queried from the Survstat@RKI database of the German
Robert Koch Institute (\url{https://survstat.rki.de/}).
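}
\examples{
## a brief look at the data structure (the counts of hospitalized cases
## and the corresponding totals, which are stored in the populationFrac
## slot):
data(salmHospitalized)
head(observed(salmHospitalized))
head(population(salmHospitalized))
plot(salmHospitalized)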
}
\keyword{datasets}
surveillance/man/sim.pointSource.Rd0000644000176200001440000000514213431030260017103 0ustar liggesusers\name{sim.pointSource}
\alias{sim.pointSource}
\encoding{latin1}
\title{Simulate Point-Source Epidemics}
\description{
Simulation of epidemics which were introduced by point sources. The
basis of this programme is a combination of a Hidden Markov Model (to
get random timepoints for outbreaks) and a simple model (compare
\code{\link{sim.seasonalNoise}}) to simulate the baseline.
}
\usage{
sim.pointSource(p = 0.99, r = 0.01, length = 400, A = 1,
                alpha = 1, beta = 0, phi = 0, frequency = 1,
                state = NULL, K)
}
\arguments{
\item{p}{probability to get a new outbreak at time i if there was one
at time i-1, default 0.99.}
\item{r}{probability to get no new outbreak at time i if there was
none at time i-1, default 0.01.}
\item{length}{number of weeks to model, default 400. \code{length} is
ignored if \code{state} is given. In this case the length of
\code{state} is used.}
\item{A}{amplitude (range of the sine curve), default = 1.}
\item{alpha}{parameter to move along the y-axis (negative values not
allowed) with alpha >= A, default = 1.}
\item{beta}{regression coefficient, default = 0.}
\item{phi}{factor to create seasonal moves (moves the curve along the
x-axis), default = 0.}
\item{frequency}{factor to determine the oscillation frequency,
default = 1.}
\item{state}{use a state chain to define the status at this timepoint
(outbreak or not). If not given, a Markov chain is generated by the
programme; default NULL.}
\item{K}{additional weight for an outbreak which influences the
distribution parameter mu, default = 0.}
}
\value{
a \code{disProg} (disease progress) object including a list of the
observed, the state chain and nearly all input parameters.
}
\seealso{\code{\link{sim.seasonalNoise}}}
\author{M. \enc{Höhle}{Hoehle}, A. Riebler, C. Lang}
\examples{
set.seed(123)
disProgObj <- sim.pointSource(p = 0.99, r = 0.5, length = 208,
                              A = 1, alpha = 1, beta = 0, phi = 0,
                              frequency = 1, state = NULL, K = 2)
plot(disProgObj)

## with predefined state chain
state <- rep(c(0,0,0,0,0,0,0,0,1,1), 20)
disProgObj <- sim.pointSource(state = state, K = 1.2)
plot(disProgObj)

## simulate epidemic, send to RKI 1 system, plot, and compute quality values
testSim <- function (..., K = 0, range = 200:400) {
  disProgObj <- sim.pointSource(..., K = K)
  survResults <- algo.call(disProgObj,
                           control = list(list(funcName = "rki1",
                                               range = range)))
  plot(survResults[[1]], "RKI 1", "Simulation")
  algo.compare(survResults)
}
testSim(K = 2)
testSim(r = 0.5, K = 5)  # larger and more frequent outbreaks
}
\keyword{datagen}
surveillance/man/abattoir.Rd0000644000176200001440000000160013174712261015616 0ustar liggesusers\name{abattoir}
\alias{abattoir}
\docType{data}
\encoding{latin1}
\title{Abattoir Data}
\description{
A synthetic dataset from the Danish meat inspection -- useful for
illustrating the beta-binomial CUSUM.
}
\usage{
data(abattoir)
}
\details{
The object of class \code{"sts"} contains an artificial data set
inspired by meat inspection data used by Danish Pig Production,
Denmark. For each week the number of pigs with positive audit reports
is recorded together with the total number of audits made that week.
}
\seealso{\code{\link{categoricalCUSUM}}}
\examples{
data("abattoir")
plot(abattoir)
population(abattoir)
}
\references{
\enc{Höhle}{Hoehle}, M. (2010):
Online change-point detection in categorical time series.
In: T. Kneib and G. Tutz (Eds.), Statistical Modelling and Regression
Structures, Physica-Verlag.
}
\keyword{datasets}
surveillance/man/residualsCT.Rd0000644000176200001440000000444713433452051016243 0ustar liggesusers\name{residualsCT}
\alias{residuals.twinSIR}
\alias{residuals.twinstim}
\alias{residuals.simEpidataCS}
\title{
Extract Cox-Snell-like Residuals of a Fitted Point Process
}
\description{
Extract the \dQuote{residual process} (cf. Ogata, 1988) of a fitted
point process model specified through the conditional intensity
function, for instance a model of class \code{"\link{twinSIR}"} or
\code{"\link{twinstim}"} (and also \code{"\link{simEpidataCS}"}).
The residuals are defined as the fitted cumulative intensities at the
event times, and are generalized residuals similar to those discussed
in Cox and Snell (1968).
}
\usage{
\method{residuals}{twinSIR}(object, ...)
\method{residuals}{twinstim}(object, ...)
\method{residuals}{simEpidataCS}(object, ...)
}
\arguments{
\item{object}{
an object of one of the aforementioned model classes.
}
\item{\dots}{unused (argument of the generic).}
}
\details{
For objects of class \code{twinstim}, the residuals may already be
stored in the object as component \code{object$tau} if the model was
fitted with \code{cumCIF = TRUE} (and they always are for
\code{"simEpidataCS"}). In this case, the \code{residuals} method just
extracts these values. Otherwise, the residuals have to be calculated,
which is only possible with access to the model environment, i.e.
\code{object} must have been fitted with \code{model = TRUE}. The
calculated residuals are then also appended to \code{object} for
future use. However, if \code{cumCIF} and \code{model} were both set
to \code{FALSE} in the \code{object} fit, then it is not possible to
calculate the residuals and the method returns an error.
}
\value{
Numeric vector of length equal to the number of events of the
corresponding point process fitted by \code{object}. This is the
observed residual process.
}
\references{
Ogata, Y. (1988)
Statistical models for earthquake occurrences and residual analysis
for point processes.
\emph{Journal of the American Statistical Association}, 83, 9-27

Cox, D. R. & Snell, E. J. (1968)
A general definition of residuals.
\emph{Journal of the Royal Statistical Society. Series B
(Methodological)}, 30, 248-275
}
\seealso{
\code{\link{checkResidualProcess}} to graphically check the
goodness-of-fit of the underlying model.
}
\author{
Sebastian Meyer
}
\keyword{methods}
surveillance/man/inside.gpc.poly.Rd0000644000176200001440000000306613777627613017033 0ustar liggesusers\name{inside.gpc.poly}
\alias{inside.gpc.poly}
\title{
Test Whether Points are Inside a \code{"gpc.poly"} Polygon
}
\description{
Same as, e.g., \code{\link[spatstat.geom]{inside.owin}} from package
\pkg{spatstat.geom} and \code{\link[sp]{point.in.polygon}} from package
\pkg{sp}, i.e., test whether points lie inside or outside a given
polygon. Actually, the method for \code{"gpc.poly"} documented here
internally uses the \code{\link[sp]{point.in.polygon}} function.
}
\usage{
inside.gpc.poly(x, y = NULL, polyregion, mode.checked = FALSE)
}
\arguments{
\item{x,y}{
numeric vectors of coordinates of the points to be tested.
The coordinates can be supplied in any form accepted by
\code{\link{xy.coords}}.
}
\item{polyregion}{
an object of class \code{"gpc.poly"}.
It is checked if the points specified through \code{x} and \code{y}
fall into this polygonal region.
}
\item{mode.checked}{
passed to \code{\link[sp]{point.in.polygon}}.
}
}
\details{
The nodes and edges of (non-hole) polygons are treated as being inside.
Points that fall \emph{strictly} inside holes are treated as being
outside of the polygon.
}
\value{
Logical vector whose \code{i}th entry is \code{TRUE} if the
corresponding point \code{(x[i],y[i])} is inside \code{polyregion}.
}
\author{
Sebastian Meyer
}
\examples{
if (requireNamespace("rgeos")) {
  poly <- discpoly(c(0.5,0.5), 0.5, npoly=4, class="gpc.poly")
  pts <- cbind(x=runif(50), y=runif(50))
  plot(poly)
  points(pts, col=1+inside.gpc.poly(pts, polyregion=poly))
}
}
\keyword{utilities}
\keyword{spatial}
surveillance/man/epidataCS_plot.Rd0000644000176200001440000002177313302740375016715 0ustar liggesusers\name{epidataCS_plot}
\alias{plot.epidataCS}
\alias{epidataCSplot_time}
\alias{epidataCSplot_space}
\title{
Plotting the Events of an Epidemic over Time and Space
}
\description{
The \code{plot} method for class \code{"epidataCS"} either plots the
number of events along the time axis (\code{epidataCSplot_time}) as a
\code{hist()}, or the locations of the events in the observation
region \code{W} (\code{epidataCSplot_space}). The spatial plot can be
enriched with tile-specific color levels to indicate attributes such
as the population (using \code{\link{spplot}}).
}
\usage{
\method{plot}{epidataCS}(x, aggregate = c("time", "space"), subset, by = type, ...)

epidataCSplot_time(x, subset, by = type,
                   t0.Date = NULL, breaks = "stgrid", freq = TRUE,
                   col = rainbow(nTypes), cumulative = list(),
                   add = FALSE, mar = NULL, xlim = NULL, ylim = NULL,
                   xlab = "Time", ylab = NULL, main = NULL,
                   panel.first = abline(h=axTicks(2), lty=2, col="grey"),
                   legend.types = list(), ...)

epidataCSplot_space(x, subset, by = type, tiles = x$W, pop = NULL,
                    cex.fun = sqrt, points.args = list(), add = FALSE,
                    legend.types = list(), legend.counts = list(),
                    sp.layout = NULL, ...)
}
\arguments{
\item{x}{ an object of class \code{"\link{epidataCS}"}. }
\item{aggregate}{ character, one of \code{"time"} and \code{"space"},
referring to the specific plot functions \code{epidataCSplot_time} and
\code{epidataCSplot_space}, respectively. For \code{"time"}, the
number of events over time is plotted as \code{\link{hist}} (or
\code{\link{hist.Date}}). For \code{"space"}, the observation region
\code{x$W} (or the \code{tiles}) and the locations of the events
therein are plotted. }
\item{subset}{ logical expression indicating a subset of events to
consider for plotting: missing values are taken as false. Note that
the expression is evaluated in the data frame of event marks
(\code{marks(x)}), which means that column names can be referred to by
name (like in \code{\link{subset.data.frame}}). }
\item{\dots}{ in the basic \code{plot}-method further arguments are
passed to the \code{aggregate}-specific plot function.
In \code{epidataCSplot_time}, further graphical parameters are passed
to \code{\link{hist}} or \code{\link{hist.Date}}, respectively.
In \code{epidataCSplot_space}, further arguments are passed to the
\code{plot}-method for \code{"\linkS4class{SpatialPolygons}"}, which
draws \code{tiles}. }
\item{by}{an expression evaluated in \code{marks(x)}, defining how
events should be stratified in the plot (the result is converted to a
factor), or \code{NULL} to disregard event types.
By default (\code{by = type}) the plot distinguishes between event types, i.e., the bars of the temporal plot are stacked by type, and the point colors in the spatial plot differ by type, respectively.\cr Note: to select specific event types for plotting use the \code{subset} argument, e.g., \code{subset=(type=="B")}.} \item{t0.Date}{the beginning of the observation period \code{t0 = x$stgrid$start[1]} as a \code{"\link{Date}"} (or anything coercible by \code{as.Date} without further arguments), enabling a nice x-axis using \code{\link{hist.Date}} and sensible \code{breaks} of the histogram, e.g., \code{breaks="months"}. The event times then equal \code{t0.Date + as.integer(x$events$time - t0)}, i.e. possible fractional parts of the event times are removed (which ensures that using \code{breaks = "months"} or other automatic types always works).} \item{breaks}{ a specification of the histogram break points, see \code{\link{hist}} (or \code{\link{hist.Date}} if \code{t0.Date} is used). The default value \code{"stgrid"} is special and means to use the temporal grid points \code{with(x$stgrid, c(start[1L], unique.default(stop)))} as breaks (or their \code{"Date"} equivalents). } \item{freq}{see \code{\link{hist}}, defaults to \code{TRUE}.} \item{col}{fill colour for the bars of the histogram, defaults to the vector of \code{\link{rainbow}} colours.} \item{cumulative}{if a list (of style options), lines for the cumulative number of events (per type) will be added to the plot. Possible options are \code{axis} (logical), \code{lab} (axis label), \code{maxat} (single integer affecting the axis range), \code{lwd}, \code{col}, and \code{offset} (a numeric vector of length the number of types).} \item{add}{logical (default: \code{FALSE}) indicating if the plot should be added to an existing window. Ignored if an \code{\link{spplot}} is created (if \code{pop} is non-\code{NULL}).} \item{mar}{see \code{\link{par}}. The default (\code{NULL}) is \code{mar <- par("mar")}, with \code{mar[4] <- mar[2]} if an axis is requested for the \code{cumulative} numbers.} \item{xlim,ylim}{\code{NULL} provides automatic axis limits.} \item{xlab,ylab}{axis labels (with sensible defaults).} \item{main}{main title of the plot (defaults to no title).} \item{panel.first}{expression that should be evaluated after the plotting window has been set up but before the histogram is plotted. Defaults to adding horizontal grid lines.} \item{legend.types}{if a list (of arguments for \code{\link{legend}}), a legend for the event types is added to the plot in case there is more than one type.} \item{tiles}{the observation region \code{x$W} (default) or, alternatively, a \code{"\linkS4class{SpatialPolygons}"} representation of the tiles of \code{x$stgrid}.} \item{pop}{if \code{tiles} is a \code{"\linkS4class{SpatialPolygonsDataFrame}"}, \code{pop} can specify an attribute to be displayed in a \code{levelplot} behind the point pattern, see \code{\link{spplot}}. By default (\code{NULL}), the conventional graphics system is used to display the \code{tiles} and event locations, otherwise the result is a \code{\link[lattice]{trellis.object}}.} \item{cex.fun}{function which takes a vector of counts of events at each unique location and returns a (vector of) \code{cex} value(s) for the sizes of the corresponding \code{points}. 
Defaults to the \code{sqrt()} function, which for the default circular \code{pch=1} means that the area of each point is proportional to the number of events at its location.} \item{points.args}{a list of (type-specific) graphical parameters for \code{\link{points}}, specifically \code{pch}, \code{lwd}, and \code{col}, which are all recycled to give the length \code{nlevels(x$events$type)}. In contrast, a possible \code{cex} element should be scalar (default: 0.5) and multiplies the sizes obtained from \code{cex.fun}.} \item{legend.counts}{if a list (of arguments for \code{\link{legend}}), a legend illustrating the effect of \code{cex.fun} is added to the plot. This list may contain a special element \code{counts}, which is an integer vector specifying the counts to illustrate.} \item{sp.layout}{optional list of additional layout items in case \code{pop} is non-\code{NULL}, see \code{\link{spplot}}.} } \value{ For \code{aggregate="time"} (i.e., \code{epidataCSplot_time}) the data of the histogram (as returned by \code{\link{hist}}), and for \code{aggregate="space"} (i.e., \code{epidataCSplot_space}) \code{NULL}, invisibly, or the \code{\link[lattice]{trellis.object}} generated by \code{\link{spplot}} (if \code{pop} is non-\code{NULL}). } \author{ Sebastian Meyer } \seealso{ \code{\link{animate.epidataCS}} } \examples{ data("imdepi") ## show the occurrence of events along time plot(imdepi, "time", main = "Histogram of event time points") plot(imdepi, "time", by = NULL, main = "Aggregated over both event types") ## show the distribution in space plot(imdepi, "space", lwd = 2, col = "lavender") if (surveillance.options("allExamples")) { ## with the district-specific population density in the background, ## a scale bar, and customized point style load(system.file("shapes", "districtsD.RData", package = "surveillance")) districtsD$log10popdens <- log10(districtsD$POPULATION/districtsD$AREA) keylabels <- (c(1,2,5) * rep(10^(1:3), each=3))[-1] plot(imdepi, "space", tiles = districtsD, pop = "log10popdens", ## modify point style for better visibility on gray background points.args = list(pch=c(1,3), col=c("orangered","blue"), lwd=2), ## metric scale bar, see proj4string(imdepi$W) sp.layout = layout.scalebar(imdepi$W, scale=100, labels=c("0","100 km")), ## gray scale for the population density and white borders col.regions = gray.colors(100, start=0.9, end=0.1), col = "white", ## color key is equidistant on log10(popdens) scale at = seq(1.3, 3.7, by=0.05), colorkey = list(labels=list(at=log10(keylabels), labels=keylabels))) grid::grid.text("Population density [per km2]", x=0.95, rot=90) } } \keyword{hplot} \keyword{methods} \keyword{spatial} surveillance/man/algo.quality.Rd0000644000176200001440000000376713122471774016446 0ustar liggesusers\name{algo.quality} \alias{algo.quality} \title{Computation of Quality Values for a Surveillance System Result} \description{ Computation of the quality values for a surveillance system output. 
} \usage{ algo.quality(sts, penalty = 20) } \arguments{ \item{sts}{object of class \code{survRes} or \code{sts}, which includes the state chain and the computed alarm chain} \item{penalty}{the maximal penalty for the lag} } \value{ a list of quality values: \item{TP}{Number of correctly identified outbreaks.} \item{FP}{Number of falsely flagged outbreaks (false alarms).} \item{TN}{Number of correctly identified non-outbreaks.} \item{FN}{Number of falsely identified non-outbreaks, i.e., missed outbreaks.} \item{sens}{True positive rate, i.e., TP/(FN + TP).} \item{spec}{True negative rate, i.e., TN/(TN + FP).} \item{dist}{Euclidean distance from (1-spec, sens) to (0,1).} \item{lag}{Mean lag between the beginning of an outbreak and its recognition by the system.} } \details{ The lag is defined as follows: In the state chain, only the beginnings of outbreak chains (outbreaks directly following each other) are considered. In the alarm chain, the range from the beginning of an outbreak until \code{min(next outbreak beginning, penalty)} time points is considered. The \code{penalty} time points thus provide an upper bound on the penalty for not discovering an outbreak. The difference between the first alarm raised by the system and the defined beginning is denoted ``the lag''; for example, if an outbreak chain begins at time point 10 and the first alarm is raised at time point 12, the lag for this chain is 2. Outbreaks detected by the system incur no additional penalty. At the end, the mean of the lags over all outbreak chains is returned as the summary lag. } \seealso{\code{\link{algo.compare}}} \examples{ # Create a test object disProgObj <- sim.pointSource(p = 0.99, r = 0.5, length = 200, A = 1, alpha = 1, beta = 0, phi = 0, frequency = 1, state = NULL, K = 1.7) # Apply the rki1 algorithm to this object survResObj <- algo.rki1(disProgObj, control = list(range = 50:200)) # Compute the quality values algo.quality(survResObj) } \keyword{misc} surveillance/man/stsNClist_animate.Rd0000644000176200001440000000243412744770132017446 0ustar liggesusers\name{stsNClist_animate} \alias{stsNClist_animate} \alias{animate_nowcasts} \encoding{latin1} \title{Animate a sequence of nowcasts} \description{Animate a sequence of nowcasts stored as a list. } \usage{ animate_nowcasts(nowcasts, linelist_truth, method="bayes.trunc.ddcp", control=list(dRange=NULL, anim.dRange=NULL, plot.dRange=NULL, consistent=FALSE, sys.sleep = 1, ylim=NULL, cex.names=0.7, col=c("violetred3","#2171B5","orange","blue","black", "greenyellow")), showLambda=TRUE) } \arguments{ \item{nowcasts}{A list of objects of class \code{stsNC}} \item{linelist_truth}{The true linelist} \item{method}{Which method to show (has to be present in the nowcasts)} \item{control}{List with control options} \item{showLambda}{Boolean indicating whether to show the estimate for the epidemic curve (only applies to \code{bayes.trunc.ddcp})} } \value{ This function is experimental and is not yet documented. } \details{ This function is experimental and might be changed in the future. } \author{M. \enc{Höhle}{Hoehle}} \examples{ ## See http://staff.math.su.se/hoehle/blog/2016/07/19/nowCast.html for ## a worked through example. Code will migrate into the package in due ## course.
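## Until then, a minimal, hypothetical call sketch: 'nc_list' (a list of
## "stsNC" nowcasts) and 'll' (the corresponding true linelist) are assumed
## placeholder objects which do not ship with the package.
\dontrun{
animate_nowcasts(nowcasts = nc_list, linelist_truth = ll,
                 method = "bayes.trunc.ddcp",
                 control = list(sys.sleep = 0.5), showLambda = FALSE)
}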
} \keyword{hplot} surveillance/DESCRIPTION0000644000176200001440000001255714031002053014452 0ustar liggesusersPackage: surveillance Title: Temporal and Spatio-Temporal Modeling and Monitoring of Epidemic Phenomena Version: 1.19.1 Date: 2021-03-30 Authors@R: c(MH = person("Michael", "Höhle", email = "hoehle@math.su.se", role = c("aut", "ths"), comment = c(ORCID = "0000-0002-0423-6702")), SM = person("Sebastian", "Meyer", email = "seb.meyer@fau.de", role = c("aut", "cre"), comment = c(ORCID = "0000-0002-1791-9449")), MP = person("Michaela", "Paul", role = "aut"), LH = person("Leonhard", "Held", email = "Leonhard.Held@uzh.ch", role = c("ctb", "ths")), person("Howard", "Burkom", role = "ctb"), person("Thais", "Correa", role = "ctb"), person("Mathias", "Hofmann", role = "ctb"), person("Christian", "Lang", role = "ctb"), person("Juliane", "Manitz", role = "ctb"), person("Andrea", "Riebler", role = "ctb"), person("Daniel", "Sabanés Bové", role = "ctb"), MS = person("Maëlle", "Salmon", role = "ctb"), DS = person("Dirk", "Schumacher", role = "ctb"), person("Stefan", "Steiner", role = "ctb"), person("Mikko", "Virtanen", role = "ctb"), person("Wei", "Wei", role = "ctb"), person("Valentin", "Wimmer", role = "ctb"), person("R Core Team", role = "ctb", comment = "A few code segments are modified versions of code from base R")) Author: Michael Höhle [aut, ths] (<https://orcid.org/0000-0002-0423-6702>), Sebastian Meyer [aut, cre] (<https://orcid.org/0000-0002-1791-9449>), Michaela Paul [aut], Leonhard Held [ctb, ths], Howard Burkom [ctb], Thais Correa [ctb], Mathias Hofmann [ctb], Christian Lang [ctb], Juliane Manitz [ctb], Andrea Riebler [ctb], Daniel Sabanés Bové [ctb], Maëlle Salmon [ctb], Dirk Schumacher [ctb], Stefan Steiner [ctb], Mikko Virtanen [ctb], Wei Wei [ctb], Valentin Wimmer [ctb], R Core Team [ctb] (A few code segments are modified versions of code from base R) Maintainer: Sebastian Meyer <seb.meyer@fau.de> Depends: R (>= 3.6.0), methods, grDevices, graphics, stats, utils, sp (>= 1.0-15), xtable (>= 1.7-0) Imports: Rcpp (>= 0.11.1), polyCub (>= 0.8.0), MASS, Matrix, nlme, spatstat.geom, spatstat (>= 2.0) LinkingTo: Rcpp, polyCub Suggests: parallel, grid, gridExtra (>= 2.0.0), lattice, colorspace, scales, animation, msm, spc, quadprog, memoise, polyclip, rgeos, gpclib, maptools, intervals, spdep, numDeriv, maxLik, gsl, fanplot, hhh4contacts, tinytest (>= 1.2.4), coda, splancs, gamlss, INLA, runjags, MGLM (>= 0.1.0), knitr Enhances: xts, ggplot2 Description: Statistical methods for the modeling and monitoring of time series of counts, proportions and categorical data, as well as for the modeling of continuous-time point processes of epidemic phenomena. The monitoring methods focus on aberration detection in count data time series from public health surveillance of communicable diseases, but applications could just as well originate from environmetrics, reliability engineering, econometrics, or social sciences. The package implements many typical outbreak detection procedures such as the (improved) Farrington algorithm, or the negative binomial GLR-CUSUM method of Höhle and Paul (2008). A novel CUSUM approach combining logistic and multinomial logistic modeling is also included. The package contains several real-world data sets, the ability to simulate outbreak data, and to visualize the results of the monitoring in a temporal, spatial or spatio-temporal fashion. A recent overview of the available monitoring procedures is given by Salmon et al. (2016).
For the retrospective analysis of epidemic spread, the package provides three endemic-epidemic modeling frameworks with tools for visualization, likelihood inference, and simulation. hhh4() estimates models for (multivariate) count time series following Paul and Held (2011) and Meyer and Held (2014). twinSIR() models the susceptible-infectious-recovered (SIR) event history of a fixed population, e.g., epidemics across farms or networks, as a multivariate point process as proposed by Höhle (2009). twinstim() estimates self-exciting point process models for a spatio-temporal point pattern of infective events, e.g., time-stamped geo-referenced surveillance data, as proposed by Meyer et al. (2012). A recent overview of the implemented space-time modeling frameworks for epidemic phenomena is given by Meyer et al. (2017). License: GPL-2 URL: https://surveillance.R-Forge.R-project.org/ Additional_repositories: https://inla.r-inla-download.org/R/stable/ Encoding: latin1 VignetteBuilder: utils, knitr NeedsCompilation: yes Packaged: 2021-03-30 12:11:05 UTC; smeyer Repository: CRAN Date/Publication: 2021-03-31 05:10:03 UTC surveillance/build/0000755000176200001440000000000014030612521014037 5ustar liggesuserssurveillance/build/vignette.rds0000644000176200001440000000132414030612521016376 0ustar liggesusers
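The build/vignette.rds entry above is the package's binary vignette index. As a minimal sketch (assuming the package has been installed, e.g., from CRAN), the vignettes this index advertises can be listed and opened with base R's utils functions; the vignette name "hhh4" below is an assumed example to be replaced by an entry from the listing:

## List the vignettes indexed by build/vignette.rds
## (assumes the surveillance package is installed)
vignette(package = "surveillance")

## Open a specific vignette by name (name assumed for illustration)
# vignette("hhh4", package = "surveillance")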
1fwn܀6nI#_7s`MxT,7 gh !W]73\0/dk$MVZ/:\e粅l>Ͼ-/0>c܋o R6%B3Z'̯ۙ)E@pzZG 3èTfo yB̦Z,U\'NJ#mrR=k;uܫ:98g5FʆcЪe011\>㟋B mNSigV{ka|Z*~i_> a'JhYS'<\0qf\g rGÅщ|ϫ #iBTV ꄳgy~3垦IW=OSK[Z^(kO{(u-i4>j F5׻gE3Lu A>67޸^,Vu@8yB'\4ǼCgbgdL~c`#g{ρ&vk=𙡠Zẕzj+;#呉䃇Cf(6q*䉫n}6\U4>QG7-?g ѲIJI-/#?d?xq KU޻]]*o~p_9$Wn5&?4qJe˯|)t3-&m{OFlp!a@k]W-a/$R#Cy=j3RE{2W{M e8n vE (_ǣF# ħX+OZ2nH,;$'ޘMRQٳK"z"jzdak!5׮bX+AF ̊cJOv,gBoh *_A3@ oBT'Y4Kf!J9=5nr'~`+,)dUS:M'|Ƈ\ijbˇ.c^ksN~ I 3"̽Z3aM2]S 3-GƖT?9r羵3, k^]jj=~( # 7dEJYɾdYn!^F8ئoJeX &(86I 95M 7 <5d_*CzDte)~!p2W ̷hIT3fc-&]D, Rwx1PmqdZpvgr &`w#MH1aOmd/sWD 1.@^PfD`lச<8c'?#fi.n^aO+M4;$"Y<}u"5A"l Nj:C7P*b(~ *PLGB+4T4J3=Z+lf$73mObFRag2Rt(lFXzso%x^/ݷTfi='!|c|s{\M?%P~=STݗP2{[g7#zDx&8tN.bħb/RSUfazў%FQ{C8v'|rHUڭi.\uDM rU(rⴲ0'ԷxB뇶eV6|]AW}\խM*&d-I6_)7֞!'Isz0p X{Zprn7"읈pzz+6ӂj[z|B/Xld]uIrj輓@|LmfK ]?$OF+eG@ͭ 6`MԎ ׹sKA{"Zc{6Ʌ쉴 g KmvmupQ{<$K5X>$j1:`oLep&| B/Feς42R < Ĺ$k^N@|b2,5);8<d2ZR]O|w6Q}H(o)#Ry[Kf#q]4[ }MSp{k Pbz~r܈9nv?9%[U])9zk'uG3 wz2-r xܡ-"<#" ; Yufxǎ;}/9L/ ԣs ɽ1<:U~9nZ5$ Dy-_~=*B|M7݄]4ڛ&jѺ{kĎM BT` Wݖv# <Yj,bq*m|#HL7ivE@Xi/m43 da:?{(yaE#zXJ< T^jP 9γytVJ-VD fTwxrHfF0 9rm^|U 0K%Stv䷺ߛN C!Vu;^gdXrvGUhO >ll&6byv4֬E 2'lN ST\ G_oܨA>]|[g hldN_e)S}A&#>uc.uݎ* ZM"Bq_MY\$u}fbjReW޷'|}\ C=EqNBnÚ+ށz8WWLQ_BTO _(GVekٲ`V/p `QH44 Hd\H4LjV8gEr[ɟo4fI|q g!* ‹chBg➟}D #eHuMu˶ޒ҄7O dO{>I}& }B1݄mQ?NHvjZ7Tm]u{$h&=-jX\R!x9MVzqщ /bL#d$dX} J"0q}MrUn},,d[3MӃMo9=]ܮ-3<ў'!K0*n{;L#hVQ"o:mbof}/:$7Ci''$;4e3$jQ3FǐˈY>4Vl[;!f z+< ݎyGg^ ̈29&tfCܨɄcFDr mpK. -svu/{e&*<7>#>EcEtBrS=~BOC>6בVb9>-3{խYf&Cغ1Rifx0,xIn(. wilZZ~o;X;SmgDAgSk-q(zgNG6.M&Lkp5ݾ,ʊF D*d]`1'KAۛ hAs€ J-jB(HvLP6BU*7=Q|Bzd?? +<.?,&Aq\k7vp\~NTy vW<>.YPx|E6M}ith]m)#^{z%J/<\p}huIiFeAo/pU{n[Y?~aW ^| Qb92VKjIG'7/K O3B=xBo)EgGmOgI/Piپdr!n#fM7fPKc Md [:ب1EH>@o y[~KqhIa5Œ 7|`~+0"LTpBԕ|գiA;1?aڬ ;kƾT"B޲H] 8 Y9DI\:!jAV_{DIk{$1:a"r9-T/rt=s["8? .h(dP9YBS{tĮxrI]0 9LȪ0(ڏNjDg!{1~{[/m3#Y!p_ ,}{#oW7 ͊Z TZu6R7| Y Iz%W ?ܒ [76hP} էn#vg߾&A0Ԃ6 SNe)[_Hm4dMO\FOchg#&w \*|[O35axXMzYR|`i C'!K&bVBz "“bR:Z= AvCR'F!R6MV\L7{"Qc΀l=ȄFanֲ»zkxƷsg?NKK]!;H9ǝB;Q>7S8,ձŬ (7hO̷ cpb>{n2S'jOD8 dh}l.и㾕K۱#8ھʎKJ v 5|m4[L:McħX+-ˆX^'mT d)T% /BHYZ!kuP1Y%U:nL >Z鏦+u/]2AR5 a?~mzp.zS!/:Sk~x CRh%f_S9brX\7A0*I4dZjWSBVO"ܕIS1ϘUf:7}+MMLS*x4+a&ZoEq35 N]3qz ޘf)*fm~Q<05 N%<0v~cS\F]-oc{VэwJhSSlE P{Cu&,4_qh;H]7,d5lxWo@oɚITÐ87%rC~}HQx NDF zCaRe!!N+JhMC'j GVq6{$.iBTZ&Sl?uC;0,Ez61K/P鬀unGd0i1]ZMY g@fر˴ʉ8)~C{w1AnX6_\ QC{ "d5]EwwfFn8 YUߔ"HMXv ]Cv1 éM$JԈd`ye (ad۫Y$H^|J!_nC@)|@ŬZe+ؠ]xz#0RB|{2=YQx&@rpӐuJ9 d GlYsճb5ע(-ӛP-<aڪo5Fv6C{U+MbT۫QL4bg@=-UѼpbF3]B}J 7|i\LӴ"j$4/pY6Wf[5[3*h~1 B>;0~itxh[k9Mԭu`t^f/JX׎{" ,gs7;5@)4d8K|fs8].P_6ξ+K#}<=eB݌%Qrܭ! KRWCx )enhD%ٛY.Ӗn& ]v6>^,njrmEhzG,7Q*A/1dtA R~T~}?莰 ?."䋭wפdsnKD.]DQ$a{ׂίͨ>1&S囆o]CWѩ=\ U% 7C1_@~oM[?{i$OCֹ%=P߭eqM".{Dtنx\f!g[IyUWu\!cQ'>Ǣ{MX, ymB(cc0=JKig~B^dx5.NL:l\EKSNzx=˾o|iW y8 &~"wD}&NP%Xkp~˞)po@' w`YgS(Yؚ͓׬.RT[x-#,LO= !w ~MM?~-&pl\Eym!8 y  lHp\h=QcxgԍAV?4[`q.M4fO ҿ9jSj](/N--ok̭nW)j,-ɷ׵闕O>V.{'JR8 }VùaFhqxx$.>ybSJ B7v!݉do`=^Il&X_a;}-$>>||A͟ zCAۭz]ҚÌ{:Ũ’.k};i\+ɥn=hyD}wNZS#_);;ۆ ?6J/^i>,uZ]vWkvb~ߊ7mCʮD0SLɅ$'YkŶҌ=y:ǃ4fPZAPi4Ru ?#0ap-C˦Dw`(Oq~0}M*Fx#(u%b(Sj$.Y:7~L5FP·*F`Vb֬l^oGCWfu33 2A. 6j&a9  oB:J;;F={mw^e Pkd~C-O ]LxEʘNrNCtQĹk:vw譁WF ´CV7Nۏi]׫XewV<ӯK{Ehg/>"da[}q a>{!* ]ѫ4޸k1m;N.워xE  g (~X2\/>ҵ!y?~W)i[ndxlh|bp>5L-qw`lRX;7[NhZ1K/@H {x Coo?1iEqV޺i6 ,5~* ᓷ<0?!=}>r` ]]-IOȓn O8>g2BYx6qZ[);A bL-ii!@rQD> jбqDgvYN}oAͦ1O鞡2#πTɭ,AJbz?ڄ ޸! 
)q$ oALx4&\]Goa׫b)Mo0ٹ_[Qr煶X;e4eNA_~ʬN+z)IM2껾a" rM.cRIOdޥJcl ՖjwYHh#lks#E7;B}]WM~#> hCg_C32v@}S/=s苛\wjme_3^|s%?86nFG&s C.ƕvσ5*5T]O&(S48Q 3+<Ϣ??|5?};5Ii?党⧷YC|s/.| `{y$U~FbݱVp_=$Wo5?46w/=89e7p<0n&ⓠ%0Yʾ7 5WE"FiЬ]BO$W]dqRT%öf\7GFԟL >ɆR-#>Z 4vyZ7U w]ꮺ>X<]gnݩABhTzL#|i˨)BhG (_d}tȖ$b<}f|H[`%MuãG2c3֌knݮв-Mr )@~\G07\(ʛg-ש<^\I)wavx9f)LJvg<զdUNevq;7*eY*t^-/Su $Z#O!4jPl'xD-ɦ&KƆk9D>7vR7|A덝!?T"5d"1 | Yݐ/9Xgfo]l;Jۚ6K?쩢eד!+d)G@ɒZ~ٳ}J0UNF+TbTT~Y:B1Ꙇ܇nE\~ũ3e~'Pw%%e#,OٵD<$]lQo0 {2[Hg 񷶱RQ\㜕$ I6e5%N]'42@~G Fx1h+++P1thp(kE<=DKM\E|(A7i52-ZgB1i-~:ũWM+F^_X*hI&Sd'@=̏|63kq=g&@)ՔvPv:1qBf#8ّtX+|p7:'(a(jhػC\1(%(1]*s_.q-Y>5\+,V_yb!_@OIV)b,j7vQi C3V_^̎t[Qqr-Xcd&jO)k&զ1z7.JTz>ZXdoAVN@|mwbW2"l;H|Tڏu6E1SlkXr^R2S&ŮA ۸hp Fg dbD"\4%hu^_/p hH4N&PϢ1e[gaLsh9B}ŭ)yUk7j;8&{(=n֫t]ӨxCE1qk4bmht @w-.9,u:Ol< LT#,g9o-KZ|V[U[{-)VA/*qvbT벧T g/QpħX+}W^3L=$8sj/cRI 8y,!v 8l.gl3zn\62=*aGAqHÊ#\!E" ỉ}曫oY>gIv7Vm`f :<*t㓫_25Q< eq7k7@jMTD'bħX+3/70LX/ <V9obwK ~jAi"k"{B+}Mn gm,g{6ӊ;nݣ.R[?9ݕ.^`bgV`V/ kh+zIx ٷlH)%Hy@Qn,W#>KOS:Ѝ֍2C'm;C~q}xN*ٮoΡ^|Qm}^J$:uwiU;~7BVR/ꥸ˼e^mRe?JRu 1Wyߍ+C_fZ:)RzMkwy)¥vb /$ qiT{_4J-oH(u%!L12>U:_u0" 'P3CޕRNOhTru@vl[#W@fAC8/Q=s% H& h>y.ԽI*J`\\Ǝ5'AJ_D̎'G' 6~/F|Db>mmt/`bg2}ْ3d^9+)9kl T|4p-1}oj+B@|4--FKy[FN6 (_$;Yjr/5YkWːS ħe2Q<.^^; KNm=6Lij#0 k! *nF#Kv7 O >MMr7х}h c_9H<&Fv7 }-g_h|xELqq:zU晫žoL-o]g^ypnx(/9ഗjUwߞyF/j F5:J+ ħX+Xhk{:ψ CʡcIfTC+ *2S("pߣ އGʌ_/&2]^ޅ|VhbE" ^|6t vCx %&r|˼M-F1cZO盌ZfvOBg׈2 &rZ& l g&N )icWh ܺA`kkkcU5{H4Osu8%UzHb_ߟr_։ wҟ!^pƛxk@ɟ %+M#aaElJ㜉ͩf%TӁM\b+D"3)0 YSJ8QgыbPJޣNҡS +JD'KE\O8YkWdZ2pDybxjV!ϷffHSqm>%27Ũ9_k2;,B.tHprM|e>% 0#v2ģ7g3{Oj|'F4鿘 cȤ'| Wu=ڊk1ٚnetQٖY 2&]nBQ/gڶ9pJF@p+/m7, $XQuDP)V^z#QUTaddR4YBR셬>Ӿ̬YNٮWBG0x&FIA`rN{#Ǧ]55HJk/ y[0 2o2?>~`RW]COWZGfV} r!w1RY=Lf:=ݱ ǴA~5d NpVz|cE7vbGK3,Aj~h#*w3ѐ6ܸD@V?w,(GoD&C!Ld4緌`㣅ODG3bxX܆5oRw8yHiq '~ CilzB$J0q5[ oQk4oLF3)蒇DEB=oWLgJ0l1ْuc), ϵ?V$OS>@;䭡U46kut/4*W*W}1m D ?O?\++V2l[ 0qCSG8-IoXԮK]إ'‹#ʭv!/jEcx򽎌/EXWuO3m 'd:6ұ$@~Ўo!?Tn#e?i \P;5l;,3 [ImP_z3Zq#(F-> ֹg Ƀ܁V+JZZMK<Š.b`X jkdaRT!2BVu=Rm5@|I]pT 6{sDi i~gѣ#pg3 FL~|';~-1CuD(Y5onyepǁǧOc; OB'Ia)ȧd6}z?>lcٴvxU>X ˝Q?>@|?3m?{g_>@Kae-Y,mdxA7?8"ڍ9`=n4C8y݈'_GD[糯&W^,)֙&Ё/B3zuhi9kƲGFKCQ 4DAK::w0RNuMSMZ7[.—&^|]l*x8#֫5ϥrt{:Qψ,1d;[z2?ӡ "f٬,)c#ϓnٟ<+BrnH`FvxOj*ӖqL$Wq%ue'L?51ˀJs*?ܱkjN@.-N Y}>+W[ﶨCm/w[`et.f "U/}>,(ѱEKac>?m 2k?;gqg1lVgd'F5Z][v^8WXh3諦cz=ΘF} '|eY'*'?SCaF3l:e V*V^.W'…jǯ pX-uߧe33$: Jg K]q됯+Tw%79ؓNwM3 Jj{$MV<'OD-*FACizԍZw{Ă4_@~܎]1>d8u,NX-׫dBS2&[9>dk0D\G%^!.GI !Q;4$̊0q=S2CF[iqKN= s~v,#b xQ-+X@Ayf1ܽ;#{OV0(>~ 9>.YP5|my5:-&b^&^>.FX!G\_tW7'j2D3fRvso6ud*Yk]|JL# XvJQ\/po&*cv7QrTGtji|[>)3^NW͙eW>vຶ?==)ާe/8Y.LN}ɢU*'/_>z~J! o/o0Bϻ{i70r) ԨiH|5bi/HCM7BV=V|G)nƳS?˞ièwȥ,i#f6Vr׿TT51ůXm_g\ɟ26I /T{g>uN;wBYS-+87+cS%d!.m~jo)?8u=  A\Er0$A]rp]5f@/{g{bWG\J^YQFvɉ,ɑ&v$RIlĉqF8_0Ʋzf{pNcꯪEkQTJ az0¾4ʅ(Gs O1x_:~n&+FKoOpφ+s׆Ej("ӐOk;4/A$dmIy B8yPpv6fyf1g=_z,_z>Cn_L !`~ϲ?z䧙 …-~ZƂjml1w̉?[+j֝Ԙ38V5m@!.]6qNt!`O'A o mT!C[~wh~2#~jq0Oq?)Y*~>7OWKդ=|K /qB` 4'@=nT΁;&wюV :ַ+;uw9q(٣Qjss?dieöVHg{tV(cIV:dD!NB/WZ"n5TPCIJF`A_b_lBI}h67 H V`>at/J>/ j`*Zn 9٩'䓩UѾoP! | (Va]:@%F<9IR9J9(%UTqH@<)5^?=sID[ly4Q |0R72;.Rm?JrQ@<)Bbԛ^|I;˪eס#ꮠ 8YmmPir*4yQ;y* [22cX="5=[6le"Pj@%R@<4FYξ581je%Nē6Euq6^d뤄-)"LE=1 $^oeo>:ªD,OC1'z4^Jr*~Ad2# )*JwG@.! xQKAJG@>]:2,#tfۢU. 
ZL#ȶGV{ ϵݵ貨g5:סs8 9kB0J8Yiƪ ҿ Ym"WtkZPXd$GB4Jp,4`MvdI~א3gD߈^;>ev,3tX=T,OݹS*$Cdx87 /C,IzpzXu{F<żKL纞K181jG\ȵN5!tIEғ?n*:*5S _G[o]E fTBܒtյV MB>Zm_~cnn^YyKx 1UZ(ģV!bLZZJ̄HH˶@RgAG ٪;6B&ŏ?hR TJq7ʆE=-Seu-OtB@< 4Ziz?N4ѩxaR 6_B0 W %?$ V oBV*& AV+ :{1Ѥr VNtb"pO .ڝ."Jx4i+f2]H8=>N%B@:]ݽʅua:33AH;?2>@?Xp?Y^7o{_awj*fc-3v/9;p`t'*/tׅ_⥄aD3+yV8ąJiPǐ?P ժjlYX7!pγxdniwHKmfCH1>6ץ1Kc<1c> Z6 UB~op(3J6σx\olqR9;\ kSyZ7Y$͖&I DR\dPO>}<Ϫ~.Ijsݴ*3^,M\PцK /t2E=a$wKliEY3Q][7fd9KْyK-ȷRBͳ\TZQy. cېok/`_5*lƨըJS{EEm7KF\̯EHNKg ,筟rs ҕL # \!riM{3 ]ۮLZ3ݵFubP [V幻cPmUJ\<7yN;w[3#=:48ڊdAKE_.b|9[9Yh%cStӕBm'& O1SC< ʫOb|8m*'Ǚ>b.M)4Ibԛ$)db:+q1jfrM B>B:[_DcPt6NQgc8qeZײX4 V5]("lGT./CZ; <]MWY8iuj5_66)iDzEck)T/FŮGW$YT$63T׭ĞIu_IAKP+,v/J u DrM>&I&fBG.%c-pR/R=+] Q$Mv 6dߞ{{3L7-$H {n=)>ty^>UR`6<Yɐ41:de˵ B>]\^|V!%wxUm-afwl#׀! G~]~ I\,Nhfd'_CSbC,!ʇiU l$%hk?Q9 <L^$< Yw= _5lI? )RqjrĮ4 Ñ|5ޅoHc,{]0-Nc".c|2Uho51.GtN~vxFG#'H*[ٔ!ǐ=줌E${YX+uSY_s|5 -!| 9.07:Kё[(v(8phzF.D5GT&!+y Ys\xm>Z8&G!:90 -yjh)GQ`'VuPp*UYyVYh53'm' #L{P ԖzlGSJOnQukaۿLn@(OJ j^ D%Qr7'xu4-ʶ9b5|Y5/`=>S>$D5Z}K5)C󡲭,fwBO|iCN)F I'0!w/"n (yZuS6<<7m8;'l1`gy_ZAL"tT\Gxi:(O{!6aV-dΖОlvAS /hgtie,)#-DGlFE dg^Ym\߷VlIK|zAZ'“T#~`Lq$}T!eZ'ɩo ;; <Yni\዁YaoMϏ1VԨɪq|zMP@+!C62fA<_T9~;@ ۶\@%Տd v_o x/Z_t_8-l߰e v a ̔f\ډUيM nkCmj~ Yy6Bc6]aY><8/*_56C(8-A7 U)fghZVh|̳փQza<ޘ? LW^E7ӉΠ]w8J^45Cm{5w̆m.LVɈhd2T8#qᶱECR2ZIt08|Sr6G@֐q<~^$;yVbqs%O>s#ZQ457i_ju}晶p,d%xB_Q1ʨr&-A|}%Q:< dgJn/T&`C9!CiR7$2 ih'􆤟H:d}C[7xoXA!lcoXU5 ^o|;̒U~}K Sq7Sp8ֺl5CǐkW9vi^-:'nLkNW#vDexr\C)k>y]IpEdՉ]>;b{I3 SNTtߚ) H8C C*\啨p Gp ^HDhA@<4]-ױ7c_*On[ۥ-/ /_+=W:ZZ'[ Lj=mkf㦋k`_L »m ̛(㠱 '}d01"LC'`=(8_&[n/c&2`icB',q< Y^O-,\R9l:a(^I\.%m;Y&r :MRzQ-8F<4N26g(&56gų'-kD0Yi.$7ۀB=7GƠ  ZU)"V>8Ŷ/4+hSؗwxEV$wP3)SܞY[({F0.>!d}#6TxNb,$M(fZʔ0Y{Wj> ED5ގP*#Y"O!뛔E&.bє[lWWP"b*O%)WT5]J(6d!bB? Y"w ?Цsqkw gPѐGˡרu‹/~$"tx kv xh{ʼn[Q퍄#G|45Kp52ZQ!NW w %w xu#{bg*A?~?3u؆VkLQTڨ@ڄ={R P_EN)^gkg_sO: R5^'!+|&_C 8Ym"~: ԚY&B %}s6Jx:ޗJ2 ZL? kC+?w/k* ُlnKg!럕J]CN…OTZ' YVQ9HPX&кT/{Sn*Ma1ǔ[@$\9Bj6ҹ'rx+7SrY'?(\v=J/Ir!РbTjhLO8Q]~ ꀔmԅ|^yϽqe).r FHv8jq=$& O9oYAo5 dBZD#M kԯAy 5C[L8(^Τ{QŒz~${pgq$Ng۷JFsÙv3{ Fvvxjf(GG'l^%Qvǁ#c"%ځ˻\=q %'WP*'!В@]NA_g9Q* W.@^ao ; o\1Tb E!$9wT:HRkJ4Nu׿L;ї GCVr-l' u('ނ|+5?C*$63*&s|%P@<4 4nkl=v[aҮJϪDa 4OgkL~ƫySgP5ɪoFD8RAE`b{Ɔx}Ԕ >Q7duRIcs ī|I+ }n2j-ն?H Yۤ.JkM{}洬)G,QZ3KG {I0'g#5SA G24w7k)0< < Y=F9wkmZ򧐈9`rQ_d*0F&b1yXpA&Q#an)Il֤w6G tW_굁ʊbe` %].`ޫ3]Tp8l3@2i\yf`ۀڈf(T=ŝ"9C~${ΐHɟp8NQѪ_TLZ:2j5Ӑ4rScn]ִlp/Uii ͫ'&ς/OVȂC0FHP %G@ќ' ΏEO]{nTW*Ihşŋ4:%aFVɤ`isPHbxHfH*w@eABV6]*g-]{j 燃(d5E8@EگYN@;"n x+HBQkJ5uJ+z0%_p2ʵB7L; #Zm,/vQqpwS͚fa^J<vD]y(Djx?|d<d;Sj@J.L|wqXו%},ΰl'>cېV?%ށ7K/hDgWjz21pJظok$(D](^4>Îboe6Gu!kmQ*ru{}HVf҈#( Syk/=v zaͨV'/v QqpԷ2硭- Eh_숶3P˪_ċ'iIPտU%< UMeqm#욫V/+= k 52mtPΡyMdt$^Us /q24~6 ?!;@4{҄R5wѕ,`]/e:ѕe/E:48;4wfstƽl&)n3U-z O SJ3C8:*>$;:w|Q@< ~EOΠZJw:roG%F#PrᤏK6;)UF~n_p=V3vJ|~||X[pRH_@ !?Ц ? 
R5*F-1cYC& Yb?VoDhR4HSQ 3v2"%KLd$$ !wi^??{r x=%wx {).Eϰst{iz*t 8Y㔯Qm&J[OCk@O/)y'?ѷGs, +m3{6N7jJiL:W ޔ]|G>)a MKL4&F<)U26HZ5&҃ A x [_5W$D& Wx:]~t&e[_'ٓD5Qbn5-}{Hf^ݦTbyx Oqރ|%7YiK!4qx| YyxiOVWݺSQ_*0\jӳ(;JD#(f:rHDu[ƞpY3偤ȟ"G2]qhjcV?G@D_Ruk:?K>0bVɪ/}ȒH>~4ZwQ4A˩Xo ]e|O%j鍽?8pA2vOJnk`Ni%N4/:8҉ΰA[CVlم!Y"i nݡ.)+4>4 + u(Anް>7SO$Ƣ\WR V.j&:aWx:] hҐ߰k(}=o֐OkAbKU,[~ (W:*>UУ?E Ĩ^46_)&M7cLgcYe 7,[ kVnGVQ?G~ Yߒo:&a1O~{ݖ]/$N!+ah958@SMj/$d% /"'>Q\gU1x4+Ȫ4;Q-Sݿv;LWcJ F{XοZnlt&̤bH2ff8y(E{/afH@>])'1Kƻ| tx 5j>gXFēꇵ[}~jZs;.MmoR9PA\>P;jaa *|4czfz+,(#Dhx % D/•<NDqf:3_o'VK| J G.ZV;H=5#]O8<.@^r5cU3S=/흥%mPliT/A4Nz n$qKFo@­*Q` rz'I(55ڜ4 \#*J5xiǂ ԟ0[pt*!ObK$:(d^MnԡOC>?+^BV:{Ş=rWR*Bx2n9}ՃuYe!.瀗 +.rBɝ2Lo }D2prz+.~%p Tg YH~6gbqx}m6/Qa1ѭAYk捾E2 Pxxxҿ=Aq4IrA8y05#VOTNOCVPԟ|Fv /ހdjMDJqJ&|ނvBB)[(m dCֈ7UZ?m;l²^8ô?[Dxqo" rzyHҖ*%# N?bRK"bģkK-ʤO!=mq[xs#Z/$d}y}9̰ƦĖSȀUjU^lo.~<^3&(Ȋgop{N1 ' A8 Yf{b-jUaD|CVZJjm=!)O:ߕSOģIcP秧oqim?CCA>̫N//jaEzkJݲ+},V>>Vvx4QRP VBu+K t⋹ŗ P$m2wz!^au'0%yV'77UX'!s!|Y3l=jXVvzvv+}ĻŨe/>|kFGIvXR#`Paۋd= [R[_})oVvM_j4|骖6fojXdU[-+l'Loj~ݲ8& Wz+sz[a&!岗xX\& j %a%[fH=hFxmsM'5YYB ~c|mlV_!n8[+D64uVY)7/!L4{vBW_ש~"η+)tUo#KOr8Q`8Yi'iTeu x&9% hB>1WI Ф.&_?6crs{ w}鬻].A>~nj;\.]ԚA7y,iB ́Iꣶ;Gƀ5Oxj;G\niOl,H+ ,jCm9Ís3Q;'[hmwYêU=Yady|!/ƮaܺIv#8<Yf%?JxMJ\^44vEmX*'dn׽6% d~^P` a׶worJ}-]i칥ƨ{nK0Zo[$;kUbn!o+)M'LIIWW{{EO匦WMk.[a? {p _"Yȳ=;%O@<4N3*hKxԦV.}4FD8U_,MKEe p~~ŬgTX'K[ z7Bv~SiK1Vj1J1n$[7%ZBm7Ǣc{MuaHhN+INWaSsuW0~^uNvp mJoQmewLt;Ҥ?*y= Z̐(;cH?ԦA_.G&ff`U^7LIP$ Hqt  JgS !._L֞!AEnK_b&M_e^B!3]Kcx]Y( tLtmVܼ||sǝd;(C>aYX;BR%I1za)ڹ 1z(#t)&9q}4tjAa'w-r eu8^.@^溏#BSCMQ2 ,/&YƌV3o5:fآ0dXSxHVڛT/BV b.[7 ERgضi[_<*ے%yZҋa8%. jhZiӡaehtxwєI`.y!"9-q]i_Pjc8 =: RBwD_~tLb:O)5y JxeGRTPHO7 <߅wшnj j[5y :ITAVڥ e!< v\-[giVhdNA(jN@&{E{h_lu/m`ްQҪf!G0W~[TrS-Wˣ ~Xe3,P,w,nGS BV ECM09]Yk h8!:-+bZVd#4iΈeuYI=)vo^~J/Ϋc:%պ{w|m:Ki5~)}Z[X9c0r@ޅOYCNXA1sVa:Mfa2eVLa`EcWxU >&8+4M>yC?Z3i$MJ_ l'T'H K$JG@0Qj)S#giE~ct> '@]Z Ow E^G~gBw&lqL"li2kU(0w3#KF%o_3N6 '+9O.TA8j%~g酖~p׫RŴmF~<}s*_p; tqԽ!'`Y-Q|8KFfloC MXB8jFpjTT-azmSo~|V6`փR19;}v{W!9p,Bk"Ɣ;YFcq=2WFp0&)^>xhӟnw;+vg~9G᫕Y#+խ<昞D 3bo+S9F̆^]QD`8sH;7QnBïIav9[ΖQ9y fk=QERgW8[o;ݛ[?2;22>kW\flnhsQlS6dĪM(,JtV; ( $D) sn%O@H^IK yI䍭1(:oz_px|wdG[ZDV3f4eʄ˗XhĎ|ۻQis3CZN);xXV5^sο7_&퉣 zr' @sbOT*huIڴmmcղ]^yFՉnOtɞ64 YOBV*[6lheh@zN높?- fEhkllڪ'Fŏ2,Ϭb*ufKe3e@~b-V`jao;"%P@<6tD GT.2~گb"8%[1 :GnLc?^|Q}FEM`r1F%ؑU` r)dWs 8y\[[Uϭ£r.= ?ϔ56" u%>ǁg iRr϶_]9bTTqlD}*K% ?ӦO9kbmo:2<(Qw7C{]MGigZn+ KUA+uf)&0V? xe9Or7:~HX;#^Z<9s𔱹Hh,?oOu Y5•L|jbf1W8fp4 {B9k o6P T l@17YE*p,}kuWYŬNjG%r}3!6gi ǔCE4+ 21i C0%7lqr`JeU( (kfr˞UV0ڃ֛JO(bQnZ4bԛ7#6 GX&ܲ>)#;e<qgM?gx@= ǚ=ȧ'"魸 m}*1c}uXkEZq.h{1v lShu!t܄MLw '!+ȷ2MhV?ԫNޚlYS >oM{SHMqZ^ 3`YYM^$ۻt[Z;fqU Ũ)~{ Qww5 !<XjMFA 3@YITí~>lF.iQ\8W'F以aṿ! 
bE]Q}vk 7OdQ:1D㘀x4qT-hl<*tܤNcȏ~BQ';+:d P}*w-S=[}GuT|= GAFqw@N.# l ¦X* = -<(Ӹ<0٪vieoRfkuOq{2nUώu[XL_?;?95]1V, 6Y0Y`'US_v'<3Rid"KIs>^') #9q!ף;l%񺌤<ڕGLڌp7!|slm:dp$h *(klD<됯kktIg^h Iؾ^`],{ 5O04+= <~OAߝOuZFFuZVCnC 3CV) 9hu* ţQJ3T %'aL I'Sl >ZA)޳v;h2?},mX i %(oG'+tȝJxRe޿$L]NgtZ٧{iqTf=th*4 RbU^7L`z| Crl%s v/MS@VSRe^BQbYhq]{(4ZV[f )r(g]ɳmrkMPP 2f9 e+A3|}Z 4:7axq8 su,>6"h́\&͓nӌU)[ó n sb9r)؄囆OlQ {ʄ4݅yuR K[jqx'Z[OHx %*GC%՜[UQϭ,09buv%!a J^MR1gRhtkJ%ɷGQrkGDDYc͜Aq?:p]۟Y|N}^qHi8##bT,V0Q};![.=^|172!oӐwr8O;i8=#>:qp6$>ÚKe?]|>փό\omys粲[añ]W)6bQiwQˎfͯD.!"rjZƫm,FVYT~Vie6sj I]]3 :Z'ᦫ:ܛپA]-_VMo0㆑Cwˮ^_7/8Tzw"7N( Q!M  ^]:t霃8@+7ᬑIɊ5S:5>),pSp?Zpl[r#~1ښyKE@LtGc&^h;ߤ `SN>|x:^ 'Q'uk!qUP(UF4 GʞPGέpV#|z#dޫuU8'^LlQZxsu'5$Zk㼀xR^|An),l5 $j-t?zrE3ϴ %sۖU3P3!3;Ng2DlB֬,vPdo ol5j,{Xo8SvgҢzǐ?&rB@fMƃu#BmmB#8{PRdŻ7O)S߻ђ>4{\|)N<G#,A>\U6"2 <~eƀBP!JD.TT\rƠ.9=IR5j5{SQ̎+$@J h6>@#^| BVsߖson0E4bv/0Hv/%'`J[ .# Rdyhn!hVX8eII>t<_ZPU7 s&=?Qr#ې|Jde .ҟ@Vwnt ǍlX׵=R/+,l߂twkivpvqTZz`׵HJ;>;E0GUhLbT/T6Ө/`:AuNOOCa6|i/;(Yt2Ȭc%wxâ<֔י>?Dxe1BB7? E!7w87l%i!F@!8IU.`6q8< Y(ѮFmՑr,%wxҡy#!Q8'LFcǴǧS|EG6igˆ,[:jDjQTovUǑdJQ=vɔQϔ:K,1̳B=uLܒ;wCl[$]٤p|tWpڹx̚~\|4*eU2;O(mKiU'dؽ,ؼgOg4 DxImrgi}U -o)+"< Y]Ff-ܰ]g p\}-M;F 3m7xn]XZ}ָsM"H 5 -KmӶ\kcU#,Yso{XwBa#K3(+< AxmB=?&)'쁬oF廠@(.3vxLωЩ\Vt r6}V />aD dPT/177\g(!C^x 4_F3*z@o4$gY_G4akcxf,}ǜxE31in,Ouiٶͱጅd!.SP=Q^.V/^W o|! ٸ3}wG'8͔h%wN٭VQ5:O_iRթ˥8!kUDrIz`ղ]^y!j;~oa\ۿաiOfxhh< YhZoٰ-!!Z-uJ(!T%hIp{B:O@>сEuXTPēRl[C%|" V+?,`Ӊ ju@x^^U=ЫPbpGŗ}}xl؇:1~ACECXlH =t@%Ʀ5cKCuی!_E\V-!xaͻբzV`M lfjUH4$>KpP@9Vk^+vb^ջLQKd!à\lHcuƛzr*Bx rzKstՅ\2ȬBɝ^|9u60 d%mB!`t2挈K3"a<{IlB Ό(2$.4JP.yA1j/ϴ&e.SU"eMY"Ob׫6o;n!o%Ȍ' OrIj93|kXbKMw /h3V퀶^)zl+Z=llt.#lJcv'sR^$ᜒ[y|j|iۖf@~ #CѬhnkQ\%-FbݭpÈ.́x wa7g3 e˾֥gzD xfu(׉*< f!cۼbvb)JD$AqiFa ?)>Uh\"{=hazF|{ivpvq ӘӤ<fkBWh5juM<N{51=,oGx .hCPWy]lה|j(yZtkh0gQ5⢇xm-[ 5*Ly;ݣLGy+^By4ٵ^ A*9mz^854Th8 y8*:V6-ln77L3Vikl!dA='J.pr MIT:4Sw܋d8S}vk,kX>8U@ox65[kD㸀x4k_Z][Pcܬlw3 q0_<.waX_@ o˰NL;8Sr{ ӍPk lmWB}K*6 ըq| Y@$Jf&(xD@ް*}0x0{X/C }EhAt1": |6^d,Ai{!'QsRAjSm|xRp89ͦ䆀 C8=yJSjf,e]xy4kW]G`דm*y| Y k1gR.}Ө9ƆݕԔG-N@ ?C>Cm8Jrylys؎f^`z' z*rx rzLM*RձڕdOd]GYv"Kt%:%Jçvv95_N//jaOzkJݲ+},V>>Vvx4+ gBir4Z//KR!i1pӥNj/_&u/d' n@n on8753,$>Ͳ.>aAPgSpogIeuKkӥ|)F%_,F&rf\6Y8xcI;`Vs\Ԙ=28Iaq_ 3;;R˵7k_BqՋ .!?^PfA6v^_|3~lo]!L8 yZ7/if^ ][v ZGF̈OzFelvo葟f.lqCvl̎b/~0&&ҭ;15-&Caj8 qGFCk]Mj) Ǒ݋"<Y׸3}wG'w/YR?%357tޤuK /[E>М̌R7@_ubj.] jPb Ovd GFWa%JXq8y@Pke1vX{P |uPlZ@Ь:OMrsΖ0çTm+_1O0(LWm7JG.nC(l&IJәLBʺ+䤙gf/l}oA㷇9=.Skr/1DmV3b5 v{]>ULᡁzؓI8zL+,`@ߟYKFߴε ! J -S_%$5NaOr)nko~4gË9HN'!ƏyS45(i[abI$W^o}"(8d5s#n~YgJ< EPV!Zi~8KJ8S=vi?Qs֥s&16]jCK88ٶ/ -Q9m×IĎG |+fp 3!%w8 YSP)(g#-xڲٶv-tk]k9ħ2(0 &b3[qD㔀x4+n^|۰s=`7"^`vtɬ\ϐ>aq|mj}yV}gV9$J3$h\fRH#pҌBziFpvo1Qʆ{\6EP!>i9#yCTNAVŐa(ckWP6G7nE0ygDʊ*NJ V ۦexIJ>o影.ӐO_(Ag19+ 4kDzk\^|35rx 59eLu`r19%K֝BrUt89]|YKI\ ڭj@~c֕z7h\  [ǬVXl1KH, Q\ uI5^$NM(>tQúiCZQ)59G +-O_vOAVAxKxk1%Oǵx;V-޻۝IQdrJ}56bģYcikjTG3g"T1<ʞ̶+FYY8 ܄.BU#Yʔo%BڝqIzlGkQi`F(뺵;,(0k!hVQ'aHʢYk8M"*4' +Qr i%L)c͠4kH;ACAuz87, Vz>JnxwƁ$ v!NqdYJx:އ܊4c|^PA{ދf=滇nxF90=p`Ss Xsp(3:Mmț w;fVLJV|3̄SZTⶳɲ7;,A.NCϐVIIЭL{x'cᖲaY$䓩)Ft? Rz'^Gۯ:)`rA>`UpxWT66vEƀs+ ^PJm(ԔHqхȜyEx/v$qʁn^|Mjz@.!)6/RNCVZmG3S\J:NwHcLZ;;vsԲfYGH+5|ZFO® ]X6.F*d%?NnM wvW}wYT jPp0Ky[3=@6CJ48JH nJ&pcPOd8C?)GJ,C$adZlى19< YiGN=(Ajkے(dpj[ѢJ^܁jJ8"d\^J.jѕ p.Q`XlyB2w\8y31 N]hw.InOK ؑ/JPkMN5 8yHN)|W~ +溼K m3v,S[ZR;Yiɧ_2G/d:OɝCVPob^{{uF&!OYLcGic%ޚfn7r?<)q*͗]@pڐО ux r~/^Q~ss)tjrA'jHX"%kvq[KPtkawG[Pn={1Ѭ1U9F$&}3: Rܬ;<Y{ECO@VP(lcӭuqyނ|K@PJjSj@NCV! 
aBeϤC3\BR6tS !KT +9W w܀灏 .= Gɱ܃LdgjQ}vˀ{[ ; 8LbT8:@}0d8X\AdBɤď;Ĕ-q-Ҕ3PO1>E J FҠvե41N0y3pSJ"sx[%IUJCM'n LDzWkk4Żו+d%Id߽IB7xdM"%2 6`̫mEeZs fdi/D*Q8qc#MmA)nW͜R5P8efrsnfE #G oQr@A@I, +pMb}n)+8FT7< 7uBCk굚mo!ǁ3wZ)@~S&PH8.@V:hxgLuY@<)eK'!aښ|)^\*W {}Jݰ'Dn8Yn5o1烜B= r!(OQ%J@<^@ 8 } 79 n(;o9<(Uۢ.eWiQ{upH}ˋH8ƇAFK-x:޷|i6-@n8yHgi&u;FQ=C[ǀ7 +Oq훝dJ>/ kKh˶jro-^݈ B>}MIkˮ]\5z-5rG+%ރkW{+T뫨2qSAdb/~ bzx -Aeu-Jn/pdWzDɏ^${6x@_P:6~`Aw~`UyG yfPMj%Iȓ]Qvͺ!{8^4n!(0A`M ^eJc @ ݃5^$;Yi(+ v#b@A`_+<3=3РxXi"#EztǻK/O"=6<hH@ wfLYyz~xc5,k<; ,@.t~j/-B.v~KHi& 7R𥳐iWp0@ۆx1e,ߕ ydҌ4Sj ]}t{̴Cy8KSc>| I> U<٣+z)HC}z憟eM D37OsAG#U~èY^ǵ]f lTsph6ly+ 4'|I̚2&L߬." h ~fU̪ڦ`rv % ,ANk(q-ȷRm>3f4W Jnr_ I&M4f!5M)G&}%]gu;gJ5svnٕϾ\gff+{W\+;n1jٛ-;Jh?J"ڭm/SUj-Gyβ $ Y?l<1SsMW]s?,}كW&?L>'u'<1^ui٤faö3h⥨op Ro$ŅG<+dMx{~bQ7p~1۸NDKsG;# YH̕iiZD;2xK3 ln1`_VW 7)|j3ŹOXy,I>k%YMl&5n~?Hyl$R #Q7iVL  Jmeo+d~ ?$!]vH$z5٨Dᯯ_ě|̒ʮgR6Fɬffyغ7?\ xiQ绣Y!0"<Y)6v'9;$k DxImr=#qJȖ,*&jviG4u@z ҿv!+-F&177TB{ ux/F{")^n.m(iZZN^d6v 75Lz wNA;7 3P 2#պMKcps< d{'x )mU8 YmZ^1gնFMqf(-l=q%fHmbܻ¢3tNm\ jsh"4< 1Nhn}AhG|A9I` Gpvc҄ꆳoi~c*4i1qҽ55G?Eg*QYm&sLj,tȚD"Q%&V5 ':ڱVT*BVWo=(8;lҳ "5|Ugm¬_.A^jcz{lzJOn 9|Nvlm|+4)uh) b=v׬a3IV-x 43Z6'7Pل /r0[+|N(_gOɉ^EP*J? 耛`|6<(3p2?,}&;$"p )!1#<{n}-B%_bs9RȰj¿%(Ԇ0PܜjEIV0YiMN(q6vZ=N˪q|6'w mtMmSӓҠ*dZFy2 ;h$R܋dh$R}vH,䋺Hinj[z7Ɍ71%{ xR̋fԜc|][)_%M Cx 9߾D&*(4%wECn,AG!w1_ABW7$SDx%'`hw\#}nf,r z~e+sHI~q)~=tEaq湴&/ّҏ_<] t⋹ŗ 9:'aҝ&q˔~7yl&ELTrPqka7J^T7ս\ۙl!όe+)* YmgJG;~ 4E:e,ܧ$.i2 e6>m_Vh^|A:e=q-\^|V)%w^׮aA֥c 쬴]N <|5C>U-d+nt>T3ߢ2m&iʛl>nCB~$;H0r76l &8yH> pvU1M9 <YiEFC>]#ܸV a U0 %7oZڨLTĩ7N58#;{.v$<PkTNy(^M./CV;\:(V T*& <֙!+洰z]v'U ~Dy"p/mwx5n`ѫ릵.'j&Ryliykuu#!PA1}vI 7JP8YKYf9zho{c ʣPxcF 1m&C4W…L㨉&ۤS+ZH=b,p&Q`s3p4<$߅ )W| Y)Qrw tkwK1.SiSFwA,ԅ"d=ז]1KڪHT"sxRy ed}ÆҮ1;SK!?׮%_JȟhxB%ֶ/@D1d×J(H/8>D;/{Tr=o!|}"L{XzAMa_!0d%3@ɝN@֏0T`OBCTm"3 T0-' u&,D cWJn/[)+ӐN6; +3ncV#|Ġ1"u>Q]9|Y߬>7\BEuQ$%--smbr xۓǐk~@/2Udli37iBGVl}i3)x3\U2v]۔ RG| n$6瀗!+ 5y4 atek{k=Wd@7Ưa7ԻQ].QQGEX~+8Q%w=zz&i3}(D~_y =)Go2zl})>,L蛼GV6< t<{1ѬcqoZ0@)jd9ck֥M[% 2 OQ߄Ðl. 34 ccFzDz0{%'|v>#,4F "Lg{UУ }lh`ŁԜ5~B"LgO|Kg'F#ьqhhS*;˰qh~5|Y7}50gXV~ KPESVN(+,!,*TUMǘ ]/τ~YڑЊ[{e1)he뮐*k,$-^iR*Z&F,K-uS賐gy x}J#m8y ^");< Y˴Tb9e^1+yoβ:D!Owa !Ԛ*]Wײ U&h8 <t*'28)ۗ?mb|6*3`^U ބz_>t qIO }roCV ~+DNCVTZoOXSYFweO4~%ު1]:g"h셹izyTৠv80>/x4PR!X30Q`հ|Y5{gp7‘ܯyQnsC@W2i C&ΛsgUj52+l)0G&ٱX)vfl/09_Yb)ZU֞; tWXqJBy-JCM>u[<+MGKS[4љ> -U-G}$J4dy?LnW-ׁbszkeGJùxXX(&Js/nO$uѨGxt~zcǢQr93Nք&TXo[ ߚ ϰ˲.>aW_b٭]W)6OPĻŨe] -7D.a25rS]vAۋ'AtZ^~*ڿۆ{No{euҳ<{6JքMt![]UΟ޷۩?2ekմVx? 8t:_q%JŲ_AC[m=0zrKH<"=Z#%I@<4.3Tբ8ln SbYL4&LoQ~o"mB~faollV,7WVyFk1&sq)uNHx z6 ?޵wLna9d@o7/kQ0ҭjc:kXCbiФghʱO4W ? 'ډehA>hoԃuXľЮzc J F=mվ;wLx4Kjt!wn͚鳭 G(-«N}׏ Ɓw + H;$Ǧi)&A]@֚C|62Ud||6l%8?T"yqJKx2˾i&rJt {R\C Ĩ/D//3Qdhi uqN~M硈OC/&iǘAi7+UdYH|7GV4+e / yke=˺F\}-MZzgo?c]٨tp| jbyِ&-Nh8zVD]]6\ Ft"B%k׌/gR٦g}4>SJkH|d !kğ lsv[7,x! P ̚U1V 8l45R2lCV_A~lQذ*dFwNv4 p4o£j48a6<~?ҎQoo#a!Ghc:ӭ'0)m74Zؙb1|cX#v ?$|Yy0X1͚ܰut8kÑ8,ٌ Cȕo`Gx{q'Ysku; 6˚h?5׽et'Xpx!.[2u{NlT bXyY I/_xI+ƺV!ZnͧbWiߎ&)ES<{sп0o5G|<˥_hNc>EpFUhNbL2ohOa XlsWR^O 'Fzx}k6`潋lP/(( y+غQaU:ȲGJw좲=C"]x:M%c*AeH7'4@>)iV݊崊 Ӈ" l1wV(c6 ǚrU0aMB>Z]mV{Q'-w~(= +TT-eZ}{k z.' 
:}~T?@2C>O'ƒ@HfONh!z֪?ۏ xCP}Ѣ1Qi I3g1|:5MIV&gG.ooo(.)ȧxO2g *лCVZijb J"ph&Ng7rYi E<06G3}nq4VL-Gby4iS" 'ycaFu; eN<fAkJ 9ju(GF`͜>),CVJW(+9#߯PGh}ej3?Z!jE-VkggTlssy["ђ݊Ye7az n5:3ilgCÐu{0fkR5B1p %#o/ʪ0 pd&5ⷽe%c袞82YXb*#YTn/BQ^|I[5_ɖ3BK# qdǐBl_ɧ)rLzK6Lp=7C}3M7l\BXP۫ږg|JqLa߁[˯ssG^PKڄ{d3;~2jᯇs[pB3σn.k-NS_;v, v?]}C4!SO3].}S{W vx J\gO OCN8.m4-ոJoy`: gԲLdAa a!#m <>d_zA |16d[܈;B [!7dk[VaWM䝒7 n0uӤ)e xe#3g8[6mAuV>doC4yLSmpoWpd>e3 6O'Bzx/SˋNty+m#E6c>j= Enh~DK B8y(Qo}}}By%Ja;Zt<a"^$ YP}--&~y+ eŁ:ݶ8 ΑnFd.ϷdS.@^Hcssc"+_A8 *f8r@E|"W`K{ @PNeķ303ùƗh )\__Mۈ¦:d>'/"xnnެCgBT~䵉^|DC]W'M/}ߊ|/e8ege̲xF,:^W7 } nf$jQ Y_ON(s!vM `'ц '30U3;lBL_wbDpW2 7C=TfhuH/S%DtX q*d'-d"qAv5Ny1Ke0^;C_fHmyGLh230,U`gǑ%# Nt'xv*.ی]  6waK_њ:5G^ +Zf ˕_1KYG6jd!kGFCVy0m'W&#6Uy ;gI * c:M{zt AeNqyFRL9 K;&&on}v7W UBżPgcBTr"'%UBIzӎ3/fn4.KS4hBViZ0P¤ GDX<-rgs|Mow6+k|2#_i!%?) EZUKȹ%Lt\Lq"˃F4 ~mOW[M/G^53TȲŽhIӢ[MY7ȟ9Ϟúƭu#Kͻgqk/V ɳuϊLE.nዎ|gsN%l2Wlb8?3>{ɟ?k&YT^W:\ )e߼qw-/m33-z컆a߳Uӓ4}T~n½vZ>Ęθg]dlíQ<7Zc MyڶjL&3'n4sRiy~h_5J86I E.##>rf jau;O]9o`L(]Gi?>zLweQi8ZE~@Pns7ަYF}Gugby),k/|ѩ0g{ዛcۨS] $^'&׮_^c SL= {عzHl"%j Ԛng4HX"b u~=3u44㱐{v2>Х9F<ޠ(dڂbe+ Y,5{dDs(OiI!q kߠ^..滫Ah9JD d'tMb6vvnq`{ټPD;VY춃TK6X/mTA=cLvR\MեO(pĮ j[f#X*^‰y줰SHR9![٧>/ԍ,W$nr;u7h5t]ߊV\>r+sIg ٷ.:ޱɳoU]ܾ-׮t:gydH6 ~Pzkm=Q*Jwn ooR)!oAzc^hrH;5_ny,Vd4$>Z1~|A͟ _,Xw:nž)*fMrVuCˎ˃MrDv[vAۋ'AWVԯ1o_m_=\⷇Ggdڬ=ͽ3? g Xkմc0qCwˮD{*Jb%ƖQ{:_,ѷ7/{Դ&rCw_l^Y(~6bzHPUt_}K/V޿}+coYutze]+^R-be,1^2/Vُ~Wv~2&XcelW9][~ƕߋz֕_(뫶_te*AKك7cˮÚX6A;Qlpi A >?FW԰U5@̢}Xeɋ,[hNlDz-;N9$c[e[[NlE'Τ{ `Kwȑ]s. |Su{_$D= #4T*6[=*Ma MmqˉhMV,=x⺸gwyQcLAlG>^! !lTǯ*ǩcz=Fp\hY5-+DЌ#0 *M1f:})GGO23lmE)ɚ+nBvBaP7hwD-x=~IU:Os!Q=%| ޓNWގ$ckt`1|6b.K}7E  #kisk NR6N6]hhQͭ{U$?RE}2.UQ1̒R#$`SWAr`<°6doJ:J甤n,^&c(f!g_0Ҏk8%J7}#4UJշ[mnse/usԪGg 𶋫U L wh\n,2gnN%uދfzX{e+g;_봭%<;;քJ{dcY&T+cOL/nڡQfP#F6 êV3b領_YxTLJ݋N_(p.mX./clJ.Hx؅x\ ] ۉ{ބ|SXB23-]Hl4:v!u-ȷ{=1AkA{;4&ԇCm @RV)|c1R\ؗhh D^`Z6ѐ#k.Bwc}6Rbw:_m+LW,PܸݡJIdNꠅfѱt{!6~ u>m#y֚Y)ZY,̣w~^j |dpQn8='ɫJb(Nǿ ,G{ދM:vmG;"ت%U+9{pܒD5U E=6[4$E_)IeLVO1;GMk\xAF 5 L17gLzӐe . NR*Y]#㶑p<$1DCV_Kj̰4Q$)lV*;HyFxW#%sP‘wDU#e_>j=GN"A- ,uQj ~/D<z;\,GI'i!dA5ӱs,ʅc [˅G>>'ڰ\+JL#GtV9[pVM'm3Up=+zNl eA02ij7SS-Lo@C2v}w$yb4| 2r>7iRFHr='Cd' h㐺^Irâ?=vR9LAN ؘ. d}p T6ώ/0 F;&U-sm' Y}2@ٵNNC RwxeKu%>o%Ir0OɮfSe0 FHh*c10?gyFQ'sM4j_Brԯ?]Kt-081=0p6X>,h= <>?m? ?L(PH04%cG5H\Ce#6tfUP}5}J!_]yK' -h6xl*om6Do Q[HHB͆G IxD' l7yq!.1& ƀp8Q=4 e#eYRoS0}f+Ae,DKTg*ytmë6x& p0B` qDueGF3gw$+O+"4 | Y:s4F좾"Prq|nCz' Mg?2|u]+g ٴgI8d9&~Lxj8y_.B^T!VeMbiQj[N;"ت|U+ѭu/~I~ {9AYY|AW e=[h qQb\0P^clop+/ܻ>,(خ"g2ٻ_Cm {ۼUUzvx,գ0j=##Gr"?|q+VjR=B_<FR~VuŬr+Iȓ*ioKNibm^ m/3D{1+oWCm"koe8#k1WjW-ҌΤo7X6c6K[lݦ >G#(=|ow\q_z{N!)2c4 W:r"GQD_&]Q|e> riP03DƠh3$b=6.0aa ly;^\*W>5*lmVXH>Yfl~e/#,\fY*?R7 ׵.-vA$CV_=~wxڤ'mN ~HGdBW+S לD_=8[mNc.g}A./q "ܰ-7혘/@JkǤ "ʦ5ФI!.W!_myGKlnTp򢲁/lGO{w!oAnE7]!M^b׶'C Qԉ>fFx;31 ydjYmcB4wcdnewiD[5QX.AV_J}7*C=vqcC>x!u]~R[Hd&嵹Ob "s?ģ> K U;|=׫cv園:fh7aT:X%uWIIlsXyQ*CQF4Ӊ}ť@Ǻ/q@:( 澟O( NўYG8˒͒r)%lux]mw^'c%Rč5| YE'Kgw! և65QYVfki&.'g!7a !+h&edz}{ӊ񉠑omEȋlV4ؾ͖ Kmb9d%nԿA<4&+hmrydƨzKn w UkfwE<.C^nWǐ+oJx|qU돬JQss}@El/p9Zo"wӵh'lNm]غ)7>y7hCC ,c;R <Y t3MSģUj{,,;o.A&P-nuWc)Q&BWsؤ. 
Y.S#o+siy3*$] ~R!QzJ!#MudPFז=1y| SRTmTԵH;_(]%j( D8P9 ;;[f'މd'>"p*!ŶlOf 1IT \76cDK.l]l[vXas' 6_< Y*]nd`]-j&"UQ38A<_%$5,arRxsO9C>4^/ @V/{u)l0A!cwV ̋:NECV_N \E3r;E\0'$#<~k:W 8@k)@ڮBlZ= AT6i޵UUK(Y&md[ oGޘ9 \eBE78%r*^.^gܴ2jdho#PezT=Z?(`;x[;4uPóoQg0YGW^C;ah;Q]غ(o_Pڰ<6K61?u[EODd}@yZB"Ro%ۣ>zD3M*S+zeʯ.2P[_qKNUKƲ_X|߇M,+Kxa]=EY,wQsF:x/hG8NV2ޣ&o6 ;m< jWQ{ѣ^@c%RyXIp|uҿ^KMCC.;$Wsz|pw[ 5E4j[X1̓!뇊V}U+Y1tIPK{ H]|^x-Ư@c}sQ[R+0)8qmIזEm\ب\ކ,#' dTmN +xݖp BՁ.u TJ sR + wWV kAVʱSLG&Cŭ擶,*dџ<,"Q8]>it8m,o m[hWdU6oMvN!+ۨHNQ&"lrf[^:oJ7#Zcې0^K9Q`G»pZMj(ڪZl* {}}a:[M^!>ET!/6޳H du$Ke{nE4BVey+U{JV<6zVp6|pIEu{b]LCxW"u7 P"cېm#|Rr p Rs2|Y2an\>^! /adUŅC7a6G7P?RV-}_ Vs/ڲY =%%e1ZêV#ģh{/ r"Hxt LQ[`ȔH@|. K-^_vg9r8dhG0w j sY*W͓K{)cnΘu'dJ; @K(^,)>anB\CW6a┬ob|0?KVرeyW,EE۸0{B!-El/:6`bEX-8;o@ڻ- }^Z,-<}%+E\!Kb-b@V:3爺Qx |YImŽ@|Ťy ̩Y-8yCN' O(dًX*>?! ;>,K|'|PޛN8QBmϐN`>eNa?d~ 7Anhm)QhnK}CRu\]Gw؏7O?w!:߄/נ.:VR!ȇˈa, =R 6(pT7k@+/0bs8y;;) d{*R{T0C'x z][vUVv~2ëTūWQ]mW!-a E ^ Eȋ=[{hY}~ qx| Y}F`S":](5(wu[K457Cێ ڗ|XNCw/HQUaE`Pߙ>>m}Uq7xP\*TJv)NBkNAR6!f.>Ц$T*]O ho}nX)^z'g Ns279zbG弾gXj ].XfveGhγ}j/;WdJM۬{ /yt@񓋺bZ`cF8YHaBk|^&AmmO9fAYj]Q3/k{ÃufQ~=0/ ֏Dn4x'my->d Do#08j|Y=K%5II8O6Խ'I?  O}'O?.iHBjex\^FiF |)f|V,%ӷ͠ӭ*+ryI1iAVuRx e#@9O[\ZE/z> |2yV kJșҦ]ѣ?w"lz?е\4,(K;< YbQ;/"u]գSrZ:y˗im`f)LĆ3'\?u]Kݻc7|.~Q|UW!K G ^Ex 懣z(莄a7n4Q['4QzK\v7v15EO=TBE.l[9kn-H5lw%_cWTT6'\i K]Om cE€gYi!`ZAqlXW ЋԟƋi4}Z,@IڧV'yJ qCZ[! u]f0/$GC-,-SM e3/62ǀ _R29OG!jfg]6qV8;vY&/3m۴Y!kk?÷ا{554VuT)Z,A(dS?Xrh@6ߑ!94L#c,QH/BtDh xd,Q sg22)'D&d #u- cZIϙ%`!V * MgWR edd 6'YYXrs0AΪ_6p#-x!MS%c7dƪD_} P)+"YD;&\xlDځC&CџgO/ͻSω15*lsg3?&:F0`Q߈o,p 2w Tv- ;5[;J׭rl lыhӞ Ӟ%53{vD-h)O!ٸG-4h [L(hZJr!/? AV0O/RmYǿ*6 ںZXtf4Cw I}G,F?2V0hu"b&A>ꛯVR. \ymͦ F$Է"Qg"  9salӆkQ~` +p,1f gZ6WM2I:/Y~ &hJ/Y (FRm3X=Aw_ *nj˯»"Dpr+f :gokqD=# AR'26R0G8N"AP) MƬTmxuR@OoDOӝNCzQ$.쨑.VajT6{nN!K{߁k>܂PRANGO}¿nC}=n%%JxvК6L7F7c ӔKK kEі`g^R}Xhmr_[-D#[ޑ5۠kNY,wS $SmcfևGO;@rvHZIizuR&%42{6.~%~#()&M0?`G |?U[JV][Z^ԟe?pD '!O6ߟIT4ݟ1|7ԟG݋^>mam-Z9x /@-$tmٹZ]e ۷-B~Wl([u6kSPi~G<9 <L{ G#< z 5itHd]#?A,uuEO ^h!O#Guj'il pK`77CgMqe`ԉ51!ucǔ-&u1Q OiɖMf K]k+nimȷߘI\(ҸSmaG׭fڵ)!%w TanQHV UQîj/ g!jkL17gL6| _E[$|YC9 < moBKPw5ģHC|CD l&<_L:hUYYxԕQ.FO*rAbzCG0u{&dźRw8yFXe6x4Ye/ ufa[5dd% AihBzi] $IXu Sgfc37n o idOz"ڦ^e3#O28,'N-KH ~_}v >}7F %q-Вۼa9Nm=vh-]ad~=uox} ɼ=k[y6pzM(E%Dp Ծ g o%9.RNӿ{ QL$$b}zv dNAj|>0b`h^ѝǞBWΚO9`dKT+4t bcQe2nܐ3 ,WӐ5*ӓAp<9vpk߻ KәW֛i*ߛsĹՊ]ٗ>;v:52"ɛקo7ndɸXW^>[lAS<#omvOr7YFXau? u?~YcxY]->{g >|K)F߽U_[)9eտQ̩ $oOo_XmU)~U؁`z9W>4:1Ajޙ A-ӯᆿPZ׬Wg?k\1ơݿqv)x;1J|B}M}K=^J;KfxufO}>FF_w*W\.D/T*w_CAzU}O%5)S Q8 #ݳ]bOrgӼ8;m#Cyyp#,/clmع E:> |e #KXv+܂o_9OlK-ź]geTR^Z9 `8o[Ngێ\t}_)Ka;S sLMV/V/ ۍ%2UfJ%1{pI7 VYɤX8}zMKəb'u h$%;Q?"h謘,p*PPlVLi.k ;f+oF0⭄H]>\}Ӻv:fY]=G.a.6X[B r[6`]6``2]Mcd\&dı9vK5Df";11h"Gah]CGa}|0@EmJĮᲆʀ]L /!\ڃ$6ĤxH`?5tgy_N*9uppo0ހpOЬi?ZiDjتM#5WCk> pozYԇG3厝lྎQ<&m/-G:ʎƳk)y8LƢr0bJίFpˊ{\ul ҫ嗂 O}N~P199ҕhƽL>AOM#~E{[ؑ)3.K.H}eY ////Bn}L>z"yKU7炩#kY5$ tH;1Df7>E'ĮhpYVn9.cR LqYCa+!Ơ/r8Вla3>㲆vV6 7pn8@ajM-U E']}у| Nl2(i:6sΦ 3j\Z-|]t{戯dF|{',_/w .kczspB: fS.+^T7[?_Kp[.qYke=o3Vp .]NS_ |ez: H217).C.,$F fWt%o]\nB[n0S=D}( JV{!\wYt p.{dZsG#0t] <ߙ}@1ӣ,. 
V*d Ljz2QLHWf gE#Tۡװ)8f*Βq;R8r3BJR76փ|t!f0 zCnwvwf+:NsDv2cool㌆{=8 u[kp*.`Cdei8&G;Zk@YPkӒ6b+yJ,3GjG1‘rF`:oԫT8K]c<އpﳬ>F' Mn|Ss1.07F*XV)N>xROR?HC39;1V?J3E+٫{qOkKq5sƽss)Snq  [Vo'N CNp1x<+wݩ̇N@wPӾ}7RޣN'=cu.[fA-!*Oi ѫxSJAX$XU2'9UZ7.p/HF#w {eěsmɠadː6(oيi:rSVk¤;1zc>SҾP`߻q hx5ꦁ[9Q~e+-h> BX-ܪZGOiv*1Ut7xkvBt@Dʍ~  _H3p:@nz=,GAtMP _ojED8 ]ej je);.&UX aLiɷ9DP5R AF`ѻO4 {ؑW Aj|+]A>l3h%,Q kŮC>;R xi=T|4\\ >nku)zc=8Ak琺S,d)Ŏ.<,q 7b5j/R}~dMMkk1 K;­6$!UgzF)oORYڌ1໐'gY*a*"BlV8'/iY/opfsmζyM4u)fP¥ RV-ˏZa^%YMCGV#ģh{nOmRY1wvn9Ky{U>?;S֩|-"EH- o#*[%#-HsAʃ;EdNqHܴRbbRA޹D4]ꨋ1 &w/|0RFtPC>}w|X(YRcx}T@z &4lCm cRVFnC4\ jI6TJbW{!QXRhv  *6蚘5;g/.Z,J5ű9O;bR3zh:L&rxz'q~ASS\"VcX_!uAӋ.6hdlsx \Mz GfafX3n)gOC7 7!ԝބytXpb1Ccȏc;q5A<4ޥn=^B0]{AwܼV)I-ט ]nLNqk)QȣښE{Ap T[ $ĊmiME]fnBYjϼrUrx 1>C!(3j-Dݑ3rqvR!(Y]|O8A>^~},clمӀ,wUdC]&lܦѭbh-#l6<W$FIz%H:D/x 6bA9]b&?4DNy˳q.*,HF|M{P֜CV? ^?.A^RJ<5 if0טɱAĵ[bh c?ٲ 7Z_ns-a⓺HV\&ujaRihīx rB*R < Y=b+En q%(ae׳|#OC:=|Y}ii#I3ėj=Cv3 vCޓf/A<:V;y &8 ӵJs׵J>2d>E}*̐XܒkC?d(Dt KuC>܂@hi0Ml;Զp~b'OT0} ,=,. < Y0VgTPxv۷ٱ0Uft+EU6=6<^'9 =qx,0{Z̽U1@LDR7\,"6 *PgFνu$sY<Ƒf W7:ZHB_p6;u~X-l4rIBOHQ-TE;ܶ {!-&j[#*Gp$x esǷjyB.: ,wCI 1A<0U-#ll6ibqxrkNѾFe؀u Kq%nJ:MVRkrN7m(8ĭU87qr|V&ȱ>bu0;E,XBGcF5 ;[n%P3KΧ Dk >\|\s0vX!ZL}$̀p;yo! it-ۡ[#rx_=U+@N ޢ$T\R{(Z%}؈dؤŕ-5|#paRخu~kޏ5xԭʡ'2C٣; # N4 R 4nI9`*rE*}'(ut#aMpR4 ;%t:^&LZ5]N=z'E!osbZÿNE\9ZW%40 Yj:a1[.o|/@h#n/"j4V\*9 < /CV bA> 9 c42d< ^,] 1mxS0We,rxr ,&d a,2,S"ےYOT\6" KJ2Ky@(,CnMFe؀;"D؛hZ%tea,a@@1?Fcyqf!ouG$3<:c>xaY` };ҿYn9Kq4O ק?>!%3}a9,HTMH]'P0)i=NiS3amrx,~ So;g3XMJI0УQ>n+ iX̎>mζCm wI}Gwj=R> f!:oQk`7I Vxʥ2߰z@(zs V7{^;P@diR{bCKBlS7=^u<m g:kw Lp+A.&ğql;Gڑ"&oq'~k離 Ϙi )7:/ڴ19U,,[A?䔔NNr3 }>ym'4pD=ԥꅢY*6)a)u׵YF*<pX2lM4!7cxR ,EKxA ǭPd!.IRZ㐺v`/deݫZqA׸%u"An>KDzh&j'[gqc3ߴk$ R< yz1C Z Gد)Dz(/JUW\%5 lb(Ba /0?CoBC`XK8 7+ugG">c&Jj[a>m&ٿ:z? `vHH}[4/U:MQj ?v*TV:W~ y>;YLWedx TAL1jہiiu[ ȒQeR'uqC$p Fs:|+ig={hƘvM5y-h+C|3+7e|.sĹՊ]ٗ>;v:52"ɛקo7ndɘwKg ^3<70G/ވ 'g#ʰgH鬱eov|<Ŭ㮏і^`w{}V1ۅU2_{Aj}q>b#DdNo'y{j`u@Hxι?=q|։ 6/5Uvlj~ 7ݔ¥ߨ~VWm׊fzK_ۊ7CsN+wމW^/qhz+XUʅWO32Pj~~~߅(E!LCSj;0{oa:nG~eo%#ÇP[`V ǍWU|g- @j|IfW B4\+m%xEx5jq 8Yj j7pӐu^Ho<هci >,;txLWQ؆kAJOgYfsbLr%Z͐(<@jьq6/$x(~ҩ s*FjTq 7#*qMM@rRho3#AB!n0o/3{Bno26gm8aG~g V%Bζ!1ٴ\Pz,BZaRjiџb 40qx>mH1]wZao5S~'tǔgkܹNq)@&] F/PZǪ!T.r鉌19*n_k* Dzp2逦8!-Dz+K|IcL;| R/?%qP ݁^rΠ3:wS& M8 Y=yZ>\-D>;&T$cI$z>=Bwzz#cZd[WN>p"<zg1ˇuw"/J.讯RGY(oP4Gxe+ xQyA[J΁nb¯[A6dۑf _,O۟Iܜ)?\164*1H6b_qK&lXV6ʦ!N|P'=MmU[6p LBNvYʚa=cT;@*deQ:&~:3 nJ.= *R*%ޟN&w;*OԖ7gH3:tQi!: j]yEȋ/rψ?tӬ+p&?]0ރV? A]{}cG Z6=ߊ4$ ҄b3<ɳq/BEj+Lu?K /)i6ߋ/M ~4߄{Ɓ;S}oGML |vrme˭ɗ{!jI^&?;XmcÏ&? QMp %~PU~ |3]O Q~cw(]/]GcԸ6!^oe ?U/Yx׺S/Ѽ׬SrSǣGNjfir켆9e_~@?;i9)k4)7Ż!0@uknhH-{'K#u`)=J0Bz珝rF&7 ="ǐ+\Jf!6}exӧ6 򹀃M8yNT!p,:Ii'gr:,Yt $ ڛ?KX%rNv:B9eZ4u/zJI!)sT ' O(Ӯ%:g*{K-SEv% p $H 6ϛxrS;yeƬ8TĎpBBݭc>}2^UQ˱fpcA̓}0+دoGԖ>w_ft/IhPO.\6Y[ (H]3D_,^p뜦Yzūk jxgC4/ϿpDdFiiRb3a8(Qek f cyp|.h.6 6}ӄ}ՒLlsxpKDd5l.)9owR},=eᏅ%+DP\MB,Yeș%z-by>. |xkEEW8ڄfn LRN؀D8heF3<YjՎAؾU ۡ4A CY!2m@(. < B yC!ѱ4N&\N=,#B=_N66ߑ"h5EMӵMLӜ5 kLڴKc/p[Na0HSHU `/ݥ\IT/,crFؐx >G8yLbꄍh5.EIJG陯D pB8F7vmM;"ت2eU+9|w$dS谪F_(.nا W Ղ"oY ޅ|%3 g:fzTK4z#GL\9:m$vCVd¡h[4!*GC1ǔ rТ}ҍ攔ꊶvx ,LVK9}4eYYkIYXSw"R< Y=;1l0V66n,ND$w[Xos'xU FMMYƚl8rFXQSOhi adNFo9f,SҺy'.mSSJ]wC8yJcS̞ԍ!OkT+UWd028s"Ռμ&F׀ 㑑G$O\4ʮ(+_w `xF0]mh z f%j̒#aÍoyh$N꺀rRsԈw!mOiegr #&:4v< nS#g.|uTxuN;ՁqH `S*kdXގQpT1| )l/ \>>$mk]Kϴ H9hY),%^|ԝ^|^|IEA;j?HwP8 Y}j9N-FirV | wᾄy7Fca6Er_%X KZ@w..y7(+nSq8te ;0 UAM8M-UM؇PȍPJr휱i|vz-[z]r|V6oEH!/*?Nm|Ұ͢E 0(i⶘>*(됯7s"ui 7וf>%=7"NJRꋾI|(ҐsI(zuꙂU;4T Š;\[i5bhir-V+OdZVV£,䬲Ŏ[AS D(AVOiב&v-n`/wOCĵ#&`f0skí\e+ɽ=%K3/KQ~|H [FuAƑ]hB&;=/DixrbR7 1 E}J^f;; e8SB7a63vma)`OUw/O@Gs QjQg. 
;M߂hi:%ģa"'yZ^Fj-<%"UyJTM<%u=Svh32O} a3OI]P_p.Z)|4SԏVeK=K[O,n$ORQ!UW^;ԁtBs wB<4W#塛@-Sx U{ O埪Za:^/A. Kֹ=j31A<6c+TI ''%j=% C f q<]N@Po M 2 Y«A!k8=JeD[4q.BVYUrfckU",T+'*zYv4 m%D9 ew9]=Qb xBdX/Up)H(7Rwxees d֚Y)^L%tx\ލs;h!.ʶZWK1Z ',@Km> dUy=-odv&ܯohۂD4Q9< vnA5N#gg!m]xaxCm x#ėjh{1K:U F4"G`X hpV?kX\x_rK$ؾ[DXq6x4I*&8ԽWp,P6ˉYk&u,d}I9GO>Rw8yNAjAwiMj|#Ǘ_ {|՗ܒDuɡ_dxMv?ʸ[֦Y>i1o?oM'*Q04a$PMdv> D{Ƭݢ֐Qu ) k*ggx"uO([*_}- .GDE[o⧢~EOA~֒^Sp!BH4/rC'!lgN)R Fj(Hddki`r.!ugc C;gȸ9J&m ,>D_ύPVp" nPsu? ʆ A`l fdžD *uqbQ1RK~8d9ώן K]'L?;`8_@>-{ce7UClv|u h?ފX݈n2f;YHLdsg0 RuQȣͱ Y`ܡڊp+&&W!bdZqۖacZ1}>)܊P7udЊlN'Ck{\lN, C*2c6܆PXLaGx%>@dʹN d<(8dug ) A> M&ْy=k"Rv jsrx z!Tg hl*I恋cY= B>dUAhA;L70ÍR ,`(d}5`N.#Gc!yrˎfiq YnY~{e2 YjeJ.W7 H k/7JwĶi,!/7.!HSOy,5fnVTR +4/ʧndQZ~ais*I?]2d)kx<}iV:R5h^|Q[HmdQN@h|c uշgG 8 Yj}XrAkE@ls]%e6y/r2<6?٩s@\.̍MNބ܄8Ynfi+a4 VIіAV_b9a·}| r6:)pỶD}(G!6އHe;9+ d"8ݾ^BMbNҡvmCRV:|oQ7RG;%} ՛jTmF4"Gfal:Ȁ)gxv.~,]'̷؄Sb}{}`aFzj" /mWS:Hd W6&T;%z?q\aރ,Q.AVpYsǷxN&bS!v߲ t(ѓ9*B})w@7aGoI;-%I}G[5J~'wZaQ*۵B,B@Qr@f*% 5sAox>+"_UqKJhC%~MBnhn[hmoat K[l#u#R? OVBJvXI1#Ow4b1G._rm66`&J7!$_jHCD'lE|N6/!|KhÐmc \"{ c dՆ=F@O .SUKhBc 5jKxH,AVW\BJ'|G7gb>kV42(;MaX o.P\z+QYgU oqzo4'IT~_=ܿٗq60]İcj6I^ݖxkېod>h'3JM{n> <YgFoLG꺁M37*!1Irr郡KK wNB:$̤p 򔲷 d 3 2~5W\@%,w|/MNz, zg!_lK&EbKiꓠ (0YDwاٞ#ApxTcLZc@VO'PA"9 !p9VbA< ;1y0o_  Hۦ-r$(@6KN3ϙI/eͼYf0s{/0,n^Mƀw!m|oB!+ɤcMjT8. rhJCn8Y*kY pRc*KoO5d6?v0;-iYeoE;@t,59;,h=< z<ȋuтCx8U\gYYmxN\g 4q,YeXDvrTu[wSb.DΦn[%:-Y>;-r3LANihZRNEF!K}9AVūh;;x#OzlYevtC_xtnAyAڨ-׎iiZx"F됯7އ~dKf(C7vNm2r콞Q-[OӐu^23"uCQȣ=x!W \ωi5_/f k͵R[mEDg 85{?Bظ=v{"6R < Y*TWR <c\QUymL7RD,dTMW8Ƅ={6%: LA  G 7>5,@si e zBIUN\QAֹSnCV7h%r{џd6E]'a|@ϊz ܄%g OR'Y` T?Z<;#ulg!K-%h/Nxߎ I$b9݄jeR?A<4U7~4 ܕGI<섂)>5If!˭&15}N< 596gDx6Ѵ98U2礑"mo*-H d{}~=MH,>dȪ#0h& OX.R V^f8y@=mtAn 1%&?xڡ%&H}G4 _ZaoIK KykTf6q4x6VK‹1tAv$,7Zl*A-<ǬŐIM8@ )[FR*0 Y_-?}*!;RōryQQl4pz$&*%`M{iFrh WPNBjMo_ES%vF۵[.TI݋O~*8qnMkCw!~;OmDixM%b͗ԝ^,ZdYv+BģHrf%Y*UuÊ*E*փnoZ%`ձD_䏂<=WȬ1h'Կ|?ѶFL=vBU6IoZyK8!.}@ukfR3-d tSxX 7!!ugc7xj Fce ,9 bӷjDX_mS0 Ugq5J\V2#9x&E=tMɒu$tdyNcLfV$Ar2^3iל31`1gLz{/BxOx%5凂P0_.6ASf|DS LAgUeעL+e?s`/" I`X6=]#Dz'!/E.5 ̒YvmLPOTOBVZ7loâj~$mэv̳ew% 2e=sYx %H꟣ -ϥ[=|JXH!ERcj_'2 _S&%_[-M,A#DFY­X9XvJM? v \Xxi?6D8еy $.D,&7!o5܂PgbuMu&tu5<#8OEvyH6 x-UyzEě:psݕG2QQz f(\Bg:/8Tqشrx77 Vig$bcǴ5brxƷRހ^k6eK 'E2dt`D)Ho6!Kq!b2 Q';Y=pB;[Tp_<׃ 5ދH]x YƝsJAoDGaR >%QQ< Y:V{lul0*Gĩx AuC;$[%*UHG /3){ Y}c1T]ʿu-؄'=P-aXC'tN쏲L qFa;#Cxܴ^WD&9 <Y*SEm!` rJL3,u׃4ރ܄* *>ds';3Ah-֙ϡ P>*e5%/D4pr&!%;~Jg…GpxԹڷ3%#%:nB'Sx=GY1; Yj(殤nx 09Ve956|F݌j3QLB~L ys/Hik ۥ5˥71&Y=?O|Ad6tJmsϖ ~ oAV/G(3N؀ .M@{$'pYFN6ӱ$Ύ+.-3Mbfzr|.[g%3Q~Gy^0^ykHOML\qA8ɴHzP&> v)/UY(׵ƹvB/UY7V+A >G4dE,N5ϣ[rJ!U˧eB;+|pbKZ_ڐ!wt 5>Q] "~OCVT@o(=ATٿg!<L㝆%g!U6Q/V7|_[ԕ0p|Kz! 7.IDQH]'LB(kWAZ (fo2UY:|pLK|7~BQ*9jl:@[Vܞ9YB2Va{Vu]?pDK\.)ol_u~u_V4cwd< Y"429 l"9[$o>HJ./A]'eNCڂ7Jxֵky}RV>ԖH!dUjDa8Yj\l/0m2pDsr8 yR.]RU6&֜ܲ ] "DxƻCviDa8yH_ӕ]؜@nBJCVT31O:ǵYc.cRi&KnJTo's甉.b7+C{ĺBn)8R~ PY, A5Z"ؤY',חE:XˀMh&F{[sff?H*UGT{-O&G[Д@ҙDl3u1IJ.><@h&,t\M)O©GsG%:eʯ3D: e{V0ʕ~eycGecӐND*MP"E}^而 "}:4R]>Q*-b]/ t&l,-vq_$Kp6 2œ sE#oPAaG"< Yd i+Zrvm=oH}G4 _Za+Z%˯Xb|XcuHhPxt '+^Ky؋Z0Q=*Uބbce"a@d]Y` #6葺r['џ 6r3~mL%JF⧱f,+P]SRcE/b G?hڅԥ a3MIEKcXZxJx%X]ބ,b-g Khw|dX%Rԙ,pQ\WIǼh|cExNh\E껙m(:3Ϟ>KS ղџU RX- mI}G[ Zaж]s,A- #מWدBģR C[MuJl|ŵJ iez[R|NqRXݾavn0Y*TR?PϖBzI% "\pxxu]j _=A<z~%Run RE ł.R Y=0wZҖϿҖbR ^@ќthlh)Js6ޣ?Jgٌve0GP>2&M:8 Y}6zk_Pm,*لh! 7)^|QT2X+n7oLPH.BVOVjL=>Z>-Nپ'b U|}ҟtp;BV4pT~G%oށ,5ȋy]B)  z:6,^JSL+< /@wm[QoSuxG|-aѩr58}RG_vBiyuy؀HuCn x ec.FXhG' SD.A^RR.U،4FjC%1)Wpw٧]mlK1tV? 
l1 +U‘῅ S&VlџOVq h@kwJ8 Yp$< gw{q#st_А pY=-'}ycx]$\LpYCPt8v޶2Fw?pr6ytD&ia _34 [9mb\Ɓ\nBEQnx*voE1׀7,V76#=ܜ1)cG\ -3\沆àQ2.s2lbL*JއL ;Yk$TWXGb *U˥#_*E -ᔃϬT}NpWOrHٷL&?toH]'HB sYí<":^xmif{׵ED\ \n/uK"ާ>LA5RJzN>W2o-IR)+lBi\ME@3;#G^L[șB37M`*Kyg/IrGOy[`\, NS\ZcQB;Sw8e Y I喬y*F[Sy_7)]lucNx!t,(%8e +)^X<5Fxi8 5^WpD]8>Ⲇ-TV پtC3^/4MGqE8E.Z{ <4YVN-\hM=Ė- VskrWH%}\HWa(n|2BXK=Y/ޅ*)v ~ Hx Oie9`TR \nB4"u#%`˒;xU8fİ#5vmA;j(?bzp%,W꽭;l4 p>ͦ~%DAU:pUQU;ȏ/3Z)F)8eVRRG%)S_4'|ަ1UR.u22cA/;.wôujރpﱨ:\{^gֶt5SSYc٧ YZ3PZa3Jl=/ ۋ NB\d$6ʓ1<5k]3L,a&;]+`nG:^xJ+m 'hݶ':.E9 \䲆=+z#p2V U%Q=ͪ 2;!ԿjfiB\NLho!uG vU19`˒:2Fw\kO9$M;P}ܹ(}ux*tJa#uVx^ۜ*͋l6]^X4m r)`Zj@ρl!}a.woI( xH];2Lz7r‡|+\p,`O]_RȾpŃ{{؜r3uG).k8~aX/sY1Yycu[gNgõ!=ayj r \:٣>1Y\Ox_> \.aA=.kHE^A$k>4.ìTqEq-^Q- Cc^8 nXX+#D +b㲎\0Y%Oкs6e,r8'kE\&lEƁ\֘^?_6gb6qY.W8x'ܢ 7MVkH;̠ I훈QGT 1}s\?/'c\nFFځY.k˔]fƁWa2+ނ C[ [r>E{WA؛P'À'$3vMע+y]~+//bc>0M7=Jgtlk!ガ'Z;gK9BяR{x1&uy.k1Z' \ְ'x6w(NJC+a`.,3>e[rxrՖĚkf,gI0eإ+y70}+cZZJFx# U|5+ 3ҡ5 ůbZͫith>-< ;hI.',u*p˒?=)`#>Ⲇ&B٢ Tw)Q (Kp'BlHEi!A4${4AпP``&\!f;9nx9DXY ǀ\!ĜpΤ!&~t6 3-b~*Memvpo1;<7rYE;0}ߵW+PX2{ӄcsx8E/rYùSF.%"v e 3_ѢT^CqzaL|)5\j(ܡ5 wca;SMPCoz.ޜN4(kwC{t{?pn/`׾(H+ g׾514>R;7Ɨ.b4 m| ޴u+/A/ O^"бVeq$H OrYvrfN/pY5#FQGWՁF *%܈E#.kXbmT>._N4Mcʹ /ҕh'9I8)5$3C4%E*I5 wa:Ƀkzg*!/ ܘVUx#STBaLXވQ{8edO/Ի1[ωy#.kwe.*\rJc0Ju?}*:\[x`x䌲YuJ; e 7v4Cd'9}s%$81').K#(-+EAkwԣ`8wMڛzJ jyC|2@m7zhtGĠ kLm &I"SK&TaaظH-h:n 'D0 4RwxFB-k\TD&L؁&ܡ]x=n@iN 5,7#Og{pa|~ٳXaQ"@y-xLC<.d{gEC"mp1/IAmOqYci׃@2).7#ԝf!y̋ԏ0|=Qj\pvKxEk>Myrfظ~*"/<"NI`oBuC}7EΉZpڒsJT)(Ϲ;aڤEQ2a=~O O߫a4oo5sWMN`KͥC-g,*p]/N'(;mmOmdfTV!u{\PYEQ|e D2m48Bq{b ܒwN`/Ƴ`vwzq*" &aVfu)|e .u%FTL03ĴLXbkgkr{ok+97FYe MZRHm$uٗ=_qW=ssƞgl'AL7NE3O۲bX#ltJڡ65, !R?Wizz͆qjoV$%\&` U)r1ZwJ)_]K^5;+'_%IDeyIP~RO9Cf}8ǹ,$n3 .KeTNqYQԂ۰ nfjraIx4Y_u쮕ǯ48eԻ #azJ]\O.osE{B=z4\m$L)NL#R I4xR ,h2fBze MZ$Qz"T:Au2t xrYB~CL-pP5Z^ڿn2E{]sYCo-^ד2\䲆;.YţHI^mpvR񚝫mEsqpdB#Ѵ[@H1ไ[@ז[%&F\DRy.kpiEpB)n`/Qut81=xQDǹaEx FO0|>{Aظ)Xׇ<O\e9etG!b2.OX2prU#4*5R:D^|ErYnq$M]0|e ̚ٻܪ JnJ7kfؗеf&X/it`Ic|;sY2cۋ6 R?Piz`͂囂i N@r7Z9Wb0,3j0ZQ7 C>2[%ܜ\OUCQ\#Z؋a8PoRϲdRe1Y.j5 z(Q\sYa=Nfa5lfIΦDfx3bIb8e y9] _a4y;8u1]Go/~I@BFݼڥrŏ.cMc) h,BQ#.ˍib~p~> 9\i?~oyE]P<qYcA)U^qE';e NC7e NqYr-^˘fxrM3 㲆r.E'wF\*Z n`}\XPu"2Eaa|1U!E [+9KGx_ V"4)He,5tO=Y-{,D5{’)l> !<9a>r5(_O1A"uIR]6uV/a\,OUwCb&QNp9䲎$vlR?UC$YϦvr+.$WkزK5W33lr#1 fI݌Yxc ,qhLh]p_KPSxq 3 u.RjmJ"N!&@=+Tp6+R\&֦bʅ+\nknᲆ+>X.eg|%|O71sY#\;J-kgl0Y H*ŀ}.\Y fq\(CTj%7 a4R> DCQb l[8;@QCD :U`q^\Kq+e703 e '9~jNGFqh'RW',p\HO0|MÄH0UP!NmZ": Tc_ԠBū>Ɯ^aE `=I|n+0Fv J*H^a3Nxķ].4 c)crYEcꍱkW-rY%V 43KϳJ,̾ ݠ_d~}w @yLx0lLRe0c:<Eޯ*ezM&qe҇`F 3#C`$5Us7Fhm[nx%#l I)ArioxAYgxGoEl hhx6F"TGyfz4GǥH'c''oLLq9vEnCT?Ӝ*n!Fa-7RKj^Do0|;Q"!T?= f3ƃ "|JH7!i0w%x#\|RxrD'qH.$tVH`[ޠg|"4%>"6`t;`>ZR5yvJTa 4KL!E889'3j?I] #[⺁y![p܆;+ Dc*Ŭc@}WP;v:52"cr";g׮NM\dvbͫٸkz½ļk;0e\oGKj/Ft xa\|& >_HuF[{SDK}5ZŨn߷RrV]t>E6r1SIޞ'ߞmv}.]`UmVmGf;~.m+c[cF'&`m"oy7{7U۵bY^׶bCS*{R+m/ ї=]0o #Qq.YVB>c<嫋WW4 ρ9eSٲܱ="Vs-;yo)R-]Am0J%lP.D+m_K$I`Z{ҘDR岆&)H/pYcQ}K}6D%cI]8e %hƘ>956y}zFE\pxnVnч(:>yulᝅW7NǼ؞KߍH(]h~KT)ֱBu qcp/u/ꥨcPF,j#'ZMFSZ~_U%0Xџ}D|Q75>6$b1z6K[ǏSzX]{}m+hr_%ZX7従;rOk}7} [u=3O?(f{'xTkHScqJ#IUg~ L3쩦_ &*[~V^0~UrV4*^x- fxo KfuУme9.kHKDmI4!V|{`6M[XȫMU*r͞\݀f_ikrYY6%X8e m{~T\s:NJWΚe.C$K\K WcT>C( aO/sYҋS7$L܌MRw8e *=FzrvUƮ]nD*p˄Mo߇}ʭpBQ޼>}cݜq#kf}hg̣BlFXau?VywG[RwVRԱQ#vV/$oOo_XmU)~U׺sEzL4,u6Ï,.FjXaVLʥK=0[CHa4JŘDA=wB޼a,xŲ :emZH*E#Riբ'LpqE-o1bԯjH f?jzXًYl *Z ԓk\3 Ĭvu.]?bFoor9qSڮ.prM/1+fneBEs$3Ƶkc7+Ec.j +Q^Fcm !~~:Gx#Jm5lUߎ/UPt ޅG +k Z7EyN|&|[@㇔asJb*v`U?~i  t GAwTna#Dc xr;4 4BB܎e5ۧ<&<\5D.prbEfB>?.sYՎAď9"I*FW|k fR0dF'2ymIОkwoZ~)A>?5\LA)5 ;Ovb(8fj'Z"_pcI+1uX,{8.-YeI%ky[,˶lٖe֌==P.I\SU-Yo $l B^!@$HB c֏}un$wi.^:9s=\s ;oa:ky+ .6>8swx(Q|-Wα'QL(]+C# K]xW*? 
YcVhVTR@WrD9r|V9;w.A]ϒ+ϵb.P/Yq~2/%ci+7={N`8崰llrwO?}A6njby0<8UǥuIkWL?m;kS)w d2 [J{Wg_]$.'(7Ш7>,᳌.M0ܹ1ޠ1Ip-YPt$- n1˼Jg-/W6ƚVć߫u ai{(Aav,pxRH|>Q?  !zPq1R@.A%߯UK(XvaiWA4U1ǃIhٯ t[0+>K?y|%G=Ppz~$iZ 3R66#|Mz3-sYOxTJa>HO].мz(3LWi̷e7,{nq shkhTbyrr&g3pFQ/8SiU/Qk,ˬ;ޕ&mx{reot`Y2cT"@BӐP̬G9Tl0niLs8[Ƕ CP\?΢<:u4W!hUۧKWgp: Fq}ɧD7B}kh*|G'!%(m rVOگN8U`Z?~ R/\c; ZŹk6%io-OC֪hQw\ӳU:wF3s|tUb7^`LWb4P_#cR\WΩa_4`yc@^ ͨ{BB}qc|& Rv;eVc\k9k[5"4moj#EDcS~_|WgѠjAήǼujgj0X ϣ3{˱<) Ghz0&G7~O P,Z:xY.2X[e3>WHsӗ(bk[2_@-K6 Ϣ%/,;ӵӉºYx vVx[>B/w5OfP_ ‚CBo`w~r\J29O UDQ*<&tLNα7R@ ;lJ:4.yl`ԤaW ^6\m?QM7_]\ @xO+b\9]ؽmyscwqƒZEOТ'-z dŠU.CK` LYnH_y^^eZAwKjffܬίZz0YWF @چ2=~ApvEO y4c^JO!?}TCAF/LBT א\1E!ڑF0څJKM6Mwj-+MG W?GoEj F|Lß)ޯ8rFҲ#< 4Pɇourfj9a*'OJճ*:6;;Q/>WZ8ڻ"kHo)%J5Xlս%.f5r4Q#T nӿAMO=۠Mir6-Ϭid9+o#o@&LboB} ;pRM\zAƅqvF/f 4aB甅,j2ossllA i Ųbt[B|9\_g!Ye90>Vq-ڎ.؎ ,MD-J:7ߣ9ќ ?xٵ\ڸ7hŵ-_{!!ohfBNp[B/Rf5{U+N\4e"0bOOW:dkkKUqOpRO(K=ޖ #RBQFa@0B}+y-pMV;Sӓf@EJ 'F3ƨ܂U)3R04*Ouuu hQ}`0)q?'²9hλ^nB}>;!wݨTrV}x3n1Aբi3 ѧuaJmxn"*Qq3ϲZ w!Oj11%*3jD%No?2P5Ȼ2m (k|v*g&&y 1s/qb£K\Cd \`2 %c|Ž|³>esyĿD8Aб6b;jiwbve2XsKىFcYm^*wAcјEၻ0߀`/Ll;Ju*"f0\ȨzM8?.ZqWWhth0.)ĕAbB}Y26PO#EiR\NdsԸ[TxmI4P%"Z+,}gcZ 78-CNοkAc A97Pkr !a?ct. )  {$Oxt&\ *aȏyD|D3hTϝ TkB#cˮuAfxL%@#P߹2B&ٻcJvpIW!]1P h]>d-U\gä+\ePR`l>,10q*PQ]*'JDE,GlQXj\qRO(K1ߨTdT k`lݣQQ2=|!C@Od#sh /X9rT5d۳]F}'XGɑz}GTVF*xx)|8HC_[@qơ+Qj *d YՂm&9pG-~QNџhv*%?;oc /B>jp]V?$+=ߝKg6~|Jղɫ7f&a%<-rыur/l|9isI1S/9,CuE/=Ϻ^8}gqUk۸HĖ7j~~?ǟaWo|+ ~K_KKom^~BH ~ղ]򑝇r)ZF`#~wkFh! 1JL)#t>&#]cr#ՏhnѤn9rk? Toٰ-_5JgiN:hI)f/Ax(&!6 aC.PWR1]PFW%:uW1]PFjytKUܑN6ߡ4Ufg@hup6J?ׁ@?h˾I|h!*[g)xZ-j#"h'UŁ PC΀s xuet~ nʹ0[>EEx3^˔M΋z-h3P1?ؠLdc1v\xboQBe_Z0:,p@y"!b{ 0NuB(7IP(m/հlbvCLw~>DY5CZR39& G' yx{2/#(AF G }L) v, f;΂UV>~㊨) @SJ;m-a+2W&]6Ci|3gAK`EBO;OOO>,]I^EL3`#Tv/tpBT&)'wað"|7].V_8m̅Չ~3i2ZJ9){BTNˍHSu*bXV̝>3KO'gg!Mx_>t -n;t6 7<~vVi8(UϧzkyN\#eUi(;/%$; =qmM 4ӰϢJB^}I;[ ~Hwv]$* 1j1 D*$ǡ(#Ө831 iNɖ̱,Г7o ojtPR(?SƻOdk¿Z/b7aOBX)SʂODo UnnХk?e) 8zy](QXA{ }} 3z(td % ]-ِkr؟I70)˅IP[̱Bz4#cXFҶI[o)ڶTpO%cSðVT쌏FJ_f5A -* A?˓]=6aR l-W=cet/D&ko B #*K=9BT69΃Wn 6^ݶrmZըdK.V5(^uW~}t2^Db]XE/5<kR 5b7xZؼ^KHT PUXt=B< ;2 }T`' 4GvzN!MND9A-0 :;c >4vm>~o ۵ěT®k}\E!zAK \}*#1cGQehs&faضI;u*+.|:{Eg8VM7U7;+ژ)P,;V]ݫec xtT_4^tJJ%IAű'v[ n_b<ZY !v Q$ƙQ|+ף33:luJaվw!)atݎ=BYKT_'A Nmcn/v&617a} zbGT&1bR_YZ1]/c,,}Er'e /Pp/j#GM<ڴsѶy#S?jb›NW| i\PKGѩ͛7*I -B#.!eL%+@n[^)Uo(5(8Z=*cP`P\sj<\ci3@ A_8:+]5g[wJDK{ç/G/Z{&/c&'LfDG) h|Q-wbS%%1-- lN~T'ض '{c.'uUHgyh {-jtUKW.lǀ@jX/*!]xT]BK^yM7uNOGCx \mՈi$F7u SG="ģ%UsfٲQ ؝tC 2ǵGXx)M>3o w5ǩ2EtPCP7 hmM$`&UA؍/V?|9e"xCI&@E!+~'M &lh%qC鋮{3D͈DAK3;̀mo4+F>6ٶه7a߄@?Pp)6RG"~M[0'1cmo i6 km ovqUp=TfJI?jHUg_RC<:m%Z`9Hr $Y4tgN$IO)KM. *#S*1Hi˳x| h# =U~,'g@ % ADfxة0W$3́)u gRbޅ}@I߅݂p??V5Ŗwh=npM<Z*^MлdBS<퉫C ytn,A_[# r#fݫ.-D5z6^?7-* 3a옍JtX s;b7튕拪Zx%VBJNщWzV]gTʁ/{@͠#$h ib}/tv NyZO$-cHF< ܬpېY935a348j=HPYD"O鿎4(0ui3 &jC"I2n2qUMW3$ ^ .!Ї2FMxabYGi)V tj?'A)iw.W@_kEh&xtg+k0VD9 <X~xDʲWQK&^6h8!37n\<3$ hNb < d uOVX%?C<h9r;~4x8Ltiai {G4}} ̕pVXo!vG#-ýp˂^ݎ? ݡQ /Zvm_~o z7BU K zRB_E'Wxs7sE5iC{Ay ccfŢ1Q^}.;Z};D1֣$MxܕX cБ:*3"_FuԯNKJ9pFѬl8#1l׌Xƛ/jaJ"Va]B~U(jjxcRYvU*܁Z ;|8ϫ4)Lk'<J;g0 4wmpb7]yWU jצ#TW=Js"Գ]Lƣ<_p;vrEAل"_჻n_ѹSCU2#(ߧOs F֥6PHTNNT~6=;ǶM^vluVԦH p z1QA> !>=o}>(#[Cn5ͭJdЪb[29f @KٜXatkT1rC,M5^MlD˕~YHOJ"k.55k#sZؖXp\}6KD>݂ibw zv8ah~/t3&*dOR-; A캀RC֓E'%dxXR¤DF g@ǒ'%b?C<-~5za'Z\BS348+nim}Qs& '@O4ߜ5L0:zs&1rsLs5ڕ͹cXdisu,ֆI:*wsj+!M.Tm77O4Oym. Nytר,T1k`Y?JlA wVb(xZYokY+9nNR7k,n9$*h}9bO}]fxnx=u>NA~myñ5=,Z."Ͱ,fu胄m&4;;ƞ<{gLԖH<;ͷ%bwx ofKC~`LO{|:9, pǎIOʼ_"\>NbY[3b;Q T+"<? ބ[}XݤJ.!UFTVt g)S75C:\%AZ&-uᠺ0Jr!f (hnՉ:C$19:$8Cd`{a/_! 
yCb W$VP= }bf (!n+!J^/$G6x=qGĎ1j cWKeɔ.Vd3-c Gx PޞosqBGBu%t2O)kw Z3IIEa9^MB96Wx'Qz-Į W_JXog@hѓ2J݂Mbwxvh>hMGPhs$I/2V6] hq$Ǝ 6̀δfvMc!`af -8|3̅(h``:J֎p9:%{d -e&*6}KYWCd6v/ I7@?Sdt454uzˎw,ھ$}Bz@:ȳV6rtqiGqq QHҞZCJRl_~I8JOm?م;$;'D]+XH~!Їߡ]7pTy)jԓL8˳V-9.DKr OSo䉌@K]!,p=xSxԥX:؄~,SDJn~|$Qؐ&@s[w"lцԷa^#̃ka,i:ieA$uhh6'Pω ߵ^G )fo;Bs< 6%[ w ~gJ)%bC<w D(63gU?MSC$Pxԭ;b=إ7@])p YnA/O᭰ţE7L=5h;Sc"=a!.P|HQ p'T!)at^_O&@?9uC:KV$ nZ#cЏ嘧1ҩаW+L#1X36Ü A'rSRqE~\:m{36VjLU_`VtO(Z^xL(dǨ.Ql> @gIZ3 3S'-O;!;}q1FY7, J]jߗܡuJ!G}3X+,<s8Z*`.6\rb7DgI8.9xIe"DM@K7Eնc3iu}VYxL#0p)S(M;%@ZmJ캁MEۦ:?%!YR{+ؾ[5׭^w.ZgCL31urn{$:7@'>`QA^5/kke9.Bk*MX==/x(ղuSXh v+Cr= &$k1 = ٦I dP;@whxh$NPJ䉘qN hXzU(KP$醀'/܌c2W*H7 qMV);ţ$C#Reirݿ(2'mJ!1j1$@j ?C|o*tO"ģ`NO=e ̲~&(ߟ+"<ZJb/G !djX|-LrRd\r!iE%F=&Xv1˞D5HKT}~8MafEsR&8Xu 1hEX|*Vaݢ[w63%x{L^ |uMCxNӐp/aWπ7~W3Rzu1sRq= 7Yے"'N)-+)UJwL^gx Tt'A+]L`; :zl5.MbVϖꔣZ>eˊNϹŬkJղ87;;>}cMe>9Fd0M|2y-Ϟ/<{31co:won͗g\Ls2R'Noϲ>{2}NL9D?ȴy)/*bN KƥJࡼ٧?$W<|v%8No+yu~Gzk r~Ie6 ,G7f(Zw-&kqOKm/!`ԯTkȶ{Z5mQT˛1}o7nr䡖; Y1`q!ȉ|J!ضHc+U/r6ul/ >@+ɩ\f|pɮ \{yyI)m]&l0? |o75x1$RG (GX?6E= ϧ¸^xT]4ƞ+[F9ܨ/8Uq^1e(`Zo B5kݧ*_LOz~RMI'G5z yĮ@3l2!]',hm!o@ǀp?.q8(;Q9( ZjGC.AOBVL hje4TXhI#_Ju*#1lT"Tʎ}TqJ*\#DG񵜡"=f1eV-,D 3\?1s㈻Tq*PxN? a @KŶ ~;j=tmbY:m:J|MղRR~>- ,C\m@3A-hPnT)> ;CӦ/fd } %y3"\dP->'><=P8% "?$)~ܐmϲ%v? /}G uyբ|((/9A5^[(@^'EG4xg);o / -uY[\PDs/ 'ZzNnSq6M665 mttC6*6i&q=h}~'JQC!V17sRU%cGk|%9h 'b@K9 bcAoLG%yhzZ1T;֦25?ZnrnzGuT|%+fZ FWBŊժ5P-%D gɶt2$o&9Vس*賒8=1ģs]6?('p <VH%Z Я5 td@X/R/hE ef 0LV80 :,cV̱ug3lV|O Vxy^TO|mqd81xN h5ұ$xOjĒ^a2]i!Ҟ)|oIIxeǪV7:. 5Kv֤4Y_ߟL '%l܄}OTA&2B{ qceae ̂풙oN5(#[4mJlب|R,[D$U63+&O"LRI&Oܺԝ^^R ; nA (APGK惾]xXa"YEJ /FвM;EP֞۱DpV` Ĭ{X+iefnØ*WaQV ፮X.zI94A$.3 *9(q&0Ay1vF\S?#j"$ (h}&BzR$yE'y ѦޱuNc2=9ސ^VOfQ5&&AK-ވ囥li MN1Cpb̨ۿ?4i0&q}0 Z*Fl%vT-]yMd50tUЉ cM$Rx}ey O<-سZrx2pJPr0vBTQ=R$IQУzT oW\^݂ĎVeE򢽘g@( $_ uQH~Ϙ1qs6N R:pbĮxie;ʙiv%v]ar1ی9+4 ~F4Gb/-$Î^}ID,b!ՠZ*;@wH^&J<ZJi)iFb E,ƹwg(H).׍ }&ԷYQ80XMRզ+f0ZRVWp8vC8O/+.\785zVRJ1FP#x5 _u}Iz/UF?0⚔6Rc&huߩs]V GB#5X<5VS mJOo%~8nɰykT~"}RۄޏaaWI&T p#|,N^g9kYU4bq6,C̾ E8zJsDm AozHA='ɤ *̲+F9 dy0'=a+"-s7\$Jsx{X}_ {FޗN2AЙV]"b ^xqجW7Mn,Ulg>j=凇E۲齜CAl\Sd"<Z=P|M/-T:+^b7Z7v]QZ A{AKqgUGb/ UPa7nm+-HoZazzS%W<Z]#UR3s]XKzP|Z=2ykWQ) 92LY!2\fJ! 
h.>N$I0hb ; _v̗kĎ7ökD퉄>ݞR&PO/ <PMuAWNmC& s\ZzSQ{"ALRʖLUXjA*P:vNן^}VF0h#\Cp%̶<_bJ9 f[Ƥ5X a֩BR-[.u,m0ڈ^DDjij;=MmQ*AKEw(U+s8H&C&jG$-R0 Z2W硰nѨutuӜ:B&na `&|bƂ-,JL>OOPOwk> }w^:Q`'A; ZþN2d]5sxhf xwhb?C .*ȷdiďl6,QC"O,Lܤ!7,?z[|Je~7b<J:x!m vM;;'Ia-ؠ!v4]YDiaxTŬH$Y-8AA8NfH 1",\,!^C`n(RY0qGPrh G$[ Zm+wS4p\Tz7فQ꟞7H^ GL]$h]+=O-׋㟊mW` ;4#!{}d` HʞoU-=gOh#I<Z*%;8x (*nAI3<4a2^?S/O^k!Bτs甛0mxIJ~MH%=24Yk%#I{mIY]wgIk{)r䕨$<1hmQ#˂BP=(Q}^J|RHJ1/14hTxpaQf~#VQA~&5ސTGAKQo[c\sN53-'_XE ֬<(o6(״ OǬ o<6 L a|ux;C%݄t{>&:vWbc'TG@50 >} dpLƻO uxTT`]?|T!p(- \-eJ|ZCd@#ӿ&1GX`vE(okU fy*',MTo$1cGQeh:,T9tUVSg C.%Zʧ^pkQW)!a?~mSFWu7L˶rAfC8RwӘkB٨ՠ|nr::H0- G컫N\-UiLl'vo*gdn_#afAȸ6T0G=nzMT A\z']/ \N캀)]WfTI MUC.A$cYI)sDH9anZ0,vh@~]*҄3.Bͱgct8z=]^}]Yw{pzڑƷi_!-Q5< _~'qNHd8hdxFvSؑ>jsbt#&Ao?`%~~8ސD?&7Į x 1e}oK$qrUV4 a&AKB8-W9`7D3Iqx=ei !J |-jx O^1T83wcaބ;4vꄈצ(7${cخ7vHvh=~RѪФƾߋ.hϔTT%5"NG@5>@܎GA**ݵE9 u $&>R ~3O:T'ضqD&{c(="7z`9J𾆷M@<~G!kjb dgTfR_ (C= zr+^o}_v:ҚC7'.Ո pli[C 0Zɓ$$.^T.c^%& \lgU֚SK5/Ǿp-*Hm9»*7 O=t[iO)p'ie=5,{nJ1h#ƘVÍa"c(}E>H 3}3)DAҔDܺ7AT7fcGR 7[99߉!E1$u/|{JSͲytQ`!Ɇà-?: q0eyMBG$t4eE.].VXwQdHA'd^t릵s0 ̉Z]HO$h}1Loq|qFdаU*S@]|J'=|ZtxpIh0h9kG̊i;5Zf8z\Piriꮘ0lh&Itն?nPp\~C۟Oiʮ.`!Ͼ(Q2^ ICbЀ$;̂nm[p\?+VyN.%[m;8 t mxZKa&A@/QaxD ;L5{$)E߳%yL{rIOkb&4Zs /fw _|wվQS<ǁG_ fV&%$J/#EQBMgSdM@"an H*.*N0$1V\aSv޳U^&KY2% ]!wöd/(m|*iI{iŷx8z^}a"y؃\.tƋVЅ=M3Cg{U:x@C@Ќ-e.^Tn˅-N9nRBxa[{ bQZ("ph ժzw60ׁA1 RkpVCΆؑ8# [~ ERKmo,s*;>Q2œ|K6w /rv{!9e:+j[DxYmgQԨHt bFEƀemul399}ӵ6dr xhe8 Z0a  x䃰/?7͆"_uj K6+*24 @?PnL";a R-&~o ߻A OAnUHA` @^HmG h 54j$X (~ޜj R1xn*l(_=IjAIZgźƛsĭx &hvLo=_a={-lULo.OE㠥֠bC/;<ZڵҞU^q?x̬XEd ~0n7 7$IYR aFc՝pg|0瀓'm_/ 5]-5 i 8Zx}}>^B>F_vwPtHxt .$vg@"γtѬ&y!Vۅz{0XqzT6veGZ,Z>̳XڰamA,pyA[Xl"Lp"on@6 Z*}*ɲ-˴ILԲHYI! [٨\?Bp*uѦ9 LaS>e+-8ạ`HRyGxnn–*$`xwv x4*>{y˼[IQ$:S4bU6%>ء ڸW0l`n KB!O>|o^a4˯y/:I,*w4&hyKf a4-Jwضq+w`"_TGJ:ķ#J| [s@RHĮx\6M"c1#.WҀbGȅ~CfNIleAW}q-_ !=a` @k8$j>CcAOO@eD/Os#v=.% _E]>`Eu+gopP?a hg}ٷI!`tF'g1r5$Cx sIBKAOQg!J*UfZ0l&0pGniضq;}kU= {1lcStG2[6Hz# \ 01Cuޏp4̅,š[ v2_I/ZMĭݲ4lcbo{jrt(9K̕=xomXV6E[Wao~Mǚ2~ ]1d;Pw1tCoNpKi8xòC]E)MMA/DSGvwsZ}ţRA-̐;ˤ fAmd@ĭ8z\Yqa=a|G|aHr倏@R-x2<1"ZHtr `4'Oh'OvcUQ{!?X[ zXFժ ;PPyS@+VOY18pN.mw`V?wzH+VNHAh5ҝGAҝس\Wa\ e7a3KIU; -[7huu _8AOk;>C=;*[z? ɔ&[cd: MՎ+7{'v@iPj$9 ̂n}H4\S@Ɓ&s;(1vyVVWuه5JGPΟ @K>@H40 jv8ZǢ`ȝ$Y +ԷTRn4-uWl~ v!CjkUߛHh0Pilڍ Zpt 8zF(S0*cK>;oؼ=E)oCei@NςVO@a"ƀ~*'la0"]``O~gT {":!]/p4ն`g1#.WQ^ՆIs1lg0rNSۂy Y8$gh}o( Ʋ2kJ0 ZdPv]^3jÐtSyR󥰞ƁAס'\$1hsrB7^\4ER>t[!n#(맗9T%vLh= r8 ZnS=z؟q0ʖ N`Ku'ss"xysfvxIedbdUt-*#jK$)}.!wFR1]ϰYZQ[dEؼKIM$:WI%n媤?ݟ 77@Sv3PR-5}gl3V]$mX?s Z*9H;r M)onQ %l.`DAD(ҭB]3X“/fY}RGK՗>r1]R8jRb J:@z5ǖK|@!GVp\״Ðj\74{Cӡ=苲Yf nuNPVFݲj4lcRbjw2D/;;(x= qR.D}(q;=.0$-]w4-kh_ՐUFG@K.uGA*jV.`KV-"Aǁ@K +- | Z}Gx#K1ģ((mi*70ߣe [̧cض1ow`~4h}Iq^)LFxJ%0 Z_;-T67[횶,;8 t 8z  \t <ߡjF&?!ܳISM=^>;_B 茲2 laȞ]`Ɛ؁ O?CE/1)ςV?(_2CA(h"x׆8gGE(ã;J&YBAʞcEr[1k@]hg+G)cm8o9kxV/t-ǁBZG*{VF-i`&ph!!O8r Xqb&TTGo䯏L2H֑0 A|ݖݶCZ|zs͹ŬkJղ87;;>}cMe>_&'sצ'S\0L37s/Ϟ/<{3tH&o:%-?_RzX_X#'7[9 N:R'j2FR!ekմF_ތ};w||'t<+4D!TXaLj0'}6W#Ц]̲'atqZ<9RB9[ΦfDEeZD;8EiM8!D9;_.6|R-Ȟ.ji#iB,u3!Ke[a+Pf#mH%,jHk(5W3Y69395>9sp&qB=z=zXGJi (S559=19=>Wkq]pmjFKC$ԧeNWk!w/faB"jaBm E ؇wv` ߗ} +iǗNă!}* 2:tHޏJlw}y3o3o-7jb]!BMSrOk>o#kdf{#_XE=I&rR41dJ/+ : c&Ǖ~z]<@LLoA,Fjje,ed Vxr{,TṿcY֐$Â5 cMB.bHkHacTҡͭiٶQ.v{h?e:^Xyx$5t/Dd }>ƚb9U܃oɐ=K!-(&쩌fsA5 rHZ_.!$p*[BM~]kׄ{32 ҄-(5Q7JiP9J 3WO|rsVb6{* "^$=v$&ԉ^ڞ+xmWi{QΊ6MqU+H 5_R6&n\O>4[; NSBUv +%͘~#G]c^>|# t*dwoj\Lq`(!$bxeýQ.55 i /45^kmdXk8*;Piyp ^˕PkCߴttc!݊Įx<[Quq_C1..? 
<ҊYW cVuIK!-l@r40r{a iڋ`DwBZ^q{҄ ཐϊ]x?S8x^ Em4ZG%fwű+B i颍wtHkȈ:Eo8YdW] Ez$ݐ؏/{i؞~T^ c|FNmPaUh`c(sTjYGE˫\.Q<^^Ttb{@~sr$g:#,$Ěb?<߬.~MA:BP6_ .fgd|f̹,USo*%Veymnfq2CF;kӪ\6x29FsjhVr1xIQ< yۈJ;LB&c-hT\G{О%<:Mgh'Z`ܭ&ޠ5/gΆ2NV­>Bk)f5h$=eϽ݇|Aб6,jΏϱ籫~?-Cc{z=MAFքS>܅dzA{Zw\!ټ7v &~XЪcVڳJt󤰸 .aᣡp6mNJ!"\mP"XԪ`58*^eQ(]wO$tɘc;u7BM;s]b)a`D$T!kP됊pR A,BM x $.xd>;^o L}Fyddmȶg|i*vÄ ~Scľ hxf5'(۩0mtݽ!/̭MmtmkNn/`ѣI)=U߲-2E z!^Zg a VA#u8p'Ϟhv*%A;gg{_z苐^p]V?$+ٚ ~2,ӏO<ǣWoL]+LLďND>`uW_}jo7`%]Ll÷ʓb_4*MmMg]̞ᄈ8b^o\nb5i*fo|+ ~K_ꋯ؎_{v _!];0U6wG hlLF?њш! 1JLq?g@c\; l8 ?C }1g:jY<<ɮfDS}. /ǘgM>mVdZ+x,#cc{a7,Z.ρ>}E+V9a!8jk@;b& PX_CM,`eVaͱBgՍeeerż7}$q L?R\?}W0`s?`V|[]_{mqslq B#'LؠףZ X r)xVY6kzb^a6taQy6tũޘxb)'*h&jomnvА|54~כ7of5"L'Ņ? g@( .3iR9VyٌtF&_F!xsb!2%%35ǰ+d{3;WT&S.O .!58pH EZeE's= {R>J T{H^V)GV//mc%I/96./lkdejYjŋl{ORr9~yg2##UXkdvb6?tjϜ8J!U]BM ~Cý"a1ܑ!#4B?f0fB"ښ5U j[Y+6 jzAc0kW9݂9؝^>\b颹jTm?SQТYO@?Qv/$`6,yj\C|52)6Cs]6?h&n]wDGtm{dIE~O AkWڞhj @c94hϾMX\kYг*1 22 3\>)1ģ(ej 0_@Pި&,!* > }$n~&nWӠջV)_Nfr=׀Rm]1lz"EU-m zAU]"ģ19 Ƙ.0 ~0V42+Cs22Ga&$5M7? ,h9&i5# xme9Ȍº fnoݩEb g9%}aE+?e~p'Բ&zG@KLu>9(ρJj#>ц. T+TiVX7 /s&ƂT-,ֲ5 NbӰD[2>,ݢDG )S U~f _q9bb3h·[+TrK82k;82rWLzI.>Q ՒH۠uzL M@KyLjSCvޱ6f e*6fe^c $3A?AkVY=67&EG*D"}WY~Qe/o#xvh; /(slx6aΦ拪ZŇGw8zHFGԅFb!E]B._Vj/]/( } b9&Obs[K;Ճ{%K}_Y*[ C "lǪ$@@_h?^}QYUG x'ũ%4t 8 Zx,~U1ģZҊNk*$-l|ĢDLQl%v 茲Xo1'\v Z=dBIxja1'AlàToN Ա;SvM evuDjEGQc1T3ӵ 8LSqNvɤ5RE[@ׄJs9rEI*˾VȗSrT'Jlj#v3Ջ_Lfi+\⚞Y/櫈; | cy-eye6/54E=aO$g@[ջF ZݣTa;4!!ENmca]xZ5Wp\TMBR\.UőcGQqo\q˩N*îH,n)jf`6y7Ao+Ot&YU4{ 0Le'@ Ka i^`aa--U-:Lav$}}C~QxGGϟ cu:<,π_+?<'XgxO[8 -}Oې,L<>c2>)lXQrja w;;vMÛv_}ٛ,- zRm_}}(B=7/K*,9MǶ*l*T$u >#Az>3m3\9Kd<3ah[.V:LmqM9YVNA #ۿ>1K'ضq%{cخ1ZU-:0]*-:0NBiR# kQY' iCJ I$>:極^nٴ=*gzT+yhx˂:0:(7ܣW{ ^3yRbB( ӕa+j$cIcdɶŨ@^fK8zs uN=\b>xZ>y)FQ` +=t8C< keL ˠEN7B}à`RߠjRb7jl,1lxߨ݋م, /!_p(Z萪H1SA}t* XV4WGFyμ͐$=c4HX t z|/ uC8G`SST+*c$Ѿ K`A9U9I8S}WYdPe88K&<aD/* '6,4h = | AgkW%T+SxKeAKxu8VTEn*Vv6۲'/1~`^ wT\` ufV5 #wZ {ACh$YcL6߆ QդD;Ķ ZjuFk}FKQ$1):L& _$< Z4W b#Mg8.AKtk. h 5E $&wd_(/ӼgD:-3bw8z 42ЬMS.%ՖvM{dB_ ͊E; rx,NB'@-idBB#'cGF, xTK\#@˝<:LBjjUܪUzF]}M#ͱ #Ѯ tՈEH"&-Nhwj+޷%$R?Pi~'$h1 \Eש_yp-Iu 8 zRG ]w[)==C87n׸Brߣe׭$M?0h$M1!v#(+f" r8 zĹ=0`t +E?̃η~kWmP_uIYv,arcYs3]d.^RnCwNѝ T3nuXƨDژh'Oruz=<Z*M)5l;_KP+Kz$ik_Sz$4vk{`_#"oٚ'ck$`tFFC+DLVwe//̄Pi!|>H5C]GoGV'ݡ$ m}0ە>,#XPcEBPJ5#T&q4z2BywnL~»E"Zw#MJ*L^݂$%b7LVORaٰgeZkeHe0AX a GcG`јaFoÛ6U-~ݖmeѳ7$S`JU1ģ+|$)L+X> ~Ze _l%?}^-w^d$pLfb^^wyN.AZXI\G7'otMJpתTcãBK1o5NV#7jLw &XX‰o:"fߎ>DzαHcGQs4'̩N w? k%<ZܭMBRz?~o |oZx}gD,^؟RENyy|/B傣yV"sR G#`$Ko@.'Ao˲-^1aE-d>nQt]-#G_@  Ģ'aM㠥v=ʿeX I1ģɸpaf),M"jY$5cG] ՗;ͤ?LI8lFt@"zGAK+ HĮx 1eL{Q7^ȣ6`\/gG>nRFa~ΟE9ߗ,~?[LY^Ҹ!~ T³7 L>D}uJ Z~ya7_&eLLꌌԉ'g?mϞ̲u߯x9ćpK뚺2l#PG[EivIN岳 |I%lk.u~_ɫyЮw0~IYe6lۧ|Icya ~l$8KM@:vNshwJ/JlU7v~v)M=s't<+4!TXv)8-شY$E W )rv E{#_XEÿ$^H)n\2cɔf)Ĥ!iv-.&Q& [=Pq,U {˳x&Yf*ʺYvJf`U/ZP? ka4`OJr}ףx΁V`mXz'1AŃx?5vyUwôl%$< Z_۽˕k \;G@Q׋p-'f? ѦkNե`y)ϝUpEM^BT†͏N6lxem~]BI)Hذ!vWWS wHOONflr5LL׆WCrT͇RlҲF^u毢YGtI="#uO+esf(;2zEԱ#Mfh.#kJ^].߾@^XC%ec0_L4%\j " KR!]"_ތ 1R4;&OmTc>1T@Ooا㽐H1ģ8ЦՎ0!ŷ2ss|@Q!Q^ !cQ7W֏^~a'"Tֽ9C<-$^OtZVQ9*$!4hk:Įx g]'P:a g ۦEW+o!X$Fr rIŦz YO::Km@jCځ*N)տ  \4|#=ffZ`?C4+[Jk,d.EsO?01 TOglt`JlASr25h2f@g`#cV=cT{}s|pmg|aIXڟ@y7asosx&{)#`B* &E!V؟!M bQTdṠ^( YQ/ *w#W ?C<3b~h;A;{O@;{“zvi]xX I8`NEȞC9Dsޢ-)^? x Vʆ[h>ʪTRjb DfGTRxKmvL݉3,}H^8SVM37}|hݔw#eQ/1*"JR09@P>XapL3&j^$G.x4Y~ɽ6>aM@ϩRJC<. 
rmx7@w/ޠhk_SnP^XutUSdxžFúD9;BȆǻeW#Щ{BT~Hpx0fF3j1(9z(MzD!l`#0rǠ+f1Wkrm} wjV5x넏@GaAW*..iieq磱 =#~cp;Bvy‡-*J,r>) mط\Fcu.ֵ a&"TݞCbqxS:hI~7VI@H:#1bGQgi &YQkyGr]IuxT(;!yK!=mB/V5*ZX|{`Z$j$({rc}3̊^IqeKԲcImQmJ IWR=3R1 279.lYfLB~,,֬5[]La̲TlZ8{<i%Ef x[8Gx6&pXlona'ضэ {c.7bo~BU {).!YOߓ qTFb O\Q ͔FTfF1<=AA |jf Kyg+lZ9O„ O03NHR1"v3a:7wC"E=C)!o]fD QCoĮxjv< 2sE@aTϕ|b26KfYxB}OYdUabhJ$N/p(ղ&m2f ]cӣ_Ex^@-mDU]%Z=0pv,mQ#aMeTZXu^x"aNGA6ĦB)+) X.RZFZ |dz'cV$YO%>%9Yt&'K{^Wgdh=eQ#9OM]fAou&憐q eA&"33s&,m ):"|m3R=Y| 27VM[87.y6nP[6KYf6Xa=01|W}Se:+~a#z408 6,Z%KKF I<{ͷBbw8Z=A/c@L>nWOGP*_K!;ئehh$[ԕDrVF05$rSJfY93')f!a@NY$yE b?40Znc2n$d+Y4W-ړR̽x RM|qπ{T-ph?'qѦZVь6okzI {ā;4ZN$i'ۜ`j {]ڠ99U5;NΡN*ғ1ģ0crJ.MZ~6~_ab éZ&hs]pַρ>4q0_VDR1qxXy F{/A: KF<lΦU6jf5%Bnխ](0]xyirE87Kս1)Pp"ϰ0\X \):,(0!1rLDc"q2<|؝NT ?Oec (jR$1huזeJo~L*j=;vT^>5=I5#ˬx94 Z}GOzZ΂;,-]2|Z VnA/*k3+A<.V+^Uhv~@Ëx2#tNH44 FAb#՛# HF5~X9p\0&j$q(?2Ь=px}NتA*^}5-5ImRqgpf鸠Y̱yϫ 9T2^WT%<ȾZXbEGp!J$ t ]?(Ki£a 0aLM#3ouxw%3 a"WM#GH>q!ρJ3B]?DmD^-{>;k9pi-n4K]Zv Zݚ,m`Gwa=+4YR#d8 bw@3u薄:&uyGFi6ƀr1ßȱU$J٢D \y<,?eejߊEit\k*ԾW1Z!( ~w\<( sx䄾@'AThxj%FB?{Ut^Y5!h3 /_  Q!zGAKib<@8@OVX<0 ՐC;m`Ƥ[Dخ›*U-kOMjR̤HI[߃K5cl (t%6.eDX -;)_[⚁;p-=‡ݽ'N5e2zڞq{nUū=f]v he0,㋄YReČW6SQP2r,t$Wz(_ԺK/WE!- gàuI0OQVѹOD )*NK 8 35^ґH E)<ߍ8#+;_ {#Է>$ _ HlXO$UM.^lnVޏ'cGQrE{Z_SPH/&보g x"8΃VVPߧ{H<g@Za-شiw,cy[lV+.K}8 ZP#O^`Y6@Kk)E]d!e\ᣜg&?O/2h^x= = }0Hd`lDb7F-U-잍kUx0_Ue$` d#ּ$ɔݮIv@nxA"y0P1c@=tu=7gJ3 zSeO( %FMx =GD'"7.bwx t;Z#i`&5Yˆ=wl;1lP›/jaWalDvjEGQcX`k>Bc;ט8Kr;io9eQ";h}c|`.ꃐOBbw8Z=xmZh&V9@q͜$O/07b%m$יHDQEQD6EQ"%"c.8dd[`7AL$Nܗs8w&}9w8&doޛ7nެW7АjMs$ ^Uz8Y*5]-A\uiR{D% @nRw8YtlGl,P)Ι,$#^7շnY8hJw Q2H늹byNYW3Ar݋|btx rI~iRb|b0 <Y* q[@d)S sJːviG!;ϱPmvhy?O;%['NG7'׮R~|kZ;%TA7V[5[+Zo.XvjǛUka f\]53m.M$x@Ԣ 3$c1ģXYy2m1 fQFjvA0 9mzҵM!sxԸ(6q,d 13 Q| lA>uATm'u== ]R^c#o:gZ9 <ܞZ4 鏤Re-c}(VWAsO-XYFpTd@CsoUԠM ,52K!wp2pXos xTyN#@}Q{{aSv1漋 q9 < ʤnpr˦dKSqvQ*,㙨MaKȦG'%3dhσGow^RdFAl{!!  O6hj I= yjhD]PIDة_WMN-,V\/p=2]uF4bGnTb}v،"-04,]4;MFÄMt[l u=}BSMO˞U#ZNfO{"~S]c:d<2RZ|wCZ/NAUn - 6fd(6h C{v^au4?KlZx[t-p@Ԡ bᤁoEb%+>4]ߦր V–S̢O OC$7T82oZǧן&HgZz:vQl5Ϭxcbf uT4nؕPӡ0a7&KpQ[qK;]]PAםwǰS߯Z O^ɔTx6<$R )RYu^~ԞW1ey[4P_!6)#e< y3,jCw~%upFb'!fHל&ZO}a褀!kA6_&ԟg.<|(,uY!u?=hk$wZG ;ox[\^,uMo@ o9Њ K-M8d)J8 yZ`pJ^ ܋"++KO@>z!u'!T^c V$+&cb Dexrfo. YݽV䰧mexH4c[ʬ0]}MvLOAnCR7< Yj;0gK̢DF _S'laߓZ&2}D۶I]PC,#1ӎM4dXUfCM?`.Ozcir5dJf7! ޤV (5eQd2'Q6awi`r:8Yni714Zt+N5F|.Ox1ew Qfi0M M R 9iA4NoZHzWDf8 Yj,prըm[Kې#/D/΅Gضb$o&d}D63-R#ې+S wbsoQ`n'BdH]Pܮ++† !wa[!oXMUhE jlM|ͱ-xߢZ n4[USQ-Dy#14m$v#ַ@ ]7bd8 ODMHNC[!&|LegD8h۩|Rw*XӋN$G ۏZM@'MmI]P?#՗T:,/|AZ,䬺-q`r}.C<<"d gAϭ[4PY,h%pWCw`Jƚm͇xm3<ʌqbcqp Tg8d&Rzƴh +oV *꽜Y^n֚e(sg',<,7++4r L=eLWJg L/߁F/4f2°=c_UpWW-FIl0kfX eirsA2Mw/xM/C'R!E Z:t^[ BxzÈ!Vw_pA:u2D! < Mke ܆:n8yBV7j;3-X> iev9v0O:B&T=]Z,LOiG2:JlgTh/(i# {&ee ,GB: 6vth c)W1llPbG#=hW|bvxtaokpoK\ Ki Y=T}d.Q;tɀ G(N%.Kmd=Ѳ|7 lGn½ydCI8545ކ](-(K(jNDr8Yŭ@TAZo„27aBoלބ ՜WCބuCV!c0­j&7>Ŗ>B4OA: dlٴf2\{j40YjK>6 )[;K%D&w KO{x~ yGx3<63v|MTI[; <Y.&-p![C 6N++N+H}w ;51SMYTrC_NtjAz{!K-K0Z;Tkh baTV-v-F0?dh[Wh!A%8*dadUv|. 
&BX_ǰҵ4 O8Y=;F"mL I#Q>G,g-^C ս!>A~[F˰G|DG{RN?7󪵰xy͸wC-=VC(]]+Ζx Q ;dS d"u1c}Tdc# ;< |?CĞ/rfƑ.GcةI66یY/p'd;> #{bGnoD羑;Zr)/.7߰˳)ZI4&wqEJn~ōl/YMj$۲FMt-8(-m.!}W?C7!m'˸bEcF+bFDӐʵ;O!jA d[{mkFDxBP1J~h-Gr2޶3^yH- K( @>Ё$۸"@3.QDة Z|E^j=1ģXas+<Mp$LNuj\gW;#ZZ&qwgo`{,;HW$JH I@vM&,pt/Rw 8YZm7}O|H\^13av|bLfǓr ^¡ C&<+z*I,d)Q̨Hd;E-oAVy&IW, d#u;w#=`R4TsېąԝCQsRHB (8Ć~d9kdõ^x AZͪ#OoRrY6v 5RٔAm$0Ig DzYE't)1m0(bz x z 9HTˁT-$d{ d"uJ-DR j9(Ib%`ߢ6D !Owd8;BSfGx'viM躴lmU|v8K4YHkaë EQ^,5U6x>CVM!}a<jHRv|FAx>b3ǒOX2s܆;LCV/<"gg!U!p.WJb34.ΐvjqO6]X.LrTjQh}L,fpUHEz}keǒRW)[ѭ,F =4#jX8y+݂IU4u(}y?!~#)`.cH37Ӑ;-h'-M/3Żgae{ӡ9M2-Dx2˭o"8 Y*bE1Ԙ_^|v|)1<.fDةͦOBh MkFKڍ}sIVvcyv:zk.Ee۰ .Dc \ 7jqZM tO_#*~hWo l!ŷ`?!H^@ޣqQ(@H$/&!'iB{ Ð.9$'w /C>b>jFW^R ebD꺀!ez41)X<C |ɖJ*OZֹ,瓰ARڏ_GjbFZq.jQDx?d $ OU'XrâQpmH h2yِ,b3l5M"HQ`:HJ9CvBxIe[};#n)rWO|f;lKkD2uڤ #Қ>DҚ>D8 yhA3Տx\CV_?hD ~{ʐIy GMdsk8㭷\R 8Yj`?e3Uߥ q FD̆3ۖa4zFvTSNHO/pFñV+Mv RLg;R kNG;`0|%#Paw? 9 ܓнkN9T25rxH?d /S+GdC[._Qde9f2/_q`rNG!q|x2ܣ>SFq㊑5;8ގH? dHT#CW;I~$N,}B {G KubVD_,u}d2fVry*[N'>NZ}d@fa"cߢtI3;btz N숩' e {ȠMcN*y[U[ZP^F" `'θ?';=˞-Idv܆L6>.kT9 61f,H1&JqYnY\MF"#{e^)c4,pvo9ǻ(Ԫ]Н΢T,7̢f^Z{ > dQל&Z#6;ՃEJlyƢ[av\F8 RIǹ܎y0;qYh&By57 dwj$t2 Q"P(3/-s m!eƺ1MmkW?+OOs- v1H'hakxH]8Nxe~aˎf`U 9w?{?^ږ{'0%H`[XPg,%YnN̞IU7pr3z+mƊfE{iɢ}Q.KgD& <6\lԥ\L*ڊIy y ŧ0Ee͚pZ.(5aQ`mȭqYCn22h<Gg&23\[oI5kB/r\ְcQ#h$ V06=џ6\Z偳\picJ0zi e !! I?,ٞh'F08wn|jⲆ]*K%Dex]=u>]zBFgJ͐54[mP~dRGTPF[97WIQ.C[XHBCxq'4[ !ބY~B0.Sp's JI

 1.2.4)")
    q("no")
}

## provide simple replacement for test_that() expectation bundles
test_that <- function (desc, code)
{
    eval(substitute(code), new.env(parent = parent.frame()))
    invisible()
}

## we use verbose = 1 to print_status() only after each test file,
## not after each expression (verbose = 2)
tinytest::test_package("surveillance", testdir = "testthat", verbose = 1)
surveillance/tests/testthat/0000755000176200001440000000000014013521730015744 5ustar liggesusers
surveillance/tests/testthat/test-hhh4+derivatives.R0000644000176200001440000001462113751307650022237 0ustar liggesusers
### Fixed effects hhh4() model fit and involved analytical derivatives

data("measlesWeserEms")
measlesModel <- list(
    end = list(f = addSeason2formula(~1 + t, S=1, period=52),
               offset = population(measlesWeserEms)),
    ar = list(f = ~1),
    ne = list(f = ~1 + log(pop),
              weights = W_powerlaw(maxlag = 5, normalize = TRUE)),
    family = "NegBin1",
    data = list(pop = population(measlesWeserEms))
)
measlesFit <- hhh4(stsObj = measlesWeserEms, control = measlesModel)

test_that("estimates and standard errors are reproducible", {
    ## dput(coef(measlesFit, se = TRUE))
    orig <- structure(
        c(-0.499636482022272, 0.551345030080107, 0.96093157194767,
          -0.153585641356373, 0.00333284018297979, 1.01500011496702,
          -0.588738943313705, 5.52782609236691, 1.81915612994789,
          0.121781347106564, 1.27401298230559, 0.453889365025671,
          0.281013375484401, 0.00459840327748742, 0.210642721317572,
          0.191921649336323, 1.87984346848385, 0.265016986696184),
        .Dim = c(9L, 2L),
        .Dimnames = list(c("ar.1", "ne.1", "ne.log(pop)", "end.1", "end.t",
                           "end.sin(2 * pi * t/52)", "end.cos(2 * pi * t/52)",
                           "neweights.d", "overdisp"),
                         c("Estimate", "Std. Error"))
    )
    expect_equal(coef(measlesFit, se = TRUE), orig,
                 tolerance = 1e-6) # increased for Solaris Sparc
    ## tolerance determined empirically by an R build with --disable-long-double
})

test_that("neighbourhood weights array yields the same results", {
    What <- getNEweights(measlesFit)
    ## put that in an array for time-varying weights in hhh4
    ## (they are not actually varying here)
    Warray <- array(What, dim = c(dim(What), nrow(measlesWeserEms)),
                    dimnames = c(dimnames(What), list(NULL)))
    measlesFit_Warray <- update(measlesFit, ne = list(weights = Warray),
                                use.estimates = FALSE)
    ## NOTE: variance estimates are different because of fixed powerlaw
    expect_equal(measlesFit_Warray, measlesFit,
                 ignore = c("control", "coefficients", "se", "cov", "dim"))
    expect_equal(coef(measlesFit_Warray),
                 coef(measlesFit)[names(coef(measlesFit_Warray))],
                 tolerance = 1e-6) # triggered by 64-bit win-builder
})
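## Illustrative sketch, not part of the original test file: the model above
## uses W_powerlaw(maxlag = 5, normalize = TRUE), i.e., the weight of district
## j for district i decays with the neighbourhood order o_ji as o_ji^(-d),
## is truncated to zero beyond maxlag, and rows are then normalized to sum
## to 1. Assuming that documented parameterization (and the default
## log = FALSE, so the "neweights.d" coefficient is d itself), the fitted
## weight matrix can be rebuilt by hand and compared with getNEweights():
d <- coef(measlesFit)[["neweights.d"]]
o <- neighbourhood(measlesWeserEms)     # neighbourhood orders (path distances)
W <- ifelse(o > 0 & o <= 5, o^(-d), 0)  # power-law decay, truncated at maxlag
W <- W / rowSums(W)                     # row normalization (normalize = TRUE)
all.equal(W, getNEweights(measlesFit), check.attributes = FALSE)  # should be TRUE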
Error")) ) expect_equal(coef(measlesFit, se = TRUE), orig, tolerance = 1e-6) # increased for Solaris Sparc ## tolerance determined empirically by an R build with --disable-long-double }) test_that("neighbourhood weights array yields the same results", { What <- getNEweights(measlesFit) ## put that in an array for time-varying weights in hhh4 ## (they are not actually varying here) Warray <- array(What, dim = c(dim(What),nrow(measlesWeserEms)), dimnames = c(dimnames(What), list(NULL))) measlesFit_Warray <- update(measlesFit, ne = list(weights = Warray), use.estimates = FALSE) ## NOTE: variance estimates are different because of fixed powerlaw expect_equal(measlesFit_Warray, measlesFit, ignore = c("control", "coefficients", "se", "cov", "dim")) expect_equal(coef(measlesFit_Warray), coef(measlesFit)[names(coef(measlesFit_Warray))], tolerance = 1e-6) # triggered by 64-bit win-builder }) test_that("score vector and Fisher info agree with numerical approximations", if (requireNamespace("numDeriv")) { test <- function (neweights) { Wname <- deparse(substitute(neweights)) measlesModel$ne$weights <- neweights capture.output( # hide reports as we use a different tolerance pencomp <- hhh4(measlesWeserEms, measlesModel, check.analyticals = "numDeriv")$pen ) expect_equal(pencomp$score$analytic, pencomp$score$numeric, tolerance = .Machine$double.eps^0.5, info = Wname) expect_equal(pencomp$fisher$analytic, pencomp$fisher$numeric, tolerance = .Machine$double.eps^0.25, info = Wname) } test(W_powerlaw(maxlag = 5, normalize = FALSE, log = FALSE)) ## normalized PL with maxlag < max(nbmat) failed in surveillance < 1.9.0: test(W_powerlaw(maxlag = 3, normalize = TRUE, log = TRUE)) ## check unconstrained weights test(W_np(maxlag = 5, truncate = TRUE, normalize = FALSE)) test(W_np(maxlag = 3, truncate = FALSE, normalize = TRUE)) ## test two-component formulations (AR within NE) measlesModel$ar <- list(f = ~ -1) test(W_powerlaw(maxlag = 3, normalize = TRUE, log = TRUE, from0 = TRUE)) test(W_np(maxlag = 1, truncate = FALSE, normalize = FALSE, from0 = TRUE)) test(W_np(maxlag = 3, truncate = TRUE, normalize = TRUE, from0 = TRUE)) }) test_that("automatic and manual normalization are equivalent", { ## check for equivalent functions for (type in c("powerlaw", "np")) { W_type <- get(paste0("W_", type), mode = "function") w0 <- W_type(maxlag = 3, normalize = TRUE) w1 <- surveillance:::scaleNEweights.list( W_type(maxlag = 3, normalize = FALSE), normalize = TRUE) pars <- w0$initial nbmat <- neighbourhood(measlesWeserEms) expect_equal(w1$w(pars, nbmat), w0$w(pars, nbmat)) ## for the power law, dw and d2w are length 1 lists in w1 but not in w0 unlistIfPL <- if (type == "powerlaw") function (x) x[[1L]] else identity expect_equal(unlistIfPL(w1$dw(pars, nbmat)), w0$dw(pars, nbmat)) expect_equal(unlistIfPL(w1$d2w(pars, nbmat)), w0$d2w(pars, nbmat)) ## microbenchmark::microbenchmark(w1$d2w(pars, nbmat), w0$d2w(pars, nbmat)) ## -> type-specific implementations of normalized derivatives are faster } ## check for equivalent fits (rather redundant) measlesFit2 <- hhh4( stsObj = measlesWeserEms, control = modifyList(measlesModel, list( ne = list( weights = W_powerlaw(maxlag = 5, normalize = FALSE), normalize = TRUE # -> use scaleNEweights.list() ))) ) expect_equal(measlesFit, measlesFit2, ignore = "control", tolerance = 1e-6) # increased to pass on 32-bit Windows }) measlesWeserEms2 <- measlesWeserEms neighbourhood(measlesWeserEms2) <- neighbourhood(measlesWeserEms2) + 1L test_that("W_powerlaw(..., from0 = TRUE) equals manual approach", 
{ measlesModel2 <- modifyList(measlesModel, list( ar = list(f = ~ -1), ne = list(weights = W_powerlaw(maxlag = 5, from0 = TRUE)) )) measlesFit2 <- hhh4(measlesWeserEms, measlesModel2) ## manual approach measlesModel2_manual <- modifyList(measlesModel2, list( ne = list(weights = W_powerlaw(maxlag = 5 + 1)) )) measlesFit2_manual <- hhh4(measlesWeserEms2, measlesModel2_manual) expect_equal(measlesFit2, measlesFit2_manual, ignore = c("control", "stsObj")) }) test_that("W_np(..., from0 = TRUE) equals manual approach", { measlesModel2 <- modifyList(measlesModel, list( ar = list(f = ~ -1), ne = list(weights = W_np(maxlag = 2, from0 = TRUE)) )) measlesFit2 <- hhh4(measlesWeserEms, measlesModel2) ## manual approach measlesModel2_manual <- modifyList(measlesModel2, list( ne = list(weights = W_np(maxlag = 2 + 1)) )) measlesFit2_manual <- hhh4(measlesWeserEms2, measlesModel2_manual) expect_equal(measlesFit2, measlesFit2_manual, ignore = c("control", "stsObj")) }) surveillance/tests/testthat/test-siafs.R0000644000176200001440000001363213777627613020205 0ustar liggesusers### Spatial interaction functions for twinstim() ## spatstat is no longer suggested, so is unavailable during R CMD check if (packageVersion("polyCub") <= "0.7.1") exit_file("need polyCub > 0.7.1 to run these tests") ### test bundle myexpectation <- function (siaf, intrfr, intrderivr, pargrid, type = 1, ...) { ## check analytical intrfr specification against numerical approximation if (!missing(intrfr)) apply(pargrid, 1, function (pars) expect_silent(capture.output( polyCub::checkintrfr(intrfr, siaf$f, pars, type, center=c(0,0), rs=c(1,2,5,10,20,50)) ))) ## also check intrfr for deriv if (!missing(intrderivr)) for (paridx in seq_along(intrderivr)) apply(pargrid, 1, function (pars) expect_silent(capture.output( polyCub::checkintrfr(intrderivr[[paridx]], function (...) siaf$deriv(...)[,paridx], pars, type, center=c(0,0), rs=c(1,2,5,10,20,50)) ))) ## check deriv, F, Deriv against numerical approximations checksiafres <- surveillance:::checksiaf(siaf, pargrid, type, ...) 
for (i in which(!sapply(checksiafres, is.null))) expect_true(unique(attr(checksiafres[[i]], "all.equal")), info = names(checksiafres)[i]) } ### test all pre-defined spatial interaction functions test_that("Gaussian 'F.adaptive' implementation agrees with numerical approximation", myexpectation(siaf.gaussian(F.adaptive=0.05), # Deriv uses polyCub.SV pargrid=as.matrix(log(c(0.5, 1, 3))), tolerance=0.01, method="midpoint", dimyx=150)) test_that("Gaussian iso-C-implementation agrees with numerical approximation", myexpectation(siaf.gaussian(F.adaptive=FALSE, F.method="iso"), pargrid=as.matrix(log(c(0.5, 1, 3))), tolerance=0.0005, method="SV", nGQ=25)) test_that("Exponential implementation agrees with numerical approximation", myexpectation(siaf.exponential(engine = "R"), surveillance:::intrfr.exponential, list(surveillance:::intrfr.exponential.dlogsigma), pargrid=as.matrix(log(c(0.1, 1, 2))), tolerance=0.0005, method="SV", nGQ=25)) test_that("Power-law implementation agrees with numerical approximation", myexpectation(siaf.powerlaw(engine = "R"), surveillance:::intrfr.powerlaw, list(surveillance:::intrfr.powerlaw.dlogsigma, surveillance:::intrfr.powerlaw.dlogd), pargrid=cbind(0.5,log(c(0.1,1,2))), tolerance=0.0005, method="SV", nGQ=13)) test_that("1-parameter power-law agrees with numerical approximations", myexpectation(siaf.powerlaw1(sigma = exp(0.5)), pargrid=as.matrix(log(c(0.1,1,2))), tolerance=0.0005, method="SV", nGQ=13)) test_that("Lagged power-law implementation agrees with numeric results", myexpectation(siaf.powerlawL(engine = "R"), surveillance:::intrfr.powerlawL, list(surveillance:::intrfr.powerlawL.dlogsigma, surveillance:::intrfr.powerlawL.dlogd), pargrid=cbind(-0.5,log(c(0.1,1,2))), tolerance=0.01, method="midpoint", dimyx=150)) test_that("Student implementation agrees with numerical approximation", myexpectation(siaf.student(engine = "R"), surveillance:::intrfr.student, list(surveillance:::intrfr.student.dlogsigma, surveillance:::intrfr.student.dlogd), pargrid=cbind(0.5,log(c(0.1,1,2))), tolerance=0.0005, method="SV", nGQ=5)) test_that("Step kernel implementation agrees with numerical approximation", myexpectation(siaf.step(c(0.1,0.5,1)), pargrid=-t(c(0.5,0.1,0.2)), tolerance=0.01, method="midpoint", dimyx=150)) ## ## plot the polygon on which F and Deriv are tested (to choose parameters) ## showsiaf <- function (siaf, pars) { ## plotpolyf(LETTERR, siaf$f, pars, print.args=list(split=c(1,1,2,1), more=TRUE)) ## plotpolyf(LETTERR, function (...) siaf$deriv(...)[,1], pars, print.args=list(split=c(2,1,2,1))) ## } ## showsiaf(siaf.student(), c(0.5,-0.5)) ### test new C-implementations of F and Deriv functions expect_equal_CnR <- function (siafgen, pargrid) { polydomain <- surveillance:::LETTERR siafR <- siafgen(engine = "R") siafC <- siafgen(engine = "C") ## check F resF <- apply(pargrid, 1, function (pars) c(C = siafC$F(polydomain, , pars), R = siafR$F(polydomain, , pars))) expect_equal(resF["C",], resF["R",], info = "C-version of F (current) vs. R-version of F (target)") ## check Deriv resDeriv <- apply(pargrid, 1, function (pars) c(siafC$Deriv(polydomain, , pars), siafR$Deriv(polydomain, , pars))) p <- siafR$npars expect_equal(resDeriv[seq_len(p),], resDeriv[p+seq_len(p),], info = "C-version of Deriv (current) vs. 
R-version of Deriv (target)") } test_that("siaf.exponential() engines agree", { expect_equal_CnR(siafgen = siaf.exponential, pargrid = matrix(log(c(0.1,1,2)))) }) test_that("siaf.powerlaw() engines agree", { expect_equal_CnR(siafgen = siaf.powerlaw, pargrid = cbind(0.5,log(c(0.1,1,2)))) }) test_that("siaf.student() engines agree", { expect_equal_CnR(siafgen = siaf.student, pargrid = cbind(0.5,log(c(0.1,1,2)))) }) test_that("siaf.powerlawL() engines agree", { expect_equal_CnR(siafgen = siaf.powerlawL, pargrid = cbind(-0.5,log(c(0.1,1,2)))) }) surveillance/tests/testthat/test-plapply.R0000644000176200001440000000036613746767713020563 0ustar liggesuserstest_that("plapply() results are reproducible", { res1 <- plapply(c(1, 1), rnorm, .parallel = 2, .seed = 1, .verbose = FALSE) res2 <- plapply(c(1, 1), rnorm, .parallel = 2, .seed = 1, .verbose = FALSE) expect_identical(res1, res2) }) surveillance/tests/testthat/test-earsC.R0000644000176200001440000000467113757503172020127 0ustar liggesuserstest_that("earsC returns a sts object", { #Sim data and convert to sts object disProgObj <- sim.pointSource(p = 0.99, r = 0.5, length = 208, A = 1, alpha = 1, beta = 0, phi = 0, frequency = 1, state = NULL, K = 1.7) stsObj = disProg2sts( disProgObj) res1 <- earsC(stsObj, control = list(range = 20:208, method = "C1")) res2 <- earsC(stsObj, control = list(range = 20:208, method = "C2", alpha = 0.05)) res3 <- earsC(stsObj, control = list(range = 20:208, method = "C3", sigma = 0.5)) expect_inherits(res1, "sts") expect_inherits(res2, "sts") expect_inherits(res3, "sts") data("salmNewport") in2011 <- which(isoWeekYear(epoch(salmNewport))$ISOYear == 2011) salmNewportGermany <- aggregate(salmNewport, by = "unit") control <- list(range = in2011, method = "C1", alpha = 0.05) surv <- earsC(salmNewportGermany, control = control) expect_inherits(surv, "sts") expect_true(max(surv@upperbound[1:4] - c(3.278854, 3.278854, 3.436517, 3.855617)) < 0.000001) }) test_that("earsC returns error messages",{ data("salmNewport") salmNewportGermany <- aggregate(salmNewport, by = "unit") control <- list(range = length(salmNewportGermany), method = "C1", alpha = 0.05, baseline = 2) expect_error(earsC(salmNewportGermany, control = control), "Minimum baseline to use is 3.") control <- list(range = length(salmNewportGermany), method = "C1", alpha = 0.05, minSigma = - 2) expect_error(earsC(salmNewportGermany, control = control), "The minimum sigma parameter") in2011 <- which(isoWeekYear(epoch(salmNewport))$ISOYear == 2011) control <- list(range = in2011, method = "C1", alpha = 0.05, baseline = 1500) expect_error(earsC(salmNewportGermany, control = control), "The vector of observed is too short!") }) test_that("The range is well defined",{ data("salmNewport") salmNewportGermany <- aggregate(salmNewport, by = "unit") control <- list(range = length(salmNewportGermany), method = "C1", alpha = 0.05, baseline = 2) surv <- earsC(salmNewportGermany, control = list(method = "C1", baseline = 10)) expect_true(length(surv@upperbound) == length(salmNewportGermany@observed) - 10) }) surveillance/tests/testthat/test-hhh4_weights.R0000644000176200001440000000225013746767713021461 0ustar liggesusers### Neighbourhood weights in hhh4() observed <- cbind(c(1,2,4), c(1,2,4)) test_that("AR-only and NE-only fit agree in toy scenario", { counts <- sts(observed) m1 <- hhh4(counts, control = list( end = list(f = ~ -1), family = "Poisson", ar = list(f = ~1))) expect_equivalent(coef(m1, idx2Exp=TRUE), 2) ## same fit via NE (because units have identical counts) m2 <- 
hhh4(counts, control = list(
        end = list(f = ~ -1), family = "Poisson",
        ne = list(f = ~1, weights = matrix(c(0,1,1,0), 2, 2))))
    m1$control <- m2$control <- m1$lags <- m2$lags <- NULL
    expect_equivalent(m1, m2)
})

test_that("time-varying NE weights align with time index of mu", {
    W <- matrix(c(0,1,1,0), 2, 2)
    Wt <- array(c(W, W, 0*W), c(dim(W), 3)) # w_jit = 0 for t=3
    off <- surveillance:::weightedSumNE(observed, Wt, lag = 1)
    expect_true(all(is.na(off[1L,])))
    expect_identical(off[3L,], c(0, 0)) # NE sum is zero at t=3
    ## failed in surveillance <= 1.18.0, where w_ji(t-1) * y_j(t-1)
    ## was calculated, whereas w_jit * y_j(t-1) was used for simulation,
    ## the latter being the desired behaviour (same time index as covariates)
})
surveillance/tests/testthat/test-createLambda.R0000644000176200001440000000554213751220571021425 0ustar liggesusers
data("measlesWeserEms")

## a simple endemic model
measlesFit0 <- hhh4(measlesWeserEms, list(
    end = list(f = addSeason2formula(~1), offset = population(measlesWeserEms)),
    family = "NegBin1"
))

test_that("endemic-only model has zero-valued Lambda matrix", {
    res <- getMaxEV_season(measlesFit0)
    expect_equal(res$maxEV.const, 0)
    zeromat <- matrix(0, measlesFit0$nUnit, measlesFit0$nUnit)
    expect_equal(res$Lambda.const, zeromat)
    expect_equal(surveillance:::createLambda(measlesFit0)(2), zeromat)
})

## + AR component
measlesFit1 <- update(measlesFit0, ar = list(f = addSeason2formula(~1)))

test_that("autoregressive model has a diagonal Lambda matrix", {
    res <- getMaxEV_season(measlesFit1)
    expect_equal(res$Lambda.const, diag(res$maxEV.const, measlesFit1$nUnit))
    expect_equal(surveillance:::createLambda(measlesFit1)(2),
                 diag(res$maxEV.season[2], measlesFit1$nUnit))
})

## + NE component
measlesFit2 <- update(measlesFit1,
    ne = list(f = ~1, weights = neighbourhood(measlesWeserEms) == 1)) # symmetric
measlesFit3 <- update(measlesFit2, ne = list(normalize = TRUE)) # asymmetric

test_that("getMaxEV() and getMaxEV_season() agree", {
    expect_equal(getMaxEV_season(measlesFit2)$maxEV.season,
                 getMaxEV(measlesFit2)[seq_len(measlesWeserEms@freq)])
    expect_equal(getMaxEV_season(measlesFit3)$maxEV.season,
                 getMaxEV(measlesFit3)[seq_len(measlesWeserEms@freq)])
})

## AR within NE + unit-specific epidemic covariate
measlesFit4 <- update(measlesFit0,
    ne = list(f = ~pop, weights = (neighbourhood(measlesWeserEms)+1)^-2,
              normalize = TRUE),
    data = list(pop = population(measlesWeserEms)))

## calculate "nu + Lambda Y_{t-1}" and compare to fitted(object)
check_createLambda <- function (object) {
    mname <- deparse(substitute(object))
    model <- terms(object)
    means <- meanHHH(object$coefficients, model, subset = seq_len(model$nTime))
    expect_equal(means$mean[model$subset,,drop=FALSE], fitted(object),
                 expected.label = paste0("fitted(", mname, ")"))
    Lambda <- surveillance:::createLambda(object)
    if (any(object$lags != 1, na.rm = TRUE))
        stop("check not implemented for lags != 1")
    meansByLambda <- t(vapply(
        X = object$control$subset,
        FUN = function(t) means$endemic[t,] + Lambda(t) %*% model$response[t-1,],
        FUN.VALUE = numeric(object$nUnit), USE.NAMES = FALSE))
    expect_equal(meansByLambda, unname(fitted(object)),
                 expected.label = paste0("fitted(", mname, ")"))
}

test_that("multivariate formulation using Lambda agrees with fitted values", {
    check_createLambda(measlesFit0)
    check_createLambda(measlesFit1)
    check_createLambda(measlesFit2)
    check_createLambda(measlesFit3) # failed in surveillance < 1.13.1
    check_createLambda(measlesFit4) # failed in surveillance < 1.13.1
})
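
## The expectations above pin down the decomposition used throughout this
## file: mu_t = nu_t + Lambda_t %*% y_{t-1}, where createLambda() returns
## the epidemic "Lambda" matrix for a given time point. A minimal sketch of
## how the dominant eigenvalue of Lambda could be inspected interactively
## via the exported getMaxEV() (illustration only, not run during checks;
## 'measlesFit2' as fitted above):
if (FALSE) {
    maxEV <- getMaxEV(measlesFit2)   # dominant eigenvalue per time point
    plot(maxEV, type = "l", xlab = "time", ylab = "max eigenvalue")
    abline(h = 1, lty = 2)   # values < 1 suggest a subcritical epidemic part
}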
surveillance/tests/testthat/test-bodaDelay.R0000644000176200001440000001565113762443561020757 0ustar liggesusers### ## Checking the provided reporting triangle ### data('salmAllOnset') # Control slot for the proposed algorithm with D=10 correction rangeTest <- 410:412 alpha <- 0.05 controlDelay <- list(range = rangeTest, b = 4, w = 3, pastAberrations = TRUE, mc.munu=10, mc.y=10, verbose = FALSE,populationOffset=FALSE, alpha = alpha, trend = TRUE, limit54=c(0,50), noPeriods = 10, pastWeeksNotIncluded = 26, delay=TRUE) test_that("The absence of reporting triangle throws an error",{ data("salmNewport") expect_error(bodaDelay(salmNewport, controlDelay),"You have to") }) test_that("The function spots uncorrect reporting triangles",{ stsFake <- salmAllOnset stsFake@control$reportingTriangle$n <- head(stsFake@control$reportingTriangle$n,n=10) expect_error(bodaDelay(stsFake, controlDelay),"The reporting triangle number") stsFake <- salmAllOnset stsFake@control$reportingTriangle$n[1,] <- stsFake@control$reportingTriangle$n[1,]/2 expect_error(bodaDelay(stsFake, controlDelay),"The reporting triangle is wrong") }) ### ## Data glm function ### epochAsDate <- TRUE epochStr <- "week" freq <- 52 b <- controlDelay$b w <- controlDelay$w populationOffset <- controlDelay$populationOffset noPeriods <- controlDelay$noPeriods verbose <- controlDelay$verbose reportingTriangle <- salmAllOnset@control$reportingTriangle timeTrend <- controlDelay$trend alpha <- controlDelay$alpha populationOffset <- controlDelay$populationOffset factorsBool <- controlDelay$factorsBool pastAberrations <- controlDelay$pastAberrations glmWarnings <- controlDelay$glmWarnings delay <- controlDelay$delay k <- controlDelay$k verbose <- controlDelay$verbose pastWeeksNotIncluded <- controlDelay$pastWeeksNotIncluded mc.munu <- controlDelay$mc.munu mc.y <- controlDelay$mc.y vectorOfDates <- as.Date(salmAllOnset@epoch, origin="1970-01-01") dayToConsider <- vectorOfDates[rangeTest[1]] observed <- salmAllOnset@observed population <- salmAllOnset@populationFrac dataGLM <- surveillance:::bodaDelay.data.glm(dayToConsider=dayToConsider, b=b, freq=freq, epochAsDate=epochAsDate, epochStr=epochStr, vectorOfDates=vectorOfDates,w=w, noPeriods=noPeriods, observed=observed,population=population, verbose=verbose, pastWeeksNotIncluded=pastWeeksNotIncluded, reportingTriangle=reportingTriangle, delay=delay) delay <- FALSE dataGLMNoDelay <- surveillance:::bodaDelay.data.glm(dayToConsider=dayToConsider, b=b, freq=freq, epochAsDate=epochAsDate, epochStr=epochStr, vectorOfDates=vectorOfDates,w=w, noPeriods=noPeriods, observed=observed,population=population, verbose=verbose, pastWeeksNotIncluded=pastWeeksNotIncluded, reportingTriangle=reportingTriangle, delay=delay) test_that("the output is a data.frame",{ expect_true(class(dataGLM)=="data.frame") expect_true(class(dataGLMNoDelay)=="data.frame") }) test_that("the data frame contains all variables",{ expect_equal(names(dataGLM)==c( "response", "wtime","population","seasgroups","vectorOfDates","delay"),rep(TRUE,6)) expect_equal(names(dataGLMNoDelay)==c( "response", "wtime","population","seasgroups","vectorOfDates"),rep(TRUE,5)) }) test_that("the variables have the right class",{ expect_equal(class(dataGLM$response),"numeric") expect_equal(class(dataGLM$wtime),"numeric") expect_equal(class(dataGLM$population),"numeric") expect_equal(class(dataGLM$seasgroups),"factor") expect_equal(class(dataGLM$vectorOfDates),"Date") expect_equal(class(dataGLM$delay),"numeric") expect_equal(class(dataGLMNoDelay$response),"numeric") 
expect_equal(class(dataGLMNoDelay$wtime),"numeric") expect_equal(class(dataGLMNoDelay$population),"numeric") expect_equal(class(dataGLMNoDelay$seasgroups),"factor") expect_equal(class(dataGLMNoDelay$vectorOfDates),"Date") }) test_that("the time variable is ok with diff 1",{ delayWtime <- as.numeric(levels(as.factor(dataGLM$wtime))) expect_equal(diff(delayWtime)==rep(1,length(delayWtime)-1),rep(TRUE,length(delayWtime)-1)) expect_equal(diff(dataGLMNoDelay$wtime)==rep(1,length(dataGLMNoDelay$wtime)-1),rep(TRUE,length(dataGLMNoDelay$wtime)-1)) }) test_that("the factor variable has the right number of levels",{ expect_true(length(levels(dataGLM$seasgroups))==noPeriods) expect_true(length(levels(dataGLMNoDelay$seasgroups))==noPeriods) }) ### ## Fit glm function ### argumentsGLM <- list(dataGLM=dataGLM,reportingTriangle=reportingTriangle, timeTrend=timeTrend,alpha=alpha, populationOffset=populationOffset, factorsBool=TRUE,pastAberrations=FALSE, glmWarnings=glmWarnings, verbose=verbose,delay=delay,k=k,control=controlDelay) if(surveillance.options("allExamples") && require("INLA")) { # needs to be attached argumentsGLM$inferenceMethod <- "INLA" model <- do.call(surveillance:::bodaDelay.fitGLM, args=argumentsGLM) test_that("the fitGLM function gives the right class of output",{ expect_equal(class(model),"inla") }) } argumentsGLM$inferenceMethod <- "asym" model <- do.call(surveillance:::bodaDelay.fitGLM, args=argumentsGLM) test_that("the fitGLM function gives the right class of output",{ expect_equal(class(model), c("negbin", "glm", "lm")) }) ### ## formula function ### test_that("We get the right formula",{ expect_identical(surveillance:::formulaGLMDelay(timeBool=TRUE,factorsBool=FALSE), "response ~ 1+wtime") expect_identical(surveillance:::formulaGLMDelay(timeBool=FALSE,factorsBool=FALSE), "response ~ 1") expect_identical(surveillance:::formulaGLMDelay(timeBool=TRUE,factorsBool=FALSE), "response ~ 1+wtime") expect_identical(surveillance:::formulaGLMDelay(timeBool=TRUE,factorsBool=TRUE), "response ~ 1+wtime+as.factor(seasgroups)") expect_identical(surveillance:::formulaGLMDelay(timeBool=TRUE,factorsBool=TRUE,delay=TRUE), "response ~ 1+wtime+as.factor(seasgroups)+as.factor(delay)") expect_identical(surveillance:::formulaGLMDelay(timeBool=TRUE,factorsBool=FALSE,outbreak=TRUE), "response ~ 1+wtime+f(outbreakOrNot,model='linear', prec.linear = 1)") }) surveillance/tests/testthat/test-farringtonFlexible.R0000644000176200001440000003624613757503172022721 0ustar liggesusersdata("salmonella.agona") # sts object lala <- paste(salmonella.agona$start[1],salmonella.agona$start[2],"1",sep=" ") firstMonday <- as.POSIXlt(lala, format = "%Y %W %u") salm.ts <- salmonella.agona$observed dates <- as.Date(firstMonday) + 7 * 0:(length(salm.ts) - 1) start=c(salmonella.agona$start[1],salmonella.agona$start[2]) salm <- new("sts",epoch = as.numeric(dates), start = start, freq = 52, observed = salm.ts, epochAsDate = TRUE) ### ## WEIGHTS FUNCTION ### test_that("gamma = 1 if everything below the threshold",{ s <- rep(0,10) weightsThreshold <- 0 weights <- algo.farrington.assign.weights(s,weightsThreshold) expect_equal(weights,rep(1,10)) }) test_that(" A case that was checked by hand",{ s <- rep(2,10) s[1:5] <- 0 weightsThreshold <- 0 weights <- algo.farrington.assign.weights(s,weightsThreshold) expect_equal(weights[1:5],rep(1.6,5)) expect_equal(weights[6:10],rep(0.4,5)) }) ### ## RESIDUALS FUNCTION ### test_that(" residuals should be zero",{ x <- rpois(10,1) y <- exp(x) model <- glm(y~x,family = quasipoisson(link="log")) phi 
<- max(summary(model)$dispersion,1) s <- anscombe.residuals(model,phi) expect_equal(as.numeric(s),rep(0,10)) }) test_that(" residuals should not be zero",{ x <- rpois(1000,1) y <- exp(x)+runif(1) model <- glm(y~x,family = quasipoisson(link="log")) phi <- max(summary(model)$dispersion,1) s <- anscombe.residuals(model,phi) expect_true(mean(s)>0) }) ### ## FORMULA FUNCTION ### test_that("We get the right formula",{ expect_identical(surveillance:::formulaGLM(populationOffset=FALSE,timeBool=TRUE,factorsBool=FALSE), "response ~ 1+wtime") expect_identical(surveillance:::formulaGLM(populationOffset=FALSE,timeBool=FALSE,factorsBool=FALSE), "response ~ 1") expect_identical(surveillance:::formulaGLM(populationOffset=TRUE,timeBool=TRUE,factorsBool=FALSE), "response ~ 1+wtime+offset(log(population))") expect_identical(surveillance:::formulaGLM(populationOffset=TRUE,timeBool=TRUE,factorsBool=TRUE), "response ~ 1+wtime+offset(log(population))+seasgroups") }) ### ## REFERENCE TIME POINTS FUNCTION ### test_that("We get the expected timepoints with weekly data",{ # Case with weekly data with dates dayToConsider <- as.Date("2013-06-06") b <- 3 freq <- 52 epochAsDate <- TRUE epochStr <- "week" lala <- surveillance:::algo.farrington.referencetimepoints(dayToConsider,b=b,freq=freq,epochAsDate,epochStr) # Do we get the same day as dayToConsider? expect_equal(as.numeric(format(lala, "%w")),rep(4,4)) # Actually for this example I know the dates one should get expect_equal(sort(lala),sort(c(as.Date("2010-06-03"),as.Date("2013-06-06"),as.Date("2012-06-07"),as.Date("2011-06-09")))) }) test_that("We get the expected timepoints with monthly data",{ dayToConsider <- 48 b <- 3 freq <- 12 epochAsDate <- FALSE epochStr <- "month" lala <- surveillance:::algo.farrington.referencetimepoints(dayToConsider,b=b,freq=freq,epochAsDate,epochStr) expect_equal(lala,c(48,36,24,12)) }) test_that("one gets a warning if too many years back",{ dayToConsider <- 48 b <- 3 freq <- 12 epochAsDate <- FALSE epochStr <- "month" expect_warning(surveillance:::algo.farrington.referencetimepoints(dayToConsider,b=8,freq=freq,epochAsDate,epochStr), "Some reference") # apply code control1 <- list(range=250,noPeriods=10,populationOffset=FALSE, fitFun="algo.farrington.fitGLM.flexible", b=10,w=3,weightsThreshold=2.58, pastWeeksNotIncluded=26, pThresholdTrend=1,trend=TRUE, thresholdMethod="muan",alpha=0.05,glmWarnings=FALSE) expect_error(farringtonFlexible(salm,control=control1),"Some reference") }) ### ## FIT GLM FUNCTION ### # Case with convergence control<- list(range=250,noPeriods=10,populationOffset=TRUE, fitFun="algo.farrington.fitGLM.flexible", b=40,w=3,weightsThreshold=2.58, pastWeeksNotIncluded=26, pThresholdTrend=1,trend=TRUE, thresholdMethod="muan",alpha=0.05,glmWarnings=FALSE) response=salm@observed[1:120] dataGLM <- data.frame(response=response,wtime=1:120, population=runif(120)*100, seasgroups=as.factor(rep(1:12,10))) arguments <- list(dataGLM=dataGLM, timeTrend=TRUE, populationOffset=TRUE, factorsBool=TRUE,reweight=TRUE, weightsThreshold=0.5,glmWarnings=control$glmWarnings, control=control) model <- do.call(surveillance:::algo.farrington.fitGLM.flexible, args=arguments) test_that("The fit glm function gives the right class of output?",{ expect_identical(class(model),c("glm","lm")) }) test_that("The fit glm function gives as many coefficients as expected",{ expect_equal(dim(summary(model)$coefficients)[1], length(levels(dataGLM$seasgroups))-1+1+1) }) test_that("wtime, response, phi and weights were added to the model",{ 
expect_false(is.null(model$phi)) expect_false(is.null(model$wtime)) expect_false(is.null(model$response)) expect_false(is.null(model$population)) expect_false(is.null(model$weights)) }) test_that("reweighting was done",{ expect_true(all(model$weights!=1)) }) test_that("there are no weights if very high threshold",{ arguments$reweight <- TRUE arguments$weightsThreshold <- 100000 model <- do.call(surveillance:::algo.farrington.fitGLM.flexible, args=arguments) expect_true(all(model$weights==1)) }) test_that("there is not a too small overdispersion",{ expect_true(model$phi>=1) }) ### ## BLOCKS FUNCTION ### referenceTimePoints <- c(as.Date("2010-06-03"),as.Date("2013-06-06"),as.Date("2012-06-07"),as.Date("2011-06-09")) firstDay <- as.Date("1990-06-07") vectorOfDates <- dates <- as.Date(firstDay) + 7 * 0:1300 freq <- 52 dayToConsider <- as.Date("2013-06-06") b <- 3 w <- 3 epochAsDate <- TRUE # p=1 p <- 1 lala <- surveillance:::blocks(referenceTimePoints,vectorOfDates,freq,dayToConsider,b,w,p,epochAsDate) test_that("the reference window has the right length",{ expect_equal(length(vectorOfDates[is.na(lala)==FALSE&lala==p]),w+1+b*(2*w+1)) # p>1 p <- 8 lala <- surveillance:::blocks(referenceTimePoints,vectorOfDates,freq,dayToConsider,b,w,p,epochAsDate) # reference windows expect_equal(length(vectorOfDates[is.na(lala)==FALSE&lala==p]),w+1+b*(2*w+1)) }) lili <- as.factor(lala[is.na(lala)==FALSE]) test_that("there are as many levels as expected",{ expect_equal(length(levels(lili)),p) }) p <- 8 lala <- surveillance:::blocks(referenceTimePoints,vectorOfDates,freq,dayToConsider,b,w,p,epochAsDate) lili <- as.factor(lala[is.na(lala)==FALSE]) lolo <- lili[lili!=p] test_that("periods of roughly the same length each year",{ expect_equal(as.numeric(abs(diff(table(lolo))[1:(p-2)])<=b),rep(1,(p-2))) }) ### ## THRESHOLD FUNCTION FARRINGTON ### predFit <- 5 predSeFit <- 0.2 wtime <- 380 skewness.transform <- "2/88" alpha <- 0.05 y <- 8 method <- "delta" phi <- 1 test_that("the function recognizes wrong exponents",{ expect_error(surveillance:::algo.farrington.threshold.farrington( predFit, predSeFit, phi, skewness.transform, alpha, y, method ), "proper exponent") }) test_that("some results we know are found",{ skewness.transform <- "none" lala <- surveillance:::algo.farrington.threshold.farrington( predFit, predSeFit, phi, skewness.transform, alpha, y, method ) # Should always be ok lala <- as.numeric(lala) expect_true(lala[3]<=1&lala[1]>=0) expect_true(lala[2]>lala[1]) expect_true(lala[1]>=0) # Here we know the results expect_equal(as.numeric(lala), c(1.3073128, 8.6926872, 0.0907246, 0.8124165), tolerance = 1e-6, scale = 1) # Here we calculated some examples skewness.transform <- "1/2" lala <- surveillance:::algo.farrington.threshold.farrington( predFit, predSeFit, phi, skewness.transform, alpha, y, method ) expect_equal(as.numeric(lala), c(1.9891097, 9.3744842, 0.1189986, 0.6857951), tolerance = 1e-6, scale = 1) skewness.transform <- "2/3" lala <- surveillance:::algo.farrington.threshold.farrington( predFit, predSeFit, phi, skewness.transform, alpha, y, method ) expect_equal(as.numeric(lala), c(1.8084477, 9.1154825, 0.1094727, 0.7289546), tolerance = 1e-6, scale = 1) }) ### ## THRESHOLD FUNCTION NOUFAILY ### predFit <- log(5) predSeFit <- log(2) wtime <- 380 skewness.transform <- "none" alpha <- 0.05 y <- 11 phi <- 1.5 method <- "muan" lala <- surveillance:::algo.farrington.threshold.noufaily( predFit, predSeFit, phi, skewness.transform, alpha, y, method ) test_that("some results we know are found",{ # Should 
always be ok lala <- as.numeric(lala) expect_true(lala[3]<=1&lala[1]>=0) expect_true(lala[2]>lala[1]) expect_true(lala[1]>=0) # Here we calculated some examples expect_equal(as.numeric(lala), c(8.0000000, 24.0000000, 0.8597797, 0.4193982), tolerance = 1e-6, scale = 1) phi <- 1.0 method <- "muan" lala <- surveillance:::algo.farrington.threshold.noufaily( predFit, predSeFit, phi, skewness.transform, alpha, y, method ) expect_equal(as.numeric(lala), c(9.0000000, 22.0000000, 0.9093099, 0.4605347), tolerance = 1e-6, scale = 1) phi <- 1.5 method <- "nbPlugin" lala <- surveillance:::algo.farrington.threshold.noufaily( predFit, predSeFit, phi, skewness.transform, alpha, y, method ) expect_equal(as.numeric(lala), c(1.00000000, 10.00000000, 0.03763657, 1.11918153), tolerance = 1e-6, scale = 1) phi <- 1.0 method <- "nbPlugin" lala <- surveillance:::algo.farrington.threshold.noufaily( predFit, predSeFit, phi, skewness.transform, alpha, y, method ) expect_equal(as.numeric(lala), c(2.00000000, 9.00000000, 0.01369527, 1.27061541), tolerance = 1e-6, scale = 1) }) ### ## DATA GLM FUNCTION ### b <- 3 freq <- 52 dayToConsider <- as.Date("2013-05-30") epochAsDate <- TRUE epochStr <- "week" firstDay <- as.Date("1990-06-07") vectorOfDates <- dates <- as.Date(firstDay) + 7 * 0:1300 w <- 3 noPeriods <- 10 observed <- rnorm(1301)+runif(1301)+30 population <- rnorm(1301)+10 verbose <- FALSE pastWeeksNotIncluded <- w k <- 1200 lala <- surveillance:::algo.farrington.data.glm(dayToConsider, b, freq, epochAsDate,epochStr, vectorOfDates,w,noPeriods, observed,population, verbose,pastWeeksNotIncluded,k) test_that("the output is a data.frame",{ expect_true(class(lala)=="data.frame") }) test_that("the data frame contains all variables",{ expect_identical(names(lala), c("response", "wtime","population","seasgroups","vectorOfDates")) }) test_that("the time variable is ok with diff 1",{ expect_equal(diff(lala$wtime), rep(1,length(lala$wtime)-1)) }) test_that("the factor variable has the right number of levels",{ expect_true(length(levels(lala$seasgroups))==noPeriods) }) observed[1150] <- NA lala <- surveillance:::algo.farrington.data.glm(dayToConsider, b, freq, epochAsDate,epochStr, vectorOfDates,w,noPeriods, observed,population, verbose,pastWeeksNotIncluded,k) test_that("the data frame has the right dimensions",{ expect_equal(dim(lala),c(156,5)) }) ### ## GLM FUNCTION ### dataGLM <- lala timeTrend <- TRUE populationOffset <- TRUE factorsBool <- TRUE reweight <- TRUE weightsThreshold <- 1 pThresholdTrend <- 1 b <- 3 noPeriods <- 10 typePred <- "link" fitFun <- "algo.farrington.fitGLM.flexible" glmWarnings <- FALSE epochAsDate <- TRUE dayToConsider <- as.Date("2013-05-30") diffDates <- 7 populationNow <- 10 test_that("the output has the needed variables",{ finalModel <- surveillance:::algo.farrington.glm(dataGLM,timeTrend,populationOffset,factorsBool, reweight,weightsThreshold,pThresholdTrend,b, noPeriods,typePred,fitFun,glmWarnings,epochAsDate, dayToConsider,diffDates,populationNow,verbose=FALSE) expect_identical(names(finalModel), c("pred","doTrend","coeffTime","phi")) }) test_that("no time trend in no time trend",{ pThresholdTrend <- 1 b <- 2 finalModel <- surveillance:::algo.farrington.glm(dataGLM,timeTrend,populationOffset,factorsBool, reweight,weightsThreshold,pThresholdTrend,b, noPeriods,typePred,fitFun,glmWarnings,epochAsDate, dayToConsider,diffDates,populationNow,verbose=FALSE) expect_false(finalModel$doTrend) }) ### ## ALARMS ### test <- farringtonFlexible(salm,control=list(thresholdMethod="nbPlugin",alpha=0.1)) 
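
## farringtonFlexible() returns an "sts" object whose alarm slot flags the
## time points where the observed count exceeds the computed upperbound,
## which is exactly what the expectations below verify. A small sketch of
## how this could be inspected by hand (illustration only, not run;
## 'test' as created above):
if (FALSE) {
    exceed <- observed(test) > upperbound(test)
    table(alarm = as.vector(alarms(test)), exceed = as.vector(exceed))
    plot(test)   # weekly counts with upperbound and alarm markers
}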
test_that("there are only alarms when expected",{ # No alarm when observed is 0 expect_true(sum(test@alarm[test@observed==0])==0) # No alarm when the observed counts are UNDER the threshold expect_true(sum(observed(test)>upperbound(test),na.rm=TRUE)==sum(test@alarm==TRUE)) }) ### ## NO CONVERGENCE ### timeSeries <- rep(0,698) timeSeries[696] <- 1 algoControl <- list(noPeriods=10,alpha = 0.01,verbose = F, b=5,w=4,weightsThreshold=2.58,pastWeeksNotIncluded=26, pThresholdTrend=1,thresholdMethod='nbPlugin',limit54 = c(4,5), range = (length(timeSeries) - 1):length(timeSeries), glmWarnings = FALSE) seriesSTSObject <- new('sts', observed = timeSeries, epoch = as.numeric(seq(as.Date('2001-01-01'),length.out=length(timeSeries), by='1 week')), epochAsDate = TRUE) test_that("The code does not produce any error",{ # It is ok if the code does not produce any error expect_warning(farringtonFlexible(seriesSTSObject, control = algoControl)) }) ### ## NA ### timeSeries <- observed <- rnorm(698)*10+runif(698)*100+30 algoControl <- list(noPeriods=10,alpha = 0.01,verbose = F, b=5,w=4,weightsThreshold=2.58,pastWeeksNotIncluded=w, pThresholdTrend=1,thresholdMethod='nbPlugin',limit54 = c(4,5), range = (length(timeSeries) - 1):length(timeSeries), glmWarnings = FALSE) seriesSTSObject <- new('sts', observed = timeSeries, epoch = as.numeric(seq(as.Date('2001-01-01'),length.out=length(timeSeries), by='1 week')), epochAsDate = TRUE) test_that("The code does not produce any error",{ farringtonFlexible(seriesSTSObject, control = algoControl) results1 <- farringtonFlexible(seriesSTSObject, control = algoControl) expect_inherits(results1, "sts") seriesSTSObject@observed[680:690] <- NA results2 <- farringtonFlexible(seriesSTSObject, control = algoControl) expect_inherits(results2, "sts") }) surveillance/tests/testthat/test-hhh4_offsets.R0000644000176200001440000000403013751215235021435 0ustar liggesusers### hhh4() with epidemic offsets ## select two adjacent regions data("measlesWeserEms") expect_message( measles2 <- measlesWeserEms[,c("03457","03454")], "could invalidate" ) expect_equivalent(neighbourhood(measles2), matrix(c(0,1,1,0), 2, 2)) ## AR model fit1 <- hhh4(measles2, list(ar = list(f = ~1))) ##plot(fit1, units=NULL) ## use estimated exp(lambda) as offset -> new lambda should be 0, equal fit o1 <- exp(fit1$coefficients[["ar.1"]]) fit1o <- hhh4(measles2, list( ar = list(f = ~1, offset = matrix(o1, nrow(measles2), ncol(measles2))) )) test_that("model with AR offset is fitted correctly", { expect_equal(fit1o$coefficients[["ar.1"]], 0) expect_equal(fitted(fit1o), fitted(fit1)) }) ## same test with an AR+NE model fit2 <- hhh4(measles2, list(ar = list(f = ~1), ne = list(f = ~1))) ##plot(fit2, units=NULL) o2_ar <- exp(fit2$coefficients[["ar.1"]]) o2_ne <- exp(fit2$coefficients[["ne.1"]]) fit2o <- hhh4(measles2, list( ar = list(f = ~1, offset = matrix(o2_ar, nrow(measles2), ncol(measles2))), ne = list(f = ~1, offset = matrix(o2_ne, nrow(measles2), ncol(measles2))) )) test_that("model with AR+NE offsets is fitted correctly", { expect_equal(fit2o$coefficients[["ar.1"]], 0, scale = 1) # use abs. 
diff expect_equal(fit2o$coefficients[["ne.1"]], 0, scale = 1, tolerance = 1e-6) # for ATLAS/MKL/OpenBLAS expect_equal(fitted(fit2o), fitted(fit2)) }) ## createLambda() and thus maxEV was wrong in surveillance <= 1.16.1 test_that("Lambda matrix incorporates epidemic offsets", { expect_equal(getMaxEV(fit1o)[1], getMaxEV(fit1)[1]) expect_equal(getMaxEV(fit2o)[1], getMaxEV(fit2)[1]) }) ## simulate.hhh4() was wrong in surveillance <= 1.16.1 test_that("simulation accounts for epidemic offsets", { ## check the relative difference in the total number of cases obs <- fitted(fit2o) sim <- simulate(fit2o, seed = 1, y.start = observed(measles2)[1,], subset = fit2o$control$subset, simplify = TRUE) expect_true(abs(sum(sim)/sum(obs)-1) < 0.5) }) surveillance/tests/testthat/test-toLatex.sts.R0000644000176200001440000000632713757503172021322 0ustar liggesusersdata("ha.sts") data("salmonella.agona") test_that("toLatex accepts basic input and returns Latex", { control <- list( noPeriods=10,populationBool=FALSE, fitFun="algo.farrington.fitGLM.flexible", b=4,w=3,weightsThreshold=2.58, pastWeeksNotIncluded=26, pThresholdTrend=1,trend=TRUE, thresholdMethod="new",alpha=0.01 ) result <- ha.sts result@alarm[,7] <- TRUE result@upperbound[,7] <- 1 laTex <- toLatex(result, subset=(280:290), table.placement="h", size = "scriptsize", sanitize.text.function = identity, NA.string = "-",include.rownames=FALSE) laTex3 <- toLatex(result, subset=(280:290), alarmPrefix = "aaaa", alarmSuffix = "bbbb", table.placement="h", size = "scriptsize", sanitize.text.function = identity, NA.string = "-",include.rownames=FALSE) expect_true(grepl("aaaa", paste(as.character(laTex3), collapse = ' '))) expect_true(grepl("bbbb", paste(as.character(laTex3), collapse = ' '))) expect_inherits(laTex, "Latex") expect_inherits(laTex3, "Latex") }) test_that("caption is incorporated", { testCaption <- "Please print my caption" latex <- toLatex(ha.sts, caption = testCaption) expect_true(grepl(testCaption, paste(as.character(latex), collapse = ' '))) }) test_that("label is incorporated", { testLabel <- "Please print my label" latex <- toLatex(ha.sts, label = testLabel) expect_true(grepl(testLabel, paste(as.character(latex), collapse = ' '))) }) test_that("ubColumnLabel is incorporated", { testUBLabel <- "Upperbound" latex <- toLatex(ha.sts, ubColumnLabel = testUBLabel) expect_true(grepl(testUBLabel, paste(as.character(latex), collapse = ' '))) }) test_that("one can override the default table column labels", { columnLabels <- c("Jahr", "Woche", "chwi1", "UB", "frkr2", "UB", "lich3", "UB", "mahe4", "UB", "mitt5", "UB", "neuk6", "UB", "pank7", "UB", "rein8", "UB", "span9", "UB", "zehl10", "UB", "scho11", "UB", "trko12", "UB") latex <- toLatex(ha.sts, columnLabels = columnLabels) expect_true(all( sapply(columnLabels, function(l) grepl(l, paste(as.character(latex), collapse = ' ')) , USE.NAMES = FALSE) )) }) test_that("toLatex works with output from farringtonFlexible()", { # Create the corresponding sts object from the old disProg object salm <- disProg2sts(salmonella.agona) # Farrington with old options control1 <- list(range=(260:312), noPeriods=1,populationOffset=FALSE, fitFun="algo.farrington.fitGLM.flexible", b=4,w=3,weightsThreshold=1, pastWeeksNotIncluded=3, pThresholdTrend=0.05,trend=TRUE, thresholdMethod="delta",alpha=0.1) salm1 <- farringtonFlexible(salm,control=control1) expect_inherits(toLatex(salm1), "Latex") }) test_that("toLatex stops if 'subset' is not applicable", { expect_error(toLatex(ha.sts, subset=(-5:290))) expect_error(toLatex(ha.sts, 
subset=(1:10000))) expect_error(toLatex(ha.sts, subset=(10000:100000))) }) surveillance/tests/testthat/test-algo.glrnb.R0000644000176200001440000000424713746767713021131 0ustar liggesusers## Simulation parameters S <- 1 ; t <- 1:120 ; m <- length(t) beta <- c(1.5,0.6,0.6) omega <- 2*pi/52 #log mu_{0,t} alpha <- 0.2 base <- beta[1] + beta[2] * cos(omega*t) + beta[3] * sin(omega*t) #Generate example data with changepoint and tau=tau tau <- 100 kappa <- 0.4 mu0 <- exp(base) mu1 <- exp(base + kappa) ## Generate counts set.seed(42) x <- rnbinom(length(t),mu=mu0*(exp(kappa)^(t>=tau)),size=1/alpha) s.ts <- create.disProg(week=t, observed=x, state=(t>=tau)) ## Define control object cntrl1 <- list(range=t,c.ARL=5, mu0=mu0, alpha=alpha, change="intercept", ret="value", dir="inc") ## Run algorithm glr.ts1 <- algo.glrnb(s.ts, control=cntrl1) ## Correct upperbound (rounded) ## dput(signif(c(glr.ts1$upperbound), 7)) correctUpperbound <- c( 0.0933664, 0, 0.001387989, 0.4392282, 1.239898, 2.983766, 1.954988, 1.722341, 1.586777, 0.7331938, 0.9337575, 0.7903225, 1.104522, 1.425098, 1.24129, 1.633672, 2.033343, 1.788079, 1.397671, 0.9081794, 0.797097, 0.7270934, 0.5248943, 0.3093548, 0.2622768, 0.2301054, 0.1595651, 0.1484989, 0.06889605, 0.1504776, 0.04138495, 0.02219845, 0.0231524, 0.009575689, 0.1504776, 0.5827537, 0.0357062, 0.005011513, 0, 1.390972, 0.3167743, 0.5717088, 0.1053871, 0.003442552, 0.0005934715, 0, 0, 0.05509335, 0.1375619, 0.2449853, 0.6840703, 0.5427538, 0.05675776, 0.06656547, 0.09036596, 0.209314, 0.1392091, 0.03494786, 0.026216, 0.277202, 0.01762547, 0, 0, 0, 3.564077, 1.41019, 0.290548, 0.3740241, 0.4269062, 0.1296794, 0.1298662, 0.6322042, 0.2115204, 0.107457, 0.9366399, 0.1379007, 0.1509654, 0.03392803, 0.005775552, 0, 0, 0, 0, 0, 0.001143512, 0.001637927, 1.021689, 1.965804, 1.83044, 1.017412, 0.3033473, 0.1689957, 0.4051742, 0.1247774, 0.1460143, 0.03590031, 0.9459381, 0.4189531, 0.2637725, 0.03925406, 0.01374443, 0.2283519, 2.535301, 1.406133, 1.692899, 2.021258, 2.951635, 4.25683, 4.77543, 3.90064, 3.646361, 3.680106, 4.236502, 5.522696, 0.1221651, 0.4054735, 0.6761779, 0.8039129, 0.3913383, 0.1261521) test_that("upperbound equals pre-computed value", expect_equal(c(glr.ts1$upperbound), correctUpperbound, tolerance=1e-6)) surveillance/tests/testthat/test-sts.R0000644000176200001440000001034713746767713017713 0ustar liggesuserstest_that("\"sts\" prototype is a valid object", expect_true(validObject(new("sts")))) mysts <- sts(1:10, frequency = 4, start = c(1959, 2)) test_that("conversion from \"ts\" to \"sts\" works as expected", { myts <- ts(1:10, frequency = 4, start = c(1959, 2)) expect_identical(as(myts, "sts"), mysts) ## this failed in surveillance 1.11.0 due to a wrong "start" calculation }) test_that("if missing(observed), initialize-method copies slots", { mysts_updated <- initialize(mysts, epoch = 2:11) expect_identical(mysts_updated@epoch, 2:11) mysts_updated@epoch <- mysts@epoch expect_identical(mysts_updated, mysts) ## construct stsBP from existing "sts" object mystsBP <- new("stsBP", mysts, ci = array(NA_real_, c(10,1,2)), lambda = array(NA_real_, c(10,1,1))) expect_identical(as(mystsBP, "sts"), mysts) }) test_that("different initializations of \"stsBP\" work as expected", { mystsBP <- new("stsBP", observed = 1:10, freq = 4, start = c(1959, 2), ci = array(NA_real_, c(10,1,2)), lambda = array(NA_real_, c(10,1,0))) expect_identical(mystsBP, as(mysts, "stsBP")) }) test_that("different initializations of \"stsNC\" work as expected", { mystsNC <- new("stsNC", observed = 
1:10, freq = 4, start = c(1959, 2), pi = array(NA_real_, c(10,1,2)), SR = array(NA_real_, c(10,0,0))) expect_identical(mystsNC, as(mysts, "stsNC")) }) test_that("sts(..., population) sets the populationFrac slot", { ## for sts() construction, "population" is an alias for "populationFrac" ## (the internal slot name), introduced in the space-time JSS paper sts1 <- sts(cbind(1:3, 11:13), population = c(10, 20)) sts2 <- sts(cbind(1:3, 11:13), populationFrac = c(10, 20)) expect_identical(sts1, sts2) }) test_that("\"sts\" conversion to a (tidy) data frame works consistently", { ## univariate sts mystsdata <- as.data.frame(mysts, as.Date = FALSE) expect_identical(tidy.sts(mysts)[names(mystsdata)], mystsdata) ## multivariate sts data("momo") momo3tidy_uv <- tidy.sts(momo[,3]) momo3tidy_mv <- subset(tidy.sts(momo), unit == levels(unit)[3]) momo3tidy_mv$unit <- momo3tidy_mv$unit[drop=TRUE] row.names(momo3tidy_mv) <- NULL expect_identical(momo3tidy_uv, momo3tidy_mv) }) test_that("we can subset epochs of an \"sts\" object", { expect_identical(mysts[TRUE,TRUE], mysts) expect_identical(mysts[2,]@start, c(1959, 3)) ## negative and 0 indices produced wrong "start" in surveillance <= 1.16.2 expect_identical(mysts[-1,], mysts[2:10,]) expect_identical(mysts[0,]@start, mysts@start) }) test_that("colnames need to be identical (only for multivariate data)", { slots_dn <- c("observed", "state", "alarm", "upperbound", "populationFrac") ## ignore colnames mismatch for univariate time series sts_args_1 <- lapply(setNames(nm = slots_dn), function (slot) matrix(0, 1, 1, dimnames = list(NULL, slot))) sts_args_1$neighbourhood <- matrix(0, 1, 1, dimnames = list("a", "a")) expect_silent(do.call(sts, sts_args_1)) ## multivariate time series with inconsistent column order are invalid sts_args_2 <- list( observed = matrix(0, 1, 2, dimnames = list(NULL, c("r1", "r2"))) ) sts_args_2[slots_dn[-1]] <- list(sts_args_2$observed[,2:1,drop=FALSE]) sts_args_2$neighbourhood <- matrix(0, 2, 2, dimnames = rep(list(c("r2", "r1")), 2)) expect_error(do.call(sts, sts_args_2), "colnames") # new in surveillance > 1.17.1 ## column names can be missing for other slots expect_silent(do.call(sts, c(sts_args_2[1], lapply(sts_args_2[-1], unname)))) }) test_that("epoch() finds Monday of 'start' week (ISO)", { mydate <- as.Date("2020-01-27") expect_identical(strftime(mydate, "%u"), "1") # Monday start <- unlist(isoWeekYear(mydate), use.names = FALSE) expect_equivalent(start, c(2020, 5)) # ISO week 5 expect_identical(strftime(mydate, "%W"), "04") # UK week 4 mysts <- sts(1:3, start = start) expect_equal(epoch(mysts, as.Date = TRUE)[1], mydate) ## failed in surveillance 1.18.0, where epoch(x, as.Date=TRUE) ## used %W to interpret the 'start' week, so here returned "2020-02-03" }) surveillance/tests/testthat/test-calibration.R0000644000176200001440000000320713746767713021366 0ustar liggesusers### Calibration tests for Poisson or NegBin predictions mu <- c(0.1, 1, 3, 6, pi, 100) size1 <- 0.5 size2 <- c(0.1, 0.1, 10, 10, 100, 100) ##set.seed(2); y <- rnbinom(length(mu), mu = mu, size = size1) y <- c(0, 0, 2, 14, 5, 63) zExpected <- rbind( dss = c(P = 6.07760977730636, NB1 = -0.468561113465647, NB2 = 2.81071829075294), logs = c(P = 5.95533908528874, NB1 = 0.403872251419915, NB2 = 2.77090543018323), rps = c(P = 4.45647234878906, NB1 = -0.437254253267393, NB2 = 2.57223607389215) ) delta <- 1e-4 #sqrt(.Machine$double.eps) for (score in rownames(zExpected)) { .zExpected <- zExpected[score, , drop = TRUE] ## if package "gsl" is not available, rps_EV is less 
accurate tol_equal <- if (score == "rps" && !requireNamespace("gsl", quietly = TRUE)) 1e-4 else .Machine$double.eps^0.5 test_that(paste0("still the same z-statistics with ", score), { ## Poisson predictions zP <- calibrationTest(y, mu, which = score, tolerance = delta)$statistic expect_equal(zP, .zExpected["P"], check.attributes = FALSE, tolerance = tol_equal) ## NegBin predictions with common size parameter zNB1 <- calibrationTest(y, mu, size1, which = score, tolerance = delta)$statistic expect_equal(zNB1, .zExpected["NB1"], check.attributes = FALSE, tolerance = tol_equal) ## NegBin predictions with varying size parameter zNB2 <- calibrationTest(y, mu, size2, which = score, tolerance = delta)$statistic expect_equal(zNB2, .zExpected["NB2"], check.attributes = FALSE, tolerance = tol_equal) }) } surveillance/tests/testthat/test-twinstim_score.R0000644000176200001440000000525713746767713022157 0ustar liggesusers### Likelihood and score function of twinstim() ## Note: derivatives of interaction functions are tested in separate files ## we thus use the relatively fast Gaussian kernel here data("imdepi") model <- twinstim( endemic = addSeason2formula(~offset(log(popdensity)), S = 1, period = 365, timevar = "start"), epidemic = ~type, siaf = siaf.gaussian(), tiaf = tiaf.step(2), data = imdepi, optim.args = NULL, verbose = FALSE ) theta <- c("h.(Intercept)" = -20, "h.sin(2 * pi * start/365)" = 0.2, "h.cos(2 * pi * start/365)" = 0.3, "e.(Intercept)" = -10, "e.typeC" = -0.9, "e.siaf.1" = 2, "e.tiaf.1" = -1) test_that("likelihood is still the same", { expect_equal(model$ll(theta), -9579.65468598488) }) test_that("score vector agrees with numerical approximation", { numsc <- if (surveillance.options("allExamples") && requireNamespace("numDeriv")) { numDeriv::grad(func = model$ll, x = theta) } else { # for faster --as-cran tests c(-321.766081898055, -17.0779781937451, -37.1712258869585, -21.4444934196989, -5.43080160401029, -15.085241575699, -20.1708323190602) } expect_equal(model$sc(theta), numsc) }) ## Note: twinstim() uses an estimate of the _expected_ Fisher information, ## which does not necessarily agree with the negative Hessian of the ll ## (it does asymptotically **at the MLE**) ## numfi <- -numDeriv::hessian(func = model$ll, x = theta) ## anafi <- model$fi(theta) test_that("one-parameter power law agrees with more general implementation", { m0 <- update.default(model, siaf = siaf.powerlaw(), tiaf = NULL, subset = time < 30) m1 <- update.default(m0, siaf = siaf.powerlaw1(sigma = exp(2))) expect_equal(m0$ll(theta), m1$ll(c(head(theta, -2), -1))) expect_equal(m0$sc(theta)[-6], m1$sc(c(head(theta, -2), -1))) }) ### now check with identity link for the epidemic predictor model2 <- update.default(model, siaf = NULL, tiaf = NULL, epidemic = ~1, epilink = "log") model2i <- update.default(model2, epilink = "identity") theta2 <- theta2i <- theta[1:4] theta2i["e.(Intercept)"] <- exp(theta2["e.(Intercept)"]) test_that("likelihoods with log-link and identity link are the same", { expect_equal(model2i$ll(theta2i), model2$ll(theta2)) }) test_that("identity link score vector agrees with numerical approximation", { numsc <- if (surveillance.options("allExamples") && requireNamespace("numDeriv")) { numDeriv::grad(func = model2i$ll, x = theta2i) } else { # for faster --as-cran tests c(-679.706275919901, -91.0659401491325, -114.082117122738, -1532144485.45524) } expect_equal(model2i$sc(theta2i), numsc) }) surveillance/tests/testthat/test-nbOrder.R0000644000176200001440000000223013751307650020447 0ustar 
liggesusers## generate random adjancency matrix ## radjmat <- function (n) { ## adjmat <- matrix(0L, n, n, dimnames=list(letters[1:n],letters[1:n])) ## adjmat[lower.tri(adjmat)] <- sample(0:1, n*(n-1)/2, replace=TRUE) ## adjmat + t(adjmat) ## } ## set.seed(3); adjmat <- radjmat(5) adjmat <- structure( c(0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 0L), .Dim = c(5L, 5L), .Dimnames = rep.int(list(c("a", "b", "c", "d", "e")), 2L) ) ## validated matrix of neighbourhood orders nbmat <- structure( c(0L, 2L, 1L, 3L, 2L, 2L, 0L, 1L, 1L, 2L, 1L, 1L, 0L, 2L, 1L, 3L, 1L, 2L, 0L, 1L, 2L, 2L, 1L, 1L, 0L), .Dim = c(5L, 5L), .Dimnames = rep.int(list(c("a", "b", "c", "d", "e")), 2L) ) test_that("nbOrder() returns the validated matrix", if (requireNamespace("spdep")) { expect_identical(suppressMessages(nbOrder(adjmat, maxlag=Inf)), nbmat) }) test_that("zetaweights(.,maxlag=1,normalize=FALSE) is inverse of nbOrder", { expect_identical(zetaweights(nbmat, maxlag=1, normalize=FALSE), 1*adjmat) }) surveillance/tests/testthat/test-hhh4_ARasNE.R0000644000176200001440000000221313746767713021057 0ustar liggesusers### Validate AR hhh4 via NE with identity W data("measlesWeserEms") ## fit with AR component as usual vaccdata <- matrix(measlesWeserEms@map$vacc2.2004, byrow = TRUE, nrow = nrow(measlesWeserEms), ncol = ncol(measlesWeserEms)) measlesModel <- list( ar = list(f = addSeason2formula(~1 + vacc2, S=2, period=52)), end = list(f = addSeason2formula(~1, S=1, period=52), offset = population(measlesWeserEms)), family = "NegBin1", data = list(vacc2 = vaccdata)) measlesFit <- hhh4(measlesWeserEms, measlesModel) ## now use an identity matrix as W in the NE component instead of AR measlesFit2 <- suppressWarnings( update(measlesFit, ar = list(f = ~-1), ne = list(f = measlesModel$ar$f, weights = diag(ncol(measlesWeserEms))), use.estimates = FALSE) ) ## compare fits test_that("AR-hhh4 agrees with using identity W in NE", { expect_equivalent(coef(measlesFit2), coef(measlesFit)) expect_equivalent(measlesFit2$cov, measlesFit$cov) expect_equal(logLik(measlesFit2), logLik(measlesFit)) expect_equal(fitted(measlesFit2), fitted(measlesFit)) }) surveillance/tests/testthat/test-formatDate.R0000644000176200001440000000474613746767713021176 0ustar liggesusersd2 <- as.Date(c("2001-01-01","2002-05-01")) test_that("Formatting date vectors with ISO8601 and UK conventions", expect_identical(formatDate(d2, "W%V-%G / W%W-%Y / %d-%m-%Y"), c("W01-2001 / W01-2001 / 01-01-2001", "W18-2002 / W17-2002 / 01-05-2002"))) test_that("Formatting quarters", { expect_identical(formatDate(d2,"%Q"), c("1","2")) expect_identical(formatDate(d2,"%q"), c("1","31")) expect_identical(as.character(d2 - as.numeric(formatDate(d2,"%q")) + 1), c("2001-01-01","2002-04-01")) }) test_that("Formatting date vectors with roman letters for quarters", expect_identical(formatDate(d2,"%G\n%OQ"), c("2001\nI","2002\nII"))) #Some checks for the atChange dates <- seq(as.Date("2007-01-01"),as.Date("2013-01-01"),by="1 week") #Format with conversion string x <- as.numeric(formatDate(dates,"%m")) xm1 <- as.numeric(formatDate(dates[1]-7,"%m")) #At change test_that("atChange function works for %m", expect_identical( atChange(x,xm1), c(1L, 6L, 10L, 14L, 19L, 23L, 27L, 32L, 36L, 40L, 45L, 49L, 54L, 58L, 62L, 67L, 71L, 75L, 80L, 84L, 88L, 93L, 97L, 101L, 106L, 110L, 114L, 119L, 123L, 127L, 132L, 136L, 141L, 145L, 149L, 154L, 158L, 162L, 166L, 171L, 175L, 180L, 184L, 188L, 193L, 197L, 201L, 206L, 210L, 215L, 219L, 223L, 227L, 232L, 236L, 
240L, 245L, 249L, 254L, 258L, 262L, 267L, 271L, 275L, 280L, 284L, 288L, 293L, 297L, 301L, 306L, 310L))) #Test every second change function test_that("at2ndChange function works for %m", expect_identical( at2ndChange(x,xm1), c(1L, 10L, 19L, 27L, 36L, 45L, 54L, 62L, 71L, 80L, 88L, 97L, 106L, 114L, 123L, 132L, 141L, 149L, 158L, 166L, 175L, 184L, 193L, 201L, 210L, 219L, 227L, 236L, 245L, 254L, 262L, 271L, 280L, 288L, 297L, 306L))) #### Year formatting x <- as.numeric(formatDate(dates,"%Y")) xm1 <- as.numeric(formatDate(dates[1]-7,"%Y")) test_that("atMedian function works for %Y", expect_identical( atMedian(x,xm1), c(26L, 79L, 131L, 183L, 235L, 287L))) test_that("at2ndChange function works for %Y", expect_identical( dates[at2ndChange(x,xm1)], as.Date(c("2007-01-01","2009-01-05","2011-01-03")))) #Does the following look as expected? (hard to check with testthat) #data("rotaBB") #plot(rotaBB, xaxis.tickFreq=list("%Y"=atChange), xaxis.labelFreq=list("%Y"=at2ndChange),xaxis.labelFormat="%Y",xlab="time (months)") surveillance/tests/testthat/test-determineSources.R0000644000176200001440000000116314013521730022365 0ustar liggesusersdata("imdepi") test_that("determineSources() yields same result as old implementation", { sources0 <- surveillance:::determineSources.epidataCS(imdepi, method = "R") expect_identical(sources0, imdepi$events$.sources) sources1 <- surveillance:::determineSources( imdepi$events$time, imdepi$events$eps.t, coordinates(imdepi$events), imdepi$events$eps.s, imdepi$events$type, imdepi$qmatrix ) expect_identical(sources1, imdepi$events$.sources) sources2 <- surveillance:::determineSources.epidataCS(imdepi, method = "C") expect_identical(sources2, imdepi$events$.sources) }) surveillance/tests/testthat/test-hhh4_NegBinGrouped.R0000644000176200001440000001562513746767713022511 0ustar liggesusers### hhh4() model with shared overdispersion parameters ## use a small subset of districts from the fluBYBW data data("fluBYBW") fluBWsub <- fluBYBW[, substr(colnames(fluBYBW), 1, 2) %in% "81"] ## stsplot_space(fluBWsub, labels = TRUE) ## set "neighbourhood" to order of adjacency + 1 neighbourhood(fluBWsub) <- # nbOrder(neighbourhood(fluBWsub), maxlag = 5) + 1 structure( c(1, 4, 3, 2, 2, 4, 2, 4, 3, 3, 4, 4, 5, 4, 1, 2, 3, 4, 5, 4, 2, 3, 4, 3, 4, 4, 3, 2, 1, 2, 3, 4, 3, 2, 2, 3, 3, 4, 4, 2, 3, 2, 1, 2, 4, 3, 3, 2, 3, 3, 4, 4, 2, 4, 3, 2, 1, 4, 2, 3, 2, 3, 3, 4, 4, 4, 5, 4, 4, 4, 1, 3, 4, 3, 2, 3, 3, 4, 2, 4, 3, 3, 2, 3, 1, 3, 2, 2, 3, 3, 4, 4, 2, 2, 3, 3, 4, 3, 1, 2, 3, 2, 3, 3, 3, 3, 2, 2, 2, 3, 2, 2, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 3, 2, 1, 2, 2, 3, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 2, 2, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 2, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1), .Dim = c(13L, 13L), .Dimnames = list( c("8115", "8135", "8117", "8116", "8111", "8121", "8118", "8136", "8119", "8125", "8127", "8126", "8128"), c("8115", "8135", "8117", "8116", "8111", "8121", "8118", "8136", "8119", "8125", "8127", "8126", "8128"))) ## a crazy model base fluModel <- list( end = list(f = addSeason2formula(~0 + ri(type="iid"))), ne = list(f = ~0 + fe(1, unitSpecific = TRUE), weights = W_powerlaw(maxlag = 3)), start = list(random = rep.int(0, ncol(fluBWsub))) ) if (FALSE) { # check derivatives fluDeriv <- hhh4(stsObj = fluBWsub, control = c(fluModel, list(family = "NegBinM")), check.analyticals = TRUE) ana <- fluDeriv$pen$fisher$analytic num <- fluDeriv$pen$fisher$numeric equal <- mapply(function (...) 
isTRUE(all.equal.numeric(...)), ana, num, tolerance = 1e-4) dim(equal) <- dim(ana) Matrix::image(Matrix::Matrix(equal)) } ## fit a model with unit-specific overdispersion parameters using "NegBinM", ## equal to family = factor(colnames(fluBWsub), levels=colnames(fluBWsub)) fluFitM <- hhh4(stsObj = fluBWsub, control = c(fluModel, list( family = "NegBinM"))) test_that("\"NegBinM\" fit is invariant to the ordering of the overdispersion parameters", { fluFitM_reordered <- hhh4(stsObj = fluBWsub, control = c(fluModel, list( family = factor(colnames(fluBWsub), levels=rev(colnames(fluBWsub)))))) expect_equal(fluFitM_reordered$loglikelihood, fluFitM$loglikelihood) expect_equal(fluFitM_reordered$margll, fluFitM$margll) expect_equal(fluFitM_reordered$coefficients[names(fluFitM$coefficients)], fluFitM$coefficients) }) test_that("random intercepts can be extracted", { ris <- ranef(fluFitM, intercept = TRUE) expect_equal(dimnames(ris), list(colnames(fluBWsub), "end.ri(iid)")) ## compute endemic predictor at t = 0 (i.e., subset = 1) end.exppred.t0 <- meanHHH(theta = fluFitM$coefficients, model = terms(fluFitM), subset = 1)$end.exppred expect_equal(exp(ris + fluFitM$coefficients["end.cos(2 * pi * t/52)"]), t(end.exppred.t0), check.attributes = FALSE) }) ## fit a model with shared overdispersion parameters fluFitShared <- hhh4(stsObj = fluBWsub, control = c(fluModel, list( family = factor(substr(colnames(fluBWsub), 3, 3) == "1", levels = c(TRUE, FALSE), labels = c("region1", "elsewhere"))))) test_that("estimates with shared overdispersion are reproducible", { ## dput(coef(fluFitShared, se = TRUE)) orig <- structure( c(0.0172448275799737, -2.29936227176632, -0.311391919170833, 0.0173369590386396, 0.242634649538434, -0.73402605050834, -0.0411427686831543, -0.917845995715638, -0.324146451650439, -0.252506337389155, 0.153202205413176, -0.857813219848051, -1.00758863915022, 2.01735387997105, 2.38047570484809, -4.38317074697181, 2.46949727973784, 0.549903756338196, 1.12432744953686, 0.647372578569298, 0.21388842588635, -0.437822769909503, 0.255185408180267, 0.92949604237045, -1.09633602928844, 0.298117843865811, -0.68452091605681, 0.23456335139387, 0.162259631408099, 0.209619606465627, -0.10216429396362, -0.629658878921399, 0.114133112372732, 0.823887580788133, 0.12141926111051, 0.113879127629599, 0.109816278251024, 0.221038616887962, 0.115707006557826, 0.187260599970159, 0.121830940397345, 0.172070355414403, 0.157444513096506, 0.254811666726125, 0.268571254537371, 0.215202234247305, 0.212970632033808, 0.262762514629277, 0.205440489731246, 0.0567461846032841, 0.154168532075271, 0.320248263514015, 0.309517737483193, 0.366585194306804, 0.370748971125027, 0.304859567470968, 0.397763842736319, 0.357894067104384, 0.380956131344983, 0.344676554711052, 0.37300484854814, 0.378382126329053, 0.342270280546076, 0.359489843015429), .Dim = c(32L, 2L), .Dimnames = list( c("ne.1.8115", "ne.1.8135", "ne.1.8117", "ne.1.8116", "ne.1.8111", "ne.1.8121", "ne.1.8118", "ne.1.8136", "ne.1.8119", "ne.1.8125", "ne.1.8127", "ne.1.8126", "ne.1.8128", "end.sin(2 * pi * t/52)", "end.cos(2 * pi * t/52)", "end.ri(iid)", "neweights.d", "overdisp.region1", "overdisp.elsewhere", "end.ri(iid).8115", "end.ri(iid).8135", "end.ri(iid).8117", "end.ri(iid).8116", "end.ri(iid).8111", "end.ri(iid).8121", "end.ri(iid).8118", "end.ri(iid).8136", "end.ri(iid).8119", "end.ri(iid).8125", "end.ri(iid).8127", "end.ri(iid).8126", "end.ri(iid).8128"), c("Estimate", "Std. 
Error")) ) expect_equal(coef(fluFitShared, se = TRUE), orig) }) test_that("calibrationTest.oneStepAhead() works and \"final\" is equivalent to fit", { mysubset <- tail(fluFitShared$control$subset, 16) osa_final <- oneStepAhead(fluFitShared, tp = mysubset[1L]-1L, type = "final", verbose = FALSE) idx <- 3:5 # ignore "method" and "data.name" in calibrationTest() output expect_equal(calibrationTest(osa_final, which = "dss")[idx], calibrationTest(fluFitShared, which = "dss", subset = mysubset)[idx]) }) test_that("simulation correctly uses shared overdispersion parameters", { fluSimShared <- simulate(fluFitShared, seed = 1) ## simulate from the NegBinM model using the estimates from the shared fit psiShared <- coeflist(fluFitShared)$fixed$overdisp psiByUnit <- psiShared[fluFitShared$control$family] names(psiByUnit) <- paste0("overdisp.", names(fluFitShared$control$family)) coefsM <- c(coef(fluFitShared), psiByUnit)[names(coef(fluFitM))] fluSimSharedM <- simulate(fluFitM, seed = 1, coefs = coefsM) expect_identical(observed(fluSimShared), observed(fluSimSharedM)) ## fails for surveillance 1.12.2 }) surveillance/tests/testthat/test-tiafs.R0000644000176200001440000000501313751221007020153 0ustar liggesusers### Temporal interaction functions for twinstim() test_that("Step kernel of a single type agrees with numerical approximations", { steptiaf <- tiaf.step(c(7,20), maxRange=25, nTypes=1) logvals <- log(c(1.2,0.2)) ##curve(steptiaf$g(x, logvals), 0, 30, n=301) ## check G Gana <- steptiaf$G(0:30, logvals) Gnum <- sapply(0:30, function (upper) { integrate(steptiaf$g, 0, upper, logvals, rel.tol=1e-8)$value }) expect_equal(Gana, Gnum, tolerance = 1e-8) ## check deriv if (requireNamespace("maxLik", quietly = TRUE)) { checkderiv <- maxLik::compareDerivatives( f = function(pars, x) steptiaf$g(x, pars), grad = function(pars, x) steptiaf$deriv(x, pars), t0 = logvals, x = c(0.5,2,5,7,10,15,20,25,30), print = FALSE) expect_true(checkderiv$maxRelDiffGrad < 1e-8) } ## check Deriv for (paridx in seq_along(logvals)) expect_equal( steptiaf$Deriv(0:30, logvals)[,paridx], sapply(0:30, function (upper) integrate(function(...) 
steptiaf$deriv(...)[,paridx], 0, upper, logvals, rel.tol=1e-6)$value), tolerance = 1e-6, label = paste0("steptiaf$Deriv()[,",paridx,"]"), expected.label = "integrate() approximation" ) }) test_that("Step kernel with maxRange>max(eps.t) is equivalent to maxRange=Inf", { data("imdepi", package="surveillance") imdfit_steptiafInf <- twinstim( endemic = ~offset(log(popdensity)) + I(start/365 - 3.5), epidemic = ~1, siaf = siaf.constant(), tiaf = tiaf.step(c(7,20), maxRange=Inf), data = imdepi, optim.args = NULL, verbose = FALSE) maxepst <- max(imdepi$events$eps.t) imdfit_steptiaf30 <- update.default( ## update() might call an update.list-method registered by another ## package, e.g., gdata (2.18.0) implicitly loaded in other tests imdfit_steptiafInf, tiaf = tiaf.step(c(7,20), maxRange=maxepst+0.1)) coefs <- c(-20, -0.05, -15, -0.5, 0.2, -1) expect_identical(imdfit_steptiafInf$ll(coefs), imdfit_steptiaf30$ll(coefs)) expect_identical(imdfit_steptiafInf$sc(coefs), imdfit_steptiaf30$sc(coefs)) }) surveillance/src/0000755000176200001440000000000014030612531013530 5ustar liggesuserssurveillance/src/determineSources.cc0000644000176200001440000000464614013521730017365 0ustar liggesusers/******************************************************************************* // Determine potential triggering events close in space and time // // Copyright (C) 2016,2021 Sebastian Meyer // // This file is part of the R package "surveillance", // free software under the terms of the GNU General Public License, version 2, // a copy of which is available at https://www.R-project.org/Licenses/. *******************************************************************************/ #include <Rcpp.h> using namespace Rcpp; // Euclidean distance of a set of points to a single point (x0, y0) NumericVector distsN1(NumericVector x, NumericVector y, double x0, double y0) { // hypot(x, y) is not (yet) vectorized by Rcpp sugar return sqrt(pow(x - x0, 2.0) + pow(y - y0, 2.0)); } RcppExport SEXP determineSources( SEXP eventTimesSEXP, SEXP eps_tSEXP, SEXP eventCoordsSEXP, SEXP eps_sSEXP, SEXP eventTypesSEXP, SEXP qmatrixSEXP ){ BEGIN_RCPP NumericVector eventTimes(eventTimesSEXP); NumericVector eps_t(eps_tSEXP); NumericMatrix eventCoords(eventCoordsSEXP); NumericVector eps_s(eps_sSEXP); IntegerVector eventTypes(eventTypesSEXP); LogicalMatrix qmatrix(qmatrixSEXP); int N = eventTimes.size(); NumericVector removalTimes = eventTimes + eps_t; NumericMatrix::Column xcoords = eventCoords(_,0); NumericMatrix::Column ycoords = eventCoords(_,1); List sources(N); LogicalVector infectivity(N); LogicalVector proximity(N); LogicalVector matchType(N); LogicalVector typeInfective(qmatrix.nrow()); IntegerVector eventTypes0 = eventTypes - 1; // for correct indexing IntegerVector idx = seq_len(N); for (int i = 0; i < N; ++i) { infectivity = (eventTimes < eventTimes[i]) & (removalTimes >= eventTimes[i]); // "<" not "<=" because CIF is left-continuous. // Also guarantees no self-infection.
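// In words, an earlier event j is kept as a potential source of event i iff
// (a) eventTimes[j] < eventTimes[i] <= eventTimes[j] + eps_t[j] (infectivity, above),
// (b) the distance between j and i is at most eps_s[j] (proximity, next statement),
// (c) qmatrix allows the event type of j to trigger the type of i (matchType, below).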
proximity = distsN1(xcoords, ycoords, eventCoords(i,0), eventCoords(i,1)) <= eps_s; typeInfective = qmatrix(_,eventTypes0[i]); //<- logical vector indicating for each type if it could infect type of i matchType = typeInfective[eventTypes0]; sources[i] = idx[infectivity & proximity & matchType]; } return wrap(sources); END_RCPP } surveillance/src/twins.cc0000644000176200001440000024750712743646613015243 0ustar liggesusers/******************************************************************* * Authors: * Mathias Hofmann * Michael Hoehle * Volker Schmid * Contributors: * Michaela Paul * Daniel Sabanes Bove * Sebastian Meyer * History: * July 2016 (SM) -- dropped deprecated "register" storage class specifier * April 2012 (SM) -- replaced exit() calls by Rf_error() * March 2012 (DSB) -- changed long types to int to be in accordance with R * (we observed bad allocations in 64 bit machines) * May 2010 (DSB) -- modified from Oct 2008 * * Markov Chain Monte Carlo (MCMC) estimation in the Branching Process * like Epidemic Model. Instead of a slow R solution this code * provides a faster C++ solution. Can be invoked through R or be * programmed as a librrary. This code uses the Gnu Scientific Library * (GSL) available from http://sources.redhat.com/gsl/ * * For now this code is quick & dirty. A more OO framework would be nice * to enable better programming, but this will probably be speedwise slower. *******************************************************************/ #include #include /*New C++ uses header iostream (without the .h) followed by a namespace*/ using namespace std; #include /* Replaced calls to GSL with functions from the R API */ #include #include /*wrappers to what used to be GSL functions*/ #include "gsl_wrappers.h" // Dynamic_2d_array class by David Maisonave (609-345-1007) (www.axter.com) // Description: // The dynamic array class listed below is more efficient then other // similar classes that use temporary objects as return types, or use // an std::vector as a return type. // // It's also more compatible with a C style 2D array, in that the // array is in one continuous memory block. This makes it possible // to pass this array object to a C Function that has a C-Style // 2D array for a parameter. // Example usage: /* Dynamic_2d_array MyIntArray(12, 34); MyIntArray[0][1] = 123; cout << MyIntArray[0][1] << endl; */ template < class T > class Dynamic_2d_array { public: // constructor Dynamic_2d_array(size_t row, size_t col) : m_row(row), m_col(col), m_data((row!=0 && col!=0) ? new T[row*col] : NULL) {} // copy ctr Dynamic_2d_array(const Dynamic_2d_array& src) : m_row(src.m_row), m_col(src.m_col), m_data((src.m_row!=0 && src.m_col!=0) ? new T[src.m_row*src.m_col] : NULL) { for(size_t r=0; r LongMatrix; typedef Dynamic_2d_array DoubleMatrix; typedef Dynamic_2d_array IntMatrix; // Analogous class for vectors (== 1D arrays) template < class T > class Dynamic_1d_array { public: // constructor Dynamic_1d_array(size_t length) : m_length(length), m_data((length !=0) ? new T[length] : NULL) {} // copy ctr Dynamic_1d_array(const Dynamic_1d_array& src) : m_length(src.m_length), m_data((src.m_length!=0) ? 
new T[src.m_length] : NULL) { for(size_t i=0; i LongVector; typedef Dynamic_1d_array DoubleVector; typedef Dynamic_1d_array IntVector; /************************************ Globals *************************************/ /*Setup params*/ int overdispersion; int varnu; int la_rev; int K_geom; int la_estim; int nu_trend; int theta_pred_estim; int xi_estim; int delta_rev; int xi_estim_delta; int epsilon_rev; int xi_estim_epsilon; int xi_estim_psi; double psiRWSigma = 0.25; double xRWSigma = 0.25; double taubetaRWSigma = 0.25; /*Priors*/ double alpha_lambda = 1.0; double beta_lambda = 1.0; double alpha_xi = 1.0; double beta_xi = 1.0; double p_K = 1.0; double alpha_nu = 1.0; double beta_nu = 1.0; double alpha_psi = 1.0; double beta_psi = 10.0; double alpha_a=1; double alpha_b=0.001; double beta_a=1.0; double beta_b=.00001; double gamma_a=1; double gamma_b=0.001; double delta_a=1; double delta_b=0.001; double epsilon_a=1; double epsilon_b=0.001; /********************************************************************* * Compute sum from 1 to I and 1 to n of a vektor with indices 0,...,I * of a vektor with indices 0,...,n * Parameters: * * X a vector with indices 0,..,I of a vector with indices 0,...,n * I "length" of vector (true length due to zero indice is I+1) *********************************************************************/ double sumIn(const LongMatrix& X, int I, int n) { double res = 0; for (int i=1; i<=I; i++){ for (int t=1; t<=n; t++) { res += X[i][t]; } } return(res); } /********************************************************************* * Compute sum from 1 to I and 1 to n of a vektor with indices 0,...,I * of a vektor with indices 0,...,n * This is the double version * Parameters: * * X a vector with indices 0,..,I of a vector with indices 0,...,n * I "length" of vector (true length due to zero indice is I+1) *********************************************************************/ double sumIn(const DoubleMatrix& X, int I, int n) { double res = 0; for (int i=1; i<=I; i++){ for (int t=1; t<=n; t++) { res += X[i][t]; } } return(res); } /********************************************************************* * Compute sum from 1 to I and 1 to n of a vektor with indices 0,...,I * of a vektor with indices 0,...,n * Parameters: * * X a vector with indices 0,..,I of a vector with indices 0,...,n * I "length" of vector (true length due to zero indice is I+1) *********************************************************************/ double sumIn2(const LongMatrix& X, int I, int n) { double res = 0; for (int i=1; i<=I; i++){ for (int t=2; t<=n; t++) { res += X[i][t]; } } return(res); } /********************************************************************* * Compute sum from 1 to I and 1 to n of a vektor with indices 0,...,I * of a vektor with indices 0,...,n * This is the double version * Parameters: * * X a vector with indices 0,..,I of a vector with indices 0,...,n * I "length" of vector (true length due to zero indice is I+1) *********************************************************************/ double sumIn2(const DoubleMatrix& X, int I, int n) { double res = 0; for (int i=1; i<=I; i++){ for (int t=2; t<=n; t++) { res += X[i][t]; } } return(res); } /********************************************************************* * Compute sum from 1 to I of a vektor with indices 0,...,I * of a vektor with indices 0,...,n * Parameters: * * X a vector with indices 0,..,I of a vector with indices 0,...,n * I "length" of vector (true length due to zero indice is I+1) 
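* t time index at which the I units are summed up,
* i.e. the function returns sum_{i=1}^{I} X[i][t]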
*********************************************************************/ double sumI1(const LongMatrix& X, int I, int t) { double res = 0; for (int i=1; i<=I; i++) { res += X[i][t]; } return(res); } /********************************************************************* * Compute sum from 1 to I of a vektor with indices 0,...,I * of a vektor with indices 0,...,n * This is the double version * Parameters: * * X a vector with indices 0,..,I of a vector with indices 0,...,n * I "length" of vector (true length due to zero indice is I+1) *********************************************************************/ double sumI1(const DoubleMatrix& X, int I, int t) { double res = 0; for (int i=1; i<=I; i++) { res += X[i][t]; } return(res); } /********************************************************************* * factorial function *********************************************************************/ long factorial(long x){ long fac=1; if(x<0){ Rf_error("negative value passed to factorial function\n");} else{ if(x==0){fac=1;} else{ for(int i=1;i<=x;i++){ fac*=i; } } } return(fac); } /********************************************************************* * logit function *********************************************************************/ double logit(double y){ if(y <= 0 || y >= 1){ Rf_error("y <= 0 or y >= 1 in logit function.\n"); } double logit; logit = log(y/(1-y)); return(logit); } /********************************************************************* * inverse logit function *********************************************************************/ double invlogit(double y){ double invlogit; invlogit = 1/(1 + exp(-y)); return(invlogit); } /********************************************************************* * inverse logit function diff. *********************************************************************/ double invlogitd(double y){ double invlogitd; invlogitd = exp(-y)/pow((1.0 + exp(-y)),2); return(invlogitd); } /********************************************************************* * Makes one Metropolis-Hastings update step, log-scale *********************************************************************/ double updateMHlog(double &par, double parStar, double logFpar, double logFparStar, double &acceptedpar) { double accpar = exp(logFparStar - logFpar); if (gsl_rng_uniform() <= accpar) {par = parStar; acceptedpar++;} return(0); } /********************************************************************* * Makes one Metropolis-Hastings update step *********************************************************************/ double updateMH(double &par, double parStar, double Fpar, double FparStar, double &acceptedpar) { double accpar = FparStar/Fpar; if (gsl_rng_uniform() <= accpar) {par = parStar; acceptedpar++;} return(0); } /********************************************************************* * Tunes a parameter *********************************************************************/ double tune(double& parameter, double accepted, double samples, double& tunepar, double a=0.3, double b=0.4){ tunepar=1; if ((accepted/samples>a) && (accepted/samplesb) { parameter *= 1.5; } else if (accepted/samples0){return x;}else{return -x;} } double MIN(double a, double b) { if (a2) { REprintf("Error in the twins.cc function invers()\n"); } for (int i=0; i< k*k; i++) { A[i]=ergebnis[i]; } return; } void mxschreibe(double* A, int a, int b) { for (int i=0; i= gsl_rng_uniform()) { alpha[i]=alphaneu; acc_alpha += 1; } } return; } void erzeuge_b_Q(DoubleVector& gamma , double* my, double* Q, const DoubleVector& alpha, 
DoubleVector& delta, DoubleVector& beta, const LongMatrix& X, const LongMatrix& Z, const LongMatrix& Y, int n, int I, double taubeta, int rw, const DoubleMatrix& lambda, double p, const DoubleMatrix& xcov, int ncov, const DoubleMatrix& omega, const DoubleMatrix& omegaX,int scov, int mode) { if (mode==1) { /* b-vektor des Proposals*/ for (int t=0;tgsl_rng_uniform()){ gamma[j] = gammajStar; acc_gamma += 1; } return; } void update_beta_t(int t, const DoubleVector& alpha, DoubleVector& beta, DoubleVector& gamma, DoubleVector& delta, int ncov, const DoubleMatrix& xcov, const LongMatrix& X, int n, int I, double taubeta, long& acc_beta, const DoubleMatrix& omega, int scov) { double h = 0; double c = 0; double d = 0; for(int i=1;i<=I;i++){ h -= omega[i][t]*delta[t]*exp(alpha[i] + beta[t] + sumg(ncov,xcov,gamma,t,scov)); /* h ist h(beta[t]^0), beta ist \beta^0, betatStar ist \beta*/ c += X[i][t]; } if(t==2){ c -= taubeta*(beta[t+2]-2*beta[t+1]); d = taubeta; } if(t==3){ c -= taubeta*((beta[t+2]-2*beta[t+1]) + (-2*beta[t+1] - 2*beta[t-1])); d = 5*taubeta; } if((t>=4)&&(t<=(n-2))){ c -= taubeta*((beta[t+2]-2*beta[t+1]) + (-2*beta[t+1] - 2*beta[t-1]) + (beta[t-2] - 2*beta[t-1])); d = 6*taubeta; } if(t==(n-1)){ c -= taubeta*((-2*beta[t+1] - 2*beta[t-1]) + (beta[t-2] - 2*beta[t-1])); d = 5*taubeta; } if(t==n){ c -= taubeta*(beta[t-2] - 2*beta[t-1]); d = taubeta; } double s = sqrt(1/(d - h)); /* s ist s*/ double b = c + (1 - beta[t])*h; double m = b*s*s; double betatStar = gsl_ran_gaussian(s) + m; double h2 = 0; for(int i=1;i<=I;i++){ h2 -= omega[i][t]*delta[t]*exp(alpha[i] + betatStar + sumg(ncov,xcov,gamma,t,scov)); /* h2 ist h(beta[t])*/ } double s2 = sqrt(1/(d - h2)); /* s2 ist s^0*/ double b2 = c + (1 - betatStar)*h2; double m2 = b2*s2*s2; double a = 0; a += betatStar*c; a -= beta[t]*c; a -= 0.5*d*betatStar*betatStar; a += 0.5*d*beta[t]*beta[t]; a += h2; a -= h; a += log(s); a -= log(s2); a += 0.5*((betatStar-m)/s)*((betatStar-m)/s); a -= 0.5*((beta[t]-m2)/s2)*((beta[t]-m2)/s2); if(exp(a)>gsl_rng_uniform()){ beta[t] = betatStar; acc_beta += 1; } return; } void update_lambda_br(DoubleMatrix& lambda, DoubleMatrix& lambda_br,DoubleVector& xi_lambda, IntMatrix& breakpoints, IntMatrix& breakpointsStar, IntVector& K, IntVector& KStar, IntVector& Km1, double alpha_lambda, double beta_lambda, const LongMatrix& Y, const LongMatrix& Z, int n, int I, double& acceptedbr, const DoubleMatrix& omega, int theta_pred_estim, int xi_estim, int K_geom, double p_K, double alpha_xi, double beta_xi) { /*update breakpoints of lambda using reversible jump MCMC*/ int newbreakpoint =0; int removebreakpoint=0; int newbreakpointnumber=0; int u; double v=1; double a; double alpha_la; double beta_la; for(int i=1;i<=I;i++){ if(!theta_pred_estim){ a=gsl_rng_uniform(); if(a<0.5){u=1;}else{u=2;} if(K[i]==1){u=2;v=.5;} /*K[i] is number of segments of lambda*/ if(K[i]==(n-1)){u=1;v=.5;} /*if(!theta_pred_estim) max of K[i] is n-1*/ /*decide if new brreakpoint or remove breakpoint*/ if(u==1){/*remove breakpoint*/ if(K[i]==2){v=2;} KStar[i]=K[i]-1; a=gsl_rng_uniform(); removebreakpoint=(int)floor(a*(double)(K[i]-1))+1; /*generate breakpointsStar*/ for(int k=1;kn){need=1;} for(int k=1;k<=K[i];k++){ if(newbreakpoint==breakpoints[i][k]){ need=1; } } }/*while(need==1)*/ /*generate breakpointsStar*/ for(int k=1;k<=K[i];k++){ if((newbreakpoint>breakpoints[i][k-1])&&(newbreakpoint(n+1)){need=1;} for(int k=1;k<=K[i];k++){ if(newbreakpoint==breakpoints[i][k]){ need=1; } } }/*while(need==1)*/ /*generate breakpointsStar*/ for(int k=1;k<=K[i];k++){ 
if((newbreakpoint>breakpoints[i][k-1])&&(newbreakpointn){need=1;} for(int k=1;k<=K_delta;k++){ if(newbreakpoint==breakpoints_delta[k]){ need=1; } } }//while(need==1) //generate breakpointsStar_delta for(int k=1;k<=K_delta;k++){ if((newbreakpoint>breakpoints_delta[k-1])&&(newbreakpointn){need=1;} for(int k=1;k<=K_epsilon;k++){ if(newbreakpoint==breakpoints_epsilon[k]){ need=1; } } }//while(need==1) //generate breakpointsStar_epsilon for(int k=1;k<=K_epsilon;k++){ if((newbreakpoint>breakpoints_epsilon[k-1])&&(newbreakpoint> n; // Rprintf("n=%d\n",n); // int I=1; // //fin >> I; // //cout << "I=" << I << endl; // long **Z = new long*[I+1]; // for (long i=0; i<=I; i++){ // Z[i] = new long[n+1]; // } // for (long t=0; t<=n; t++){ // Z[0][t]=0; // } // for (long i=0; i<=I; i++){ // Z[i][0]=0; // } // //Start @ index 1. (Z[0] is not defined) // int t=1; // while (!fin.eof() && (t<=n)) { // int i=1; // while (!fin.eof() && (i<=I)) { // fin >> Z[i][t]; // i++; // } // t++; // } // fin.close(); // //Return the result consisting of Z and n // *size = n; // *size2 = I; // return(Z); // } /* Calculate the deviance of the data we use that the data, Z, is a * sum of Poisson distributed variables, i.e. it is Poisson * distributed. * * Z_t = S_t + X_t + Y_t, i.e. * Z_t ~ Po(nu*p + nu*(1-p) + lambda*W_{t-1}) * * D = -2log p(Z|theta) + 2 log p(Z|\mu(theta)=Z) */ double satdevalt(int n, int I, const LongMatrix& X, const LongMatrix& Y, const LongMatrix& Z, const DoubleMatrix& omega, const DoubleMatrix& lambda, const DoubleMatrix& nu, double *xi, DoubleMatrix& eta, DoubleMatrix& eta2, DoubleMatrix& varr, double psi, int overdispersion) { double res = 0; //Loop over all data for (int i=1; i<=I; i++) { for (int t=2; t<=n; t++) { //Use the equation derived for the saturated deviance in the paper //calculate the mean and variance of Z[i][t] eta[i][t] = (nu[i][t]*xi[i]+lambda[i][t]*Z[i][t-1]); eta2[i][t] = eta[i][t]; if(overdispersion){ varr[i][t] = eta2[i][t]*(1+eta2[i][t]/psi); }else{ varr[i][t] = eta2[i][t]; } //calculate the Deviance in the Poisson and NegBin case if(!overdispersion){ if (Z[i][t] == 0) { res += 2 * eta[i][t]; } else { res += 2 * ( Z[i][t] * log(Z[i][t]/eta[i][t]) - Z[i][t] + eta[i][t]); } } if(overdispersion){ if (Z[i][t] == 0) { res += 2 * ( - (Z[i][t]+psi) * log((Z[i][t]+psi)/(eta[i][t]+psi))); } else { res += 2 * ( - (Z[i][t]+psi) * log((Z[i][t]+psi)/(eta[i][t]+psi)) + Z[i][t] * log(Z[i][t]/eta[i][t])); } } } } return(res); } /* Calculate the deviance of the data we use that the data, Z, is a * sum of Poisson distributed variables, i.e. it is Poisson * distributed. * * Z_t = X_t + Y_t, i.e. 
* Z_t ~ Po(nu_t + lambda_t*Z_{t-1}) * * D = -2log p(Z|theta) */ double satdev(int n, int I, const LongMatrix& Z, const DoubleMatrix& lambda, const DoubleMatrix& nu, double *xi, DoubleVector& epsilon, DoubleMatrix& eta, double psi, int overdispersion) { double res = 0; //Loop over all data for (int i=1; i<=I; i++) { for (int t=2; t<=n; t++) { //Use the equation derived for the saturated deviance in the paper //calculate the mean and variance of Z[i][t] eta[i][t] = (epsilon[t] + nu[i][t]*xi[i]+lambda[i][t]*Z[i][t-1]); //calculate the Deviance in the Poisson and NegBin case if(!overdispersion){ res -= 2 * ( Z[i][t] * log(eta[i][t]) - gsl_sf_lngamma(Z[i][t]+1) - eta[i][t]); } if(overdispersion){ res -= 2 * ( gsl_sf_lngamma(Z[i][t]+psi) - gsl_sf_lngamma(Z[i][t]+1) - gsl_sf_lngamma(psi) - (Z[i][t]+psi)*log(eta[i][t]+psi) + psi*log(psi) + Z[i][t]*log(eta[i][t])); } } } return(res); } // Calculate chi square the sum of the qudratic pearson residuals (z-mean)/sd double chisq(int n, int I, const LongMatrix& Z, const DoubleMatrix& lambda, const DoubleMatrix& nu, double *xi, DoubleVector& epsilon, DoubleMatrix& eta, DoubleMatrix& varr, DoubleMatrix& rpearson, double psi, int overdispersion) { double res = 0; //Loop over all data for (int i=1; i<=I; i++) { for (int t=2; t<=n; t++) { //calculate the mean and variance of Z[i][t] eta[i][t] = (epsilon[t] + nu[i][t]*xi[i]+lambda[i][t]*Z[i][t-1]); if(overdispersion){ varr[i][t] = eta[i][t]*(1+eta[i][t]/psi); }else{ varr[i][t] = eta[i][t]; } rpearson[i][t] = (Z[i][t]-eta[i][t])/sqrt(varr[i][t]); //calculate chisq in the Poisson and NegBin case res += rpearson[i][t]*rpearson[i][t]; } } return(res); } /********************************************************************** * Estimation in the basic epidemic model * */ void bplem_estimate(int verbose, ofstream &logfile, ofstream &logfile2, ofstream &acclog, const LongMatrix& Z, double* xi, int n, int I, int T, int nfreq, int burnin, int filter, int samples, int rw) { //Model parameters - start values double nu_const = alpha_nu/beta_nu; double lambda_const = 0.5; double psi = alpha_psi / beta_psi; double x = logit(lambda_const); if(!verbose) { Rprintf("------------------------------------------------\n"); if (!la_rev){ Rprintf("lambda: Ga(%f, %f)-->\t%f\n", alpha_lambda, beta_lambda, lambda_const); } if(!varnu){ Rprintf("nu: Ga(%f, %f)-->\t%f\n", alpha_nu, beta_nu, nu_const); } if(overdispersion){ Rprintf("psi: Ga(%f, %f)-->\t%f\n", alpha_psi, beta_psi, psi); } Rprintf("------------------------------------------------\n"); } //Allocate arrays for all latent variables and initialize them // first all 2D arrays (matrices) LongMatrix X(I+1, n+1); LongMatrix Y(I+1, n+1); LongMatrix S(I+1, n+1); DoubleMatrix omega(I+1, n+1); DoubleMatrix sumX(I+1, n+1); DoubleMatrix sumY(I+1, n+1); DoubleMatrix sumS(I+1, n+1); DoubleMatrix sumomega(I+1, n+1); DoubleMatrix nu(I+1, n+1); DoubleMatrix lambda(I+1, n+2); DoubleMatrix lambda_br(I+1, n+2); DoubleMatrix eta(I+1, n+1); DoubleMatrix eta2(I+1, n+1); DoubleMatrix varr(I+1, n+1); DoubleMatrix rpearson(I+1, n+1); DoubleMatrix Sumeta(I+1, n+1); DoubleMatrix Sumvarr(I+1, n+1); DoubleMatrix Sumrpearson(I+1, n+1); IntMatrix breakpoints(I+1, n+2); IntMatrix breakpointsStar(I+1, n+2); LongMatrix bp(I+1, n+2); // long** X = new long*[I+1]; // long** Y = new long*[I+1]; // long** S = new long*[I+1]; // double **omega= new double*[I+1]; // double** sumX = new double*[I+1]; // double** sumY = new double*[I+1]; // double** sumS = new double*[I+1]; // double **sumomega= new double*[I+1]; // 
double **nu= new double*[I+1]; // double *alpha=new double[I+1]; // double* beta= new double[n+1]; // double **lambda=new double*[I+1]; // double **lambda_br=new double*[I+1]; // double **eta=new double*[I+1]; // double **eta2=new double*[I+1]; // double **varr=new double*[I+1]; // double **rpearson=new double*[I+1]; // double **Sumeta=new double*[I+1]; // double **Sumvarr=new double*[I+1]; // double **Sumrpearson=new double*[I+1]; // int **breakpoints=new int*[I+1]; // int **breakpointsStar=new int*[I+1]; // long **bp=new long*[I+1]; // We would have to delete the pointers manually at the end of the routine // in order not to corrupt the memory!!! // for (long i=0; i<=I; i++){ // X[i]=new long[n+1]; // Y[i]=new long[n+1]; // S[i]=new long[n+1]; // omega[i]=new double[n+1]; // sumX[i]=new double[n+1]; // sumY[i]=new double[n+1]; // sumS[i]=new double[n+1]; // sumomega[i]=new double[n+1]; // nu[i]=new double[n+1]; // lambda[i]=new double[n+2]; // lambda_br[i]=new double[n+2]; // breakpoints[i]=new int[n+2]; // breakpointsStar[i]=new int[n+2]; // bp[i]=new long[n+2]; // eta[i]=new double[n+1]; // eta2[i]=new double[n+1]; // varr[i]=new double[n+1]; // rpearson[i]=new double[n+1]; // Sumeta[i]=new double[n+1]; // Sumvarr[i]=new double[n+1]; // Sumrpearson[i]=new double[n+1]; // } // then the rest (1D arrays and numbers) DoubleVector alpha(I + 1); DoubleVector beta(n + 1); DoubleVector delta(n + 2); DoubleVector delta_br(n + 2); double xi_delta = 1; DoubleVector epsilon(n + 2); DoubleVector epsilon_br(n + 2); double xi_epsilon = 1; double xi_psi = 1; IntVector K(I + 1); IntVector Km1(I + 1); IntVector KStar(I + 1); DoubleVector xi_lambda(I + 1); IntVector breakpoints_delta(n+2); IntVector breakpointsStar_delta(n+2); LongVector bp_delta(n+2); int K_delta = 0; int Km1_delta = 0; int KStar_delta = 0; IntVector breakpoints_epsilon(n+2); IntVector breakpointsStar_epsilon(n+2); LongVector bp_epsilon(n+2); int K_epsilon = 0; int Km1_epsilon = 0; int KStar_epsilon = 0; LongVector Xnp1(I + 1); LongVector Snp1(I + 1); LongVector Ynp1(I + 1); LongVector Znp1(I + 1); DoubleVector omeganp1(I + 1); DoubleVector nunp1(I + 1); if(!varnu){ for (int i=0; i<=I; i++) { for (int t=0; t<=n; t++) { nu[i][t] = alpha_nu/beta_nu; } } } for (int i=0; i<=I; i++) { for (int t=0; t<=n; t++) { lambda[i][t] = lambda_const; } } for (int i=0; i<=I; i++) { for (int t=0; t<=n; t++) { X[i][t] = 0; S[i][t] = 0; Y[i][t] = Z[i][t]; omega[i][t] = 1; eta[i][t] = 0; bp[i][t] = 0; bp_delta[t] = 0; bp_epsilon[t] = 0; sumX[i][t] = 0; sumY[i][t] = 0; sumS[i][t] = 0; sumomega[i][t] = 0; Sumeta[i][t] = 0; Sumrpearson[i][t] = 0; } bp[i][n+1] = 0; xi_lambda[i] = 1; bp_delta[n+1] = 0; bp_epsilon[n+1] = 0; } /* Fuer Saisonkomponenente */ int ncov; int scov = 0; if(delta_rev){ scov = 1; } // determine the number of covariates and allocate then // the vectors and design matrix. ncov = nu_trend ? 
(nfreq * 2 + 2) : (nfreq * 2 + 1); DoubleVector gamma(ncov); DoubleVector gammaneu(ncov); DoubleMatrix xcov(ncov, n+2); // bad, do not do that: // double* gamma; // double* gammaneu = NULL; // double** xcov; if(!nu_trend){ // ncov=nfreq*2+1; // gamma = new double[ncov]; // gammaneu = new double[ncov]; // xcov = new double*[ncov]; // for (int i=0; i tuneSampleSize && (!verbose) && (sampleCounter % (int)floor(sampleSize/100.0) == 0)) { Rprintf("%d%%", sampleCounter*100 / sampleSize); } if(0){ if(varnu){ if ((sampleCounter % 100 == 0)) { Rprintf("alpha\t%f beta\t%f %f gamma[0]\t%f gamma[1]\t%f gamma[2]\t%f %f lambda\t%f\n", (double)acc_alpha/I, beta[2], (double)acc_beta, gamma[0], gamma[1], gamma[2],(double)acc_gamma, lambda[1][2]); /* cout<< "alpha\t" << (double)acc_alpha/I<<" " << "beta\t" <<" "<< beta[2] <<" "<< (double)acc_beta<<" " << "gamma[0]\t" <<" "<< gamma[0] <<" "<< "gamma[1]\t" <<" " << gamma[1] <<" "<< "gamma[2]\t" <<" "<< gamma[2] <<" " << (double)acc_gamma<<" " << "lambda\t" << lambda[1][2] << endl;*/ } } if(la_rev){ if ((sampleCounter % 100 == 0)) { Rprintf("K\t%d\n", K[1]); } } if(delta_rev){ if ((sampleCounter % 100 == 0)) { Rprintf("K_delta\t%f delta[2]\t%f\n", K_delta, delta[2]); } } if(epsilon_rev){ if ((sampleCounter % 100 == 0)) { Rprintf("K_epsilon\t%f epsilon[2]\t%f\n", K_epsilon, epsilon[2]); } } } // cout << ":"<) superflous. double accpsi = exp(logFPsiStar-logFPsi); //Do we accept? if ((psi>0) && (gsl_rng_uniform() <= accpsi)) {psi = psiStar; acceptedPsi++;} } //update xi_psi if(xi_estim_psi){ double a = alpha_psi + 1; double b = beta_psi + psi; xi_psi = gsl_ran_gamma (a, 1/b); } ////////////////////////////////////////////////////////////////////////// //State information to file if we are on an filter'th sample if ((sampleCounter>burnin) && ((sampleCounter-burnin) % filter == 0)) { logfile << sampleCounter << "\t"; if (!la_rev){ logfile << lambda_const << "\t"; } logfile << psi << "\t"; logfile << xi_psi << "\t"; if(!varnu){ logfile << nu_const << "\t"; } } if(varnu){ // Unterprogramme fuer den Update von alpha und beta if (I>=2) { alphaupdate(gamma, alpha, beta, delta, lambda, 1, I, n, Y, X, acc_alpha, taualpha, ncov, xcov, xreg, omega, omega, scov,1); taualpha=update_tau_alpha(alpha, I, alpha_a, alpha_b, xreg); if (sampleCounter%3==0) { if(scov==0){ double asum=0; for (int i=1; i<=I; i++) { asum+=(alpha[i]-xreg[i]); } for (int i=1; i<=I; i++) { alpha[i]-=(asum/I); } gamma[0]=gamma[0]+(asum/I); } } } else { alpha[1]=0.0; } //Update fuer zeitlichen effekt mit RW if (rw>0) { // update_beta_nurrw(gamma, alpha, beta, delta, X, Z, Y, n, I, taubeta, rw, 1, lambda, acc_beta, sampleCounter, my, my2, temp, z, theta, Q, Q2, L, L2, xcov, ncov, scov, omega, omega, 1); //update_beta_block(alpha, beta, gamma, delta, X, n, I, taubeta, rw, acc_beta, sampleCounter, n1, n2, my, my2, z, theta, beta0, Q, Q2, L, L2, xcov, ncov, scov, omega); /*hofmann - no fortran update_beta_tau_block(alpha, beta, gamma, delta, beta_a, beta_b, X, n, I, taubeta, rw, acc_beta, taubetaRWSigma, taubetaStar, sampleCounter, n1, n2, my, my2, z, theta, beta0, Q, Q2, L, L2, xcov, ncov, scov, omega); */ //taubeta=beta_a/beta_b; // taubeta=hyper(rw, beta, beta_a, beta_b, n); //taubeta=720; //if(sampleCounter%500==1){cout << taubeta << endl << endl;} // for(int t=2;t<=n;t++){ // update_beta_t(t, alpha, beta, gamma, delta, ncov, xcov, X, n, I, taubeta, acc_beta, omega, scov); // } if(scov==0){ // if (sampleCounter%1==0) // { double bsum=0; for (int t=2; t<=n; t++) { bsum+=(beta[t]); } for (int t=2; t<=n; t++) { 
beta[t]-=(bsum/(n-1)); } gamma[0]=gamma[0]+(bsum/(n-1)); // } } } //if (rw>0) //update saison //update_gamma( alpha, beta, gamma,ncov, xcov, X, Z, Y, n, I, taugamma, 1, lambda, acc_gamma, P, P2, gammaalt, z2, L, Q, omega, omega,1); taugamma=gamma_b; // cout << gamma[0]<<" " << gamma[1] << endl; for(int j=scov;jburnin) && ((sampleCounter-burnin) % filter == 0)) { // for (int i=1;i<=I; i++) { // for (int t=1; t<=n; t++) { // logfile << nu[i][t] << "\t"; // } // } // logfile << mu << "\t"; for (int j=0; jburnin) && ((sampleCounter-burnin) % filter == 0)) { logfile << Km1_delta<<"\t"<< xi_delta<<"\t"; for (int j=2; j<=n; j++) { logfile << delta[j] << "\t"; } } if (sampleCounter>burnin) { for (int k=1; k<=K_delta; k++) { for (int j=2; j<=n; j++) { if (breakpoints_delta[k]==j){ bp_delta[j]+=1; } } } } }//if(delta_rev) }//if }//if varnu if(epsilon_rev){ update_epsilon_br(epsilon, epsilon_br, xi_epsilon, breakpoints_epsilon, breakpointsStar_epsilon, K_epsilon, KStar_epsilon, Km1_epsilon, epsilon_a, epsilon_b, S, n, I, acceptedbr_epsilon, omega, xi_estim_epsilon, K_geom, p_K, alpha_xi, beta_xi); if ((sampleCounter>burnin) && ((sampleCounter-burnin) % filter == 0)) { logfile << Km1_epsilon<<"\t"<< xi_epsilon<<"\t"; for (int j=2; j<=n; j++) { logfile << epsilon[j] << "\t"; } } if (sampleCounter>burnin) { for (int k=1; k<=K_epsilon; k++) { for (int j=2; j<=n; j++) { if (breakpoints_epsilon[k]==j){ bp_epsilon[j]+=1; } } } } }//if(epsilon_rev) if(la_estim){ if (la_rev) { update_lambda_br(lambda, lambda_br, xi_lambda, breakpoints, breakpointsStar, K, KStar, Km1, alpha_lambda, beta_lambda, Y, Z, n, I, acceptedbr, omega, theta_pred_estim, xi_estim, K_geom, p_K, alpha_xi, beta_xi); if ((sampleCounter>burnin) && ((sampleCounter-burnin) % filter == 0)) { logfile << Km1[1]<<"\t"<< xi_lambda[1]<<"\t"; for (int j=2; j<=n; j++) { logfile << lambda[1][j] << "\t"; } } for (int i=1;i<=I; i++) { if (sampleCounter>burnin) { for (int k=1; k<=K[i]; k++) { for (int j=2; j<=n; j++) { if (breakpoints[i][k]==j){ bp[i][j]+=1; } } } } } }//if(la_rev) } // if(la_estim) // cout << S[1][106] << endl; // cout << "test" << endl; //Loop over the individual X[t], Y[t], S[t], and omega[t] for (int i=1;i<=I; i++) { for (int t=2; t<=n; t++) { //Update X double binp = nu[i][t]*xi[i] / (epsilon[t] + nu[i][t]*xi[i] + lambda[i][t] * Z[i][t-1]); X[i][t] = gsl_ran_binomial( binp, Z[i][t]); //Update S binp = epsilon[t] / (epsilon[t] + lambda[i][t] * Z[i][t-1]); //hoehle 9 Apr 2009 -- protection against Z[i][t-1]==0 case, leading to binp = nan if (Z[i][t-1] == 0) {binp = 1;} S[i][t] = gsl_ran_binomial( binp, (Z[i][t] - X[i][t])); //Update Y Y[i][t] = Z[i][t] - X[i][t] - S[i][t]; //Debug //cout << "i=" << i << "\tt=" << t << "\tX=" << X[i][t] << "\tY=" << Y[i][t] << "\tZ=" << Z[i][t] << "\tS=" << S[i][t] << "\tepsilon=" << epsilon[t] << "\tbinp=" << binp << endl; //Update omega[t] in case of overdispersion if(overdispersion){ double a = psi + Z[i][t]; double b = psi + epsilon[t] + nu[i][t] + lambda[i][t]*Z[i][t-1]; omega[i][t] = gsl_ran_gamma(a,1/b); } //Write state to log-file. 
if (sampleCounter>burnin) { sumX[i][t] += X[i][t]; sumY[i][t] += Y[i][t]; sumS[i][t] += S[i][t]; sumomega[i][t] += omega[i][t]; Sumeta[i][t] += eta[i][t]; Sumvarr[i][t] += varr[i][t]; Sumrpearson[i][t] += rpearson[i][t]; } }//for t }//for i // cout << "test2" << endl; // cout << Z[1][2] << endl; // cout << X[1][2] << endl; // cout << Y[1][2] << endl; // cout << S[1][2] << endl; //Praediktive Verteilung fuer variables nu for (int i=1;i<=I;i++) { if(!theta_pred_estim){ double p_thetanp1 = ((double(K[i]))/double(n)); //(1+double(K[i])) if(K_geom){ p_thetanp1 = (double(K[i])*(1.0-p_K)*(1.0-pow((double)1.0-p_K,double(n-1))))/((double(n)-1.0)*(1.0-pow((double)1.0-p_K,double(n)))); } if(gsl_rng_uniform()<=p_thetanp1){ if (sampleCounter>burnin) { bp[i][n+1] += 1; } double alpha_la = alpha_lambda; double beta_la = beta_lambda; if(xi_estim){ beta_la = xi_lambda[i]; } lambda[i][n+1]=gsl_ran_gamma(alpha_la,1/beta_la); } } if(overdispersion){ omeganp1[i] = gsl_ran_gamma(psi,1/psi); }else{ omeganp1[i] = 1; } if(varnu){ a = 0; for(int j=scov;j0){ a += gsl_ran_gaussian(sqrt(1/taubeta)) + (2*beta[n-1]-beta[n]); } if(delta_rev){ double p_thetanp1 = ((double(K[i]))/double(n)); //(1+double(K[i])) if(K_geom){ p_thetanp1 = ((double(K[i]))*(1.0-p_K)*(1.0-pow((double)1.0-p_K,double(n-1))))/((double(n)-1.0)*(1.0-pow((double)1.0-p_K,double(n)))); } if(gsl_rng_uniform()<=p_thetanp1){ if (sampleCounter>burnin) { bp_delta[n+1] += 1; } double alpha_de = delta_a; double beta_de = delta_b; if(xi_estim){ beta_de = xi_delta; } delta[n+1]=gsl_ran_gamma(alpha_de,1/beta_de); } a += log(delta[n+1]); } nunp1[i] = exp(a); }else{ nunp1[i]=nu[i][n]; } if(epsilon_rev){ double p_thetanp1 = ((double(K[i]))/double(n)); //(1+double(K[i])) if(K_geom){ p_thetanp1 = ((double(K[i]))*(1.0-p_K)*(1.0-pow((double)1.0-p_K,double(n-1))))/((double(n)-1.0)*(1.0-pow((double)1.0-p_K,double(n)))); } if(gsl_rng_uniform()<=p_thetanp1){ if (sampleCounter>burnin) { bp_epsilon[n+1] += 1; } double alpha_ep = epsilon_a; double beta_ep = epsilon_b; if(xi_estim){ beta_ep = xi_epsilon; } epsilon[n+1]=gsl_ran_gamma(alpha_ep,1/beta_ep); } } Xnp1[i] = gsl_ran_poisson(omeganp1[i]*nunp1[i]*xi[i]); Ynp1[i] = gsl_ran_poisson(lambda[i][n+1]*omeganp1[i]*(Z[i][n])); Snp1[i] = gsl_ran_poisson(omeganp1[i]*epsilon[n+1]); Znp1[i] = Xnp1[i] + Ynp1[i] + Snp1[i]; if ((sampleCounter>burnin) && ((sampleCounter-burnin) % filter == 0)) { logfile << Znp1[1] << "\t"; } } if ((sampleCounter>burnin) && ((sampleCounter-burnin) % filter == 0)) { logfile << satdev(n,I,Z,lambda,nu,xi,epsilon,eta,psi,overdispersion) << endl; } logfile.flush(); //Tuning if(sampleCounter == tuneSampleSize){ if (!la_rev) { Rprintf("Current xRWSigma= %f --> acc rate= %f\n", xRWSigma, acceptedlambda/tuneSampleSize); tune(xRWSigma, acceptedlambda, tuneSampleSize,tunex); Rprintf("Corrected xRWSigma= %f\n", xRWSigma); } if(overdispersion){ Rprintf("\nCurrent psiRWSigma= %f --> acc rate = %f\n", psiRWSigma, acceptedPsi/tuneSampleSize); tune(psiRWSigma, acceptedPsi, tuneSampleSize,tunepsi); Rprintf("Corrected psiRWSigma= %f\n", psiRWSigma); } if(varnu&&(rw>0)){ Rprintf("Current taubetaRWSigma= %f --> acc rate %f\n", taubetaRWSigma, acc_beta/tuneSampleSize); tune(taubetaRWSigma, acc_beta, tuneSampleSize,tunetaubeta,0.1,0.4); Rprintf("Corrected taubetaRWSigma= %f\n", taubetaRWSigma); } //tunetaubeta = 0; need=tunex + tunepsi + tunetaubeta; if(need > 0){ acceptedlambda = 0; acceptedbr = 0; acceptedbr_delta = 0; acceptedbr_epsilon = 0; acceptedPsi = 0; sampleCounter = 0; if(varnu){ acc_beta=0; acc_alpha=0; acc_gamma=0; 
} //Fix seed of generator to reproduce results. // gsl_rng_set(r,seed); }//if }//if sampleCounter++; }//while counter //Write means to logfile2 for (int t=1;t<=n;t++) { logfile2 << (double)sumX[1][t]/((double)samples*(double)filter) << "\t" << (double)sumY[1][t]/((double)samples*(double)filter)<< "\t" << (double)sumomega[1][t]/((double)samples*(double)filter) << "\t"<< (double)bp[1][t]/((double)samples*(double)filter) << "\t"; } logfile2 << (double)bp[1][n+1]/((double)samples*(double)filter) << "\t"; logfile2 << endl; //Write accepted status to file if(overdispersion){acclog << "psi\t" << psiRWSigma << "\t" << (double)acceptedPsi/(double)sampleSize << endl;} if (!la_rev){acclog << "lambda\t" << xRWSigma << "\t" << (double)acceptedlambda/(double)sampleSize << endl;} if (la_rev){acclog << "br\t" << 0 << "\t" << (double)acceptedbr/(double)sampleSize << endl;} if(I>1){acclog << "alpha\t" << 0 <<"\t" <<(double)acc_alpha/((double)sampleSize*I)<0)){acclog <<"beta\t"<<0 <<"\t"<< (double)acc_beta/((double)sampleSize*(double)(n-1.0))< #include #include /*** C-implementation of "intrfr" functions ***/ // power-law kernel static double intrfr_powerlaw(double R, double *logpars) { double sigma = exp(logpars[0]); double d = exp(logpars[1]); double onemd = 1.0 - d; double twomd = 2.0 - d; if (fabs(onemd) < 1e-7) { return R - sigma * log1p(R/sigma); } else if (fabs(twomd) < 1e-7) { return log1p(R/sigma) - R/(R+sigma); } else { return (R*pow(R+sigma,onemd) - (pow(R+sigma,twomd) - pow(sigma,twomd))/twomd) / onemd; } } static double intrfr_powerlaw_dlogsigma(double R, double *logpars) { double newlogpars[2] = {logpars[0], log1p(exp(logpars[1]))}; // sigma*d = exp(logsigma+logd) return -exp(logpars[0]+logpars[1]) * intrfr_powerlaw(R, newlogpars); } static double intrfr_powerlaw_dlogd(double R, double *logpars) { double sigma = exp(logpars[0]); double d = exp(logpars[1]); double onemd = 1.0 - d; double twomd = 2.0 - d; if (fabs(onemd) < 1e-7) { return sigma * logpars[0] * (1.0-logpars[0]/2.0) - log(R+sigma) * (R+sigma) + sigma/2.0 * pow(log(R+sigma),2.0) + R; } else if (fabs(twomd) < 1e-7) { return (-log(R+sigma) * ((R+sigma)*log(R+sigma) + 2.0*sigma) + (R+sigma)*logpars[0]*(logpars[0]+2.0) + 2.0*R) / (R+sigma); } else { return (pow(sigma,twomd) * (logpars[0]*(-d*d + 3.0*d - 2.0) - 2.0*d + 3.0) + pow(R+sigma,onemd) * (log(R+sigma)*onemd*twomd * (sigma - R*onemd) + R*(d*d+1.0) + 2.0*d*(sigma-R) - 3.0*sigma) ) * d/onemd/onemd/twomd/twomd; } } // student kernel static double intrfr_student(double R, double *logpars) { double sigma = exp(logpars[0]); double d = exp(logpars[1]); double onemd = 1.0 - d; if (fabs(onemd) < 1e-7) { return log(R*R+sigma*sigma) / 2.0 - logpars[0]; } else { return ( pow(R*R+sigma*sigma,onemd) - pow(sigma*sigma,onemd) )/2/onemd; } } static double intrfr_student_dlogsigma(double R, double *logpars) { double sigma = exp(logpars[0]); double d = exp(logpars[1]); return sigma*sigma * ( pow(R*R+sigma*sigma,-d) - pow(sigma,-2.0*d) ); } static double intrfr_student_dlogd_primitive(double x, double sigma, double d) { double x2ps2 = x*x + sigma*sigma; double dm1 = d - 1.0; return (d*dm1*log(x2ps2) + d) / (2.0*dm1*dm1 * pow(x2ps2,dm1)); } static double intrfr_student_dlogd(double R, double *logpars) { double sigma = exp(logpars[0]); double d = exp(logpars[1]); if (fabs(d-1.0) < 1e-7) { return pow(logpars[0], 2.0) - pow(log(R*R+sigma*sigma), 2.0) / 4.0; } else { return intrfr_student_dlogd_primitive(R, sigma, d) - intrfr_student_dlogd_primitive(0.0, sigma, d); } } // lagged power-law kernel static 
double intrfr_powerlawL_sigmadxplint(double R, double sigma, double d) { double twomd = 2.0 - d; double xplint = (fabs(twomd) < 1e-7) ? log(R/sigma) : (pow(R,twomd)-pow(sigma,twomd))/twomd; return pow(sigma,d) * xplint; } static double intrfr_powerlawL(double R, double *logpars) { double sigma = exp(logpars[0]); double upper = (R > sigma) ? sigma : R; double res = upper*upper / 2.0; // integral over x*constant part if (R <= sigma) { return res; } else { return res + intrfr_powerlawL_sigmadxplint(R, sigma, exp(logpars[1])); } } static double intrfr_powerlawL_dlogsigma(double R, double *logpars) { double sigma = exp(logpars[0]); if (R <= sigma) { return 0.0; } double d = exp(logpars[1]); return d * intrfr_powerlawL_sigmadxplint(R, sigma, d); } static double intrfr_powerlawL_dlogd(double R, double *logpars) { double sigma = exp(logpars[0]); if (R <= sigma) { return 0.0; } double d = exp(logpars[1]); double twomd = 2.0 - d; double sigmadRtwomdd = pow(sigma,d) * pow(R,twomd) * d; return (fabs(twomd) < 1e-7) ? -pow(sigma*log(R/sigma), 2.0) : (sigmadRtwomdd * (-twomd)*log(R/sigma) - d*sigma*sigma + sigmadRtwomdd)/(twomd*twomd); } // Gaussian kernel static double intrfr_gaussian(double R, double *logsigma) { double sigma2 = exp(2*logsigma[0]); return sigma2 * (1 - exp(-R*R/2/sigma2)); } static double intrfr_gaussian_dlogsigma(double R, double *logsigma) { double sigma2 = exp(2*logsigma[0]); double R2sigma2 = R*R/2/sigma2; return 2*sigma2 * (1 - (1+R2sigma2)/exp(R2sigma2)); } // Exponential kernel static double intrfr_exponential(double R, double *logsigma) { double sigma = exp(logsigma[0]); return sigma * (sigma - (R+sigma)*exp(-R/sigma)); } static double intrfr_exponential_dlogsigma(double R, double *logsigma) { double sigma = exp(logsigma[0]); return 2*sigma*sigma - ((R+sigma)*(R+sigma) + sigma*sigma)*exp(-R/sigma); } /*** function to be called from R ***/ void siaf_polyCub1_iso( double *x, double *y, // vertex coordinates (open) int *L, // number of vertices int *intrfr_code, // F(R) identifier double *pars, // parameters for F(R) int *subdivisions, double *epsabs, double *epsrel, // Rdqags options int *stop_on_error, double *value, double *abserr, int *neval) // results { intrfr_fn intrfr; switch(*intrfr_code) { // = INTRFR_CODE in ../R/twinstim_siaf_polyCub_iso.R case 10: intrfr = intrfr_powerlaw; break; case 11: intrfr = intrfr_powerlaw_dlogsigma; break; case 12: intrfr = intrfr_powerlaw_dlogd; break; case 20: intrfr = intrfr_student; break; case 21: intrfr = intrfr_student_dlogsigma; break; case 22: intrfr = intrfr_student_dlogd; break; case 30: intrfr = intrfr_powerlawL; break; case 31: intrfr = intrfr_powerlawL_dlogsigma; break; case 32: intrfr = intrfr_powerlawL_dlogd; break; case 40: intrfr = intrfr_gaussian; break; case 41: intrfr = intrfr_gaussian_dlogsigma; break; case 50: intrfr = intrfr_exponential; break; case 51: intrfr = intrfr_exponential_dlogsigma; break; default: error("unknown intrfr_code"); break; } double center_x = 0.0; double center_y = 0.0; polyCub_iso(x, y, L, intrfr, pars, ¢er_x, ¢er_y, subdivisions, epsabs, epsrel, stop_on_error, value, abserr, neval); return; } surveillance/src/stcd-assuncaocorrea.cc0000644000176200001440000002424111746064472020025 0ustar liggesusers/** * File based on algoritmos.cpp and sv.cpp from the TerraView plugin. 
* C++ source originally created by Marcos Oliveira Prates on 06 April 2006 * * R interface by Michael Höhle initiated on 12 Jan 2009 */ #include "stcd-assuncaocorrea.h" #include #include using namespace std; // Calculate the number of events in the cylinder B( (xk,yk), rho) // (i.e. represented by the boolean matrix MSpace) between event times // (tj,ti] // // Params: // MSpace - contains for each pair of points is geographically // B( (xi,yi), rho) // EvtN - The last event, i.e. t_i // EvtJ - The first event, i.e. t_j int CalculaNCj(short **MSpace, const int EvtN, const int EvtJ) { int i; int Soma=0; for (i=EvtJ;i<=EvtN;i++) Soma += MSpace[EvtJ][i]; return(Soma); } // Calculate the number of events in the cylinder B( (xj,yj), rho) // (i.e. represented by the boolean matrix MSpace) between event times // (0,t_n] int ContaEvt(short **MSpace, const int EvtN, const int EvtJ) { int i; int Soma=0; for (i=0;i<=EvtN;i++) Soma += MSpace[EvtJ][i]; return(Soma); } ////////////////////////////////////////////////////////////////////// // Comment: Unfortunately, this function has not been commented in the // TerraView and hence it has been a bit difficult to document its exact // use. // // Params: // ev - a list of the events // RaioC - radius of the cylinder // epslon - relative change \lambda(s,t)(1+epsilon*I_{C_k}(s,t)) // areaA - area of the observation window A (also denoted W) // areaAcapBk - area of A \ B(s_k,\rho) for all k=1,\ldots,n // cusum - return Shiryaev-Roberts (FALSE) or CUSUM (TRUE) test // statistic // R - array of length ev where the computed values of R_n are // to be returned in. ////////////////////////////////////////////////////////////////////// int SistemadeVigilancia(SVEventLst &ev, const double RaioC, const double epslon, const double areaA, double *areaAcapBk, const int cusum, std::valarray &R) { size_t i, j, NCj, NumTotEvt, NumEvtCil; short **MSpace; double pontox, pontoy, DistEucl, Soma, UCj, fator; //order the event list ev.sort(); SVEventLst::size_type n_event = ev.size(); //create the spatio matrix MSpace = new short* [n_event]; if( MSpace == NULL ) return 1; for( i = 0; i < n_event; i++ ) { MSpace[i] = new short[n_event]; if( MSpace[i] == NULL ) { delete []MSpace; return 1; } } //create the output vector R.resize(n_event); if( R.size() != n_event ) { for( i = 0; i < n_event; i++ ) { delete []MSpace[i]; } delete []MSpace; return 1; } //Populate the spatio matrix with 1's if within radius rho in space //and 0 if not i = 0; for( SVEventLst::iterator it = ev.begin(); it != ev.end(); ++it, i++ ) { j = 0; for( SVEventLst::iterator jt = ev.begin(); jt != ev.end(); ++jt, j++ ) { pontox = (*it).x-(*jt).x; pontoy = (*it).y-(*jt).y; DistEucl = sqrt((pontox*pontox)+(pontoy*pontoy)); if((DistEucl < RaioC)) MSpace[i][j]=1; else MSpace[i][j]=0; } } ////////////////////////////////////////////////////////////////////// //Sequentually, for n=1,2,3,... compute the value of R_n by //by summing up all contributions of Lambda_{k,n} to form R_n, i.e. 
// \sum_{k=1}^n \Lambda_{k,n} ////////////////////////////////////////////////////////////////////// double LambdaMax = 0, Lambda; SVEventLst::iterator it2, jt2, ev0; //Loop over all n for( i = 0; i < n_event; i++ ) { Soma = 0.0; //Loop over 1<= k <= n (in code k is called j and n is i) for( j = 0; j <= i; j++ ) { //N(C_{k,n}) NCj = CalculaNCj(MSpace,i,j); //N(B(s_k, \rho) \times (0,t_n]) NumTotEvt = ContaEvt(MSpace,i,j); //N(A \times (t_k,t_n) ) = n-k+1 NumEvtCil = i-j+1; UCj = ((double)NumEvtCil*(double)NumTotEvt)/(double)(i+1); fator = 1.0+epslon; Lambda = pow(fator,(double)NCj) * exp((-epslon)*UCj); /* //Alternative estimation having the desired property for \rho->\infty // N( A \times (0,t_k] \cup (A\times (t_k,t_n) \backslash C_{k,n}) ) // \nu( A \times (0,t_k] \cup (A\times (t_k,t_n) \backslash C_{k,n}) ) double iCount=0; double jCount=0; ev0 = ev.begin(); for( it2 = ev.begin(); iCount < i ; ++it2, iCount++ ); for( jt2 = ev.begin(); jCount < j ; ++jt2, jCount++ ); double NNoCkn = ((j-1) + (NumEvtCil - NCj)); double volCkn = areaAcapBk[j] * ((*it2).t - (*jt2).t); double volNoCkn = areaA * ((*it2).t - (*ev0).t) - volCkn; UCj = (NNoCkn / volNoCkn) * volCkn; // Debug // cout << "----> k=" << j << " n= " << i << endl; // cout << "t_k=" << (*jt2).t << endl; // cout << "t_n=" << (*it2).t << endl; // cout << "N(C_{k,n}) = NCj; // cout << "N(W\\times(0,t_n) \\backslash C_{k,n}))=" << NNoCkn << endl; // cout << "vol(C_{k,n}))=" << volCkn << endl; // cout << "vol(W\\times(0,t_n) \backslash C_{k,n})=" << volNoCkn << endl; //// cout << "mu(C_{k,n})=" << UCj << endl; //Lambda = pow(fator,(double)NCj) * exp((-epslon)*UCj); */ //Summation for the Shiryaev-Roberts statistics Soma += Lambda; //Find maximum k of \Lambda_{k,n} for the CUSUM statistics if (Lambda> LambdaMax) { LambdaMax = Lambda; } } //Depending on the summation scheme compute the statistic. 
if (cusum) { R[i] = LambdaMax; } else { R[i] = Soma; } } //clean memory for( i = 0; i < n_event; i++ ) { delete [] MSpace[i]; } delete [] MSpace; return 0; } int CalculaLambda(SVEventLst &ev, const double RaioC, const double epslon, std::valarray<double> &R, unsigned int &numObs) { size_t i, j, NCj, NumTotEvt, NumEvtCil; short **MSpace; double pontox, pontoy, DistEucl, UCj, fator, lambda, lambdaMax; ev.sort(); SVEventLst::size_type n_event = ev.size(); //create the spatio matrix MSpace = new short* [n_event]; if( MSpace == NULL ) return 1; for( i = 0; i < n_event; i++ ) { MSpace[i] = new short[n_event]; if( MSpace[i] == NULL ) { delete []MSpace; return 1; } } //create the output vector R.resize(n_event); if( R.size() != n_event ) { for( i = 0; i < n_event; i++ ) { delete []MSpace[i]; } delete []MSpace; return 1; } //populate the spatial proximity matrix with 1 if the events are close in space and 0 if not i = 0; for( SVEventLst::iterator it = ev.begin(); it != ev.end(); ++it, i++ ) { j = 0; for( SVEventLst::iterator jt = ev.begin(); jt != ev.end(); ++jt, j++ ) { pontox = (*it).x-(*jt).x; pontoy = (*it).y-(*jt).y; DistEucl = sqrt((pontox*pontox)+(pontoy*pontoy)); if((DistEucl < RaioC)) MSpace[i][j]=1; else MSpace[i][j]=0; } } //do the computation to find the output value of each event i = numObs; lambdaMax = 0; for( j = 0; j <= i; j++ ) { NCj = CalculaNCj(MSpace,i,j); NumTotEvt = ContaEvt(MSpace,i,j); NumEvtCil = i-j+1; UCj = ((double)NumEvtCil*(double)NumTotEvt)/(double)(i+1); fator = 1.0+epslon; lambda = (pow(fator,(double)NCj) * exp((-epslon)*UCj)); if (lambda > lambdaMax){ lambdaMax = lambda; numObs = j; } } //clean memory for( i = 0; i < n_event; i++ ) { delete [] MSpace[i]; } delete [] MSpace; return 0; } ////////////////////////////////////////////////////////////////////// // Shiryaev-Roberts space-time detection as explained in the paper // by Correa and Assuncao (2009). // // Params: // x - array with x location of events // y - array with y location of events // t - array with time point of the events (on some arbitrary time scale) // n - number of elements in x, y and t (the same for the three vectors) // radius - radius of the cluster cylinder // epsilon - relative change of the intensity function to detect // areaA - area of the observation region (also denoted W) // areaAcapBk - area of A \ B(s_k,\rho) for all k=1,\ldots,n // threshold -- upper threshold when to sound the alarm // Rarray -- array of length n, this will contain the statistics calculated // by the function // idxFirstAlarm -- index in the x,y,t vector resulting in the alarm // idxClusterCenter -- index in the x,y,t vector containing the cluster // center ////////////////////////////////////////////////////////////////////// extern "C" { void SRspacetime(double *x, double *y, double *t, int *n, double *radius, double *epsilon, double *areaA, double *areaAcapBk, int *cusum, double *threshold, double *Rarray, int *idxFirstAlarm, int *idxClusterCenter) { //Create SVEventLst SVEvent e; SVEventLst eList; unsigned int i; int j; //Fill coordinates of event list for(j=0;j<*n;j++){ e.x = x[j]; e.y = y[j]; e.t = t[j]; eList.push_back(e); } //Array of test statistic values std::valarray<double> R; //Call SistemadeVigilancia, this calculates the SR statistics R_n SistemadeVigilancia(eList,*radius,*epsilon,*areaA,areaAcapBk,*cusum, R); //Debug purposes //cout << "Size of R = " << R.size() << endl; //Move values of test statistic for return for(i=0;i<R.size();i++) { Rarray[i] = R[i]; } //Check if the test statistic exceeds the threshold anywhere bool controle = false; for(i=0;i<R.size();i++) { if (R[i]>*threshold){ controle = true; break; } } //Advancing the iterator "it" to the point //where the alarm is generated.
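//controle indicates whether some R[i] exceeded the threshold. Note that
//cont and num below are 0-based C indices; any shift to R's 1-based
//indexing is presumably left to the calling R code.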
  if (controle) {
    unsigned int cont = 0;
    SVEventLst::iterator it = eList.begin();
    while((cont < i) && (it != eList.end())){
      ++it;
      ++cont;
    }
    *idxFirstAlarm = cont;

    //Determine the cluster center of the alarm
    unsigned int num = cont;
    CalculaLambda(eList,*radius,*epsilon,R,num);

    //Index of the cluster center
    *idxClusterCenter = num;
  } else {
    //If no alarm occurs, return -2 for both indices; after the one-based
    //index shift applied on the R side this corresponds to -1.
    *idxFirstAlarm = -2;
    *idxClusterCenter = -2;
  }
  //Clean up (nothing to clean) and done
}

}
surveillance/src/surveillance.c0000644000176200001440000011053114013525371016377 0ustar liggesusers/**
   C routines for the surveillance package

   Author: (C) Michael Höhle
   Date: 8 Jan 2008

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, a copy is available at
   http://www.r-project.org/Licenses/

   At the moment the only C routines are concerned with the GLR
   computations in the algorithm algo.prc.

   //should check that these really work...
   void lr_cusum          - intercept chart with known kappa
   void glr_cusum         - intercept chart with estimated kappa
   void glr_cusum_window  - window-limited intercept chart with estimated kappa
   //removed: void glr_epi
   void glr_epi_window

   //History
   17 Feb 2009 -- added LR scheme for negative binomial (still experimental)
   08 Jan 2007 -- added the files for the negative binomial computations
   21 Sep 2007 -- modified code to get around an error of the extremely
                  strict (= pedantic) MacOS compilation on CRAN
   28 Nov 2006 -- file created
*/

/*#define DEBUG*/

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <R.h>   /* header */

/*
void lr_cusum(int* ,double* , int *, double *, double *,int *, double *) ;
void glr_cusum(int* ,double* , int *, int *, double *,int *, double *, int *, int *, int *) ;
*/

/* Helper function for x^2 */
static R_INLINE double sqr(double x) { return(x*x); }

/*======================================================================
   Poisson GLR detector
  ======================================================================
*/

/**********************************************************************
  C implementation of the LR test for the seasonal Poisson chart with
  fixed change in the intercept

  Params:
   x     - array of observed values (pos 0 is first interesting value)
   mu0   - array with the in-control means (pos 0 is first interesting value)
   lx    - length of the x and mu0 arrays
   kappa - the change in intercept to detect (here known in advance)
   c_ARL - threshold when to sound an alarm
   ret_N - here the return value is stored
   ret_lr - LR value for each n to be returned
   ret_cases - the number of cases to be returned
   ret   - what should be returned (value of the lr-statistic or cases)?
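   In formulas, the detector below computes the CUSUM recursion
     l_0 = max(0, z_0),   l_n = max(0, l_{n-1} + z_n),   where
     z_n = kappa * x_n + (1 - exp(kappa)) * mu0_n,
   and an alarm is sounded at the first n with l_n > c_ARL.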
 **********************************************************************/
void lr_cusum(int* x,double* mu0, int *lx_R, double *kappa_R, double *c_ARL_R,
              int *ret_N, double *ret_lr, double *ret_cases, int *ret_R) {
  /* Pointers to something useful */
  int lx = *lx_R;
  double c_ARL = *c_ARL_R;
  double kappa = *kappa_R;
  int ret = *ret_R;

  /* Loop variables */
  register int n=0;
  int stop = 0;
  int N = lx;

  /* Loop over all 0 <= n <= length(x) */
  while (n < lx) {
    /*Compute for one n*/
    /*printf("n=%d\n",n);*/
    double zn = kappa * x[n] + (1-exp(kappa))*mu0[n];
#ifdef DEBUG
    printf("For kappa=%f and mu[%d]=%f:\tx[%d]=%f, LR=%f\n",kappa,n,mu0[n],n,x[n],zn);
#endif

    /* Add up */
    if (n==0) {
      ret_lr[n] = fmax(0,zn);
      /*5.11.2009 -- Bug fix. There was a small programming error in
        computing the cases for n==0.
        if (ret==2) ret_cases[n] = (c_ARL + mu0[n]*(kappa-1))/kappa ; */
      if (ret==2) ret_cases[n] = (c_ARL + mu0[n]*(exp(kappa)-1))/kappa ;
    } else {
      ret_lr[n] = fmax(0,ret_lr[n-1] + zn);
      if (ret==2) ret_cases[n] = (c_ARL - ret_lr[n-1] + mu0[n]*(exp(kappa)-1))/kappa ;
    }

    /* Stop at the first time that the LR statistic exceeds c_ARL */
    if ((ret_lr[n] > c_ARL) && !stop) { N = n; stop = 1; break; }

    /* Advance counter */
    n++;
  }

  /* Return value (add 1 for R/SPlus array compatibility) */
  *ret_N = N+1;
}

/***********************************************************************
 Function for the computation of the GLR statistic with time-varying
 in-control value

 Params
  n   - time point n where the GLR statistic should be computed
  x   - array with observations
  mu0 - array with estimated in-control parameters
  dir - direction of testing (up (1) or down (-1))

 The function returns
   max_{1<=k<=n} sup_theta sum_{t=k}^n log f_theta(x_t)/f_theta0(x_t)
************************************************************************/
static double glr (int n, int x[], double mu0[], int dir){
  /* For the recursive computation of kappa_ml */
  double sumx = 0;
  double summu0 = 0;

  /* Define max of the GLR stats */
  double maxGLR = -1e99;

  /* Loop variable */
  register int k;

  /* For fitting and summation */
  double kappa_ml = 0;
  double sum = 0;

  /* Loop over all k */
  for (k=n; k>=0; k--) { /* Backwards loop makes calculations faster */
    /* Recursive update of the kappa_ml quantities */
    sumx += x[k];
    summu0 += mu0[k];

    /* Calculate MLE of kappa */
    kappa_ml = dir*fmax(0,dir*log(sumx/summu0));

    /* Recursive updating of the likelihood ratios -- See notes on the
       21 September printout. This is fast!
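       For fixed k the inner supremum has the closed form
         sup_kappa { kappa*sumx + (1-exp(kappa))*summu0 }
           = kappa_ml*sumx + (1-exp(kappa_ml))*summu0,
       with kappa_ml = max(0, log(sumx/summu0)) (direction-adjusted via
       dir above), so only the two cumulative sums need to be updated
       for each k.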
    */
    sum = kappa_ml * sumx + (1-exp(kappa_ml))*summu0;

    /* save max value */
    if (sum > maxGLR) { maxGLR = sum;}
  }
  return(maxGLR);
}

/***********************************************************************
 Function for the computation of the window-limited GLR statistic with
 time-varying in-control value

 Params
  n      - time point n where the GLR statistic should be computed
  x      - array with observations
  mu0    - array with estimated in-control parameters
  dir    - direction of testing (up (1) or down (-1))
  M      - max time to go back in time from n
  Mtilde - number of values we need to estimate a detection

 The function returns
   max_{max(0,n-M) <= k <= n-Mtilde} sup_theta sum_{t=k}^n log f_theta(x_t)/f_theta0(x_t)
************************************************************************/
static double glr_window (int n, int x[], double mu0[], int dir, int M, int Mtilde){
  /* Define max of the GLR stats */
  double maxGLR = -1e99;

  /* Loop variables */
  register int k,l;

  /* For the recursive computation of kappa_ml:
     start with the sums over (n-Mtilde+1):n */
  double sumx = 0;
  double summu0 = 0;

  /* For fitting and summation */
  double sum = 0;
  double kappa_ml = 0;

  for (l=n-Mtilde+1; l<=n; l++) {
    sumx += x[l];
    summu0 += mu0[l];
  }

  /* Loop over all max(0,n-M) <= k <= n-Mtilde -- do this backwards */
  /* for (k=max(0,n-M); k<= (n-Mtilde); k++) { */
  for (k=n-Mtilde; k>=fmax(0,n-M); k--) {
    /* Recursive update of the kappa_ml quantities */
    sumx += x[k];
    summu0 += mu0[k];
    kappa_ml = dir*fmax(0,dir*log(sumx/summu0));

    /* Calculate sum of likelihood ratios using recursive updating (fast!) */
    sum = kappa_ml * sumx + (1-exp(kappa_ml))*summu0;

    /* Save the max value */
    if (sum > maxGLR) { maxGLR = sum;}
  }
  return(maxGLR);
}

/**********************************************************************
  Fast C implementation of the sequential GLR test without windowing
  for Poisson distributed variables. This function can test in both
  directions (up/down) and, in contrast to the old function glr_cusum,
  it can return the number of cases at time point n needed to produce
  an alarm at any time point 1<=k<=n.

  Params:
   x     - array of observed values (pos 0 is first interesting value)
   mu0   - array with the in-control means (pos 0 is first interesting value)
   lx    - length of the x and mu0 arrays
   n0    - number of burn-in values (number of observations, not array index!)
   c_ARL - threshold when to sound an alarm
   ret_N - here the return value is stored
   ret_glr - GLR value for each n to be returned
   ret_cases - the number of cases to be returned
   dir   - direction of testing
   ret   - what should be returned (value of the glr-statistic or cases)?
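   A hypothetical call from R via the .C interface (argument values are
   for illustration only and not part of this file):
     .C("glr_cusum", as.integer(x), as.double(mu0),
        as.integer(length(x)), as.integer(1), as.double(5),
        N = integer(1), glr = double(length(x)),
        cases = double(length(x)), as.integer(1), as.integer(1))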
 **********************************************************************/
void glr_cusum(int* x,double* mu0, int *lx_R, int *n0_R, double *c_ARL_R,
               int *ret_N, double *ret_glr, double *ret_cases,
               int *dir_R, int *ret_R) {
  /* Pointers to something useful */
  int lx = *lx_R;
  int n0 = *n0_R;
  int dir = *dir_R;
  int ret = *ret_R;
  double c_ARL = *c_ARL_R;

  /* Loop variables */
  register int n;
  /* Init the return values for the burn-in phase 0 <= n < n0-1 */
  for (n=0; n<n0-1; n++) { ret_glr[n] = 0; if (ret==2) ret_cases[n] = 0; }

  int stop = 0;
  int N = lx;

  /* Loop over all n0-1 <= n < length(x) */
  n = n0-1;
  while (n < lx) {
    /* Compute the GLR statistic for time point n */
    ret_glr[n] = glr(n,x,mu0,dir);

    /* Compute the number of cases at time point n needed for an alarm */
    if (ret == 2) {
      /* Save the observed value at n */
      int xnold = x[n];
      int xnnew = 0;
      x[n] = xnnew;
      double glrnew = glr(n,x,mu0,dir);

      /* Find the value xnnew so that the GLR statistic is >= c_ARL */
      while ((dir*glrnew < c_ARL*dir)){
        /* increase/decrease xnnew */
        xnnew = xnnew + 1;
        /* put this value in vector x at time point n */
        x[n] = xnnew;
        /* compute the glr-statistic */
        glrnew = glr(n,x,mu0,dir);
      }
      /* save the value */
      ret_cases[n] = xnnew;
      /* set x[n] back to its original value so that we can go to the next n */
      x[n] = xnold;
    }

    /* Stop at the first time that the GLR statistic exceeds c_ARL */
    if ((ret_glr[n] >= c_ARL) && !stop) { N = n; stop = 1; break; }

    /*Advance counter*/
    n++;
  }

  /* Return value (add 1 for R/SPlus array compatibility) */
  *ret_N = N+1;
}

/**********************************************************************
  Fast C implementation of the window-limited sequential GLR test for
  Poisson distributed variables.

  Params:
   x      - array of observed values (pos 0 is first interesting value)
   mu0    - array with the in-control means (pos 0 is first interesting value)
   lx     - length of the x and mu0 arrays
   Mtilde - number of values we need to estimate a detection
   M      - max time to go back in time from n
   c_ARL  - threshold when to sound an alarm
 **********************************************************************/
void glr_cusum_window(int* x,double* mu0, int *lx_R, int *M_R, int *Mtilde_R,
                      double *c_ARL_R,int *ret_N, double *ret_glr,
                      double *ret_cases, int *dir_R, int *ret_R) {
  /* Pointers to something useful */
  int lx = *lx_R;
  int M = *M_R;
  int Mtilde = *Mtilde_R;
  int dir = *dir_R;
  int ret = *ret_R;
  double c_ARL = *c_ARL_R;

  /* Loop variables (n > Mtilde, so we start with n = Mtilde,
     due to the -1 in the index) */
  register int n = Mtilde;
  /*l*/
  int stop = 0;
  int N = lx;

  /* Precalculation of log(mu0) -- apparently not used anymore */
  //double logmu0[lx];
  //for (l=0;l<lx;l++) { logmu0[l] = log(mu0[l]); }

  /* Loop over all Mtilde <= n < length(x) */
  while (n < lx) {
    /* Compute the GLR statistic for time point n */
    ret_glr[n] = glr_window(n,x,mu0,dir,M,Mtilde);

    /* Compute the number of cases at time point n needed for an alarm */
    if (ret == 2) {
      /* Save the observed value at n */
      int xnold = x[n];
      int xnnew = 0;
      x[n] = xnnew;
      double glrnew = glr_window(n,x,mu0,dir,M,Mtilde);

      /* Find the value xnnew so that the GLR statistic is >= c_ARL */
      while ((dir*glrnew < c_ARL*dir)){
        /* increase/decrease xnnew */
        xnnew = xnnew + 1;
        /* put this value in vector x at time point n */
        x[n] = xnnew;
        /* compute the glr-statistic */
        glrnew = glr_window(n,x,mu0,dir,M,Mtilde);
      }
      /* save the value */
      ret_cases[n] = xnnew;
      /* set x[n] back to its original value so that we can go to the next n */
      x[n] = xnold;
    }

    /* Debug*/
    /* printf("For n=%d the best GLR value is %f\n",n,maxGLR);*/

    /* Stop at the first time that the GLR statistic exceeds c_ARL */
    if ((ret_glr[n] >= c_ARL) && !stop) { N = n; stop = 1; break; }

    /* Advance counter */
    n++;
  }

  /* Return value (add 1 for R/SPlus array compatibility) */
  *ret_N = N+1;
}

/*======================================================================
   GLR in the Epidemic Poisson model
  ======================================================================
*/

/*Helper functions*/

/* Score function */
static R_INLINE double score(double phi, int *x, double *xm1, double *mu0, int k, int n) {
  register int i;
  double sum = 0;
  /*printf("[1] ");*/
  for (i=k; i<=n; i++) {
    sum += (x[i]*xm1[i])/(exp(phi)*xm1[i]+mu0[i]) - xm1[i];
  }
  /*printf("\n");*/
  return(exp(phi)*sum);
}

/*fisher information*/
static R_INLINE double fisher(double phi,int *x,double *xm1, double *mu0, int k,int n,double scorephi) {
  register int i;
  double sum = 0;
  for (i=k; i<=n; i++) {
    sum += (x[i]*sqr(xm1[i]))/sqr(exp(phi)*xm1[i]+mu0[i]);
  }
  return(-scorephi + exp(2.0*phi)*sum);
}
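/* (Illustration) The GLR detectors below locate the MLE by a Newton-Raphson
   iteration built from the two helpers above (and from their negative
   binomial analogues nbScore/nbFisher further down). A minimal sketch of one
   ascent step, assuming the score() and fisher() signatures from this file;
   phi_step() itself is a hypothetical helper, not part of the package:

     static double phi_step(double phi, int *x, double *xm1,
                            double *mu0, int k, int n) {
       double s = score(phi, x, xm1, mu0, k, n);      // gradient
       double i = fisher(phi, x, xm1, mu0, k, n, s);  // information
       return phi + s / i;                            // one Newton step
     }

   Iterating until |exp(phi_new) - exp(phi_old)| falls below a small
   tolerance yields the estimate that is plugged into the GLR statistic. */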
/**********************************************************************
 GLR detector for the epidemic Poisson model described in Held et al. (2005).

 Parameters:
  x        -- the data (as array)
  mu0      -- base means under H0
  lx       -- length of x
  Mtilde_R -- number of obs needed to get a good estimate (typically 1)
  M        -- Mtilde < M
  xm10     -- observed value of x_0 (0 for initialization, but known if >1st round)
  c_ARL_R  -- constant determining when to signal an alarm
  ret_N    -- the return value
  ret_glr  -- GLR value for each n to be returned
**********************************************************************/
void glr_epi_window(int* x,double* mu0, int *lx_R, int *Mtilde_R, int *M_R,
                    double *xm10, double *c_ARL_R,int *ret_N, double *ret_glr) {
  /* printf("====> begin glr_epi\n"); */
  /* Pointers to something useful */
  int lx = *lx_R; /* length of x */
  int Mtilde = *Mtilde_R;
  int M = *M_R;
  double c_ARL = *c_ARL_R;

  /* Loop variables */
  register int n, k, i;

  /* Init return values up to the first position */
  int n0 = fmax(Mtilde-1,0); /*hoehle: 25.9: changepoint can happen at position one: fmax(Mtilde-1,1);*/
  for (n=0; n<n0; n++) { ret_glr[n] = 0; }

  /* Compute x_{t-1} */
  double xm1[lx];
  xm1[0] = *xm10;
  for (i=1; i<lx; i++) { xm1[i] = x[i-1]; }

  int stop = 0;
  int N = lx;

  /* Loop over all n0 <= n < length(x) */
  n = n0;
  while (n < lx) {
    /* Define max of the GLR stats */
    double maxGLR = -1e99;

    /* Loop over all max(0,n-M) <= k <= n-Mtilde */
    for (k=fmax(0,n-M); k<=n-Mtilde; k++) {
      /* Newton-Raphson search for the MLE of phi */
      double phi_old = 1e99;
      double phi_new = 0;
      double scorephi = 0;
      int iter = 0;
      const int maxIter = 100; /* safeguard against non-convergence */
      while ((phi_new>-18) & (fabs(exp(phi_new) - exp(phi_old)) > 1e-6) & (iter<maxIter)) {
        phi_old = phi_new;
        scorephi = score(phi_old,x,xm1,mu0,k,n);
        phi_new = phi_old + scorephi/fisher(phi_old,x,xm1,mu0,k,n,scorephi);
        iter++;
      }

      /* Likelihood ratio l_{n,k} evaluated at the estimate */
      double lnk = 0;
      for (i=k; i<=n; i++) {
        lnk += x[i]*log((exp(phi_new)*xm1[i]+mu0[i])/mu0[i]) - exp(phi_new)*xm1[i];
      }

      /* save the max value */
      if (lnk > maxGLR) { maxGLR = lnk;}
    }

    /*Debug */
    /*printf("For n=%d the best GLR value is %f\n",n,maxGLR); */

    /*Save the return value */
    ret_glr[n] = maxGLR;

    /*Stop at the first time that the GLR statistic exceeds c_ARL */
    if ((maxGLR > c_ARL) && !stop) { N = n; stop = 1; break; }

    /*Advance counter */
    n++;
  }

  /*Set the remaining values to zero */
  for (i=n+1; i<lx; i++) { ret_glr[i] = 0; }

  /* Return value (add 1 for R/SPlus array compatibility) */
  *ret_N = N+1;
}

/*======================================================================
   LR and GLR detectors for the negative binomial distribution
  ======================================================================
*/

/**********************************************************************
 LR CUSUM detector for the negative binomial distribution with known
 change kappa in the intercept, as described in Hoehle and Paul (2008).

 Params:
  x     - array of observed values (pos 0 is first interesting value)
  mu0   - array with the in-control means (pos 0 is first interesting value)
  alpha - fixed dispersion parameter of the NegBin distribution
  lx    - length of the x and mu0 arrays
  kappa - the change in intercept to detect (here known in advance)
  c_ARL - threshold when to sound an alarm
  ret_N - here the return value is stored
  ret_lr - LR value for each n to be returned
  ret_cases - the number of cases to be returned
  ret   - what should be returned (value of the lr-statistic or cases)?
**********************************************************************/
void lr_cusum_nb(int* x, double* mu0, double* alpha_R, int *lx_R,
                 double *kappa_R, double *c_ARL_R, int *ret_N,
                 double *ret_lr, double *ret_cases, int *ret_R) {
#ifdef DEBUG
  printf("====> begin lr_cusum_nb\n");
#endif

  /* Pointers to something useful */
  int lx = *lx_R;
  double c_ARL = *c_ARL_R;
  double kappa = *kappa_R;
  double alpha = *alpha_R;
  int ret = *ret_R;

#ifdef DEBUG
  printf("lx = %d\n",lx);
  printf("alpha = %f\n",alpha);
#endif

  /* Loop variables */
  register int n=0;
  int stop = 0;
  int N = lx;

  /* Loop over all 0 <= n <= length(x) */
  while (n < lx) {
    /*Compute for one n*/
#ifdef DEBUG
    printf("n=%d\n",n);
#endif

    /* LR for one NB variable as given in the first equation of Sect 2.1
       in the Hoehle and Paul (2008) paper */
    double zn = kappa * x[n] + (x[n]+1/alpha)*log( (1+alpha*mu0[n])/(1+alpha*mu0[n]*exp(kappa)) );

    /* Recursive CUSUM as given in (4) by Hoehle and Paul (2008) */
    if (n==0) {
      /* Statistic */
      ret_lr[n] = fmax(0,zn);
      /* Number of cases it takes to sound an alarm -- backcalc'ed by backcalc.mws */
      if (ret==2) ret_cases[n] = -(log((1+alpha*mu0[n])/(1+alpha*mu0[n]*exp(kappa)))-c_ARL*alpha)/alpha/(kappa+log((1+alpha*mu0[n])/(1+alpha*mu0[n]*exp(kappa))));
    } else {
      /* Statistic */
      ret_lr[n] = fmax(0,ret_lr[n-1] + zn);
      /* Number of cases it takes to sound an alarm -- backcalc.mws */
      if (ret==2) ret_cases[n] = -(ret_lr[n-1]*alpha+log((1+alpha*mu0[n])/(1+alpha*mu0[n]*exp(kappa)))-c_ARL*alpha)/alpha/(kappa+log((1+alpha*mu0[n])/(1+alpha*mu0[n]*exp(kappa))));
    }

    /* Stop at the first time that the LR statistic exceeds c_ARL */
    if ((ret_lr[n] > c_ARL) && !stop) { N = n; stop = 1; break; }

    /* Advance counter */
    n++;
  }

  /* Return value (add 1 for R/SPlus array compatibility) */
  *ret_N = N+1;
}

/* ======================================================================
   Functions for the intercept chart
   ====================================================================== */

/* Score function for the intercept chart */
static R_INLINE double nbScore(double kappa, int *x, double *mu0, double alpha, int k, int n) {
  register int i;
  double sum = 0;
  /*printf("[1] ");*/
  for (i=k; i<=n; i++) {
    sum += (x[i]-exp(kappa)*mu0[i])/(1+alpha*exp(kappa)*mu0[i]);
  }
  /*printf("\n");*/
  return(sum);
}

/*fisher information for
  the intercept chart -- it is minus the Hessian */
static R_INLINE double nbFisher(double kappa,int *x, double *mu0, double alpha, int k,int n) {
  register int i;
  double sum = 0;
  for (i=k; i<=n; i++) {
    sum += mu0[i]*(alpha*x[i]+1)/sqr(1+alpha*exp(kappa)*mu0[i]);
  }
  return( exp(kappa)*sum);
}

/* Formula to compute a single l_{n,k} for the intercept chart */
static R_INLINE double nblnk(double kappa,int *x, double *mu0, double alpha, int k,int n) {
  register int i;
  double lnk = 0;
  for (i=k;i<=n;i++) {
    lnk += kappa * x[i] + (x[i] + 1/alpha) * log( (1+alpha*mu0[i])/(1+alpha*mu0[i]*exp(kappa)));
  }
  return(lnk);
}

/**********************************************************************
 GLR detector for the negative binomial model described in
 Hoehle and Paul (2008).

 Parameters:
  x        -- the data (as array)
  mu0      -- base means under H0
  alpha    -- fixed dispersion parameter of the NegBin distribution
              (see Lawless (1987))
  lx       -- length of x
  Mtilde_R -- number of obs needed to get a good estimate (typically 1)
  M        -- Mtilde < M
  c_ARL_R  -- constant determining when to signal an alarm
  ret_N    -- the return value
  ret_glr  -- GLR value for each n to be returned
**********************************************************************/
void glr_nb_window(int* x,double* mu0, double* alpha_R, int *lx_R,
                   int *Mtilde_R, int *M_R, double *c_ARL_R,
                   int *ret_N, double *ret_glr, int *dir_R) {
#ifdef DEBUG
  printf("====> begin glr_nb_window\n");
#endif

  /* Pointers to something useful */
  int lx = *lx_R; /* length of x */
  int Mtilde = *Mtilde_R;
  int M = *M_R;
  double c_ARL = *c_ARL_R;
  double alpha = *alpha_R;
  int dir = *dir_R;

  /* Loop variables */
  register int n, k, i;

  /*changepoint can happen at position one (i.e. index zero in C)*/
  int n0 = fmax(Mtilde-1,0);

#ifdef DEBUG
  printf("Length of the data = %d\n",lx);
  printf("starting at n0= %d\n",n0);
#endif

  /* Show the data */
  /*for (n=0; n<lx; n++) { printf("x[%d] = %d\n",n,x[n]); }*/

  /* Init return values up to the first position */
  for (n=0; n<n0; n++) { ret_glr[n] = 0; }

  int stop = 0;
  int N = lx;

  /* Loop over all n0 <= n < length(x) */
  n = n0;
  while (n < lx) {
    /* Define max of the GLR stats */
    double maxGLR = -1e99;

    /* Loop over all max(0,n-M) <= k <= n-Mtilde */
    for (k=fmax(0,n-M); k<=n-Mtilde; k++) {
      /* Newton-Raphson search for the MLE of kappa */
      double kappa_old = 1e99;
      double kappa_new = 0;
      int iter = 0;
      const int maxIter = 100; /* safeguard against non-convergence */
      while ((kappa_new>-18) & (fabs(kappa_new - kappa_old) > 1e-6) & (iter<maxIter)) {
        kappa_old = kappa_new;
        kappa_new = kappa_old + nbScore(kappa_old,x,mu0,alpha,k,n)/nbFisher(kappa_old,x,mu0,alpha,k,n);
        iter++;
      }

      /* Restrict the estimate according to the direction of testing */
      double kappa_ml = dir*fmax(0,dir*kappa_new);

      /* Likelihood ratio l_{n,k} evaluated at the estimate */
      double lnk = nblnk(kappa_ml,x,mu0,alpha,k,n);

      /* save the max value */
      if (lnk > maxGLR) { maxGLR = lnk;}
    }

    /*Debug */
#ifdef DEBUG
    printf("For n=%d the highest GLR value is %f\n",n,maxGLR);
#endif

    /*Save the return value */
    ret_glr[n] = maxGLR;

    /*Stop at the first time that the GLR statistic exceeds c_ARL */
    /*hoehle: now >= */
    if ((maxGLR >= c_ARL) && !stop) { N = n; stop = 1; break; }

    /*Advance counter */
    n++;
  }

  /*Set the remaining values to zero */
  for (i=n+1; i<lx; i++) { ret_glr[i] = 0; }

  /* Return value (add 1 for R/SPlus array compatibility) */
  *ret_N = N+1;
}

/**********************************************************************
 GLR detector for the more general negative binomial chart with an
 epidemic component, in the spirit of Hoehle and Paul (2008).

 Parameters:
  x        -- the data (as array)
  mu0      -- base means under H0
  alpha    -- fixed dispersion parameter of the NegBin distribution
  lx       -- length of x
  Mtilde_R -- number of obs needed to get a good estimate (typically 1)
  M        -- Mtilde < M
  xm10     -- observed value of x_0 (0 for initialization, but known if >1st round)
  c_ARL_R  -- constant determining when to signal an alarm
  ret_N    -- the return value
  ret_glr  -- GLR value for each n to be returned
**********************************************************************/
void glr_nbgeneral_window(int* x, double* mu0, double* alpha_R, int *lx_R,
                          int *Mtilde_R, int *M_R, double *xm10,
                          double *c_ARL_R, int *ret_N, double *ret_glr,
                          int *dir_R) {
#ifdef DEBUG
  printf("====> begin glr_nbgeneral_window \n");
#endif

  /* Pointers to something useful */
  int lx = *lx_R; /* length of x */
  int Mtilde = *Mtilde_R;
  int M = *M_R;
  double c_ARL = *c_ARL_R;
  double alpha = *alpha_R;
  /* int dir = *dir_R; -- currently direction is not supported?? */

  /* Loop variables */
  register int n, k,i;

  /*changepoint can happen at position one (ie.
    index zero in C*/
  int n0 = fmax(Mtilde-1,0);

  /* Compute x_{t-1} */
  double xm1[lx];
  xm1[0] = *xm10; /* used to be 0 */
  for (i=1; i<lx; i++) { xm1[i] = x[i-1]; }

  /* Init return values up to the first position */
  for (n=0; n<n0; n++) { ret_glr[n] = 0; }

  int stop = 0;
  int N = lx;

  /* Loop over all n0 <= n < length(x) */
  n = n0;
  while (n < lx) {
    /* Define max of the GLR stats */
    double maxGLR = -1e99;

    /* Loop over all max(0,n-M) <= k <= n-Mtilde */
    for (k=fmax(0,n-M); k<=n-Mtilde; k++) {
      /* Newton-Raphson search for the MLE of theta, the log of the
         epidemic parameter in the alternative mean mu0 + exp(theta)*x_{t-1} */
      double theta_old = 1e99;
      double theta_new = 0;
      int iter = 0;
      const int maxIter = 100; /* safeguard against non-convergence */
      while ((theta_new>-18) & (fabs(theta_new - theta_old) > 1e-6) & (iter<maxIter)) {
        double sc = 0, info = 0, m1;
        theta_old = theta_new;
        for (i=k; i<=n; i++) {
          m1 = mu0[i] + exp(theta_old)*xm1[i];
          sc   += exp(theta_old)*xm1[i]*(x[i]-m1)/(m1*(1+alpha*m1));
          info += sqr(exp(theta_old)*xm1[i])/(m1*(1+alpha*m1));
        }
        theta_new = theta_old + sc/info;
        iter++;
      }

      /* Likelihood ratio l_{n,k} evaluated at the estimate */
      double lnk = 0;
      for (i=k; i<=n; i++) {
        double m1 = mu0[i] + exp(theta_new)*xm1[i];
        lnk += x[i]*log(m1/mu0[i]) + (x[i]+1/alpha)*log((1+alpha*mu0[i])/(1+alpha*m1));
      }

      /* save the max value */
      if (lnk > maxGLR) { maxGLR = lnk;}
    }

    /*Debug */
#ifdef DEBUG
    printf("For n=%d the highest GLR value is %f\n",n,maxGLR);
#endif

    /*Save the return value */
    ret_glr[n] = maxGLR;

    /*Stop at the first time that the GLR statistic exceeds c_ARL */
    /*hoehle: now >= */
    if ((maxGLR >= c_ARL) && !stop) { N = n; stop = 1; break; }

    /*Advance counter */
    n++;
  }

  /*Set the remaining values to zero */
  for (i=n+1; i<lx; i++) { ret_glr[i] = 0; }

  /* Return value (add 1 for R/SPlus array compatibility) */
  *ret_N = N+1;
}
surveillance/src/init.c
/*******************************************************************************
 * C routine registration, generated by
 *   tools::package_native_routine_registration_skeleton("..", character_only = FALSE)
 *   // for surveillance 1.19.1
 * + adding R_forceSymbols(dll, TRUE);
 * + adding function declarations via cproto -I/usr/share/R/include -e *.c
 *******************************************************************************/

#include <R.h>
#include <Rinternals.h>
#include <stdlib.h> // for NULL
#include <R_ext/Rdynload.h>

/* .C calls */
extern void glr_cusum(int *x, double *mu0, int *lx_R, int *n0_R, double *c_ARL_R, int *ret_N, double *ret_glr, double *ret_cases, int *dir_R, int *ret_R);
extern void glr_cusum_window(int *x, double *mu0, int *lx_R, int *M_R, int *Mtilde_R, double *c_ARL_R, int *ret_N, double *ret_glr, double *ret_cases, int *dir_R, int *ret_R);
extern void glr_epi_window(int *x, double *mu0, int *lx_R, int *Mtilde_R, int *M_R, double *xm10, double *c_ARL_R, int *ret_N, double *ret_glr);
extern void glr_nbgeneral_window(int *x, double *mu0, double *alpha_R, int *lx_R, int *Mtilde_R, int *M_R, double *xm10, double *c_ARL_R, int *ret_N, double *ret_glr, int *dir_R);
extern void glr_nb_window(int *x, double *mu0, double *alpha_R, int *lx_R, int *Mtilde_R, int *M_R, double *c_ARL_R, int *ret_N, double *ret_glr, int *dir_R);
extern void lr_cusum(int *x, double *mu0, int *lx_R, double *kappa_R, double *c_ARL_R, int *ret_N, double *ret_lr, double *ret_cases, int *ret_R);
extern void lr_cusum_nb(int *x, double *mu0, double *alpha_R, int *lx_R, double *kappa_R, double *c_ARL_R, int *ret_N, double *ret_lr, double *ret_cases, int *ret_R);
extern void pkolmogorov2x(double *x, Sint *n);
extern void pkstwo(Sint *n, double *x, double *tol);
extern void siaf_polyCub1_iso(double *x, double *y, int *L, int *intrfr_code, double *pars, int *subdivisions, double *epsabs, double *epsrel, int *stop_on_error, double *value, double *abserr, int *neval);
extern void SRspacetime(double *x, double *y, double *t, int *n, double *radius, double *epsilon, double *areaA, double *areaAcapBk, int *cusum, double *threshold, double *Rarray, int *idxFirstAlarm, int *idxClusterCenter);
extern void twins(int *x_ptr, int *n_ptr, int *I_ptr, char **logFile_ptr, char **logFile2_ptr, int *burnin_ptr, int *filter_ptr, int *sampleSize_ptr, double *alpha_xi_ptr, double *beta_xi_ptr, int *T_ptr, int *nfreq_ptr, double *psiRWSigma_ptr, double *alpha_psi_ptr, double *beta_psi_ptr, int *nu_trend_ptr);

/* .Call calls */
extern SEXP determineSources(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP eq3a(SEXP, SEXP, SEXP);

static const R_CMethodDef CEntries[] = {
    {"glr_cusum",            (DL_FUNC) &glr_cusum,            10},
    {"glr_cusum_window",     (DL_FUNC) &glr_cusum_window,     11},
    {"glr_epi_window",       (DL_FUNC) &glr_epi_window,        9},
    {"glr_nbgeneral_window", (DL_FUNC) &glr_nbgeneral_window, 11},
    {"glr_nb_window",        (DL_FUNC) &glr_nb_window,        10},
    {"lr_cusum",             (DL_FUNC) &lr_cusum,              9},
    {"lr_cusum_nb",          (DL_FUNC) &lr_cusum_nb,          10},
    {"pkolmogorov2x",        (DL_FUNC) &pkolmogorov2x,         2},
    {"pkstwo",               (DL_FUNC) &pkstwo,                3},
    {"siaf_polyCub1_iso",    (DL_FUNC) &siaf_polyCub1_iso,    12},
{"SRspacetime", (DL_FUNC) &SRspacetime, 13}, {"twins", (DL_FUNC) &twins, 16}, {NULL, NULL, 0} }; static const R_CallMethodDef CallEntries[] = { {"determineSources", (DL_FUNC) &determineSources, 6}, {"eq3a", (DL_FUNC) &eq3a, 3}, {NULL, NULL, 0} }; void R_init_surveillance(DllInfo *dll) { R_registerRoutines(dll, CEntries, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); R_forceSymbols(dll, TRUE); } surveillance/src/backproj.cc0000644000176200001440000000271312625315364015651 0ustar liggesusers#include using namespace Rcpp; RcppExport SEXP eq3a(SEXP rlambdaOld, SEXP ry, SEXP rincuPmf) { BEGIN_RCPP // get arguments NumericVector lambdaOld(rlambdaOld); int T = lambdaOld.length(); NumericVector y(ry); NumericVector incuPmf(rincuPmf); // Create long enough vectors for queries about dincu and pincu NumericVector dincu(T); NumericVector pincu(T); pincu[0] = dincu[0]; for (int i=1; i * Date: Aug 2008 * * * Header file containing wrappers for GSL related calls * to R calls using the R API. This code is used in twins.cc *******************************************************************/ /* new definitions to replace GSL code */ // Remove the dead RNG variable (DSB 04/05/2010): // int r; double gsl_rng_uniform () { // GetRNGstate(); double res = runif(0,1); //PutRNGstate(); return(res); } double gsl_ran_gaussian(double sigma) { //GetRNGstate(); double res = rnorm(0.0,sigma); //PutRNGstate(); return(res); } double gsl_ran_gamma(double a, double b) { //GetRNGstate(); double res = rgamma(a,b); //PutRNGstate(); return(res); } unsigned int gsl_ran_poisson(double lambda) { //GetRNGstate(); unsigned int res = rpois(lambda); //PutRNGstate(); return(res); } unsigned int gsl_ran_binomial(double p, unsigned int n) { //GetRNGstate(); unsigned int res = rbinom(n,p); //PutRNGstate(); return(res); } //hoehle: The original function assumes mu>0, which needs not be the case! //This version handles that part. This is the log version. double gsl_ran_poisson_log_pdf (const unsigned int k, const double mu) { double p; if (mu==0) { return(log((double)(k == 0))); } else { double lf = lgammafn(k+1); /*gsl2R: gsl_sf_lnfact(k) */ p = k*log(mu) - lf - mu; return p; } } double gsl_sf_lngamma(double x) { return(lgammafn(x)); } double gsl_ran_beta_pdf (double x, double a, double b) { return(dbeta(x,a,b,0)); } /********************************************************************** * Log version of the Gamma pdf with mean a*b and variance a*b^2. * **********************************************************************/ double gsl_ran_gamma_log_pdf (const double x, const double a, const double b) { if (x < 0) { //This is problematic! return log((double)0) ; } else if (x == 0) { if (a == 1) return log(1/b) ; else return log((double)0) ; } else if (a == 1) { return -x/b - log(b) ; } else { double p; /*gsl2R: double lngamma = gsl_sf_lngamma (a);*/ double lngamma = lgammafn(a); p = (a-1)*log(x) - x/b - lngamma - a*log(b); return p; } } /* Seed random number generator */ //void gsl_rng_set(int r, long seed) { // set.seed(seed); //} surveillance/src/stcd-assuncaocorrea.h0000644000176200001440000000261011746064472017663 0ustar liggesusers/** * File based on algoritmos.cpp and sv.cpp from the TerraView plugin. 
 * C++ source originally created by Marcos Oliveira Prates from the
 * Department of Statistics, UFMG, Brazil on 06 April 2006
 *
 * R interface by Michael Höhle initiated on 12 Jan 2009
 * Note: Some function names and documentation are in Portuguese
 */

#ifndef SRSPACETIME_H
#define SRSPACETIME_H

#include <list>
#include <valarray>

struct SVEvent {
  double x, y, t;
  friend bool operator<(const SVEvent &a, const SVEvent &b) {
    return (a.t < b.t);
  }
};

//STL is used (check its use)
typedef std::list<SVEvent> SVEventLst;

//Functions provided in sr-spacetime.cc
int CalculaNCj(short **MSpace, const int EvtN, const int EvtJ);
int ContaEvt(short **MSpace, const int EvtN, const int EvtJ);

//int SistemadeVigilancia(SVEventLst &, const double RaioC, const double epslon,
//                        std::valarray<double> &R);

//New version with a different estimation approach
int SistemadeVigilancia(SVEventLst &ev, const double RaioC, const double epslon,
                        const double areaA, double *areaAcapBk, const int cusum,
                        std::valarray<double> &R);

int CalculaLambda(SVEventLst &ev, const double RaioC, const double epslon,
                  std::valarray<double> &R, unsigned int &numObs);

// Hoehle wrapper function to create SVEvent list
//void SRspacetime(double *x, double *y, double *t, int *n, double *radius,
//                 double *epsilon, double *Rarray);

#endif
surveillance/src/ks.c0000644000176200001440000001265014013524252014320 0ustar liggesusers/*
 *  16-Aug 2012 / (C) Michael Hoehle
 *  This file is a modified version of the code ks.c available
 *  at http://svn.r-project.org/R/trunk/src/library/stats/src/ks.c (r60102)
 *  The file is copyright 1999-2009 by The R Core Team under GPL-2
 *  (or later) as shown below. As stated in the GPL-2 license
 *  the present file is again available under GPL-2.
 *
 *  License:
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, a copy is available at
 *  http://www.r-project.org/Licenses/
 */

/*
   ks.c
   Compute the asymptotic distribution of the one- and two-sample
   two-sided Kolmogorov-Smirnov statistics, and the exact distributions
   in the two-sided one-sample and two-sample cases.
*/

#include <R.h>
#include <Rmath.h>   /* constants */
/*#include "ctest.h"*/

static double K(int n, double d);
static void m_multiply(double *A, double *B, double *C, int m);
static void m_power(double *A, int eA, double *V, int *eV, int m, int n);

/* Two-sample two-sided asymptotic distribution */
void pkstwo(Sint *n, double *x, double *tol) {
/* x[1:n] is input and output
 *
 * Compute
 *   \sum_{k=-\infty}^\infty (-1)^k e^{-2 k^2 x^2}
 *   = 1 + 2 \sum_{k=1}^\infty (-1)^k e^{-2 k^2 x^2}
 *   = \frac{\sqrt{2\pi}}{x} \sum_{k=1}^\infty \exp(-(2k-1)^2\pi^2/(8x^2))
 *
 * See e.g. J. Durbin (1973), Distribution Theory for Tests Based on the
 * Sample Distribution Function.  SIAM.
 *
 * The 'standard' series expansion obviously cannot be used close to 0;
 * we use the alternative series for x < 1, and a rather crude estimate
 * of the series remainder term in this case, in particular using that
 * ue^(-lu^2) \le e^(-lu^2 + u) \le e^(-(l-1)u^2 - u^2+u) \le e^(-(l-1))
 * provided that u and l are >= 1.
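 * For example, with *tol = 1e-10 the truncation point below becomes
 * k_max = (Sint) sqrt(2 - log(1e-10)) = 5, so the alternative series
 * is summed over k = 1 and k = 3 only.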
* * (But note that for reasonable tolerances, one could simply take 0 as * the value for x < 0.2, and use the standard expansion otherwise.) * */ double new, old, s, w, z; Sint i, k, k_max; k_max = (Sint) sqrt(2 - log(*tol)); for(i = 0; i < *n; i++) { if(x[i] < 1) { z = - (M_PI_2 * M_PI_4) / (x[i] * x[i]); w = log(x[i]); s = 0; for(k = 1; k < k_max; k += 2) { s += exp(k * k * z - w); } x[i] = s / M_1_SQRT_2PI; } else { z = -2 * x[i] * x[i]; s = -1; k = 1; old = 0; new = 1; while(fabs(old - new) > *tol) { old = new; new += 2 * s * exp(z * k * k); s *= -1; k++; } x[i] = new; } } } /* The two-sided one-sample 'exact' distribution */ void pkolmogorov2x(double *x, Sint *n) { /* x is input and output. */ *x = K(*n, *x); } static double K(int n, double d) { /* Compute Kolmogorov's distribution. Code published in George Marsaglia and Wai Wan Tsang and Jingbo Wang (2003), "Evaluating Kolmogorov's distribution". Journal of Statistical Software, Volume 8, 2003, Issue 18. URL: http://www.jstatsoft.org/v08/i18/. */ int k, m, i, j, g, eH, eQ; double h, s, *H, *Q; /* The faster right-tail approximation is omitted here. s = d*d*n; if(s > 7.24 || (s > 3.76 && n > 99)) return 1-2*exp(-(2.000071+.331/sqrt(n)+1.409/n)*s); */ k = (int) (n * d) + 1; m = 2 * k - 1; h = k - n * d; H = (double*) Calloc(m * m, double); Q = (double*) Calloc(m * m, double); for(i = 0; i < m; i++) for(j = 0; j < m; j++) if(i - j + 1 < 0) H[i * m + j] = 0; else H[i * m + j] = 1; for(i = 0; i < m; i++) { H[i * m] -= pow(h, i + 1); H[(m - 1) * m + i] -= pow(h, (m - i)); } H[(m - 1) * m] += ((2 * h - 1 > 0) ? pow(2 * h - 1, m) : 0); for(i = 0; i < m; i++) for(j=0; j < m; j++) if(i - j + 1 > 0) for(g = 1; g <= i - j + 1; g++) H[i * m + j] /= g; eH = 0; m_power(H, eH, Q, &eQ, m, n); s = Q[(k - 1) * m + k - 1]; for(i = 1; i <= n; i++) { s = s * i / n; if(s < 1e-140) { s *= 1e140; eQ -= 140; } } s *= pow(10., eQ); Free(H); Free(Q); return(s); } static void m_multiply(double *A, double *B, double *C, int m) { /* Auxiliary routine used by K(). Matrix multiplication. */ int i, j, k; double s; for(i = 0; i < m; i++) for(j = 0; j < m; j++) { s = 0.; for(k = 0; k < m; k++) s+= A[i * m + k] * B[k * m + j]; C[i * m + j] = s; } } static void m_power(double *A, int eA, double *V, int *eV, int m, int n) { /* Auxiliary routine used by K(). Matrix power. 
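     Computes V = A^n (with a separate base-10 exponent tracked in eV to
     avoid overflow) by binary exponentiation: the routine recursively
     forms A^(n/2), squares it, and multiplies by A once more when n is
     odd; entries are rescaled by 1e-140 whenever they grow too large.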
   */
    double *B;
    int eB, i;

    if(n == 1) {
	for(i = 0; i < m * m; i++)
	    V[i] = A[i];
	*eV = eA;
	return;
    }
    m_power(A, eA, V, eV, m, n / 2);
    B = (double*) Calloc(m * m, double);
    m_multiply(V, V, B, m);
    eB = 2 * (*eV);
    if((n % 2) == 0) {
	for(i = 0; i < m * m; i++)
	    V[i] = B[i];
	*eV = eB;
    }
    else {
	m_multiply(A, B, V, m);
	*eV = eA + eB;
    }
    if(V[(m / 2) * m + (m / 2)] > 1e140) {
	for(i = 0; i < m * m; i++)
	    V[i] = V[i] * 1e-140;
	*eV += 140;
    }
    Free(B);
}
surveillance/vignettes/0000755000176200001440000000000014030612531014751 5ustar liggesuserssurveillance/vignettes/surveillance.Rnw0000644000176200001440000005527414004512307020153 0ustar liggesusers%\VignetteIndexEntry{Getting started with outbreak detection}

\documentclass[a4paper,11pt]{article}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{natbib}
\bibliographystyle{apalike}
\usepackage{lmodern}
\usepackage{amsmath}
\usepackage{amsfonts,amssymb}
\newcommand{\pkg}[1]{{\bfseries #1}}
\newcommand{\surveillance}{\pkg{surveillance}}
\usepackage{hyperref}
\hypersetup{
  pdfauthor = {Michael H\"ohle and Andrea Riebler and Michaela Paul},
  pdftitle = {Getting started with outbreak detection},
  pdfsubject = {R package 'surveillance'}
}

\title{Getting started with outbreak detection}
\author{
  Michael H{\"o}hle\thanks{Author of correspondence: Department of Statistics,
    University of Munich, Ludwigstr.\ 33, 80539 M{\"u}nchen, Germany,
    Email: \texttt{hoehle@stat.uni-muenchen.de}},
  Andrea Riebler and Michaela Paul\\
  Department of Statistics\\
  University of Munich\\
  Germany
}
\date{17 November 2007}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Sweave
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{Sweave}
%Put all in another directory
\SweaveOpts{prefix.string=plots/surveillance, width=9, height=4.5}
\setkeys{Gin}{width=1\textwidth}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Initial R code
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
<<echo=FALSE, results=hide>>=
library("surveillance")
options(SweaveHooks=list(fig=function() par(mar=c(4,4,2,0)+.5)))
options(width=70)
## create directory for plots
dir.create("plots", showWarnings=FALSE)

######################################################################
#Do we need to compute or can we just fetch results
######################################################################
CACHEFILE <- "surveillance-cache.RData"
compute <- !file.exists(CACHEFILE)
message("Doing computations: ", compute)
if(!compute) load(CACHEFILE)
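## Note on the caching pattern used throughout this vignette: expensive
## computations are run only when no cache file exists; their results are
## saved to CACHEFILE at the end and simply re-loaded on subsequent runs.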
}}

{\let\newpage\relax\maketitle}

\begin{abstract}
\noindent This document gives an introduction to the \textsf{R} package
\surveillance\ containing tools for outbreak detection in routinely
collected surveillance data. The package contains an implementation of
the procedures described by~\citet{stroup89}, \citet{farrington96} and
the system used at the Robert Koch Institute, Germany. For evaluation
purposes, the package contains example data sets and functionality to
generate surveillance data by simulation. To compare the algorithms,
benchmark numbers like sensitivity, specificity, and detection delay can
be computed for a set of time series. Being an open-source package it
should be easy to integrate new algorithms; as an example of this
process, a simple Bayesian surveillance algorithm is described,
implemented and evaluated.\\

\noindent{\bf Keywords:} infectious disease, monitoring, aberrations,
outbreak, time series of counts.
\end{abstract}

\newpage

\section{Introduction}\label{sec:intro}

Public health authorities have, in an attempt to meet the threats of
infectious diseases to society, created comprehensive mechanisms for the
collection of disease data. As a consequence, the abundance of data has
demanded the development of automated algorithms for the detection of
abnormalities. Typically, such an algorithm monitors a univariate time
series of counts using a combination of heuristic methods and
statistical modelling. Prominent examples of surveillance algorithms are
the work by~\citet{stroup89} and~\citet{farrington96}. A comprehensive
survey of outbreak detection methods can be found
in~\citet{farrington2003}.

The R-package \texttt{surveillance} was written with the aim of
providing a test-bench for surveillance algorithms. From the
Comprehensive R Archive Network (CRAN) the package can be downloaded
together with its source code. It allows users to test new algorithms
and compare their results with those of standard surveillance methods. A
few real world outbreak datasets are included together with mechanisms
for simulating surveillance data. With the package at hand, comparisons
like the one described by~\citet{hutwagner2005} should be easy to
conduct.

The purpose of this document is to illustrate the basic functionality of
the package with R-code examples. Section~\ref{sec:data} contains a
description of the data format used to store surveillance data, mentions
the built-in datasets and illustrates how to create new datasets by
simulation. Section~\ref{sec:algo} contains a short description of how
to use the surveillance algorithms and illustrates the results. Further
information on the individual functions can be found on the
corresponding help pages of the package.

\section{Surveillance Data}\label{sec:data}

Denote by $\{y_t\>;t=1,\ldots,n\}$ the time series of counts representing
the surveillance data. Because such data typically are collected on a
weekly basis, we shall also use the alternative notation $\{y_{i:j}\}$
with $j=\{1,\ldots,52\}$ being the week number in year
$i=\{-b,\ldots,-1,0\}$. That way the years are indexed such that the most
recent year has index zero. For evaluation of the outbreak detection
algorithms it is also possible for each week to store -- if known --
whether there was an outbreak that week.
The resulting multivariate series $\{(y_t,x_t)\>; t=1,\ldots,n\}$ is in
\texttt{surveillance} given by an object of class \texttt{disProg}
(disease progress), which is basically a \texttt{list} containing two
vectors: the observed number of counts and a boolean vector
\texttt{state} indicating whether there was an outbreak that week. A
number of time series are contained in the package (see
\texttt{data(package="surveillance")}), mainly originating from the
SurvStat@RKI database at \url{https://survstat.rki.de/} maintained by
the Robert Koch Institute, Germany~\citep{survstat}. For example the
object \texttt{k1} describes cryptosporidiosis surveillance data for the
German federal state Baden-W\"{u}rttemberg 2001-2005. The peak in 2001
is due to an outbreak of cryptosporidiosis among a group of army
soldiers in a boot camp~\citep{bulletin3901}.

<<fig=TRUE>>=
data(k1)
plot(k1, main = "Cryptosporidiosis in BW 2001-2005")
@

For evaluation purposes it is also of interest to generate surveillance
data using simulation. The package contains functionality to generate
surveillance data containing point-source like outbreaks, for example
with a Salmonella serovar. The model is a Hidden Markov Model (HMM)
where a binary state $X_t, t=1,\ldots,n$, denotes whether there was an
outbreak and $Y_t$ is the number of observed counts, see
Figure~\ref{fig:hmm}.

\begin{figure}[htb]
  \centering
  \includegraphics[width=.75\textwidth]{surveillance-hmm}
  \caption{The Hidden Markov Model}
  \label{fig:hmm}
\end{figure}

The state $X_t$ is a homogeneous Markov chain with transition matrix
\begin{center}
\begin{tabular}{c|cc}
  $X_t\backslash X_{t+1}$ & 0 & 1\\
  \hline
  $0$ & $p$ & $1 - p$ \\
  $1$ & $1 - r$ & $r$
\end{tabular}
\end{center}
Hence $1-p$ is the probability to switch to an outbreak state and $r$ is
the probability that $X_t=1$ is followed by $X_{t+1}=1$. Furthermore,
the observation $Y_t$ is Poisson-distributed with log-link mean
depending on a seasonal effect and time trend, i.e.\
\[
\log \mu_t = A \cdot \sin \, (\omega \cdot (t + \varphi)) + \alpha + \beta t.
\]
In case of an outbreak $(X_t=1)$ the mean increases with a value of $K$,
altogether
\begin{equation}\label{eq:hmm}
Y_t \sim \operatorname{Po}(\mu_t + K \cdot X_t).
\end{equation}
The model in (\ref{eq:hmm}) corresponds to a single-source,
common-vehicle outbreak, where the length of an outbreak is controlled
by the transition probability $r$. The daily numbers of outbreak-cases
are simply independently Poisson distributed with mean $K$. A
physiologically better motivated alternative could be to operate with a
stochastic incubation time (e.g.\ log-normal or gamma distributed) for
each individual exposed to the source, which results in a temporal
diffusion of the peak. The advantage of (\ref{eq:hmm}) is that
estimation can be done by a generalized linear model (GLM) using $X_t$
as covariate and that it allows for an easy definition of a correctly
identified outbreak: each $X_t=1$ has to be identified. More advanced
setups would require more involved definitions of an outbreak, e.g.\ as
a connected series of time instances, where the number of outbreak cases
is greater than zero. Care is then required in defining what a correctly
identified outbreak for time-wise overlapping outbreaks means.

In \surveillance\ the function \verb+sim.pointSource+ is used to
simulate such a point-source epidemic; the result is an object of class
\verb+disProg+.
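As a quick plausibility check of such a simulation, note that the
stationary probability of the outbreak state is
$(1-p)/\{(1-p)+(1-r)\}$; for the values $p=0.99$ and $r=0.5$ used in the
example below, outbreak weeks are thus expected in
$0.01/0.51 \approx 2\%$ of all weeks.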
\label{ex:sts}
<<>>=
set.seed(1234)
sts <- sim.pointSource(p = 0.99, r = 0.5, length = 400, A = 1,
                       alpha = 1, beta = 0, phi = 0, frequency = 1,
                       state = NULL, K = 1.7)
@
<<fig=TRUE>>=
plot(sts)
@

\section{Surveillance Algorithms}\label{sec:algo}

Surveillance data often exhibit strong seasonality, therefore most
surveillance algorithms only use a set of so-called \emph{reference
values} as the basis for drawing conclusions. Let $y_{0:t}$ be the number
of cases of the current week (denoted week $t$ in year $0$), $b$ the
number of years to go back in time and $w$ the number of weeks around
$t$ to include from those previous years. For the year zero we use $w_0$
as the number of previous weeks to include -- typically $w_0=w$.
Altogether the set of reference values is thus defined to be
\[
R(w,w_0,b) = \left(\bigcup\limits_{i=1}^b\bigcup\limits_{j=\,-w}^w
  y_{-i:t+j}\right) \cup \left(\bigcup_{k=-w_0}^{-1} y_{0:t+k}\right)
\]
Note that the number of cases of the current week is not part of
$R(w,w_0,b)$.

A surveillance algorithm is a procedure using the reference values to
create a prediction $\hat{y}_{0:t}$ for the current week. This
prediction is then compared with the observed $y_{0:t}$: if the observed
number of cases is much higher than the predicted number, the current
week is flagged for further investigations. In order to do surveillance
for time $0:t$ an important concern is the choice of $b$ and $w$. Values
as far back as time $-b:t-w$ contribute to $R(w,w_0,b)$ and thus have to
exist in the observed time series.

Currently, we have implemented four different types of algorithms in
\surveillance. The Centers for Disease Control and Prevention (CDC)
method~\citep{stroup89}, the Communicable Disease Surveillance Centre
(CDSC) method~\citep{farrington96}, the method used at the Robert Koch
Institute (RKI), Germany~\citep{altmann2003}, and a Bayesian approach
documented in~\citet{riebler2004}. A detailed description of each method
is beyond the scope of this note, but to give an idea of the framework
the Bayesian approach developed in~\citet{riebler2004} is presented:
Within a Bayesian framework, quantiles of the predictive posterior
distribution are used as a measure for defining alarm thresholds.

The model assumes that the reference values are independently and
identically Poisson distributed with parameter $\lambda$ and a
Gamma-distribution is used as prior distribution for $\lambda$. The
reference values are defined to be $R_{\text{Bayes}}= R(w,w_0,b) = \{y_1,
\ldots, y_{n}\}$ and $y_{0:t}$ is the value we are trying to predict.
Thus, $\lambda \sim \text{Ga}(\alpha, \beta)$ and $y_i|\lambda \sim
\text{Po}(\lambda)$, $i = 1,\ldots,{n}$. Standard derivations show that
the posterior distribution is
\begin{equation*}
\lambda|y_1, \ldots, y_{n} \sim \text{Ga}(\alpha + \sum_{i=1}^{n} y_i, \beta + n).
\end{equation*}
Computing the predictive distribution
\begin{equation*}
f(y_{0:t}|y_1,\ldots,y_{n}) =
\int\limits^\infty_0{f(y_{0:t}|\lambda)\, f(\lambda|y_1,\ldots,y_{n})}\, d\lambda
\end{equation*}
we get the Poisson-Gamma-distribution
\begin{equation*}
y_{0:t}|y_1,\ldots,y_{n} \sim \text{PoGa}(\alpha + \sum_{i=1}^{n} y_i, \beta + n),
\end{equation*}
which is a generalization of the negative Binomial distribution, i.e.\
\[
y_{0:t}|y_1,\ldots,y_{n} \sim \text{NegBin}(\alpha + \sum_{i=1}^{n} y_i, \tfrac{\beta + n}{\beta + n + 1}).
\]
Using Jeffreys' prior $\text{Ga}(\tfrac{1}{2}, 0)$ as a non-informative
prior distribution for $\lambda$ the parameters of the negative Binomial
distribution are
\begin{align*}
\alpha + \sum_{i=1}^{n} y_i &= \frac{1}{2} +
\sum_{y_{i:j} \in R_{\text{Bayes}}}\!\! y_{i:j}
\quad %  \intertext{and}
\quad\text{and}\quad
\frac{\beta + n}{\beta + n + 1} =
\frac{|R_{\text{Bayes}}|}{|R_{\text{Bayes}}| + 1}.
\end{align*}
Using a quantile-parameter $\alpha$, the smallest value $y_\alpha$ is
computed, so that
\begin{equation*}
P(y \leq y_\alpha) \geq 1-\alpha.
\end{equation*}
Now
\begin{equation*}
A_{0:t} = I(y_{0:t} \geq y_\alpha),
\end{equation*}
i.e. if $y_{0:t}\geq y_\alpha$ the current week is flagged as an alarm.
As an example, the \verb+Bayes1+ method uses the last six weeks as
reference values, i.e.\ $R(w,w_0,b)=(6,6,0)$, and is applied to the
\texttt{k1} dataset with $\alpha=0.01$ as follows.

<<fig=TRUE>>=
k1.b660 <- algo.bayes(k1,
  control = list(range = 27:192, b = 0, w = 6, alpha = 0.01))
plot(k1.b660, disease = "k1", firstweek = 1, startyear = 2001)
@

Several extensions of this simple Bayesian approach are imaginable, for
example the innate over-dispersion of the data could be modeled by using
a negative-binomial distribution, time trends and mechanisms to correct
for past outbreaks could be integrated, but all at the cost of
non-standard inference for the predictive distribution. Here simulation
based methods like Markov Chain Monte Carlo or heuristic approximations
have to be used to obtain the required alarm thresholds.

In general, the \verb+surveillance+ package makes it easy to add
additional algorithms -- also those not based on reference values -- by
using the existing implementations as a starting point.

The following call uses the CDC and Farrington procedure on the
simulated time series \verb+sts+ from page~\pageref{ex:sts}. Note that
the CDC procedure operates with four-week aggregated data -- to better
compare the upper bound value, the aggregated number of counts for each
week are shown as circles in the plot.

<<cdcfarrington,eval=FALSE>>=
cntrl <- list(range=300:400,m=1,w=3,b=5,alpha=0.01)
sts.cdc  <- algo.cdc(sts, control = cntrl)
sts.farrington <- algo.farrington(sts, control = cntrl)
@
<<echo=FALSE>>=
if (compute) {
<<cdcfarrington>>
}
@
<<fig=TRUE>>=
par(mfcol=c(1,2))
plot(sts.cdc, legend.opts=NULL)
plot(sts.farrington, legend.opts=NULL)
@

Typically, one is interested in evaluating the performance of the
various surveillance algorithms. An easy way is to look at the
sensitivity and specificity of the procedure -- a correct identification
of an outbreak is defined as follows: if the algorithm raises an alarm
for time $t$, i.e.\ $A_t=1$ and $X_t=1$ we have a correct
classification, if $A_t=1$ and $X_t=0$ we have a false-positive, etc. In
case of more involved outbreak models, where an outbreak lasts for more
than one week, a correct identification could be if at least one of the
outbreak weeks is correctly identified, see e.g.\
\citet{hutwagner2005}.

To compute various performance scores the function \verb+algo.quality+
can be used on a \verb+survRes+ object.

<<>>=
print(algo.quality(k1.b660))
@

This computes the number of false positives, true negatives, false
negatives, the sensitivity and the specificity. Furthermore,
\texttt{dist} is defined as
\[
\sqrt{(Spec-1)^2 + (Sens - 1)^2},
\]
that is the distance to the optimal point $(1,1)$, which serves as a
heuristic way of combining sensitivity and specificity into a single
score. Of course, weighted versions are also imaginable.
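For illustration, an algorithm attaining $Spec=0.95$ and $Sens=0.80$
would obtain
\[
\sqrt{(0.95-1)^2 + (0.80-1)^2} = \sqrt{0.0025 + 0.04} \approx 0.21,
\]
so smaller values of \texttt{dist} indicate performance closer to the
optimum.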
Finally, \texttt{lag} is the average number of weeks between the first
of a consecutive number of $X_t=1$'s (i.e.\ an outbreak) and the first
alarm raised by the algorithm.

To compare the results of several algorithms on a single time series we
declare a list of control objects -- each containing the name and
settings of the algorithm we want to apply to the data.

<<>>=
control <- list(
  list(funcName = "rki1"), list(funcName = "rki2"),
  list(funcName = "rki3"), list(funcName = "bayes1"),
  list(funcName = "bayes2"), list(funcName = "bayes3"),
  list(funcName = "cdc", alpha=0.05),
  list(funcName = "farrington", alpha=0.05)
)
control <- lapply(control, function(ctrl) {
  ctrl$range <- 300:400; return(ctrl)
})
@
%
In the above, \texttt{rki1}, \texttt{rki2} and \texttt{rki3} are three
methods with reference values $R_\text{rki1}(6,6,0)$,
$R_\text{rki2}(6,6,1)$ and $R_\text{rki3}(4,0,2)$, all called with
$\alpha=0.05$. The \texttt{bayes*} methods use the Bayesian algorithm
with the same setup of reference values. The CDC method is special since
it operates on aggregated four-week blocks. To make everything
comparable, a common $\alpha=0.05$ level is used for all algorithms. All
algorithms in \texttt{control} are applied to \texttt{sts} using:

<<eval=FALSE>>=
algo.compare(algo.call(sts, control = control))
@
<<echo=FALSE>>=
if (compute) {
  acall <- algo.call(sts, control = control)
}
print(algo.compare(acall), digits = 3)
@

A test on a set of time series can be done as follows. Firstly, a list
containing 10 simulated time series is created. Secondly, all the
algorithms specified in the \texttt{control} object are applied to each
series. Finally the results for the 10 series are combined in one result
matrix.

<<>>=
#Create 10 series
ten <- lapply(1:10,function(x) {
  sim.pointSource(p = 0.975, r = 0.5, length = 400, A = 1,
                  alpha = 1, beta = 0, phi = 0, frequency = 1,
                  state = NULL, K = 1.7)})
@
<<tensurv,eval=FALSE>>=
#Do surveillance on all 10, get results as list
ten.surv <- lapply(ten,function(ts) {
  algo.compare(algo.call(ts,control=control))
})
@
<<echo=FALSE>>=
if (compute) {
<<tensurv>>
}
@
<<eval=FALSE>>=
#Average results
algo.summary(ten.surv)
@
<<echo=FALSE>>=
print(algo.summary(ten.surv), digits = 3)
@

A similar procedure can be applied when evaluating the 14 surveillance
series drawn from SurvStat@RKI~\citep{survstat}. A problem is, however,
that the series after conversion to 52 weeks/year are of length 209
weeks. This is insufficient to apply e.g.\ the CDC algorithm. To conduct
the comparison on as large a dataset as possible the following trick is
used: The function \texttt{enlargeData} replicates the requested
\texttt{range} and inserts it before the original data, after which the
evaluation can be done on all 209 values.

<<>>=
#Update range in each - cyclic continuation
range = (2*4*52) + 1:length(k1$observed)
control <- lapply(control,function(cntrl) {
  cntrl$range=range;return(cntrl)})

#Auxiliary function to enlarge data
enlargeData <- function(disProgObj, range = 1:156, times = 1){
  disProgObj$observed <- c(rep(disProgObj$observed[range], times),
                           disProgObj$observed)
  disProgObj$state <- c(rep(disProgObj$state[range], times),
                        disProgObj$state)
  return(disProgObj)
}

#Outbreaks
outbrks <- c("m1", "m2", "m3", "m4", "m5", "q1_nrwh", "q2",
             "s1", "s2", "s3", "k1", "n1", "n2", "h1_nrwrp")

#Load and enlarge data.
outbrks <- lapply(outbrks,function(name) {
  data(list=name)
  enlargeData(get(name),range=1:(4*52),times=2)
})

#Apply function to one
one.survstat.surv <- function(outbrk) {
  algo.compare(algo.call(outbrk,control=control))
}
@
<<eval=FALSE>>=
algo.summary(lapply(outbrks,one.survstat.surv))
@
<<echo=FALSE>>=
if (compute) {
  res.survstat <- algo.summary(lapply(outbrks,one.survstat.surv))
}
print(res.survstat, digits=3)
@

In both this study and the earlier simulation study the Bayesian
approach seems to do quite well. However, the limited extent of the
comparisons does not allow for more strongly supported statements.
Consult the work of~\citet{riebler2004} for a more thorough comparison
using simulation studies.

<<echo=FALSE>>=
if (compute) { # save computed results
  save(list=c("sts.cdc","sts.farrington","acall","res.survstat",
              "ten.surv"), file=CACHEFILE)
  tools::resaveRdaFiles(CACHEFILE)
}
@

\section{Discussion and Future Work}

Many extensions and additions are imaginable to improve the package. For
now, the package is intended as an academic tool providing a test-bench
for integrating new surveillance algorithms. Because all algorithms are
implemented in R, performance has not been a priority. Especially the
current implementation of the Farrington Procedure is rather slow and
would benefit from an optimization possible with fragments written in C.

One important improvement would be to provide more involved mechanisms
for the simulation of epidemics. In particular it would be interesting
to include multi-day outbreaks originating from single-source exposure,
but with delay due to varying incubation
time~\citep{hutwagner2005} or SEIR-like
epidemics~\citep{andersson2000}. However, defining what is meant by a
correct outbreak identification, especially in the case of overlapping
outbreaks, creates new challenges which have to be met.

\section{Acknowledgements}

We are grateful to K.\ Stark and D.\ Altmann, RKI, Germany, for
discussions and information on the surveillance methods used by the RKI.
Our thanks to C.\ Lang, University of Munich, for his work on the
R--implementation and M. Kobl, T. Schuster and M. Rossman, University of
Munich, for their initial work on gathering the outbreak data from
SurvStat@RKI. The research was conducted with financial support from the
Collaborative Research Centre SFB 386 funded by the German research
foundation (DFG).
\bibliography{references}

\end{document}
surveillance/vignettes/hhh4-cache.RData0000644000176200001440000000775612655465227017604 0ustar liggesusers
[binary RData payload omitted]
surveillance/vignettes/twinstim.Rnw
<<include = FALSE, purl = FALSE>>=
## purl=FALSE => not included in the tangle'd R script
knitr::opts_chunk$set(echo = TRUE, tidy = FALSE, results = 'markup',
                      fig.path='plots/twinstim-', fig.width = 8, fig.height = 4,
                      fig.align = "center", fig.scap = NA, out.width = NULL,
                      cache = FALSE, error = FALSE, warning = FALSE, message = FALSE)
knitr::render_sweave()             # use Sweave environments
knitr::set_header(highlight = '')  # no \usepackage{Sweave} (part of jss class)

## add a chunk option "strip.white.output" to remove leading and trailing white
## space (empty lines) from output chunks ('strip.white' has no effect)
local({
    default_output_hook <- knitr::knit_hooks$get("output")
    knitr::knit_hooks$set(output = function (x, options) {
        if (isTRUE(options[["strip.white.output"]])) {
            x <- sub("[[:space:]]+$", "\n",        # set a single trailing \n
                     sub("^[[:space:]]+", "", x))  # remove leading space
        }
        default_output_hook(x, options)
    })
})

## R settings
options(prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE)  # JSS
options(width = 85, digits = 4)
options(scipen = 1)  # so that 1e-4 gets printed as 0.0001

## xtable settings
options(xtable.booktabs = TRUE, xtable.size = "small",
        xtable.sanitize.text.function = identity, xtable.comment = FALSE)
@

<<include = FALSE>>=
## load the "cool" package
library("surveillance")

## Compute everything or fetch cached results?
message("Doing computations: ", COMPUTE <- !file.exists("twinstim-cache.RData")) if (!COMPUTE) load("twinstim-cache.RData", verbose = TRUE) @ \documentclass[nojss,nofooter,article]{jss} \usepackage[latin1]{inputenc} % Rnw is ASCII, but automatic package bib isn't \title{% \vspace{-1.5cm} \fbox{\vbox{\normalfont\footnotesize This introduction to the \code{twinstim} modeling framework of the \proglang{R}~package \pkg{surveillance} is based on a publication in the \textit{Journal of Statistical Software} -- \citet[Section~3]{meyer.etal2014} -- which is the suggested reference if you use the \code{twinstim} implementation in your own work.}}\\[1cm] \code{twinstim}: An endemic-epidemic modeling framework for spatio-temporal point patterns} \Plaintitle{twinstim: An endemic-epidemic modeling framework for spatio-temporal point patterns} \Shorttitle{Endemic-epidemic modeling of spatio-temporal point patterns} \author{Sebastian Meyer\thanks{Author of correspondence: \email{seb.meyer@fau.de}}\\Friedrich-Alexander-Universit{\"a}t\\Erlangen-N{\"u}rnberg \And Leonhard Held\\University of Zurich \And Michael H\"ohle\\Stockholm University} \Plainauthor{Sebastian Meyer, Leonhard Held, Michael H\"ohle} %% Basic packages \usepackage{lmodern} % successor of CM -> searchable Umlauts (1 char) \usepackage[english]{babel} % language of the manuscript is American English %% Math packages \usepackage{amsmath,amsfonts} % amsfonts defines \mathbb \usepackage{bm} % \bm: alternative to \boldsymbol from amsfonts %% Packages for figures and tables \usepackage{booktabs} % make tables look nicer \usepackage{subcaption} % successor of subfig, which supersedes subfigure %% knitr uses \subfloat, which subcaption only provides since v1.3 (2019/08/31) \providecommand{\subfloat}[2][need a sub-caption]{\subcaptionbox{#1}{#2}} %% Handy math commands \newcommand{\abs}[1]{\lvert#1\rvert} \newcommand{\norm}[1]{\lVert#1\rVert} \newcommand{\given}{\,\vert\,} \newcommand{\dif}{\,\mathrm{d}} \newcommand{\IR}{\mathbb{R}} \newcommand{\IN}{\mathbb{N}} \newcommand{\ind}{\mathbb{I}} \DeclareMathOperator{\Po}{Po} \DeclareMathOperator{\NegBin}{NegBin} \DeclareMathOperator{\N}{N} %% Additional commands \newcommand{\class}[1]{\code{#1}} % could use quotes (JSS does not like them) \newcommand{\CRANpkg}[1]{\href{https://CRAN.R-project.org/package=#1}{\pkg{#1}}} %% Reduce the font size of code input and output \DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl, fontsize=\small} \DefineVerbatimEnvironment{Soutput}{Verbatim}{fontsize=\small} %% Abstract \Abstract{ The availability of geocoded health data and the inherent temporal structure of communicable diseases have led to an increased interest in statistical models and software for spatio-temporal data with epidemic features. The \proglang{R}~package \pkg{surveillance} can handle various levels of aggregation at which infective events have been recorded. This vignette illustrates the analysis of \emph{point-referenced} surveillance data using the endemic-epidemic point process model ``\code{twinstim}'' proposed by \citet{meyer.etal2011} and extended in \citet{meyer.held2013}. %% (For other types of surveillance data, see %% \code{vignette("twinSIR")} and \code{vignette("hhh4\_spacetime")}.) 
We first describe the general modeling approach and then exemplify data handling, model fitting, visualization, and simulation methods for time-stamped geo-referenced case reports of invasive meningococcal disease (IMD) caused by the two most common bacterial finetypes of meningococci in Germany, 2002--2008. } \Keywords{% spatio-temporal point pattern, endemic-epidemic modeling, infectious disease epidemiology, self-exciting point process, spatial interaction function, branching process with immigration} \begin{document} %% \vfill %% { %% \renewcommand{\abstractname}{Outline} % local change %% \begin{abstract} %% We start by describing the general model class in %% Section~\ref{sec:twinstim:methods}. %% Section~\ref{sec:twinstim:data} introduces the example data and the %% associated class \class{epidataCS}, %% Section~\ref{sec:twinstim:fit} presents the core functionality of %% fitting and analyzing such data using \code{twinstim}, and %% Section~\ref{sec:twinstim:simulation} shows how to simulate realizations %% from a fitted model. %% \end{abstract} %% } %% \vfill %% \newpage \section[Model class]{Model class: \code{twinstim}} \label{sec:twinstim:methods} Infective events occur at specific points in continuous space and time, which gives rise to a spatio-temporal point pattern $\{(\bm{s}_i,t_i): i = 1,\dotsc,n\}$ from a region~$\bm{W}$ observed during a period~$(0,T]$. The locations~$\bm{s}_i$ and time points~$t_i$ of the $n$~events can be regarded as a realization of a self-exciting spatio-temporal point process, which can be characterized by its conditional intensity function (CIF, also termed intensity process) $\lambda(\bm{s},t)$. It represents the instantaneous event rate at location~$\bm{s}$ at time point~$t$ given all past events, and is often more verbosely denoted by~$\lambda^*$ or by explicit conditioning on the ``history''~$\mathcal{H}_t$ of the process. \citet[Chapter~7]{Daley.Vere-Jones2003} provide a rigorous mathematical definition of this concept, which is key to likelihood analysis and simulation of ``evolutionary'' point processes. \citet{meyer.etal2011} formulated the model class ``\code{twinstim}'' -- a \emph{two}-component \emph{s}patio-\emph{t}emporal \emph{i}ntensity \emph{m}odel -- by a superposition of an endemic and an epidemic component: \begin{equation} \label{eqn:twinstim} \lambda(\bm{s},t) = \nu_{[\bm{s}][t]} + \sum_{j \in I(\bm{s},t)} \eta_j \, f(\norm{\bm{s}-\bm{s}_j}) \, g(t-t_j) \:. \end{equation} This model constitutes a branching process with immigration. Part of the event rate is due to the first, endemic component, which reflects sporadic events caused by unobserved sources of infection. This background rate of new events is modeled by a log-linear predictor $\nu_{[\bm{s}][t]}$ incorporating regional and/or time-varying characteristics. Here, the space-time index $[\bm{s}][t]$ refers to the region covering $\bm{s}$ during the period containing $t$ and thus spans a whole spatio-temporal grid on which the involved covariates are measured, e.g., district $\times$ month. We will later see that the endemic component therefore simply equals an inhomogeneous Poisson process for the event counts by cell of that grid. The second, observation-driven epidemic component adds ``infection pressure'' from the set \begin{equation*} I(\bm{s},t) = \big\{ j : t_j < t \:\wedge\: t-t_j \le \tau_j \:\wedge\: \norm{\bm{s}-\bm{s}_j} \le \delta_j \big\} \end{equation*} of past events and hence makes the process ``self-exciting''. 
During its infectious period of length~$\tau_j$ and within its spatial interaction radius~$\delta_j$, the model assumes each event~$j$ to trigger further events, which are called offspring, secondary cases, or aftershocks, depending on the application. The triggering rate (or force of infection) is proportional to a log-linear predictor~$\eta_j$ associated with event-specific characteristics (``marks'') $\bm{m}_j$, which are usually attached to the point pattern of events. The decay of infection pressure with increasing spatial and temporal distance from the infective event is modeled by parametric interaction functions~$f$ and~$g$, respectively. A simple assumption for the time course of infectivity is $g(t) = 1$. Alternatives include exponential decay, a step function, or empirically derived functions such as Omori's law for aftershock intervals. With regard to spatial interaction, a Gaussian kernel $f(x) = \exp\left\{-x^2/(2 \sigma^2)\right\}$ could be chosen. However, in modeling the spread of human infectious diseases on larger scales, a heavy-tailed power-law kernel $f(x) = (x+\sigma)^{-d}$ was found to perform better \citep{meyer.held2013}. The (possibly infinite) upper bounds~$\tau_j$ and~$\delta_j$ provide a way of modeling event-specific interaction ranges. However, since these need to be pre-specified, a common assumption is $\tau_j \equiv \tau$ and $\delta_j \equiv \delta$, where the infectious period~$\tau$ and the spatial interaction radius~$\delta$ are determined by subject-matter considerations. \subsection{Model-based effective reproduction numbers} Similar to the simple SIR model \citep[see, e.g.,][Section 2.1]{Keeling.Rohani2008}, the above point process model~\eqref{eqn:twinstim} features a reproduction number derived from its branching process interpretation. As soon as an event occurs (individual becomes infected), it triggers offspring (secondary cases) around its origin $(\bm{s}_j, t_j)$ according to an inhomogeneous Poisson process with rate $\eta_j \, f(\norm{\bm{s}-\bm{s}_j}) \, g(t-t_j)$. Since this triggering process is independent of the event's parentage and of other events, the expected number $\mu_j$ of events triggered by event $j$ can be obtained by integrating the triggering rate over the observed interaction domain: \begin{equation} \label{eqn:R0:twinstim} \mu_j = \eta_j \cdot \left[ \int_0^{\min(T-t_j,\tau_j)} g(t) \,dt \right] \cdot \left[ \int_{\bm{R}_j} f(\norm{\bm{s}}) \,d\bm{s} \right] \:, \end{equation} where \begin{equation} \label{eqn:twinstim:IR} \bm{R}_j = (b(\bm{s}_j,\delta_j) \cap \bm{W}) - \bm{s}_j \end{equation} is event $j$'s influence region centered at $\bm{s}_j$, and $b(\bm{s}_j, \delta_j)$ denotes the disc centered at $\bm{s}_j$ with radius $\delta_j$. Note that the above model-based reproduction number $\mu_j$ is event-specific since it depends on event marks through $\eta_j$, on the interaction ranges $\delta_j$ and $\tau_j$, as well as on the event location $\bm{s}_j$ and time point $t_j$. If the model assumes unique interaction ranges $\delta$ and $\tau$, a single reference number of secondary cases can be extrapolated from Equation~\ref{eqn:R0:twinstim} by imputing an unbounded domain $\bm{W} = \IR^2$ and $T = \infty$ \citep{meyer.etal2015}. Equation~\ref{eqn:R0:twinstim} can also be motivated by looking at a spatio-temporal version of the simple SIR model wrapped into the \class{twinstim} class~\eqref{eqn:twinstim}. 
This means: no endemic component, homogeneous force of infection ($\eta_j \equiv \beta$), homogeneous mixing in space ($f(x) = 1$, $\delta_j \equiv \infty$), and exponential decay of infectivity over time ($g(t) = e^{-\alpha t}$, $\tau_j \equiv \infty$). Then, for $T \rightarrow \infty$, \begin{equation*} \mu = \beta \cdot \left[ \int_0^\infty e^{-\alpha t} \,dt \right] \cdot \left[ \int_{\bm{W}-\bm{s}_j} 1 \,d\bm{s} \right] = \beta \cdot \abs{\bm{W}} / \alpha \:, \end{equation*} which corresponds to the basic reproduction number known from the simple SIR model by interpreting $\abs{\bm{W}}$ as the population size, $\beta$ as the transmission rate and $\alpha$ as the removal rate. If $\mu < 1$, the process is sub-critical, i.e., its eventual extinction is almost sure. However, it is crucial to understand that in a full model with an endemic component, new infections may always occur via ``immigration''. Hence, reproduction numbers in \class{twinstim} are adjusted for infections occurring independently of previous infections. This also means that a misspecified endemic component may distort model-based reproduction numbers \citep{meyer.etal2015}. Furthermore, under-reporting and implemented control measures imply that the estimates are to be thought of as \emph{effective} reproduction numbers. \subsection{Likelihood inference} The log-likelihood of the point process model~\eqref{eqn:twinstim} is a function of all parameters in the log-linear predictors $\nu_{[\bm{s}][t]}$ and $\eta_j$ and in the interaction functions $f$ and $g$. It has the form %% \begin{equation} \label{eqn:twinstim:marked:loglik} %% l(\bm{\theta}) = \left[ \sum_{i=1}^{n} \log\lambda(\bm{s}_i,t_i,k_i) \right] - %% \sum_{k\in\mathcal{K}} \int_0^T \int_{\bm{W}} \lambda(\bm{s},t,k) \dif\bm{s} %% \dif t \:, %% \end{equation} \begin{equation} \label{eqn:twinstim:loglik} \left[ \sum_{i=1}^{n} \log\lambda(\bm{s}_i,t_i) \right] - \int_0^T \int_{\bm{W}} \lambda(\bm{s},t) \dif\bm{s} \dif t \:. \end{equation} %\citep[Proposition~7.3.III]{Daley.Vere-Jones2003} To estimate the model parameters, we maximize the above log-likelihood numerically using the quasi-Newton algorithm available through the \proglang{R}~function \code{nlminb}. We thereby employ the analytical score function and an approximation of the expected Fisher information worked out by \citet[Web Appendices A and B]{meyer.etal2011}. The space-time integral in the log-likelihood \eqref{eqn:twinstim:loglik} poses no difficulties for the endemic component of $\lambda(\bm{s},t)$, since $\nu_{[\bm{s}][t]}$ is defined on a spatio-temporal grid. However, integration of the epidemic component involves two-dimensional integrals $\int_{\bm{R}_i} f(\norm{\bm{s}}) \dif\bm{s}$ over the influence regions~$\bm{R}_i$, which are represented by polygons (as is~$\bm{W}$). Similar integrals appear in the score function, where $f(\norm{\bm{s}})$ is replaced by partial derivatives with respect to kernel parameters. Calculation of these integrals is trivial for (piecewise) constant~$f$, but otherwise requires numerical integration. The \proglang{R}~package \CRANpkg{polyCub} \citep{meyer2019} offers various cubature methods for polygonal domains. % For Gaussian~$f$, we apply a midpoint rule with $\sigma$-adaptive bandwidth % %% combined with an analytical formula via the $\chi^2$ distribution % %% if the $6\sigma$-circle around $\bm{s}_i$ is contained in $\bm{R}_i$. % and use product Gauss cubature \citep{sommariva.vianello2007} % to approximate the integrals in the score function. 
% For the recently implemented power-law kernels, Of particular relevance for \code{twinstim} is the \code{polyCub.iso} method, which takes advantage of the assumed isotropy of spatial interaction such that numerical integration remains in only one dimension \citep[Supplement~B, Section~2]{meyer.held2013}. We \CRANpkg{memoise} \citep{R:memoise} the cubature function during log-likelihood maximization to avoid integration for unchanged parameters of~$f$. \subsection{Special cases: Single-component models} If the \emph{epidemic} component is omitted in Equation~\ref{eqn:twinstim}, the point process model becomes equivalent to a Poisson regression model for aggregated counts. This provides a link to ecological regression approaches in general and to the count data model \code{hhh4} illustrated in \code{vignette("hhh4")} and \code{vignette("hhh4\_spacetime")}. To see this, recall that the endemic component $\nu_{[\bm{s}][t]}$ is piecewise constant on the spatio-temporal grid with cells $([\bm{s}],[t])$. Hence the log-likelihood~\eqref{eqn:twinstim:loglik} of an endemic-only \code{twinstim} simplifies to a sum over all these cells, \begin{equation*} \sum_{[\bm{s}],[t]} \left\{ Y_{[\bm{s}][t]} \log\nu_{[\bm{s}][t]} - \abs{[\bm{s}]} \, \abs{[t]} \, \nu_{[\bm{s}][t]} \right\} \:, \end{equation*} where $Y_{[\bm{s}][t]}$ is the aggregated number of events observed in cell $([\bm{s}],[t])$, and $\abs{[\bm{s}]}$ and $\abs{[t]}$ denote cell area and length, respectively. Except for an additive constant, the above log-likelihood is equivalently obtained from the Poisson model $Y_{[\bm{s}][t]} \sim \Po( \abs{[\bm{s}]} \, \abs{[t]} \, \nu_{[\bm{s}][t]})$. This relation offers a means of code validation using the established \code{glm} function to fit an endemic-only \code{twinstim} model -- see the examples in \code{help("glm_epidataCS")}. %% The \code{help("glm_epidataCS")} also shows how to fit %% an equivalent endemic-only \code{hhh4} model. If, in contrast, the \emph{endemic} component is omitted, all events are necessarily triggered by other observed events. For such a model to be identifiable, a prehistory of events must exist to trigger the first event, and interaction typically needs to be unbounded such that each event can actually be linked to potential source events. \subsection[Extension: Event types]{Extension: \code{twinstim} with event types} To model the example data on invasive meningococcal disease in the remainder of this section, we actually need to use an extended version $\lambda(\bm{s},t,k)$ of Equation~\ref{eqn:twinstim}, which accounts for different event types~$k$ with own transmission dynamics. This introduces a further dimension in the point process, and the second log-likelihood component in Equation~\ref{eqn:twinstim:loglik} accordingly splits into a sum over all event types. We refer to \citet[Sections~2.4 and~3]{meyer.etal2011} for the technical details of this type-specific \code{twinstim} class. The basic idea is that the meningococcal finetypes share the same endemic pattern (e.g., seasonality), while infections of different finetypes are not associated via transmission. This means that the force of infection is restricted to previously infected individuals with the same bacterial finetype~$k$, i.e., the epidemic sum in Equation~\ref{eqn:twinstim} is over the set $I(\bm{s},t,k) = I(\bm{s},t) \cap \{j: k_j = k\}$. The implementation has limited support for type-dependent interaction functions $f_{k_j}$ and $g_{k_j}$ (not further considered here). 
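Returning briefly to the endemic-only special case above: the equivalence
with Poisson regression can be illustrated by a small toy sketch (made-up
cell counts and a hypothetical binary cell covariate \code{x}; the actual
validation examples reside in \code{help("glm_epidataCS")}). Since
$Y_{[\bm{s}][t]} \sim \Po(\abs{[\bm{s}]} \, \abs{[t]} \, \nu_{[\bm{s}][t]})$,
a log-linear Poisson GLM with offset $\log(\abs{[\bm{s}]} \, \abs{[t]})$
estimates the endemic log-rate:
<>=
## toy spatio-temporal grid: 2 cells x 2 periods with made-up event counts
toy <- data.frame(Y    = c(3, 5, 2, 4),         # events per grid cell
                  area = c(400, 250, 400, 250), # |[s]| in square kilometers
                  len  = rep(30, 4),            # |[t]| in days
                  x    = c(0, 0, 1, 1))         # hypothetical cell covariate
## the offset log(|[s]| * |[t]|) turns the Poisson mean into the endemic rate
coef(glm(Y ~ x + offset(log(area * len)), family = poisson, data = toy))
@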
\section[Data structure]{Data structure: \class{epidataCS}}
\label{sec:twinstim:data}

<>=
## extract components from imdepi to reconstruct
data("imdepi")
events <- SpatialPointsDataFrame(
    coords = coordinates(imdepi$events),
    data = marks(imdepi, coords=FALSE),
    proj4string = imdepi$events@proj4string # ETRS89 projection (+units=km)
)
stgrid <- imdepi$stgrid[,-1]
@
<>=
load(system.file("shapes", "districtsD.RData", package = "surveillance"))
@

The first step toward fitting a \code{twinstim} is to turn the relevant data
into an object of the dedicated class \class{epidataCS}.\footnote{
  The suffix ``CS'' indicates that the data-generating point process is
  indexed in continuous space.
}
The primary ingredients of this class are a spatio-temporal point pattern
(\code{events}) and its underlying observation region (\code{W}).
An additional spatio-temporal grid (\code{stgrid}) holds (time-varying)
area-level covariates for the endemic regression part.
We exemplify this data class by the \class{epidataCS} object for the
\Sexpr{nobs(imdepi)} cases of invasive meningococcal disease in Germany
originally analyzed by \citet{meyer.etal2011}.
It is already contained in the \pkg{surveillance} package as
\code{data("imdepi")} and has been constructed as follows:
<>=
imdepi <- as.epidataCS(events = events, W = stateD, stgrid = stgrid,
                       qmatrix = diag(2), nCircle2Poly = 16)
@

The function \code{as.epidataCS} checks the consistency of the three data
ingredients described in detail below.
It also pre-computes auxiliary variables for model fitting, e.g., the
individual influence regions~\eqref{eqn:twinstim:IR}, which are intersections
of the observation region with discs %of radius \code{eps.s}
centered at the event location approximated by polygons with
\code{nCircle2Poly = 16} edges.
The intersections are computed using functionality of the package
\CRANpkg{polyclip} \citep{R:polyclip}.
For multitype epidemics as in our example, the additional indicator matrix
\code{qmatrix} specifies transmissibility across event types.
An identity matrix corresponds to an independent spread of the event types,
i.e., cases of one type cannot produce cases of another type.

\subsection{Data ingredients}

The core \code{events} data must be provided in the form of a
\class{SpatialPointsDataFrame} as defined by the package \CRANpkg{sp}
\citep{R:sp}:
<>=
summary(events)
@
<>=
oopt <- options(width=100)
## hack to reduce the 'print.gap' in the data summary but not for the bbox
local({
    print.summary.Spatial <- sp:::print.summary.Spatial
    environment(print.summary.Spatial) <- environment()
    print.table <- function (x, ..., print.gap = 0) {
        base::print.table(x, ..., print.gap = print.gap)
    }
    print.summary.Spatial(summary(events))
})
options(oopt)
@

The associated event coordinates are residence postcode centroids, projected
in the \emph{European Terrestrial Reference System 1989} (in kilometer units)
to enable Euclidean geometry.
See the \code{spTransform}-methods for how to project latitude and longitude
coordinates into a planar coordinate reference system (CRS).
The data frame associated with these spatial coordinates ($\bm{s}_i$)
contains a number of required variables and additional event marks (in the
notation of Section~\ref{sec:twinstim:methods}:
$\{(t_i,[\bm{s}_i],k_i,\tau_i,\delta_i,\bm{m}_i): i = 1,\dotsc,n\}$).
For the IMD data, the event \code{time} is measured in days since the
beginning of the observation period 2002--2008 and is subject to a
tie-breaking procedure (described later).
The \code{tile} column refers to the region of the spatio-temporal grid where the event occurred and here contains the official key of the administrative district of the patient's residence. There are two \code{type}s of events labeled as \code{"B"} and \code{"C"}, which refer to the serogroups of the two meningococcal finetypes \emph{B:P1.7-2,4:F1-5} and \emph{C:P1.5,2:F3-3} contained in the data. The \code{eps.t} and \code{eps.s} columns specify upper limits for temporal and spatial interaction, respectively. Here, the infectious period is assumed to last a maximum of 30 days and spatial interaction is limited to a 200 km radius for all cases. The latter has numerical advantages for a Gaussian interaction function $f$ with a relatively small standard deviation. For a power-law kernel, however, this restriction will be dropped to enable occasional long-range transmission. The last two data attributes displayed in the above \code{event} summary are covariates from the case reports: the gender and age group of the patient. For the observation region \code{W}, we use a polygon representation of Germany's boundary. Since the observation region defines the integration domain in the point process log-likelihood~\eqref{eqn:twinstim:loglik}, the more detailed the polygons of \code{W} are the longer it will take to fit a \code{twinstim}. It is thus advisable to sacrifice some shape details for speed by reducing the polygon complexity, e.g., by applying \code{ms_simplify} from the \CRANpkg{rmapshaper} package \citep{R:rmapshaper}. Alternative tools in \proglang{R} are \CRANpkg{spatstat}'s \code{simplify.owin} procedure \citep{R:spatstat} and the function \code{thinnedSpatialPoly} in package \CRANpkg{maptools} \citep{R:maptools}, which implements the Douglas-Peucker reduction method. The \pkg{surveillance} package already contains a simplified representation of Germany's boundaries: <>= <> @ This file contains both the \class{SpatialPolygonsDataFrame} \code{districtsD} of Germany's \Sexpr{length(districtsD)} administrative districts as at January 1, 2009, as well as their union \code{stateD}. %obtained by the call \code{rgeos::gUnaryUnion(districtsD)} \citep{R:rgeos}. These boundaries are projected in the same CRS as the \code{events} data. The \code{stgrid} input for the endemic model component is a data frame with (time-varying) area-level covariates, e.g., socio-economic or ecological characteristics. In our example: <>= .stgrid.excerpt <- format(rbind(head(stgrid, 3), tail(stgrid, 3)), digits=3) rbind(.stgrid.excerpt[1:3,], "..."="...", .stgrid.excerpt[4:6,]) @ Numeric (\code{start},\code{stop}] columns index the time periods and the factor variable \code{tile} identifies the regions of the grid. Note that the given time intervals (here: months) also define the resolution of possible time trends and seasonality of the piecewise constant endemic intensity. We choose monthly intervals to reduce package size and computational cost compared to the weekly resolution originally used by \citet{meyer.etal2011} and \citet{meyer.held2013}. The above \code{stgrid} data frame thus consists of 7 (years) times 12 (months) blocks of \Sexpr{nlevels(stgrid[["tile"]])} (districts) rows each. The \code{area} column gives the area of the respective \code{tile} in square kilometers (compatible with the CRS used for \code{events} and \code{W}). A geographic representation of the regions in \code{stgrid} is not required for model estimation, and is thus not part of the \class{epidataCS} class. 
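For users assembling their own grid, a minimal sketch of the expected layout
may nevertheless be helpful (two toy regions over two 30-day periods; all
values are invented for illustration):
<>=
stgrid_toy <- expand.grid(tile = factor(c("A", "B")), start = c(0, 30))
stgrid_toy$stop <- stgrid_toy$start + 30 # (start, stop] time intervals
stgrid_toy$area <- c(400, 250)           # tile areas, recycled per period
stgrid_toy$popdensity <- c(100, 180)     # made-up area-level covariate
stgrid_toy
@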
%It is, however, necessary for plots of the fitted intensity and for %simulation from the estimated model. In our example, the area-level data only consists of the population density \code{popdensity}, whereas \citet{meyer.etal2011} additionally incorporated (lagged) weekly influenza counts by district as a time-dependent covariate. %% In another application, \citet{meyer.etal2015} used a large number of socio-economic %% characteristics to model psychiatric hospital admissions. \subsection{Data handling and visualization} The generated \class{epidataCS} object \code{imdepi} is a simple list of the checked ingredients <>= cat(paste0('\\code{', names(imdepi), '}', collapse = ", "), ".", sep = "") @ Several methods for data handling and visualization are available for such objects as listed in Table~\ref{tab:methods:epidataCS} and briefly presented in the remainder of this section. <>= print(xtable( surveillance:::functionTable( class = "epidataCS", functions = list( Convert = c("epidataCS2sts"), Extract = c("getSourceDists"))), caption="Generic and \\textit{non-generic} functions applicable to \\class{epidataCS} objects.", label="tab:methods:epidataCS" ), include.rownames = FALSE) @ Printing an \class{epidataCS} object presents some metadata and the first \Sexpr{formals(surveillance:::print.epidataCS)[["n"]]} events by default: <>= imdepi @ During conversion to \class{epidataCS}, the last three columns \code{BLOCK} (time interval index), \code{start} and \code{popdensity} have been merged from the checked \code{stgrid} to the \code{events} data frame. The event marks including time and location can be extracted in a standard data frame by \code{marks(imdepi)} -- inspired by package \CRANpkg{spatstat} -- and this is summarized by \code{summary(imdepi)}. <>= (simdepi <- summary(imdepi)) @ The number of potential sources of infection per event (denoted \texttt{|.sources|} in the above output) is additionally summarized. It is determined by the events' maximum ranges of interaction \code{eps.t} and \code{eps.s}. The event-specific set of potential sources is stored in the (hidden) list \code{imdepi$events$.sources} (events are referenced by row index), and the event-specific numbers of potential sources are stored in the summarized object as \code{simdepi$nSources}. A simple plot of the number of infectives as a function of time (Figure~\ref{fig:imdepi_stepfun}) %determined by the event times and infectious periods can be obtained by the step function converter: <>= par(mar = c(5, 5, 1, 1), las = 1) plot(as.stepfun(imdepi), xlim = summary(imdepi)$timeRange, xaxs = "i", xlab = "Time [days]", ylab = "Current number of infectives", main = "") #axis(1, at = 2557, labels = "T", font = 2, tcl = -0.3, mgp = c(3, 0.3, 0)) @ \pagebreak[1] The \code{plot}-method for \class{epidataCS} offers aggregation of the events over time or space: <>= par(las = 1) plot(imdepi, "time", col = c("indianred", "darkblue"), ylim = c(0, 20)) par(mar = c(0, 0, 0, 0)) plot(imdepi, "space", lwd = 2, points.args = list(pch = c(1, 19), col = c("indianred", "darkblue"))) layout.scalebar(imdepi$W, scale = 100, labels = c("0", "100 km"), plot = TRUE) @ \pagebreak[1] The time-series plot (Figure~\ref{fig:imdepi_plot-1}) shows the monthly aggregated number of cases by finetype in a stacked histogram as well as each type's cumulative number over time. 
The spatial plot (Figure~\ref{fig:imdepi_plot-2}) shows the observation window \code{W} with the locations of all cases (by type), where the areas of the points are proportional to the number of cases at the respective location. Additional shading by the population is possible and exemplified in \code{help("plot.epidataCS")}. The above static plots do not capture the space-time dynamics of epidemic spread. An animation may provide additional insight and can be produced by the corresponding \code{animate}-method. For instance, to look at the first year of the B-type in a weekly sequence of snapshots in a web browser (using facilities of the \CRANpkg{animation} package of \citealp{R:animation}): <>= animation::saveHTML( animate(subset(imdepi, type == "B"), interval = c(0, 365), time.spacing = 7), nmax = Inf, interval = 0.2, loop = FALSE, title = "First year of type B") @ Selecting events from \class{epidataCS} as for the animation above is enabled by the \code{[}- and \code{subset}-methods, which return a new \class{epidataCS} object containing only the selected \code{events}. A limited data sampling resolution may lead to tied event times or locations, which are in conflict with a continuous spatio-temporal point process model. For instance, a temporal residual analysis would suggest model deficiencies \citep[Figure 4]{meyer.etal2011}, and a power-law kernel for spatial interaction may diverge if there are events with zero distance to potential source events \citep{meyer.held2013}. The function \code{untie} breaks ties by random shifts. This has already been applied to the event \emph{times} in the provided \code{imdepi} data by subtracting a U$(0,1)$-distributed random number from the original dates. The event \emph{coordinates} in the IMD data are subject to interval censoring at the level of Germany's postcode regions. A possible replacement for the given centroids would thus be a random location within the corresponding postcode area. Lacking a suitable shapefile, \citet{meyer.held2013} shifted all locations by a random vector with length up to half the observed minimum spatial separation: <>= eventDists <- dist(coordinates(imdepi$events)) minsep <- min(eventDists[eventDists > 0]) set.seed(321) imdepi_untied <- untie(imdepi, amount = list(s = minsep / 2)) @ Note that random tie-breaking requires sensitivity analyses as discussed by \citet{meyer.held2013}, but these are skipped here for the sake of brevity. The \code{update}-method is useful to change the values of the maximum interaction ranges \code{eps.t} and \code{eps.s}, since it takes care of the necessary updates of the hidden auxiliary variables in an \class{epidataCS} object. For unbounded spatial interaction: <>= imdepi_untied_infeps <- update(imdepi_untied, eps.s = Inf) @ Last but not least, \class{epidataCS} can be aggregated to \class{epidata} (from \code{vignette("twinSIR")}) or \class{sts} (from \code{vignette("hhh4_spacetime")}). The method \code{as.epidata.epidataCS} aggregates events by region (\code{tile}), and the function \code{epidataCS2sts} yields counts by region and time interval. The latter could be analyzed by an areal time-series model such as \code{hhh4} (see \code{vignette("hhh4\_spacetime")}). 
We can also use \class{sts} visualizations, e.g.\ (Figure~\ref{fig:imdsts_plot}): <>= imdsts <- epidataCS2sts(imdepi, freq = 12, start = c(2002, 1), tiles = districtsD) par(las = 1, lab = c(7,7,7), mar = c(5,5,1,1)) plot(imdsts, type = observed ~ time) plot(imdsts, type = observed ~ unit, population = districtsD$POPULATION / 100000) @ \section{Modeling and inference} \label{sec:twinstim:fit} Having prepared the data as an object of class \class{epidataCS}, the function \code{twinstim} can be used to perform likelihood inference for conditional intensity models of the form~\eqref{eqn:twinstim}. The main arguments for \code{twinstim} are the formulae of the \code{endemic} and \code{epidemic} linear predictors ($\nu_{[\bm{s}][t]} = \exp$(\code{endemic}) and $\eta_j = \exp$(\code{epidemic})), and the spatial and temporal interaction functions \code{siaf} ($f$) and \code{tiaf} ($g$), respectively. Both formulae are parsed internally using the standard \code{model.frame} toolbox from package \pkg{stats} and thus can handle factor variables and interaction terms. While the \code{endemic} linear predictor incorporates %time-dependent and/or area-level covariates from \code{stgrid}, %% and in the disease mapping context usually contains at least the population density as a multiplicative offset, i.e., %% \code{endemic = ~offset(log(popdensity))}. There can be additional effects of time, %% which are functions of the variable \code{start} from \code{stgrid}, %% or effects of, e.g., socio-demographic and ecological variables. the \code{epidemic} formula may use both \code{stgrid} variables and event marks to be associated with the force of infection. %% For instance, \code{epidemic = ~log(popdensity) + type} corresponds to %% $\eta_j = \rho_{[\bm{s}_j]}^{\gamma_{\rho}} \exp(\gamma_0 + \gamma_C \ind(k_j=C))$, %% which models different infectivity of the event types, and scales %% with population density (a grid-based covariate) to reflect higher %% contact rates and thus infectivity in more densly populated regions. For the interaction functions, several alternatives are predefined as listed in Table~\ref{tab:iafs}. They are applicable out-of-the-box and illustrated as part of the following modeling exercise for the IMD data. Own interaction functions can also be implemented following the structure described in \code{help("siaf")} and \code{help("tiaf")}, respectively. <>= twinstim_iafs <- suppressWarnings( cbind("Spatial (\\code{siaf.*})" = ls(pattern="^siaf\\.", pos="package:surveillance"), "Temporal (\\code{tiaf.*})" = ls(pattern="^tiaf\\.", pos="package:surveillance")) ) twinstim_iafs <- apply(twinstim_iafs, 2, function (x) { is.na(x) <- duplicated(x) x }) print(xtable(substring(twinstim_iafs, 6), label="tab:iafs", caption="Predefined spatial and temporal interaction functions."), include.rownames=FALSE, sanitize.text.function=function(x) paste0("\\code{", x, "}"), sanitize.colnames.function=identity, sanitize.rownames.function=identity) @ \subsection{Basic example} To illustrate statistical inference with \code{twinstim}, we will estimate several models for the simplified and ``untied'' IMD data presented in Section~\ref{sec:twinstim:data}. 
In the endemic component, we include the district-specific population density as a multiplicative offset, a (centered) time trend, and a sinusoidal wave of frequency $2\pi/365$ to capture seasonality, where the \code{start} variable from \code{stgrid} measures time: <>= (endemic <- addSeason2formula(~offset(log(popdensity)) + I(start / 365 - 3.5), period = 365, timevar = "start")) @ See \citet[Section~2.2]{held.paul2012} for how such sine/cosine terms reflect seasonality. Because of the aforementioned integrations in the log-likelihood~\eqref{eqn:twinstim:loglik}, it is advisable to first fit an endemic-only model to obtain reasonable start values for more complex epidemic models: <>= imdfit_endemic <- twinstim(endemic = endemic, epidemic = ~0, data = imdepi_untied, subset = !is.na(agegrp)) @ We exclude the single case with unknown age group from this analysis since we will later estimate an effect of the age group on the force of infection. Many of the standard functions to access model fits in \proglang{R} are also implemented for \class{twinstim} fits (see Table~\ref{tab:methods:twinstim}). For example, we can produce the usual model summary: <>= summary(imdfit_endemic) @ Because of the aforementioned equivalence of the endemic component with a Poisson regression model, the coefficients can be interpreted as log rate ratios in the usual way. For instance, the endemic rate is estimated to decrease by \code{1 - exp(coef(imdfit_endemic)[2])} $=$ \Sexpr{round(100*(1-exp(coef(imdfit_endemic)[2])),1)}\% per year. Coefficient correlations can be retrieved via the argument \code{correlation = TRUE} in the \code{summary} call just like for \code{summary.glm}, or via \code{cov2cor(vcov(imdfit_endemic))}. <>= print(xtable( surveillance:::functionTable( class = "twinstim", functions = list( Display = c("iafplot", "checkResidualProcess"), Extract = c("intensity.twinstim", "simpleR0"), Modify = c("stepComponent"), Other = c("epitest"))), caption="Generic and \\textit{non-generic} functions applicable to \\class{twinstim} objects. Note that there is no need for specific \\code{coef}, \\code{confint}, \\code{AIC} or \\code{BIC} methods, since the respective default methods from package \\pkg{stats} apply outright.", label="tab:methods:twinstim" ), include.rownames = FALSE) @ We now update the endemic model to take additional spatio-temporal dependence between events into account. Infectivity shall depend on the meningococcal finetype and the age group of the patient, and is assumed to be constant over time (default), $g(t)=\ind_{(0,30]}(t)$, with a Gaussian distance-decay $f(x) = \exp\left\{-x^2/(2 \sigma^2)\right\}$. This model was originally selected by \citet{meyer.etal2011} and can be fitted as follows: <>= imdfit_Gaussian <- update(imdfit_endemic, epidemic = ~type + agegrp, siaf = siaf.gaussian(), cores = 2 * (.Platform$OS.type == "unix")) @ On Unix-alikes, the numerical integrations of $f(\norm{\bm{s}})$ in the log-likelihood and $\frac{\partial f(\norm{\bm{s}})}{\partial \log\sigma}$ in the score function (note that $\sigma$ is estimated on the log-scale) can be performed in parallel via %the ``multicore'' functions \code{mclapply} \textit{et al.}\ from the base package \pkg{parallel}, here with \code{cores = 2} processes. Table~\ref{tab:imdfit_Gaussian} shows the output of \code{twinstim}'s \code{xtable} method \citep{R:xtable} applied to the above model fit, providing a table of estimated rate ratios for the endemic and epidemic effects. 
The alternative \code{toLatex} method simply translates the \code{summary} table of coefficients to \LaTeX\ without \code{exp}-transformation. On the subject-matter level, we can conclude from Table~\ref{tab:imdfit_Gaussian} that the meningococcal finetype of serogroup~C is less than half as infectious as the B-type, and that patients in the age group 3 to 18 years are estimated to cause twice as many secondary infections as infants aged 0 to 2 years. <>= print(xtable(imdfit_Gaussian, caption="Estimated rate ratios (RR) and associated Wald confidence intervals (CI) for endemic (\\code{h.}) and epidemic (\\code{e.}) terms. This table was generated by \\code{xtable(imdfit\\_Gaussian)}.", label="tab:imdfit_Gaussian"), sanitize.text.function=NULL, sanitize.colnames.function=NULL, sanitize.rownames.function=function(x) paste0("\\code{", x, "}")) @ \subsection{Model-based effective reproduction numbers} The event-specific reproduction numbers~\eqref{eqn:R0:twinstim} can be extracted from fitted \class{twinstim} objects via the \code{R0} method. For the above IMD model, we obtain the following mean numbers of secondary infections by finetype: <<>>= R0_events <- R0(imdfit_Gaussian) tapply(R0_events, marks(imdepi_untied)[names(R0_events), "type"], mean) @ Confidence intervals %for the estimated reproduction numbers $\hat\mu_j$ can be obtained via Monte Carlo simulation, where Equation~\ref{eqn:R0:twinstim} is repeatedly evaluated with parameters sampled from the asymptotic multivariate normal distribution of the maximum likelihood estimate. For this purpose, the \code{R0}-method takes an argument \code{newcoef}, which is exemplified in \code{help("R0")}. %% Note that except for (piecewise) constant $f$, computing confidence intervals for %% $\hat\mu_j$ takes a considerable amount of time since the integrals over the %% polygons $\bm{R}_j$ have to be solved numerically for each new set of parameters. \subsection{Interaction functions} <>= imdfit_exponential <- update(imdfit_Gaussian, siaf = siaf.exponential()) @ <>= imdfit_powerlaw <- update(imdfit_Gaussian, siaf = siaf.powerlaw(), data = imdepi_untied_infeps, start = c("e.(Intercept)" = -6.2, "e.siaf.1" = 1.5, "e.siaf.2" = 0.9)) @ <>= imdfit_step4 <- update(imdfit_Gaussian, siaf = siaf.step(exp(1:4 * log(100) / 5), maxRange = 100)) @ <>= save(imdfit_Gaussian, imdfit_exponential, imdfit_powerlaw, imdfit_step4, file = "twinstim-cache.RData", compress = "xz") @ Figure~\ref{fig:imdfit_siafs} shows several estimated spatial interaction functions, which can be plotted by, e.g., \code{plot(imdfit_Gaussian, "siaf")}. <>= par(mar = c(5,5,1,1)) set.seed(2) # Monte-Carlo confidence intervals plot(imdfit_Gaussian, "siaf", xlim=c(0,42), ylim=c(0,5e-5), lty=c(1,3), xlab = expression("Distance " * x * " from host [km]")) plot(imdfit_exponential, "siaf", add=TRUE, col.estimate=5, lty = c(5,3)) plot(imdfit_powerlaw, "siaf", add=TRUE, col.estimate=4, lty=c(2,3)) plot(imdfit_step4, "siaf", add=TRUE, col.estimate=3, lty=c(4,3)) legend("topright", legend=c("Power law", "Exponential", "Gaussian", "Step (df=4)"), col=c(4,5,2,3), lty=c(2,5,1,4), lwd=3, bty="n") @ The estimated standard deviation $\hat\sigma$ of the Gaussian kernel is: <<>>= exp(cbind("Estimate" = coef(imdfit_Gaussian)["e.siaf.1"], confint(imdfit_Gaussian, parm = "e.siaf.1"))) @ \citet{meyer.held2013} found that a power-law decay of spatial interaction more appropriately describes the spread of human infectious diseases. 
A power-law kernel concentrates on short-range interaction, but also exhibits a heavier tail reflecting occasional transmission over large distances. %This result is supported by the power-law distribution of short-time human %travel \citep{brockmann.etal2006}, which is an important driver of epidemic spread. To estimate the power law $f(x) = (x+\sigma)^{-d}$, we use the prepared \code{eps.s = Inf} version of the \class{epidataCS} object, and update the model as follows: <>= <> @ To reduce the runtime of this example, we specified convenient \code{start} values for some parameters. The estimated parameters $(\hat\sigma, \hat d)$ are: <<>>= exp(cbind("Estimate" = coef(imdfit_powerlaw)[c("e.siaf.1", "e.siaf.2")], confint(imdfit_powerlaw, parm = c("e.siaf.1", "e.siaf.2")))) @ Sometimes $\sigma$ is difficult to estimate, and also in this example, its confidence interval is relatively large. The one-parameter version \code{siaf.powerlaw1} can be used to estimate a power-law decay with fixed $\sigma = 1$. A more common option is the exponential kernel $f(x) = \exp(-x/\sigma)$: <>= <> @ Table~\ref{tab:iafs} also lists the step function kernel as an alternative, which is particularly useful for two reasons. First, it is a more flexible approach since it estimates interaction between the given knots without assuming an overall functional form. Second, the spatial integrals in the log-likelihood can be computed analytically for the step function kernel, which therefore offers a quick estimate of spatial interaction. We update the Gaussian model to use four steps at log-equidistant knots up to an interaction range of 100 km: <>= <> @ Figure~\ref{fig:imdfit_siafs} suggests that the estimated step function is in line with the power law. Note that suitable knots for the step function could also be derived from quantiles of the observed distances between events and their potential source events, e.g.: <<>>= quantile(getSourceDists(imdepi_untied_infeps, "space"), c(1,2,4,8)/100) @ For the temporal interaction function $g(t)$, model updates and plots are similarly possible, e.g., using \code{update(imdfit_Gaussian, tiaf = tiaf.exponential())}. However, the events in the IMD data are too rare to infer the time-course of infectivity with confidence. <>= local({ nSources <- sapply(levels(imdepi$events$type), function (.type) { mean(summary(subset(imdepi_untied_infeps, type==.type))$nSources) }) structure( paste("Specifically, there are only", paste0(round(nSources,1), " (", names(nSources), ")", collapse=" and "), "cases on average within the preceding 30 days", "(potential sources of infection)."), class="Latex") }) @ \subsection{Model selection} <>= AIC(imdfit_endemic, imdfit_Gaussian, imdfit_exponential, imdfit_powerlaw, imdfit_step4) @ Akaike's Information Criterion (AIC) suggests superiority of the power-law vs.\ the exponential, Gaussian, and endemic-only models. The more flexible step function yields the best AIC value, but its shape strongly depends on the chosen knots and is not guaranteed to be monotonically decreasing. The function \code{stepComponent} -- a wrapper around the \code{step} function from \pkg{stats} -- can be used to perform AIC-based stepwise selection within a given model component. 
<>= ## Example of AIC-based stepwise selection of the endemic model imdfit_endemic_sel <- stepComponent(imdfit_endemic, component = "endemic") ## -> none of the endemic predictors is removed from the model @ \subsection{Model diagnostics} The element \code{"fittedComponents"} of a \class{twinstim} object contains the endemic and epidemic values of the estimated intensity at each event occurrence. However, plots of the conditional intensity (and its components) as a function of location or time provide more insight into the fitted process. Evaluation of \code{intensity.twinstim} requires the model environment to be stored with the fit. By default, \code{model = FALSE} in \code{twinstim}, but if the data are still available, the model environment can also be added afterwards using the convenient \code{update} method: <>= imdfit_powerlaw <- update(imdfit_powerlaw, model = TRUE) @ Figure~\ref{fig:imdfit_powerlaw_intensityplot_time} shows an \code{intensityplot} of the fitted ``ground'' intensity $\sum_{k=1}^2 \int_{\bm{W}} \hat\lambda(\bm{s},t,k) \dif \bm{s}$: %aggregated over both event types: <>= intensityplot(imdfit_powerlaw, which = "total", aggregate = "time", types = 1:2) @ <>= par(mar = c(5,5,1,1), las = 1) intensity_endprop <- intensityplot(imdfit_powerlaw, aggregate="time", which="endemic proportion", plot=FALSE) intensity_total <- intensityplot(imdfit_powerlaw, aggregate="time", which="total", tgrid=501, lwd=2, xlab="Time [days]", ylab="Intensity") curve(intensity_endprop(x) * intensity_total(x), add=TRUE, col=2, lwd=2, n=501) #curve(intensity_endprop(x), add=TRUE, col=2, lty=2, n=501) text(2500, 0.36, labels="total", col=1, pos=2, font=2) text(2500, 0.08, labels="endemic", col=2, pos=2, font=2) @ %% Note that this represents a realization of a stochastic process, since it %% depends on the occurred events. The estimated endemic intensity component has also been added to the plot. It exhibits strong seasonality and a slow negative trend. The proportion of the endemic intensity is rather constant along time since no major outbreaks occurred. This proportion can be visualized separately by specifying \code{which = "endemic proportion"} in the above call. <>= meanepiprop <- integrate(intensityplot(imdfit_powerlaw, which="epidemic proportion"), 50, 2450, subdivisions=2000, rel.tol=1e-3)$value / 2400 @ Spatial \code{intensityplot}s as in Figure~\ref{fig:imdfit_powerlaw_intensityplot_space} can be produced via \code{aggregate = "space"} and require a geographic representation of \code{stgrid}. The epidemic proportion is naturally high around clusters of cases and even more so if the population density is low. %% The function \code{epitest} offers a model-based global test for epidemicity, %% while \code{knox} and \code{stKtest} implement related classical approaches %% \citep{meyer.etal2015}. <>= for (.type in 1:2) { print(intensityplot(imdfit_powerlaw, aggregate="space", which="epidemic proportion", types=.type, tiles=districtsD, sgrid=1000, col.regions = grey(seq(1,0,length.out=10)), at = seq(0,1,by=0.1))) grid::grid.text("Epidemic proportion", x=1, rot=90, vjust=-1) } @ Another diagnostic tool is the function \code{checkResidualProcess} (Figure~\ref{fig:imdfit_checkResidualProcess}), which transforms the temporal ``residual process'' in such a way that it exhibits a uniform distribution and lacks serial correlation if the fitted model describes the true CIF well \citep[see][Section~3.3]{ogata1988}. 
% more recent work: \citet{clements.etal2011} <>= par(mar = c(5, 5, 1, 1)) checkResidualProcess(imdfit_powerlaw) @ \section{Simulation} \label{sec:twinstim:simulation} %% Simulations from the fitted model are also useful to investigate the %% goodness of fit. To identify regions with unexpected IMD dynamics, \citet{meyer.etal2011} compared the observed numbers of cases by district to the respective 2.5\% and 97.5\% quantiles of 100 simulations from the selected model. Furthermore, simulations allow us to investigate the stochastic volatility of the endemic-epidemic process, to obtain probabilistic forecasts, and to perform parametric bootstrap of the spatio-temporal point pattern. The simulation algorithm we apply is described in \citet[Section 4]{meyer.etal2011}. It requires a geographic representation of the \code{stgrid}, as well as functionality for sampling locations from the spatial kernel $f_2(\bm{s}) := f(\norm{\bm{s}})$. This is implemented for all predefined spatial interaction functions listed in Table~\ref{tab:iafs}. %For instance for the %power-law kernel, we pass via polar coordinates (with density then proportional %to $rf(r)$) %, a function also involved in the efficient cubature of % %$f_2(\bm{s})$ via Green's theorem) %and the inverse transformation method with numerical root finding for the %quantiles. Event marks are by default sampled from their respective empirical distribution in the original data. %but a customized generator can be supplied as argument \code{rmarks}. The following code runs \emph{a single} simulation over the last year based on the estimated power-law model: <>= imdsim <- simulate(imdfit_powerlaw, nsim = 1, seed = 1, t0 = 2191, T = 2555, data = imdepi_untied_infeps, tiles = districtsD) @ This yields an object of the class \class{simEpidataCS}, which extends \class{epidataCS}. It carries additional components from the generating model to enable an \code{R0}-method and \code{intensityplot}s for simulated data. %All methods for \class{epidataCS} are applicable. %% The result is simplified in that only the \code{events} instead of a full %% \class{epidataCS} object are retained from every run to save memory and %% computation time. All other components, which do not vary between simulations, %% e.g., the \code{stgrid}, are only stored from the first run. %% There is a \code{[[}-method for such \class{simEpidataCSlist}s in order to %% extract single simulations as full \class{simEpidataCS} objects from the %% simplified structure. %Extracting a single simulation (e.g., \code{imdsims[[1]]}) Figure~\ref{fig:imdsim_plot} shows the cumulative number of cases from the simulation appended to the first six years of data. 
<>= .t0 <- imdsim$timeRange[1] .cumoffset <- c(table(subset(imdepi, time < .t0)$events$type)) par(mar = c(5,5,1,1), las = 1) plot(imdepi, ylim = c(0, 20), col = c("indianred", "darkblue"), subset = time < .t0, cumulative = list(maxat = 336), xlab = "Time [days]") plot(imdsim, add = TRUE, legend.types = FALSE, col = adjustcolor(c("indianred", "darkblue"), alpha.f = 0.5), subset = !is.na(source), # exclude events of the prehistory cumulative = list(offset = .cumoffset, maxat = 336, axis = FALSE), border = NA, density = 0) # no histogram for simulations plot(imdepi, add = TRUE, legend.types = FALSE, col = 1, subset = time >= .t0, cumulative = list(offset = .cumoffset, maxat = 336, axis = FALSE), border = NA, density = 0) # no histogram for the last year's data abline(v = .t0, lty = 2, lwd = 2) @ %% Because we have started simulation at time \code{t0 = 0}, %% no events from \code{data} have been used as the prehistory, i.e., %% the first simulated event is necessarily driven by the endemic model component. A special feature of such simulated epidemics is that the source of each event is known: <>= table(imdsim$events$source > 0, exclude = NULL) @ The stored \code{source} value is 0 for endemic events, \code{NA} for events of the prehistory but still infective at \code{t0}, and otherwise corresponds to the row index of the infective source. %% Averaged over all 30 simulations, the proportion of events triggered by %% previous events is %% Sexpr{mean(sapply(imdsims$eventsList, function(x) mean(x$source > 0, na.rm = TRUE)))}. %-------------- % BIBLIOGRAPHY %-------------- <>= ## create automatic references for R packages .Rbibfile <- file("twinstim-R.bib", "w", encoding = "latin1") Rbib <- knitr::write_bib( c("memoise", "sp", "polyclip", "maptools", "animation", "xtable"), file = NULL, tweak = FALSE, prefix = "R:") ## write_bib() to file does enc2utf8() -> fails for ISO8859-15 session charset writeLines(unlist(Rbib, use.names = FALSE), .Rbibfile) close(.Rbibfile) @ \bibliography{references,twinstim-R} \end{document} surveillance/vignettes/hhh4.Rnw0000644000176200001440000010241013627516414016310 0ustar liggesusers%\VignetteIndexEntry{hhh4: An endemic-epidemic modelling framework for infectious disease counts} %\VignetteDepends{surveillance, Matrix} \documentclass[a4paper,11pt]{article} \usepackage[T1]{fontenc} \usepackage[english]{babel} \usepackage{graphicx} \usepackage{color} \usepackage{natbib} \usepackage{lmodern} \usepackage{bm} \usepackage{amsmath} \usepackage{amsfonts,amssymb} \setlength{\parindent}{0pt} \setcounter{secnumdepth}{1} \newcommand{\Po}{\operatorname{Po}} \newcommand{\NegBin}{\operatorname{NegBin}} \newcommand{\N}{\mathcal{N}} \newcommand{\pkg}[1]{{\fontseries{b}\selectfont #1}} \newcommand{\surveillance}{\pkg{surveillance}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\hhh}{\texttt{hhh4}} \newcommand{\R}{\textsf{R}} \newcommand{\sts}{\texttt{sts}} \newcommand{\example}[1]{\subsubsection*{Example: #1}} %%% Meta data \usepackage{hyperref} \hypersetup{ pdfauthor = {Michaela Paul and Sebastian Meyer}, pdftitle = {'hhh4': An endemic-epidemic modelling framework for infectious disease counts}, pdfsubject = {R package 'surveillance'} } \newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}} \title{\code{hhh4}: An endemic-epidemic modelling framework for infectious disease counts} \author{ Michaela Paul and Sebastian Meyer\thanks{Author of correspondence: \email{seb.meyer@fau.de} (new affiliation)}\\ Epidemiology, Biostatistics and Prevention Institute\\ University of 
Zurich, Zurich, Switzerland } \date{8 February 2016} %%% Sweave \usepackage{Sweave} \SweaveOpts{prefix.string=plots/hhh4, keep.source=T, strip.white=true} \definecolor{Sinput}{rgb}{0,0,0.56} \DefineVerbatimEnvironment{Sinput}{Verbatim}{formatcom={\color{Sinput}},fontshape=sl,fontsize=\footnotesize} \DefineVerbatimEnvironment{Soutput}{Verbatim}{fontshape=sl,fontsize=\footnotesize} %%% Initial R code <>= library("surveillance") options(width=75) ## create directory for plots dir.create("plots", showWarnings=FALSE) ###################################################### ## Do we need to compute or can we just fetch results? ###################################################### compute <- !file.exists("hhh4-cache.RData") message("Doing computations: ", compute) if(!compute) load("hhh4-cache.RData") @ \begin{document} \maketitle \begin{abstract} \noindent The \R\ package \surveillance\ provides tools for the visualization, modelling and monitoring of epidemic phenomena. This vignette is concerned with the \hhh\ modelling framework for univariate and multivariate time series of infectious disease counts proposed by \citet{held-etal-2005}, and further extended by \citet{paul-etal-2008}, \citet{paul-held-2011}, \citet{held.paul2012}, and \citet{meyer.held2013}. The implementation is illustrated using several built-in surveillance data sets. The special case of \emph{spatio-temporal} \hhh\ models is also covered in \citet[Section~5]{meyer.etal2014}, which is available as the extra \verb+vignette("hhh4_spacetime")+. \end{abstract} \section{Introduction}\label{sec:intro} To meet the threats of infectious diseases, many countries have established surveillance systems for the reporting of various infectious diseases. The systematic and standardized reporting at a national and regional level aims to recognize all outbreaks quickly, even when aberrant cases are dispersed in space. Traditionally, notification data, i.e.\ counts of cases confirmed according to a specific definition and reported daily, weekly or monthly on a regional or national level, are used for surveillance purposes. The \R-package \surveillance\ provides functionality for the retrospective modelling and prospective aberration detection in the resulting surveillance time series. Overviews of the outbreak detection functionality of \surveillance\ are given by \citet{hoehle-mazick-2010} and \citet{salmon.etal2014}. This document illustrates the functionality of the function \hhh\ for the modelling of univariate and multivariate time series of infectious disease counts. It is part of the \surveillance\ package as of version 1.3. The remainder of this vignette unfolds as follows: Section~\ref{sec:data} introduces the S4 class data structure used to store surveillance time series data within the package. Access and visualization methods are outlined by means of built-in data sets. In Section~\ref{sec:model}, the statistical modelling approach by \citet{held-etal-2005} and further model extensions are described. After the general function call and arguments are shown, the detailed usage of \hhh\ is demonstrated in Section~\ref{sec:hhh} using data introduced in Section~\ref{sec:data}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Surveillance data}\label{sec:data} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Denote by $\{y_{it}; i=1,\ldots,I,t=1,\ldots,T\}$ the multivariate time series of disease counts for a specific partition of gender, age and location. 
Here, $T$ denotes the length of the time series and $I$ denotes the number of
units (e.g.\ geographical regions or age groups) being monitored.
Such data are represented using objects of the S4 class \sts\ (surveillance
time series).

\subsection[The sts data class]{The \sts\ data class}

The \sts\ class contains the $T\times I$ matrix of counts $y_{it}$ in
a slot \code{observed}. An integer slot \code{epoch} denotes the time index
$1\leq t \leq T$ of each row in \code{observed}.
The number of observations per year, e.g.\ 52 for weekly or 12 for monthly
data, is denoted by \code{freq}.
Furthermore, \code{start} denotes a vector of length two containing the start
of the time series as \code{c(year, epoch)}.
For spatially stratified time series, the slot \code{neighbourhood} denotes an
$I \times I$ adjacency matrix with elements 1 if two regions are neighbors
and 0 otherwise. For map visualizations, the slot \code{map} links the
multivariate time series to geographical regions stored in a
\code{"SpatialPolygons"} object (package \pkg{sp}).
Additionally, the slot \code{populationFrac} contains a $T\times I$ matrix
representing population fractions in unit $i$ at time $t$.

The \sts\ data class is also described in
\citet[Section~2.1]{hoehle-mazick-2010},
\citet[Section~1.1]{salmon.etal2014},
\citet[Section~5.2]{meyer.etal2014},
and on the associated help page \code{help("sts")}.

\subsection{Some example data sets}

The package \surveillance\ contains a number of time series in the
\code{data} directory. Most data sets originate from the SurvStat@RKI
database\footnote{\url{https://survstat.rki.de}}, maintained by the Robert
Koch Institute (RKI) in Germany.
Selected data sets will be analyzed in Section~\ref{sec:hhh} and are
introduced in the following.

Note that many of the built-in datasets are stored in the S3 class data
structure \mbox{\code{disProg}} used in ancient versions of the
\surveillance\ package (until 2006).
They can be easily converted into the new S4 \sts\ data structure using the
function \code{disProg2sts}. The resulting \sts\ object can be accessed in
much the same way as standard \code{matrix} objects and allows easy temporal
and spatial aggregation as will be shown in the remainder of this section.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\example{Influenza and meningococcal disease, Germany, 2001--2006}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

As a first example, the weekly number of influenza and meningococcal disease
cases in Germany is considered.
<>=
# load data
data("influMen")
# convert to sts class and print basic information about the time series
print(fluMen <- disProg2sts(influMen))
@
The univariate time series of meningococcal disease counts can be obtained with
<>=
meningo <- fluMen[, "meningococcus"]
dim(meningo)
@
The \code{plot} function provides ways to visualize the multivariate time
series in time, space and space-time, as controlled by the \code{type}
argument:
\setkeys{Gin}{width=1\textwidth}
<>=
plot(fluMen, type = observed ~ time | unit, # type of plot (default)
     same.scale = FALSE,                    # unit-specific ylim?
     col = "grey")                          # color of bars
@
See \code{help("stsplot")} for a detailed description of the plot routines.
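For instance, the count matrix can be extracted via \code{observed}, and the
usual \code{matrix}-style subsetting applies (a small illustration using the
\code{fluMen} object created above):
<>=
head(observed(fluMen), 4) # counts of the first four weeks
colSums(observed(fluMen)) # total number of cases by unit
fluMen[1:4, "influenza"]  # an sts object restricted to one unit and 4 weeks
@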
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\example{Influenza, Southern Germany, 2001--2008}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The spatio-temporal spread of influenza in the 140 Kreise (districts) of
Bavaria and Baden-W\"urttemberg is analyzed using the weekly number of cases
reported to the RKI~\citep{survstat-fluByBw} in the years 2001--2008. An \sts\
object containing the data is created as follows:

<<>>=
# read in observed number of cases
flu.counts <- as.matrix(read.table(system.file("extdata/counts_flu_BYBW.txt",
                                               package = "surveillance"),
                                   check.names = FALSE))
@

\begin{center}
\setkeys{Gin}{width=.5\textwidth}
<<fig=TRUE>>=
# read in 0/1 adjacency matrix (1 if regions share a common border)
nhood <- as.matrix(read.table(system.file("extdata/neighbourhood_BYBW.txt",
                                          package = "surveillance"),
                              check.names = FALSE))
library("Matrix")
print(image(Matrix(nhood)))
@
\end{center}

<<>>=
# read in population fractions
popfracs <- read.table(system.file("extdata/population_2001-12-31_BYBW.txt",
                                   package = "surveillance"),
                       header = TRUE)$popFrac
# create sts object
flu <- sts(flu.counts, start = c(2001, 1), frequency = 52,
           population = popfracs, neighbourhood = nhood)
@

These data are already included as \code{data("fluBYBW")} in \surveillance.
In addition to the \sts\ object created above, \code{fluBYBW} contains a map
of the administrative districts of Bavaria and Baden-W\"urttemberg. The map is
included by specifying a \code{"SpatialPolygons"} representation of the
districts as an extra argument \code{map} in the above \code{sts} call. Such a
\code{"SpatialPolygons"} object can be obtained from, e.g., an external
shapefile using the function \mbox{\code{readOGR}} from package \pkg{rgdal}.
A map enables plots and animations of the cumulative number of cases by
region. For instance, a disease incidence map of the year 2001 can be obtained
as follows:

\setkeys{Gin}{width=.5\textwidth}
\begin{center}
<<fig=TRUE>>=
data("fluBYBW")
plot(fluBYBW[year(fluBYBW) == 2001, ],   # select year 2001
     type = observed ~ unit,             # total counts by region
     population = fluBYBW@map$X31_12_01 / 100000) # per 100000 inhabitants
grid::grid.text("Incidence [per 100'000 inhabitants]", x = 0.5, y = 0.02)
@
\end{center}

<<echo=FALSE>>=
# consistency check
local({
  fluBYBW@map <- flu@map
  stopifnot(all.equal(fluBYBW, flu))
})
@

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\example{Measles, Germany, 2005--2007}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The following data set contains the weekly number of measles cases in the 16
German federal states, in the years 2005--2007. These data have been analyzed
by \citet{herzog-etal-2010} after aggregation into bi-weekly periods.

<<>>=
data("measlesDE")
measles2w <- aggregate(measlesDE, nfreq = 26)
@

\setkeys{Gin}{width=.75\textwidth}
\begin{center}
<<fig=TRUE>>=
plot(measles2w, type = observed ~ time, # aggregate counts over all units
     main = "Bi-weekly number of measles cases in Germany")
@
\end{center}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Model formulation}\label{sec:model}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Retrospective surveillance aims to identify outbreaks and (spatio-)temporal
patterns through statistical modelling. Motivated by a branching process with
immigration, \citet{held-etal-2005} suggest the following model for the
analysis of univariate time series of infectious disease counts
$\{y_{t}; t=1,\ldots,T\}$.
The counts are assumed to be Poisson distributed with conditional mean \begin{align*} \mu_{t} = \lambda y_{t-1}+ \nu_{t}, \quad(\lambda,\nu_{t}>0) \end{align*} where $\lambda$ and $\nu_t$ are unknown quantities. The mean incidence is decomposed additively into two components: an epidemic or \emph{autoregressive} component $\lambda y_{t-1}$, and an \emph{endemic} component $\nu_t$. The former should be able to capture occasional outbreaks whereas the latter explains a baseline rate of cases with stable temporal pattern. \citet{held-etal-2005} suggest the following parametric model for the endemic component: \begin{align}\label{eq:nu_t} \log(\nu_t) =\alpha + \beta t + \left\{\sum_{s=1}^S \gamma_s \sin(\omega_s t) + \delta_s \cos(\omega_s t)\right\}, \end{align} where $\alpha$ is an intercept, $\beta$ is a trend parameter, and the terms in curly brackets are used to model seasonal variation. Here, $\gamma_s$ and $\delta_s$ are unknown parameters, $S$ denotes the number of harmonics to include, and $\omega_s=2\pi s/$\code{freq} are Fourier frequencies (e.g.\ \code{freq = 52} for weekly data). For ease of interpretation, the seasonal terms in \eqref{eq:nu_t} can be written equivalently as \begin{align*} \gamma_s \sin(\omega_s t) + \delta_s \cos(\omega_s t)= A_s \sin(\omega_s t +\varphi_s) \end{align*} with amplitude $A_s=\sqrt{\gamma_s^2+\delta_s^2}$ describing the magnitude, and phase difference $\tan(\varphi_s)=\delta_s/\gamma_s$ describing the onset of the sine wave. To account for overdispersion, the Poisson model may be replaced by a negative binomial model. Then, the conditional mean $\mu_t$ remains the same but the conditional variance increases to $\mu_t (1+\mu_t \psi)$ with additional unknown overdispersion parameter $\psi>0$. The model is extended to multivariate time series $\{y_{it}\}$ in \citet{held-etal-2005} and \citet{paul-etal-2008} by including an additional \emph{neighbor-driven} component, where past cases in other (neighboring) units also enter as explanatory covariates. The conditional mean $\mu_{it}$ is then given by \begin{align} \label{eq:mu_it} \mu_{it} = \lambda y_{i,t-1} + \phi \sum_{j\neq i} w_{ji} y_{j,t-1} +e_{it} \nu_{t}, \end{align} where the unknown parameter $\phi$ quantifies the influence of other units $j$ on unit $i$, $w_{ji}$ are weights reflecting between-unit transmission and $e_{it}$ corresponds to an offset (such as population fractions at time $t$ in region $i$). A simple choice for the weights is $w_{ji}=1$ if units $j$ and $i$ are adjacent and 0 otherwise. See \citet{paul-etal-2008} for a discussion of alternative weights, and \citet{meyer.held2013} for how to estimate these weights in the spatial setting using a parametric power-law formulation based on the order of adjacency. When analyzing a specific disease observed in, say, multiple regions or several pathogens (such as influenza and meningococcal disease), the assumption of equal incidence levels or disease transmission across units is questionable. To address such heterogeneity, the unknown quantities $\lambda$, $\phi$, and $\nu_t$ in \eqref{eq:mu_it} may also depend on unit $i$. 
This can be done via
\begin{itemize}
\item unit-specific fixed parameters, e.g.\ $\log(\lambda_i)=\alpha_i$
  \citep{paul-etal-2008};
\item unit-specific random effects, e.g.\ $\log(\lambda_i)=\alpha_0 +a_i$,
  $a_i \stackrel{\text{iid}}{\sim} \N(0,\sigma^2_\lambda)$
  \citep{paul-held-2011};
\item linking parameters with known (possibly time-varying) explanatory
  variables, e.g.\ $\log(\lambda_i)=\alpha_0 +x_i\alpha_1$ with
  region-specific vaccination coverage $x_i$ \citep{herzog-etal-2010}.
\end{itemize}
In general, the parameters of all three model components may depend on both
time and unit. A call to \hhh\ fits a Poisson or negative binomial model with
conditional mean
\begin{align*}
  \mu_{it} = \lambda_{it} y_{i,t-1} +
             \phi_{it} \sum_{j\neq i} w_{ji} y_{j,t-1} + e_{it} \nu_{it}
\end{align*}
to a (multivariate) time series of counts. Here, the three unknown quantities
are modelled as log-linear predictors
\begin{align}
  \log(\lambda_{it}) &= \alpha_0 + a_i +\bm{u}_{it}^\top \bm{\alpha} \tag{\code{ar}}\\
  \log(\phi_{it}) &= \beta_0 + b_i +\bm{x}_{it}^\top \bm{\beta} \tag{\code{ne}}\\
  \log(\nu_{it}) &= \gamma_0 + c_i +\bm{z}_{it}^\top \bm{\gamma}\tag{\code{end}}
\end{align}
where $\alpha_0,\beta_0,\gamma_0$ are intercepts,
$\bm{\alpha},\bm{\beta},\bm{\gamma}$ are vectors of unknown parameters
corresponding to covariate vectors $\bm{u}_{it},\bm{x}_{it},\bm{z}_{it}$, and
$a_i,b_i,c_i$ are random effects. For instance, model~\eqref{eq:nu_t} with
$S=1$ seasonal term may be represented as
$\bm{z}_{it}=(t,\sin(2\pi/\code{freq}\;t),\cos(2\pi/\code{freq}\;t))^\top$.
The stacked vector of all random effects is assumed to follow a normal
distribution with mean $\bm{0}$ and covariance matrix $\bm{\Sigma}$. In
applications, each of the components \code{ar}, \code{ne}, and \code{end} may
be omitted in parts or as a whole. If the model does not contain random
effects, standard likelihood inference can be performed. Otherwise, inference
is based on penalized quasi-likelihood as described in detail in
\citet{paul-held-2011}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Function call and control settings}\label{sec:hhh}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The estimation procedure is called with

<<eval=FALSE>>=
hhh4(sts, control)
@

where \code{sts} denotes a (multivariate) surveillance time series and the
model is specified in the argument \code{control}, in line with other
algorithms in \surveillance.
The \code{control} setting is a list of the following arguments (here with
default values):

<<eval=FALSE>>=
control = list(
  ar = list(f = ~ -1,          # formula for log(lambda_it)
            offset = 1),       # optional multiplicative offset
  ne = list(f = ~ -1,          # formula for log(phi_it)
            offset = 1,        # optional multiplicative offset
            weights = neighbourhood(stsObj) == 1), # (w_ji) matrix
  end = list(f = ~ 1,          # formula for log(nu_it)
             offset = 1),      # optional multiplicative offset e_it
  family = "Poisson",          # Poisson or NegBin model
  subset = 2:nrow(stsObj),     # subset of observations to be used
  optimizer = list(stop = list(tol = 1e-5, niter = 100), # stop rules
                   regression = list(method = "nlminb"), # for penLogLik
                   variance = list(method = "nlminb")),  # for marLogLik
  verbose = FALSE,             # level of progress reporting
  start = list(fixed = NULL,   # list with initial values for fixed,
               random = NULL,  # random, and
               sd.corr = NULL),# variance parameters
  data = list(t = epoch(stsObj)-1), # named list of covariates
  keep.terms = FALSE           # whether to keep the model terms
)
@

The first three arguments \code{ar}, \code{ne}, and \code{end} specify the
model components using \code{formula} objects. By default, the counts
$y_{it}$ are assumed to be Poisson distributed, but a negative binomial model
can be chosen by setting \mbox{\code{family = "NegBin1"}}. By default, both
the penalized and marginal log-likelihoods are maximized using the
quasi-Newton algorithm available via the \R\ function \code{nlminb}. The
methods from \code{optim} may also be used, e.g.,
\mbox{\code{optimizer = list(variance = list(method = "Nelder-Mead"))}} is a
useful alternative for maximization of the marginal log-likelihood with
respect to the variance parameters. Initial values for the fixed, random, and
variance parameters can be specified in the \code{start} argument. If the
model contains covariates, these have to be provided in the \code{data}
argument. If a covariate does not vary across units, it may be given as a
vector of length $T$. Otherwise, covariate values must be given in a matrix
of size $T \times I$.

In the following, the functionality of \hhh\ is demonstrated using the data
sets introduced in Section~\ref{sec:data} and previously analyzed in
\citet{paul-etal-2008}, \citet{paul-held-2011} and \citet{herzog-etal-2010}.
Selected results are reproduced. For a thorough discussion we refer to these
papers.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Univariate modelling}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

As a first example, consider the univariate time series of meningococcal
infections in Germany, 01/2001--52/2006 \citep[cf.][Table~1]{paul-etal-2008}.
A Poisson model without autoregression and $S=1$ seasonal term is specified
as follows:

<<>>=
# specify a formula object for the endemic component
( f_S1 <- addSeason2formula(f = ~ 1, S = 1, period = 52) )
# fit the Poisson model
result0 <- hhh4(meningo, control = list(end = list(f = f_S1),
                                        family = "Poisson"))
summary(result0)
@

To fit the corresponding negative binomial model, we can use the convenient
\code{update} method:

<<>>=
result1 <- update(result0, family = "NegBin1")
@

Note that the \code{update} method by default uses the parameter estimates
from the original model as start values when fitting the updated model; see
\code{help("update.hhh4")} for details.
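The \code{update} mechanism is not restricted to the distributional family;
other parts of the model can be varied in the same way. As a sketch (not
evaluated here; the choice of $S=2$ harmonics is made only for illustration),
a variant of \code{result1} with a second pair of seasonal terms in the
endemic component could be set up via:

<<eval=FALSE>>=
# endemic component with S = 2 harmonics instead of S = 1
f_S2 <- addSeason2formula(f = ~ 1, S = 2, period = 52)
result1b <- update(result1, end = list(f = f_S2))
@

Such variants can then be compared by means of information criteria as shown
next.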
We can calculate Akaike's Information Criterion for the two models to check
whether accounting for overdispersion is useful for these data:

<<>>=
AIC(result0, result1)
@

Due to the default control settings with \verb|ar = list(f = ~ -1)|, the
autoregressive component has been omitted in the above models. It can be
included by the following model update:

<<>>=
# fit an autoregressive model
result2 <- update(result1, ar = list(f = ~ 1))
@

To extract only the ML estimates and standard errors instead of a full model
\code{summary}, the \code{coef} method can be used:

<<>>=
coef(result2, se = TRUE,    # also return standard errors
     amplitudeShift = TRUE, # transform sine/cosine coefficients
                            # to amplitude/shift parameters
     idx2Exp = TRUE)        # exponentiate remaining parameters
@

Here, \code{exp(ar.1)} is the autoregressive coefficient $\lambda$ and can be
interpreted as the epidemic proportion of disease incidence
\citep{held.paul2012}. Note that the above transformation arguments
\code{amplitudeShift} and \code{idx2Exp} can also be used in the
\code{summary} method. Many other standard methods are implemented for
\code{"hhh4"} fits, see, e.g., \code{help("confint.hhh4")}.

A plot of the fitted model components can be easily obtained:

\begin{center}
<<fig=TRUE>>=
plot(result2)
@
\end{center}

See the comprehensive \code{help("plot.hhh4")} for further options.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Bivariate modelling}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Now, the weekly numbers of both meningococcal disease (\textsc{MEN}) and
influenza (\textsc{FLU}) cases are analyzed to investigate whether influenza
infections predispose to meningococcal disease
\citep[cf.][Table~2]{paul-etal-2008}. This requires disease-specific
parameters which are specified in the formula object with \code{fe(\ldots)}.
In the following, a negative binomial model with mean
\begin{align*}
  \binom{\mu_{\text{men},t}} {\mu_{\text{flu},t}}=
  \begin{pmatrix}
    \lambda_\text{men} & \phi \\
    0 & \lambda_\text{flu} \\
  \end{pmatrix}
  \binom{\text{\sc men}_{t-1}}{\text{\sc flu}_{t-1}} +
  \binom{\nu_{\text{men},t}}{\nu_{\text{flu},t}}\,,
\end{align*}
is considered, where the endemic component includes $S=3$ seasonal terms for
the \textsc{FLU} data and $S=1$ seasonal term for the \textsc{MEN} data.
Here, $\phi$ quantifies the influence of past influenza cases on the
meningococcal disease incidence.
This model corresponds to the second model of Table~2 in
\citet{paul-etal-2008} and is fitted as follows:

<<>>=
# no "transmission" from meningococcus to influenza
neighbourhood(fluMen)["meningococcus","influenza"] <- 0
neighbourhood(fluMen)
@

<<>>=
# create formula for endemic component
f.end <- addSeason2formula(f = ~ -1 + fe(1, unitSpecific = TRUE),
                           # disease-specific intercepts
                           S = c(3, 1), # S = 3 for flu, S = 1 for men
                           period = 52)
# specify model
m <- list(ar = list(f = ~ -1 + fe(1, unitSpecific = TRUE)),
          ne = list(f = ~ 1, # phi, only relevant for meningococcus due to
                    weights = neighbourhood(fluMen)), # the weight matrix
          end = list(f = f.end),
          family = "NegBinM") # disease-specific overdispersion
# fit model
result <- hhh4(fluMen, control = m)
summary(result, idx2Exp=1:3)
@

A plot of the estimated mean components can be obtained as follows:

\setkeys{Gin}{width=1\textwidth}
\begin{center}
<<fig=TRUE>>=
plot(result, units = 1:2, legend = 2, legend.args = list(
  legend = c("influenza-driven", "autoregressive", "endemic")))
@
\end{center}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Multivariate modelling}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

For disease counts observed in a large number of regions (i.e.\ highly
multivariate time series of counts), the use of region-specific parameters to
account for regional heterogeneity is no longer feasible, as estimation and
identifiability problems may occur. Here we illustrate two approaches:
region-specific random effects and region-specific covariates. For a more
detailed illustration of areal \code{hhh4} models, see
\verb+vignette("hhh4_spacetime")+, which uses \verb+data("measlesWeserEms")+
as an example.

\subsubsection*{Influenza, Southern Germany, 2001--2008}

\citet{paul-held-2011} propose a random effects formulation to analyze the
weekly number of influenza cases in \Sexpr{ncol(fluBYBW)} districts of
Southern Germany. For example, consider a model with random intercepts in the
endemic component:
$c_i \stackrel{\text{iid}}{\sim} \N(0,\sigma^2_\nu), i=1,\ldots,I$.
Such effects are specified as:

<<>>=
f.end <- ~ -1 + ri(type = "iid", corr = "all")
@

The alternative \code{type = "car"} would assume spatially correlated random
effects; see \citet{paul-held-2011} for details. The argument
\code{corr = "all"} allows for correlation between region-specific random
effects in different components, e.g., random incidence levels $c_i$ in the
endemic component and random effects $b_i$ in the neighbor-driven component.
The following call to \hhh\ fits such a random effects model with linear
trend and $S=3$ seasonal terms in the endemic component, a fixed
autoregressive parameter $\lambda$, and first-order transmission weights
$w_{ji}=\mathbb{I}(j\sim i)$ -- normalized such that $\sum_i w_{ji} = 1$ for
all rows $j$ -- to the influenza data
\citep[cf.][Table~3, model~B2]{paul-held-2011}.
<<>>=
# endemic component: iid random effects, linear trend, S=3 seasonal terms
f.end <- addSeason2formula(f = ~ -1 + ri(type="iid", corr="all") +
                               I((t-208)/100),
                           S = 3, period = 52)
# model specification
model.B2 <- list(ar = list(f = ~ 1),
                 ne = list(f = ~ -1 + ri(type="iid", corr="all"),
                           weights = neighbourhood(fluBYBW),
                           normalize = TRUE), # all(rowSums(weights) == 1)
                 end = list(f = f.end, offset = population(fluBYBW)),
                 family = "NegBin1", verbose = TRUE,
                 optimizer = list(variance = list(method = "Nelder-Mead")))
# default start values for random effects are sampled from a normal
set.seed(42)
@

<<echo=FALSE>>=
if(compute){
  result.B2 <- hhh4(fluBYBW, model.B2)
  s.B2 <- summary(result.B2, maxEV = TRUE, idx2Exp = 1:3)
  #pred.B2 <- oneStepAhead(result.B2, tp = nrow(fluBYBW) - 2*52)
  predfinal.B2 <- oneStepAhead(result.B2, tp = nrow(fluBYBW) - 2*52,
                               type = "final")
  meanSc.B2 <- colMeans(scores(predfinal.B2))
  save(s.B2, meanSc.B2, file="hhh4-cache.RData")
}
@

<<eval=FALSE>>=
# fit the model (takes about 35 seconds)
result.B2 <- hhh4(fluBYBW, model.B2)
summary(result.B2, maxEV = TRUE, idx2Exp = 1:3)
@

<<echo=FALSE>>=
s.B2
@

Model choice based on information criteria such as AIC or BIC is well
explored and understood for models that correspond to fixed-effects
likelihoods. However, in the presence of random effects their use can be
problematic. For model selection in time series models, the comparison of
successive one-step-ahead forecasts with the actually observed data provides
a natural alternative. In this context, \citet{gneiting-raftery-2007}
recommend the use of strictly proper scoring rules, such as the logarithmic
score (logs) or the ranked probability score (rps). See
\citet{czado-etal-2009} and \citet{paul-held-2011} for further details.

One-step-ahead predictions for the last 2 years for model B2 could be
obtained as follows:

<<eval=FALSE>>=
pred.B2 <- oneStepAhead(result.B2, tp = nrow(fluBYBW) - 2*52)
@

However, computing ``rolling'' one-step-ahead predictions from a random
effects model is computationally expensive, since the model needs to be
refitted at every time point. The above call would take approximately 45
minutes! So for the purpose of this vignette, we use the fitted model based
on the whole time series to compute all (fake) predictions during the last
two years:

<<eval=FALSE>>=
predfinal.B2 <- oneStepAhead(result.B2, tp = nrow(fluBYBW) - 2*52,
                             type = "final")
@

The mean scores (logs and rps) corresponding to this set of predictions can
then be computed as follows:

<<eval=FALSE>>=
colMeans(scores(predfinal.B2, which = c("logs", "rps")))
@

<<echo=FALSE>>=
meanSc.B2[c("logs", "rps")]
@

Using predictive model assessments, \citet{meyer.held2013} found that
power-law transmission weights more appropriately reflect the spread of
influenza than the previously used first-order weights (which actually allow
the epidemic to spread only to directly adjacent districts within one week).
These power-law weights can be constructed by the function \code{W\_powerlaw}
and require the \code{neighbourhood} of the \sts\ object to contain adjacency
orders. The latter can be easily obtained from the binary adjacency matrix
using the function \code{nbOrder}. See the corresponding help pages or
\citet[Section~5]{meyer.etal2014} for illustrations.
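In code, a power-law variant of model~B2 could be set up along the following
lines (a sketch, not evaluated here; the truncation point \code{maxlag = 5}
is an arbitrary choice for illustration):

<<eval=FALSE>>=
# replace the binary adjacency matrix by adjacency orders
neighbourhood(fluBYBW) <- nbOrder(neighbourhood(fluBYBW), maxlag = 5)
# re-fit model B2 with parametric power-law weights up to order 5
result.B2pl <- update(result.B2,
                      ne = list(weights = W_powerlaw(maxlag = 5)))
@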
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection*{Measles, German federal states, 2005--2007}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

<<echo=FALSE>>=
data(MMRcoverageDE)
cardVac1 <- MMRcoverageDE[1:16,3:4]
adjustVac <- function(cardVac, p=0.5, nrow=1){
  card <- cardVac[,1]
  vac <- cardVac[,2]
  vacAdj <- vac*card + p*vac*(1-card)
  return(matrix(vacAdj, nrow=nrow, ncol=length(vacAdj), byrow=TRUE))
}
vac0 <- 1-adjustVac(cardVac1, p=0.5, nrow=measles2w@freq*3)
colnames(vac0) <- colnames(measles2w)
@

As a last example, consider the number of measles cases in the 16 federal
states of Germany, in the years 2005--2007. There is considerable regional
variation in the incidence pattern, which is most likely due to differences
in vaccination coverage. In the following, information about vaccination
coverage in each state, namely the log proportion of unvaccinated school
starters, is included as an explanatory variable in a model for the bi-weekly
aggregated measles data. See \citet{herzog-etal-2010} for further details.

Vaccination coverage levels for the year 2006 are available in the dataset
\code{data(MMRcoverageDE)}. This dataset can be used to compute the
$\Sexpr{nrow(vac0)}\times \Sexpr{ncol(vac0)}$ matrix \code{vac0} with
adjusted proportions of unvaccinated school starters in each state $i$ used
by \citet{herzog-etal-2010}. The first few entries of this matrix are shown
below:

<<>>=
vac0[1:2, 1:6]
@

We fit a Poisson model, which links the autoregressive parameter with this
covariate and contains $S=1$ seasonal term in the endemic component
\citep[cf.][Table~3, model~A0]{herzog-etal-2010}:

<<>>=
# endemic component: Intercept + sine/cosine terms
f.end <- addSeason2formula(f = ~ 1, S = 1, period = 26)
# autoregressive component: Intercept + vaccination coverage information
model.A0 <- list(ar = list(f = ~ 1 + logVac0),
                 end = list(f = f.end, offset = population(measles2w)),
                 data = list(t = epoch(measles2w), logVac0 = log(vac0)))
# fit the model
result.A0 <- hhh4(measles2w, model.A0)
summary(result.A0, amplitudeShift = TRUE)
@

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conclusion}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

As part of the \R~package \surveillance, the function \hhh\ provides a
flexible tool for the modelling of multivariate time series of infectious
disease counts. The presented count data model is able to account for serial
and spatio-temporal correlation, as well as heterogeneity in incidence levels
and disease transmission.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\bibliographystyle{apalike}
\renewcommand{\bibfont}{\small}
\bibliography{references}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\end{document}
surveillance/vignettes/hhh4_spacetime-cache.RData [binary data omitted]
surveillance/vignettes/monitoringCounts-cache/pMC.RData [binary data omitted]
surveillance/vignettes/monitoringCounts-cache/pMarkovChain.RData [binary data omitted]
surveillance/vignettes/monitoringCounts-cache/boda.RData [binary data omitted]
surveillance/vignettes/monitoringCounts-cache/fPlot.R
#!/usr/bin/env Rscript
# Create plots for fig:fPlot in vignette("monitoringCounts")
# Author: Ma"elle Salmon
# Code removed from vignette to drop ggplot2 dependency
library(ggplot2)
# for rectangles
widthRectangles <- 10
# dimensions for the ticks
heightTick <- 4
xTicks <- c(15,67,119)
yTicksStart <- rep(0,3)
yTicksEnd <- rep(0,3)
yTicksEnd2 <- rep(-5,3)
textTicks <- c("t-2*p","t-p","t[0]")
xBigTicks <- c(xTicks[1:2]-widthRectangles/2, xTicks[1:2]+widthRectangles/2,
               xTicks[3]-widthRectangles/2, xTicks[3])
yTicksBigEnd <- rep(0,6)
yTicksBigStart <- rep(heightTick,6)
# to draw the horizontal line
vectorDates <- rep(0,150)
dates <- seq(1:150)
data <- data.frame(dates,vectorDates)
xPeriods <- c(15,67,117,15+26,67+26)
p <- ggplot() +
  # white
  theme_void() +
  geom_segment(aes(x = 0, y = -20, xend = 200, yend = 10), size=2,
               arrow = arrow(length = unit(0.5, "cm")), colour ='white') +
  # time arrow
  geom_segment(aes(x = 0, y = 0, xend = 150, yend = 0), size=1,
               arrow = arrow(length = unit(0.5, "cm"))) +
  # ticks
  geom_segment(aes(x = xTicks, y = yTicksEnd2, xend = xTicks,
                   yend = yTicksStart),
               arrow = arrow(length = unit(0.3, "cm")), size=1) +
  # big ticks
  geom_segment(aes(x = xBigTicks, y = yTicksBigStart, xend = xBigTicks,
                   yend = yTicksBigEnd*2), size=1) +
  # time label
  annotate("text", label = "Time", x = 170, y = 0, size = 8,
           colour = "black", family="serif") +
  # ticks labels
  annotate('text', label=c("t[0]-2 %.% freq","t[0]-freq","t[0]"),
           x = xTicks, y = yTicksEnd - 10, size = 8, family="serif", parse=T)

## noPeriods = 2
pdf("fPlot1.pdf", width = 7, height = 3, colormodel = "gray")
p +
  # periods labels
  annotate('text', label=c("A","A","A","B","B"), x = xPeriods, y = rep(6,5),
           size = 8, family="serif", parse=T)
dev.off()

## noPeriods = 3
yTicksBigEnd2 <- rep(0,4)
yTicksBigStart2 <- rep(heightTick,4)
newX <- c(xTicks[1:2]+widthRectangles/2+52-widthRectangles, xTicks[1:2]+52/2)
xPeriods <- c(15,67,117,15+16,67+16,15+35,67+35)
pdf("fPlot2.pdf", width = 7, height = 3, colormodel = "gray")
p +
  geom_segment(aes(x = newX, y = yTicksBigStart2, xend = newX,
                   yend = yTicksBigEnd2), size=1) +
  # periods labels
  annotate('text', label=c("A","A","A","B","B","C","C"), x = xPeriods,
           y = rep(6,7), size = 8, family="serif", parse=T)
dev.off()
surveillance/vignettes/monitoringCounts-cache/fPlot1.pdf [binary data omitted]
surveillance/vignettes/monitoringCounts.Rnw
%\VignetteIndexEntry{Monitoring count time series in R: Aberration detection in public health surveillance}
%\VignetteDepends{surveillance, gamlss, INLA, MGLM}

\documentclass[nojss]{jss}
\usepackage{amsmath,bm}
\usepackage{subfig}
\newcommand{\BetaBin}{\operatorname{BetaBin}}
\newcommand{\Var}{\operatorname{Var}}
\newcommand{\logit}{\operatorname{logit}}
\newcommand{\NB}{\operatorname{NB}}

%% almost as usual
\author{Ma\"elle Salmon\\Robert Koch Institute \And
        Dirk Schumacher\\Robert Koch Institute \And
        Michael H\"ohle\\ Stockholm University,\\Robert Koch Institute }
\title{
\vspace{-2.2cm}
\fbox{\vbox{\normalfont\footnotesize This vignette corresponds to an article
published in the\\ \textit{Journal of Statistical Software}
2016;\textbf{70}(10):1--35. \doi{10.18637/jss.v070.i10}.}}\\[1cm]
Monitoring Count Time Series in \proglang{R}: Aberration Detection in Public
Health Surveillance}

%% for pretty printing and a nice hypersummary also set:
\Plainauthor{Ma\"elle Salmon, Dirk Schumacher, Michael H\"ohle} %% comma-separated
\Plaintitle{Monitoring Count Time Series in R: Aberration Detection in Public Health Surveillance} % without formatting
\Shorttitle{\pkg{surveillance}: Aberration detection in \proglang{R}} %% a short title (if necessary)

%% an abstract and keywords
\Abstract{
Public health surveillance aims at lessening disease burden by, e.g., timely
recognizing emerging outbreaks in case of infectious diseases. Seen from a
statistical perspective, this implies the use of appropriate methods for
monitoring time series of aggregated case reports. This paper presents the
tools for such automatic aberration detection offered by the \textsf{R}
package \pkg{surveillance}. We introduce the functionalities for the
visualization, modeling and monitoring of surveillance time series. With
respect to modeling we focus on univariate time series modeling based on
generalized linear models (GLMs), multivariate GLMs, generalized additive
models and generalized additive models for location, shape and scale.
Applications of such modeling include illustrating implementational
improvements and extensions of the well-known Farrington algorithm, e.g., by
spline-modeling or by treating it in a Bayesian context. Furthermore, we look
at categorical time series and address overdispersion using beta-binomial or
Dirichlet-multinomial modeling. With respect to monitoring we consider
detectors based on either a Shewhart-like single timepoint comparison between
the observed count and the predictive distribution or by likelihood-ratio
based cumulative sum methods. Finally, we illustrate how \pkg{surveillance}
can support aberration detection in practice by integrating it into the
monitoring workflow of a public health institution. Altogether, the present
article shows how well \pkg{surveillance} can support automatic aberration
detection in a public health surveillance context.
}
\Keywords{\proglang{R}, \texttt{surveillance}, outbreak detection, statistical process control}
\Plainkeywords{R, surveillance, outbreak detection, statistical process control} %% without formatting
%% at least one keyword must be supplied

\Address{
  Ma\"{e}lle Salmon, Dirk Schumacher\\
  Department for Infectious Diseases Epidemiology\\
  Robert Koch Institut Berlin\\
  Seestrasse 10\\
  13353 Berlin, Germany\\
  E-mail: \email{maelle.salmon@yahoo.se}, \email{mail@dirk-schumacher.net}\\
  URL: \url{https://masalmon.github.io/}\\
  \phantom{URL: }\url{http://www.dirk-schumacher.net/}\\
  Michael H\"{o}hle\\
  Department of Mathematics\\
  Stockholm University\\
  Kr\"{a}ftriket\\
  106 91 Stockholm, Sweden\\
  E-mail: \email{hoehle@math.su.se}\\
  URL: \url{https://www.math.su.se/~hoehle/}
}

\begin{document}
\SweaveOpts{concordance=TRUE}
\maketitle

\section{Introduction}
\label{sec:0}

Nowadays, the fight against infectious diseases requires not only treating
patients and setting up measures for prevention, but also the timely
recognition of emerging outbreaks in order to avoid their expansion. Along
these lines, health institutions such as hospitals and public health
authorities collect and store information about health events -- typically
represented as individual case reports containing clinical information, and
subject to specific case definitions. Analysing these data is crucial. It
enables situational awareness in general and the timely detection of aberrant
counts in particular, empowering the prevention of additional disease cases
through early interventions.

For any specific aggregation of characteristics of events, such as
over-the-counter sales of pain medication, new cases of foot-and-mouth
disease among cattle, or adults becoming sick with hepatitis C in Germany,
data can be represented as time series of counts with days, weeks, months or
years as time units of the aggregation. Abnormally high or low values at a
given time point can reveal critical issues such as an outbreak of the
disease or a malfunction of data transmission. Thus, identifying aberrations
in the collected data is decisive, for human as well as for animal health.

In this paper we present the \proglang{R} package \pkg{surveillance} which is
available from the Comprehensive \proglang{R} Archive Network (CRAN) at
\url{https://CRAN.R-project.org/package=surveillance}. It implements a range
of methods for aberration detection in time series of counts and proportions.
Statistical algorithms provide an objective and reproducible analysis of the
data and allow the automation of time-consuming aspects of the monitoring
process. In recent years, a variety of such tools has flourished in the
literature. Reviews of methods for aberration detection in time series of
counts can be found in~\citet{Buckeridge2007}~and~\citet{Unkel2012}. However,
the great variety of statistical algorithms for aberration detection can be a
hurdle to practitioners wishing to find a suitable method for their data. It
is our experience that a ready-to-use and understandable implementation and
the possibility to use the methods in a routine and automatic fashion are the
criteria most important to epidemiologists. The package offers an open-source
implementation of established, state-of-the-art methods for the prospective
detection of outbreaks in count data time series, as well as tools for the
visualization of the analysed time series.
With the package, the practitioner can introduce statistical surveillance
into routine practice without too much difficulty. As far as we know, the
package is now used in several public health institutions in Europe: at the
National Public Health Institute of Finland, at the Swedish Institute for
Communicable Disease Control, at the French National Reference Centre for
Salmonella, and at the Robert Koch Institute (RKI) in Berlin. The use of
\pkg{surveillance} at the RKI shall be the focus of this paper. The package
also provides many other functions serving epidemic modeling purposes. Such
susceptible-infectious-recovered based models and their extensions towards
regression based approaches are documented in other
works~\citep{held-etal-2005,held_etal2006,meyer.etal2011,meyer.etal2014}.

The present paper is designed as an extension of two previous articles about
the \pkg{surveillance} package published as~\citet{hoehle-2007}
and~\citet{hoehle-mazick-2010}. On the one hand, the paper aims at giving an
overview of the new features added to the package since the publication of
the two former papers. On the other hand it intends to illustrate how well
the \pkg{surveillance} package can support routine practical disease
surveillance by presenting the current surveillance system of infectious
diseases at the RKI.

This paper is structured as follows. Section~\ref{sec:1} gives an
introduction to the data structure used in the package for representing and
visualizing univariate or multivariate time series. Furthermore, the
structure and use of aberration detection algorithms are explained.
Section~\ref{sec:2} leads the reader through different surveillance methods
available in the package. Section~\ref{sec:3} describes the integration of
such methods in a complete surveillance system as currently in use at the
RKI. Finally, a discussion rounds off the work.

\section{Getting to know the basics of the package}

<<echo=FALSE>>=
options(width=77)
## create directories for plots and cache
dir.create("plots", showWarnings=FALSE)
dir.create("monitoringCounts-cache", showWarnings=FALSE)
## load packages
library('surveillance')
library('gamlss')
@

\SweaveOpts{prefix.string=plots/monitoringCounts}
\label{sec:1}

The package provides a central S4 data class \code{sts} to capture
multivariate or univariate time series. All further methods use objects of
this class as an input. Therefore we first describe the \code{sts} class and
then show the typical usage of a function for aberration detection, including
visualization. All monitoring methods of the package conform to the same
syntax.

\subsection{How to store time series and related information}

In \pkg{surveillance}, time series of counts and related information are
encoded in a specific S4-class called \code{sts} (\textit{surveillance time
series}) that represents possibly multivariate time series of counts. Denote
the counts as $\left( y_{it} ; i = 1, \ldots,m, t = 1, \ldots, n \right)$,
where $n$ is the length of the time series and $m$ is the number of entities,
e.g., geographical regions, hospitals or age groups, being monitored. An
example which we shall look at in more detail is a time series representing
the weekly counts of cases of infection with \textit{Salmonella Newport} in
all 16 federal states of Germany from 2004 to 2013 with $n=525$ weeks and
$m=16$ geographical units. Infections with \textit{Salmonella Newport}, a
subtype of \textit{Salmonella}, can trigger gastroenteritis, prompting those
affected to seek medical care.
Infections with \textit{Salmonella} have been notifiable in Germany since
2001, with data being forwarded to the RKI by the federal state health
authorities on behalf of the local health authorities.

\subsubsection[Slots of the class sts]{Slots of the class \texttt{sts}}

The key slots of the \code{sts} class are those describing the observed
counts and the corresponding time periods of the aggregation. The observed
counts $\left(y_{it}\right)$ are stored in the $n \times m$ matrix
\code{observed}. A number of other slots characterize time. First,
\code{epoch} denotes the corresponding time period of the aggregation. If the
Boolean \code{epochAsDate} is \code{TRUE}, \code{epoch} is the numeric
representation of \code{Date} objects corresponding to each observation in
\code{observed}. If the Boolean \code{epochAsDate} is \code{FALSE},
\code{epoch} is the time index $1 \leq t \leq n$ of each of these
observations. Then, \code{freq} is the number of observations per year: 365
for daily data, 52 for weekly data and 12 for monthly data. Finally,
\code{start} is a vector representing the origin of the time series with two
values that are the year and the epoch within that year for the first
observation of the time series -- \code{c(2014, 1)} for a weekly time series
starting on the first week of 2014 for instance.

Other slots enable the storage of additional information. Known aberrations
are recorded in the Boolean slot \code{state} of the same dimensions as
\code{observed}, with \code{TRUE} indicating an outbreak and \code{FALSE}
indicating the absence of any known aberration. The monitored population in
each of the units is stored in slot \code{populationFrac}, which gives either
proportions or numbers. The geography of the zone under surveillance is
accessible through slot \code{map}, an object of class
\code{SpatialPolygonsDataFrame}~\citep{sp1,sp2} providing the shapes of the
$m$ monitored areas, and through slot \code{neighbourhood}, a symmetric
Boolean matrix of size $m \times m$ encoding the neighborhood structure of
the units. Slot \code{map} is pertinent when units are geographical units,
whereas \code{neighbourhood} could be useful in any case, e.g., for storing a
contact matrix between age groups for modeling purposes. Finally, if
monitoring has been performed on the data, the information on its control
arguments and its results are stored in \code{control}, \code{upperbound} and
\code{alarm}, presented in Section~\ref{sec:howto}.

\subsubsection[Creation of an object of class sts]{Creation of an object of class \texttt{sts}}

The creation of an \code{sts} object is straightforward, requiring a call of
the constructor function \code{sts} together with the slots to be assigned as
arguments. The input of data from external files is one possibility for
getting the counts as it is described in \citet{hoehle-mazick-2010}. To
exemplify the process we shall use weekly counts of \textit{Salmonella
Newport} in Germany loaded using \code{data("salmNewport")}. Alternatively,
one can use coercion methods to convert between the \texttt{ts} class and the
\texttt{sts} class. Note that this only converts the content of the slot
\texttt{observed}, that is,

<<>>=
data("salmNewport")
@

<<coerce, eval=FALSE>>=
all.equal(observed(salmNewport),
          observed(as(as(salmNewport, "ts"), "sts")))
@

<<echo=FALSE>>=
stopifnot( <<coerce>> )
@

Using the \texttt{ts} class as intermediate step also allows the conversion
between other time series classes, e.g., from packages \pkg{zoo}~\citep{zoo}
or \pkg{xts}~\citep{xts}.

<<echo=FALSE>>=
# This code is the one used for the Salmon et al. (2016) JSS article.
# Using this code all examples from the article can be reproduced.
# computeALL is FALSE to avoid the computationally intensive parts
# of the code (use of simulations to find a threshold value for
# categoricalCUSUM, use of the boda function) but one can set it to
# TRUE to have it run.
computeALL <- FALSE
@

<<echo=FALSE>>=
# Define plot parameters
#Add lines using grid by a hook function. Use NULL to align with tick marks
hookFunc <- function() { grid(NA,NULL,lwd=1) }
cex.text <- 1.7
cex.axis <- cex.text
cex.main <- cex.text
cex.lab <- cex.text
cex.leg <- cex.text
line.lwd <- 2#1
stsPlotCol <- c("mediumblue","mediumblue","red2")
alarm.symbol <- list(pch=17, col="red2", cex=2, lwd=3)
#Define list with arguments to use with do.call("legend", legOpts)
legOpts <- list(x="topleft", legend=c(expression(U[t])), bty="n", lty=1,
                lwd=line.lwd, col=alarm.symbol$col, horiz=TRUE, cex=cex.leg)
#How should the par of each plot look?
par.list <- list(mar=c(6,5,5,5), family="Times")
#Do this once
y.max <- 0
plotOpts <- list(col=stsPlotCol, ylim=c(0,y.max),
                 main='', lwd=c(1,line.lwd,line.lwd),
                 dx.upperbound=0, #otherwise the upperbound line is put 0.5 off
                 cex.lab=cex.lab, cex.axis=cex.axis, cex.main=cex.main,
                 ylab="No. of reports", xlab="Time (weeks)", lty=c(1,1,1),
                 legend.opts=legOpts, alarm.symbol=alarm.symbol,
                 xaxis.tickFreq=list("%V"=atChange,"%m"=atChange,"%G"=atChange),
                 xaxis.labelFreq=list("%Y"=atMedian),
                 xaxis.labelFormat="%Y",
                 par.list=par.list, hookFunc=hookFunc)
@

\setkeys{Gin}{height=7cm, width=15cm}
\begin{figure}
\begin{center}
<<fig=TRUE, echo=FALSE>>=
y.max <- max(aggregate(salmNewport,by="unit")@observed,na.rm=TRUE)
plotOpts2 <- modifyList(plotOpts,
                        list(x=salmNewport, legend.opts=NULL,
                             ylim=c(0,y.max), type = observed ~ time),
                        keep.null=TRUE)
plotOpts2$par.list <- list(mar=c(6,5,0,5), family="Times")
plotOpts2$xaxis.tickFreq <- list("%m"=atChange, "%G"=atChange)
do.call("plot", plotOpts2)
@
\end{center}
\vspace{-1cm}
\caption{Weekly number of cases of S. Newport in Germany, 2004--2013.}
\label{fig:Newport}
\end{figure}

\subsubsection[Basic manipulation of objects of the class sts]{Basic manipulation of objects of the class \texttt{sts}}

This time series is represented as a multivariate \code{sts} object whose
dimensions correspond to the 16 German federal states. Values are weekly
counts, so \code{freq = 52}. Weeks are indexed by \code{Date} here
(\code{epochAsDate = TRUE}).
Here, the \code{atChange} and \code{atMedian} functions are small helper functions and the respective tick lengths are controlled by the \pkg{surveillance} specific option \code{surveillance.options("stsTickFactors")}. Actually \code{sts} objects can be plotted using different options: \code{type = observed ~ time} produces the time series for whole Germany as shown in Figure~\ref{fig:Newport}, whereas \code{type = observed ~ time | unit} is a panelled graph with each panel representing the time series of counts of a federal state as seen in Figure~\ref{fig:unit}. \setkeys{Gin}{height=7cm, width=9cm} \begin{figure} %\begin{center} %\hspace*{\fill}% \hspace{-1em} \subfloat[]{ <>= y.max <- max(observed(salmNewport[,2]),observed(salmNewport[,3]),na.rm=TRUE) plotOpts2 <- modifyList(plotOpts,list(x=salmNewport[,2],legend.opts=NULL,ylim=c(0,y.max)),keep.null=TRUE) plotOpts2$xaxis.tickFreq <- list("%G"=atChange) do.call("plot",plotOpts2) @ }\hspace{-3em}% \subfloat[]{ <>= plotOpts2 <- modifyList(plotOpts,list(x=salmNewport[,3],legend.opts=NULL,ylim=c(0,y.max)),keep.null=TRUE) plotOpts2$xaxis.tickFreq <- list("%G"=atChange) do.call("plot",plotOpts2) @ } %\hspace*{\fill}% \caption{Weekly count of S. Newport in the German federal states (a) Bavaria and (b) Berlin.} \label{fig:unit} %\end{center} \end{figure} Once created one can use typical subset operations on a \code{sts} object: for instance \code{salmNewport[} \code{1:10, "Berlin"]} is a new \code{sts} object with weekly counts for Berlin during the 10 first weeks of the initial dataset; \code{salmNewport[isoWeekYear(epoch(salmNewport))\$ISOYear<=2010,]} uses the \code{surveillance}'s \code{isoWeekYear()} function to get a \code{sts} object with weekly counts for all federal states up to 2010. Moreover, one can take advantage of the \proglang{R} function \code{aggregate()}. For instance, \code{aggregate(salmNewport,by="unit")} returns a \code{sts} object representing weekly counts of \textit{Salmonella Newport} in Germany as a whole, whereas \code{aggregate(salmNewport, by = "time")} corresponds to the total count of cases in each federal state over the whole period. \subsection{How to use aberration detection algorithms} \label{sec:howto} Monitoring algorithms of the package operate on objects of the class \code{sts} as described below. \subsubsection{Statistical framework for aberration detection} We introduce the framework for aberration detection on an univariate time series of counts $\left\{y_t,\> t=1,2,\ldots\right\}$. Surveillance aims at detecting an \textit{aberration}, that is to say, an important change in the process occurring at an unknown time $\tau$. This change can be a step increase of the counts of cases or a more gradual change~\citep{Sonesson2003}. Based on the possibility of such a change, for each time $t$ we want to differentiate between the two states \textit{in-control} and \textit{out-of-control}. At any timepoint $t_0\geq 1$, the available information -- i.e., past counts -- is defined as $\bm{y}_{t_0} = \left\{ y_t\>;\> t\leq t_0\right\}$. Detection is based on a statistic $r(\cdot)$ with resulting alarm time $T_A = \min\left\{ t_0\geq 1 : r(\bm{y}_{t_0}) > g\right\}$ where $g$ is a known threshold. Functions for aberration detection thus use past data to estimate $r(\bm{y}_{t_0})$, and compare it to the threshold $g$, above which the current count can be considered as suspicious and thus doomed as \textit{out-of-control}. 
Threshold values and alarm Booleans for each timepoint of the monitored range
are saved in the slots \code{upperbound} and \code{alarm}, of the same
dimensions as \code{observed}, while the method parameters used for computing
the threshold values and alarm Booleans are stored in the slot \code{control}.

\subsubsection{Aberration detection in the package}

To perform such monitoring of the counts of cases, one has to choose one of
the surveillance algorithms of the package -- this choice will be the topic
of Section~\ref{sec:using}. Then, one must indicate which part of the time
series or \code{range} has to be monitored -- for instance the current year.
Lastly, one needs to specify the parameters specific to the algorithm.

\subsubsection{Example with the EARS C1 method}

We will illustrate the basic principle by using the \code{earsC} function
that implements the EARS (Early Aberration Reporting System) methods of the
CDC as described in~\citet{SIM:SIM3197}. This algorithm is especially
convenient in situations where little historical information is available. It
offers three variants called C1, C2 and C3. Here we shall expand on C1, for
which the baseline consists of the 7 timepoints before the assessed timepoint
$t_0$, that is to say $\left(y_{t_0-7},\ldots,y_{t_0-1}\right)$. The expected
value is the mean of the baseline. The method is based on a statistic called
$C_{t_0}$ defined as
$C_{t_0}= \frac{(y_{t_0}-\bar{y}_{t_0})}{s_{t_0}}$, where
$$\bar{y}_{t_0}= \frac{1}{7} \cdot\sum_{i=t_0-7}^{t_0-1} y_i \textnormal{ and }
s_{t_0}^2= \frac{1}{7-1} \cdot\sum_{i=t_0-7}^{t_0-1}
\left(y_i - \bar{y}_{t_0}\right)^2 \,.$$
Under the null hypothesis of no outbreak, it is assumed that
$C_{t_0} \stackrel{H_0}{\sim} {N}(0,1)$. The upperbound $U_{t_0}$ is found by
assuming that $y_t$ is normal, estimating parameters by plug-in and then
taking the $(1-\alpha)$-th quantile of this distribution, i.e.
$U_{t_0}= \bar{y}_{t_0} + z_{1-\alpha}s_{t_0}$, where $z_{1-\alpha}$ is the
$(1-\alpha)$-quantile of the standard normal distribution. An alarm is raised
if $y_{t_0} > U_{t_0}$.

The output of the algorithm is a \code{sts} object that contains subsets of
slots \code{observed}, \code{population} and \code{state} defined by the
range of timepoints specified in the input -- \textit{e.g.}\ the last 20
timepoints of the time series, and with the slots \code{upperbound} and
\code{alarm} filled by the output of the algorithm. Information relative to
the \code{range} of data to be monitored and to the parameters of the
algorithm, such as \code{alpha} for \code{earsC}, has to be formulated in the
slot \code{control}. This information is also stored in the slot
\code{control} of the returned \code{sts} object for later inspection.

<<>>=
in2011 <- which(isoWeekYear(epoch(salmNewport))$ISOYear == 2011)
salmNewportGermany <- aggregate(salmNewport, by = "unit")
control <- list(range = in2011, method = "C1", alpha = 0.05)
surv <- earsC(salmNewportGermany, control = control)
plot(surv)
@

\setkeys{Gin}{height=7cm, width=15cm}
\begin{figure}
\begin{center}
<<fig=TRUE, echo=FALSE>>=
y.max <- max(observed(surv),upperbound(surv),na.rm=TRUE)
do.call("plot", modifyList(plotOpts,
                           list(x=surv, ylim=c(0,y.max)),
                           keep.null=TRUE))
@
\end{center}
\vspace{-1cm}
\caption{Weekly reports of S. Newport in Germany in 2011 monitored by the
EARS C1 method. The line represents the upperbound calculated by the
algorithm.
Triangles indicate alarms, \textit{i.e.}, the timepoints where the observed number of counts is higher than the upperbound.}
\label{fig:NewportEARS}
\end{figure}
The \code{sts} object is easily visualized using the function \code{plot} as depicted in Figure~\ref{fig:NewportEARS}, which shows the upperbound as a solid line and the alarms -- timepoints where the upperbound has been exceeded -- as triangles. The last four alarms correspond to a known outbreak in 2011 due to sprouts~\citep{Newport2011}. One sees that the upperbound right after the outbreak is inflated by the outbreak counts: it is very high, so that a smaller outbreak would not be detected.
The EARS methods C1, C2 and C3 are simple in that they only use information from the very recent past. This is appropriate when data have only been collected for a short time or when one expects the count to be fairly constant. However, data from the less recent past often encompass relevant information about, e.g., seasonality and time trend, which one should take into account when estimating the expected count and the associated threshold. For instance, ignoring an increasing time trend could decrease sensitivity. Conversely, overlooking an annual surge in counts during the summer could decrease specificity. Therefore, it is advisable to use detection methods whose underlying models incorporate essential characteristics of time series of disease count data such as overdispersion, seasonality, time trend and presence of past outbreaks in the records~\citep{Unkel2012,Shmueli2010}. Moreover, the EARS methods do not compute a proper prediction interval for the current count. Sounder statistical methods will be reviewed in the next section.
\section[Using surveillance in selected contexts]{Using \pkg{surveillance} in selected contexts}
\label{sec:using}
\label{sec:2}
More than a dozen algorithms for aberration detection are implemented in the package. Among those, this section presents a set of representative algorithms, which are already in routine application at several public health institutions or which we think have the potential to become so. First we describe the Farrington method introduced by~\citet{farrington96} together with the improvements proposed by~\citet{Noufaily2012}. As a Bayesian counterpart to these methods we present the BODA method published by~\citet{Manitz2013}, which allows the easy integration of covariates. All these methods perform one-timepoint detection in that they detect aberrations only when the count at the currently monitored timepoint is above the threshold. Hence, no accumulation of evidence takes place. As an extension, we introduce an implementation of the negative binomial cumulative sum (CUSUM) of~\citet{hoehle.paul2008} that allows the detection of sustained shifts by accumulating evidence over several timepoints. Finally, we present a method suitable for categorical data described in~\citet{hoehle2010} that is also based on cumulative sums.
\subsection{One size fits them all for count data}
Two implementations of the Farrington method, which is currently \textit{the} method of choice at European public health institutes \citep{hulth_etal2010}, exist in the package. First, the original method as described in \citet{farrington96} is implemented as the function \code{farrington}. Its use was already described in \citet{hoehle-mazick-2010}.
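For completeness, a minimal call of this original implementation could look as follows (a sketch only: the chosen \code{control} settings are illustrative, not tuned recommendations for this time series):
<<farringtonSketch, eval=FALSE>>=
## Monitor 2011 with the original Farrington algorithm (illustrative settings)
salm.far.orig <- farrington(salmNewportGermany,
                            control = list(range = in2011, b = 4, w = 3,
                                           alpha = 0.01))
@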
Now, the newly implemented function \code{farringtonFlexible} supports the use of this \textit{original method} as well as the \textit{improved method} built on suggestions made by~\citet{Noufaily2012} for improving the specificity without reducing the sensitivity. In the function \code{farringtonFlexible} one can choose between the original method and the improved method through appropriate \code{control} arguments. In the example below, \code{control1} corresponds to the use of the original method and \code{control2} indicates the options for the improved method.
<>=
control1 <- list(range = in2011, noPeriods = 1,
                 b = 4, w = 3, weightsThreshold = 1,
                 pastWeeksNotIncluded = 3, pThresholdTrend = 0.05,
                 thresholdMethod = "delta")
control2 <- list(range = in2011, noPeriods = 10,
                 b = 4, w = 3, weightsThreshold = 2.58,
                 pastWeeksNotIncluded = 26, pThresholdTrend = 1,
                 thresholdMethod = "nbPlugin")
@
<>=
control1$limit54 <- control2$limit54 <- c(0,50) # for the figure
@
In both cases the steps of the algorithm are the same. In a first step, an overdispersed Poisson generalized linear model with log link is fitted to the reference data $\bm{y}_{t_0} \subseteq \left\{ y_t\>;\> t\leq t_0\right\}$, where $\E(y_t)=\mu_t$ with $\log \mu_t = \alpha + \beta t$ and $\Var(y_t)=\phi\cdot\mu_t$ and where $\phi\geq1$ is ensured.
The original method took seasonality into account by using a subset of the available data as reference data for fitting the GLM: \code{w} timepoints centred around the timepoint located $1,2,\ldots,b$ years before $t_0$, amounting to a total of $b \cdot (2w+1)$ reference values. However, it was shown in~\citet{Noufaily2012} that the algorithm performs better when using more historical data. In order to do so without disregarding seasonality, the authors introduced a zero order spline with 11 knots, which can be conveniently represented as a 10-level factor. We have extended this idea in our implementation so that one can choose an arbitrary number of periods in each year. Thus, $\log \mu_t = \alpha + \beta t +\gamma_{c(t)}$ where $\gamma_{c(t)}$ are the coefficients of a zero order spline with $\mathtt{noPeriods}+1$ knots, which can be conveniently represented as a $\mathtt{noPeriods}$-level factor that reflects seasonality. Here, $c(t)$ is a function indicating which season or period of the year $t$ belongs to. The algorithm uses \code{w}, \code{b} and \texttt{noPeriods} to deduce the length of the periods so that they all have the same length up to rounding. An exception is the reference window centred around $t_0$. Figure~\ref{fig:fPlot} shows a minimal example, where each character corresponds to a different period. Note that setting $\mathtt{noPeriods} = 1$ corresponds to using the original method with only a subset of the data: there is only one period defined per year, the reference window around $t_0$, and other timepoints are not included in the model.
\setkeys{Gin}{height=3cm, width=7cm}
\begin{figure}
\subfloat[$\texttt{noPeriods}=2$]{
\includegraphics[width=0.45\textwidth]{monitoringCounts-cache/fPlot1.pdf}
}
\qquad
\subfloat[$\texttt{noPeriods}=3$]{
\includegraphics[width=0.45\textwidth]{monitoringCounts-cache/fPlot2.pdf}
}
\caption{Construction of the noPeriods-level factor to account for seasonality, depending on the value of the half-window size $w$ and of the frequency of the data. Here the number of years to go back in the past $b$ is 2.
Each level of the factor variable corresponds to a period delimited by ticks and is denoted by a character. The windows around $t_0$ are respectively of size $2w+1$,~$2w+1$ and $w+1$. The segments between them are divided into the other periods so that they have the same length up to rounding.}
\label{fig:fPlot}
\end{figure}
Moreover, it was shown in \citet{Noufaily2012} that it is better to exclude the last 26 weeks before $t_0$ from the baseline in order to avoid reducing sensitivity when an outbreak has started recently before $t_0$. In the \code{farringtonFlexible} function, one controls this by specifying \code{pastWeeksNotIncluded}, which is the number of last timepoints before $t_0$ that are not to be used. The (historical) default is to use \code{pastWeeksNotIncluded = w}. Lastly, in the new implementation a population offset can be included in the GLM by setting \code{populationBool} to \code{TRUE} and supplying the possibly time-varying population size in the \code{population} slot of the \code{sts} object, but this will not be discussed further here.
In a second step, the expected number of counts $\mu_{t_0}$ is predicted for the current timepoint $t_0$ using this GLM. An upperbound $U_{t_0}$ is calculated based on this predicted value and its variance. The two versions of the algorithm make different assumptions for this calculation. The original method assumes that a transformation of the prediction error $g\left(y_{t_0}-\hat{\mu}_{t_0}\right)$ is normally distributed; for instance, when using the identity transformation $g(x)=x$ one obtains
$$y_{t_0} - \hat{\mu}_{t_0} \sim \mathcal{N}(0,\Var(y_{t_0}-\hat{\mu}_{t_0})) \,.$$
The upperbound of the prediction interval is then calculated based on this distribution. First we have that
$$ \Var(y_{t_0}-\hat{\mu}_{t_0}) = \Var(y_{t_0}) + \Var(\hat{\mu}_{t_0})=\phi\mu_{t_0}+\Var(\hat{\mu}_{t_0}) $$
with $\Var(y_{t_0})$ being the variance of an observation and $\Var(\hat{\mu}_{t_0})$ being the variance of the estimate. The threshold, defined as the upperbound of a one-sided $(1-\alpha)\cdot 100\%$ prediction interval, is then
$$U_{t_0} = \hat{\mu}_{t_0} + z_{1-\alpha}\sqrt{\widehat{\Var}(y_{t_0}-\hat{\mu}_{t_0})} \,.$$
This method can be used by setting the control option \code{thresholdMethod} equal to "\code{delta}". However, a weakness of this procedure is the normality assumption itself, so that an alternative was presented in \citet{Noufaily2012} and implemented as \code{thresholdMethod="Noufaily"}. The central assumption of this approach is that $y_{t_0} \sim \NB\left(\mu_{t_0},\nu\right)$, with $\mu_{t_0}$ the mean of the distribution and $\nu=\frac{\mu_{t_0}}{\phi-1}$ its overdispersion parameter. In this parameterization, we still have $\E(y_t)=\mu_t$ and $\Var(y_t)=\phi\cdot\mu_t$ with $\phi>1$ -- otherwise a Poisson distribution is assumed for the observed count. The threshold is defined as a quantile of the negative binomial distribution with plug-in estimates $\hat{\mu}_{t_0}$ and $\hat{\phi}$. Note that this disregards the estimation uncertainty in $\hat{\mu}_{t_0}$ and $\hat{\phi}$. As a consequence, the method "\code{muan}" (\textit{mu} for $\mu$ and \textit{an} for asymptotic normal) tries to solve the problem by using the asymptotic normal distribution of $(\hat{\alpha},\hat{\beta})$ to derive the upper $(1-\alpha)\cdot 100\%$ quantile of the asymptotic normal distribution of $\log\hat{\mu}_{t_0}=\hat{\alpha}+\hat{\beta}t_0$.
Note that this does not reflect all estimation uncertainty because it disregards the estimation uncertainty of $\hat{\phi}$. Note also that for time series where the variance of the estimator is large, the upperbound also ends up being very large. Thus, the method "\code{nbPlugin}" seems to provide information that is easier for epidemiologists to interpret, whereas "\code{muan}" is more statistically correct.
In a last step, the observed count $y_{t_0}$ is compared to the upperbound $U_{t_0}$ and an alarm is raised if $y_{t_0} > U_{t_0}$.
In both cases the fitting of the GLM involves three important steps. First, the algorithm performs an optional power-transformation for skewness correction and variance stabilisation, depending on the value of the parameter \code{powertrans} in the \code{control} slot. Then, the significance of the time trend is checked. The time trend is included only when significant at a chosen level \code{pThresholdTrend}, when there are more than three years of reference data and if no overextrapolation occurs because of the time trend. Lastly, past outbreaks are reweighted based on their Anscombe residuals. In \code{farringtonFlexible} the limit for reweighting past counts, \code{weightsThreshold}, can be specified by the user. If the Anscombe residual of a count is higher than \code{weightsThreshold} it is reweighted accordingly in a second fitting of the GLM. \citet{farrington96} used a value of $1$ whereas \citet{Noufaily2012} advise a value of $2.58$ so that the reweighting procedure is less drastic, because it also shrinks the variance of the observations.
The original method is widely used in public health surveillance~\citep{hulth_etal2010}. The reason for its success is primarily that it does not need to be fine-tuned for each specific pathogen. It is hence easy to implement it for scanning data for many different pathogens. Furthermore, it does tackle classical issues of surveillance data: overdispersion, the presence of past outbreaks (which are reweighted) and seasonality (which is taken into account differently in the two methods). An example of the use of the function is shown in Figure~\ref{fig:newportFar} with the code below.
<>=
salm.farrington <- farringtonFlexible(salmNewportGermany, control1)
salm.noufaily <- farringtonFlexible(salmNewportGermany, control2)
@
\setkeys{Gin}{height=7cm, width=9cm}
\begin{figure}
\hspace{-1em}
%\begin{center}
\subfloat[]{
<>=
y.max <- max(observed(salm.farrington),upperbound(salm.farrington),observed(salm.noufaily),upperbound(salm.noufaily),na.rm=TRUE)
do.call("plot",modifyList(plotOpts,list(x=salm.farrington,ylim=c(0,y.max))))
@
}
\hspace{-3em}
\subfloat[]{
<>=
do.call("plot",modifyList(plotOpts,list(x=salm.noufaily,ylim=c(0,y.max))))
@
}
\caption{S. Newport in Germany in 2011 monitored by (a) the original method and (b) the improved method. For the figure we turned off the option that the threshold is only computed if there were more than 5 cases during the last 4 timepoints including $t_0$. One gets fewer alarms with the improved method and still does not miss the outbreak in the summer. Simulations on more time series support the use of the improved method instead of the original method.}
\label{fig:newportFar}
\end{figure}
%
With our implementation of the improvements presented in \citet{Noufaily2012} we hope that, with time, the improved method can replace the original method in routine use. The RKI system described in Section~\ref{sec:RKI} already uses this improved method.
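Once both fits are available one might, for instance, cross-tabulate the alarms flagged by the two parametrizations using the \code{alarms} accessor (a small illustration, not part of the original analysis):
<<compareAlarms, eval=FALSE>>=
## Cross-tabulate the alarms of the original and the improved parametrization
table(original = alarms(salm.farrington)[, 1],
      improved = alarms(salm.noufaily)[, 1])
@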
\subsubsection{Similar methods in the package}
The package also contains further methods based on a subset of the historical data: \code{bayes}, \code{rki} and \code{cdc}. See Table~\ref{table:ref} for the corresponding references. Here, \code{bayes} uses a simple conjugate prior-posterior approach and computes the parameters of a negative binomial distribution based on past values. The procedure \code{rki} makes either the assumption of a normal or a Poisson distribution based on the mean of past counts. Finally, \code{cdc} aggregates weekly data into 4-week counts and computes an upper confidence limit based on the normal distribution. None of these methods offer the inclusion of a linear trend, down-weighting of past outbreaks or power transformation of the data. Although these methods are nice to have at hand, we recommend using the improved method implemented in the function \code{farringtonFlexible} because it is rather fast and makes use of more historical data than the other methods.
\subsection{A Bayesian refinement}
The \code{farringtonFlexible} function described previously was a first indication that the \textit{monitoring} of surveillance time series requires a good \textit{modeling} of the time series before assessing aberrations. Generalized linear models (GLMs) and generalized additive models (GAMs) are well-established and powerful modeling frameworks for handling the count data nature and trends of time series in a regression context. The \code{boda} procedure~\citep{Manitz2013} continues this line of thinking by extending the simple GLMs used in the \code{farrington} and \code{farringtonFlexible} procedures to a fully fledged Bayesian GAM allowing for penalized splines, e.g., to describe trends and seasonality, while simultaneously adjusting for previous outbreaks or concurrent processes influencing the case counts. A particular advantage of the Bayesian approach is that it constitutes a seamless framework for performing both estimation and subsequent prediction: the uncertainty in parameter estimation is directly carried forward to the predictive posterior distribution. Neither asymptotic normal approximations nor plug-in inference is needed. For fast approximate Bayesian inference we use the \pkg{INLA} \proglang{R} package~\citep{INLA} to fit the Bayesian GAM.
Still, monitoring with \code{boda} is substantially slower than using the Farrington procedures. Furthermore, detailed regression modeling is only meaningful if the time series is known to be subject to external influences on which information is available. Hence, the typical use at a public health institution would be the detailed analysis of a few selected time series, e.g., critical ones or those with known trend character. As an example, \citet{Manitz2013} studied the influence of absolute humidity on the occurrence of weekly reported campylobacter cases in Germany.
<>=
# Load data and create \code{sts}-object
data("campyDE")
cam.sts <- sts(epoch=campyDE$date, observed=campyDE$case, state=campyDE$state)
par(las=1)
# Plot
y.max <- max(observed(cam.sts),upperbound(cam.sts),na.rm=TRUE)
plotOpts3 <- modifyList(plotOpts,list(x=cam.sts,ylab="",legend.opts=NULL,ylim=c(0,y.max),type = observed ~ time),keep.null=TRUE)
plotOpts3$xaxis.tickFreq <- list("%m"=atChange,"%G"=atChange)
do.call("plot",plotOpts3)
par(las=0)
#mtext(side=2,text="No. of reports",
#      las=0,line=3, cex=cex.text,family="Times")
par(family="Times")
text(-20, 2600, "No. of\n reports", pos = 3, xpd = T, cex=cex.text)
of\n reports", pos = 3, xpd = T,cex=cex.text) text(510, 2900, "Absolute humidity", pos = 3, xpd = T,cex=cex.text) text(510, 2550, expression(paste("[",g/m^3,"]", sep='')), pos = 3, xpd = T,cex=cex.text) lines(campyDE$hum*50, col="white", lwd=2) axis(side=4, at=seq(0,2500,by=500),labels=seq(0,50,by=10),las=1,cex.lab=cex.text, cex=cex.text,cex.axis=cex.text,pos=length(epoch(cam.sts))+20) #mtext(side=4,text=expression(paste("Absolute humidity [ ",g/m^3,"]", sep='')), # las=0,line=1, cex=cex.text,family="Times") @ \setkeys{Gin}{height=7cm, width=15cm} \begin{figure} \begin{center} <>= <> @ \end{center} \vspace{-1cm} \caption{Weekly number of reported campylobacteriosis cases in Germany 2002-2011 as vertical bars. In addition, the corresponding mean absolute humidity time series is shown as a white curve.} \label{fig:campyDE} \end{figure} <>= data("campyDE") cam.sts <- sts(epoch = campyDE$date, observed = campyDE$case, state = campyDE$state) plot(cam.sts, col = "mediumblue") lines(campyDE$hum * 50, col = "white", lwd = 2) axis(4, at = seq(0, 2500, by = 500), labels = seq(0, 50, by = 10)) @ The corresponding plot of the weekly time series is shown in Figure~\ref{fig:campyDE}. We observe a strong association between humidity and case numbers - an association which is stronger than with, e.g., temperature or relative humidity. As noted in \citet{Manitz2013} the excess in cases in 2007 is thus partly explained by the high atmospheric humidity. Furthermore, an increase in case numbers during the 2011 STEC O104:H4 outbreak is observed, which is explained by increased awareness and testing of many gastroenteritits pathogens during that period. The hypothesis is thus that there is no actual increased disease activity~\citep{bernard_etal2014}. Unfortunately, the German reporting system only records positive test results without keeping track of the number of actual tests performed -- otherwise this would have been a natural adjustment variable. Altogether, the series contains several artefacts which appear prudent to address when monitoring the campylobacteriosis series. The GAM in \code{boda} is based on the negative binomial distribution with time-varying expectation and time constant overdispersion parameter, i.e., \begin{align*} y_t &\sim \operatorname{NB}(\mu_t,\nu) \end{align*} with $\mu_{t}$ the mean of the distribution and $\nu$ the dispersion parameter~\citep{lawless1987}. Hence, we have $\E(y_t)=\mu_t$ and $\Var(y_t)=\mu_t\cdot(1+\mu_t/\nu)$. The linear predictor is given by \begin{align*} \log(\mu_t) &= \alpha_{0t} + \beta t + \gamma_t + \bm{x}_t^\top \bm{\delta} + \xi z_t, \quad t=1,\ldots,t_0. \end{align*} Here, the time-varying intercept $\alpha_{0t}$ is described by a penalized spline (e.g., first or second order random walk) and $\gamma_t$ denotes a periodic penalized spline (as implemented in \code{INLA}) with period equal to the periodicity of the data. Furthermore, $\beta$ characterizes the effect of a possible linear trend (on the log-scale) and $\xi$ is the effect of previous outbreaks. Typically, $z_t$ is a zero-one process denoting if there was an outbreak in week $t$, but more involved adaptive and non-binary forms are imaginable. Finally, $\bm{x}_t$ denotes a vector of possibly time-varying covariates, which influence the expected number of cases. Data from timepoints $1,\ldots,t_0-1$ are now used to determine the posterior distribution of all model parameters and subsequently the posterior predictive distribution of $y_{t_0}$ is computed. 
If the actual observed value of $y_{t_0}$ is above the $(1-\alpha)\cdot 100\%$ quantile of the predictive posterior distribution, an alarm is flagged for $t_0$.
Below we illustrate the use of \code{boda} to monitor the campylobacteriosis time series from 2007. In the first case we include in the model for $\log\left(\mu_t\right)$ penalized splines for trend and seasonality and a simple linear trend.
<>=
library("INLA")
rangeBoda <- which(epoch(cam.sts) >= as.Date("2007-01-01"))
control.boda <- list(range = rangeBoda, X = NULL, trend = TRUE,
                     season = TRUE, prior = "iid", alpha = 0.025,
                     mc.munu = 10000, mc.y = 1000,
                     samplingMethod = "marginals")
boda <- boda(cam.sts, control = control.boda)
@
<>=
if (computeALL) {
  ##hoehle 2018-07-18: changed code to use NICELOOKINGboda, but that's iid. Reason:
  ##The option 'rw1' currently crashes INLA.
  <>
  save(list = c("boda", "control.boda", "rangeBoda"),
       file = "monitoringCounts-cache/boda.RData")
} else {
  load("monitoringCounts-cache/boda.RData")
}
@
In the second case we instead use only penalized and linear trend components, and, furthermore, include as covariates lags 1--4 of the absolute humidity as well as zero-one indicators for $t_0$ belonging to the last two weeks (\code{christmas}) or first two weeks (\code{newyears}) of the year, respectively. These covariates shall account for systematically changed reporting behavior at the turn of the year (cf.\ Figure~\ref{fig:campyDE}). Finally, \code{O104period} is an indicator variable for whether the reporting week belongs to the W21--W30 2011 period of increased awareness during the O104:H4 STEC outbreak. No additional correction for past outbreaks is made.
<>=
covarNames <- c("l1.hum", "l2.hum", "l3.hum", "l4.hum",
                "newyears", "christmas", "O104period")
control.boda2 <- modifyList(control.boda,
                            list(X = campyDE[, covarNames], season = FALSE))
boda.covars <- boda(cam.sts, control = control.boda2)
@
<>=
if (computeALL) {
  <>
  save(list = c("boda.covars", "covarNames", "control.boda2"),
       file = "monitoringCounts-cache/boda.covars.RData")
} else {
  load("monitoringCounts-cache/boda.covars.RData")
}
@
We plot \code{boda.covars} in Figure~\ref{fig:b} and compare the alarms of the two \code{boda} calls with \code{farrington}, \code{farringtonFlexible} and \code{bayes} in Figure~\ref{fig:alarmplot} (plot \code{type = alarm ~ time}).
\fbox{\vbox{
Note (2018-07-19): We currently have to use the argument \code{prior = "iid"} in both calls of the \code{boda} function, because the procedure crashes when using recent versions of \pkg{INLA} (\code{>= 17.06.20}) with argument \code{prior = "rw1"}.
%(the original results were produced using version 0.0-1458166556,
%and version 0.0-1485844051 from 2017-01-31 also works)
This means results in this vignette deviate from the results reported in the JSS paper -- in particular we do not get any alarms when using the \code{boda} procedure with covariates. We are looking into the problem.
}}
Note here that the \code{bayes} procedure is not really useful, as its adjustment for seasonality works only poorly. Moreover, we think that this method produces many false alarms for this time series because it disregards the increasing time trend in the number of reported cases. Furthermore, it becomes clear that the improved Farrington procedure acts similarly to the original procedure, but the improved reweighting and trend inclusion produce fewer alarms. The \code{boda} method is to be seen as a step towards more Bayesian thinking in aberration detection.
However, besides the time required for the detailed modeling, the running time of the procedure is also prohibitive with regard to routine application. As a response, \citet{Maelle} introduce a method with two advantages: it allows outbreak detection to be adjusted for reporting delays and it includes an approximate inference method that is much faster than the INLA inference method. However, its linear predictor is more in the style of~\citet{Noufaily2012}, allowing neither additional covariates nor penalized options for the intercept.
\setkeys{Gin}{height=7cm, width=15cm}
\begin{figure}
\begin{center}
<>=
y.max <- max(observed(boda.covars),upperbound(boda.covars),na.rm=TRUE)
plotOpts2 <- modifyList(plotOpts,list(x=boda.covars,ylim=c(0,y.max)),keep.null=TRUE)
plotOpts2$xaxis.tickFreq <- list("%m"=atChange,"%G"=atChange)
do.call("plot",plotOpts2)
@
\end{center}
\vspace{-1cm}
\caption{Weekly reports of Campylobacter in Germany in 2007-2011 monitored by the boda method with covariates. The line represents the upperbound calculated by the algorithm. Triangles indicate alarms, \textit{i.e.}, timepoints where the observed number of counts is higher than the upperbound.}
\label{fig:b}
\end{figure}
<>=
control.far <- list(range=rangeBoda,b=4,w=5,alpha=0.025*2)
far <- farrington(cam.sts,control=control.far)
# Both farringtonFlexible and algo.bayes use a one-sided interval just as boda.
control.far2 <- modifyList(control.far,list(alpha=0.025))
farflex <- farringtonFlexible(cam.sts,control=control.far2)
bayes <- suppressWarnings(bayes(cam.sts,control=control.far2))
@
<>=
# Small helper function to combine several equally long univariate sts objects
combineSTS <- function(stsList) {
  epoch <- as.numeric(epoch(stsList[[1]]))
  observed <- NULL
  alarm <- NULL
  for (i in 1:length(stsList)) {
    observed <- cbind(observed,observed(stsList[[i]]))
    alarm <- cbind(alarm,alarms(stsList[[i]]))
  }
  colnames(observed) <- colnames(alarm) <- names(stsList)
  res <- sts(epoch=as.numeric(epoch), epochAsDate=TRUE,
             observed=observed, alarm=alarm)
  return(res)
}
@
<>=
# Make an artificial sts object containing one column per set of monitoring results
cam.surv <- combineSTS(list(boda.covars=boda.covars,boda=boda,bayes=bayes,
                            farrington=far,farringtonFlexible=farflex))
par(mar=c(4,8,2.1,2),family="Times")
plot(cam.surv,type = alarm ~ time,lvl=rep(1,ncol(cam.surv)),
     alarm.symbol=list(pch=17, col="red2", cex=1,lwd=3),
     cex.axis=1,xlab="Time (weeks)",cex.lab=1,
     xaxis.tickFreq=list("%m"=atChange,"%G"=atChange),
     xaxis.labelFreq=list("%G"=at2ndChange),
     xaxis.labelFormat="%G")
@
\setkeys{Gin}{height=7cm, width=16cm}
\begin{figure}
\begin{center}
<>=
<>
@
\end{center}
\caption{Alarmplot showing the alarms for the campylobacteriosis time series for four different algorithms.}
\label{fig:alarmplot}
\end{figure}
\subsection{Beyond one-timepoint detection}
GLMs as used in the Farrington method are suitable for the purpose of aberration detection since they allow a regression approach for adjusting counts for known phenomena such as trend or seasonality in surveillance data. Nevertheless, the Farrington method only performs one-timepoint detection. In some contexts it can be more relevant to detect sustained shifts early, e.g., an outbreak could be characterized at first by counts slightly higher than usual in subsequent weeks without each weekly count being flagged by one-timepoint detection methods. Control charts inspired by statistical process control (SPC), e.g., cumulative sums, would allow the detection of sustained shifts.
Yet they were not tailored to the specific characteristics of surveillance data such as overdispersion or seasonality. The method presented in \citet{hoehle.paul2008} provides a synthesis of both worlds, i.e., traditional surveillance methods and SPC. The method is implemented in the package as the function \code{glrnb}, whose use is explained here.
\subsubsection{Definition of the control chart}
For the control chart, two distributions are defined, one for each of the two states \textit{in-control} and \textit{out-of-control}, whose likelihoods are compared at each time step. The \textit{in-control} distribution $f_{\bm{\theta}_0}(y_t|\bm{z}_t)$ with the covariates $\bm{z}_t$ is estimated by a GLM of the Poisson or negative binomial family with a log link, depending on the overdispersion of the data. In this context, the standard model for the \textit{in-control} mean is
$$\log \mu_{0,t}=\beta_0+\beta_1t+\sum_{s=1}^S\left[\beta_{2s}\cos \left(\frac{2\pi s t}{\mathtt{Period}}\right)+\beta_{2s+1}\sin \left(\frac{2\pi s t}{\mathtt{Period}}\right)\right]$$
where $S$ is the number of harmonic waves to use and \texttt{Period} is the period of the data as indicated in the \code{control} slot, for instance 52 for weekly data. However, more flexible linear predictors, e.g., containing splines, concurrent covariates or an offset, could be used on the right hand-side of the equation. The GLM could therefore be made very similar to the one used by~\citet{Noufaily2012}, with reweighting of past outbreaks and various criteria for including the time trend.
The parameters of the \textit{in-control} and \textit{out-of-control} models are respectively given by $\bm{\theta}_0$ and $\bm{\theta}_1$. The \textit{out-of-control} mean is defined as a function of the \textit{in-control} mean, either with a multiplicative shift (additive on the log-scale) whose size $\kappa$ can be given as an input or reestimated at each timepoint $t>1$, $\mu_{1,t}=\mu_{0,t}\cdot \exp(\kappa)$, or with an unknown autoregressive component as in \citet{held-etal-2005}, $\mu_{1,t}=\mu_{0,t}+\lambda y_{t-1}$ with unknown $\lambda>0$.
In \code{glrnb}, timepoints are divided into two intervals: phase 1 and phase 2. The \textit{in-control} mean and overdispersion are estimated with a GLM fitted on phase 1 data, whereas surveillance operates on phase 2 data. When $\lambda$ is fixed, one uses a likelihood ratio (LR) and defines the stopping time for alarm as
$$N=\min \left\{ t_0 \geq 1 : \max_{1\leq t \leq t_0} \left[ \sum_{s=t}^{t_0} \log\left\{ \frac{f_{\bm{\theta}_1}(y_s|\bm{z}_s)}{f_{\bm{\theta}_0}(y_s|\bm{z}_s)} \right\} \right] \geq \mathtt{c.ARL} \right\},$$
where $\mathtt{c.ARL}$ is the threshold of the CUSUM. When $\lambda$ is unknown, as in the autoregressive variant, the unknown parameters have to be estimated on the fly at each timepoint, which leads to a generalized likelihood ratio (GLR) with the following stopping rule:
$$N_G=\min \left\{ t_0 \geq 1 : \max_{1\leq t \leq t_0} \sup_{\bm{\theta} \in \bm{\Theta}} \left[ \sum_{s=t}^{t_0} \log\left\{ \frac{f_{\bm{\theta}}(y_s|\bm{z}_s)}{f_{\bm{\theta}_0}(y_s|\bm{z}_s)} \right\} \right] \geq \mathtt{c.ARL} \right\} \,.$$
Thus, one does not make any hypothesis about the specific value of the change to detect, but this GLR is more computationally intensive than the LR.
\subsubsection{Practical use}
For using \code{glrnb} one has two choices to make. First, one has to choose an \textit{in-control} model that will be fitted on phase 1 data.
One can either provide the predictions for the vector of \textit{in-control} means \code{mu0} and the overdispersion parameter \code{alpha} by relying on an external fit, or use the built-in GLM estimator, which will use all data before the beginning of the surveillance range to fit a GLM with the number of harmonics \code{S} and a time trend if \code{trend} is \code{TRUE}. The choice of the exact \textit{in-control} model depends on the data under surveillance. Performing model selection is a compulsory step in practical applications.
Then, one needs to tune the surveillance function itself, for one of the two possible change forms, \code{intercept} or \code{epi}. One can either set \code{theta} to a given value and thus perform an LR-based detection, or leave \code{theta} unspecified and use the GLR. The value of \code{theta} has to be adapted to the specific context in which the algorithm is applied: how big are the shifts one wants to detect optimally? Is it better not to specify any and use the GLR instead? The threshold \texttt{c.ARL} also has to be specified by the user. As explained in \citet{hoehle-mazick-2010}, one can compute the threshold for a desired run-length in control through direct Monte Carlo simulation or a Markov chain approximation. Lastly, as mentioned in \citet{hoehle.paul2008}, a window-limited approach to surveillance, which does not look at all timepoints back to the first observation, can make computation faster.
Here we apply \code{glrnb} to the time series of report counts of \textit{Salmonella Newport} in Germany by assuming a known multiplicative shift of factor $2$ and by using the built-in estimator to fit an \textit{in-control} model with one harmonic for seasonality and a trend. This model will be refitted after each alarm, but first we use data from the years before 2011 as reference or \code{phase1}, and the data from 2011 as data to be monitored or \code{phase2}. The threshold \texttt{c.ARL} was chosen to be 4, as we found, using the same approach as \citet{hoehle-mazick-2010}, that it made the probability of a false alarm within one year smaller than 0.1. Figure~\ref{fig:glrnb} shows the results of this monitoring.
<>=
phase1 <- which(isoWeekYear(epoch(salmNewportGermany))$ISOYear < 2011)
phase2 <- in2011
control <- list(range = phase2, c.ARL = 4, theta = log(2), ret = "cases",
                mu0 = list(S = 1, trend = TRUE, refit = FALSE))
salmGlrnb <- glrnb(salmNewportGermany, control = control)
@
\setkeys{Gin}{height=7cm, width=15cm}
\begin{figure}
\begin{center}
<>=
y.max <- max(observed(salmGlrnb),upperbound(salmGlrnb),na.rm=TRUE)
do.call("plot",modifyList(plotOpts,list(x=salmGlrnb,ylim=c(0,y.max))))
@
\end{center}
\vspace{-1cm}
\caption{S. Newport in Germany in 2011 monitored by the \texttt{glrnb} function.}
\label{fig:glrnb}
\end{figure}
The implementation of \code{glrnb} on individual time series was already thoroughly explained in \citet{hoehle-mazick-2010}. Our objective in the present document is rather to provide practical tips for the implementation of this function on huge amounts of data in public health surveillance applications. Issues of computational speed become very significant in such a context. Our proposal to reduce the computational burden incurred by this algorithm is to compute the \textit{in-control} model for each time series (pathogen, subtype, subtype in a given location, etc.) only once a year and to use this estimation for the computation of a threshold for each time series.
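To make the mechanics of the CUSUM and the role of its starting value concrete, the following stand-alone sketch (not package code; the means and the threshold are hypothetical illustration values) implements the classical recursive form $C_{t_0}=\max\left\{0,\> C_{t_0-1} + \log f_{\bm{\theta}_1}(y_{t_0}|\bm{z}_{t_0}) - \log f_{\bm{\theta}_0}(y_{t_0}|\bm{z}_{t_0})\right\}$ of the LR detector for a Poisson count with a known multiplicative shift:
<<lrCusumSketch, eval=FALSE>>=
## Schematic LR-CUSUM recursion (illustration only, not package code)
mu0 <- 5; mu1 <- 2 * mu0; threshold <- 4   # hypothetical values
y <- rpois(52, lambda = mu0)               # simulated in-control counts
llr <- dpois(y, mu1, log = TRUE) - dpois(y, mu0, log = TRUE)
C <- 0                                     # CUSUM starts at zero
for (t in seq_along(y)) C[t + 1] <- max(0, C[t] + llr[t])
which(C[-1] >= threshold)[1]               # first alarm timepoint (NA if none)
@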
An idea to avoid starting with an initial value of zero in the CUSUM is to use either $\left(\frac{1}{2}\right)\cdot\mathtt{c.ARL}$ as a starting value (fast initial response CUSUM as presented in~\citet{lucas1982fast}) or to let surveillance run with the new \textit{in-control} model during a buffer period and use the resulting CUSUM as an initial value. One could also choose the maximum of these two possible starting values as a starting value. During the buffer period alarms would be generated with the old model.
Lastly, using the GLR is much more computationally intensive than using the LR, whereas the LR performs reasonably well on shifts different from the one indicated by \code{theta}, as seen in the simulation studies of~\citet{hoehle.paul2008}. Our advice would therefore be to use the LR with a reasonable predefined \code{theta}. The amount of historical data used each year to update the model, the length of the buffer period and the value of \code{theta} have to be fixed for each specific application, e.g., using simulations and/or discussion with experts.
\subsubsection{Similar methods in the package}
The algorithm \code{glrPois} is the same function as \code{glrnb} but for Poisson-distributed data. Other CUSUM methods for count data are found in the package: \code{cusum} and \code{rogerson}. Both methods are discussed and compared to \code{glrnb} in \citet{hoehle.paul2008}. The package also includes a semi-parametric method \code{outbreakP} that aims at detecting changes from a constant level to a monotonically increasing incidence, for instance the beginning of the influenza season. See Table~\ref{table:ref} for the corresponding references.
\subsection{A method for monitoring categorical data}
All monitoring methods presented up to now have been methods for analysing count data. Nevertheless, in public health surveillance one also encounters categorical time series, i.e., time series where the response variable takes one of $k\geq2$ different categories (nominal or ordinal). When $k=2$ the time series is binary, for instance representing a specific outcome in cases such as hospitalization, death or a positive result to some diagnostic test. One can also think of applications with $k>2$ if one studies, e.g., the age groups of the cases in the context of monitoring a vaccination program: vaccination targeted at children could induce a shift towards older cases which one wants to detect as quickly as possible -- this will be explained thoroughly with an example.
The development of prospective surveillance methods for such categorical time series was until recently limited to CUSUM-based approaches for binary data such as those explained in~\citet{Chen1978},~\citet{Reynolds2000} and~\citet{rogerson_yamada2004}. Besides being suitable only for binary data, these methods have the drawback of not handling overdispersion. A method improving on these two limitations while casting the problem into a more comprehensive GLM regression framework for categorical data was presented in~\citet{hoehle2010}. It is implemented as the function \code{categoricalCUSUM}.
The way \code{categoricalCUSUM} operates is very similar to what \code{glrnb} does with a fixed \textit{out-of-control} parameter. First, the parameters in a multivariate GLM for the \textit{in-control} distribution are estimated from the historical data. Then the \textit{out-of-control} distribution is defined by a given change in the parameters of this GLM, e.g., an intercept change, as explained later.
Lastly, prospective monitoring is performed on current data using a likelihood ratio detector which compares the likelihood of the response under the \textit{in-control} and \textit{out-of-control} distributions.
\subsubsection{Categorical CUSUM for binomial models}
The challenge when performing these steps with categorical data from surveillance systems is finding an appropriate model. Binary GLMs as presented in Chapter~6 of \citet{Fahrmeir.etal2013} could be a solution, but they do not tackle well the inherent overdispersion in the binomial time series. Of course, one could choose a quasi family, but these are not proper statistical distributions, which makes many issues such as prediction complicated. A better alternative is offered by the use of \textit{generalized additive models for location, scale and shape} \citep[GAMLSS,][]{Rigby2005}, which support distributions such as the beta-binomial distribution, suitable for overdispersed binary data. With GAMLSS one can model the dependency of the mean -- \textit{location} -- upon explanatory variables, but the regression modeling is also extended to other parameters of the distribution, e.g., scale. Moreover, any modelled parameter can be put under surveillance, be it the mean (as in the example later developed) or the time trend in the linear predictor of the mean. This very flexible modeling framework is implemented in \proglang{R} through the \pkg{gamlss} package~\citep{StasJSS}.
As an example we consider the time series of the weekly number of hospitalized cases among all \textit{Salmonella} cases in Germany in Jan 2004--Jan 2014, depicted in Figure~\ref{fig:cat1}. We use 2004--2012 data to estimate the \textit{in-control} parameters and then perform surveillance on the data from 2013 and early 2014. We start by preprocessing the data.
<>=
data("salmHospitalized")
isoWeekYearData <- isoWeekYear(epoch(salmHospitalized))
dataBefore2013 <- which(isoWeekYearData$ISOYear < 2013)
data2013 <- which(isoWeekYearData$ISOYear == 2013)
dataEarly2014 <- which(isoWeekYearData$ISOYear == 2014
                       & isoWeekYearData$ISOWeek <= 4)
phase1 <- dataBefore2013
phase2 <- c(data2013, dataEarly2014)
salmHospitalized.df <- cbind(as.data.frame(salmHospitalized),
                             weekNumber = isoWeekYearData$ISOWeek)
names(salmHospitalized.df) <- c("y", "t", "state", "alarm", "upperbound", "n",
                                "freq", "epochInPeriod", "weekNumber")
@
We assume that the number of hospitalized cases follows a beta-binomial distribution, i.e., $y_t \sim \BetaBin(n_t,\pi_t,\sigma)$ with $n_t$ the total number of reported cases at time $t$, $\pi_t$ the proportion of these cases that were hospitalized and $\sigma$ the dispersion parameter. In this parametrization,
$$\E(y_t)=n_t \pi_t \quad \text{and} \quad \Var(y_t)=n_t \pi_t(1-\pi_t)\left( 1 + \frac{\sigma(n_t-1)}{\sigma+1} \right) \,.$$
We choose to model the expectation $n_t \pi_t$ using a beta-binomial model with a logit link, which is a special case of a GAMLSS, i.e.,
$$\logit(\pi_t)=\bm{z}_t^\top\bm{\beta}$$
where $\bm{z}_t$ is a vector of possibly time-varying covariates and $\bm{\beta}$ a vector of covariate effects in our example.
<>=
y.max <- max(observed(salmHospitalized)/population(salmHospitalized),upperbound(salmHospitalized)/population(salmHospitalized),na.rm=TRUE)
plotOpts2 <- modifyList(plotOpts,list(x=salmHospitalized,legend.opts=NULL,ylab="",ylim=c(0,y.max)),keep.null=TRUE)
plotOpts2$xaxis.tickFreq <- list("%G"=atChange,"%m"=atChange)
plotOpts2$par.list <- list(mar=c(6,5,5,5),family="Times",las=1)
do.call("plot",plotOpts2)
lines(salmHospitalized@populationFrac/4000,col="grey80",lwd=2)
axis(side=4, at=seq(0,2000,by=500)/4000, labels=as.character(seq(0,2000,by=500)), las=1,
     cex=2, cex.axis=1.5, pos=length(observed(salmHospitalized))+20)
par(family="Times")
text(-20, 0.6, "Proportion", pos = 3, xpd = T, cex=cex.text)
text(520, 0.6, "Total number of \n reported cases", pos = 3, xpd = T, cex=cex.text)
@
\begin{figure}
\begin{center}
<>=
<>
@
\end{center}
\vspace{-1cm}
\caption{Weekly proportion of Salmonella cases that were hospitalized in Germany 2004--2014. In addition, the corresponding total number of reported cases is shown as a light curve.}
\label{fig:cat1}
\end{figure}
The proportion of hospitalized cases varies throughout the year as seen in Figure~\ref{fig:cat1}. One observes that in the summer the proportion of hospitalized cases is smaller than in other seasons. However, over the holidays in December the proportion of hospitalized cases increases. Note that the number of non-hospitalized cases drops while the number of hospitalized cases remains constant (data not shown): this might be explained by the fact that cases that are not serious enough to go to the hospital are not seen by general practitioners because sick workers do not need a sick note during the holidays. Therefore, the \textit{in-control} model should contain these elements, as well as the fact that there is an increasing trend in the proportion because general practitioners prescribe fewer and fewer stool diagnoses, so that more diagnoses are done on hospitalized cases.
We choose a model with an intercept, a time trend, two harmonic terms and a factor variable for the first two weeks of each year. The variable \code{epochInPeriod} takes into account the fact that not all years have 52 weeks.
<>=
vars <- c( "y", "n", "t", "epochInPeriod", "weekNumber")
m.bbin <- gamlss(cbind(y, n-y) ~ 1 + t
                 + sin(2 * pi * epochInPeriod) + cos(2 * pi * epochInPeriod)
                 + sin(4 * pi * epochInPeriod) + cos(4 * pi * epochInPeriod)
                 + I(weekNumber == 1) + I(weekNumber == 2),
                 sigma.formula =~ 1,
                 family = BB(sigma.link = "log"),
                 data = salmHospitalized.df[phase1, vars])
@
The change we aim to detect is defined by a multiplicative change of odds, from $\frac{\pi_t^0}{1-\pi_t^0}$ to $R\cdot\frac{\pi_t^0}{1-\pi_t^0}$ with $R>0$, similar to what was done in~\citet{Steiner1999} for the logistic regression model. This is equivalent to an additive change of the log-odds,
$$\logit(\pi_t^1)=\logit(\pi_t^0)+\log R$$
with $\pi_t^0$ being the \textit{in-control} proportion and $\pi_t^1$ the \textit{out-of-control} proportion. The likelihood ratio based CUSUM statistic is now defined as
$$C_{t_0}=\max_{1\leq t \leq {t_0}}\left( \sum_{s=t}^{t_0} \log \left( \frac{f(y_s;\bm{z}_s,\bm{\theta}_1)}{f(y_s;\bm{z}_s,\bm{\theta}_0)} \right) \right)$$
with $\bm{\theta}_0$ and $\bm{\theta}_1$ being the vectors of \textit{in-control} and \textit{out-of-control} parameters, respectively. Given a threshold \code{h}, an alarm is sounded at the first time when $C_{t_0}>\mathtt{h}$.
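As a quick numeric illustration of such an odds shift (with hypothetical numbers, not taken from the data): doubling the odds, i.e., $R=2$, moves an \textit{in-control} proportion of $0.10$ to an \textit{out-of-control} proportion of about $0.18$:
<<oddsShiftSketch, eval=FALSE>>=
## Doubling the odds (R = 2) of a hypothetical in-control proportion of 0.10
plogis(qlogis(0.10) + log(2))  # out-of-control proportion, ca. 0.182
@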
We set the parameters of the \code{categoricalCUSUM} to optimally detect a doubling of the odds in 2013 and 2014, i.e., $R=2$. Furthermore, for now we set the threshold of the CUSUM at $h=2$. We use the GAMLSS to predict the mean of the \textit{in-control} and \textit{out-of-control} distributions and store them in matrices with two rows, of which the second one represents the reference category.
<>=
R <- 2
h <- 2
pi0 <- predict(m.bbin, newdata = salmHospitalized.df[phase2, vars],
               type = "response")
pi1 <- plogis(qlogis(pi0) + log(R))
pi0m <- rbind(pi0, 1 - pi0)
pi1m <- rbind(pi1, 1 - pi1)
@
Note that the \code{categoricalCUSUM} function is constructed to operate on the observed slot of \code{sts}-objects which have as columns the number of cases in each category at each timepoint, \textit{i.e.}, each row of the observed slot contains the elements $(y_{t1},...,y_{tk})$.
<>=
populationHosp <- unname(cbind(
  population(salmHospitalized),
  population(salmHospitalized)))
observedHosp <- cbind(
  "Yes" = as.vector(observed(salmHospitalized)),
  "No" = as.vector(population(salmHospitalized) - observed(salmHospitalized)))
salmHospitalized.multi <- sts(
  freq = 52, start = c(2004, 1),
  epoch = epoch(salmHospitalized),
  observed = observedHosp,
  population = populationHosp,
  multinomialTS = TRUE)
@
Furthermore, one needs to define a wrapper for the distribution function in order to have an argument named \code{"mu"} in the function.
<>=
dBB.cusum <- function(y, mu, sigma, size, log = FALSE) {
  dBB(if (is.matrix(y)) y[1,] else y,
      if (is.matrix(y)) mu[1,] else mu,
      sigma = sigma, bd = size, log = log)
}
@
After these preliminary steps, the monitoring can be performed.
<>=
controlCat <- list(range = phase2, h = 2, pi0 = pi0m, pi1 = pi1m,
                   ret = "cases", dfun = dBB.cusum)
salmHospitalizedCat <- categoricalCUSUM(salmHospitalized.multi,
                                        control = controlCat,
                                        sigma = exp(m.bbin$sigma.coef))
@
The results can be seen in Figure~\ref{fig:catDouble}(a). With the given settings, there are alarms at week 16 in 2013 and at week 3 in 2014. The one in 2014 corresponds to the usual peak of the beginning of the year, which was larger than expected this year, maybe because the weekdays of the holidays were particularly worker-friendly so that sick notes were even less needed.
The value for the threshold \code{h} can be determined following the procedures presented in \citet{hoehle-mazick-2010} for count data, as in the code exhibited below. Two methods can be used for determining the probability of a false alarm within a pre-specified number of steps for a given value of the threshold \code{h}: a Monte Carlo method relying on, e.g., 1000 simulations, and a Markov Chain approximation of the CUSUM. The former is much more computationally intensive than the latter: with the code below, the Monte Carlo method needed approximately 300 times more time than the Markov Chain method. Since both results are close, we recommend the Markov Chain approximation for practical use.
The Monte Carlo method works by sampling observed values from the estimated distribution and performing monitoring with \code{categoricalCUSUM} on this \code{sts} object. As the observed values are sampled from the \textit{in-control} distribution, every alarm thus obtained is a false alarm, so that the simulations allow us to estimate the probability of a false alarm when monitoring \textit{in-control} data over the timepoints of \code{phase2}.
The Markov Chain approximation introduced by \citet{brook_evans1972} is implemented as \code{LRCUSUM.runlength}, which is also used for \code{glrnb}. Results from both methods can be seen in Figure~\ref{fig:catDouble}(b). We chose a value of 2 for \code{h} so that the probability of a false alarm within the 56 timepoints of \code{phase2} is less than $0.1$.
One first has to set the values of the threshold to be investigated and to prepare the function used for simulation, which draws observed values from the \textit{in-control} distribution, performs monitoring on the corresponding time series and indicates whether there was at least one alarm. Then, 1000 simulations are performed with a fixed seed value for the sake of reproducibility. Afterwards, we test the Markov Chain approximation using the function \code{LRCUSUM.runlength} over the same grid of values for the threshold.
<<>>=
h.grid <- seq(1, 10, by = 0.5)
@
<>=
simone <- function(sts, h) {
  y <- rBB(length(phase2), mu = pi0m[1, , drop = FALSE],
           bd = population(sts)[phase2, ],
           sigma = exp(m.bbin$sigma.coef))
  observed(sts)[phase2, ] <- cbind(y, population(sts)[phase2, 1] - y)
  one.surv <- categoricalCUSUM(sts, control = modifyList(controlCat, list(h = h)),
                               sigma = exp(m.bbin$sigma.coef))
  return(any(alarms(one.surv)[, 1]))
}
set.seed(123)
nSims <- 1000
pMC <- sapply(h.grid, function(h) {
  mean(replicate(nSims, simone(salmHospitalized.multi, h)))
})
pMarkovChain <- sapply(h.grid, function(h) {
  TA <- LRCUSUM.runlength(mu = pi0m[1,,drop = FALSE],
                          mu0 = pi0m[1,,drop = FALSE],
                          mu1 = pi1m[1,,drop = FALSE],
                          n = population(salmHospitalized.multi)[phase2, ],
                          h = h, dfun = dBB.cusum,
                          sigma = exp(m.bbin$sigma.coef))
  return(tail(TA$cdf, n = 1))
})
@
<>=
if (computeALL) {
  <>
  save(pMC, file = "monitoringCounts-cache/pMC.RData")
  save(pMarkovChain, file = "monitoringCounts-cache/pMarkovChain.RData")
} else {
  load("monitoringCounts-cache/pMC.RData")
  load("monitoringCounts-cache/pMarkovChain.RData")
}
@
\setkeys{Gin}{height=7cm, width=9cm}
\begin{figure}
\hspace{-1em}
\subfloat[]{
<>=
y.max <- max(observed(salmHospitalizedCat[,1])/population(salmHospitalizedCat[,1]),upperbound(salmHospitalizedCat[,1])/population(salmHospitalizedCat[,1]),na.rm=TRUE)
plotOpts3 <- modifyList(plotOpts,list(x=salmHospitalizedCat[,1],ylab="Proportion",ylim=c(0,y.max)))
plotOpts3$legend.opts <- list(x="top",bty="n",legend=c(expression(U[t])),lty=1,lwd=line.lwd,col=alarm.symbol$col,horiz=TRUE,cex=cex.leg)
do.call("plot",plotOpts3)
@
}
\hspace{-3em}
\subfloat[]{
<>=
par(mar=c(6,5,5,5),family="Times")
matplot(h.grid, cbind(pMC,pMarkovChain),type="l",ylab=expression(P(T[A] <= 56 * "|" * tau * "=" * infinity)),xlab="Threshold h",col=1,cex=cex.text,
        cex.axis =cex.text,cex.lab=cex.text)
prob <- 0.1
lines(range(h.grid),rep(prob,2),lty=5,lwd=2)
axis(2,at=prob,las=1,cex.axis=0.7,labels=FALSE)
par(family="Times")
legend(4,0.08,c("Monte Carlo","Markov chain"),
       lty=1:2,col=1,cex=cex.text,bty="n")
@
}
\caption{(a) Results of the monitoring with categorical CUSUM of the proportion of Salmonella cases that were hospitalized in Germany in Jan 2013 - Jan 2014. (b) Probability of a false alarm within the 56 timepoints of the monitoring as a function of the threshold $h$.}
\label{fig:catDouble}
\end{figure}
The procedure for using the function for multicategorical variables follows the same steps (as illustrated later). Moreover, one could expand the approach to utilize the multiple regression possibilities offered by GAMLSS.
Here we chose to detect a change in the mean of the distribution of counts, but as GAMLSS provides more general regression tools than GLMs, we could also aim at detecting a change in the time trend included in the model for the mean.
\subsubsection{Categorical CUSUM for multinomial models}
In order to illustrate the use of \code{categoricalCUSUM} for more than two classes we analyse the monthly number of rotavirus cases in the federal state Brandenburg during 2002--2013, stratified into the five age groups 00--04, 05--09, 10--14, 15--69 and 70+ years. In 2006 two rotavirus vaccines were introduced, which are administered to children at the age of 4--6 months. Since then, coverage of this vaccination has steadily increased and the interest is to detect possible age shifts in the distribution of cases.
<>=
data("rotaBB")
plot(rotaBB)
@
\setkeys{Gin}{height=7cm, width=15cm}
\begin{figure}
%Remove this slot as soon as possible and replace it with just ROTAPLOT!!
<>=
par(mar=c(5.1,20.1,4.1,0),family="Times")
plot(rotaBB,xlab="Time (months)",ylab="",
     col="mediumblue",cex=cex.text,cex.lab=cex.text,cex.axis=cex.text,cex.main=cex.text,
     xaxis.tickFreq=list("%G"=atChange),
     xaxis.labelFreq=list("%G"=at2ndChange),
     xaxis.labelFormat="%G")
par(las=0,family="Times")
mtext("Proportion of reported cases", side=2, line=19, cex=1)
@
\caption{Monthly proportions in five age groups for the reported rotavirus cases in Brandenburg, Germany, \Sexpr{paste(format(range(epoch(rotaBB)),"%Y"),collapse="-")}.}
\label{fig:vac}
\end{figure}
From Figure~\ref{fig:vac} we observe a shift in proportion away from the very young. However, interpreting the proportions only makes sense in combination with the absolute numbers. In these plots (not shown) it becomes clear that the absolute numbers in the 0--4 year olds have decreased since 2009. However, in the 70+ group a small increase is observed, with 2013 being by far the strongest season so far.
<>=
# Select a palette for drawing
pal <- c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00")
#= RColorBrewer::brewer.pal("Set1",n=ncol(rotaBB))
# Show time series of monthly proportions (matplot does not work with dates)
plotTS <- function(prop=TRUE) {
  for (i in 1:ncol(rotaBB)) {
    fun <- if (i==1) plot else lines
    if (!prop) {
      fun(epoch(rotaBB),observed(rotaBB)[,i],type="l",xlab="Time (months)",ylab="Reported cases",ylim=c(0,max(observed(rotaBB))),col=pal[i],lwd=2)
    } else {
      fun(epoch(rotaBB),observed(rotaBB)[,i,drop=FALSE]/rowSums(observed(rotaBB)),type="l",xlab="Time (months)",ylab="Proportion of reported cases",ylim=c(0,max(observed(rotaBB)/rowSums(observed(rotaBB)))),col=pal[i],lwd=2)
    }
  }
  # Add legend
  axis(1,at=as.numeric(epoch(rotaBB)),labels=FALSE,tck=-0.01)
  legend(x="left",colnames(rotaBB),col=pal,lty=1,lwd=2,bg="white")
}
# plotTS(prop=TRUE)
# Show absolute cases
plotTS(prop=FALSE)
# Even easier
rotaBB.copy <- rotaBB ; rotaBB.copy@multinomialTS <- FALSE
plot(rotaBB.copy)
@
Hence, our interest is in prospectively detecting a possible age shift. Since the vaccine was recommended for routine vaccination in Brandenburg in 2009, we choose to start the monitoring at that timepoint. We do so by fitting a multinomial logit-model, containing a trend as well as one harmonic wave and using the age group 0--4 years as reference category, to the data from the years 2002--2008.
Different \proglang{R} packages implement this type of modeling, but we shall use the \pkg{MGLM} package~\citep{MGLM}, because it also offers the fitting of extended multinomial regression models allowing for extra dispersion.
<<>>=
rotaBB.df <- as.data.frame(rotaBB)
X <- with(rotaBB.df, cbind(intercept = 1, epoch,
                           sin1 = sin(2 * pi * epochInPeriod),
                           cos1 = cos(2 * pi * epochInPeriod)))
phase1 <- epoch(rotaBB) < as.Date("2009-01-01")
phase2 <- !phase1
library("MGLM")
## MGLMreg automatically takes the last class as ref so we reorder
order <- c(2:5, 1); reorder <- c(5, 1:4)
m0 <- MGLMreg(as.matrix(rotaBB.df[phase1, order]) ~ -1 + X[phase1, ],
              dist = "MN")
@
As described in \citet{hoehle2010}, we can try to detect a specific shift in the intercept coefficients of the model. For example, a multiplicative shift in the odds of each of the four age categories against the reference category -- of factor 2 in the example below -- is modelled by changing the intercept value of each category. Based on this, the \textit{in-control} and \textit{out-of-control} proportions are easily computed using the \code{predict} function for \code{MGLMreg} objects.
<<>>=
m1 <- m0
m1@coefficients[1, ] <- m0@coefficients[1, ] + log(2)
pi0 <- t(predict(m0, newdata = X[phase2, ])[, reorder])
pi1 <- t(predict(m1, newdata = X[phase2, ])[, reorder])
@
With $\bm{\pi}^0$ and $\bm{\pi}^1$ in place, one only needs to define a wrapper function, as in the binomial example, which specifies the PMF of the sampling distribution -- in this case the multinomial -- in a \code{categoricalCUSUM} compatible way.
<>=
dfun <- function(y, size, mu, log = FALSE) {
  dmultinom(x = y, size = size, prob = mu, log = log)
}
h <- 2 # threshold for the CUSUM statistic
control <- list(range = seq(nrow(rotaBB))[phase2], h = h, pi0 = pi0,
                pi1 = pi1, ret = "value", dfun = dfun)
surv <- categoricalCUSUM(rotaBB,control=control)
@
<>=
alarmDates <- epoch(surv)[which(alarms(surv)[,1]==1)]
format(alarmDates,"%b %Y")
@
<>=
#Number of MC samples
nSamples <- 1e4
#Do MC
simone.stop <- function(sts, control) {
  phase2Times <- seq(nrow(sts))[phase2]
  #Generate new phase2 data from the fitted in control model
  y <- sapply(1:length(phase2Times), function(i) {
    rmultinom(n=1, prob=pi0[,i],size=population(sts)[phase2Times[i],1])
  })
  observed(sts)[phase2Times,] <- t(y)
  one.surv <- categoricalCUSUM(sts, control=control)
  #compute P(S<=length(phase2))
  return(any(alarms(one.surv)[,1]>0))
}
set.seed(1233)
rlMN <- replicate(nSamples, simone.stop(rotaBB, control=control))
mean(rlMN) # 0.5002
@
The resulting CUSUM statistic $C_t$ as a function of time is shown in Figure~\ref{fig:ct}(a). The first time an aberration is detected is July 2009. Using 10000 Monte Carlo simulations we estimate that with the chosen threshold $h=2$ the probability for a false alarm within the 60 time points of \code{phase2} is 0.02.
As the above example shows, the LR based categorical CUSUM is rather flexible in handling any type of multivariate GLM modeling to specify the \textit{in-control} and \textit{out-of-control} proportions. However, it requires a direction of the change to be specified -- for which detection is optimal. One sensitive part of such monitoring is the fit of the multinomial distribution to a multivariate time series of proportions, which usually exhibit extra dispersion when compared to the multinomial.
For example, comparing the AIC between the multinomial logit-model and a Dirichlet-multinomial model with $\alpha_{ti} = \exp(\bm{x}_t^\top\bm{\beta})$~\citep{MGLM} shows that overdispersion is present. The Dirichlet-multinomial distribution is the multicategorical equivalent of the beta-binomial distribution. We exemplify its use in the code below.

<<>>=
m0.dm <- MGLMreg(as.matrix(rotaBB.df[phase1, 1:5]) ~ -1 + X[phase1, ],
                 dist = "DM")
c(m0@AIC, m0.dm@AIC)
@

Hence, the above estimated false alarm probability might be too low for the actual monitoring problem, because the variation in the time series is larger than implied by the multinomial. It therefore appears prudent to repeat the analysis using the more flexible Dirichlet-multinomial model. This is straightforward with \code{categoricalCUSUM} once the \textit{out-of-control} proportions are specified in terms of the model. Such a specification is, however, hampered by the fact that the two models use different parametrizations. For performing monitoring in this new setting we first need to calculate the $\alpha$'s of the Dirichlet-multinomial for the \textit{in-control} and \textit{out-of-control} distributions.

<<>>=
## Change intercept in the first class (for DM all 5 classes are modeled)
delta <- 2
m1.dm <- m0.dm
m1.dm@coefficients[1, ] <- m0.dm@coefficients[1, ] +
  c(-delta, rep(delta/4, 4))

alpha0 <- exp(X[phase2,] %*% m0.dm@coefficients)
alpha1 <- exp(X[phase2,] %*% m1.dm@coefficients)

dfun <- function(y, size, mu, log = FALSE) {
  dLog <- ddirmn(t(y), t(mu))  # ddirmn returns the log of the DM pmf
  if (log) dLog else exp(dLog)
}

h <- 2
control <- list(range = seq(nrow(rotaBB))[phase2],
                h = h, pi0 = t(alpha0), pi1 = t(alpha1),
                ret = "value", dfun = dfun)
surv.dm <- categoricalCUSUM(rotaBB, control = control)
@

<>=
matplot(alpha0/rowSums(alpha0),type="l",lwd=3,lty=1,ylim=c(0,1))
matlines(alpha1/rowSums(alpha1),type="l",lwd=1,lty=2)
@

\setkeys{Gin}{height=7cm, width=9cm}
\begin{figure}
\hspace{-1em}
\subfloat[]{
<>=
surv@observed[,1] <- 0
surv@multinomialTS <- FALSE
surv.dm@observed[,1] <- 0
surv.dm@multinomialTS <- FALSE
y.max <- max(observed(surv.dm[,1]),upperbound(surv.dm[,1]),observed(surv[,1]),upperbound(surv[,1]),na.rm=TRUE)
plotOpts3 <- modifyList(plotOpts,list(x=surv[,1],ylim=c(0,y.max),ylab=expression(C[t]),xlab=""))
plotOpts3$legend.opts <- list(x="topleft",bty="n",legend="R",lty=1,lwd=line.lwd,col=alarm.symbol$col,horiz=TRUE,cex=cex.leg)
do.call("plot",plotOpts3)
lines( c(0,1e99), rep(h,2),lwd=2,col="darkgray",lty=1)
par(family="Times")
mtext(side=1,text="Time (months)", las=0,line=3, cex=cex.text)
@
}
\hspace{-3em}
\subfloat[]{
<>=
plotOpts3 <- modifyList(plotOpts,list(x=surv.dm[,1],ylim=c(0,y.max),ylab=expression(C[t]),xlab=""))
plotOpts3$legend.opts <- list(x="topleft",bty="n",legend="R",lty=1,lwd=line.lwd,col=alarm.symbol$col,horiz=TRUE,cex=cex.text)
y.max <- max(observed(surv.dm[,1]),upperbound(surv.dm[,1]),observed(surv[,1]),upperbound(surv[,1]),na.rm=TRUE)
do.call("plot",plotOpts3)
lines( c(0,1e99), rep(h,2),lwd=2,col="darkgray",lty=1)
par(family="Times")
mtext(side=1,text="Time (months)", las=0,line=3, cex=cex.text)
@
}
\caption{Categorical CUSUM statistic $C_t$. Once $C_t>\Sexpr{h}$ an alarm is sounded and the statistic is reset. In (a) the surveillance is based on the multinomial distribution and in (b) on the Dirichlet-multinomial distribution.}
\label{fig:ct}
\end{figure}

The resulting CUSUM statistic $C_t$ using the Dirichlet-multinomial distribution is shown in Figure~\ref{fig:ct}(b).
We notice a rather similar behavior even though the shift-type specified by this model is slightly different from that in the model of Figure~\ref{fig:ct}(a).

\subsubsection{Categorical data in routine surveillance}

The multidimensionality of data available in public health surveillance creates many opportunities for the analysis of categorical time series, for example the sex ratio of cases of a given disease, the age group distribution, or the regions sending data. If one is interested in monitoring with respect to a categorical variable, a choice has to be made between monitoring each time series individually, for instance a time series of \textit{Salmonella} cases for each age group, or monitoring the distribution of cases with respect to that factor jointly \textit{via} \code{categoricalCUSUM}. A downside of the latter solution is that one has to specify the change parameter \code{R} in advance, which can be quite a hurdle if one has no pre-conceived idea of what could happen for, say, the age shift after the introduction of a vaccine. Alternatively, one could employ an ensemble of monitors or monitor an aggregate. However, more straightforward applications can be found in the (binomial) surveillance of positive diagnostics when laboratory test data, and not only data about confirmed cases, are available. An alternative would be to apply \code{farringtonFlexible} while using the number of tests as \code{populationOffset}.

\subsubsection{Similar methods in the package}

The package also offers another CUSUM method suitable for binary data: \code{pairedbinCUSUM} implements the method introduced by~\citet{Steiner1999}, which, unlike \code{glrnb}, does not take overdispersion into account. The algorithm \code{rogerson} also supports the analysis of binomial data. See Table~\ref{table:ref} for the corresponding references.

\subsection{Other algorithms implemented in the package}

We conclude this description of surveillance methods by giving an overview of all algorithms implemented in the package with the corresponding references in Table~\ref{table:ref}. One can refer to the respective reference articles and to the reference manual of the package for more information about each method. Criteria for choosing a method in practice are numerous. First, one needs to consider the amount of historical data at hand -- for instance the EARS methods only need data for the last timepoints, whereas the Farrington methods use data up to $b$ years in the past. Then one should consider the amount of past data used by the algorithm -- historical reference methods use only a subset of the past data, namely the timepoints located around the same timepoint in the past years, whereas other methods use all past data included in the reference data. This can be a criterion of choice, since one may prefer to use all available data. It is also important to decide whether one wants to detect one-timepoint aberrations or more prolonged shifts. Lastly, an important criterion is how much work is needed to fine-tune the algorithm for each specific time series. The package on the one hand provides the means for analysing nearly all types of surveillance data and on the other hand makes the comparison of algorithms possible. This is useful in practical applications when those algorithms are implemented for routine use, which will be the topic of Section~\ref{sec:routine}.

\begin{table}[t!]
\centering
\begin{tabular}{lp{11cm}}
\hline
Function & References \\ \hline
\code{bayes} & \citet{riebler2004} \\
\code{boda} & \citet{Manitz2013} \\
\code{bodaDelay} & \citet{Maelle} \\
\code{categoricalCUSUM} & \citet{hoehle2010}\\
\code{cdc} & \citet{stroup89,farrington2003} \\
\code{cusum} & \citet{rossi_etal99,pierce_schafer86} \\
\code{earsC} & \citet{SIM:SIM3197} \\
\code{farrington} & \citet{farrington96} \\
\code{farringtonFlexible} & \citet{farrington96,Noufaily2012} \\
\code{glrnb} & \citet{hoehle.paul2008} \\
\code{glrpois} & \citet{hoehle.paul2008} \\
\code{outbreakP} & \citet{frisen_etal2009,fri2009} \\
\code{pairedbinCUSUM} & \citet{Steiner1999} \\
\code{rki} & Not available -- unpublished \\
\code{rogerson} & \citet{rogerson_yamada2004} \\
\hline
\end{tabular}
\caption{Algorithms for aberration detection implemented in \pkg{surveillance}.}
\label{table:ref}
\end{table}

\section[Implementing surveillance in routine monitoring]{Implementing \pkg{surveillance} in routine monitoring}
\label{sec:routine}
\label{sec:3}

Combining \pkg{surveillance} with other \proglang{R} packages and programs is easy, allowing the integration of the aberration detection into a comprehensive surveillance system to be used in routine practice. In our opinion, such a surveillance system has to at least support the following process: loading data from local databases, analysing them within \pkg{surveillance}, and sending the results of this analysis to the end-user, who is typically an epidemiologist in charge of the specific pathogen. This section exemplifies the integration of the package into a whole analysis stack, first through the introduction of a simple workflow from data query to a \code{Sweave}~\citep{sweave} or \pkg{knitr}~\citep{knitr} report of signals, and secondly through the presentation of the more elaborate system in use at the German Robert Koch Institute.

\subsection{A simple surveillance system}

Suppose you have a database with surveillance time series but few resources to build a surveillance system encompassing all the above stages. Using \proglang{R} and \code{Sweave} or \code{knitr} for \LaTeX~you can still set up a simple surveillance analysis without having to do everything by hand. You only need to input the data into \proglang{R} and create an \code{sts} object for each time series of interest, as explained thoroughly in~\citet{hoehle-mazick-2010}. Then, after choosing a surveillance algorithm, say \code{farringtonFlexible}, and feeding it with the appropriate \code{control} argument, you can get an \code{sts} object with upperbounds and alarms for each of your time series of interest over the \code{range} supplied in \code{control}. For defining the range automatically one could use the \proglang{R} function \code{Sys.Date()} to get today's date. These steps can be introduced as a code chunk in a \code{Sweave} or \pkg{knitr} document that will be translated into a report that you can send to the epidemiologists in charge of the respective pathogens whose cases are monitored. Below is an example of a short code segment showing the analysis of the \textit{S. Newport} weekly counts of cases in the German federal states Baden-W\"{u}rttemberg and North Rhine-Westphalia with the improved method implemented in \code{farringtonFlexible}. The package provides a \code{toLatex} method for \code{sts} objects that produces a table with the observed counts and the upperbound for each column in \code{observed}, where alarms can be highlighted, for instance by bold text.
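As an aside, the analysis \code{range} for such routine use need not be hard-coded. The following snippet is purely illustrative (it assumes weekly data in an \code{sts} object called \code{salmNewport} with \code{Date} epochs, as in the example below) and shows one way to automatically select the five most recent weeks:

<<eval=FALSE>>=
## Hypothetical automatic range: the five most recent weeks up to today
## (assumes at least one epoch lies in the past)
today <- max(which(epoch(salmNewport) <= Sys.Date()))
rangeAnalysis <- (today - 4):today
@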
The table resulting from the \code{toLatex} call below is shown in Table~\ref{tableResults}.

<<>>=
today <- which(epoch(salmNewport) == as.Date("2013-12-23"))
rangeAnalysis <- (today - 4):today
in2013 <- which(isoWeekYear(epoch(salmNewport))$ISOYear == 2013)

algoParameters <- list(range = rangeAnalysis, noPeriods = 10,
                       populationBool = FALSE, b = 4, w = 3,
                       weightsThreshold = 2.58, pastWeeksNotIncluded = 26,
                       pThresholdTrend = 1, thresholdMethod = "nbPlugin",
                       alpha = 0.05, limit54 = c(0, 50))

results <- farringtonFlexible(salmNewport[, c("Baden.Wuerttemberg",
                                              "North.Rhine.Westphalia")],
                              control = algoParameters)
@

<>=
start <- isoWeekYear(epoch(salmNewport)[min(rangeAnalysis)])
end <- isoWeekYear(epoch(salmNewport)[max(rangeAnalysis)])
caption <- paste0("Results of the analysis of reported S. Newport ",
                  "counts in two German federal states for the weeks ",
                  start$ISOYear, "-W", start$ISOWeek, " to ",
                  end$ISOYear, "-W", end$ISOWeek,
                  ". Bold red counts indicate weeks with alarms.")
toLatex(results, caption = caption, label = "tableResults",
        ubColumnLabel = "Threshold", include.rownames = FALSE,
        sanitize.text.function = identity)
@

The advantage of this approach is that it can be made automatic. The downside of such a system is that the report is not interactive; for instance, one cannot click on the cases and get the linelist. Nevertheless, this is a workable solution in many cases -- especially when human and financial resources are scarce. In the next section, we present a more advanced surveillance system built on the package.

\subsection{Automatic detection of outbreaks at the Robert Koch Institute}
\label{sec:RKI}

The package \pkg{surveillance} was used as a core building block for designing and implementing the automated outbreak detection system at the RKI in Germany~\citep{Dirk}. The text below describes the system as it was in early 2014. Due to the Infection Protection Act (IfSG), the RKI receives over 1,000 notifiable disease reports daily. The system analyses about half a million time series per day to identify possible aberrations in the reported number of cases. Structurally, it consists of two components: an analytical process written in \proglang{R} that monitors the data daily, and a reporting component that compiles and communicates the results to the epidemiologists.

The analysis task in the described version of the system relied on \pkg{surveillance} and three other \proglang{R} packages, namely \pkg{data.table}, \pkg{RODBC} and \pkg{testthat}, as described in the following. The data-backend is an OLAP-system~\citep{SSAS} and relational databases, which are queried using \pkg{RODBC}~\citep{rodbc2013}. The case reports are then rapidly aggregated into univariate time series using \pkg{data.table}~\citep{datatable2013}. To each time series, represented as a univariate \code{sts} object, we apply the \code{farringtonFlexible} algorithm and store the analysis results in another SQL-database. We make intensive use of \pkg{testthat}~\citep{testthat2013} for automatic testing of the component. Although \proglang{R} is not the typical language for writing bigger software components for production, choosing \proglang{R} in combination with \pkg{surveillance} enabled us to quickly develop the analysis workflow. We can hence report positive experience using \proglang{R} also for larger software components in production. The reporting component was realized using Microsoft Reporting Services~\citep{SSRS}, because this technology is widely used within the RKI.
It allows quick development of reports and works well with existing Microsoft Office tools, which the end-user, the epidemiologist, is used to. For example, one major requirement of the epidemiologists was to have the results compiled as Excel documents. Moreover, pathogen-specific reports are automatically sent once a week by email to the epidemiologists in charge of the respective pathogens.

Having state-of-the-art detection methods already implemented in \pkg{surveillance} helped us to focus on other challenges during development, such as bringing the system into the organization's workflow and finding ways to efficiently and effectively analyse about half a million time series per day. In addition, major developments in the \proglang{R} component can be shared with the community and are thus available to other public health institutes as well.

\section{Discussion}
\label{sec:4}

The \proglang{R} package \pkg{surveillance} was initially created as an implementational framework for the development and the evaluation of outbreak detection algorithms in routinely collected public health surveillance data. Over the years it has increasingly also become a tool for the use of surveillance in routine practice. The description presented here aimed at showing the potential of the package for aberration detection. Other functions offered by the package for modeling~\citep{meyer.etal2014}, nowcasting~\citep{hoehle-heiden} or back-projection of incidence cases~\citep{becker_marschner93} are documented elsewhere and contribute to widening the scope of possible analyses in infectious disease epidemiology when using \pkg{surveillance}.

Future areas of interest for the package are, e.g., to better take into account the multivariate and hierarchical structure of the data streams analysed. Another important topic is the adjustment for reporting delays when performing the surveillance~\citep{Maelle}.

The package can be obtained from CRAN and resources for learning its use are listed in the documentation section of the project (\url{https://surveillance.R-Forge.R-project.org/}). Like all \proglang{R} packages, \pkg{surveillance} is distributed with a manual describing each function with corresponding examples. The manual, the present article and two previous ones~\citep{hoehle-2007, hoehle-mazick-2010} form a good basis for getting started with the package. The data and analysis of the present manuscript are accessible as the vignette \texttt{"monitoringCounts.Rnw"} in the package. Since all functionality is available just at the cost of learning \proglang{R}, we hope that parts of the package can be useful in health facilities around the world. Even though the package is tailored for surveillance in public health contexts, properties such as overdispersion, low counts and the presence of past outbreaks apply to a wide range of count and categorical time series in other surveillance contexts, such as financial surveillance~\citep{frisen2008financial}, occupational safety monitoring~\citep{accident} or environmental surveillance~\citep{Radio}.

Other \proglang{R} packages may also be of interest to \pkg{surveillance} users. Statistical process control is offered by two other packages, \pkg{spc}~\citep{spc} and \pkg{qcc}~\citep{qcc}.
The package \pkg{strucchange} allows detecting structural changes in general parametric models including GLMs~\citep{strucchange}, while the package \pkg{tscount} provides methods for regression and (retrospective) intervention analysis for count time series based on GLMs~\citep{tscount, liboschik_tscount_2015}. For epidemic modelling and outbreaks, packages such as \pkg{EpiEstim}~\citep{EpiEstim}, \pkg{outbreaker}~\citep{outbreaker} and \pkg{OutbreakTools}~\citep{OutbreakTools} offer good functionalities for investigating outbreaks that may for instance have been detected through the use of \pkg{surveillance}. They are listed on the website of the \textit{\proglang{R}-epi project} (\url{https://sites.google.com/site/therepiproject}), which was initiated for compiling information about \proglang{R} tools useful for infectious disease epidemiology. Another software of interest for aberration detection is \pkg{SaTScan}~\citep{SaTScan}, which allows the detection of spatial, temporal and space-time clusters of events -- note that it is not an \proglang{R} package.

Code contributions to the package are very welcome, as are feedback and suggestions for improving the package.

\section*{Acknowledgments}

The authors would like to express their gratitude to all contributors to the package, in particular Juliane Manitz, University of G\"{o}ttingen, Germany, for her work on the \texttt{boda} code, and Angela Noufaily, The Open University, Milton Keynes, UK, for providing us with the code used in her article that we extended for \texttt{farringtonFlexible}. The work of M. Salmon was financed by a PhD grant of the RKI.

\bibliography{monitoringCounts,references}
\end{document}
surveillance/vignettes/references.bib0000644000176200001440000003475314012513122017560 0ustar liggesusers
@Unpublished{altmann2003, author = {D. Altmann}, title = {The surveillance system of the {Robert Koch Institute}, {Germany}}, note = {Personal communication}, year = {2003}, }
@Book{andersson2000, title = {Stochastic Epidemic Models and their Statistical Analysis}, publisher = {Springer-Verlag}, year = {2000}, author = {H. Andersson and T. Britton}, volume = {151}, series = {Lecture Notes in Statistics}, }
@Article{czado-etal-2009, author = {Claudia Czado and Tilmann Gneiting and Leonhard Held}, title = {Predictive model assessment for count data}, journal = {Biometrics}, year = {2009}, volume = {65}, number = {4}, pages = {1254--1261}, doi = {10.1111/j.1541-0420.2009.01191.x}, }
@Book{Daley.Vere-Jones2003, title = {An Introduction to the Theory of Point Processes}, publisher = {Springer-Verlag}, year = {2003}, author = {Daley, Daryl J. and Vere-Jones, David}, editor = {Gani, Joseph M. and Heyde, Christopher C. and Kurtz, Thomas G.}, volume = {I: Elementary Theory and Methods}, series = {Probability and its Applications}, address = {New York}, edition = {2nd}, isbn = {0-387-95541-0}, }
@Book{Fahrmeir.etal2013, title = {Regression: Models, Methods and Applications}, publisher = {Springer-Verlag}, year = {2013}, author = {Ludwig Fahrmeir and Thomas Kneib and Stefan Lang and Brian Marx}, isbn = {978-3-642-34332-2}, doi = {10.1007/978-3-642-34333-9}, }
@Article{farrington96, author = {C. P. Farrington and N. J. Andrews and A. D. Beale and M. A. Catchpole}, title = {A statistical algorithm for the early detection of outbreaks of infectious disease}, journal = {Journal of the Royal Statistical Society.
Series A (Statistics in Society)}, year = {1996}, volume = {159}, pages = {547--563}, } @InCollection{farrington2003, author = {Paddy Farrington and Nick Andrews}, title = {Outbreak Detection: Application to Infectious Disease Surveillance}, booktitle = {Monitoring the Health of Populations}, publisher = {Oxford University Press}, year = {2003}, editor = {Ron Brookmeyer and Donna F. Stroup}, chapter = {8}, pages = {203--231}, } @Article{geilhufe.etal2012, author = {Marc Geilhufe and Leonhard Held and Stein Olav Skr{\o}vseth and Gunnar S. Simonsen and Fred Godtliebsen}, title = {Power law approximations of movement network data for modeling infectious disease spread}, journal = {Biometrical Journal}, year = {2014}, volume = {56}, number = {3}, pages = {363--382}, doi = {10.1002/bimj.201200262}, } @Article{gneiting-raftery-2007, author = {Tilmann Gneiting and Adrian E. Raftery}, title = {Strictly proper scoring rules, prediction, and estimation}, journal = {Journal of the American Statistical Association}, year = {2007}, volume = {102}, number = {477}, pages = {359--378}, doi = {10.1198/016214506000001437}, } @Article{held-etal-2005, author = {Leonhard Held and Michael H{\"o}hle and Mathias Hofmann}, title = {A statistical framework for the analysis of multivariate infectious disease surveillance counts}, journal = {Statistical Modelling}, year = {2005}, volume = {5}, number = {3}, pages = {187--199}, doi = {10.1191/1471082X05st098oa}, } @Article{held.paul2012, author = {Held, Leonhard and Paul, Michaela}, title = {Modeling seasonality in space-time infectious disease surveillance data}, journal = {Biometrical Journal}, year = {2012}, volume = {54}, number = {6}, pages = {824--843}, doi = {10.1002/bimj.201200037}, } @Article{herzog-etal-2010, author = {Herzog, S. A. and Paul, M. and Held, L.}, title = {Heterogeneity in vaccination coverage explains the size and occurrence of measles epidemics in {German} surveillance data}, journal = {Epidemiology and Infection}, year = {2011}, volume = {139}, number = {4}, pages = {505--515}, doi = {10.1017/S0950268810001664}, } @Article{hoehle-2007, author = {H{\"o}hle, M.}, title = {\texttt{surveillance}: {A}n \textsf{R} package for the monitoring of infectious diseases}, journal = {Computational Statistics}, year = {2007}, volume = {22}, number = {4}, pages = {571--582}, doi = {10.1007/s00180-007-0074-8}, } @Article{hoehle2009, author = {Michael H{\"o}hle}, title = {Additive-multiplicative regression models for spatio-temporal epidemics}, journal = {Biometrical Journal}, year = {2009}, volume = {51}, number = {6}, pages = {961--978}, doi = {10.1002/bimj.200900050}, } @Article{hoehle.anderheiden2014, author = {Michael H{\"o}hle and Matthias {an der Heiden}}, title = {{Bayesian} nowcasting during the {STEC} {O104:H4} outbreak in {Germany}, 2011}, journal = {Biometrics}, year = {2014}, volume = {70}, number = {4}, pages = {993--1002}, doi = {10.1111/biom.12194}, } @InCollection{hoehle-mazick-2010, author = {H{\"o}hle, M. and Mazick, A.}, title = {Aberration detection in \textsf{R} illustrated by {Danish} mortality monitoring}, booktitle = {Biosurveillance: Methods and Case Studies}, publisher = {Chapman \& Hall/CRC}, year = {2010}, editor = {Kass-Hout, T. 
and Zhang, X.}, chapter = {12}, pages = {215--238}, }
@Article{hoehle.paul2008, author = {Michael H{\"o}hle and Michaela Paul}, title = {Count data regression charts for the monitoring of surveillance time series}, journal = {Computational Statistics and Data Analysis}, year = {2008}, volume = {52}, number = {9}, pages = {4357--4368}, doi = {10.1016/j.csda.2008.02.015}, }
@Article{hughes.king2003, author = {Anthony W. Hughes and Maxwell L. King}, title = {Model selection using {AIC} in the presence of one-sided information}, journal = {Journal of Statistical Planning and Inference}, year = {2003}, volume = {115}, number = {2}, pages = {397--411}, doi = {10.1016/S0378-3758(02)00159-3}, }
@Article{hutwagner2005, author = {L. Hutwagner and T. Browne and G. M. Seeman and A. T. Fleischhauer}, title = {Comparing aberration detection methods with simulated data}, journal = {Emerging Infectious Diseases}, year = {2005}, volume = {11}, pages = {314--316}, doi = {10.3201/eid1102.040587}, }
@Article{bulletin3901, author = {{Robert Koch-Institut}}, title = {{G}ruppenerkrankung in {B}aden-{W}{\"u}rttemberg: {V}erdacht auf {K}ryptosporidiose}, journal = {Epidemiologisches Bulletin}, volume = {39}, year = {2001}, pages = {298--299}, }
@Book{Keeling.Rohani2008, title = {Modeling Infectious Diseases in Humans and Animals}, publisher = {Princeton University Press}, year = {2008}, author = {Matt J. Keeling and Pejman Rohani}, url = {http://www.modelinginfectiousdiseases.org/}, }
@Misc{survstat, author = {{Robert Koch-Institut}}, title = {{SurvStat@RKI}}, howpublished = {\url{https://survstat.rki.de/}}, year = {2004}, note = {Date of query: September 2004}, }
@Misc{survstat-fluByBw, author = {{Robert Koch-Institut}}, title = {{SurvStat@RKI}}, howpublished = {\url{https://survstat.rki.de/}}, year = {2009}, note = {Accessed March 2009}, }
@Article{lai95, author = {T. L. Lai}, title = {Sequential changepoint detection in quality control and dynamical systems}, journal = {Journal of the Royal Statistical Society.
Series B (Methodological)}, year = {1995}, volume = {57}, number = {4}, pages = {613--658}, } @Article{manitz.hoehle2013, author = {Juliane Manitz and Michael H{\"o}hle}, title = {Bayesian outbreak detection algorithm for monitoring reported cases of campylobacteriosis in {Germany}}, journal = {Biometrical Journal}, year = {2013}, volume = {55}, number = {4}, pages = {509--526}, doi = {10.1002/bimj.201200141}, } @Book{Martinussen.Scheike2006, title = {Dynamic Regression Models for Survival Data}, publisher = {Springer-Verlag}, year = {2006}, author = {Martinussen, Torben and Scheike, Thomas H.}, series = {Statistics for Biology and Health}, } @Article{meyer.etal2011, author = {Sebastian Meyer and Johannes Elias and Michael H{\"o}hle}, title = {A space-time conditional intensity model for invasive meningococcal disease occurrence}, journal = {Biometrics}, year = {2012}, volume = {68}, number = {2}, pages = {607--616}, doi = {10.1111/j.1541-0420.2011.01684.x}, } @Article{meyer.held2015, author = {Sebastian Meyer and Leonhard Held}, title = {Incorporating social contact data in spatio-temporal models for infectious disease spread}, journal = {Biostatistics}, year = {2017}, volume = {18}, number = {2}, pages = {338--351}, doi = {10.1093/biostatistics/kxw051}, } @Article{meyer.held2013, author = {Sebastian Meyer and Leonhard Held}, title = {Power-law models for infectious disease spread}, journal = {Annals of Applied Statistics}, year = {2014}, volume = {8}, number = {3}, pages = {1612--1639}, doi = {10.1214/14-AOAS743}, } @Article{meyer.etal2014, author = {Sebastian Meyer and Leonhard Held and Michael H{\"o}hle}, title = {Spatio-temporal analysis of epidemic phenomena using the \textsf{R} package \texttt{surveillance}}, journal = {Journal of Statistical Software}, year = {2017}, volume = {77}, number = {11}, pages = {1--55}, doi = {10.18637/jss.v077.i11}, } @Article{meyer.etal2015, author = {Sebastian Meyer and Ingeborg Warnke and Wulf R{\"o}ssler and Leonhard Held}, title = {Model-based testing for space-time interaction using point processes: {A}n application to psychiatric hospital admissions in an urban area}, journal = {Spatial and Spatio-temporal Epidemiology}, year = {2016}, volume = {17}, pages = {15--25}, doi = {10.1016/j.sste.2016.03.002}, } @Article{neal.roberts2004, author = {Neal, P. J. and Roberts, G. O.}, title = {Statistical inference and model selection for the 1861~{Hagelloch} measles epidemic}, journal = {Biostatistics}, year = {2004}, volume = {5}, number = {2}, pages = {249--261}, doi = {10.1093/biostatistics/5.2.249}, } @Article{ogata1988, author = {Yosihiko Ogata}, title = {Statistical models for earthquake occurrences and residual analysis for point processes}, journal = {Journal of the American Statistical Association}, year = {1988}, volume = {83}, number = {401}, pages = {9--27}, } @Article{paul-held-2011, author = {Michaela Paul and Leonhard Held}, title = {Predictive assessment of a non-linear random effects model for multivariate time series of infectious disease counts}, journal = {Statistics in Medicine}, year = {2011}, volume = {30}, number = {10}, pages = {1118--1136}, doi = {10.1002/sim.4177}, } @Article{paul-etal-2008, author = {Michaela Paul and Leonhard Held and Andr{\'e} Michael Toschke}, title = {Multivariate modelling of infectious disease surveillance data}, journal = {Statistics in Medicine}, year = {2008}, volume = {27}, number = {29}, pages = {6250--6267}, doi = {10.1002/sim.3440}, } @MastersThesis{riebler2004, author = {A. 
Riebler}, title = {{Empirischer Vergleich von statistischen Methoden zur Ausbruchserkennung bei Surveillance Daten}}, school = {Department of Statistics, University of Munich}, year = {2004}, type = {Bachelor's thesis}, } @Article{salmon.etal2014, author = {Ma{\"e}lle Salmon and Dirk Schumacher and Michael H{\"o}hle}, title = {Monitoring count time series in \textsf{R}: {A}berration detection in public health surveillance}, journal = {Journal of Statistical Software}, year = {2016}, volume = {70}, number = {10}, pages = {1--35}, doi = {10.18637/jss.v070.i10}, } @Book{Silvapulle.Sen2005, title = {Constrained Statistical Inference: Order, Inequality, and Shape Constraints}, publisher = {Wiley}, year = {2005}, author = {Silvapulle, Mervyn J. and Sen, Pranab Kumar}, series = {Wiley Series in Probability and Statistics}, isbn = {0-471-20827-2}, doi = {10.1002/9781118165614}, } @Article{stroup89, author = {D.F. Stroup and G.D. Williamson and J.L. Herndon and J.M. Karon}, title = {Detection of aberrations in the occurrence of notifiable diseases surveillance data}, journal = {Statistics in Medicine}, year = {1989}, volume = {8}, pages = {323--329}, doi = {10.1002/sim.4780080312}, } @Article{wei.held2013, author = {Wei, Wei and Held, Leonhard}, title = {Calibration tests for count data}, journal = {Test}, year = {2014}, volume = {23}, number = {4}, pages = {787--805}, doi = {10.1007/s11749-014-0380-8}, } @Article{ruckdeschel.kohl2014, author = {Peter Ruckdeschel and Matthias Kohl}, title = {General purpose convolution algorithm in {S4} classes by means of {FFT}}, journal = {Journal of Statistical Software}, year = {2014}, volume = {59}, number = {4}, pages = {1--25}, doi = {10.18637/jss.v059.i04}, } @Article{meyer2019, author = {Sebastian Meyer}, title = {\texttt{polyCub}: An \textsf{R} package for integration over polygons}, journal = {Journal of Open Source Software}, year = {2019}, volume = {4}, number = {34}, pages = {1056}, doi = {10.21105/joss.01056}, } @Manual{R:rmapshaper, title = {\texttt{rmapshaper}: Client for 'mapshaper' for 'Geospatial' Operations}, author = {Andy Teucher and Kenton Russell}, year = {2020}, url = {https://CRAN.R-project.org/package=rmapshaper}, } @Book{R:ggplot2, author = {Hadley Wickham}, title = {ggplot2: Elegant Graphics for Data Analysis}, publisher = {Springer-Verlag}, address = {New York}, year = {2016}, isbn = {978-3-319-24277-4}, url = {https://ggplot2.tidyverse.org}, } @Book{R:spatstat, title = {Spatial Point Patterns: Methodology and Applications with {R}}, author = {Adrian Baddeley and Ege Rubak and Rolf Turner}, year = {2015}, publisher = {Chapman and Hall/CRC}, address = {London}, isbn = {978-1-4822-1020-0}, } @Manual{R:quadprog, title = {\texttt{quadprog}: Functions to Solve Quadratic Programming Problems}, author = {Berwin A. 
Turlach and Andreas Weingessel}, year = {2019}, url = {https://CRAN.R-project.org/package=quadprog}, } surveillance/vignettes/twinSIR.Rnw0000644000176200001440000006132014012513122016776 0ustar liggesusers%\VignetteIndexEntry{twinSIR: Individual-level epidemic modeling for a fixed population with known distances} %\VignetteEngine{knitr::knitr} %\VignetteDepends{surveillance, quadprog} <>= ## purl=FALSE => not included in the tangle'd R script knitr::opts_chunk$set(echo = TRUE, tidy = FALSE, results = 'markup', fig.path='plots/twinSIR-', fig.width = 8, fig.height = 4.5, fig.align = "center", fig.scap = NA, out.width = NULL, cache = FALSE, error = FALSE, warning = FALSE, message = FALSE) knitr::render_sweave() # use Sweave environments knitr::set_header(highlight = '') # no \usepackage{Sweave} (part of jss class) ## R settings options(prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE) # JSS options(width = 85, digits = 4) options(scipen = 1) # so that 1e-4 gets printed as 0.0001 ## xtable settings options(xtable.booktabs = TRUE, xtable.size = "small", xtable.sanitize.text.function = identity, xtable.comment = FALSE) @ <>= ## load the "cool" package library("surveillance") ## Compute everything or fetch cached results? message("Doing computations: ", COMPUTE <- !file.exists("twinSIR-cache.RData")) if (!COMPUTE) load("twinSIR-cache.RData", verbose = TRUE) @ \documentclass[nojss,nofooter,article]{jss} \usepackage[latin1]{inputenc} % Rnw is ASCII, but automatic package bib isn't \title{% \vspace{-1.5cm} \fbox{\vbox{\normalfont\footnotesize This introduction to the \code{twinSIR} modeling framework of the \proglang{R}~package \pkg{surveillance} is based on a publication in the \textit{Journal of Statistical Software} -- \citet[Section~4]{meyer.etal2014} -- which is the suggested reference if you use the \code{twinSIR} implementation in your own work.}}\\[1cm] \code{twinSIR}: Individual-level epidemic modeling for a fixed population with known distances} \Plaintitle{twinSIR: Individual-level epidemic modeling for a fixed population with known distances} \Shorttitle{Modeling epidemics in a fixed population with known distances} \author{Sebastian Meyer\thanks{Author of correspondence: \email{seb.meyer@fau.de}}\\Friedrich-Alexander-Universit{\"a}t\\Erlangen-N{\"u}rnberg \And Leonhard Held\\University of Zurich \And Michael H\"ohle\\Stockholm University} \Plainauthor{Sebastian Meyer, Leonhard Held, Michael H\"ohle} %% Basic packages \usepackage{lmodern} % successor of CM -> searchable Umlauts (1 char) \usepackage[english]{babel} % language of the manuscript is American English %% Math packages \usepackage{amsmath,amsfonts} % amsfonts defines \mathbb \usepackage{bm} % \bm: alternative to \boldsymbol from amsfonts %% Packages for figures and tables \usepackage{booktabs} % make tables look nicer \usepackage{subcaption} % successor of subfig, which supersedes subfigure %% knitr uses \subfloat, which subcaption only provides since v1.3 (2019/08/31) \providecommand{\subfloat}[2][need a sub-caption]{\subcaptionbox{#1}{#2}} %% Handy math commands \newcommand{\abs}[1]{\lvert#1\rvert} \newcommand{\norm}[1]{\lVert#1\rVert} \newcommand{\given}{\,\vert\,} \newcommand{\dif}{\,\mathrm{d}} \newcommand{\IR}{\mathbb{R}} \newcommand{\IN}{\mathbb{N}} \newcommand{\ind}{\mathbb{I}} \DeclareMathOperator{\Po}{Po} \DeclareMathOperator{\NegBin}{NegBin} \DeclareMathOperator{\N}{N} %% Additional commands \newcommand{\class}[1]{\code{#1}} % could use quotes (JSS does not like them) 
\newcommand{\CRANpkg}[1]{\href{https://CRAN.R-project.org/package=#1}{\pkg{#1}}} %% Reduce the font size of code input and output \DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl, fontsize=\small} \DefineVerbatimEnvironment{Soutput}{Verbatim}{fontsize=\small} %% Abstract \Abstract{ The availability of geocoded health data and the inherent temporal structure of communicable diseases have led to an increased interest in statistical models and software for spatio-temporal data with epidemic features. The \proglang{R}~package \pkg{surveillance} can handle various levels of aggregation at which infective events have been recorded. This vignette illustrates the analysis of individual-level surveillance data for a fixed population, of which the complete SIR event history is assumed to be known. Typical applications for the multivariate, temporal point process model ``\code{twinSIR}'' of \citet{hoehle2009} include the spread of infectious livestock diseases across farms, household models for childhood diseases, and epidemics across networks. %% (For other types of surveillance data, see %% \code{vignette("twinstim")} and \code{vignette("hhh4\_spacetime")}.) We first describe the general modeling approach and then exemplify data handling, model fitting, and visualization for a particularly well-documented measles outbreak among children of the isolated German village Hagelloch in 1861. %% Due to the many similarities with the spatio-temporal point process model %% ``\code{twinstim}'' described and illustrated in \code{vignette("twinstim")}, %% we condense the \code{twinSIR} treatment accordingly. } \Keywords{% individual-level surveillance data, endemic-epidemic modeling, infectious disease epidemiology, self-exciting point process, branching process with immigration} \begin{document} \section[Model class]{Model class: \code{twinSIR}} \label{sec:twinSIR:methods} The spatio-temporal point process regression model ``\code{twinstim}'' (\citealp{meyer.etal2011}, illustrated in \code{vignette("twinstim")}) is indexed in a continuous spatial domain, i.e., the set of possible event locations %(the susceptible ``population'') consists of the whole observation region and is thus infinite. In contrast, if infections can only occur at a known discrete set of sites, such as for livestock diseases among farms, the conditional intensity function (CIF) of the underlying point process formally becomes $\lambda_i(t)$. It characterizes the instantaneous rate of infection of individual $i$ at time $t$, given the sets $S(t)$ and $I(t)$ of susceptible and infectious individuals, respectively (just before time $t$). %In a similar regression view as in \code{vignette("twinstim")}, \citet{hoehle2009} proposed the following endemic-epidemic multivariate temporal point process model (``\code{twinSIR}''): \begin{equation} \label{eqn:twinSIR} \lambda_i(t) = \lambda_0(t) \, \nu_i(t) + \sum_{j \in I(t)} \left\{ f(d_{ij}) + \bm{w}_{ij}^\top \bm{\alpha}^{(w)} \right\} \:, %\qquad \text{if } i \in S(t)\:, \end{equation} if $i \in S(t)$, i.e., if individual $i$ is currently susceptible, and $\lambda_i(t) = 0$ otherwise. The rate decomposes into two components. The first, endemic component consists of a Cox proportional hazards formulation containing a semi-parametric baseline hazard $\lambda_0(t)$ and a log-linear predictor $\nu_i(t)=\exp\left( \bm{z}_i(t)^\top \bm{\beta} \right)$ of covariates modeling infection from external sources. 
Furthermore, an additive epidemic component captures transmission from the set $I(t)$ of currently infectious individuals. The force of infection of individual $i$ depends on the distance $d_{ij}$ to each infective source $j \in I(t)$ through a distance kernel
\begin{equation} \label{eqn:twinSIR:f}
f(u) = \sum_{m=1}^M \alpha_m^{(f)} B_m(u) \: \geq 0 \:,
\end{equation}
which is represented by a linear combination of non-negative basis functions $B_m$ with the $\alpha_m^{(f)}$'s being the respective coefficients. For instance, $f$ could be modeled by a B-spline \citep[Section~8.1]{Fahrmeir.etal2013}, and $d_{ij}$ could refer to the Euclidean distance $\norm{\bm{s}_i - \bm{s}_j}$ between the individuals' locations $\bm{s}_i$ and $\bm{s}_j$, or to the geodesic distance between the nodes $i$ and $j$ in a network. The distance-based force of infection is modified additively by a linear predictor of covariates $\bm{w}_{ij}$ describing the interaction of individuals $i$ and~$j$ further. Hence, the whole epidemic component of Equation~\ref{eqn:twinSIR} can be written as a single linear predictor $\bm{x}_i(t)^\top \bm{\alpha}$ by interchanging the summation order to
\begin{equation} \label{eqn:twinSIR:x}
\sum_{m=1}^M \alpha^{(f)}_m \sum_{j \in I(t)} B_m(d_{ij}) + \sum_{k=1}^K \alpha^{(w)}_k \sum_{j \in I(t)} w_{ijk} = \bm{x}_i(t)^\top \bm{\alpha} \:,
\end{equation}
such that $\bm{x}_i(t)$ comprises all epidemic terms summed over $j\in I(t)$. Note that the use of additive covariates $\bm{w}_{ij}$ on top of the distance kernel in \eqref{eqn:twinSIR} is different from \code{twinstim}'s multiplicative approach. One advantage of the additive approach is that the subsequent linear decomposition of the distance kernel allows one to gather all parts of the epidemic component in a single linear predictor. Hence, the above model represents a CIF extension of what in the context of survival analysis is known as an additive-multiplicative hazard model~\citep{Martinussen.Scheike2006}. As a consequence, the \code{twinSIR} model could in principle be fitted with the \CRANpkg{timereg} package, which yields estimates for the cumulative hazards. However, \citet{hoehle2009} chooses a more direct inferential approach: To ensure that the CIF $\lambda_i(t)$ is non-negative, all covariates are encoded such that the components of $\bm{w}_{ij}$ are non-negative. Additionally, the parameter vector $\bm{\alpha}$ is constrained to be non-negative. Subsequent parameter inference is then based on the resulting constrained penalized likelihood, which gives directly interpretable estimates of $\bm{\alpha}$. Future work could investigate the potential of a multiplicative approach for the epidemic component in \code{twinSIR}.

\section[Data structure]{Data structure: \class{epidata}}
\label{sec:twinSIR:data}

New SIR-type event data typically arrive in the form of a simple data frame with one row per individual and sequential event time points as columns. For the 1861 Hagelloch measles epidemic, which has previously been analyzed by, e.g., \citet{neal.roberts2004}, such a data set of the 188 affected children is contained in the \pkg{surveillance} package:

<>=
data("hagelloch")
head(hagelloch.df, n = 5)
@

The help page \code{help("hagelloch")} contains a description of all columns. Here we concentrate on the event columns \code{PRO} (appearance of prodromes), \code{ERU} (eruption), and \code{DEAD} (day of death if during the outbreak).
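For a first orientation, these columns can be inspected directly; the following call is merely illustrative (output not shown) and is not part of the original analysis.

<<eval=FALSE>>=
## Quick look at the three event columns
## (DEAD is assumed to be NA for children who survived the outbreak)
summary(hagelloch.df[, c("PRO", "ERU", "DEAD")])
@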
We take the day on which the index case developed first symptoms, 30 October 1861 (\code{min(hagelloch.df$PRO)}), as the start of the epidemic, i.e., we condition on this case being initially infectious.
% t0 = 1861-10-31 00:00:00
As for \code{twinstim}, the property of point processes that concurrent events have zero probability requires special treatment. Ties are due to the interval censoring of the data to a daily basis -- we broke these ties by adding random jitter to the event times within the given days. The resulting columns \code{tPRO}, \code{tERU}, and \code{tDEAD} are relative to the defined start time. Following \citet{neal.roberts2004}, we assume that each child becomes infectious (S~$\rightarrow$~I event at time \code{tI}) one day before the appearance of prodromes, and is removed from the epidemic (I~$\rightarrow$~R event at time \code{tR}) three days after the appearance of rash or at the time of death, whichever comes first.

For further processing of the data, we convert \code{hagelloch.df} to the standardized \class{epidata} structure for \code{twinSIR}. This is done by the converter function \code{as.epidata}, which also checks consistency and optionally pre-calculates the epidemic terms $\bm{x}_i(t)$ of Equation~\ref{eqn:twinSIR:x} to be incorporated in a \code{twinSIR} model. The following call generates the \class{epidata} object \code{hagelloch}:

<>=
hagelloch <- as.epidata(hagelloch.df,
  t0 = 0, tI.col = "tI", tR.col = "tR",
  id.col = "PN", coords.cols = c("x.loc", "y.loc"),
  f = list(household    = function(u) u == 0,
           nothousehold = function(u) u > 0),
  w = list(c1 = function (CL.i, CL.j) CL.i == "1st class" & CL.j == CL.i,
           c2 = function (CL.i, CL.j) CL.i == "2nd class" & CL.j == CL.i),
  keep.cols = c("SEX", "AGE", "CL"))
@

The coordinates (\code{x.loc}, \code{y.loc}) correspond to the location of the household the child lives in and are measured in meters. Note that \class{twinSIR} allows for tied locations of individuals, but assumes the relevant spatial location to be fixed during the entire observation period. By default, the Euclidean distance between the given coordinates will be used. Alternatively, \code{as.epidata} also accepts a pre-computed distance matrix via its argument \code{D} without requiring spatial coordinates. The argument \code{f} lists distance-dependent basis functions $B_m$ for which the epidemic terms $\sum_{j\in I(t)} B_m(d_{ij})$ shall be generated. Here, \code{household} ($x_{i,H}(t)$) and \code{nothousehold} ($x_{i,\bar{H}}(t)$) count for each child the number of currently infective children in its household and outside its household, respectively. Similar to \citet{neal.roberts2004}, we also calculate the covariate-based epidemic terms \code{c1} ($x_{i,c1}(t)$) and \code{c2} ($x_{i,c2}(t)$)
% from $w_{ijk} = \ind(\code{CL}_i = k, \code{CL}_j = \code{CL}_i)$
counting the number of currently infective classmates. Note from the corresponding definitions of $w_{ij1}$ and $w_{ij2}$ in \code{w} that \code{c1} is always zero for children of the second class and \code{c2} is always zero for children of the first class. For pre-school children, both variables equal zero over the whole period. By the last argument \code{keep.cols}, we choose to only keep the covariates \code{SEX}, \code{AGE}, and school \code{CL}ass from \code{hagelloch.df}.
The first few rows of the generated \class{epidata} object are shown below:

<>=
head(hagelloch, n = 5)
@

The \class{epidata} structure inherits from counting processes as implemented by the \class{Surv} class of package \CRANpkg{survival} and also used in \CRANpkg{timereg}. Specifically, the observation period is split up into consecutive time intervals (\code{start}; \code{stop}] of constant conditional intensities. As the CIF $\lambda_i(t)$ of Equation~\eqref{eqn:twinSIR} only changes at time points where the set of infectious individuals $I(t)$ or some endemic covariate in $\nu_i(t)$ changes, those occurrences define the break points of the time intervals. Altogether, the \code{hagelloch} event history consists of \Sexpr{nrow(hagelloch)/nlevels(hagelloch$id)} time \code{BLOCK}s of \Sexpr{nlevels(hagelloch[["id"]])} rows, where each row describes the state of individual \code{id} during the corresponding time interval. The susceptibility status and the I- and R-events are captured by the columns \code{atRiskY}, \code{event} and \code{Revent}, respectively. The \code{atRiskY} column indicates if the individual is at risk of becoming infected in the current interval. The event columns indicate which individual was infected or removed at the \code{stop} time. Note that at most one entry in the \code{event} and \code{Revent} columns is 1; all others are 0.

Apart from being the input format for \code{twinSIR} models, the \class{epidata} class has several associated methods (Table~\ref{tab:methods:epidata}), which are similar in spirit to the methods described for \class{epidataCS}.

<>=
print(xtable(
  surveillance:::functionTable("epidata", list(Display = c("stateplot"))),
  caption="Generic and \\textit{non-generic} functions applicable to \\class{epidata} objects.",
  label="tab:methods:epidata"), include.rownames = FALSE)
@

For example, Figure~\ref{fig:hagelloch_plot} illustrates the course of the Hagelloch measles epidemic by counting processes for the number of susceptible, infectious and removed children, respectively. Figure~\ref{fig:hagelloch_households} shows the locations of the households. An \code{animate}d map can also be produced to view the households' states over time and a simple \code{stateplot} shows the changes for a selected unit.

<>=
par(mar = c(5, 5, 1, 1))
plot(hagelloch, xlab = "Time [days]")
@
<>=
par(mar = c(5, 5, 1, 1))
hagelloch_coords <- summary(hagelloch)$coordinates
plot(hagelloch_coords, xlab = "x [m]", ylab = "y [m]",
     pch = 15, asp = 1, cex = sqrt(multiplicity(hagelloch_coords)))
legend(x = "topleft", pch = 15, legend = c(1, 4, 8),
       pt.cex = sqrt(c(1, 4, 8)), title = "Household size")
@

\section{Modeling and inference}
\label{sec:twinSIR:fit}

\subsection{Basic example}

To illustrate the flexibility of \code{twinSIR} we will analyze the Hagelloch data using classroom and household indicators similar to \citet{neal.roberts2004}. We include an additional endemic background rate $\exp(\beta_0)$, which allows for multiple outbreaks triggered by external sources. Consequently, we do not need to ignore the child that got infected about one month after the end of the main epidemic (see the last event mark in Figure~\ref{fig:hagelloch_plot}).
% ATM, there is no way to fit a twinSIR without an endemic component.
Altogether, the CIF for a child $i$ is modeled as \begin{equation} \label{eqn:twinSIR:hagelloch} \lambda_i(t) = Y_i(t) \cdot \left[ \exp(\beta_0) + \alpha_H x_{i,H}(t) + \alpha_{c1} x_{i,c1}(t) + \alpha_{c2} x_{i,c2}(t) + \alpha_{\bar{H}} x_{i,\bar{H}}(t) \right] \:, \end{equation} where $Y_i(t) = \ind(i \in S(t))$ is the at-risk indicator. By counting the number of infectious classmates separately for both school classes as described in the previous section, we allow for class-specific effects $\alpha_{c1}$ and $\alpha_{c2}$ on the force of infection. The model is estimated by maximum likelihood \citep{hoehle2009} using the call <>= hagellochFit <- twinSIR(~household + c1 + c2 + nothousehold, data = hagelloch) @ and the fit is summarized below: <>= set.seed(1) summary(hagellochFit) @ <>= ## drop leading and trailing empty lines writeLines(tail(head(capture.output({ <> }), -1), -1)) @ The results show, e.g., a \Sexpr{sprintf("%.4f",coef(hagellochFit)["c1"])} / \Sexpr{sprintf("%.4f",coef(hagellochFit)["c2"])} $=$ \Sexpr{format(coef(hagellochFit)["c1"]/coef(hagellochFit)["c2"])} times higher transmission between individuals in the 1st class than in the 2nd class. Furthermore, an infectious housemate adds \Sexpr{sprintf("%.4f",coef(hagellochFit)["household"])} / \Sexpr{sprintf("%.4f",coef(hagellochFit)["nothousehold"])} $=$ \Sexpr{format(coef(hagellochFit)["household"]/coef(hagellochFit)["nothousehold"])} times as much infection pressure as infectious children outside the household. The endemic background rate of infection in a population with no current measles cases is estimated to be $\exp(\hat{\beta}_0) = \exp(\Sexpr{format(coef(hagellochFit)["cox(logbaseline)"])}) = \Sexpr{format(exp(coef(hagellochFit)["cox(logbaseline)"]))}$. An associated Wald confidence interval (CI) based on the asymptotic normality of the maximum likelihood estimator (MLE) can be obtained by \code{exp}-transforming the \code{confint} for $\beta_0$: <>= exp(confint(hagellochFit, parm = "cox(logbaseline)")) @ Note that Wald confidence intervals for the epidemic parameters $\bm{\alpha}$ are to be treated carefully, because their construction does not take the restricted parameter space into account. For more adequate statistical inference, the behavior of the log-likelihood near the MLE can be investigated using the \code{profile}-method for \class{twinSIR} objects. For instance, to evaluate the normalized profile log-likelihood of $\alpha_{c1}$ and $\alpha_{c2}$ on an equidistant grid of 25 points within the corresponding 95\% Wald CIs, we do: <>= prof <- profile(hagellochFit, list(c(match("c1", names(coef(hagellochFit))), NA, NA, 25), c(match("c2", names(coef(hagellochFit))), NA, NA, 25))) @ The profiling result contains 95\% highest likelihood based CIs for the parameters, as well as the Wald CIs for comparison: <<>>= prof$ci.hl @ The entire functional form of the normalized profile log-likelihood on the requested grid as stored in \code{prof$lp} can be visualized by: <>= plot(prof) @ The above model summary also reports the one-sided AIC~\citep{hughes.king2003}, which can be used for model selection under positivity constraints on $\bm{\alpha}$ as described in \citet{hoehle2009}. The involved parameter penalty is determined by Monte Carlo simulation, which is why we did \code{set.seed} before the \code{summary} call. The algorithm is described in \citet[p.~79, Simulation 3]{Silvapulle.Sen2005} and involves quadratic programming using package \CRANpkg{quadprog} \citep{R:quadprog}. 
If there are fewer than three constrained parameters in a \code{twinSIR} model, the penalty is computed analytically.

\subsection{Model diagnostics}

<>=
print(xtable(
  surveillance:::functionTable("twinSIR",
    functions=list(Display = c("checkResidualProcess"))),
  caption="Generic and \\textit{non-generic} functions for \\class{twinSIR}. There are no specific \\code{coef} or \\code{confint} methods, since the respective default methods from package \\pkg{stats} apply outright.",
  label="tab:methods:twinSIR"), include.rownames = FALSE)
@

Table~\ref{tab:methods:twinSIR} lists all methods for the \class{twinSIR} class. For example, to investigate how the conditional intensity function decomposes into endemic and epidemic components over time, we produce Figure~\ref{fig:hagellochFit_plot-1} by:

<>=
par(mar = c(5, 5, 1, 1))
plot(hagellochFit, which = "epidemic proportion", xlab = "time [days]")
checkResidualProcess(hagellochFit, plot = 1)
@

Note that the last infection was necessarily caused by the endemic component since there were no more infectious children in the observed population who could have triggered the new case. We can also inspect temporal Cox-Snell-like \code{residuals} of the fitted point process using the function \code{checkResidualProcess} as for the spatio-temporal point process models in \code{vignette("twinstim")}. The resulting Figure~\ref{fig:hagellochFit_plot-2} reveals some deficiencies of the model in describing the waiting times between events, which might be related to the assumption of fixed infection periods.

<>=
knots <- c(100, 200)
fstep <- list(
  B1 = function(D) D > 0 & D < knots[1],
  B2 = function(D) D >= knots[1] & D < knots[2],
  B3 = function(D) D >= knots[2])
@

To illustrate AIC-based model selection, we may consider a more flexible model for local spread using a step function for the distance kernel $f(u)$ in Equation \ref{eqn:twinSIR:f}. An updated model with
<>=
.allknots <- c(0, knots, "\\infty")
cat(paste0("$B_{", seq_along(fstep), "} = ",
           "I_{", ifelse(seq_along(fstep)==1,"(","["),
           .allknots[-length(.allknots)], ";", .allknots[-1], ")}(u)$",
           collapse = ", "))
@
can be fitted as follows:

<>=
<>
hagellochFit_fstep <- twinSIR(
  ~household + c1 + c2 + B1 + B2 + B3,
  data = update(hagelloch, f = fstep))
@

<>=
set.seed(1)
AIC(hagellochFit, hagellochFit_fstep)
@

Hence the simpler model with just a \code{nothousehold} component instead of the more flexible distance-based step function is preferred.

\section{Simulation}
\label{sec:twinSIR:simulation}

Simulation from fitted \code{twinSIR} models is described in detail in~\citet[Section~4]{hoehle2009}. The implementation is made available by an appropriate \code{simulate}-method for class \class{twinSIR}. We skip the illustration here and refer to \code{help("simulate.twinSIR")}.
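For completeness, a minimal call might look as follows; this is an untested sketch using only the arguments of the usual \code{simulate} generic, and the help page should be consulted for the full set of options.

<<eval=FALSE>>=
## Simulate one epidemic from the fitted model (illustrative only)
simepi <- simulate(hagellochFit, nsim = 1, seed = 1)
@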
%--------------
% BIBLIOGRAPHY
%--------------
\bibliography{references}

<>=
save(prof, file = "twinSIR-cache.RData")
@

\end{document}
surveillance/vignettes/surveillance-cache.RData0000644000176200001440000002361013627521447021445 0ustar liggesusers
surveillance/vignettes/monitoringCounts.bib0000644000176200001440000004147614004512307021015 0ustar liggesusers
@Article{newport2011, author = {C Bayer and H Bernard and R Prager and W Rabsch and P Hiller and B Malorny and B Pfefferkorn and C Frank and A de Jong and I Friesema and {others}}, title = {An Outbreak of Salmonella Newport Associated with Mung Bean Sprouts in Germany and the Netherlands, October to November 2011}, journal = {Eurosurveillance}, year = {2014}, volume = {19}, number = {1}, doi = {10.2807/1560-7917.es2014.19.1.20665}, }
@Article{becker_marschner93, author = {N. G. Becker and I. C.
surveillance/vignettes/twinSIR-cache.RData0000644000176200001440000000324512674766245020312 0ustar liggesusers
surveillance/vignettes/hhh4_spacetime.Rnw0000644000176200001440000016023414006074376020345 0ustar liggesusers
%\VignetteIndexEntry{hhh4 (spatio-temporal): Endemic-epidemic modeling of areal count time series}
%\VignetteEngine{knitr::knitr}
%\VignetteDepends{surveillance, lattice, spdep, gsl, colorspace, animation, gridExtra, scales, fanplot, hhh4contacts}

<>=
## purl=FALSE => not included in the tangle'd R script
knitr::opts_chunk$set(echo = TRUE, tidy = FALSE, results = 'markup',
    fig.path='plots/hhh4_spacetime-', fig.width = 8, fig.height = 4.5,
    fig.align = "center", fig.scap = NA, out.width = NULL,
    cache = FALSE, error = FALSE, warning = FALSE, message = FALSE)
knitr::render_sweave()             # use Sweave environments
knitr::set_header(highlight = '')  # no \usepackage{Sweave} (part of jss class)

## R settings
options(prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE)  # JSS
options(width = 85, digits = 4)
options(scipen = 1)  # so that 1e-4 gets printed as 0.0001

## xtable settings
options(xtable.booktabs = TRUE, xtable.size = "small",
        xtable.sanitize.text.function = identity,
        xtable.comment = FALSE)
@

<>=
## load the "cool" package
library("surveillance")

## Compute everything or fetch cached results?
message("Doing computations: ", COMPUTE <- !file.exists("hhh4_spacetime-cache.RData")) if (!COMPUTE) load("hhh4_spacetime-cache.RData", verbose = TRUE) @ \documentclass[nojss,nofooter,article]{jss} \usepackage[latin1]{inputenc} % Rnw is ASCII, but automatic package bib isn't \title{% \vspace{-1.5cm} \fbox{\vbox{\normalfont\footnotesize This introduction to spatio-temporal \code{hhh4} models implemented in the \proglang{R}~package \pkg{surveillance} is based on a publication in the \textit{Journal of Statistical Software} -- \citet[Section~5]{meyer.etal2014} -- which is the suggested reference if you use the \code{hhh4} implementation in your own work.}}\\[1cm] \code{hhh4}: Endemic-epidemic modeling\\of areal count time series} \Plaintitle{hhh4: Endemic-epidemic modeling of areal count time series} \Shorttitle{Endemic-epidemic modeling of areal count time series} \author{Sebastian Meyer\thanks{Author of correspondence: \email{seb.meyer@fau.de}}\\Friedrich-Alexander-Universit{\"a}t\\Erlangen-N{\"u}rnberg \And Leonhard Held\\University of Zurich \And Michael H\"ohle\\Stockholm University} \Plainauthor{Sebastian Meyer, Leonhard Held, Michael H\"ohle} %% Basic packages \usepackage{lmodern} % successor of CM -> searchable Umlauts (1 char) \usepackage[english]{babel} % language of the manuscript is American English %% Math packages \usepackage{amsmath,amsfonts} % amsfonts defines \mathbb \usepackage{bm} % \bm: alternative to \boldsymbol from amsfonts %% Packages for figures and tables \usepackage{booktabs} % make tables look nicer \usepackage{subcaption} % successor of subfig, which supersedes subfigure %% knitr uses \subfloat, which subcaption only provides since v1.3 (2019/08/31) \providecommand{\subfloat}[2][need a sub-caption]{\subcaptionbox{#1}{#2}} %% Handy math commands \newcommand{\abs}[1]{\lvert#1\rvert} \newcommand{\norm}[1]{\lVert#1\rVert} \newcommand{\given}{\,\vert\,} \newcommand{\dif}{\,\mathrm{d}} \newcommand{\IR}{\mathbb{R}} \newcommand{\IN}{\mathbb{N}} \newcommand{\ind}{\mathbb{I}} \DeclareMathOperator{\Po}{Po} \DeclareMathOperator{\NegBin}{NegBin} \DeclareMathOperator{\N}{N} %% Additional commands \newcommand{\class}[1]{\code{#1}} % could use quotes (JSS does not like them) \newcommand{\CRANpkg}[1]{\href{https://CRAN.R-project.org/package=#1}{\pkg{#1}}} %% Reduce the font size of code input and output \DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl, fontsize=\small} \DefineVerbatimEnvironment{Soutput}{Verbatim}{fontsize=\small} %% Abstract \Abstract{ The availability of geocoded health data and the inherent temporal structure of communicable diseases have led to an increased interest in statistical models and software for spatio-temporal data with epidemic features. The \proglang{R}~package \pkg{surveillance} can handle various levels of aggregation at which infective events have been recorded. This vignette illustrates the analysis of area-level time series of counts using the endemic-epidemic multivariate time-series model ``\code{hhh4}'' described in, e.g., \citet[Section~3]{meyer.held2013}. See \code{vignette("hhh4")} for a more general introduction to \code{hhh4} models, including the univariate and non-spatial bivariate case. %% (For other types of surveillance data, see %% \code{vignette("twinstim")} and \code{vignette("twinSIR")}.) 
We first describe the general modeling approach and then exemplify data handling, model fitting, visualization, and simulation methods for weekly counts of measles infections by district in the Weser-Ems region of Lower Saxony, Germany, 2001--2002. } \Keywords{% areal time series of counts, endemic-epidemic modeling, infectious disease epidemiology, branching process with immigration} \begin{document} %% \vfill %% { %% \renewcommand{\abstractname}{Outline} % local change %% \begin{abstract} %% We start by describing the general model class in Section~\ref{sec:hhh4:methods}. %% Section~\ref{sec:hhh4:data} introduces the data and the associated \proglang{S}4-class %% \class{sts} (``surveillance time series''). %% In Section~\ref{sec:hhh4:fit}, a simple model for the measles data based on the %% original analysis of \citet{held-etal-2005} is introduced, %% which is then sequentially improved by suitable model extensions. %% The final Section~\ref{sec:hhh4:simulation} illustrates simulation from fitted %% \class{hhh4} models. %% \end{abstract} %% } %% \vfill %% \newpage \section[Model class]{Model class: \code{hhh4}} \label{sec:hhh4:methods} An endemic-epidemic multivariate time-series model for infectious disease counts $Y_{it}$ from units $i=1,\dotsc,I$ during periods $t=1,\dotsc,T$ was proposed by \citet{held-etal-2005} and was later extended in a series of papers \citep{paul-etal-2008,paul-held-2011,held.paul2012,meyer.held2013}. In its most general formulation, this so-called ``\code{hhh4}'' model assumes that, conditional on past observations, $Y_{it}$ has a negative binomial distribution with mean \begin{equation} \label{eqn:hhh4} \mu_{it} = e_{it} \, \nu_{it} + \lambda_{it} \, Y_{i,t-1} + \phi_{it} \sum_{j \ne i} w_{ji} \, Y_{j,t-1} \end{equation} and overdispersion parameter $\psi_i > 0$ such that the conditional variance of $Y_{it}$ is $\mu_{it} (1+\psi_i \mu_{it})$. Shared overdispersion parameters, e.g., $\psi_i\equiv\psi$, are supported as well as replacing the negative binomial by a Poisson distribution, which corresponds to the limit $\psi_i\equiv 0$. Similar to the point process models in \code{vignette("twinstim")} and \code{vignette("twinSIR")}, the mean~\eqref{eqn:hhh4} decomposes additively into endemic and epidemic components. The endemic mean is usually modeled proportional to an offset of expected counts~$e_{it}$. In spatial applications of the multivariate \code{hhh4} model as in this paper, the ``unit''~$i$ refers to a geographical region and we typically use (the fraction of) the population living in region~$i$ as the endemic offset. The observation-driven epidemic component splits up into autoregressive effects, i.e., reproduction of the disease within region~$i$, and neighborhood effects, i.e., transmission from other regions~$j$. Overall, Equation~\ref{eqn:hhh4} becomes a rich regression model by allowing for log-linear predictors in all three components: \begin{align} \label{eqn:hhh4:predictors} \log(\nu_{it}) &= \alpha_i^{(\nu)} + {\bm{\beta}^{(\nu)}}^\top \bm{z}^{(\nu)}_{it} \:, \\ \log(\lambda_{it}) &= \alpha_i^{(\lambda)} + {\bm{\beta}^{(\lambda)}}^\top \bm{z}^{(\lambda)}_{it} \:, \\ \log(\phi_{it}) &= \alpha_i^{(\phi)} + {\bm{\beta}^{(\phi)}}^\top \bm{z}^{(\phi)}_{it} \:. \end{align} %% The superscripts in brackets distinguish the component-specific parameters. The intercepts of these predictors can be assumed identical across units, unit-specific, or random (and possibly correlated). 
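To fix ideas, these three choices translate into component formulae roughly
as follows (a schematic, non-evaluated sketch using the formula specials
\code{fe} and \code{ri} provided by \pkg{surveillance}):

<<eval=FALSE>>=
~1                                # alpha identical across units
~-1 + fe(1, unitSpecific = TRUE)  # unit-specific intercepts alpha_i
~-1 + ri(type = "iid")            # random intercepts alpha_i
@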
%\citep{paul-held-2011} The regression terms often involve sine-cosine effects of time to reflect seasonally varying incidence, %\citep{held.paul2012} but may, e.g., also capture heterogeneous vaccination coverage \citep{herzog-etal-2010}. Data on infections imported from outside the study region may enter the endemic component \citep{geilhufe.etal2012}, which generally accounts for cases not directly linked to other observed cases, e.g., due to edge effects. For a single time series of counts $Y_t$, \code{hhh4} can be regarded as an extension of \code{glm.nb} from package \CRANpkg{MASS} \citep{R:MASS} to account for autoregression. See the \code{vignette("hhh4")} for examples of modeling univariate and bivariate count time series using \code{hhh4}. With multiple regions, spatio-temporal dependence is adopted by the third component in Equation~\ref{eqn:hhh4} with weights $w_{ji}$ reflecting the flow of infections from region $j$ to region $i$. These transmission weights may be informed by movement network data \citep{paul-etal-2008,geilhufe.etal2012}, but may also be estimated parametrically. A suitable choice to reflect epidemiological coupling between regions \citep[Chapter~7]{Keeling.Rohani2008} is a power-law distance decay $w_{ji} = o_{ji}^{-d}$ defined in terms of the adjacency order~$o_{ji}$ in the neighborhood graph of the regions \citep{meyer.held2013}. %% For instance, a second-order neighbor~$j$ of a region~$i$ ($o_{ji} = 2$) is a %% region adjacent to a first-order neighbor of $i$, but not itself directly %% adjacent to $i$. Note that we usually normalize the transmission weights such that $\sum_i w_{ji} = 1$, i.e., the $Y_{j,t-1}$ cases are distributed among the regions proportionally to the $j$th row vector of the weight matrix $(w_{ji})$. Likelihood inference for the above multivariate time-series model has been established by \citet{paul-held-2011} with extensions for parametric neighborhood weights by \citet{meyer.held2013}. Supplied with the analytical score function and Fisher information, the function \code{hhh4} by default uses the quasi-Newton algorithm available through the \proglang{R} function \code{nlminb} to maximize the log-likelihood. Convergence is usually fast even for a large number of parameters. If the model contains random effects, the penalized and marginal log-likelihoods are maximized alternately until convergence. Computation of the marginal Fisher information is accelerated using the \CRANpkg{Matrix} package \citep{R:Matrix}. \section[Data structure]{Data structure: \class{sts}} \label{sec:hhh4:data} <>= ## extract components from measlesWeserEms to reconstruct data("measlesWeserEms") counts <- observed(measlesWeserEms) map <- measlesWeserEms@map populationFrac <- measlesWeserEms@populationFrac @ In public health surveillance, routine reports of infections to public health authorities give rise to spatio-temporal data, which are usually made available in the form of aggregated counts by region and period. The Robert Koch Institute (RKI) in Germany, for example, maintains a database of cases of notifiable diseases, which can be queried via the \emph{SurvStat@RKI} online service (\url{https://survstat.rki.de}). To exemplify area-level \code{hhh4} models in the remainder of this manuscript, we use weekly counts of measles infections by district in the Weser-Ems region of Lower Saxony, Germany, 2001--2002, downloaded from \emph{SurvStat@RKI} (as of Annual Report 2005). 
These data are contained in \pkg{surveillance} as \code{data("measlesWeserEms")} -- an object of the \proglang{S}4-class \class{sts} (``surveillance time series'') used for data input in \code{hhh4} models and briefly introduced below. See \citet{hoehle-mazick-2010} and \citet{salmon.etal2014} for more detailed descriptions of this class, which is also used for the prospective aberration detection facilities of the \pkg{surveillance} package. The epidemic modeling of multivariate count time series essentially involves three data matrices: a $T \times I$ matrix of the observed counts, a corresponding matrix with potentially time-varying population numbers (or fractions), and an $I \times I$ neighborhood matrix quantifying the coupling between the $I$ units. In our example, the latter consists of the adjacency orders~$o_{ji}$ between the districts. A map of the districts in the form of a \code{SpatialPolygons} object (defined by the \CRANpkg{sp} package of \citealp{R:sp}) can be used to derive the matrix of adjacency orders automatically using the functions \code{poly2adjmat} and \code{nbOrder}, which wrap functionality of package \CRANpkg{spdep} \citep{R:spdep}: <>= weserems_adjmat <- poly2adjmat(map) weserems_nbOrder <- nbOrder(weserems_adjmat, maxlag = Inf) @ Visual inspection of the adjacencies identified by \code{poly2adjmat} is recommended, e.g., via labelling each district with the number of its neighbors, i.e., \code{rowSums(weserems_adjmat)}. If adjacencies are not detected, this is probably due to sliver polygons. In that case either increase the \code{snap} tolerance in \code{poly2adjmat} or use \CRANpkg{rmapshaper} \citep{R:rmapshaper} to simplify and snap adjacent polygons in advance. Given the aforementioned ingredients, the \class{sts} object \code{measlesWeserEms} has been constructed as follows: <>= measlesWeserEms <- sts(counts, start = c(2001, 1), frequency = 52, population = populationFrac, neighbourhood = weserems_nbOrder, map = map) @ Here, \code{start} and \code{frequency} have the same meaning as for classical time-series objects of class \class{ts}, i.e., (year, sample number) of the first observation and the number of observations per year. Note that \code{data("measlesWeserEms")} constitutes a corrected version of \code{data("measles.weser")} originally analyzed by \citet[Section~3.2]{held-etal-2005}. Differences are documented on the associated help page. We can visualize such \class{sts} data in four ways: individual time series, overall time series, map of accumulated counts by district, or animated maps. For instance, the two plots in Figure~\ref{fig:measlesWeserEms} have been generated by the following code: <>= par(mar = c(5,5,1,1)) plot(measlesWeserEms, type = observed ~ time) plot(measlesWeserEms, type = observed ~ unit, population = measlesWeserEms@map$POPULATION / 100000, labels = list(font = 2), colorkey = list(space = "right"), sp.layout = layout.scalebar(measlesWeserEms@map, corner = c(0.05, 0.05), scale = 50, labels = c("0", "50 km"), height = 0.03)) @ The overall time-series plot in Figure~\ref{fig:measlesWeserEms-1} reveals strong seasonality in the data with slightly different patterns in the two years. The spatial plot in Figure~\ref{fig:measlesWeserEms-2} is a tweaked \code{spplot} (package \CRANpkg{sp}) with colors from \CRANpkg{colorspace} \citep{R:colorspace} using $\sqrt{}$-equidistant cut points. The default plot \code{type} is \code{observed ~ time | unit} and displays the district-specific time series. 
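For reference, this default view corresponds to the explicit call

<<eval=FALSE>>=
plot(measlesWeserEms, type = observed ~ time | unit)
@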
Here we show the output of the equivalent \code{autoplot}-method
(Figure~\ref{fig:measlesWeserEms15}), which is based on and requires
\CRANpkg{ggplot2} \citep{R:ggplot2}:

<<measlesWeserEms15, fig.cap=paste("Time series of the", sum(colSums(observed(measlesWeserEms)) > 0), "affected districts."), out.width="\\linewidth", fig.width=10, fig.height=6, fig.pos="htb">>=
if (require("ggplot2")) {
    autoplot(measlesWeserEms,
             units = which(colSums(observed(measlesWeserEms)) > 0))
} else plot(measlesWeserEms,
            units = which(colSums(observed(measlesWeserEms)) > 0))
@

The districts \Sexpr{paste0(paste0(row.names(measlesWeserEms@map), " (", measlesWeserEms@map[["GEN"]], ")")[colSums(observed(measlesWeserEms)) == 0], collapse = " and ")}
without any reported cases are excluded in
Figure~\ref{fig:measlesWeserEms15}. Obviously, the districts have been
affected by measles to a very heterogeneous extent during these two years.

An animation of the data can easily be produced as well. We recommend using
converters of the \CRANpkg{animation} package \citep{R:animation}, e.g., to
watch the series of plots in a web browser. The following code will generate
weekly disease maps during the year 2001 with the respective total number of
cases shown in a legend and -- if package \CRANpkg{gridExtra}
\citep{R:gridExtra} is available -- an evolving time-series plot at the
bottom:

<>=
animation::saveHTML(
    animate(measlesWeserEms, tps = 1:52, total.args = list()),
    title = "Evolution of the measles epidemic in the Weser-Ems region, 2001",
    ani.width = 500, ani.height = 600)
@

<>=
## to perform the following analysis using biweekly aggregated measles counts:
measlesWeserEms <- aggregate(measlesWeserEms, by = "time", nfreq = 26)
@

\pagebreak
\section{Modeling and inference}
\label{sec:hhh4:fit}

For multivariate surveillance time series of counts such as the
\code{measlesWeserEms} data, the function \code{hhh4} fits models of the
form~\eqref{eqn:hhh4} via (penalized) maximum likelihood. We start by
modeling the measles counts in the Weser-Ems region by a slightly simplified
version of the original negative binomial model used by
\citet{held-etal-2005}. Instead of district-specific intercepts
$\alpha_i^{(\nu)}$ in the endemic component, we first assume a common
intercept $\alpha^{(\nu)}$ so that we are not forced to exclude the two
districts without any reported cases of measles. After the estimation and
illustration of this basic model, we will discuss the following sequential
extensions: covariates (district-specific vaccination coverage), estimated
transmission weights, and random effects to eventually account for
unobserved heterogeneity of the districts.
%epidemic seasonality, biweekly aggregation

\subsection{Basic model}

Our initial model has the following mean structure:
\begin{align}
  \mu_{it} &= e_i \, \nu_t + \lambda \, Y_{i,t-1} +
    \phi \sum_{j \ne i} w_{ji} Y_{j,t-1}\:,\label{eqn:hhh4:basic}\\
  \log(\nu_t) &= \alpha^{(\nu)} + \beta_t t + \gamma \sin(\omega t) +
    \delta \cos(\omega t)\:. \label{eqn:hhh4:basic:end}
\end{align}
To account for temporal variation of disease incidence, the endemic
log-linear predictor $\nu_t$ incorporates an overall trend and a sinusoidal
wave of frequency $\omega=2\pi/52$. As a basic district-specific measure of
disease incidence, the population fraction $e_i$ is included as a
multiplicative offset. The epidemic parameters
$\lambda = \exp(\alpha^{(\lambda)})$ and $\phi = \exp(\alpha^{(\phi)})$ are
assumed homogeneous across districts and constant over time.
Furthermore, we define $w_{ji} = \ind(j \sim i) = \ind(o_{ji} = 1)$ for the time being, which means that the epidemic can only arrive from directly adjacent districts. This \class{hhh4} model transforms into the following list of \code{control} arguments: <>= measlesModel_basic <- list( end = list(f = addSeason2formula(~1 + t, period = measlesWeserEms@freq), offset = population(measlesWeserEms)), ar = list(f = ~1), ne = list(f = ~1, weights = neighbourhood(measlesWeserEms) == 1), family = "NegBin1") @ The formulae of the three predictors $\log\nu_t$, $\log\lambda$ and $\log\phi$ are specified as element \code{f} of the \code{end}, \code{ar}, and \code{ne} lists, respectively. For the endemic formula we use the convenient function \code{addSeason2formula} to generate the sine-cosine terms, and we take the multiplicative \code{offset} of population fractions $e_i$ from the \code{measlesWeserEms} object. The autoregressive part only consists of the intercept $\alpha^{(\lambda)}$, whereas the neighborhood component specifies the intercept $\alpha^{(\phi)}$ and also the matrix of transmission \code{weights} $(w_{ji})$ to use -- here a simple indicator of first-order adjacency. The chosen \code{family} corresponds to a negative binomial model with a common overdispersion parameter $\psi$ for all districts. Alternatives are \code{"Poisson"}, \code{"NegBinM"} ($\psi_i$), or a factor determining which groups of districts share a common overdispersion parameter. Together with the data, the complete list of control arguments is then fed into the \code{hhh4} function to estimate the model: <>= measlesFit_basic <- hhh4(stsObj = measlesWeserEms, control = measlesModel_basic) @ The fitted model is summarized below: <>= summary(measlesFit_basic, idx2Exp = TRUE, amplitudeShift = TRUE, maxEV = TRUE) @ The \code{idx2Exp} argument of the \code{summary} method requests the estimates for $\lambda$, $\phi$, $\alpha^{(\nu)}$ and $\exp(\beta_t)$ instead of their respective internal log-values. For instance, \code{exp(end.t)} represents the seasonality-adjusted factor by which the basic endemic incidence increases per week. The \code{amplitudeShift} argument transforms the internal coefficients $\gamma$ and $\delta$ of the sine-cosine terms to the amplitude $A$ and phase shift $\varphi$ of the corresponding sinusoidal wave $A \sin(\omega t + \varphi)$ in $\log\nu_t$ \citep{paul-etal-2008}. The resulting multiplicative effect of seasonality on $\nu_t$ is shown in Figure~\ref{fig:measlesFit_basic_endseason} produced by: <>= plot(measlesFit_basic, type = "season", components = "end", main = "") @ The epidemic potential of the process as determined by the parameters $\lambda$ and $\phi$ is best investigated by a combined measure: the dominant eigenvalue (\code{maxEV}) of the matrix $\bm{\Lambda}$ %$\Lambda_t$, %such that $\bm{\mu}_t = \bm{\nu}_t + \bm{\Lambda} \bm{Y}_{t-1}$ which has the entries $(\Lambda)_{ii} = \lambda$ %$(\Lambda_t)_{ii} = \lambda_{it}$ on the diagonal and $(\Lambda)_{ij} = \phi w_{ji}$ %$(\Lambda_t)_{ij} = \phi_{it} w_{ji}$ for $j\ne i$ \citep{paul-etal-2008}. If the dominant eigenvalue is smaller than 1, it can be interpreted as the epidemic proportion of disease incidence. In the above model, the estimate is \Sexpr{round(100*getMaxEV(measlesFit_basic)[1])}\%. Another way to judge the relative importance of the three model components is via a plot of the fitted mean components along with the observed counts. 
Figure~\ref{fig:measlesFitted_basic} shows this for the five districts with
more than 50 cases as well as for the sum over all districts:

<>=
districts2plot <- which(colSums(observed(measlesWeserEms)) > 50)
par(mfrow = c(2,3), mar = c(3, 5, 2, 1), las = 1)
plot(measlesFit_basic, type = "fitted", units = districts2plot,
     hide0s = TRUE, par.settings = NULL, legend = 1)
plot(measlesFit_basic, type = "fitted", total = TRUE,
     hide0s = TRUE, par.settings = NULL, legend = FALSE) -> fitted_components
@

We can see from the plots that the largest portion of the fitted mean indeed
results from the within-district autoregressive component with very little
contribution of cases from adjacent districts and a rather small endemic
incidence. The \code{plot} method invisibly returns the component values in
a list of matrices (one per unit). In the above code, we have assigned the
result from plotting the overall fit (via \code{total = TRUE}) to the object
\code{fitted_components}. Here we show the values for the weeks 20 to 22
(corresponding to the weeks 21 to 23 of the measles time series):

<<>>=
fitted_components$Overall[20:22,]
@

The first column of this matrix refers to the fitted mean (epidemic +
endemic). The next four columns refer to the epidemic (own + neighbours),
endemic, autoregressive (``own''), and neighbourhood components of the mean.
The last three columns refer to the point estimates of $\lambda$, $\phi$,
and $\nu_t$, respectively.
These values allow us to calculate the (time-averaged) proportions of the
mean explained by the different components:

<<>>=
colSums(fitted_components$Overall)[3:5] / sum(fitted_components$Overall[,1])
@

Note that the ``epidemic proportion'' obtained here
(\Sexpr{round(100*sum(fitted_components$Overall[,2]) / sum(fitted_components$Overall[,1]))}\%)
is a function of the observed time series (so could be called
``empirical''), whereas the dominant eigenvalue calculated further above is
a theoretical property derived from the autoregressive parameters alone.

Finally, the \code{overdisp} parameter from the model summary and its 95\%
confidence interval

<<>>=
confint(measlesFit_basic, parm = "overdisp")
@

suggest that a negative binomial distribution with overdispersion is more
appropriate than a Poisson model corresponding to $\psi = 0$. We can
underpin this finding by an AIC comparison, taking advantage of the
convenient \code{update} method for \class{hhh4} fits:

<>=
AIC(measlesFit_basic, update(measlesFit_basic, family = "Poisson"))
@

Other plot \code{type}s and methods for fitted \class{hhh4} models as listed
in Table~\ref{tab:methods:hhh4} will be applied in the course of the
following model extensions.

<>=
print(xtable(
    surveillance:::functionTable("hhh4", functions=list(
        Extract="getNEweights", Other="oneStepAhead")),
    caption="Generic and \\textit{non-generic} functions applicable to
        \\class{hhh4} objects.",
    label="tab:methods:hhh4"), include.rownames = FALSE)
@

\enlargethispage{\baselineskip}
\subsection{Covariates}

The \class{hhh4} model framework allows for covariate effects on the endemic
or epidemic contributions to disease incidence. Covariates may vary over
both regions and time and thus obey the same $T \times I$ matrix structure
as the observed counts. For infectious disease models, the regional
vaccination coverage is an important example of such a covariate, since it
reflects the (remaining) susceptible population.
In a thorough analysis of measles occurrence in the German federal states, \citet{herzog-etal-2010} found vaccination coverage to be associated with outbreak size. We follow their approach of using the district-specific proportion $1-v_i$ of unvaccinated children just starting school as a proxy for the susceptible population. As $v_i$ we use the proportion of children vaccinated with at least one dose among the ones presenting their vaccination card at school entry in district $i$ in the year 2004.\footnote{% First year with data for all districts -- available from the public health department of Lower Saxony (\url{https://www.nlga.niedersachsen.de/portal/live.php?navigation_id=36791&article_id=135436&_psmand=20}).} %% Note: districts are more heterogeneous in 2004 than in later years. %% Data is based on abecedarians in 2004, i.e.\ born in 1998, recommended to %% be twice vaccinated against Measles by the end of year 2000. This time-constant covariate needs to be transformed to the common matrix structure for incorporation in \code{hhh4}: <>= Sprop <- matrix(1 - measlesWeserEms@map@data$vacc1.2004, nrow = nrow(measlesWeserEms), ncol = ncol(measlesWeserEms), byrow = TRUE) summary(Sprop[1, ]) @ There are several ways to account for the susceptible proportion in our model, among which the simplest is to update the endemic population offset $e_i$ by multiplication with $(1-v_i)$. \citet{herzog-etal-2010} found that the susceptible proportion is best added as a covariate in the autoregressive component in the form \[ \lambda_i \, Y_{i,t-1} = \exp\big(\alpha^{(\lambda)} + \beta_s \log(1-v_i)\big) \, Y_{i,t-1} = \exp\big(\alpha^{(\lambda)}\big) \, (1-v_i)^{\beta_s} \, Y_{i,t-1} \] according to the mass action principle \citep{Keeling.Rohani2008}. A higher proportion of susceptibles in district $i$ is expected to boost the generation of new infections, i.e., $\beta_s > 0$. Alternatively, this effect could be assumed as an offset, i.e., $\beta_s \equiv 1$. To choose between endemic and/or autoregressive effects, and multiplicative offset vs.\ covariate modeling, we perform AIC-based model selection. First, we set up a grid of possible component updates: <>= Soptions <- c("unchanged", "Soffset", "Scovar") SmodelGrid <- expand.grid(end = Soptions, ar = Soptions) row.names(SmodelGrid) <- do.call("paste", c(SmodelGrid, list(sep = "|"))) @ Then we update the initial model \code{measlesFit_basic} according to each row of \code{SmodelGrid}: <>= measlesFits_vacc <- apply(X = SmodelGrid, MARGIN = 1, FUN = function (options) { updatecomp <- function (comp, option) switch(option, "unchanged" = list(), "Soffset" = list(offset = comp$offset * Sprop), "Scovar" = list(f = update(comp$f, ~. + log(Sprop)))) update(measlesFit_basic, end = updatecomp(measlesFit_basic$control$end, options[1]), ar = updatecomp(measlesFit_basic$control$ar, options[2]), data = list(Sprop = Sprop)) }) @ The resulting object \code{measlesFits_vacc} is a list of \Sexpr{nrow(SmodelGrid)} \class{hhh4} fits, which are named according to the corresponding \code{Soptions} used for the endemic and autoregressive components. 
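Before comparing these fits, it does not hurt to verify that all of them
converged (a quick sketch; fitted \class{hhh4} objects carry a logical
\code{convergence} element):

<<eval=FALSE>>=
stopifnot(sapply(measlesFits_vacc, "[[", "convergence"))
@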
We construct a call of the function \code{AIC} taking all list elements as
arguments:

<>=
aics_vacc <- do.call(AIC, lapply(names(measlesFits_vacc), as.name),
                     envir = as.environment(measlesFits_vacc))
@
<<>>=
aics_vacc[order(aics_vacc[, "AIC"]), ]
@
<>=
if (AIC(measlesFits_vacc[["Scovar|unchanged"]]) > min(aics_vacc[,"AIC"]))
    stop("`Scovar|unchanged` is not the AIC-minimal vaccination model")
@

Hence, AIC increases if the susceptible proportion is only added to the
autoregressive component, but we see a remarkable improvement when adding it
to the endemic component. The best model is obtained by leaving the
autoregressive component unchanged ($\lambda$) and adding the term
$\beta_s \log(1-v_i)$ to the endemic predictor in
Equation~\ref{eqn:hhh4:basic:end}.

<>=
measlesFit_vacc <- update(measlesFit_basic,
    end = list(f = update(formula(measlesFit_basic)$end, ~. + log(Sprop))),
    data = list(Sprop = Sprop))
coef(measlesFit_vacc, se = TRUE)["end.log(Sprop)", ]
@

The estimated exponent $\hat{\beta}_s$ is both clearly positive and
different from the offset assumption. In other words, if a district's
fraction of susceptibles is doubled, the endemic measles incidence is
estimated to multiply by $2^{\hat{\beta}_s}$:

<<>>=
2^cbind("Estimate" = coef(measlesFit_vacc),
        confint(measlesFit_vacc))["end.log(Sprop)",]
@

\subsection{Spatial interaction}

Up to now, the model assumed that the epidemic can only arrive from directly
adjacent districts ($w_{ji} = \ind(j\sim i)$), and that all districts have
the same ability $\phi$ to import cases from neighboring regions. Given that
humans travel further and preferably to metropolitan areas, both assumptions
seem overly simplistic and should be tuned toward a ``gravity'' model for
human interaction.

First, to reflect commuter-driven spread
%\citep[Section~6.3.3.1]{Keeling.Rohani2008}
in our model, we scale the district's susceptibility with respect to its
population fraction by multiplying $\phi$ with $e_i^{\beta_{pop}}$:

<>=
measlesFit_nepop <- update(measlesFit_vacc,
    ne = list(f = ~log(pop)),
    data = list(pop = population(measlesWeserEms)))
@

As in similar analyses of influenza
\citep{geilhufe.etal2012,meyer.held2013}, we find strong evidence for such
an agglomeration effect: AIC decreases from
\Sexpr{round(AIC(measlesFit_vacc))} to \Sexpr{round(AIC(measlesFit_nepop))}
and the estimated exponent $\hat{\beta}_{pop}$ is

<<>>=
cbind("Estimate" = coef(measlesFit_nepop),
      confint(measlesFit_nepop))["ne.log(pop)",]
@

Second, to account for long-range transmission of cases,
\citet{meyer.held2013} proposed to estimate the weights $w_{ji}$ as a
function of the adjacency order $o_{ji}$ between the districts. For
instance, a power-law model assumes the form $w_{ji} = o_{ji}^{-d}$, for
$j\ne i$ and $w_{jj}=0$, where the decay parameter $d$ is to be estimated.
Normalization to $w_{ji} / \sum_k w_{jk}$ is recommended and applied by
default when choosing \code{W_powerlaw} as weights in the neighborhood
component:

<>=
measlesFit_powerlaw <- update(measlesFit_nepop,
    ne = list(weights = W_powerlaw(maxlag = 5)))
@

The argument \code{maxlag} sets an upper bound for spatial interaction in
terms of adjacency order. Here we set no limit since
\code{max(neighbourhood(measlesWeserEms))} is
\Sexpr{max(neighbourhood(measlesWeserEms))}. The decay parameter $d$ is
estimated to be

<<>>=
cbind("Estimate" = coef(measlesFit_powerlaw),
      confint(measlesFit_powerlaw))["neweights.d",]
@

which represents a strong decay of spatial interaction for higher-order
neighbors.
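To see what this estimate implies, we can tabulate the non-normalized
weights $o^{-\hat{d}}$ for the first few adjacency orders (a minimal sketch;
the normalized weights additionally depend on each row's sum):

<<eval=FALSE>>=
d_hat <- coef(measlesFit_powerlaw)["neweights.d"]
setNames((1:5)^-d_hat, paste("order", 1:5))
@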
As an alternative to the parametric power law, unconstrained weights up to \code{maxlag} can be estimated by using \code{W_np} instead of \code{W_powerlaw}. For instance, \code{W_np(maxlag = 2)} corresponds to a second-order model, i.e., \mbox{$w_{ji} = 1 \cdot \ind(o_{ji} = 1) + e^{\omega_2} \cdot \ind(o_{ji} = 2)$}, which is also row-normalized by default: <>= measlesFit_np2 <- update(measlesFit_nepop, ne = list(weights = W_np(maxlag = 2))) @ Figure~\ref{fig:measlesFit_neweights-2} shows both the power-law model $o^{-\hat{d}}$ and the second-order model. %, where $e^{\hat{\omega}_2}$ is Alternatively, the plot \code{type = "neweights"} for \class{hhh4} fits can produce a \code{stripplot} \citep{R:lattice} of $w_{ji}$ against $o_{ji}$ as shown in Figure~\ref{fig:measlesFit_neweights-1} for the power-law model: <>= library("lattice") trellis.par.set("reference.line", list(lwd=3, col="gray")) trellis.par.set("fontsize", list(text=14)) set.seed(20200303) plot(measlesFit_powerlaw, type = "neweights", plotter = stripplot, panel = function (...) {panel.stripplot(...); panel.average(...)}, jitter.data = TRUE, xlab = expression(o[ji]), ylab = expression(w[ji])) ## non-normalized weights (power law and unconstrained second-order weight) local({ colPL <- "#0080ff" ogrid <- 1:5 par(mar=c(3.6,4,2.2,2), mgp=c(2.1,0.8,0)) plot(ogrid, ogrid^-coef(measlesFit_powerlaw)["neweights.d"], col=colPL, xlab="Adjacency order", ylab="Non-normalized weight", type="b", lwd=2) matlines(t(sapply(ogrid, function (x) x^-confint(measlesFit_powerlaw, parm="neweights.d"))), type="l", lty=2, col=colPL) w2 <- exp(c(coef(measlesFit_np2)["neweights.d"], confint(measlesFit_np2, parm="neweights.d"))) lines(ogrid, c(1,w2[1],0,0,0), type="b", pch=19, lwd=2) arrows(x0=2, y0=w2[2], y1=w2[3], length=0.1, angle=90, code=3, lty=2) legend("topright", col=c(colPL, 1), pch=c(1,19), lwd=2, bty="n", inset=0.1, y.intersp=1.5, legend=c("Power-law model", "Second-order model")) }) @ Note that only horizontal jitter is added in this case. Because of normalization, the weight $w_{ji}$ for transmission from district $j$ to district $i$ is determined not only by the districts' neighborhood $o_{ji}$ but also by the total amount of neighborhood of district $j$ in the form of $\sum_{k\ne j} o_{jk}^{-d}$, which causes some variation of the weights for a specific order of adjacency. The function \code{getNEweights} can be used to extract the estimated weight matrix $(w_{ji})$. An AIC comparison of the different models for the transmission weights yields: <<>>= AIC(measlesFit_nepop, measlesFit_powerlaw, measlesFit_np2) @ AIC improves when accounting for transmission from higher-order neighbors by a power law or a second-order model. In spite of the latter resulting in a slightly better fit, we will use the power-law model as a basis for further model extensions since the stand-alone second-order effect is not always identifiable in more complex models and is scientifically implausible. \subsection{Random effects} \citet{paul-held-2011} introduced random effects for \class{hhh4} models, which are useful if the districts exhibit heterogeneous incidence levels not explained by observed covariates, and especially if the number of districts is large. For infectious disease surveillance data, a typical example of unobserved heterogeneity is underreporting. 
Our measles data even contain two districts without any reported cases, while the district with the smallest population (03402, SK Emden) had the second-largest number of cases reported and the highest overall incidence (see Figures~\ref{fig:measlesWeserEms-2} and~\ref{fig:measlesWeserEms15}). Hence, allowing for district-specific intercepts in the endemic or epidemic components is expected to improve the model fit. For independent random effects $\alpha_i^{(\nu)} \stackrel{iid}{\sim} \N(\alpha^{(\nu)}, \sigma_\nu^2)$, $\alpha_i^{(\lambda)} \stackrel{iid}{\sim} \N(\alpha^{(\lambda)}, \sigma_\lambda^2)$, and $\alpha_i^{(\phi)} \stackrel{iid}{\sim} \N(\alpha^{(\phi)}, \sigma_\phi^2)$ in all three components, we update the corresponding formulae as follows: <>= measlesFit_ri <- update(measlesFit_powerlaw, end = list(f = update(formula(measlesFit_powerlaw)$end, ~. + ri() - 1)), ar = list(f = update(formula(measlesFit_powerlaw)$ar, ~. + ri() - 1)), ne = list(f = update(formula(measlesFit_powerlaw)$ne, ~. + ri() - 1))) @ <>= summary(measlesFit_ri, amplitudeShift = TRUE, maxEV = TRUE) @ <>= ## strip leading and trailing empty lines writeLines(tail(head(capture.output({ <> }), -1), -1)) @ The summary now contains an extra section with the estimated variance components $\sigma_\lambda^2$, $\sigma_\phi^2$, and $\sigma_\nu^2$. We did not assume correlation between the three random effects, but this is possible by specifying \code{ri(corr = "all")} in the component formulae. The implementation also supports a conditional autoregressive formulation for spatially correlated intercepts via \code{ri(type = "car")}. The estimated district-specific deviations $\alpha_i^{(\cdot)} - \alpha^{(\cdot)}$ can be extracted by the \code{ranef}-method: <<>>= head(ranef(measlesFit_ri, tomatrix = TRUE), n = 3) @ The \code{exp}-transformed deviations correspond to district-specific multiplicative effects on the model components, which can be visualized via the plot \code{type = "ri"} as follows (Figure~\ref{fig:measlesFit_ri_map}): % exp=TRUE relies on 'scales::log_breaks' <>= for (comp in c("ar", "ne", "end")) { print(plot(measlesFit_ri, type = "ri", component = comp, exp = TRUE, labels = list(cex = 0.6))) } @ For the autoregressive component in Figure~\ref{fig:measlesFit_ri_map-1}, we see a pronounced heterogeneity between the three western districts in pink and the remaining districts. These three districts have been affected by large local outbreaks and are also the ones with the highest overall numbers of cases. In contrast, the city of Oldenburg (03403) is estimated with a relatively low autoregressive coefficient: $\lambda_i = \exp(\alpha_i^{(\lambda)})$ can be extracted using the \code{intercept} argument as <<>>= exp(ranef(measlesFit_ri, intercept = TRUE)["03403", "ar.ri(iid)"]) @ However, this district seems to import more cases from other districts than explained by its population (Figure~\ref{fig:measlesFit_ri_map-2}). In Figure~\ref{fig:measlesFit_ri_map-3}, the two districts without any reported measles cases (03401 and 03405) appear in cyan, which means that they exhibit a relatively low endemic incidence after adjusting for the population and susceptible proportion. Such districts could be suspected of a larger amount of underreporting. 
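To identify such districts programmatically, one could rank the endemic
random effects (a sketch only, assuming the endemic column is analogously
named \code{"end.ri(iid)"}):

<<eval=FALSE>>=
re_end <- ranef(measlesFit_ri, tomatrix = TRUE)[, "end.ri(iid)"]
head(sort(re_end), n = 3)  # districts with the lowest endemic level
@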
We plot the new model fit (Figure~\ref{fig:measlesFitted_ri}) for comparison
with the initial fit shown in Figure~\ref{fig:measlesFitted_basic}:

<>=
par(mfrow = c(2,3), mar = c(3, 5, 2, 1), las = 1)
plot(measlesFit_ri, type = "fitted", units = districts2plot,
     hide0s = TRUE, par.settings = NULL, legend = 1)
plot(measlesFit_ri, type = "fitted", total = TRUE,
     hide0s = TRUE, par.settings = NULL, legend = FALSE)
@

For some of these districts, a large share of the cases is now explained via
transmission from neighboring regions while others are mainly influenced by
the local autoregression. The decomposition of the estimated mean by
district can also be seen from the related plot \code{type = "maps"}
(Figure~\ref{fig:measlesFitted_maps}):

<>=
plot(measlesFit_ri, type = "maps",
     which = c("epi.own", "epi.neighbours", "endemic"),
     prop = TRUE, labels = list(cex = 0.6))
@

The extra flexibility of the random effects model comes at a price. First,
the runtime of the estimation increases considerably from
\Sexpr{round(measlesFit_powerlaw[["runtime"]]["elapsed"], 1)} seconds for
the previous power-law model \code{measlesFit_powerlaw} to
\Sexpr{round(measlesFit_ri[["runtime"]]["elapsed"], 1)} seconds with random
effects. Furthermore, we no longer obtain AIC values, since random effects
invalidate simple AIC-based model comparisons. For quantitative comparisons
of model performance we have to resort to more sophisticated techniques
presented in the next section.

\subsection{Predictive model assessment}

\citet{paul-held-2011} suggest evaluating one-step-ahead forecasts from
competing models using proper scoring rules for count data
\citep{czado-etal-2009}. These scores measure the discrepancy between the
predictive distribution $P$ from a fitted model and the later observed value
$y$. A well-known example is the squared error score (``ses'')
$(y-\mu_P)^2$, which is usually averaged over a set of forecasts to obtain
the mean squared error. The Dawid-Sebastiani score (``dss'') additionally
evaluates sharpness. The logarithmic score (``logs'') and the ranked
probability score (``rps'') assess the whole predictive distribution with
respect to calibration and sharpness. Lower scores correspond to better
predictions.

In the \class{hhh4} framework, predictive model assessment is made available
by the functions \code{oneStepAhead}, \code{scores}, \code{pit}, and
\code{calibrationTest}. We will use the second quarter of 2002 as the test
period, and compare the basic model, the power-law model, and the random
effects model. First, we use the \code{"final"} fits on the complete time
series to compute the predictions, which then simply correspond to the
fitted values during the test period:

<>=
tp <- c(65, 77)
models2compare <- paste0("measlesFit_", c("basic", "powerlaw", "ri"))
measlesPreds1 <- lapply(mget(models2compare), oneStepAhead,
                        tp = tp, type = "final")
@

<>=
stopifnot(all.equal(measlesPreds1$measlesFit_powerlaw$pred,
                    fitted(measlesFit_powerlaw)[tp[1]:tp[2],],
                    check.attributes = FALSE))
@

Note that in this case, the log-score for a model's prediction in district
$i$ in week $t$ equals the associated negative log-likelihood contribution.
Comparing the mean scores from different models is thus essentially a
goodness-of-fit assessment:

<<>>=
stopifnot(all.equal(
    measlesFit_powerlaw$loglikelihood,
    -sum(scores(oneStepAhead(measlesFit_powerlaw, tp = 1, type = "final"),
                which = "logs", individual = TRUE))))
@
<<>>=
SCORES <- c("logs", "rps", "dss", "ses")
measlesScores1 <- lapply(measlesPreds1, scores, which = SCORES,
                         individual = TRUE)
t(sapply(measlesScores1, colMeans, dims = 2))
@

According to all these scoring rules, the random effects model gives the best
fit during the second quarter of 2002. Now we turn to true one-week-ahead
predictions of \code{type = "rolling"}, which means that we always refit the
model up to week $t$ to obtain predictions for week $t+1$:

<<>>=
measlesPreds2 <- lapply(mget(models2compare), oneStepAhead,
                        tp = tp, type = "rolling", which.start = "final")
@

Figure~\ref{fig:measlesPreds2_plot} shows \CRANpkg{fanplot}s \citep{R:fanplot}
of the sequential one-week-ahead forecasts from the random effects model for
the same districts as in Figure~\ref{fig:measlesFitted_ri}:

<<measlesPreds2_plot>>=
par(mfrow = sort(n2mfrow(length(districts2plot))), mar = c(4.5,4.5,2,1))
for (unit in names(districts2plot))
    plot(measlesPreds2[["measlesFit_ri"]], unit = unit, main = unit,
         key.args = if (unit == tail(names(districts2plot),1)) list())
@

The \code{plot}-method for \class{oneStepAhead} predictions is based on the
associated \code{quantile}-method (a \code{confint}-method is also available).
Note that the sum of these negative binomially distributed forecasts over all
districts is not negative binomially distributed. The package \CRANpkg{distr}
\citep{ruckdeschel.kohl2014} could be used to approximate the distribution of
the aggregated one-step-ahead forecasts (not shown here).

Looking at the average scores of these forecasts over all weeks and districts,
the most parsimonious initial model \code{measlesFit_basic} actually turns out
best:

<<>>=
measlesScores2 <- lapply(measlesPreds2, scores, which = SCORES,
                         individual = TRUE)
t(sapply(measlesScores2, colMeans, dims = 2))
@

Statistical significance of the differences in mean scores can be investigated
by a \code{permutationTest} for paired data or a paired $t$-test:

<<>>=
set.seed(321)
sapply(SCORES, function (score) permutationTest(
    measlesScores2$measlesFit_ri[, , score],
    measlesScores2$measlesFit_basic[, , score],
    nPermutation = 999))
@

Hence, there is no clear evidence for a difference between the basic and the
random effects model with regard to predictive performance during the test
period.

Whether the predictions of a particular model are well calibrated can be
formally investigated by \code{calibrationTest}s for count data as recently
proposed by \citet{wei.held2013}. For example:

<<eval=FALSE>>=
calibrationTest(measlesPreds2[["measlesFit_ri"]], which = "rps")
@
<<echo=FALSE>>=
## strip leading and trailing empty lines
writeLines(tail(head(capture.output(
    calibrationTest(measlesPreds2[["measlesFit_ri"]], which = "rps")
), -1), -1))
@

Thus, there is no evidence of miscalibrated predictions from the random
effects model. \citet{czado-etal-2009} describe an alternative, informal
approach to assess calibration: probability integral transform (PIT)
histograms for count data (Figure~\ref{fig:measlesPreds2_pit}).

<<measlesPreds2_pit>>=
par(mfrow = sort(n2mfrow(length(measlesPreds2))), mar = c(4.5,4.5,2,1),
    las = 1)
for (m in models2compare)
    pit(measlesPreds2[[m]], plot = list(ylim = c(0, 1.25), main = m))
@

Under the hypothesis of calibration, i.e., $y_{it} \sim P_{it}$ for all
predictive distributions $P_{it}$ in the test period, the PIT histogram is
uniform.
Underdispersed predictions lead to U-shaped histograms, and bias causes
skewness. In this aggregate view of the predictions over all districts and
weeks of the test period, predictive performance is comparable between the
models, and there is no evidence of badly dispersed predictions. However, the
right-hand decay in all histograms suggests that all models tend to predict
higher counts than observed. This is most likely related to the seasonal shift
between the years 2001 and 2002: in 2001, the peak of the epidemic was in the
second quarter, while in 2002 it already occurred in the first quarter
(cf.\ Figure~\ref{fig:measlesWeserEms-1}).

\subsection{Further modeling options}

In the previous sections we extended our model for measles in the Weser-Ems
region with respect to spatial variation of the counts and their interaction.
Temporal variation was only accounted for in the endemic component, which
included a long-term trend and a sinusoidal wave on the log-scale.
\citet{held.paul2012} suggest to also allow for seasonal variation of the
epidemic force by adding a superposition of $S$ harmonic waves of fundamental
frequency~$\omega$, $\sum_{s=1}^S \left\{ \gamma_s \sin(s\,\omega t) +
\delta_s \cos(s\,\omega t) \right\}$, to the log-linear predictors of the
autoregressive and/or neighborhood component -- just like for $\log\nu_t$ in
Equation~\ref{eqn:hhh4:basic:end} with $S=1$. However, given only two years of
measles surveillance and the apparent shift of seasonality with regard to the
start of the outbreak in 2002 compared to 2001, more complex seasonal models
are likely to overfit the data. Concerning the coding in \proglang{R},
sine-cosine terms can be added to the epidemic components without difficulty
by again using the convenient function \code{addSeason2formula}. Updating a
previous model for different numbers of harmonics is even simpler, since the
\code{update}-method has a corresponding argument \code{S}. The plots of
\code{type = "season"} and \code{type = "maxEV"} for \class{hhh4} fits can
visualize the estimated component seasonality.

Performing model selection and interpreting seasonality or other covariate
effects across \emph{three} different model components may become quite
complicated. Power-law weights actually enable a more parsimonious model
formulation, where the autoregressive and neighborhood components are merged
into a single epidemic component:
\begin{equation}
\mu_{it} = e_{it} \, \nu_{it} + \phi_{it} \sum_{j} (o_{ji} + 1)^{-d} \,
Y_{j,t-1} \:.
\end{equation}
With only two predictors left, model selection and interpretation become
simpler, and model extensions are more straightforward, for example
stratification by age group \citep{meyer.held2015} as mentioned further below.
To fit such a two-component model, the autoregressive component has to be
excluded (\code{ar = list(f = ~ -1)}) and power-law weights have to be
modified to start from adjacency order~0 (via \code{W_powerlaw(..., from0 =
TRUE)}).

<<>>=
## a simplified model which includes the autoregression in the power law
measlesFit_powerlaw2 <- update(measlesFit_powerlaw,
    ar = list(f = ~ -1),
    ne = list(weights = W_powerlaw(maxlag = 5, from0 = TRUE)))
AIC(measlesFit_powerlaw, measlesFit_powerlaw2)
## simpler is really worse; probably needs random effects
@

All of our models for the measles surveillance data incorporated an epidemic
effect of the counts from the local district and its neighbors. We thereby
implicitly assumed a lag equal to the observation interval of one week.
However, the generation time of measles is around 10 days, which is why
\citet{herzog-etal-2010} aggregated their weekly measles surveillance data
into biweekly intervals. We can perform a sensitivity analysis by running the
whole code of the current section based on \code{aggregate(measlesWeserEms,
nfreq = 26)}. Doing so, the parameter estimates of the various models retain
their order of magnitude and the conclusions remain the same. However, with
the number of time points halved, the complex random effects model would not
always be identifiable when calculating one-step-ahead predictions during the
test period.
%% basic model: same epidemic parameters and dominant eigenvalue (0.78), same overdispersion (1.94)
%% vaccination: the exponent $\beta_s$ for the susceptible proportion in the
%% extended model \code{"Scovar|unchanged"} is closer to 1 (1.24), which is why
%% \code{"Soffset|unchanged"} is selected by AIC.
%% random effects: less variance, but similar pattern

We have shown several options to account for the spatio-temporal dynamics of
infectious disease spread. However, for directly transmitted human diseases,
the social phenomenon of ``like seeks like'' results in contact patterns
between subgroups of a population, which extend beyond the pure distance decay
of interaction. Especially for school children, social contacts are highly
age-dependent. A useful epidemic model should therefore be additionally
stratified by age group and take the inherent contact structure into account.
How this extension can be incorporated in the spatio-temporal
endemic-epidemic modeling framework \class{hhh4} has recently been
investigated by \citet{meyer.held2015}. The associated \CRANpkg{hhh4contacts}
package \citep{R:hhh4contacts} contains a demo script to exemplify this
modeling approach with surveillance data on norovirus gastroenteritis and an
age-structured contact matrix.

\section{Simulation}\label{sec:hhh4:simulation}

Simulation from fitted \class{hhh4} models is enabled by an associated
\code{simulate}-method. Compared to the point process models described in
\code{vignette("twinstim")} and \code{vignette("twinSIR")}, simulation is less
complex since it essentially consists of sequential calls of \code{rnbinom}
(or \code{rpois}). At each time point $t$, the mean $\mu_{it}$ is determined
by plugging in the parameter estimates and the counts $Y_{i,t-1}$ simulated at
the previous time point. In addition to a model fit, we thus need to specify
an initial vector of counts \code{y.start}. As an example, we simulate 100
realizations of the evolution of measles during the year 2002 based on the
fitted random effects model and the counts of the last week of the year 2001
in the 17 districts:

<<>>=
(y.start <- observed(measlesWeserEms)[52, ])
measlesSim <- simulate(measlesFit_ri,
                       nsim = 100, seed = 1, subset = 53:104,
                       y.start = y.start)
@

The simulated counts are returned as a $52\times 17\times 100$ array instead
of a list of 100 \class{sts} objects. We can, e.g., look at the final size
distribution of the simulations:

<<>>=
summary(colSums(measlesSim, dims = 2))
@

A few large outbreaks have been simulated, but the mean size is below the
observed number of \code{sum(observed(measlesWeserEms)[53:104, ])} $=
\Sexpr{sum(observed(measlesWeserEms)[53:104,])}$ cases in the year 2002.
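
To make the sequential mechanism described above concrete, the following
unevaluated chunk sketches a hand-rolled simulator for a single unit; the
function \code{simByHand} and its parameters (endemic mean \code{nu},
autoregressive coefficient \code{lambda}, overdispersion \code{size}, initial
count \code{y0}) are hypothetical illustrations of the principle, not part of
the package:

<<eval=FALSE>>=
## hedged sketch of the sequential simulation mechanism for a single unit
simByHand <- function(nTime, nu, lambda, size, y0) {
    y <- numeric(nTime)
    y_prev <- y0
    for (t in seq_len(nTime)) {
        mu <- nu + lambda * y_prev        # endemic plus epidemic mean
        y[t] <- rnbinom(1, mu = mu, size = size)
        y_prev <- y[t]
    }
    y
}
## e.g., simByHand(52, nu = 1, lambda = 0.5, size = 2, y0 = 3)
@
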
Using the \code{plot}-method associated with such \code{hhh4} simulations,
Figure~\ref{fig:measlesSim_plot_time} shows the weekly number of observed
cases compared to the long-term forecast via a fan chart:

<<measlesSim_plot_time>>=
plot(measlesSim, "fan", means.args = list(), key.args = list())
@

We refer to \code{help("simulate.hhh4")} and \code{help("plot.hhh4sims")} for
further examples.

\pagebreak[2]

%--------------
% BIBLIOGRAPHY
%--------------

<<include=FALSE>>=
## create automatic references for R packages
.Rbibfile <- file("hhh4_spacetime-R.bib", "w", encoding = "latin1")
Rbib <- knitr::write_bib(c("MASS", "Matrix", "spdep", "colorspace",
                           "gridExtra", "lattice", "sp", "animation",
                           "fanplot", "hhh4contacts"),
                         file = NULL, tweak = FALSE, prefix = "R:")
## write_bib() to file does enc2utf8() -> fails for ISO8859-15 session charset
writeLines(unlist(Rbib, use.names = FALSE), .Rbibfile)
close(.Rbibfile)
@

\bibliography{references,hhh4_spacetime-R}

<<include=FALSE>>=
save(aics_vacc, measlesPreds2, file = "hhh4_spacetime-cache.RData")
@

\end{document}

surveillance/vignettes/surveillance-hmm.pdf
[binary PDF graphic omitted]
surveillance/vignettes/glrnb.Rnw

%\VignetteIndexEntry{algo.glrnb: Count data regression charts using the generalized likelihood ratio statistic}
\documentclass[a4paper,11pt]{article}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{natbib}
\bibliographystyle{apalike}
\usepackage{lmodern}
\usepackage{amsmath}
\usepackage{amsfonts,amssymb}
\setlength{\parindent}{0pt}

%%% Meta data
\usepackage{hyperref}
\hypersetup{
  pdfauthor = {Valentin Wimmer and Michael H\"ohle},
  pdftitle = {'algo.glrnb': Count data regression charts using the generalized likelihood ratio statistic},
  pdfsubject = {R package 'surveillance'}
}

\title{\texttt{algo.glrnb}: Count data regression charts using the
  generalized likelihood ratio statistic}
\author{
Valentin Wimmer$^{(1,2)}$\thanks{Author of correspondence: \texttt{Valentin.Wimmer@gmx.de}}\; and Michael H\"{o}hle$^{(1,2)}$ \\
(1) Department of Statistics, University of Munich, Germany\\
(2) MC-Health -- Munich Center of Health Sciences
}
\date{6 June 2008}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Sweave
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{Sweave}
\SweaveOpts{prefix.string=plots/glrnb}
\setkeys{Gin}{width=1\textwidth}
\DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl,fontsize=\footnotesize}
\DefineVerbatimEnvironment{Soutput}{Verbatim}{fontsize=\footnotesize}
\DefineVerbatimEnvironment{Scode}{Verbatim}{fontshape=sl,fontsize=\footnotesize}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Initial R code
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
<<echo=FALSE,results=hide>>=
library("surveillance")
options(SweaveHooks=list(fig=function() par(mar=c(4,4,2,0)+.5)))
options(width=70)
set.seed(247)
## create directory for plots
dir.create("plots", showWarnings=FALSE)
@

\begin{document}
\maketitle

\begin{abstract}
\noindent The aim of this document is to show the use of the function
\verb+algo.glrnb+ for a type of count data regression chart, the generalized
likelihood ratio (GLR) statistic. The function is part of the \textsf{R}
package \textbf{surveillance} \citep{hoehle-2007}, which provides outbreak
detection algorithms for surveillance data. For an introduction to these
monitoring features of the package, see \texttt{vignette("surveillance")}.
There one can find information about the data structure of the \verb+disProg+
and \verb+survRes+ objects. Furthermore, tools for outbreak detection, such as
a Bayesian approach, the procedures described by \citet{stroup89} and
\citet{farrington96}, and the methods used at the Robert Koch-Institut,
Germany, are explained. The function \verb+algo.glrnb+ is the implementation
of the control charts for the Poisson and negative binomial distributions for
monitoring time series of counts described in \citet{hoehle.paul2008}. This
document gives an overview of the different features of the function, and
illustrations of its use are given for simulated and real surveillance
data. \\

\noindent{\bf Keywords:} change-point detection, generalized regression
charts, Poisson and negative binomial distribution, increase and decrease
\end{abstract}

\section{Introduction}\label{sec:intro}

The monitoring of infectious diseases requires the analysis of time series of
routinely collected surveillance data. Methods of statistical process control
(SPC) can be used for this purpose. Here it is important that the methods can
handle the special features of surveillance data, e.g.\ seasonality of the
disease or the count data nature of the collected data. It is also important
that not only the counts at a single time point (week, month) are considered,
but also the cases of previous time points, because besides abrupt changes,
small sustained changes should be detected as well. CUSUM methods (function
\verb+algo.cusum+), LR charts, and GLR methods as described by \citet{lai95}
and \citet{hoehle.paul2008} can accomplish this. With the function
\verb+algo.glrnb+ these methods can easily be applied to surveillance data.

A typical assumption for a time series of counts is that the observed counts
at each time point follow a Poisson distribution. If overdispersion is likely,
the negative binomial distribution provides a better alternative. Both
distributions are provided by \verb+algo.glrnb+. In the GLR scheme, an
outbreak can be defined as a change in the intercept. The function
\verb+algo.glrnb+ allows the user to specify whether increases or decreases in
mean should be regarded. For each time point a GLR statistic is computed; if
this statistic exceeds a threshold value, an alarm is given. The function also
provides the possibility to return the number of cases that would have been
necessary to produce an alarm.
This vignette is organized as follows: First, in Section \ref{sec:prel} the
data structure is explained, in Section \ref{sec:glr} a short introduction to
the theory of GLR charts is given, and Section \ref{sec:control} shows the
different \verb+control+ settings.
% In Section \ref{sec:extensions} some possible extensions are presented.

\section{Preliminaries}\label{sec:prel}

Consider the situation where a time series of counts is collected for
surveillance purposes. In each interval, usually one week, the number of cases
of the disease of interest in an area (country, district) is counted. The
resulting time series is denoted by $\{y_t\>;t=1,\ldots,n\}$. Usually the data
are collected online, so that time point $n$ is the current time point. Our
aim is to decide, with the aid of a statistic for each time point $n$, if
there is an outbreak at this or any former time point. If an outbreak is
detected, the algorithm gives an alarm.

Observed time series of counts are saved in a \verb+disProg+ object, a list
containing the time series of counts, the number of weeks, and a state chain.
The state is 1 if, e.g., the Robert Koch-Institut declares the week to be part
of an outbreak, and 0 otherwise. By using the state chain, the quality of the
surveillance algorithm can be tested.
%The 'surveillance'-package provides standard plot routines for the surveillance objects.
As a first example, the number of cases of \textit{Salmonella} Hadar in the
years 2001--2006 is examined. \\

\textit{Example 1:}
<<fig=TRUE>>=
data(shadar)
plot(shadar,main="Number of salmonella hadar cases in Germany 2001-2006")
@

The package provides the possibility to simulate surveillance data with the
functions \verb+sim.pointSource+, \verb+sim.seasonalNoise+ and
\verb+sim.HHH+. See \citet{hoehle-2007} and \texttt{vignette("surveillance")}
for further information. \\

\textit{Example 2:}
<<>>=
# Simulate data
simData <- sim.pointSource(length=300,K=0.5,r=0.6,p=0.95)
@
<<fig=TRUE>>=
plot(simData)
@

\section{LR and GLR charts}\label{sec:glr}

Our aim is to detect a significant change in the number of cases. This is done
as follows. One assumes that there is a usual number of cases, the in-control
mean $\mu_0$. The in-control mean is defined in \citet{hoehle.paul2008} to be
\begin{equation} \label{mu0}
\operatorname{log}(\mu_{0,t})=\beta_0 + \beta_1t + \sum_{s=1}^S(\beta_{2s}
\cos(\omega s t) + \beta_{2s+1}\sin(\omega s t)).
\end{equation}
If an outbreak occurs, the number of cases increases and the situation is out
of control, so the algorithm should produce an alarm. The change is assumed to
be an additive increase on the log scale,
\begin{equation} \label{interceptchange}
\operatorname{log}(\mu_1)= \operatorname{log}(\mu_0) + \kappa .
\end{equation}
If $\mu_0$ is unknown, one could use a part of the data to estimate it with a
generalized linear model (GLM). If $\kappa$ is known, LR charts can be used;
if not, $\kappa$ has to be estimated, which is the GLR scheme setting. For
each time point, the likelihood ratio statistic is computed as follows:
\begin{equation} \label{cusum}
GLR(n)=\max_{1 \leq k \leq n} \sup_{\theta \in \Theta} \left[ \sum_{t=k}^n
\log \left\{ \frac{f_{\theta}(y_t)}{f_{\theta_0}(y_t)} \right\} \right] .
\end{equation}
Now $N=\inf \{n \geq 1 : GLR(n) \geq c_{\gamma} \}$ is the first time point
where the GLR statistic is above a threshold $c_{\gamma}$. For this time point
$N$ an alarm is given. If the parameter $\kappa$ and hence $\theta=\kappa$ is
known, the maximisation over $\theta$ can be omitted.
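
To make Equation~\ref{cusum} concrete, the following unevaluated chunk
sketches a direct (and computationally naive) implementation for the Poisson
case with an intercept increase, where the restricted maximum likelihood
estimate of $\kappa$ over the window $k,\ldots,n$ is available in closed form;
the function \verb+glrPois+ is our own illustration, not part of the package:

<<eval=FALSE>>=
## hedged sketch of the Poisson GLR statistic for an intercept increase:
## for each n, maximize the log-likelihood ratio over change points k <= n,
## using the closed-form MLE kappa = max(0, log(sum(y)/sum(mu0)))
glrPois <- function(y, mu0) {
  sapply(seq_along(y), function (i) {
    llr <- sapply(seq_len(i), function (k) {
      Sy <- sum(y[k:i]); Smu <- sum(mu0[k:i])
      kappa <- max(0, log(Sy/Smu))  # restricted to increases
      kappa * Sy - (exp(kappa) - 1) * Smu
    })
    max(llr)
  })
}
@

An alarm would then be flagged at the first time point where this statistic
exceeds the threshold $c_{\gamma}$; \verb+algo.glrnb+ performs this kind of
computation efficiently in C, also covering the negative binomial case.
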
With the function \verb+algo.glrnb+ one can compute the GLR statistic for
every time point. If the actual value exceeds the chosen threshold
$c_{\gamma}$, an alarm is given. After every alarm, the algorithm is reset and
the surveillance starts again.

The result of a call of \verb+algo.glrnb+ is an object of class
\verb+survRes+. This is basically a list with several components. The most
important one is the \verb+upperbound+ statistic, which is a vector of length
$n$ containing the likelihood ratio statistic for every time point under
surveillance. The \verb+alarm+ vector contains a Boolean for every time point
indicating whether there was an alarm. \\

At this point in the vignette we move more into the applied direction and
refer the user to \citet{hoehle.paul2008} for further theoretical details
about the GLR procedure. The next example demonstrates surveillance with
\verb+algo.glrnb+ in a learning-by-doing fashion; it primarily demonstrates
the result of the surveillance, and more details on the control options
follow in the next section. All control values are set to their defaults
here, the first two years are used to find a model for the in-control mean,
and surveillance thus starts in week 105. A plot of the results can be
obtained as follows:

<<fig=TRUE>>=
survObj <- algo.glrnb(shadar,control=list(range=105:295,alpha=0))
plot(survObj,startyear=2003)
@

The default value for $c_{\gamma}$ is 5. The upperbound statistic is above
this value several times in the third quarter of 2006 (time points marked by
small triangles in the plot). The next section describes the control settings
for tuning the behavior of the algorithm; e.g., one can search not only for
increases in the mean, as shown in the example, but also for decreases.

\section{Control-settings}\label{sec:control}

In this section, the purpose and use of the control settings of the
\verb+algo.glrnb+ function are shown and illustrated by the examples from
Section \ref{sec:prel}. The control setting is a list of the following
arguments:

<<eval=FALSE>>=
control=list(range=range,c.ARL=5,
             mu0=NULL, alpha=0, Mtilde=1, M=-1,
             change="intercept",theta=NULL,
             dir=c("inc","dec"),ret=c("cases","value"))
@

\begin{itemize}
\item \verb+range+ \\
The \verb+range+ is a vector of consecutive indices for the week numbers in
the \verb+disProg+ object for which surveillance should be done. If a model
for the in-control parameter $\mu_0$ is known (\verb+mu0+ is not \verb+NULL+),
the surveillance can start at time point one. Otherwise it is necessary to
estimate the values for \verb+mu0+ with a GLM. Thus, the range should not
start at the first time point but instead use the first weeks/months as the
control range. (Note: it is important to use enough data for estimating
$\mu_0$, but one should be careful that these data are in control.) With the
following call, one uses the first 2 years (104 weeks) for estimating $\mu_0$,
and the years 2003 to 2006 will be monitored online.

<<>>=
control=list(range=105:length(shadar$observed))
algo.glrnb(disProgObj=shadar,control=control)
@

\item \verb+alpha+ \\
This is the (known) dispersion parameter $\alpha$ of the negative binomial
distribution. If \verb+alpha+=0, modeling corresponds to the Poisson
distribution. In this case, the call of \verb+algo.glrnb+ is similar to a call
of \verb+algo.glrpois+. If $\alpha$ is known, the value can be specified in
the \verb+control+ settings.
<<>>=
control=list(range=105:295,alpha=3)
algo.glrnb(disProgObj=shadar,control=control)
@

If overdispersion is present in the data but the dispersion parameter $\alpha$
is unknown, an estimate $\hat{\alpha}$ is calculated as part of the in-control
model estimation. Use \verb+alpha=NULL+ to obtain this estimate. The estimated
value $\hat{\alpha}$ is saved in the \verb+survRes+ object in the
\verb+control+ list. Use

<<>>=
control=list(range=105:295,alpha=NULL)
surv <- algo.glrnb(shadar,control=control)
surv$control$alpha
@

to get the estimated dispersion parameter for the salmonella data.

\item \verb+mu0+ \\
This vector contains the values for $\mu_0$ for each time point in the
\verb+range+. If it has the value \verb+NULL+, the observed values with
indices 1 to \verb+range+-1 are used to fit a GLM. If there is no knowledge
about the in-control parameter, one can use the values before the range to
find a seasonal model as in Equation \ref{mu0}. \verb+mu0+ is at the moment a
list of three arguments: \verb+S+ is the number of harmonics to include in the
model, and \verb+trend+ is a Boolean indicating whether a linear trend
$\beta_1t$ should be considered. The default is to use the same model of
$\mu_0$ for the whole surveillance period. An alternative is to fit a new
model after every detected outbreak; if such refitting should be done, choose
\verb+refit=TRUE+ as the third argument in the \verb+mu0+ list. In this case,
the observed values from time point 1 up to the time point of the last alarm
are used to estimate a new GLM, so we get a new model after every alarm.

In the following example, a model with \verb+S+=2 harmonics and no linear
trend is fitted for the Salmonella data. The observed cases from the first two
years are used for fitting the GLM.

<<>>=
control=list(range=105:295,mu0=list(S=2,trend=FALSE))
algo.glrnb(disProgObj=shadar,control=control)
@

<<>>=
control=list(range=105:295,mu0=list(S=2,trend=FALSE,refit=TRUE))
surv <- algo.glrnb(disProgObj=shadar,control=control)
@

The predicted values for the in-control mean in the range are shown as a
dashed line in the following plot.

<<fig=TRUE>>=
plot(shadar)
with(surv$control,lines(mu0~range,lty=2,lwd=4,col=4))
@

Information about the fitted model is saved in the \verb+survRes+ object, too.

<<>>=
surv$control$mu0Model
@

The $\mu_0$ model is fitted by a call of the function
\verb+estimateGLRNbHook+,
%% Instead of using the standard seasonal negative binomial model from
%% equation \ref{mu0}, one can change the \texttt{R}-code of the function
%% \verb+estimateGLRNbHook+ to get any desired model.
which is defined as follows:

<<>>=
estimateGLRNbHook
@

\iffalse
To include one's own models in the \verb+estimateGLRNbHook+ function, the code
of the function has to be changed. In the following code chunk,
\verb+estimateGLRNbHook+ is modified so that weights are included in the model
(here always Poisson, ignoring \verb+alpha+).
\begin{small}
\begin{verbatim}
estimateGLRNbHook <- function() {
  control <- parent.frame()$control
  p <- parent.frame()$disProgObj$freq
  range <- parent.frame()$range
  train <- 1:(range[1]-1)
  test <- range

  #Weights of training data - sliding window also possible
  weights <- exp(-0.3 * ((max(train)-train)) %/% 12)

  data <- data.frame(y=parent.frame()$disProgObj$observed[train],t=train)
  formula <- "y ~ 1 "
  if (control$mu0Model$trend) { formula <- paste(formula," + t",sep="") }
  for (s in 1:control$mu0Model$S) {
    formula <- paste(formula,"+cos(2*",s,"*pi/p*t)+ sin(2*",s,"*pi/p*t)",sep="")
  }

  m <- eval(substitute(glm(form,family=poisson(),data=data,weights=weights),
                       list(form=as.formula(formula))))

  return(list(mod=m,pred=as.numeric(predict(m,newdata=data.frame(t=test),
                                            type="response"))))
}
\end{verbatim}
\end{small}
\fi

The fitted model from the call of \verb+estimateGLRNbHook+ is saved. The
result of a call of \verb+glm.nb+ is, in the standard setting, an object of
class \verb+negbin+ inheriting from class \verb+glm+, so methods such as
\verb+summary+, \verb+plot+ or \verb+predict+ can be used on this object. If
refitting is done, the list of the used models is saved. Use

<<>>=
coef(surv$control$mu0Model$fitted[[1]])
@

to get the estimated values of the first (and, in case of \verb+refit=FALSE+,
only) model for the parameter vector $\beta$ given in (\ref{mu0}).

\item \verb+c.ARL+ \\
This is just the threshold $c_{\gamma}$ for the GLR test (see Equation
\ref{cusum}). The smaller the chosen value, the more likely it is to detect an
outbreak, but, on the other hand, the more false alarms may be produced.

<<>>=
control=list(range=105:295,alpha=0)
surv <- algo.glrnb(disProgObj=shadar,control=control)
table(surv$alarm)
@

For the default choice of $c_{\gamma}$ we get \Sexpr{table(surv$alarm)[2]}
alarms. In the following table, the results for different choices of the
threshold are shown.

<<echo=FALSE,results=hide>>=
num <- rep(NA)
for (i in 1:6){
  num[i] <- table(algo.glrnb(disProgObj=shadar,
                             control=c(control,c.ARL=i))$alarm)[2]
}
@

\begin{table}[h]
\centering
\caption{Number of alarms for the salmonella hadar data for varying \texttt{c.ARL}}
\label{c.ARL}
\begin{tabular}{l|cccccc}
\verb+c.ARL+ & 1 & 2 & 3 & 4 & 5 & 6 \\
\hline
no.\ of alarms & \Sexpr{num[1]} & \Sexpr{num[2]} & \Sexpr{num[3]} & \Sexpr{num[4]} & \Sexpr{num[5]} & \Sexpr{num[6]}
\end{tabular}
\end{table}

\item \verb+change+ \\
There are two possibilities to define an outbreak. The intercept change is
described in Section \ref{sec:glr} and Equation \ref{interceptchange}; use
\verb+change="intercept"+ to choose this possibility. The other alternative is
the epidemic chart, where an autoregressive model is used. See
\citet{held-etal-2005} and \citet{hoehle.paul2008} for more details. A call
with \verb+change="epi"+ in the control settings leads to this alternative.
Note that in the epidemic chart not every feature of \verb+algo.glrnb+ is
available.

\item \verb+theta+ \\
If the change in intercept in the intercept charts is known in advance, this
value can be passed to the function (see Section \ref{sec:glr}). These LR
charts are faster but can lead to inferior results if a wrong value of
\verb+theta+ is used compared to the actual out-of-control value
\citep{hoehle.paul2008}.
If an increase of 50 percent in cases is common when there is an outbreak,
which corresponds to $\kappa=\log(1.5)\approx 0.405$ in Equation
\ref{interceptchange}, use

<<>>=
control=list(range=105:295,theta=0.4)
algo.glrnb(disProgObj=shadar,control=control)
@

If there is no knowledge about this value (which is the usual situation), it
is not necessary to specify \verb+theta+. In the GLR charts, the value for
$\kappa$ is then calculated by a maximisation of the likelihood. Use the call

<<>>=
control=list(range=105:295,theta=NULL)
algo.glrnb(disProgObj=shadar,control=control)
@

in this situation.

\item \verb+ret+ \\
The \verb+upperbound+ statistic of a \verb+survRes+ object is usually filled
with the LR or GLR statistic of Equation \ref{cusum}. A small value means that
the in-control situation is likely, while a large value hints at an outbreak.
If you choose \verb+ret="value"+, the upperbound slot is filled with the GLR
statistic; these values are then also plotted. The alternative return value is
\verb+"cases"+. In this case, the number of cases at time point $n$ that would
have been necessary to produce an alarm is computed. The advantage of this
option is its easy interpretation: if the actual number of cases is more
extreme than the computed one, an alarm is given. With the following call,
this is done for the salmonella data.

<<>>=
control=list(range=105:295,ret="cases",alpha=0)
surv2 <- algo.glrnb(disProgObj=shadar,control=control)
@

<<fig=TRUE>>=
plot(surv2,startyear=2003)
@

Of course, the alarm time points are the same as with \verb+ret="value"+.

\item \verb+dir+ \\
In the surveillance of infectious diseases, the usual goal is to detect an
increase in the number of infected persons. This is also the standard setting
for \verb+algo.glrnb+. But in other applications it could be of interest to
detect a decrease in counts. For this purpose, the \verb+dir+ option is
available. If \verb+dir+ is set to \verb+"inc"+, only increases with regard to
the in-control mean are taken into account in the likelihood ratio statistic.
With \verb+dir="dec"+, only decreases are considered. As an example we take
the salmonella data again, but now we look at the number of cases that would
have been necessary to detect a decrease.

<<>>=
control=list(range=105:295,ret="cases",dir="dec",alpha=0)
surv3 <- algo.glrnb(disProgObj=shadar,control=control)
@

<<fig=TRUE>>=
plot(surv3,startyear=2003)
@

The observed number of cases is below the computed threshold several times in
2005 and 2006, and alarms are given.

\item \verb+Mtilde+ and \verb+M+ \\
These parameters are necessary for the so-called ``window-limited'' GLR
scheme. Here the maximisation is not performed for all $1 \leq k \leq n$ but
instead only for a window $k \in \{n-M,\ldots,n-\tilde{M}+1\}$ of values. Note
that $1 \leq \tilde{M} \leq M$, where the minimum delay $\tilde{M}$ is the
minimal required sample size to obtain a sufficient estimate of
$\theta_1=(\mu_0,\kappa)$~\citep{hoehle.paul2008}. The advantage of using a
window of values instead of all values is faster computation, but in the setup
with intercept charts and $\theta_1=\kappa$ this matters little, and
$\tilde{M}=1$ is sufficient.
\end{itemize}

\section{Discussion}

As seen, the function \verb+algo.glrnb+ offers many options for the
surveillance of a time series of counts. In order to achieve fast
computations, the function is implemented in C. An important issue in
surveillance is the quality of the used algorithms. This can be measured by
the sensitivity and the specificity of the result.
The aim of our future work is to provide the possibility to compute these
quality measures and, in a next step, to include a ROC approach in order to
have a more formal framework for the choice of the threshold $c_{\gamma}$.

%\include{extensions}

%\renewcommand{\bibsection}{\section{REFERENCES}}
\bibliography{references}

\end{document}

surveillance/vignettes/twinstim-cache.RData
[binary RData cache file omitted]
y*.F?u 䡠@Xp[D,!(8ۤ\=_P+G h x)6`ŸKl A?OfpYn[(#{0bg%IS`Z?>1ū 4Z֯zqO`+^L$$Hk'( k6ŗyz(25;WcQkŨtpjH'ا=p[D9=o@rVgzPC~Gw)Ǻx1l,ap[J6&j\]D)qˈ%u^>ux}ˆ+MteY^_8}ڡ\1N.r ^+})VZ4%տ&%M'm%8f: hIob,L(isҎ4ʺS)L=>M;mn{O.~h&ADYTɈ޸ܯ ?HCb+{>Dm =&:`1ЋUjn[B<?̓%EaġlRdƟ(XNX.^៾Q)Y7V[bЌ畠vr1QxmLڋrw\ M[4`^ =v;S1h%jIn!C`-hH~abeݸE#LZ~ {rt 9u^294\8q'@sN`^^Dui&sdLfmW7O-bWL KHC,Gιty+dL!\=4qiSA9~ BFX77vCDꗉp8]T#CGC_<0%.VH7)rͭ&#F :cdCzN̛ٚV?79F`g[E,l :-`!ɤX_j@H~H+( ]Sm-ASU[X8i Zpw䊪38۶ QC[Bqw,F7a5ICj` m_%| 4rf8zt1b@ai7Su|?][;Ta?hm ǿi ;nhAlQO@t>F2@-ubȔoު 3csIaXQUD(; izJ2[=#KVpbu>Dߵy + 9t3TttЃ*C6>T3Ͷ2{vjq4Es$G~hM9A/~N$U6Y ۺ4t0h_j߈#V ҝWdӀ0)EQLf hqfХbBwx̧Vǚ}zh{/M[ߪ> pөgDm[ gZo1TƗ'΄޲eH|VurޣqY'$rwsbcqOmO?r 3q{Ol\`a>uc?bK"bnN[m)l-B#O+:|;@ܸNΉ@UG#9(t3\k.K@{DɨaL9lЪ1‚~Ā t_ĵkV܇?2 rPo;c8 n[s6YҿPp6H g]/~kmԺ8 %8:(0l aOJGU: hle*N8 rWJPaEaTT{#%#rE/2;`] ulv+aM1i3%LPq!@UxH,Uj Q4ӹ`jߞ%ӻ{e763ߚՙkd{>R3>9Lb;0acW'}518FXě2)1_7bDnA6u4rCkTvL,Wg Qp8Z-Ɲ}h_b_DޟI" C{X=nWZŀ:%;LjI?~60 /Jf1sd %E' !` ؕ.6P' oe/B-oc%D!%at~ƘD io˸'IhCט`!W]tLc.tL71zdB6?< P[ƶMC.4N&M#ry&t3wbg)/UjD$E$v+u]*oEAS%g5䉏xuc/KP3Fs1^g }=1m1-bdϴ[0[2>ϨqQ`P, p hΕD?c|8HW.^ RN_hbPqr֍qm\]EyV6^-pZ$?2XD@4+"Мww9pt!eoX ***t8eUn>4A@9= ~_j^Oh&'Fo8`0ˋJ4<@صtwzmE%YSS  9 SVM}z·'_],w0ee=*wnvsSZCHQE3ƺ.%>jiX[ &A0lgzWWV߉FtȑU*!:OU lVM|t w}EKi>H/s=Mi^YڭpڌS}n]JEk7&VzIڳruB"Pu|A$( RNj\cnFtS gO4C9,ok8#~^ [%7#@>9:ڶ_rʦKWy*?FD隉+wT2qν(%ay[L @qm*,r`qbrO2h*ME[q$hڟ`@v8i袧8+%Ovq#]E JǤֲ\wN hJ~aizd؁d\/Kcy;{$٣4/L5Q88=a)< QL [vIN&=mpx^II*LCxG$YF(q:)~AXEsw|sӥ$Jyhn(A~dY{Y @\ޞP_B##8kftMl&cbz 䍙\BNGw v_LݧNiE,eTcnЋHr&ِzje\"w>5mژCtqLWA^莈9<UO>)u_(x%☚q 5O\27888#Q+4TR=Hcyj] śh_NcA*ROQveKʁx}.EK%e˅M>s#7A| tj}wzd~}V.kb=cȨ$0X檘a5AUq ēw.F  dr@ݴzbbmt$Eqzܢ.@g/2õ4Ceק֨Y)A? 3K3Z?:8T-mx&ًc t}ar`-2?&x=#=ñ6yElӜ0bӞ7KS8+ZaCWp-{I*$ g~,ʛ @uC8wOI&A{ JHL$s0lWe衆N=~kᒕi< 7=wĆT,Iu' HFi*!cWۍ(*tjdwv_ц,Ƹ,/lSgyJ eap[bw0,1:^!w_p|lZx=H?-"\ug#3rR='n`;$QU|=Y ’:5m,0^'$I(Xrcc}"bir 6%ljގȄz"DXRWDk8/յ! :ݲ/,Oi[:ѥ/JV7kĉ\Ħ>?zkVi62 ѪcfҜȢI GV  { ѻJrI-Y N?}zهҐDobL$Z=]ْQ;z`6UaH\Pwbl瓏&u- ƪ'FɳO-taÀOElɗ t/½FAZ׮Mo{ʯ#>0P-t=}JU| #9IdX] ט:Ph ^CBAv5x:3׻ y?0G.%IW$=KK~uq{4rdqUVHlU63*cλ" F]*ɬ@჌"Z~z'!` uJh0z׮DCT|2 w03GL ؕ i G)cEE8<&9Z,K&4\ \q 3m>AFO $9:㸥Bu'Ҝi7-zj-UY Zĭsj|4.QQ`5Jz!YڕBɘY[oGWS}1̱C6EEl>WEDfr|Ph #D eBxn [|[CUގ I2|: AјnoRjCTr֒$E&ʝd(1ռ~]1TT^wI7xEz{+.&*iı$#ͺ<'}/0:LЕT8l}f<$YKiޕg1_4g{(e!=f'Tk#h2@whE'<,*~4F(q#ϧ}׮ |RR,^wۗ;TViNqNnѨ@m}"Rn Qr^_>7LXavP8_́#3MdvV9vþnןƖ Ҕw3 M(v@ب~ZEʫčj>T.ΩÄ5 @nN:i`&sxm(?mgZ̳BV`c%>Bfx(2XH{WD-tV//CYV0%'\2 5xH= <#s;'׿v|*~n1)% ,o5!G*EE8+Q;TV/#Ս炒5Pׂ y\Y.,r Y+1[{wŃzeoM}bW*&0=eN!3F]>%?ۅ%>e6%obQ䮣z:2?*x{幑u@]&ٮ@ƍ|,L.y8@P3K`hJɡZ\+0̰ ~^$d\Hr+K$vkݻxe?L7!yDyR[5sl  FY.,ĵoh_#?[&4|R܈6~l6Wo=#KCV5bCcI8f1@9W!^\寶aZȇcӊElՓ6-ԯ.ѐvf i2FC2q%;fT;Qk~;?k9w3|@6\t9? yLb+mnف`*]XឈOVPF0 G64:fekZg0fAM&xSWxq)XV~ᛃ^t<1 [A+"O`[y<(+}e5o&>}CP6w&_Ot"ƈ,TJ=td yglblxZ>z:EEH'4tJ ;z(3gRN '䧃8AV\lNR:^`7/ζL1o'rEl;:7w#2$慨q ڟ=ZB׃<})[e M n]Ɂb7ȝCuA EYOt[un*ʒa1%8QcǮ EfgiY0v?m~Ѻ%biK/O _-ra6p*BV\z̺TKuZ* k⌽L-.b^;đĤ.$yѢn>5N43#! V1N. 23u;|~/1љ~>hEΎ%%g~N@p]@e~,Qzajw%K鰞6u9<\G %m{f#y\7R1u"pf+)SѤAւJ/4w}^hCV!{N׌\ǵ߸G^WX඼~a\U;A!>OdYY|u4OǞ6o5ud`~>]d^'4u^Y`FژLS!='?h*rW y¯\uVp? &[ 1vS iwwƇj\e>Ж& V D$ T?MJ|Dm5BSɞzFJ7.PXƯ?eކ LpT "zWl"l慡q?Ds@9yo KKٟjxcК 2xߤ{EAF,! 
4cw* 2G9_Sa"671~ٰ0U m0K[,ňHScwK *C2h-XDO E z7N@/褋TX-l1~D_T􇟫=Uk.-)*_Su\)n乺Rl@d [T-)!֠v/ }=־[6-nS,vj2';~jxA@)w1FCفmv0g 5Bwx <-ԙі ħAvQ\WPAjBgB=~FƓd4V1T1-\uf0Djb ;no G4%ղk; N6oK{EC3'sk#Ø £aa!`nY:Jjw3oghe}*0]9V%GjQ%L0iC:G I?#p@1)->ȚS9sPӴ`6]Z7~d}NF40/b%g,޾qw];^gؼ,,nvU0$ gR]Ͱ܈߳%# ,_߇%㹬( %k>=Q[Ԥg4@=ɾB#@엘+x6B;Әޅ۹,:cC\ܶ11uA5]lC*'ʹvtwltOmظe, Yx7˅-2BMDmfS.P V =:3EJ]/IٍQX ˆsY+DZVp fJ(|Wp*m]q+!(wJB kՔ$iTiH(ӇZHVBv&هKEd(S^0]/*; {0{$-|A a+uX!i6YHiUĺxDAf3y5v82Uv'Wu,m?}h7VRTRj H&/Yk8p|jpY 9_+7N:ʾ(MPBrJ(?*_>c<Kj͔EPX6nʢj5^ [:K }CKrM a#E,_XII2zgKf`A0W2$McB6ӥqoozi kI)Iu*~̻%4Ǿ:R> eD~C[s?F%'L9D'3=޴ ̠1:bv_ Փmhqs.$E.rxw fg<260eg9@U@M{c۔m5is (hIeEWbrQ QؚMwaUKPQw{IYQPM5fy!qo%!OC(s]Ehqf!-*e;ѸPk -|y.3>NӿVc*GPk46+k>oxk5rhyi0^¹VN~A-㕺X+ZL׈/]%Q4{ф_IUq[/6Wupqe"lw-mT[/-4iBD!"X\0] t`Y o C$z2nE*ݷv)>?*h +mE4@MLT/B^,~ 0|^VւƨguH_2xw=%Q$ gQ|g )B O(\` s,/6sP- nic[5Wbr0/778-&=}k4}1DJ\JfljGeyigƹopH7ܓAE mĉ{R03j/BD Aف窍)Ni2=Bv+ OIɛ@c]k(4&{SW&.Մ]~wǎAWCr/ sWDSU۾%saZih!yQsʰљ u QʿWÞ[`)zUm~#X x+Na^qrx)S&ZM,p 7]ؖpC} $Hs=Opv.>I琭o|<1z (^ݑo9)Rݨ} Y@i!!;| ,7P6DzAUqEXvBZHnxc9k`@'M"k PU%)K:@n!^rlV8߁8Fe:r/W ؋?i*7U: /vIF2 >Xq#2}iR.krH-t/1\ҴH%BN<=/~hǝ.tn\3{fY]M1)AX@ܲI\|ۍs\l_ ^5)2؆KZ_pPˑ|jAZ{HOu_dp 8DUGf:ћ3Ww!pVjjhSR'6ryp(i=x7\a$ a]IXqQ6`H+D}R eT:nnZ)"B%4+S{0L\c:fA a4$9G;+h1N K,hؓ Գo|qGBlr.,?Ee)ȬW Qɋ4%4 #jߜ9Cx==sB,T;4lN stٳ+FXJO|1Ox@+]J5[8r Hko#) Fxj>M Cw-lJuiF|~Did43zP@nB l0n,V0Qe+Sx3`֯ Ox}Mf^aT`ꈹ݅ΑpLCawdCI2 Gߥxo8ZY՗FkϹi9}Cew ܞKVt_~ԱdH'/),lMs";5)F Qz!6Q=z^%szؔ 9|{%l]@jz5Pg,Q'߻f(KkC (P Q~cI$tyזtJ2= 8 NҼaQttTr-p-80TѪT5uZRPe%'̞ۇOSN4KyD/$-j@yziǯ_Hu6UapֿGniҪO4,k]CE }G~Ԛ˅ĝ3=#děI`J>mgm)XVH%w!oT+vFW]Paޕ[v|4L,HZr})#m_SHA,zX?-{hy֫9):5dExm,lT<f,KmU׶rcRĴ1ۄP2MacdG2\Z]cL |_RA#VO=޷)X* ZD4pXh ; )j siHvAh.][dV*9h /=:CቤzZNJ2bWuʫ:|q@1f$_iUN-T&{"rhI0䠺!c[FM_Hu|ԛƕȂ)Ær_#E< tSb`B=B*{۔&ܬB PhUάQ^[d!AUVDxcXpO,vUpSC>ݶQrYUƜڌ;!nv[g%{7#A`tZ%A"4B<AYcC@l!5hUbv*+mqvEi1Y'Z"t9c rQx^UQN uڸJ(Ҧ z̈7_EO~EacHo-LnT5Jl9vmDPI>kwV#^:<Ϩ! e>)8-]DS2"0_2[`6l!:!s2 a2Sne[S@uPa#jė^9wĺ8*UoS &%!u7B[rx{3>UU E0A{hJb)t̍J|훰z4jb w QmĵqC^W<i9)*e8:/OF.`HoF]qg0ڎQ1lī?Ғ!p_5&Uc#QspƏ lG >qQqgi؀;\d~Z碈^Xa\Iϕ$}֤x~c\`əBqEb/ϐ<7%3:nZDnn\!ud,e`Naz)NlՏP*ʿo 7*AjNrnq.Ӽ$_j!!{ DŊl Ea$mD|%1^" G JtCl9[H$ؐe5t!sZ8W++Ы`^<Jg'/+@hZwfH"ڞ]PX.CF:o}-W#H_U|MW}awfzEׂ6_S `F:Q%C,_sXZ]^y&W䖀 eL EFkÁ1ڵ/5˱!30Qq2S0u&BL1qWȏ&kT:r_N4Btkc bie׮]߁SY[%͑j0:䴄]-Y/ LnR+z%@*qmW9, H3<#`]Oz`Qv(eɏ}q]VSԢVWVL m;=@O5b4[X)^N(>H0p&,zBW.k~GNNO%'9ƒX.)-2f̈́{Ue.Xb r&I"fp1) 3M8LF uYw=>f1X&֧ȚmW Y<3?%:u,-DǝQ ?% CY( 3J2o}lm\U}Wuzb喳dOX/S`f11tw:~*%m J!`5tF=NYkWB3|b |߬{g*@`PC@QҀA7q<M N;mMk+ Gb];e`5Xy}ŗx_ A*Ai.Qi316HysE0\O#<ɔi;m G ֎nըPc'hYN.N7OKy#Ȫ,QiLö u0'Ns0uD'X$u~K޳adG}}\/c-b _R []gFT34/|c '])Fو_[y;pC*u2?H[[j P a'm>?+~DU :7RDa"3d?AӽDVZV.1tnMݷ}"x2۞Ri/;3?U$qP\5x wњ {9k*Cq]s[.7$n~l=MEk$cC6_R pAS`Gӿ)&$Yoo>R6CK9_5, *˃.ǘW6M Gx"nE8UgO?&WdW/E}+Z6X3L Xƈ`swp_4`od Myr(%M~/a, mcp2H֟Gqvgw&G-~3At*4 ;`s/q!3)UG34QhJHwb=E}axh`/ ?'=+M;! ,-m#${,uy}"`;7O:uVAoͥxp{͵d@]LuBv8nNN;Q$4-Lk{FF|>`VsrP}.?*cV1["z5\Ms,0zt,5Q 6sWDv}auCq''T Ya*eey]+'H*r`FtN7Fix4 HV,Ph@C%),Ҳ\9FS!6T᝱nt>(өq8>Ѭ'}_,N4k*9S>Hb9u NjC40%Q?ܘ] ,^`PbH|OOZlF[)xt~X+Ƣ U6RY.dnw|70}B.ڑJD8vZ CW42Nb!;(()hd@iŘ[Vqf`i;`1*B8upДЏ1 a"$.~S(g!^$ʉOy ];(ZP")^k /Iʋ.g9[Beq҆ͼ3 ` rj/<4`jbQ7ݏ&$ƚibCb1u98?"]#;zsܒ $ěVJFd;amssv =KF@NHxmY! 
xv[>-s(uĒZ;3.A` GC-^|ogV5FԯܡBhœ"Pj7DwB?wK !:Q1)I0o:B~} Et(OH*@<K h =U%+&ΨBy#t>;$)`a<aOCoR :])/E#OjvHe(I4ڟ 3~Ps (vIY`e\׏0w>lJ9鹒v<Уһڕ#fTELT2Fz!B+$ /*lҀϜս"~aC6joT,~iU`iNybRdS:Z;^2l^C/r*HcZ\+dGr3KG%0O70Z8NL(])]l.1~ {ha)hJˇ1rJ0jqAcb 9ɣ&FWP S~"IT,@'vL4Tǧ.BO0b0ۗ*?,]Z^^-6D2rfJ[, $7 "/p".o.q^ZEſC3+<Q&YJM^ڛH4u͌<MȸWjETM`p<( g3O9s?m+dTy?FJC$jyY%:_׬}S Nӵ|RqYs~!D<5uM3P݆aJz`cpmJT76_$ yŦ?M.i8Oxz;f60C嶕\?-ich¬۟ؗ'!7VBWsm &Щ# Y?V|{tz%q[]%ĕinii) ?^%oHfqcLO/9+Ś0-;Z1%'N 8ʦb#54-r.rט~`tem$rDٯNq4qXA`)=Qe"vfPxᵬ_` }BQe\A;/!4G+,~'a$>-,%MhlN);9@.gLP~Ve`p7P[zTi$}\IY{8?o9½\qr8OJiLA5hN@Ȳ2ڪWes»k ڗ\o0xKg.¡*GpZWBlW:>HIX%GwP&%=c%;uLjkpLPV0@qiOV|F=,e 34\ {2oW@%l1ף .$TYb `kL"52⓳F|@z=QՖZ_LryjβSWd7YA Ŝ>IPZ (oI4{2<0E6ب$\wn]~A:<7j5TIbvNqyμ1tF/rAc|{a{OVbu=\Ot U͙sBOAY}Y4)LutGIr*(1% TEeLKd_PȻs皬KWn8HO#.+.}Ϥl'9@1k7RVާ݊V2IJKJHkcj*tq@Jean_s,O~^9g͜꣕-l"[eJls*Iy1xȰa3%H Q6 P<`-]3D93jVM~yjq;h`QhUaߎ3&Avz$3Cz߷wk:k)|jq('(,N G͈F9̌"Y׭d-O/4r$ P-icڃQJaA ߳ 6#~4͖HEϟWV1/({1bb0S ?ӟ_}$HG6F^Ak\JHDZNb!a+P*[ ^_%aЬ!Ǘa Ofi(Jb7 H[3uj : \,4"c6TĶdWP(B܄[w`'v~De#oQNA9O EI^=5U @ɢbiEwAN2f&=Js,rW4`b jM/'J|(%``OhIjv斠5q_b)$-9F+-^ql+u"LMVD(XEø%zXDtZ$zq_ҪY ! h 7uW[A]6(c#BL_TQ7.|% c.2`~fԂyڵdʐBv'h"Ŀ.X{ :՞;>d %R9@f gH(v&`yUm:ZxMC e)-nMZb*"(˄20GK;C)W<$ 0t IR1@qǶ[E8>(;:A-ԫ?LXE7 `N*ddݕF_ ж?;dKJ(K[, PfۄUKɄKb>e5oiU`?m T}UQcpN5j/Уr.'&&. ;Y(Fw= 1.k( !³#"|+[bpi\5gL7l~3Z's'vK<3֟1P2TG2!*N\CsEU$wWajﬓ6Ibo2/sHe>3`Cͬ]<NpO α̸dIHٴ`[t惰zer)rK le̦ @+ó8Ѿnic?B!F2:8A8%Q:jA33]{ܵ5^|(*%0QtJ o6 C&tFU+h!-4旵3Oݾڗ;bOp9|^yŐ=ƺ`V)K'jEHh*|y Yh F :yG̛pЌwNx,. ²-ȸyLhl@D(#;j:,x sE*} \ey؍<]dl&.+Ξk'\80ޕP 2J-ɥf?~_0 ؀sKҔV殑IM3qS"cBzMZS=Rs& Z[t{oDfR y$[ 4ra:pUuQ2D)K %ᐥڠ13ͩuŠ/h%^`JkfCtZint*#5R -)8xzc(!sYLw OwVyE,y t+f#6 =$ r*D`..=N=ԫؓ`tf0ǻx%iM6nWҷMik:y4~Я`-4!CGU7m]6mF/{^ǎUiA}v!4lH0]nYIl{^g6~u7#6ˌ;}dѾ%˩zѱC}臏G.03HA8ɋqʊƏ5%P"(qo# bMCY=p]f+ ^%QM\ .5 gCCϑ-3tIdˆvڕ]j/F&tqыZ)jIkWi/yϖW!'I3jɶ90v3hS%}|5ȅyȓhqׄ<|WN&* ;-8]"o`N̨rNj y^6_k]iPӛґ@IvYȫ$o?y^k UTIp +ecV'nVU`.0"I!\ÁXib}n㪒Dln+yg즘_aK)(W_p:2Yn.Q-yh2i ={DN~"Ο|b{5A ¶Z : ^̙ %=E`ǾT9bDN[H~BNOj]=YPypMN\Pћ% Jnt 0ªvF GuyH0^;=4 XK4iݗ>@\KT~fN`]ƍS/Ws-'·#O5r>ydqݍ:g}ĺn͸0-y=34>}J-^M~Bdc`Ns瓏KC4)ь1N‰Cxczg52 #[:dN1;vK1„>L_z~k}xIl1xuWLe6|||8`ث¼X$tUpoa:~LL--6FݺY(YARJm ȧPAOl> }gg5L4ZyiFLjvDs,ЏY)^֥."{B~ctڡI+oު{(۩F)Z*A<dcU.ڨԸ<ʈ16);9\1Fa(hnR+Ξ{ȣ kf\ġOW=,y+ 8U}jy1pB\(bvT q8g9\VT 3ά-DϿ1ilmy1`]mgeեj)4FEt- U!BktnI='`=6_L{4YV@gaGhڎiׄrn /xl/1j`4B:*N-P s)%L#H`8 YЍ0DfɦָQ׌b vk9G3$B,\n? wy~l0@+EH_{$|q̺̋ \4Q[.jItR>X_Ê>}p0 ZC%-0F$u܆rq{򭮤|^tvuznIx}vC+,(ٳD,?g306o֚U^h{UH OK R#+Y]- wS1Hg︠ J\0 <˜  KI1kQQhIηqqXvGdNz  @ӊSyں>"\sS &vciJ,Qܮ"煃9|ٓi*d&+U`3T:IDoQzt .hEJ4_rJjn"3oCe:7|$cpw 66SݷifC.=LgzFӲ~Dg1w ,3+'oMB;(gRġa豐,DL'?^J6%䄛AZ|-l@5$Ϭt}LBZyH4keOkd9&aν\o|(P9YώEO@.IORK?u~URJƕ= ҈N3DO9؟wF{(zsPwU3TQY~OaCg4ͤjJ颻$n,w.Ƈ-a%Tktsӱ_+o{h&J؞tUw+,Zr-5MyrbRI C%O9TaT=Kˇ>\-M=(m@n*k\>r^LY-pZ|g!H 8L%"X0;;>7K1@e?IH w1Xi^Sv$-N=Z}||3TdhQKIk̀ }r3)8҅KxfWA6( ™'^ B3LN@ŏebsQZ)+_tH);jrJ X?!% $R9b {>_1N5hچ&V&Qdͼg +CMr*--N)?5l88~n9)5ԟWUGǒ^b;JT"S0HW$Yddȧ*Jz# EzEE sG)tOA Ԑ‹2^^>Х=dm),|N6(COR1PL-ґ4NA%9!]:3Zt j3ނIQr/OJUk9&#oސفrtRӱdM7ƙg Ⓦ[= j:^ x6p0(Y*z1Yƌ Bqrhcj$W`òΖ_Cep_ .i@QnH.?k᯦_f$P6icԧ7LJu#gȶ5Vhـ5m0y?ۄUt}k^J8j$;hQO|nm{R/D{oEAo*JљgB]绚{,_L_T7&Rp]s2ŷ$Sڣ5(a7uʝua7΢M\ËWɼ{?\3xMO0d3"z}(&Z?zGB7wM \Տdctc^8_nz4)B)BֱzĠ fͬErf$~*Q# N.)E df\!Eݴ-1>${/Iߑ)^ѹ>){f=AD gG, 4'0S-ہa7}֨\eYlͩJU p$}i06PFpO2H)$V>T$4*)x`in{8Xmm#aQ1B%nktp!eM4¬bm7j|cElN^_N:=\xYnLN^TKb4. ߲槲o3["3n668Φ3k#t[14:ɲF02gTG qst 3Xm}ގ_cW=_L[l@Me|-Pn1 g jW1bM!P$L 2.䨠{SD~̙%JY5Un͢V+?\LۑŁ:uVܶʣiG 3 .^,1>zżLؓzm\NldkATp< lj_%qcb'`2a}~U Ui 2-ޣW)c 1Vs!grM35|7𲹽 : uz OZ de0nɪOc)U*u|]Y*,f-Yb3ЄnVjUk_!]GK,X:FOM,CbE)hkwύ5trӗD#"!ug&gqI 7_X~ _ɟrOd~yZ1UXRX]. 
surveillance/R/0000755000176200001440000000000014027110155013143 5ustar liggesusers
surveillance/R/stK.R0000644000176200001440000001535712532032517014042 0ustar liggesusers
################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
###
### Space-time K-function analysis of "epidataCS" objects
### along the lines of Diggle et al (1995):
### "Second-order analysis of space-time clustering" (Stat Methods Med Res)
###
### Copyright (C) 2015 Sebastian Meyer
### $Revision: 1347 $
### $Date: 2015-05-29 11:45:51 +0200 (Fri, 29. May 2015) $
################################################################################


## call K-function methods in package "splancs"
stKcall <- function (which = c("stkhat", "stsecal", "stmctest"),
                     object, eps.s, eps.t, ...)
{
    stopifnot(inherits(object, "epidataCS"))

    ## get the function
    which <- match.arg(which)
    FUN <- get(which, mode = "function", envir = getNamespace("splancs"))

    ## default arguments
    commonArgs <- list(
        pts = coordinates(object$events),
        times = object$events$time,
        poly = NULL,
        tlimits = summary(object)$timeRange,
        s = eps.s, tm = eps.t
    )
    args <- modifyList(commonArgs, list(...))
    if (is.null(args$poly)) { # use coordinates of first polygon
        if (length(object$W) > 1L || length(object$W@polygons[[1]]@Polygons) > 1L)
            stop("package \"splancs\" does not support multi-'poly'gons")
        args$poly <- coordinates(object$W@polygons[[1L]]@Polygons[[1L]])
    }
    if (which == "stmctest" && is.null(args[["nsim"]])) {
        args$nsim <- 199L
    }

    ## unfortunately, argument names are not consistent across functions
    if (which == "stsecal") names(args)[names(args) == "tlimits"] <- "tlim"
    if (which == "stmctest") names(args)[names(args) == "tm"] <- "tt"

    ## call the selected splancs function
    do.call(FUN, args)
}


## Monte-Carlo test for space-time interaction
stKtest <- function (object, eps.s = NULL, eps.t = NULL, B = 199,
                     cores = 1, seed = NULL, poly = object$W)
{
    stopifnot(inherits(object, "epidataCS"),
              isScalar(cores), cores > 0, isScalar(B), B > 0)
    cores <- as.integer(cores)
    B <- as.integer(B)

    ## naive default grids
    if (is.null(eps.s))
        eps.s <- seq(0, min(object$events$eps.s,
                            apply(bbox(object$W), 1, diff)/2),
                     length.out = 10)
    if (is.null(eps.t))
        eps.t <- seq(0, min(object$events$eps.t,
                            tail(object$stgrid$stop, 1L)/2),
                     length.out = 10)

    ## extract coordinates of the polygon
    polycoordslist <- xylist(poly)
    if (length(polycoordslist) > 1L) {
        stop("package \"splancs\" does not support multi-'poly'gons")
    }
    Wcoords <- as.matrix(as.data.frame(polycoordslist[[1L]]))

    ## calculate K-function
    stK <- stKcall("stkhat", object = object,
                   eps.s = eps.s, eps.t = eps.t, poly = Wcoords)

    ## calculate standard error
    seD <- stKcall("stsecal", object = object,
                   eps.s = eps.s, eps.t = eps.t, poly = Wcoords)

    ## perform Monte Carlo permutation test (parallelized)
    permt <- plapply(
        X = diff(round(seq(from = 0, to = B, length.out = cores + 1L))),
        FUN = function (nsim) {
            stKcall("stmctest", object = object,
                    eps.s = eps.s, eps.t = eps.t, poly = Wcoords,
                    nsim = nsim, quiet = TRUE)[["t"]]
        }, .parallel = cores, .seed = seed, .verbose = FALSE
    )
    mctest <- list(
        "t0" = sum(stK$kst - outer(stK$ks, stK$kt)),
        "t" = unlist(permt, recursive = FALSE, use.names = FALSE)
    )
    PVAL <- mean(c(mctest[["t0"]], mctest[["t"]]) >= mctest[["t0"]])

    ## return test results
    structure(
        list(method = "Diggle et al (1995) K-function test for space-time clustering",
             data.name = deparse(substitute(object)),
             statistic = setNames(mctest$t0, "U"), # sum of residuals
             parameter = setNames(B, "B"),
             p.value = PVAL,
             pts = coordinates(object$events),
             stK = stK, seD = seD, mctest = mctest),
        class = c("stKtest", "htest")
    )
}


## diagnostic plots related to space-time K-function analysis
## inspired by splancs::stdiagn authored by Barry Rowlingson and Peter Diggle
plot.stKtest <- function (x, which = c("D", "R", "MC"),
                          args.D = list(), args.D0 = args.D,
                          args.R = list(), args.MC = list(),
                          mfrow = sort(n2mfrow(length(which))), ...)
{
    stkh <- x$stK
    stse <- x$seD
    stmc <- x$mctest

    if (identical(which, "stdiagn")) {
        splancs::stdiagn(pts = x$pts, stkh = stkh, stse = stse, stmc = stmc)
        return(invisible())
    }
    which <- match.arg(which, several.ok = TRUE)
    stopifnot(is.list(args.D), is.list(args.D0),
              is.list(args.R), is.list(args.MC))

    ## K_0(s,t) = K(s) * K(t)
    K0 <- outer(stkh$ks, stkh$kt)
    ## D(s,t) = K(s,t) - K_0(s,t)
    st.D <- stkh$kst - K0

    if (!is.null(mfrow)) {
        omfrow <- par(mfrow = mfrow)
        on.exit(par(omfrow))
    }

    ## D plots
    Dzero <- which[which %in% c("D", "D0")] == "D0"
    whichDzero <- match(Dzero, c(FALSE, TRUE))
    omar <- par(mar = if (is.null(args.D[["mar"]]))
                    c(2, 2, par("mar")[3L], 1) else args.D[["mar"]])
    mapply(
        FUN = function (z, Dzero, args) {
            defaultArgs <- list(
                x = stkh$s, y = stkh$t, z = z,
                main = if (Dzero) "Excess risk" else "D plot",
                xlab = "Distance", ylab = "Time lag", zlab = "",
                ticktype = "detailed", shade = 0.5, col = "lavender",
                theta = -30, phi = 15, expand = 0.5
            )
            do.call("persp", modifyList(defaultArgs, args))
        },
        z = list(st.D, st.D/K0)[whichDzero],
        Dzero = Dzero,
        args = list(args.D, args.D0)[whichDzero],
        SIMPLIFY = FALSE, USE.NAMES = FALSE
    )
    par(omar)

    ## Residual plot
    if ("R" %in% which) {
        st.R <- st.D/stse
        defaultArgs.R <- list(
            x = K0, y = st.R,
            panel.first = quote(abline(h = c(-2, 0, 2), lty = c(2, 1, 2))),
            xlab = "K(s)K(t)", ylab = "R",
            main = "Standardized residuals",
            ylim = range(0, st.R, finite = TRUE)
        )
        do.call("plot.default", modifyList(defaultArgs.R, args.R))
    }

    ## MC permutation test plot
    if ("MC" %in% which) {
        defaultArgs.MC <- list(
            permstats = stmc$t,
            xmarks = setNames(stmc$t0, "observed"),
            main = "MC permutation test"
        )
        do.call("permtestplot", modifyList(defaultArgs.MC, args.MC))
    }

    invisible()
}
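## Illustration (kept inert via if (FALSE)): a minimal usage sketch of
## stKtest()/plot.stKtest(). The "imdepi" example data shipped with this
## package and the eps grids chosen below are assumptions for illustration.
if (FALSE) {
    data("imdepi")                       # an "epidataCS" object (assumed)
    kt <- stKtest(imdepi, eps.s = c(0, 25, 50, 75, 100),
                  eps.t = c(0, 30, 60, 90), B = 19)  # small B to keep it fast
    kt$p.value                           # permutational p-value
    plot(kt, which = c("D", "R"))        # perspective and residual diagnostics
}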
surveillance/R/AllClass.R0000644000176200001440000001124613554101316014773 0ustar liggesusers
# ------------- class sts ----------------------------------------

.sts <- setClass(
    "sts",
    slots = c(
        epoch = "numeric", # this slot was called "week" in surveillance < 1.3
        freq = "numeric",
        start = "numeric",
        observed = "matrix",
        state = "matrix",
        alarm = "matrix",
        upperbound = "matrix",
        neighbourhood = "matrix",
        populationFrac = "matrix",
        map = "SpatialPolygons",
        control = "list",
        ## New slots added in version 1.1-2 to handle proportion time series:
        epochAsDate = "logical",
        multinomialTS = "logical"
    ),
    prototype = list(
        start = c(2000, 1), freq = 52, # historical defaults
        epochAsDate = FALSE, multinomialTS = FALSE
    ),
    validity = function (object) {
        dimObserved <- dim(object@observed)
        namesObserved <- colnames(object@observed)
        errors <- c(
            if (!isScalar(object@freq) || object@freq <= 0)
                "'freq' must be a single positive number",
            if (length(object@start) != 2)
                "'start' must be of length two: (year, week/month/idx)",
            if (!is.numeric(object@observed))
                "'observed' must be a numeric matrix",
            ## check consistency of slot dimensions wrt dim(observed):
            if (length(object@epoch) != dimObserved[1L])
                "'epoch' must be of length 'nrow(observed)'",
            if (!identical(dim(object@state), dimObserved))
                "'state' must have the same dimensions as 'observed'",
            if (!identical(dim(object@alarm), dimObserved))
                "'alarm' must have the same dimensions as 'observed'",
            if (!identical(dim(object@upperbound), dimObserved))
                "'upperbound' must have the same dimensions as 'observed'",
            if (!identical(dim(object@neighbourhood), dimObserved[c(2L, 2L)]))
                "'neighbourhood' must be a square matrix of size 'ncol(observed)'",
            if (!identical(dim(object@populationFrac), dimObserved))
                "'populationFrac' must have the same dimensions as 'observed'",
            ## disallow NULL colnames in *multivariate* "sts" objects
            if (dimObserved[2L] > 1 && is.null(namesObserved))
                "units must be named (set 'colnames(observed)')",
            ## FIXME: should we generally disallow NULL colnames?
            ## NOTE: aggregate(by="unit") previously (<= 1.15.0) had no colnames
            ## if a map is provided, it must cover all colnames(observed):
            if (length(object@map) > 0 && # i.e., not the empty prototype
                !all(namesObserved %in% row.names(object@map)))
                "'map' is incomplete; ensure that all(colnames(observed) %in% row.names(map))",
            ## check booleans
            if (length(object@epochAsDate) != 1 || is.na(object@epochAsDate))
                "'epochAsDate' must be either TRUE or FALSE",
            ## FIXME: we should enforce epoch[1L] to correspond to start
            ## if (!object@epochAsDate && object@epoch[1L] != 1)
            ##     "'epoch' must be an integer sequence starting at 1",
            if (length(object@multinomialTS) != 1 || is.na(object@multinomialTS))
                "'multinomialTS' must be either TRUE or FALSE"
        )
        ## detect mismatch in column names between different slots
        if (dimObserved[2L] > 1 && !is.null(namesObserved)) {
            slots_dn <- c("state", "alarm", "upperbound",
                          "populationFrac", "neighbourhood")
            errors_dn <- lapply(slots_dn, function (name) {
                cn <- colnames(slot(object, name))
                if (!is.null(cn) && !identical(cn, namesObserved))
                    paste0("'colnames(", name, ")' differ from 'colnames(observed)'")
            })
            errors <- c(errors, unlist(errors_dn))
        }
        if (length(errors) > 0) errors else TRUE
    }
)


######################################################################
# Definition of the stsBP class for backprojections.
######################################################################

setClass("stsBP",
         slots = list(
             ci = "array",
             lambda = "array"
         ),
         contains = "sts")


######################################################################
# Definition of the stsNC class for nowcasts.
######################################################################

setClass("stsNC",
         slots = list(
             reportingTriangle = "matrix",
             predPMF = "list",
             pi = "array",
             truth = "sts",
             delayCDF = "list",
             SR = "array"
         ),
         contains = "sts")
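## Illustration (kept inert via if (FALSE)): constructing a minimal "sts"
## object that satisfies the validity checks above by supplying all
## dimension-checked slots explicitly. All names and values are made up,
## and this assumes no custom initialize method interferes; in practice
## the package's user-level constructor is the friendlier entry point.
if (FALSE) {
    obs <- matrix(rpois(104, lambda = 10), ncol = 2,
                  dimnames = list(NULL, c("unit1", "unit2")))
    mysts <- new("sts",
                 epoch = seq_len(nrow(obs)), freq = 52, start = c(2001, 1),
                 observed = obs,
                 state = matrix(FALSE, nrow(obs), ncol(obs)),
                 alarm = matrix(FALSE, nrow(obs), ncol(obs)),
                 upperbound = matrix(NA_real_, nrow(obs), ncol(obs)),
                 neighbourhood = matrix(1, ncol(obs), ncol(obs)),
                 populationFrac = matrix(0.5, nrow(obs), ncol(obs)))
    validObject(mysts)  # TRUE if all validity checks above pass
}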
surveillance/R/bodaDelay.R0000644000176200001440000006306714004512307015166 0ustar liggesusers
# ____________________________
# |\_________________________/|\
# ||                         ||  \
# ||       bodaDelay         ||   \
# ||                         ||   |
# ||                         ||   |
# ||                         ||   |
# ||                         ||   |
# ||                         ||   |
# ||                         ||  /
# ||_________________________|| /
# |/_________________________\|/
#    __\_________________/__/|_
#   |_______________________|/ )
# ________________________     (__
# /oooo  oooo  oooo  oooo /|    _  )_
# /ooooooooooooooooooooooo/ /  (_)_(_)
# /ooooooooooooooooooooooo/ /    (o o)
#/C=_____________________/_/   ==\o/==
# Author: M.Salmon
################################################################################
# CONTENTS
################################################################################
#
# MAIN FUNCTION
# Function that manages input and output.
#
# FIT GLM FUNCTION
# Function that fits a GLM.
#
# THRESHOLD FUNCTION
# Function that calculates the threshold.
#
# DATA GLM FUNCTION
# Function that prepares data for the GLM.
#
# FORMULA FUNCTION
# Function that writes the formula for the GLM.
################################################################################
# END OF CONTENTS
################################################################################

################################################################################
# MAIN FUNCTION
################################################################################

bodaDelay <- function(sts, control = list(
                          range = NULL, b = 5, w = 3,
                          mc.munu = 100, mc.y = 10,
                          pastAberrations = TRUE,
                          verbose = FALSE,
                          alpha = 0.05, trend = TRUE,
                          limit54 = c(5, 4),
                          inferenceMethod = c("asym", "INLA"),
                          quantileMethod = c("MC", "MM"),
                          noPeriods = 1, pastWeeksNotIncluded = NULL,
                          delay = FALSE)) {

  ######################################################################
  # Use special Date class mechanism to find reference months/weeks/days
  ######################################################################
  if (is.null(sts@epochAsDate)) {
    epochAsDate <- FALSE
  } else {
    epochAsDate <- sts@epochAsDate
  }

  ######################################################################
  # Fetch observed and population
  ######################################################################
  # Fetch observed
  observed <- observed(sts)
  freq <- sts@freq
  if (epochAsDate) {
    epochStr <- switch(as.character(freq),
                       "12" = "month", "52" = "week", "365" = "day")
  } else {
    epochStr <- "none"
  }

  # Fetch population (if it exists)
  if (!is.null(population(sts))) {
    population <- population(sts)
  } else {
    population <- rep(1, length(observed))
  }

  ######################################################################
  # Fix missing control options
  ######################################################################
  if (is.null(control[["b", exact = TRUE]])) { control$b = 5 }
  if (is.null(control[["w", exact = TRUE]])) { control$w = 3 }
  if (is.null(control[["range", exact = TRUE]])) {
    control$range <- (freq*(control$b) + control$w + 1):dim(observed)[1]
  }
  if (is.null(control[["pastAberrations", exact = TRUE]])) { control$pastAberrations = TRUE }
  if (is.null(control[["verbose", exact = TRUE]])) { control$verbose = FALSE }
  if (is.null(control[["alpha", exact = TRUE]])) { control$alpha = 0.05 }
  if (is.null(control[["trend", exact = TRUE]])) { control$trend = TRUE }

  # No alarm is sounded
  # if fewer than cases = 5 reports were received in the past period = 4
  # weeks. limit54=c(cases,period) is a vector allowing the user to change
  # these numbers
  if (is.null(control[["limit54", exact = TRUE]])) { control$limit54 = c(5, 4) }
  if (is.null(control[["noPeriods", exact = TRUE]])) { control$noPeriods = 1 }

  # Use factors in the model? Depends on noPeriods, no input from the user.
  if (control$noPeriods != 1) {
    control$factorsBool = TRUE
  } else {
    control$factorsBool = FALSE
  }

  # How many past weeks not to take into account?
  if (is.null(control[["pastWeeksNotIncluded", exact = TRUE]])) {
    control$pastWeeksNotIncluded = control$w
  }

  # Correct for delays?
  if (is.null(control[["delay", exact = TRUE]])) { control$delay = FALSE }

  # Reporting triangle here?
  if (control$delay) {
    if (is.null(sts@control$reportingTriangle$n)) {
      stop("You have to provide a reporting triangle in control of the sts-object")
    }
    if (!(length(apply(sts@control$reportingTriangle$n, 1, sum, na.rm = TRUE)) ==
          length(sts@observed))) {
      stop("The reporting triangle number of lines is not the length of the observed slot.")
    }
    if (!(sum(apply(sts@control$reportingTriangle$n, 1, sum, na.rm = TRUE) ==
              sts@observed) == length(sts@observed))) {
      stop("The reporting triangle is wrong: not all cases are in the reporting triangle.")
    }
  }

  # setting for monte carlo integration
  if (is.null(control[["mc.munu", exact = TRUE]])) {
    control$mc.munu <- 100
  }
  if (is.null(control[["mc.y", exact = TRUE]])) {
    control$mc.y <- 10
  }

  ######################################################################
  # Check options
  ######################################################################
  if (!((control$limit54[1] >= 0) && (control$limit54[2] > 0))) {
    stop("The limit54 arguments are out of bounds: cases >= 0 and period > 0.")
  }

  # inference method
  if (is.null(control[["inferenceMethod", exact = TRUE]])) {
    control$inferenceMethod <- "asym"
  } else {
    control$inferenceMethod <- match.arg(control$inferenceMethod,
                                         c("asym", "INLA"))
  }
  if (is.null(control[["quantileMethod", exact = TRUE]])) {
    control$quantileMethod <- "MC"
  } else {
    control$quantileMethod <- match.arg(control$quantileMethod,
                                        c("MC", "MM"))
  }

  # Check if the INLA package is available.
  # (the URL below was lost in extraction; the R-INLA homepage is assumed)
  if (control$inferenceMethod == "INLA") {
    if (!requireNamespace("INLA", quietly = TRUE)) {
      stop("The bodaDelay function requires the INLA package to be installed.\n",
           "  The package is not available on CRAN, but can be easily obtained\n",
           "  from <https://www.r-inla.org>.\n",
           "  Alternatively, set inferenceMethod to \"asym\".")
    }
  }

  # Define objects
  n <- control$b*(2*control$w + 1)

  # loop over columns of sts
  # Vector of dates
  if (epochAsDate) {
    vectorOfDates <- as.Date(sts@epoch, origin = "1970-01-01")
  } else {
    vectorOfDates <- seq_len(length(observed))
  }

  # Parameters
  b <- control$b
  w <- control$w
  noPeriods <- control$noPeriods
  verbose <- control$verbose
  reportingTriangle <- sts@control$reportingTriangle
  timeTrend <- control$trend
  alpha <- control$alpha
  factorsBool <- control$factorsBool
  pastAberrations <- control$pastAberrations
  glmWarnings <- control$glmWarnings
  delay <- control$delay
  k <- control$k
  verbose <- control$verbose
  pastWeeksNotIncluded <- control$pastWeeksNotIncluded
  mc.munu <- control$mc.munu
  mc.y <- control$mc.y

  # Loop over control$range
  for (k in control$range) {
    ######################################################################
    # Prepare data for the glm
    ######################################################################
    dayToConsider <- vectorOfDates[k]
    diffDates <- diff(vectorOfDates)
    delay <- control$delay
    dataGLM <- bodaDelay.data.glm(dayToConsider = dayToConsider,
                                  b = b, freq = freq,
                                  epochAsDate = epochAsDate,
                                  epochStr = epochStr,
                                  vectorOfDates = vectorOfDates, w = w,
                                  noPeriods = noPeriods,
                                  observed = observed, population = population,
                                  verbose = verbose,
                                  pastWeeksNotIncluded = pastWeeksNotIncluded,
                                  reportingTriangle = reportingTriangle,
                                  delay = delay)

    ######################################################################
    # Fit the model
    ######################################################################
    argumentsGLM <- list(dataGLM = dataGLM, reportingTriangle = reportingTriangle,
                         timeTrend = timeTrend, alpha = alpha,
                         factorsBool = factorsBool,
                         pastAberrations = pastAberrations,
                         glmWarnings = glmWarnings,
                         verbose = verbose, delay = delay,
                         inferenceMethod = control$inferenceMethod)
    model <- do.call(bodaDelay.fitGLM, args = argumentsGLM)

    if (is.null(model)) {
      sts@upperbound[k] <- NA
      sts@alarm[k] <- NA
    } else {
      ######################################################################
      # Calculate the threshold
      ######################################################################
      quantileMethod <- control$quantileMethod
      argumentsThreshold <- list(model, alpha = alpha, dataGLM = dataGLM,
                                 reportingTriangle,
                                 delay = delay, k = k, control = control,
                                 mc.munu = mc.munu, mc.y = mc.y,
                                 inferenceMethod = control$inferenceMethod,
                                 quantileMethod = quantileMethod)
      threshold <- do.call(bodaDelay.threshold, argumentsThreshold)

      ######################################################################
      # Output results if enough cases
      ######################################################################
      sts@upperbound[k] <- threshold
      enoughCases <- (sum(observed[(k - control$limit54[2] + 1):k])
                      >= control$limit54[1])
      sts@alarm[k] <- FALSE
      if (is.na(threshold)) {
        sts@alarm[k] <- NA
      } else {
        if (sts@observed[k] > sts@upperbound[k]) { sts@alarm[k] <- TRUE }
      }
      if (!enoughCases) {
        sts@upperbound[k] <- NA
        sts@alarm[k] <- NA
      }
    }
  } # done looping over all time points

  return(sts[control$range, ])
}

################################################################################
# END OF MAIN FUNCTION
################################################################################
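## Illustration (kept inert via if (FALSE)): a usage sketch of bodaDelay().
## The "salmNewport" example data and all control settings below are
## assumptions for illustration, not package defaults.
if (FALSE) {
  data("salmNewport")
  salm <- aggregate(salmNewport, by = "unit")   # univariate sts (assumed method)
  ctrl <- list(range = 282:312, b = 4, w = 3, noPeriods = 10,
               pastWeeksNotIncluded = 26, delay = FALSE,
               inferenceMethod = "asym", quantileMethod = "MC",
               alpha = 0.05, limit54 = c(0, 50))
  monitored <- bodaDelay(salm, ctrl)
  plot(monitored)   # upperbound and alarms for the monitored range
}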
################################################################################
# FIT GLM FUNCTION
################################################################################

bodaDelay.fitGLM <- function(dataGLM, reportingTriangle, alpha,
                             timeTrend, factorsBool, delay, pastAberrations,
                             glmWarnings, verbose, inferenceMethod, ...) {

  # Model formula depends on whether to include a time trend or not.
  theModel <- formulaGLMDelay(timeBool = timeTrend, factorsBool, delay,
                              outbreak = FALSE)

  if (inferenceMethod == "INLA") {
    E <- max(0, mean(dataGLM$response, na.rm = TRUE))
    link = 1
    model <- INLA::inla(as.formula(theModel), data = dataGLM,
                        family = 'nbinomial', E = E,
                        control.predictor = list(compute = TRUE, link = link),
                        control.compute = list(cpo = TRUE, config = TRUE),
                        control.inla = list(int.strategy = "grid", dz = 1,
                                            diff.logdens = 10),
                        control.family = list(hyper = list(theta = list(
                          prior = "normal", param = c(0, 0.001)))))

    if (pastAberrations) {
      # if we have failures => recompute those manually
      #if (sum(model$cpo$failure,na.rm=TRUE)!=0){
      #  model <- inla.cpo(model)
      #}
      # Calculate the mid p-value
      vpit <- model$cpo$pit
      vcpo <- model$cpo$cpo
      midpvalue <- vpit - 0.5*vcpo
      # Detect the points with a high mid p-value
      # outbreakOrNot <- midpvalue
      # outbreakOrNot[midpvalue <= (1-alpha)] <- 0
      outbreakOrNot <- ifelse(midpvalue > (1 - alpha), 1, 0)
      outbreakOrNot[is.na(outbreakOrNot)] <- 0 # FALSE
      outbreakOrNot[is.na(dataGLM$response)] <- 0 # FALSE

      # Only recompute the model if it will bring something!
      if (sum(outbreakOrNot) > 0) {
        dataGLM <- cbind(dataGLM, outbreakOrNot)
        theModel <- formulaGLMDelay(timeBool = timeTrend, factorsBool, delay,
                                    outbreak = TRUE)
        model <- INLA::inla(as.formula(theModel), data = dataGLM,
                            family = 'nbinomial', E = E,
                            control.predictor = list(compute = TRUE, link = link),
                            control.compute = list(cpo = FALSE, config = TRUE),
                            control.inla = list(int.strategy = "grid", dz = 1,
                                                diff.logdens = 10),
                            control.family = list(hyper = list(theta = list(
                              prior = "normal", param = c(0, 0.001)))))
        # if we have failures => recompute those manually
        # if (sum(model$cpo$failure,na.rm=TRUE)!=0){model <- inla.cpo(model)}
        vpit <- model$cpo$pit
        vcpo <- model$cpo$cpo
        midpvalue <- vpit - 0.5*vcpo
      }
    }
  }

  if (inferenceMethod == "asym") {
    model <- MASS::glm.nb(as.formula(theModel), data = dataGLM)
    if (!model$converged) {
      return(NULL)
    }
  }

  return(model)
}

################################################################################
# END OF FIT GLM FUNCTION
################################################################################
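## Illustration (kept inert via if (FALSE)): a toy run of the "asym" branch
## above, i.e. a MASS::glm.nb fit on the formula string assembled by
## formulaGLMDelay() (defined further below). The data are simulated here,
## not taken from the package.
if (FALSE) {
  toy <- data.frame(response = rpois(100, lambda = 10), wtime = 1:100)
  fit <- MASS::glm.nb(as.formula(formulaGLMDelay(timeBool = TRUE)), data = toy)
  summary(fit)$theta  # the size parameter later sampled in the threshold step
}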
################################################################################
# THRESHOLD FUNCTION
################################################################################

bodaDelay.threshold <- function(model, mc.munu, mc.y, alpha,
                                delay, k, control, dataGLM, reportingTriangle,
                                inferenceMethod, quantileMethod, ...) {
  quantileMethod <- control$quantileMethod

  if (inferenceMethod == "INLA") {
    E <- max(0, mean(dataGLM$response, na.rm = TRUE))
    # Sample from the posterior
    jointSample <- INLA::inla.posterior.sample(mc.munu, model, intern = TRUE)
    # take variation in size hyperprior into account by also sampling from it
    theta <- t(sapply(jointSample, function(x) x$hyperpar))

    if (delay) {
      mu_Tt <- numeric(mc.munu)
      N_Tt <- numeric(mc.munu*mc.y)
      # Maximal delay + 1
      Dmax0 <- ncol(as.matrix(reportingTriangle$n))
      # The sum has to be up to min(D,T-t). This is how we find the right indices.
      loopLimit <- min(Dmax0,
                       which(is.na(as.matrix(reportingTriangle$n)[k, ])) - 1,
                       na.rm = TRUE)
      # Find the mu_td and sum
      for (d in 1:loopLimit) {
        if (sum(dataGLM$response[dataGLM$delay == d], na.rm = TRUE) != 0) {
          mu_Tt <- mu_Tt + exp(t(sapply(jointSample, function(x)
            x$latent[[nrow(dataGLM) - Dmax0 + d]])))
        }
      }
    } else {
      # with no delay this is similar to boda.
      mu_Tt <- exp(t(sapply(jointSample, function(x) x$latent[[nrow(dataGLM)]])))
    }
  }

  if (inferenceMethod == "asym") {
    E <- 1
    # Sample from the posterior
    set.seed(1)
    # take variation in size hyperprior into account by also sampling from it
    theta <- rnorm(n = mc.munu, mean = summary(model)$theta,
                   sd = summary(model)$SE.theta)

    if (delay) {
      # Maximal delay + 1
      Dmax0 <- ncol(as.matrix(reportingTriangle$n))
      mu_Tt <- numeric(mc.munu)
      newData <- tail(dataGLM, n = Dmax0)
      P = predict(model, type = "link", se.fit = TRUE, newdata = newData)
      # The sum has to be up to min(D,T-t). This is how we find the right indices.
      loopLimit <- min(Dmax0,
                       which(is.na(as.matrix(reportingTriangle$n)[k, ])) - 1,
                       na.rm = TRUE)
      # Find the mu_td and sum
      for (d in 1:loopLimit) {
        if (sum(dataGLM$response[dataGLM$delay == d], na.rm = TRUE) != 0) {
          mu_Tt <- mu_Tt + exp(rnorm(n = mc.munu, mean = P$fit[d],
                                     sd = P$se.fit[d]))
        }
      }
    } else {
      # with no delay this is similar to boda.
      newData <- tail(dataGLM, n = 1)
      P = try(predict(model, type = "link", se.fit = TRUE, newdata = newData),
              silent = TRUE)
      if (inherits(P, "try-error")) {
        P <- NA
        return(NA)
      }
      set.seed(1)
      mu_Tt <- exp(rnorm(n = mc.munu, mean = P$fit, sd = P$se.fit))
    }
  }

  # can only use positive theta (mu_Tt is positive anyway)
  mu_Tt <- mu_Tt[theta > 0]
  theta <- theta[theta > 0]

  if (quantileMethod == "MC") {
    N_Tt <- rnbinom(n = mc.y*mc.munu, size = theta, mu = E*mu_Tt)
    qi <- quantile(N_Tt, probs = (1 - alpha), type = 3, na.rm = TRUE)
  }
  if (quantileMethod == "MM") {
    minBracket <- qnbinom(p = (1 - alpha), mu = E*min(mu_Tt), size = max(theta))
    maxBracket <- qnbinom(p = (1 - alpha), mu = E*max(mu_Tt), size = min(theta))
    qi <- qmix(p = (1 - alpha), mu = E*mu_Tt, size = theta,
               bracket = c(minBracket, maxBracket))
  }
  return(as.numeric(qi))
}

################################################################################
# END OF THRESHOLD GLM FUNCTION
################################################################################
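## Illustration (kept inert via if (FALSE)): a toy comparison of the two
## quantile strategies used above, Monte Carlo sampling ("MC") versus the
## quantile of the mixture CDF ("MM", via qmix() defined further below).
## The mu/theta draws are simulated here.
if (FALSE) {
  set.seed(1)
  mu <- exp(rnorm(100, mean = log(10), sd = 0.1))  # posterior mean draws
  theta <- rgamma(100, shape = 20, rate = 2)       # posterior size draws
  yMC <- rnbinom(10 * 100, size = theta, mu = mu)  # mc.y draws per (mu, theta)
  quantile(yMC, probs = 0.95, type = 3)            # "MC" threshold
  qmix(p = 0.95, mu = mu, size = theta, bracket = c(0, 100))  # "MM" threshold
}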
################################################################################
# DATA GLM FUNCTION
################################################################################

bodaDelay.data.glm <- function(dayToConsider, b, freq,
                               epochAsDate, epochStr,
                               vectorOfDates, w, noPeriods,
                               observed, population,
                               verbose, pastWeeksNotIncluded,
                               reportingTriangle, delay) {

  # Identify reference time points
  # Same date but with one year, two year, etc, lag
  # b+1 because we need to have the current week in the vector
  referenceTimePoints <- algo.farrington.referencetimepoints(dayToConsider,
                                                             b = b,
                                                             freq = freq,
                                                             epochAsDate = epochAsDate,
                                                             epochStr = epochStr)

  if (sum((vectorOfDates %in% min(referenceTimePoints)) ==
          rep(FALSE, length(vectorOfDates))) == length(vectorOfDates)) {
    warning("Some reference values did not exist (index<1).")
  }

  # Create the blocks for the noPeriods between windows (including windows)
  # If noPeriods=1 this is a way of identifying windows, actually.
  blocks <- blocks(referenceTimePoints, vectorOfDates, epochStr, dayToConsider,
                   b, w, noPeriods, epochAsDate)

  # Here add option for not taking the X past weeks into account
  # to avoid adaptation of the model to emerging outbreaks
  blocksID <- blocks

  # Extract values for the timepoints of interest only
  blockIndexes <- which(is.na(blocksID) == FALSE)

  # Time
  # if epochAsDate make sure wtime has a 1 increment
  if (epochAsDate) {
    wtime <- (as.numeric(vectorOfDates[blockIndexes]) -
              as.numeric(vectorOfDates[blockIndexes][1])) /
      as.numeric(diff(vectorOfDates))[1]
  } else {
    wtime <- as.numeric(vectorOfDates[blockIndexes])
  }

  # Factors
  seasgroups <- as.factor(blocks[blockIndexes])

  # Observed
  response <- as.numeric(observed[blockIndexes])
  response[length(response)] <- NA

  # Population
  pop <- population[blockIndexes]

  if (verbose) { print(response) }

  # If the delays are not to be taken into account it is like farringtonFlexible
  if (!delay) {
    dataGLM <- data.frame(response = response, wtime = wtime, population = pop,
                          seasgroups = seasgroups,
                          vectorOfDates = vectorOfDates[blockIndexes])
    dataGLM$response[(nrow(dataGLM) - pastWeeksNotIncluded):nrow(dataGLM)] <- NA
  } else {
    # If the delays are to be taken into account we need a bigger dataframe
    # Delays
    delays <- as.factor(0:(dim(reportingTriangle$n)[2] - 1))

    # Take the subset of the reporting triangle corresponding to the
    # timepoints used for fitting the model
    reportingTriangleGLM <- reportingTriangle$n[
      rownames(reportingTriangle$n) %in%
        as.character(vectorOfDates[blockIndexes]), ]

    # All vectors of data will be this long:
    # each entry will correspond to one t and one d
    lengthGLM <- dim(reportingTriangleGLM)[2]*dim(reportingTriangleGLM)[1]

    # Create the vectors for storing data
    responseGLM <- numeric(lengthGLM)
    wtimeGLM <- numeric(lengthGLM)
    seasgroupsGLM <- numeric(lengthGLM)
    popGLM <- numeric(lengthGLM)
    vectorOfDatesGLM <- numeric(lengthGLM)
    delaysGLM <- numeric(lengthGLM)

    # Fill them D by D
    D <- dim(reportingTriangleGLM)[2]
    for (i in (1:dim(reportingTriangleGLM)[1])) {
      vectorOfDatesGLM[((i-1)*D+1):(i*D)] <- rep(vectorOfDates[blockIndexes][i], D)
      wtimeGLM[((i-1)*D+1):(i*D)] <- rep(wtime[i], D)
      popGLM[((i-1)*D+1):(i*D)] <- rep(pop[i], D)
      seasgroupsGLM[((i-1)*D+1):(i*D)] <- rep(seasgroups[i], D)
      responseGLM[((i-1)*D+1):(i*D)] <- reportingTriangleGLM[i, ]
      delaysGLM[((i-1)*D+1):(i*D)] <- 0:(D-1)
    }

    responseGLM[((i-1)*D+1):(i*D)] <- rep(NA, D)
    responseGLM[(length(responseGLM) - pastWeeksNotIncluded*D):length(responseGLM)] <- NA

    dataGLM <- data.frame(response = responseGLM, wtime = wtimeGLM,
                          population = popGLM,
                          seasgroups = as.factor(seasgroupsGLM),
                          vectorOfDates = as.Date(vectorOfDatesGLM,
                                                  origin = "1970-01-01"),
                          delay = delaysGLM)
  }

  return(as.data.frame(dataGLM))
}

################################################################################
# END OF DATA GLM FUNCTION
################################################################################
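## Illustration (kept inert via if (FALSE)): the shape produced by the delay
## branch above. With maximal delay D-1, D rows are stacked per time point,
## mirroring a reporting triangle row-wise. The toy triangle is made up.
if (FALSE) {
  rtri <- matrix(c(3, 2, 1,
                   4, 1, NA,
                   5, NA, NA), nrow = 3, byrow = TRUE)  # 3 times x 3 delays
  data.frame(t = rep(1:3, each = 3),
             delay = rep(0:2, times = 3),
             response = as.vector(t(rtri)))
}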
################################################################################
# FORMULA FUNCTION
################################################################################

# Function for writing the appropriate model formula depending on timeTrend,
# factorsBool, delay and outbreak; returns the formula as a string.
formulaGLMDelay <- function(timeBool = TRUE, factorsBool = FALSE,
                            delay = FALSE, outbreak = FALSE) {
  # Smallest formula
  formulaString <- "response ~ 1"

  # With time trend?
  if (timeBool) {
    formulaString <- paste(formulaString, "+wtime", sep = "")
  }

  # With factors?
  if (factorsBool) {
    formulaString <- paste(formulaString, "+as.factor(seasgroups)", sep = "")
  }

  # With delays?
  if (delay) {
    formulaString <- paste(formulaString, "+as.factor(delay)", sep = "")
  }

  if (outbreak) {
    formulaString <- paste(formulaString,
                           "+f(outbreakOrNot,model='linear', prec.linear = 1)",
                           sep = "")
  }

  # Return formula as a string
  return(formulaString)
}

################################################################################
# END OF FORMULA FUNCTION
################################################################################
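## Illustration (kept inert via if (FALSE)): the formula strings this helper
## produces, for reference:
if (FALSE) {
  formulaGLMDelay(timeBool = FALSE)   # "response ~ 1"
  formulaGLMDelay(timeBool = TRUE, factorsBool = TRUE, delay = TRUE)
  # "response ~ 1+wtime+as.factor(seasgroups)+as.factor(delay)"
}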
######################################################################
# CDF of the negbin mixture with different means and sizes
######################################################################

pmix <- function(y, mu, size) {
  PN <- pnbinom(y, mu = mu, size = size)
  # equally weighted average of the component CDFs
  lala <- 1/sum(!is.na(PN))*sum(PN, na.rm = TRUE)
  return(lala)
}

######################################################################
# END OF CDF of the negbin mixture with different means and sizes
######################################################################

######################################################################
# Find the root(s) of a 1D function using the bisection method
#
# Params:
#  f - the function to minimize or the first derivative of the function
#      to optimize
#  bracket - integer interval known to contain the root
######################################################################

bisection <- function(f, bracket) {
  ##Boolean for convergence
  convergence <- FALSE
  ##Loop until converged
  while (!convergence) {
    #Half the interval (problem with ints: what uneven number?)
    x <- ceiling(mean(bracket))
    ##Direct hit? -> stop
    if (isTRUE(all.equal(f(x), 0))) break
    ##Choose the interval containing the root
    bracket <- if (f(bracket[1])*f(x) <= 0) c(bracket[1], x) else c(x, bracket[2])
    ##Have we obtained convergence?
    convergence <- (bracket[1] + 1) == bracket[2]
  }
  #Return the value of x^{n+1}
  return(ceiling(mean(bracket)))
}

######################################################################
# END OF BISECTION FUNCTION
######################################################################

######################################################################
##Find the p-quantile of the mixture distribution using bisectioning
##
## Parameters:
##  p - the q_p quantile is found
##  mu - mean vector
##  size - size param
##  bracket - vector of length two, s.t. pmix(bracket[1]) < 1-alpha and
##            pmix(bracket[2]) > 1-alpha. Exception: if bracket[1]=0
##            then pmix(bracket[1]) > 1-alpha is ok.
######################################################################

qmix <- function(p, mu, size, bracket = c(0, mu*100)) {
  target <- function(y) {
    pmix(y = y, mu = mu, size = size) - p
  }
  if (target(bracket[1]) * target(bracket[2]) > 0) {
    if ((bracket[1] == 0) & (target(bracket[1]) > 0)) return(0)
    stop("Not a good bracket.")
  }
  bisection(target, bracket = bracket)
}
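## Illustration (kept inert via if (FALSE)): the mixture helpers on a made-up
## two-component negative binomial mixture:
if (FALSE) {
  pmix(y = 15, mu = c(8, 12), size = 10)                         # mixture CDF at 15
  qmix(p = 0.95, mu = c(8, 12), size = 10, bracket = c(0, 100))  # ~95% quantile
}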
surveillance/R/algo_farrington.R0000644000176200001440000005046714024124757016451 0ustar liggesusers
### R code from vignette source 'Rnw/algo_farrington.Rnw'
### Encoding: ISO8859-1

###################################################
### code chunk number 1: algo_farrington.Rnw:25-35
###################################################

anscombe.residuals <- function(m, phi) {
  y <- m$y
  mu <- fitted.values(m)
  #Compute raw Anscombe residuals
  a <- 3/2*(y^(2/3) * mu^(-1/6) - mu^(1/2))
  #Compute standardized residuals
  a <- a/sqrt(phi * (1 - hatvalues(m)))
  return(a)
}

################################################################################
# WEIGHTS FUNCTION
################################################################################

algo.farrington.assign.weights <- function(s, weightsThreshold = 1) {
  #s_i^(-2) for s_i > weightsThreshold and 1 otherwise
  #(the middle of this function was garbled in extraction; the gamma line is
  # reconstructed so that the weights sum to length(s))
  gamma <- length(s)/(sum( (s^(-2))*(s > weightsThreshold) +
                           (s <= weightsThreshold) ))
  omega <- numeric(length(s))
  omega[s > weightsThreshold] <- gamma*(s[s > weightsThreshold]^(-2))
  omega[s <= weightsThreshold] <- gamma
  return(omega)
}
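## Illustration (kept inert via if (FALSE)): residuals above the threshold
## are down-weighted by s^(-2), the rest receive the common factor gamma,
## and by construction the weights sum to length(s):
if (FALSE) {
  s <- c(0.5, 0.9, 1.2, 3)                 # toy standardized residuals
  algo.farrington.assign.weights(s)        # last two weights are shrunk
  sum(algo.farrington.assign.weights(s))   # equals length(s) = 4
}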
###################################################
### code chunk number 3: algo_farrington.Rnw:136-305
###################################################

algo.farrington.fitGLM <- function(response, wtime, timeTrend = TRUE,
                                   reweight = TRUE, ...) {
  #Model formula depends on whether to include a time trend or not.
  theModel <- as.formula(ifelse(timeTrend, "response~1+wtime", "response~1"))

  #Fit it -- this is slow. An improvement would be to use glm.fit here.
  model <- glm(theModel, family = quasipoisson(link = "log"))

  #Check convergence - if no convergence we return empty handed.
  if (!model$converged) {
    #Try without time dependence
    if (timeTrend) {
      cat("Warning: No convergence with timeTrend -- trying without.\n")
      #Set model to one without time trend
      theModel <- as.formula("response~1")
      model <- glm(response ~ 1, family = quasipoisson(link = "log"))
    }
    if (!model$converged) {
      cat("Warning: No convergence in this case.\n")
      print(cbind(response, wtime))
      return(NULL)
    }
  }

  #Overdispersion parameter phi
  phi <- max(summary(model)$dispersion, 1)

  #In case reweighting using Anscombe residuals is requested
  if (reweight) {
    s <- anscombe.residuals(model, phi)
    omega <- algo.farrington.assign.weights(s)
    model <- glm(theModel, family = quasipoisson(link = "log"),
                 weights = omega)
    #Here, the overdispersion often becomes small, so we use the max
    #to ensure we don't operate with quantities less than 1.
    phi <- max(summary(model)$dispersion, 1)
  } # end of refit.

  #Add wtime, response and phi to the model
  model$phi <- phi
  model$wtime <- wtime
  model$response <- response
  #Done
  return(model)
}
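## Illustration (kept inert via if (FALSE)): a toy call on simulated baseline
## counts; phi is the (truncated-at-1) overdispersion used downstream:
if (FALSE) {
  set.seed(1)
  fit <- algo.farrington.fitGLM(response = rpois(35, lambda = 12),
                                wtime = 1:35)
  fit$phi
}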
######################################################################
# The algo.farrington.fitGLM function in a version using glm.fit,
# which is faster than the call using "glm".
# This saves lots of overhead and increases speed.
#
# Author: Mikko Virtanen (@thl.fi) with minor modifications by Michael Hoehle
# Date: 9 June 2010
#
# Note: Not all glm results may work on the output, but the ones
# needed by the algo.farrington procedure do work.
######################################################################

algo.farrington.fitGLM.fast <- function(response, wtime, timeTrend = TRUE,
                                        reweight = TRUE, ...)
{
  #Create design matrix and formula needed for the terms object
  #Result depends on whether to include a time trend or not.
  if (timeTrend) {
    design <- cbind(intercept = 1, wtime = wtime)
    Formula <- response~wtime
  } else {
    design <- matrix(1, nrow = length(wtime),
                     dimnames = list(NULL, c("intercept")))
    Formula <- response~1
  }

  #Fit it using glm.fit which is faster than calling "glm"
  model <- glm.fit(design, response, family = quasipoisson(link = "log"))

  #Check convergence - if no convergence we return empty handed.
  if (!model$converged) {
    #Try without time dependence
    if (timeTrend) {
      cat("Warning: No convergence with timeTrend -- trying without.\n")
      #Drop time from design matrix
      design <- design[, 1, drop = FALSE]
      #Refit
      model <- glm.fit(design, response, family = quasipoisson(link = "log"))
      Formula <- response~1
    }
    #No convergence and no time trend. That's not good.
  }

  #Fix class of output to glm/lm object in order for anscombe.residuals to work
  #Note though: not all glm methods may work for the result
  class(model) <- c("glm", "lm")

  #Overdispersion parameter phi
  phi <- max(summary.glm(model)$dispersion, 1)

  #In case reweighting using Anscombe residuals is requested
  if (reweight) {
    s <- anscombe.residuals(model, phi)
    omega <- algo.farrington.assign.weights(s)
    model <- glm.fit(design, response, family = quasipoisson(link = "log"),
                     weights = omega)
    #Here, the overdispersion often becomes small, so we use the max
    #to ensure we don't operate with quantities less than 1.
    phi <- max(summary.glm(model)$dispersion, 1)
  } # end of refit.

  model$phi <- phi
  model$wtime <- wtime
  model$response <- response
  model$terms <- terms(Formula) # cheating a bit, all methods for glm may not work
  class(model) <- c("algo.farrington.glm", "glm", "lm")
  # 23/10/2012 (SM):
  # added "lm" class to avoid warnings
  # from predict.lm about fake object
  #Done
  return(model)
}
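## Illustration (kept inert via if (FALSE)): both fitters should agree on the
## coefficient estimates; the glm.fit-based version mainly saves the
## formula/model-frame overhead. Timings below are indicative only.
if (FALSE) {
  set.seed(1)
  resp <- rpois(35, lambda = 12); wt <- 1:35
  coef(algo.farrington.fitGLM(resp, wt))
  coef(algo.farrington.fitGLM.fast(resp, wt))
  system.time(replicate(200, algo.farrington.fitGLM.fast(resp, wt)))
}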
######################################################################
# Experimental function to include a population offset in the
# farrington procedure based on algo.farrington.fitGLM
# Alternative: include populationOffset argument in the two other
# fit functions, but I suspect use of this is not so common
#
# Parameters:
#  takes an additional "population" parameter
######################################################################

algo.farrington.fitGLM.populationOffset <- function(response, wtime, population,
                                                    timeTrend = TRUE,
                                                    reweight = TRUE, ...)
{
  #Model formula depends on whether to include a time trend or not.
  theModel <- as.formula(ifelse(timeTrend,
      "response~offset(log(population)) + 1 + wtime",
      "response~offset(log(population)) + 1"))

  #Fit it -- this is slow. An improvement would be to use glm.fit here.
  model <- glm(theModel, family = quasipoisson(link = "log"))

  #Check convergence - if no convergence we return empty handed.
  if (!model$converged) {
    #Try without time dependence
    if (timeTrend) {
      model <- glm(response ~ 1, family = quasipoisson(link = "log"))
      cat("Warning: No convergence with timeTrend -- trying without.\n")
    }
    if (!model$converged) {
      cat("Warning: No convergence in this case.\n")
      print(cbind(response, wtime))
      return(NULL)
    }
  }

  #Overdispersion parameter phi
  phi <- max(summary(model)$dispersion, 1)

  #In case reweighting using Anscombe residuals is requested
  if (reweight) {
    s <- anscombe.residuals(model, phi)
    omega <- algo.farrington.assign.weights(s)
    model <- glm(theModel, family = quasipoisson(link = "log"),
                 weights = omega)
    #Here, the overdispersion often becomes small, so we use the max
    #to ensure we don't operate with quantities less than 1.
    phi <- max(summary(model)$dispersion, 1)
  } # end of refit.

  #Add wtime, response and phi to the model
  model$phi <- phi
  model$wtime <- wtime
  model$response <- response
  model$population <- population
  #Done
  return(model)
}
###################################################
### code chunk number 4: algo_farrington.Rnw:344-370
###################################################

algo.farrington.threshold <- function(pred, phi, alpha = 0.01,
                                      skewness.transform = "none", y) {
  #Fetch mu0 and var(mu0) from the prediction object
  mu0 <- pred$fit
  tau <- phi + (pred$se.fit^2)/mu0

  #Standard deviation of prediction, i.e. sqrt(var(h(Y_0)-h(\mu_0)))
  switch(skewness.transform,
         "none" = { se <- sqrt(mu0*tau); exponent <- 1 },
         "1/2"  = { se <- sqrt(1/4*tau); exponent <- 1/2 },
         "2/3"  = { se <- sqrt(4/9*mu0^(1/3)*tau); exponent <- 2/3 },
         { stop("No proper exponent in algo.farrington.threshold.") })

  #Note that lu can contain NA's if e.g. (-1.47)^(3/2)
  lu <- sort((mu0^exponent + c(-1, 1)*qnorm(1 - alpha/2)*se)^(1/exponent),
             na.last = FALSE)
  #Ensure that lower bound is non-negative
  lu[1] <- max(0, lu[1], na.rm = TRUE)

  #Compute quantiles of the predictive distribution based on the
  #normal approximation on the transformed scale
  q <- pnorm(y^(exponent), mean = mu0^exponent, sd = se)
  m <- qnorm(0.5, mean = mu0^exponent, sd = se)^(1/exponent)

  #Return lower and upper bounds
  return(c(lu, q = q, m = m))
}
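## Illustration (kept inert via if (FALSE)): a toy evaluation where 'pred'
## mimics the output of predict.glm(..., se.fit = TRUE):
if (FALSE) {
  pred <- list(fit = 10, se.fit = 1.5)
  algo.farrington.threshold(pred, phi = 1.2, alpha = 0.05,
                            skewness.transform = "2/3", y = 18)
  # -> lower/upper bound, the quantile q of y = 18, and the median m
}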
#By convention: always go to the closest weekday as t0 refDays <- refDays + ifelse(dx.forward < dx.backward, dx.forward, -dx.backward) } if (epochStr == "1 month") { #What day of the month is t0 (it is assumed that all epochs have the same value here) epochDay <- as.numeric(format(t0,"%d")) #By convention: go back in time to closest 1st of month refDays <- refDays - (as.numeric(format(refDays, "%d")) - epochDay) } #Find the index of these reference values wtime <- match(as.numeric(refDays), epochs) return(wtime) } ################################################### ### code chunk number 6: algo_farrington.Rnw:571-769 ################################################### algo.farrington <- function(disProgObj, control=list( range=NULL, b=5, w=3, reweight=TRUE, verbose=FALSE, plot=FALSE, alpha=0.05, trend=TRUE, limit54=c(5,4), powertrans="2/3", fitFun="algo.farrington.fitGLM.fast") ) { #Fetch observed observed <- disProgObj$observed freq <- disProgObj$freq epochStr <- switch( as.character(freq), "12" = "1 month","52" = "1 week","365" = "1 day") #Fetch population (if it exists) if (!is.null(disProgObj$populationFrac)) { population <- disProgObj$populationFrac } else { population <- rep(1,length(observed)) } ###################################################################### # Initialize and check control options ###################################################################### defaultControl <- eval(formals()$control) control <- modifyList(defaultControl, control, keep.null = TRUE) if (is.null(control$range)) { control$range <- (freq*control$b - control$w):length(observed) } control$fitFun <- match.arg(control$fitFun, c("algo.farrington.fitGLM.fast", "algo.farrington.fitGLM", "algo.farrington.fitGLM.populationOffset")) #Use special Date class mechanism to find reference months/weeks/days if (is.null(disProgObj[["epochAsDate",exact=TRUE]])) { epochAsDate <- FALSE } else { epochAsDate <- disProgObj[["epochAsDate",exact=TRUE]] } #check options if (!((control$limit54[1] >= 0) & (control$limit54[2] > 0))) { stop("The limit54 arguments are out of bounds: cases >= 0 and period > 0.") } #Check control$range is within bounds. if (any((control$range < 1) | (control$range > length(disProgObj$observed)))) { stop("Range values are out of bounds (has to be within 1..",length(disProgObj$observed)," for the present data).") } # initialize the necessary vectors alarm <- matrix(data = 0, nrow = length(control$range), ncol = 1) trend <- matrix(data = 0, nrow = length(control$range), ncol = 1) upperbound <- matrix(data = 0, nrow = length(control$range), ncol = 1) # predictive distribution pd <- matrix(data = 0, nrow = length(control$range), ncol = 2) # Define objects n <- control$b*(2*control$w+1) # 2: Fit of the initial model and first estimation of mean and dispersion # parameter for (k in control$range) { # transform the observed vector in the way # that the timepoint to be evaluated is at last position #shortObserved <- observed[1:(maxRange - k + 1)] if (control$verbose) { cat("k=",k,"\n")} #Find index of all epochs, which are to be used as reference values #i.e. 
with index k-w,..,k+w #in the years (current year)-1,...,(current year)-b if (!epochAsDate) { wtimeAll <- NULL for (i in control$b:1){ wtimeAll <- append(wtimeAll,seq(k-freq*i-control$w,k-freq*i+control$w,by=1)) } #Select them as reference values - but only those who exist wtime <- wtimeAll[wtimeAll>0] if (length(wtimeAll) != length(wtime)) { warning("@ range= ",k,": With current b and w then ",length(wtimeAll) - length(wtime),"/",length(wtimeAll), " reference values did not exist (index<1).") } } else { #Alternative approach using Dates t0 <- as.Date(disProgObj$week[k], origin="1970-01-01") wtimeAll <- refvalIdxByDate( t0=t0, b=control$b, w=control$w, epochStr=epochStr, epochs=disProgObj$week) #Select them as reference values (but only those not being NA!) wtime <- wtimeAll[!is.na(wtimeAll)] #Throw warning if necessary if (length(wtimeAll) != length(wtime)) { warning("@ range= ",k,": With current b and w then ",length(wtimeAll) - length(wtime),"/",length(wtimeAll), " reference values did not exist (index<1).") } } #Extract values from indices response <- observed[wtime] pop <- population[wtime] if (control$verbose) { print(response)} ###################################################################### #Fit the model with overdispersion -- the initial fit ###################################################################### #New feature: fitFun can now be the fast function for fitting the GLM model <- do.call(control$fitFun, args=list(response=response,wtime=wtime,population=pop,timeTrend=control$trend,reweight=control$reweight)) #Stupid check to pass on NULL values from the algo.farrington.fitGLM proc. if (is.null(model)) return(model) ###################################################################### #Time trend # #Check whether to include time trend, to do this we need to check whether #1) wtime is signifcant at the 95lvl #2) the predicted value is not larger than any observed value #3) the historical data span at least 3 years. doTrend <- control$trend #Bug discovered by Julia Kammerer and Sabrina Heckl: Only investigate trend if it actually was part of the GLM #if (control$trend) { if ("wtime" %in% names(coef(model))){ #is the p-value for the trend significant (0.05) level p <- summary.glm(model)$coefficients["wtime",4] significant <- (p < 0.05) #prediction for time k mu0Hat <- predict.glm(model,data.frame(wtime=c(k),population=population[k]),type="response") #have to use at least three years of data to allow for a trend atLeastThreeYears <- (control$b>=3) #no horrible predictions noExtrapolation <- mu0Hat <= max(response) #All 3 criteria have to be met in order to include the trend. Otherwise #it is removed. Only necessary to check this if a trend is requested. 
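          ## (added annotation) e.g. with control$b = 2 only two years of
          ## reference data exist, so atLeastThreeYears is FALSE and the
          ## trend is always dropped again, irrespective of its p-value.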
if (!(atLeastThreeYears && significant && noExtrapolation)) { doTrend <- FALSE model <- do.call(control$fitFun, args=list(response=response,wtime=wtime,population=pop,timeTrend=FALSE,reweight=control$reweight)) } } else { doTrend <- FALSE } #done with time trend ###################################################################### ###################################################################### # Calculate prediction & confidence interval # ###################################################################### #Predict value - note that the se is the mean CI #and not the prediction error of a single observation pred <- predict.glm(model,data.frame(wtime=c(k),population=population[k]),dispersion=model$phi, type="response",se.fit=TRUE) #Calculate lower and upper threshold lu <- algo.farrington.threshold(pred,model$phi,skewness.transform=control$powertrans,alpha=control$alpha, observed[k]) ###################################################################### # If requested show a plot of the fit. ###################################################################### if (control$plot) { #Compute all predictions data <- data.frame(wtime=seq(min(wtime),k,length.out=1000)) preds <- predict(model,data,type="response",dispersion=model$phi) #Show a plot of the model fit. plot(c(wtime, k), c(response,observed[k]),ylim=range(c(observed[data$wtime],lu)),,xlab="time",ylab="No. infected",main=paste("Prediction at time t=",k," with b=",control$b,",w=",control$w,sep=""),pch=c(rep(1,length(wtime)),16)) #Add the prediction lines(data$wtime,preds,col=1,pch=2) #Add the thresholds to the plot lines(rep(k,2),lu[1:2],col=3,lty=2) } ###################################################################### #Postprocessing steps ###################################################################### #Compute exceedance score unless less than 5 reports during last 4 weeks. #Changed in version 0.9-7 - current week is included now enoughCases <- (sum(observed[(k-control$limit54[2]+1):k])>=control$limit54[1]) #18 May 2006: Bug/unexpected feature found by Y. Le Strat. #the okHistory variable meant to protect against zero count problems, #but instead it resulted in exceedance score == 0 for low counts. #Now removed to be concordant with the Farrington 1996 paper. X <- ifelse(enoughCases,(observed[k] - pred$fit) / (lu[2] - pred$fit),0) #Do we have an alarm -- i.e. is observation beyond CI?? #upperbound only relevant if we can have an alarm (enoughCases) trend[k-min(control$range)+1] <- doTrend alarm[k-min(control$range)+1] <- (X>1) upperbound[k-min(control$range)+1] <- ifelse(enoughCases,lu[2],0) #Compute bounds of the predictive pd[k-min(control$range)+1,] <- lu[c(3,4)] }#done looping over all time points #Add name and data name to control object. 
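  ## (added annotation) X > 1 is equivalent to observed[k] > lu[2]: e.g. with
  ## pred$fit = 5 and upper threshold lu[2] = 10, an observation of 12 gives
  ## X = (12-5)/(10-5) = 1.4 and hence an alarm.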
control$name <- paste("farrington(",control$w,",",0,",",control$b,")",sep="") control$data <- paste(deparse(substitute(disProgObj))) #Add information about predictive distribution control$pd <- pd # return alarm and upperbound vectors result <- list(alarm = alarm, upperbound = upperbound, trend=trend, disProgObj=disProgObj, control=control) class(result) <- "survRes" #Done return(result) } surveillance/R/newtonRaphson.R0000644000176200001440000001450312166473572016157 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Michaela's own implementation of a Newton-Raphson optimizer ### ### Copyright (C) 2010-2012 Michaela Paul ### $Revision: 589 $ ### $Date: 2013-07-08 10:25:30 +0200 (Mon, 08. Jul 2013) $ ################################################################################ ##################### # x - initial parameter values # control arguments: # scoreTol - convergence if max(abs(score)) < scoreTol # paramTol - convergence if rel change in theta < paramTol # F.inc - eigenvalues of the hessian are computed when the Cholesky factorization # fails, and a constant added to the diagonal to make the smallest # eigenvalue= F.inc * largest # fn must return loglikelihood with score and fisher as attributes # fn <- function(theta,...){ # ll <- loglik(theta,...) # attr(ll,"score") <- score(theta,...) # attr(ll,"fisher") <- fisher(theta,...) # return(ll) # } newtonRaphson <- function(x,fn,..., control=list(), verbose=FALSE){ # set default values control.default <- list(scoreTol=1e-5, paramTol=1e-8, F.inc=0.01, stepFrac=0.5, niter=30) control <- modifyList(control.default, control) # number of step reductions, not positive definite Fisher matrices during iterations steph <- notpd <- 0 convergence <- 99 i <- 0 rel.tol <- function(x,xnew){ sqrt(sum((xnew-x)^2)/sum(x^2)) } score <- function(fn){ return(attr(fn,"score")) } fisher <- function(fn){ return(attr(fn,"fisher")) } ll0 <- c(fn(x,...)) if(verbose>1) cat("initial loglikelihood",ll0,"\n\n") # fn cannot be computed at initial par if(!is.finite(ll0) | is.na(ll0)){ cat("fn can not be computed at initial parameter values.\n") return(list(convergence=30, notpd = notpd, steph = steph)) } while(convergence != 0 & (i< control$niter)){ i <- i+1 ll <- fn(x,...) if(max(abs(score(ll))) < control$scoreTol){ convergence <- 0 break } # get cholesky decompositon F <- fisher(ll) F.chol <- try(chol(F),silent=TRUE) # could still give a nearly singular matrix # => could also check condition number if(inherits(F.chol,"try-error")){ if(verbose>1) cat("fisher is not pd\n") # fisher is not pd notpd <- notpd +1 ev <- eigen(F,symmetric=TRUE, only.values=TRUE)$values # add a constant to diag(F) diag(F) <- diag(F) + (control$F.inc*(max(abs(ev))) - min(ev))/(1-control$F.inc) # compute cholesky decomposition of modified fisher F.chol <- chol(F) } direction <- chol2inv(F.chol)%*% score(ll) if(max(abs(direction)) < control$paramTol*(max(abs(x))+1e-8) ){ convergence <- 0 break } # do Newton-Raphson step x.new <- c(x + direction) ll.new <- fn(x.new,...) 
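    ## (added annotation) x.new is the full Newton step x + fisher^(-1) %*% score
    ## (computed via chol2inv of the Cholesky factor above); whether the step
    ## is accepted is decided by the backtracking loop below.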
if(verbose>1) cat("iteration",i,"\trel.tol =",rel.tol(x,x.new),"\tabs.tol(score) =",max(abs(score(ll.new))),"\n") if(verbose>2) cat("theta =",round(x.new,2),"\n") if(verbose>1) cat("loglikelihood =",ll.new,"\n") ## Backtracking: reduce stepsize until we really improve the loglikelihood # ll(x1,lambda) = ll(x0) + lambda * fisher(x0)^-1 %*% score(x0) i.backstep <- 0 ## Gray (2001) Ch 3: Unconstrained Optimization and Solving Nonlinear Equations # It is technically possible to construct sequences where ll(x1) > ll(x0) # at each step but where the sequence never converges. # For this reason a slightly stronger condition is usually used. # Dennis and Schnabel (1983): Numerical Methods for Unconstrained # Optimization and Nonlinear Equations. SIAM. (ch 6,3.2, p.126) # recommend requiring that lambda satisfy # ll(x1) > ll(x0) + 1e-4 *(x1-x0)' %*% score(x0) while((is.na(ll.new) || (ll.new < c(ll)+ (1e-4)*sum(direction*score(ll)))) & (i.backstep <= 20)){ if(verbose>1 & i.backstep==0) cat("backtracking: ") i.backstep <- i.backstep +1 steph <- steph +1 # reduce stepsize by a fixed fraction stepFrac direction <- control$stepFrac*direction x.new <- c(x + direction) ll.new <- fn(x.new,...) if(verbose>1) cat("*") } if(verbose & i.backstep>0) cat("\n") if(i.backstep >20){ if(verbose>1)cat("backtracking did not improve fn\n") #cat("ll: ",ll,"\tll.new: ",ll.new,"\n") convergence <- 10 break } x <- c(x.new) if(verbose>1) cat("\n") } ll <- fn(x,...) # max number of iterations reached, but check for convergence if(max(abs(score(ll))) < control$scoreTol){ convergence <- 0 } # convergence if # 1) relative difference between parameters is small # 2) absolute value of gradient is small # 3) stop after niter iterations if(i==control$niter & convergence !=0){ if(verbose>1) cat("Newton-Raphson stopped after",i,"iterations!\n") # iteration limit reached without convergence convergence <- 10 } if(verbose>1) cat("iteration",i,"\trel.tol =",rel.tol(x,x.new),"\tabs.tol(score) =",max(abs(score(ll))),"\n") if(verbose>2) cat("theta =",round(x.new,2),"\n") if(verbose>1) cat("loglikelihood =",c(ll),"\n\n") # loglikelihood loglik <- c(ll) # fisher info F <- fisher(ll) if(inherits(try(solve(F),silent=TRUE),"try-error")){ cat("\n\n***************************************\nfisher not regular!\n") #print(summary(x)) return(list(coefficients=x, loglikelihood=loglik, fisher=FALSE, convergence=22, notpd = notpd, steph = steph)) } # check if solution is a maximum (i.e. if fisher is pd ) eps <- 1e-10 if(!all(eigen(F,symmetric=TRUE, only.values=TRUE)$values > eps)){ if(verbose>1) cat("fisher information at solution is not pd\n") return(list(coefficients=x, loglikelihood=loglik, fisher=FALSE, convergence=21, notpd = notpd, steph = steph)) } if(verbose>0) cat("number of iterations = ",i," coverged = ", convergence ==0," log-likelihood = ",loglik, " notpd = ", notpd, " steph = ", steph, "\n") result <- list(coefficients=x, loglikelihood=loglik, fisher=FALSE, convergence=convergence, notpd=notpd, steph=steph,niter=i) return(result) } surveillance/R/earsC.R0000644000176200001440000001615013020355717014335 0ustar liggesusers# \|||/ # (o o) # ,~~~ooO~~(_)~~~~~~~~~, # | EARS | # | surveillance | # | methods | # | C1, C2 and C3 | # '~~~~~~~~~~~~~~ooO~~~' # |__|__| # || || # ooO Ooo ###################################################################### # Implementation of the EARS surveillance methods. 
######################################################################
# DESCRIPTION
######################################################################
# Given a time series of disease counts per month/week/day, this
# function determines whether there was an outbreak at given time
# points: for each time point it derives an expected value from past
# values, defines an upperbound based on this expected value and on
# the variability of the past values, and then compares the observed
# value with the upperbound. If the observed value is greater than
# the upperbound, an alert is flagged.
# Three methods are implemented. They do not use the same amount of
# past data and are therefore expected to differ in specificity and
# sensitivity: from C1 to C3 the amount of past data used increases,
# and so does the sensitivity, but the specificity decreases.
######################################################################
# PARAMETERS
######################################################################
# range    : range of timepoints over which the function will look for
#            outbreaks.
# method   : which of the three EARS methods C1, C2 and C3 should be
#            used.
# baseline : number of past time points used to calculate the expected
#            value and its variability (default: 7, minimum: 3).
# minSigma : lower bound for the standard deviation estimate, used to
#            avoid zero (or too small) alerting thresholds.
# alpha    : significance level determining the threshold multiplier
#            qnorm(1 - alpha).
######################################################################
# INPUT
######################################################################
# An R object of class sts
######################################################################
# OUTPUT
######################################################################
# The same R object of class sts with the slots alarm and upperbound
# filled by the function
######################################################################

earsC <- function(sts, control = list(range = NULL, method = "C1",
                                      baseline = 7, minSigma = 0,
                                      alpha = 0.001)) {
  ######################################################################
  # Handle I/O
  ######################################################################
  # If list elements are empty, fill them!
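  # (added annotation) For C1 and C2 the threshold computed further below is
  #   U_t = mean(refVals_t) + qnorm(1 - alpha) * max(sd(refVals_t), minSigma),
  # where refVals_t are the `baseline` past counts (shifted two time points
  # further into the past for C2).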
if (is.null(control[["baseline", exact = TRUE]])) { control$baseline <- 7 } if (is.null(control[["minSigma", exact = TRUE]])) { control$minSigma <- 0 } baseline <- control$baseline minSigma <- control$minSigma if(minSigma < 0) { stop("The minimum sigma parameter (minSigma) needs to be positive") } if (baseline < 3) { stop("Minimum baseline to use is 3.") } # Method if (is.null(control[["method", exact = TRUE]])) { control$method <- "C1" } # Extracting the method method <- match.arg( control$method, c("C1","C2","C3"),several.ok=FALSE) # Range # By default it will take all possible weeks # which is not the same depending on the method if (is.null(control[["range",exact=TRUE]])) { if (method == "C1"){ control$range <- seq(from=baseline+1, to=dim(sts@observed)[1],by=1) } if (method == "C2"){ control$range <- seq(from=baseline+3, to=dim(sts@observed)[1],by=1) } if (method == "C3"){ control$range <- seq(from=baseline+5, to=dim(sts@observed)[1],by=1) } } # zAlpha if (is.null(control[["alpha",exact=TRUE]])) { # C1 and C2: Risk of 1st type error of 10-3 # This corresponds to an Z(1-zAlpha) of about 3 if (method %in% c("C1","C2")) { control$alpha = 0.001 } # C3: Risk of 1st type error of 0.025 # This corresponds to an Z(1-zAlpha) of about 2 if (method=="C3") { control$alpha = 0.025 } } # Calculating the threshold zAlpha zAlpha <- qnorm((1-control$alpha)) #Deduce necessary amount of data from method maxLag <- switch(method, C1 = baseline, C2 = baseline+2, C3 = baseline+4) # Order range in case it was not given in the right order control$range = sort(control$range) ###################################################################### #Loop over all columns in the sts object #Call the right EARS function depending on the method chosen (1, 2 or 3) ##################################################################### for (j in 1:ncol(sts)) { # check if the vector observed includes all necessary data: maxLag values. 
if((control$range[1] - maxLag) < 1) { stop("The vector of observed is too short!") } ###################################################################### # Method C1 or C2 ###################################################################### if(method == "C1"){ # construct the matrix for calculations ndx <- as.vector(outer(control$range, baseline:1, FUN = "-")) refVals <- matrix(observed(sts)[,j][ndx], ncol = baseline) sts@upperbound[control$range, j] <- apply(refVals,1, mean) + zAlpha * pmax(apply(refVals, 1, sd), minSigma) } if (method == "C2") { # construct the matrix for calculations ndx <- as.vector(outer(control$range, (baseline + 2):3, FUN = "-")) refVals <- matrix(observed(sts)[,j][ndx], ncol = baseline) sts@upperbound[control$range, j] <- apply(refVals,1, mean) + zAlpha * pmax(apply(refVals, 1, sd), minSigma) } if (method == "C3") { # refVals <- NULL rangeC2 = ((min(control$range) - 2):max(control$range)) ##HB replacing loop: ndx <- as.vector(outer(rangeC2, (baseline + 2):3, FUN = "-")) refVals <- matrix(observed(sts)[,j][ndx], ncol = baseline) ##HB using argument 'minSigma' to avoid dividing by zero, huge zscores: C2 <- (observed(sts)[rangeC2, j] - apply(refVals, 1, mean))/ pmax(apply(refVals, 1, sd), minSigma) partUpperboundLag2 <- pmax(rep(0, length = length(C2) - 2), C2[1:(length(C2) - 2)] - 1) partUpperboundLag1 <- pmax(rep(0, length = length(C2) - 2), C2[2:(length(C2) - 1)] - 1) ##HB using argument 'minSigma' to avoid alerting threshold that is zero or too small sts@upperbound[control$range, j] <- observed(sts)[control$range, j] + pmax(apply(as.matrix(refVals[3:length(C2), ]),1, sd),minSigma) * (zAlpha - (partUpperboundLag2 + partUpperboundLag1)) sts@upperbound[control$range, j] = pmax(rep(0, length(control$range)), sts@upperbound[control$range, j]) } } #Copy administrative information control$name <- paste("EARS_", method, sep = "") control$data <- paste(deparse(substitute(sts))) sts@control <- control sts@alarm[control$range, ] <- matrix(observed(sts)[control$range, ] > upperbound(sts)[control$range, ]) return(sts[control$range, ]) } surveillance/R/intersectPolyCircle.R0000644000176200001440000000404612455232124017264 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Compute the intersection of a circular domain with a polygonal domain of ### various classes (currently: owin, gpc.poly, or SpatialPolygons) ### ### Copyright (C) 2009-2015 Sebastian Meyer ### $Revision: 1169 $ ### $Date: 2015-01-13 16:05:56 +0100 (Tue, 13. Jan 2015) $ ################################################################################ intersectPolyCircle.gpc.poly <- function (object, center, radius, npoly = 32, useGEOS = FALSE, ...) { if (useGEOS) { loadNamespace("rgeos") # coerce gpc.poly to SpatialPolygons res <- intersectPolyCircle.SpatialPolygons(as(object, "SpatialPolygons"), center, radius, npoly) as(res, "gpc.poly") # also defined in rgeos } else { gpclibCheck() circle <- discpoly(center, radius, npoly = npoly, class = "gpc.poly") gpclib::intersect(circle, object) # this order seems to be faster } } intersectPolyCircle.owin <- function (object, center, radius, npoly = 32, ...) 
{ circle <- disc(radius = radius, centre = center, npoly = npoly) res <- intersect.owin(circle, object) # order does not affect runtime ## ensure "polygonal" type (because of rescue.rectangle in intersect.owin) as.polygonal(res) } intersectPolyCircle.SpatialPolygons <- function (object, center, radius, npoly = 32, ...) { circle <- discpoly(center, radius, npoly = npoly, class = "Polygon") circleSpP <- SpatialPolygons(list(Polygons(list(circle), "0"))) ## ensure that circleSpP has exactly the same proj4string as 'object' circleSpP@proj4string <- object@proj4string rgeos::gIntersection(circleSpP, object) } surveillance/R/magic.dim.R0000644000176200001440000000636413276245673015152 0ustar liggesusers###################################################################### # Compute a suitable layout for plotting ###################################################################### magic.dim <- function(k){ if(k==1) return(c(1,1)) #factorize k factors <- primeFactors(k) #find the best factorization of k into two factors res <- bestCombination(factors) #if k is a prime or the difference between the two factors of k is too large #rather use the roots of the next square number greater than k #up is root of the smallest square number >= k up <- ceiling(sqrt(k)) #low is root of the biggest square number < k low <- up -1 if(diff(res) >5){ # e.g. k=11 is a prime, the next square number is 16 so up=4 and low=3 # low^2 = 9 < 11 is naturally too small, up^2=16 > 11 so c(4,4) is a solution # but low*up = 3*4 = 12 > 11 is also adequate and a better solution if((k - low^2) < up) res <- c(low,up) else res <- c(up,up) } return(sort(res)) } ###################################################################### # Compute the prime number factorization of an integer ###################################################################### primeFactors <- function(x){ if(x==1) return(1) factors<- numeric(0) i<-1 #start with i=2 and divide x by i (as often as possible) then try division by i+1 #until all factors are found, i.e. x=1 while(i < x){ i <- i+1 while((x %% i)==0){ # each time a new factor i is found, save it and proceed with x = x/i # e.g. k=20: 2 is a factor of x=20, continue with x = 10 = 20/2 # 2 is a factor of x=10, continue with x = 5 = 10/2 # 3 and 4 are no factors of x = 5 # 5 is a factor of x = 5, continue with x = 1 # result: 20 = c(2, 2, 5) factors <- c(factors, i) x <- x/i } } return(factors) } ###################################################################### # Given a prime number factorization of a number, e.g. 36 # yields x=c(2,2,3,3) # and parition x into two groups, such that the product of the numbers # in group one is as similar as possible to the product # of the numbers of group two. This is useful in magic.dim # # Params: # x - the prime number factorization # # Returns: # c(prod(set1),prod(set2)) ###################################################################### bestCombination <- function(x) { #Compute the power set of 0:1^length(x), i.e. a binary indicator for #variable stating whether to include it in set 1 or not. 
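  ## (added annotation) e.g. for x = c(2,2,3,3) (the prime factors of 36),
  ## combos has 2^4 = 16 rows; the row c(TRUE,TRUE,FALSE,FALSE) puts both 2s
  ## into set1 and both 3s into set2, giving products c(4,9). The optimum
  ## here is c(6,6). Illustration (not run):
  ## stopifnot(identical(bestCombination(c(2,2,3,3)), c(6,6)))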
combos <- as.matrix(expand.grid(rep(list(0:1),length(x)))) mode(combos) <- "logical" #Small helper function, given a vector of length(x) stating whether #to include an element in set1 or not, compute the product #of set1 and set2=x\backslash set1 #set1: all those for which include is TRUE, set2: all those for which #include is FALSE setsize <- function(include) { c(prod(x[include]),prod(x[!include])) } #Compute the product of set1 and set2 for each possible combination sizes <- apply(combos,MARGIN=1,FUN=setsize) #Calculate the combination, where x is as close to y as possible bestConfig <- combos[which.min(abs(diff(sizes))),] #Return this setsize of this configuration return(setsize(bestConfig)) } surveillance/R/stsNClist_animate.R0000644000176200001440000002276013653223255016733 0ustar liggesusers###################################################################### # Function to plot a sequence of nowcasts. Can be wrapped with the # animation package to produce PDF or Web animations # # Parameters: # linelist_truth - data.frame containing the linelist of cases/reports # dEventCol - name of the column containing the time of event (as Date) # dReportCol - name of the column containing the time of report receipt (as Date) # aggrgate.by - aggregation level (se function linelist2sts) # nowcasts - a list of nowcasts (if NULL then they are generated on the fly - Note: This is currently not implemented!) # method - which method to animate. Has to be part of the individual nowcast objects in 'nowcasts' # control - control object for controlling how the plotting is done ###################################################################### animate_nowcasts <- function(nowcasts,linelist_truth, method="bayes.trunc.ddcp", control=list(dRange=NULL,anim.dRange=NULL, plot.dRange=NULL,consistent=FALSE,sys.sleep=1,ylim=NULL,cex.names=0.7,col=c("violetred3","#2171B5","orange","blue","black","greenyellow")),showLambda=TRUE) { ##Extract the dEventCol and dReportCol from the nowcasts dEventCol <- nowcasts[[1]]@control$call$dEventCol dReportCol <- nowcasts[[1]]@control$call$dReportCol aggregate.by <- nowcasts[[1]]@control$call$aggregate.by ##Boolean indicator for those having information on dEventCol validVarInfo <- !is.na(linelist_truth[,dEventCol]) ##Show info about what is being illustrated message(paste("Total of ",nrow(linelist_truth)," cases in linelist_truth.\nIllustring reporting for ",sum(!is.na(linelist_truth[,dEventCol]))," cases with information on \"",dEventCol,"\"\n",sep="")) ##Reduce linelist_truth to those who have the appropriate information linelist_truth <- linelist_truth[validVarInfo,] ######################################### ## Check and set default control options ######################################### if (is.null(control[["dRange",exact=TRUE]])) { range <- range(c(linelist_truth[,dEventCol],linelist_truth[,dReportCol]),na.rm=TRUE) } else { range <- control$dRange } range.dates <- seq(range[1],range[2],by=aggregate.by) #plot.dRange if (is.null(control[["plot.dRange",exact=TRUE]])) { control$plot.dRange <- range(range) } #anim.dRange if (is.null(control[["anim.dRange",exact=TRUE]])) { control$anim.dRange <- control$dRange } #sys.sleep if (is.null(control[["sys.sleep",exact=TRUE]])) control$sys.sleep <- 1 if (is.null(control[["cex.names",exact=TRUE]])) control$cex.names <- 1 if (is.null(control[["col",exact=TRUE]])) control$col <- c("violetred3","#2171B5","orange","blue","black","springgreen4") if (is.null(control[["showLambda",exact=TRUE]])) control$showLambda <- TRUE ##Check that a 
list of nowcasts is available if (is.null(nowcasts)) { stop("not implemented!") } ##################### # Preprocessing block ##################### #Create an sts object with the true number of cases.. sts <- linelist2sts(linelist_truth,dEventCol,aggregate.by=aggregate.by,dRange=range) #Index of the time points in the plot.dRange plot.dates.idx <- as.numeric(control$plot.dRange - range[1] + 1) #Index of the animate dates anim.dates <- seq(control$anim.dRange[1],control$anim.dRange[2],by="1 day") idxSet <- pmatch(anim.dates,range.dates) ##Find ylim if (is.null(control[["ylim",exact=TRUE]])) { ymax <- max(observed(sts),upperbound(sts),na.rm=TRUE) ymax <- max(ymax,unlist(lapply(nowcasts, function(nc) max(c(observed(nc),upperbound(nc),predint(nc)),na.rm=TRUE)))) control$ylim <- c(0,ymax) } ##====================== ## Loop over all dates ##====================== ##Loop over all days. always show what we know for (i in idxSet) { ##fix this #Set "today" curDate <- as.Date(range.dates[i]) message("Animating ",as.character(curDate),"...") #Choose all reports available until this "today" linelist_truth.avail <- linelist_truth[ linelist_truth[,dReportCol] <= curDate,] #If consistency checking is requested remove all entries which #are "beyond" today if (!is.null(control$consistent)) { linelist_truth.avail <- linelist_truth.avail[ linelist_truth.avail[,dEventCol] <= curDate,] } ##Check that date exists in nowcast list. sts.nowcast <- nowcasts[[as.character(curDate)]] if (is.null(sts.nowcast)) { stop("Date: ",as.character(curDate)," not available in nowcasts.") } ##Check that method exists in nowcast object if (!(method %in% nowcasts[[as.character(curDate)]]@control$method)) { stop("Method ",method," not in nowcasts!") } ##Exract the used safePredictLag control$safePredictLag <- sts.nowcast@control$now - max(sts.nowcast@control$when) ##Fill upperbound and CI slots with output of that method (not pretty code: ToDo Improve!!) N.tInf.support <- sts.nowcast@control$N.tInf.support Ps <- sts.nowcast@predPMF when <- sts.nowcast@control$when dateRange <- epoch(sts.nowcast) idxt <- which(dateRange %in% when) alpha <- sts.nowcast@control$alpha ##Loop over all time points for (i in idxt) { predPMF <- Ps[[as.character(dateRange[i])]][[method]] sts.nowcast@upperbound[i,] <- median(N.tInf.support[which.max( cumsum(predPMF)>0.5)]) sts.nowcast@pi[i,,] <- N.tInf.support[c(which.max(cumsum(predPMF) >= alpha/2),which.max(cumsum(predPMF) >= 1-alpha/2))] } dimnames(sts.nowcast@pi) <- list(as.character(dateRange),NULL,paste( c(alpha/2*100,(1-alpha/2)*100),"%",sep="")) #Done upperbound(sts.nowcast)[-idxt] <- NA #All events which (in an ideal world) would be available now linelist_truth.now <- linelist_truth[ linelist_truth[,dEventCol] <= curDate,] sts.now <- linelist2sts(linelist_truth.now,dEventCol,aggregate.by=aggregate.by,dRange=c(range[1],curDate))#range) ##Percentage of possible observations which are available sum(observed(sts.nowcast)) sum(upperbound(sts.nowcast)) message(sprintf("(%.0f%% of total cases in linelist_truth reported)\n",sum(observed(sts.nowcast))/sum(observed(sts.now))*100)) ##Show the true number of counts observed(sts) <- matrix(0,nrow=nrow(sts),ncol=1) upperbound(sts) <- matrix(0,nrow=nrow(sts),ncol=1) observed(sts)[1:nrow(sts.now),] <- observed(sts.now) upperbound(sts)[1:nrow(sts.now),] <- upperbound(sts.now) ##Plot the true number of counts as sts object plot(sts,legend=NULL,dx.upperbound=0,main="",lwd=c(1,1,3),ylab="No. 
Cases",ylim=control$ylim,lty=c(1,1,1),axes=FALSE,xlab="",col=c(control$col[c(1,1)],NULL), xlim=plot.dates.idx,xaxs="i") ####################start to change. Use proper customizable arguments ### plot the nowcast using the S4 method and then add the other ### stuff on top of it... ##Add the nowcast plot(sts.nowcast,dx.upperbound=0,axes=FALSE,col=control$col[c(2,2,3)],lty=c(1,1,1),legend=NULL,add=TRUE,lwd=c(3,3,3),xlim=plot.dates.idx,xaxs="i") ##Last proper index idx <- nrow(sts.nowcast) - which.max(!is.na(rev(upperbound(sts.nowcast)))) + 1 ##Continue line from plot lines( idx+c(-0.5,0.5), rep(upperbound(sts.nowcast)[idx,],2),lty=1,col=control$col[3],lwd=3) ##Add CIs from the nowcast for (i in 1:nrow(sts.nowcast)) { lines( i+c(-0.3,0.3), rep(sts.nowcast@pi[i,,1],2),lty=1,col=control$col[3]) lines( i+c(-0.3,0.3), rep(sts.nowcast@pi[i,,2],2),lty=1,col=control$col[3]) lines( rep(i,each=2), sts.nowcast@pi[i,,],lty=2,col=control$col[3]) } ##Add lambda_t if it exists. if (method == "bayes.trunc.ddcp" && control$showLambda) { lambda <- attr(delayCDF(sts.nowcast)[["bayes.trunc.ddcp"]],"model")$lambda showIdx <- seq(ncol(lambda) - control$safePredictLag) ##matlines( showIdx,t(lambda)[showIdx,],col="gray",lwd=c(1,2,1),lty=c(2,1,2)) ##If m parameter is used then also only show the polynomial up to m times back. if (!is.null(sts.nowcast@control$call$m)) { showIdx <- seq(ncol(lambda) - sts.nowcast@control$call$m, ncol(lambda) - control$safePredictLag, by=1) } matlines( showIdx, t(lambda)[showIdx,],col="gray",lwd=c(1,2,1),lty=c(2,1,2)) } ##Add axis information axis(2) ##Add extra line parts on x-axis axis(1,at=0:1e3,tick=TRUE,lwd.ticks=0,labels=rep("",1e3+1)) axis(1,at=0:1e3,tick=TRUE,lwd.ticks=1,tcl=-0.2,labels=rep("",1e3+1)) ##Hilight the mondays is.monday <- format(range.dates,"%w") == 1 axis(1,at=(1:length(range.dates))[is.monday],labels=format(range.dates[is.monday],"%a %d %b"),las=2,cex.axis=control$cex.names) ##Show month breaks dom <- as.numeric(format(range.dates,"%d")) axis(1,at=which(dom==1),labels=rep("",sum(dom==1)),tcl=-0.8,lwd=0,lwd.ticks=1) ####################stop to change ##Extra text <- c("Events up to \"now\"","Reports received by \"now\"",paste("Nowcasts by ",method,sep=""), if (method=="bayes.trunc.ddcp") expression(lambda[t]*" of bayes.trunc.ddcp") else NULL) col <- c(control$col[1:3], if (method=="bayes.trunc.ddcp") "gray" else NULL) legend(x="topright",text,col=col, lwd=3,lty=1) ##Add now symbol points(curDate-range[1]+1,0,pch=10,col=control$col[6],cex=1.5) ##Add nowcast symbol points(curDate-range[1]+1-control$safePredictLag,0,pch=9,col=control$col[3],cex=1.5) ##Add this to the legend legend(x="right",c("Now","Nowcast horizon"),pch=c(10,9),col=control$col[c(6,3)],pt.cex=1.5) ##Pause Sys.sleep(control$sys.sleep) } invisible() } surveillance/R/sysdata.rda0000644000176200001440000002375014026445515015324 0ustar liggesusers7zXZi"6!X'])TW"nRʟF X#Qd[ZfFdO~ڒ.e$/|<07qKܜn,,m?TKDU0NOp1HɝsQY]\fՅ k?m; M4H'hNag!7}[ݼݫ!h(`'>:uGxЯ9##q2shIʡqcU `#Ьnt !iYOS7?b-IWP: 1I!i}b TR|eXn #@DKP wkNÔ'>MAfS6]BۮZoh%UBr֢-Ojg{d3u1R˰OVG6ǧ S\CindI%ڝ?We uP?۸e7*z8]/y҃Qmf]>;bg89 9fJe\u'Ki\L$QUd.D- 7lkuijʼ܎ d1#k^u#1k{ ?&Us!/sq1e< $ U亱"}S %_b& !}ݰIF@(#jR ,0yV ?ϑ[ 5{Q@.d{5@QW9Ӵ-ed҉W: ǻ6^QxU6<6=L>-޿ , kGYbltRmvg Է9og3+4%8è׻8e]B' }D=?7H$d]txVĪXޒr OВmy竷c@i,$Jn::exaf} ԓ QkYu9O텡n\m̶@gP_ S&r QB벪B- T7^J?:0#B)uW{M:1R/7ܐSDԧ#Gn~GL_E*#iBv=Ar]F~j#`"!aEx Jj(^7rWVCa&"U'[CWԷWzJ,E2n.תMX_GUmya ۣ <8>!aHt̒>LY@Z1<: 5= w1I_Qִ-]\%?K!_{Ybڹ]7Y<7Ìgx,*Voܰ(/2G eJ"0q&KJl$ aǴ.Vs+!坩%'p 9,X}5&88 c&", b2 
hӝdS>Qk;dssYNa3;|41w8C_LF2dݚ^e`L޳$^J%14?ɀ-!i>;mT\*]djP߸h#c1"wqu[g,E1Dg(VѰj)ӠW&:l"F-Hn o?UÊc8+RM>4Skl`W.jїnAi9)Cukݼk_Y8OeeK hZ/uy2< x zMll4<+ݥ]' "rj$hn^f&]d5nXVmo'چrKCFgu)w垵40yUzހC* N ҾoMQ*}sMW^TD L-,غLA8NaWWDp<,1e p$D O=]滲b<2B\g;)rU 3!+@&MV" Mmn؊ OThޮ9h8>`u}9ID_\N`].$R4T%,>2n% m>DŽaa`\X==\މ0D,`]X 5[ *6 D|/>\ M\iÇ]ڹHTܚm|4lAGU? /RF$6eҧ^~=<V$G=PL_s r\G몑Zo =Ѳ- _\D뾤0ITFpSC݌A$yF-a8 h[H}trҼ_ێP5&Rd }݋6 p.Oug8qlj6hDE1 ՅE )CȾ`pQiu/8@gVG$HS ~ 6Zb]q.8,Q:d ~(/E%¸q)S58J!-<'GAv@Vk9n~?ޝƭd*BmT"=HrYWn;{LE @@аUV}S|+6\XE:,"!RЇf_}ٿm01N7 //Xû:J]ͱ̇*,VZI 1$iFjz+Ӳ,cL7iٻg+|F)S A%ov<=EQ GGH%_bӪbh}Z WUɯ_}!ζFp q!<5t y>=';G/ɯ.n9GL74-1z/54ؗ_u6| mOF}Hq>A Sq/jHH?|=x:E%pr e׋7$$0vC'PirAn"(ա Qvu _1 85\3B{ $n5Lf*f0iD?e289r :4j64L'p;eO]#qb%K"v5dt:tt(<3Z9y\ǜT{ߦUh;|c$-8vdtieo! EJ byZ4px3ej ݐ7߫y!=~sRG6kdƵ3}d{뭳 n棼ܢ e7K%\4ab?3ؘ0OR--3Ns+ӓR6 f<NwN# !^)hFwxALV8΅wCX}$"GFaIsdչTw?$L'CpsG1I{i/@{|Uug d_z+gy?ejTYs^H]뺕r} G$9|%(p:(dŦc)_(&%xuc崂lh7H!00DykŸӞDmU+@5{ ,c: k-PVޙrB橉F{Nrzu=M.Ԫ=A@fJE),Mn֏=SPGmtHh= { Ps t.4Pw~sz)'dr$AJwuQ;ܩU)Z>'*`?):Omyn׿mزLV7ӹLISO H1[7&%fMVYX~t'U*'"_mM;+V8-52p]Ӟv '0ڿ&l-Nbkfv`ҧ NM*3Vo(-TyM6 l%Ǩnz(ep!s0/YEw/NLnW+*ۖV hשU4I|[PKZ;IT8K,4!س+ۢy ΜKZNٓNw N [Cދ -aP6Zƻ19>K +YR N b?H;Nf( R_~9DkDtC P/]ckDJh:ڣdR+2za,g#6O`k ,5Cea:Y +mNQjc@NCs:]d&ݐO;~۴F mgBj?".ޮ$hSSΉyXʬ22,(35tʂ>lC,}C܄Ғn1 ?&B"},óbeԆNϸֆ7M@b|n N;f^> E7Ǩ%虲BnUZ6,#Y6GѰED=JST豄"\`aip˓Q0'C]@ر~4ާPŠ2q uio1c]x^_pgYe"6S]E0j f7e!J;UYN i&"x:nK>lo/B)T 0/reSvNhA5c@ԑ^QQYw9bHV[n,fsIeZvs?V9GG}wJ|fhPeh#[VE d1Z8WFgU2?isƶJwWNmcВ*W>jzC/iaajT ? MEa$5L6z'DZ|w 9In}!FS\B1n%$dmPbJ[~pQ֖VGeUL{ZU'9inW؛!'زJJ4Yn7З.Bj;紽8JީcM*4} aM$$2EumQhBg hbӂ {\@|RD AI6O2|9j"|**[)PgnIQE: m-(~lvP :oq`%EϬR` ݋n'NƏyQ~>LŵX0A"bt#wD(m`tOe? =ъh~ J3e_< 1 QM|SF5mӒBWA{8@=q]L~<ڔNk}Q%epҿ9'l/{yͥ4[Y+NH@ܽSYLޛeS?2%x32Δǚ ~sAˀGE&"5ͩ%To+*srkOb6xOOfks jFz7JpLK*d<;!NTNOA@ENcNIFXխgc\Ԫkݨ㼾uD).KU^@RJ)̪?cX\M<҈V⢭{DQDƏn7 `<`WAi+zab^`!|96v4[vN 0U;,aXl NgZWT_~⒜;lc<l<# U/ {(:QCP2)/n^ Ü9~Fr1c0֌Q1bhVMXwh|өE(ZN^ M& l-)l=>+z{[In #k'Laݥ)%[! j?l6!'l|/JCN{?N*^M>F_F7a -FVmG$|#qG;S.Ǿdw?8iǡ@əF\2gfg.le8)m]*ujѝ^j{AEmL.iqH>3I5dp;(2 TG#zSB u9rA&Z#A h0?ŗMdtT͇"^;?!"yˁ"ZI MӐ/-RyB{t)ڱl38УSXװT,11ξbIϊ׈q,̼?O.mG2ȸ=`ܧ%%k}dγ=ꓼ$n*3!Y)fTUƞ{.F.zjwN3_57jwX V+WEEՄ)ͪ_Sm7n^NvϊZˉ8dmReS%BNîq||[Qos\ܨNzz*w^1bנiU[#]^eޞҝK,O} Ͽqe‹G*Zc4meXjwBI'=ׇ]JE8}=8m]܀X'22g {3GSE{. tWUG 6/m#1:l2 t0$/a r}1RM'V3ڒ3u Bzpa2yi4D^SLeXTajߛ2߅2OTCD6d癰y|_z"yiXY0prM;A88dFr9kM5?.s 3lA^F}>Y`#tv>H>uвp:*p)s9wۧbHDl~Z»_ˣ g/!ifm6ldlheX|'V֋]]0y/NI~P -Ogm>0 YZsurveillance/R/isScalar.R0000644000176200001440000000110013117531333015023 0ustar liggesusers################################################################################ ### Check if an R object is scalar, i.e., a numeric vector of length 1 ### ### Copyright (C) 2009,2017 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ################################################################################ isScalar <- function (x) { length(x) == 1L && is.vector(x, mode = "numeric") } surveillance/R/knox.R0000644000176200001440000001260412707631463014265 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. 
### ### Knox test for space-time interaction ### ### Copyright (C) 2015-2016 Sebastian Meyer ### $Revision: 1703 $ ### $Date: 2016-04-26 11:21:55 +0200 (Tue, 26. Apr 2016) $ ################################################################################ knox <- function (dt, ds, eps.t, eps.s, simulate.p.value = TRUE, B = 999, ...) { stopifnot(length(dt) == length(ds)) if (isSymmetric.matrix(dt) || isSymmetric.matrix(ds)) warning("symmetric input matrix detected; use 'lower.tri'?") ## logical vectors indicating which pairs are close in time and space closeInTime <- if (is.logical(dt)) { dt } else { stopifnot(is.numeric(dt), isScalar(eps.t)) dt <= eps.t } closeInSpace <- if (is.logical(ds)) { ds } else { stopifnot(is.numeric(ds), isScalar(eps.s)) ds <= eps.s } ## manually build the contingency table (table() with factor() is too slow) .lab <- c("close", "not close") knoxtab <- array( tabulate(4L - closeInTime - 2L*closeInSpace, nbins = 4L), dim = c(2L, 2L), dimnames = list( dt = if (is.logical(dt)) .lab else paste(c("<=", " >"), eps.t), ds = if (is.logical(ds)) .lab else paste(c("<=", " >"), eps.s) )) class(knoxtab) <- "table" ## expected number of close pairs in the absence of spatio-temporal interaction npairs <- sum(knoxtab) expected <- sum(knoxtab[1L,]) / npairs * sum(knoxtab[,1L]) ##<- this order of terms avoids integer overflow ## test statistic is the number of spatio-temporally close pairs METHOD <- "Knox test" STATISTIC <- knoxtab[1L] ## determine statistical significance pval_Poisson <- ppois(STATISTIC, expected, lower.tail = FALSE) PVAL <- if (simulate.p.value) { # Monte Carlo permutation approach stopifnot(isScalar(B)) B <- as.integer(B) METHOD <- paste(METHOD, "with simulated p-value") PARAMETER <- setNames(B, "B") permstats <- plapply(X = integer(B), FUN = function (...) sum(closeInSpace & closeInTime[sample.int(npairs)]), ...) structure(mean(c(STATISTIC, permstats, recursive = TRUE) >= STATISTIC), Poisson = pval_Poisson) } else { METHOD <- paste(METHOD, "with Poisson approximation") PARAMETER <- setNames(expected, "lambda") pval_Poisson } ## return test results structure( list(method = METHOD, data.name = paste("dt =", deparse(substitute(dt)), "and ds =", deparse(substitute(ds))), statistic = setNames(STATISTIC, "number of close pairs"), parameter = PARAMETER, p.value = PVAL, alternative = "greater", null.value = setNames(expected, "number"), permstats = if (simulate.p.value) { unlist(permstats, recursive = FALSE, use.names = FALSE) }, table = knoxtab), class = c("knox", "htest") ) } print.knox <- function (x, ...) { ## first print by the default method for class "htest" NextMethod("print") ## then also output the contingency table cat("contingency table:\n") print(x$table) cat("\n") invisible(x) } plot.knox <- function (x, ...) { if (is.null(permstats <- x[["permstats"]])) { stop("this plot-method is for a permutation-based Knox test") } defaultArgs <- list( permstats = permstats, xmarks = setNames(c(x[["null.value"]], x[["statistic"]]), c("expected", "observed")), xlab = "number of close pairs" ) do.call("permtestplot", modifyList(defaultArgs, list(...))) } xtable.knox <- function (x, caption = NULL, label = NULL, align = paste0("r|rr", if (!is.null(sumlabel)) "|r"), digits = 0, display = NULL, ..., sumlabel = "$\\sum$") { tab <- x$table if (!is.null(sumlabel)) { FUN <- setNames(list(sum), sumlabel) tab <- addmargins(tab, FUN = FUN, quiet = TRUE) } xtable(tab, caption = caption, label = label, align = align, digits = digits, display = display, ...) 
} toLatex.knox <- function (object, dnn = names(dimnames(object$table)), hline.after = NULL, sanitize.text.function = NULL, ...) { xtab <- xtable(object, ...) if (is.null(hline.after)) hline.after <- unique(c(-1,0,2,nrow(xtab))) if (is.null(sanitize.text.function)) sanitize.text.function <- function (x) gsub("<=", "$\\le$", gsub(">", "$>$", x, fixed = TRUE), fixed = TRUE) res <- toLatex.xtable(xtab, hline.after = hline.after, sanitize.text.function = sanitize.text.function, ...) if (is.null(dnn)) { res } else { stopifnot(length(dnn) == 2) headeridx <- grep("&", res, fixed = TRUE)[1L] res[headeridx] <- paste0(dnn[1L], res[headeridx]) res <- append(res, paste0(" & \\multicolumn{2}{|c|}{", dnn[2L], "} & \\\\"), after = headeridx - 1L) class(res) <- "Latex" res } } surveillance/R/isoWeekYear.R0000644000176200001440000000544113430566615015536 0ustar liggesusers###################################################################### # Extract numerical ISO week and year from a Date object # # Details: # This now simply wraps strftime(x, "%V") and strftime(x, "%G"), # supported on Windows since R 3.1.0. Thus, a handmade implementation # of isoWeekYear as in surveillance <= 1.16.2 is no longer necessary. # # Parameters: # Y -- year or a Date/POSIXt object # M -- month (only used if Y is the year) # D -- day (only used if Y is the year) # # Returns: # numeric ISO year and week of the date ###################################################################### isoWeekYear <- function(Y, M, D) { if (!inherits(Y, c("Date", "POSIXt"))) Y <- strptime(paste(Y,M,D,sep="-"),"%Y-%m-%d") Wn <- as.numeric(strftime(Y, "%V")) Yn <- as.numeric(strftime(Y, "%G")) return(list(ISOYear = Yn, ISOWeek = Wn)) } ###################################################################### # An extension of format.Date with additional formatting strings # - "%Q" / "%OQ" for the quarter (1-4 / I-IV) the month belongs to # - "%q" days within quarter # If these formats are not used, base format() is called. # # Params: # x - An object of type Date to be converted. # format - A character string. ###################################################################### #Small helper function - vectorized gsub, but disregarding names of x gsub2 <- function(pattern, replacement, x) { len <- length(x) mapply(FUN = gsub, pattern = rep_len(as.character(pattern), len), replacement = rep_len(as.character(replacement), len), x = x, MoreArgs = list(fixed = TRUE), SIMPLIFY = TRUE, USE.NAMES = FALSE) } formatDate <- function(x, format) { ##Anything to do? 
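  ## Examples (added annotation, not run): for x = as.Date("2017-05-15"),
  ## "%Q" yields 2, "%OQ" yields II, and "%q" yields 45 (the 45th day of
  ## the second quarter); formats without %Q/%OQ/%q fall through to base
  ## format() below.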
if (!grepl("%Q|%OQ|%q", format)) { #nope return(format(x,format)) } ##Replicate string formatStr <- rep_len(format,length(x)) ##If days within quarter requested (this is kind of slow) if (grepl("%q",format)) { ##Loop over vectors of dates dateOfQuarter <- sapply(x, function(date) { ##Month number in quarter modQ <- (as.numeric(format(date,"%m"))-1) %% 3 dateInMonth <- seq(date,length.out=2,by=paste0("-",modQ," month"))[2] ##Move to first of month return(dateInMonth - as.numeric(format(dateInMonth,"%d")) + 1) }) dayInQuarter <- as.numeric(x - dateOfQuarter) + 1 formatStr <- gsub2("%q",as.character(dayInQuarter),formatStr) } if (grepl("%Q|%OQ",format)) { Q <- (as.numeric(format(x,"%m"))-1) %/% 3 + 1 #quarter formatStr <- gsub2("%Q",as.character(Q),formatStr) formatStr <- gsub2("%OQ",as.roman(Q),formatStr) } ##The rest of the formatting - works normally as defined by strptime res <- character(length(x)) for (i in 1:length(x)) res[i] <- format(x[i],formatStr[i]) return(res) } surveillance/R/zzz.R0000644000176200001440000000351513117705477014147 0ustar liggesusers####################################### ### Hook functions for package start-up ####################################### gpclibCheck <- function (fatal = TRUE) { gpclibOK <- surveillance.options("gpclib") if (!gpclibOK && fatal) { message("Note: The gpclib license is accepted by ", sQuote("surveillance.options(gpclib=TRUE)"), ".") stop("acceptance of the gpclib license is required") } gpclibOK } .onLoad <- function (libname, pkgname) { ## initialize options reset.surveillance.options() } .onAttach <- function (libname, pkgname) { ## Startup message VERSION <- packageVersion(pkgname, lib.loc=libname) packageStartupMessage("This is ", pkgname, " ", VERSION, ". ", "For overview type ", sQuote(paste0("help(", pkgname, ")")), ".") ## decide if we should run all examples (some take a few seconds) allExamples <- if (interactive()) { TRUE } else { # R CMD check ## only do all examples if a specific environment variable is set ## (to any value different from "") nzchar(Sys.getenv("_R_SURVEILLANCE_ALL_EXAMPLES_")) ## CAVE: testing for _R_CHECK_TIMINGS_ as in surveillance < 1.9-1 ## won't necessarily skip long examples for daily checks on CRAN (see ## https://stat.ethz.ch/pipermail/r-devel/2012-September/064812.html ## ). For instance, the daily Windows checks run without timings. } surveillance.options(allExamples = allExamples) } ########################### ### Little helper functions ########################### ### determines multiplicities in a matrix (or data frame) ### and returns unique rows with appended column of counts ### using spatstat's multiplicity methods countunique <- function (x) unique(cbind(x, COUNT = multiplicity(x))) surveillance/R/twinstim_siaf_powerlawL.R0000644000176200001440000001723013165643423020220 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### _L_agged power-law kernel f(s) = (||s||/sigma)^-d for ||s|| >= sigma, else 1 ### Similar to the density of the Pareto distribution (but value 1 for < sigma) ### ### Copyright (C) 2013-2014,2017 Sebastian Meyer ### $Revision: 1988 $ ### $Date: 2017-10-06 11:04:19 +0200 (Fri, 06. 
Oct 2017) $ ################################################################################ siaf.powerlawL <- function (nTypes = 1, validpars = NULL, engine = "C") { nTypes <- as.integer(nTypes) stopifnot(length(nTypes) == 1L, nTypes > 0L) engine <- match.arg(engine, c("C", "R")) ## for the moment we don't make this type-specific if (nTypes != 1) stop("type-specific shapes are not yet implemented") ## helper expression, note: logpars=c(logscale=logsigma, logd=logd) tmp <- expression( logsigma <- logpars[[1L]], # used "[[" to drop names logd <- logpars[[2L]], sigma <- exp(logsigma), d <- exp(logd) ) ## spatial kernel f <- function (s, logpars, types = NULL) {} body(f) <- as.call(c(as.name("{"), tmp, expression( sLength <- sqrt(.rowSums(s^2, L <- length(s)/2, 2L)), fvals <- rep.int(1, L), inPLrange <- which(sLength > sigma), fvals[inPLrange] <- (sLength[inPLrange]/sigma)^-d, fvals ))) environment(f) <- baseenv() ## numerically integrate f over a polygonal domain F <- siaf_F_polyCub_iso(intrfr_name = "intrfr.powerlawL", engine = engine) ## fast integration of f over a circular domain Fcircle <- function (r, logpars, type = NULL) {} body(Fcircle) <- as.call(c(as.name("{"), tmp, expression( ## trivial case: radius of integration domain < sigma (=> constant f) if (r <= sigma) return(pi * r^2), ## otherwise, if r > sigma, integration via f^-1 fofr <- (r/sigma)^-d, basevolume <- pi * r^2 * fofr, # cylinder volume up to height f(r) intfinvsq <- sigma^2 * if (d == 2) -d*log(sigma/r) else { d/(d-2) * (1 - (sigma/r)^(d-2)) }, basevolume + pi * intfinvsq ) )) environment(Fcircle) <- baseenv() ## derivative of f wrt logpars ## CAVE: the derivative of f wrt logsigma is mathematically NaN at x=sigma ## this non-differentiability at the treshhold causes false convergence ## warnings by nlminb but is otherwise not relevant (could use slow and ## robust Nelder-Mead instead) deriv <- function (s, logpars, types = NULL) {} body(deriv) <- as.call(c(as.name("{"), tmp, expression( sLength <- sqrt(.rowSums(s^2, L <- length(s)/2, 2L)), derivlogsigma <- derivlogd <- numeric(L), inPLrange <- which(sLength > sigma), fPL <- (sLength[inPLrange]/sigma)^-d, derivlogsigma[inPLrange] <- d * fPL, derivlogd[inPLrange] <- fPL * log(fPL), cbind(derivlogsigma, derivlogd) ))) environment(deriv) <- baseenv() ## Numerical integration of 'deriv' over a polygonal domain Deriv <- siaf_Deriv_polyCub_iso( intrfr_names = c("intrfr.powerlawL.dlogsigma", "intrfr.powerlawL.dlogd"), engine = engine) ## simulate from the lagged power law (within a maximum distance 'ub') ##simulate <- siaf.simulatePC(intrfr.powerlawL) # <- generic simulator ##environment(simulate) <- getNamespace("surveillance") ## faster implementation taking advantage of the constant component: simulate <- function (n, logpars, type, ub) { sigma <- exp(logpars[[1L]]) d <- exp(logpars[[2L]]) ## Sampling via polar coordinates and inversion method ## random angle theta <- runif(n, 0, 2*pi) ## sampling radius r ## trivial case u < sigma: p(r) \propto r on [0;u] if (ub < sigma) { r <- ub * sqrt(runif(n)) # inversion sampling ## now rotate each point by a random angle to cover all directions return(r * cbind(cos(theta), sin(theta))) } ## case u >= sigma: p(r) \propto r if r sample component unir <- runif(n) <= mass1 / (mass1 + mass2) ## samples from the uniform short-range component: n1 <- sum(unir) r1 <- sigma * sqrt(runif(n1)) # similar to the case u < sigma ## samples from power-law component: p2(r) \propto r^(-d+1) on [sigma;u] ## For d>2 only, we could use 
VGAM::rpareto(n,sigma,d-2), d=1 is trivial n2 <- n - n1 r2 <- if (d==1) runif(n2, sigma, ub) else { # inversion sampling P2inv <- if (d == 2) { function (z) ub^z * sigma^(1-z) } else { function (z) (z*ub^(2-d) + (1-z)*sigma^(2-d))^(1/(2-d)) } P2inv(runif(n2)) } ## put samples from both components together r <- c(r1, r2) ## now rotate each point by a random angle to cover all directions r * cbind(cos(theta), sin(theta)) } environment(simulate) <- getNamespace("stats") ## return the kernel specification list(f=f, F=F, Fcircle=Fcircle, deriv=deriv, Deriv=Deriv, simulate=simulate, npars=2L, validpars=validpars) } ## integrate x*f(x) from 0 to R (vectorized) intrfr.powerlawL <- function (R, logpars, types = NULL) { sigma <- exp(logpars[[1L]]) d <- exp(logpars[[2L]]) pl <- which(R > sigma) upper <- R upper[pl] <- sigma res <- upper^2 / 2 # integral over x*constant part xplint <- if (d == 2) log(R[pl]/sigma) else (R[pl]^(2-d)-sigma^(2-d))/(2-d) res[pl] <- res[pl] + sigma^d * xplint res } ## integrate x * (df(x)/dlogsigma) from 0 to R (vectorized) intrfr.powerlawL.dlogsigma <- function (R, logpars, types = NULL) { sigma <- exp(logpars[[1L]]) d <- exp(logpars[[2L]]) pl <- which(R > sigma) res <- numeric(length(R)) xplint <- if (d == 2) log(R[pl]/sigma) else (R[pl]^(2-d)-sigma^(2-d))/(2-d) res[pl] <- d * sigma^d * xplint res } ## local({ # validation via numerical integration -> tests/testthat/test-siafs.R ## p <- function (r, sigma, d) ## r * siaf.powerlawL()$deriv(cbind(r,0), log(c(sigma,d)))[,1L] ## Pnum <- function (r, sigma, d) sapply(r, function (.r) { ## integrate(p, 0, .r, sigma=sigma, d=d, rel.tol=1e-8)$value ## }) ## r <- c(1,2,5,10,20,50,100) ## dev.null <- sapply(c(1,2,1.6), function(d) stopifnot(isTRUE( ## all.equal(intrfr.powerlawL.dlogsigma(r, log(c(3, d))), Pnum(r, 3, d))))) ## }) ## integrate x * (df(x)/dlogd) from 0 to R (vectorized) intrfr.powerlawL.dlogd <- function (R, logpars, types = NULL) { sigma <- exp(logpars[[1L]]) d <- exp(logpars[[2L]]) pl <- which(R > sigma) res <- numeric(length(R)) res[pl] <- if (d == 2) -(sigma*log(R[pl]/sigma))^2 else (sigma^d * R[pl]^(2-d) * (d-2)*d*log(R[pl]/sigma) - d*(sigma^2 - R[pl]^(2-d)*sigma^d)) / (d-2)^2 res } surveillance/R/twinstim_siaf_powerlaw1.R0000644000176200001440000000611713506665415020173 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### 1-parameter power-law kernel f(s) = (1 + ||s||)^-d, i.e., sigma = 1 ### ### Copyright (C) 2019 Sebastian Meyer ### $Revision: 2430 $ ### $Date: 2019-07-02 16:32:45 +0200 (Tue, 02. 
Jul 2019) $ ################################################################################ siaf.powerlaw1 <- function (nTypes = 1, validpars = NULL, sigma = 1) { nTypes <- as.integer(nTypes) stopifnot(length(nTypes) == 1L, nTypes > 0L) stopifnot(isScalar(sigma), sigma > 0) SIAF <- siaf.powerlaw(nTypes) # we can reuse some functions from there ## for the moment we don't make this type-specific if (nTypes != 1) stop("type-specific shapes are not yet implemented") ## spatial kernel f <- function (s, logd, types = NULL, sigma = 1) { d <- exp(logd) sLength <- sqrt(.rowSums(s^2, nrow(s), 2L)) (sLength + sigma)^-d } ## set desired sigma as default value formals(f)$sigma <- sigma environment(f) <- baseenv() ## numerically integrate f over a polygonal domain F <- function (polydomain, f, logd, type = NULL, logsigma = 0, ...) { logpars <- c(logsigma, logd) siaf_polyCub_iso(polydomain$bdry, "intrfr.powerlaw", logpars, list(...)) } formals(F)$logsigma <- log(sigma) environment(F) <- getNamespace("surveillance") ## fast integration of f over a circular domain Fcircle <- SIAF$Fcircle # hack original two-parameter version ... body(Fcircle)[2:4] <- NULL names(formals(Fcircle))[2] <- "logd" formals(Fcircle)$sigma <- sigma ## derivative of f wrt logpars deriv <- function (s, logd, types = NULL, sigma = 1) { d <- exp(logd) sLength <- sqrt(.rowSums(s^2, nrow(s), 2L)) tmp <- -d*log(sLength + sigma) matrix(tmp * exp(tmp)) } formals(deriv)$sigma <- sigma environment(deriv) <- baseenv() ## Numerical integration of 'deriv' over a polygonal domain Deriv <- function (polydomain, deriv, logd, type = NULL, logsigma = 0, ...) { logpars <- c(logsigma, logd) siaf_polyCub_iso(polydomain$bdry, "intrfr.powerlaw.dlogd", logpars, list(...)) } formals(Deriv)$logsigma <- log(sigma) environment(Deriv) <- getNamespace("surveillance") ## Simulation function (via polar coordinates) simulate <- SIAF$simulate # hack original two-parameter version ... names(formals(simulate))[2] <- "logd" formals(simulate)$logsigma <- log(sigma) body(simulate) <- as.call( append(as.list(body(simulate)), quote(siafpars <- c(logsigma, logd)), after = 1) ) ## return the kernel specification list(f = f, F = F, Fcircle = Fcircle, deriv = deriv, Deriv = Deriv, simulate = simulate, npars = 1L, validpars = validpars) } surveillance/R/algo_glrnb.R0000644000176200001440000003143714013521730015404 0ustar liggesusers###################################################################### # # Implementation of GLR and ordinary Poisson/NegBin CUSUM # -- documentation converted to Rd format. 
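# (added annotation) In the intercept-change case with unknown change size,
# the detector below monitors the window-limited GLR statistic
#   GLR(s) = max_{1 <= k <= s} sup_kappa sum_{t=k}^{s} log[ f_kappa(x_t) / f_0(x_t) ],
# and an alarm is raised as soon as GLR(s) >= c.ARL (cf. Hoehle and Paul,
# 2008); if control$theta is given, the sup is dropped and an ordinary CUSUM
# with prespecified change size is run instead.
#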
# # Author: Michael Hoehle (with contributions by Valentin Wimmer) # Date: 8 Jan 2008 # History # - 2016-01-17 added ret="cases" for glr using the NegBin distribution ###################################################################### algo.glrnb <- function(disProgObj, control = list(range=range,c.ARL=5, mu0=NULL, alpha=0, Mtilde=1, M=-1, change="intercept", theta=NULL,dir=c("inc","dec"), ret=c("cases","value"),xMax=1e4)) { #Small helper function either <- function(cond, whenTrue, whenFalse) { if (cond) return(whenTrue) else return(whenFalse) } # Set the default values if not yet set if(is.null(control[["c.ARL",exact=TRUE]])) control$c.ARL <- 5 if(is.null(control[["change",exact=TRUE]])) control$change <- "intercept" if(is.null(control[["Mtilde",exact=TRUE]])) control$Mtilde <- 1 if(is.null(control[["M",exact=TRUE]])) control$M <- -1 if(is.null(control[["dir",exact=TRUE]])) control$dir <- "inc" if(is.null(control[["ret",exact=TRUE]])) control$ret <- "value" if(is.null(control[["xMax",exact=TRUE]])) control$xMax <- 1e4 if(!is.null(control[["theta",exact=TRUE]])) { if(control[["theta",exact=TRUE]] == 1) { stop("Error: theta has to be larger than 1!") } } ##Set alpha to null as default. Not necessary, because it would be taken from ##glrnb output. ##if(is.null(control[["alpha",exact=TRUE]])) control$alpha <- 0 #GLM (only filled if estimated) m <- NULL ################################################ #Extract the important parts from the arguments ################################################ observed <- disProgObj$observed #range is fixed, but t is modified as we iterate the cusum t <- control$range ; range <- control$range control$mu0Model <- NULL control$dir <- match.arg(control$dir, c("inc","dec")) dir <- ifelse(control$dir=="inc",1,-1) control$ret <- match.arg(control$ret, c("value","cases")) ret <- pmatch(control$ret,c("value","cases")) mod <- list() # Estimate m (the expected number of cases), i.e. parameter lambda of a # poisson distribution based on time points 1:t-1 if (is.null(control[["mu0",exact=TRUE]]) | is.list(control[["mu0",exact=TRUE]])) { #Initialize if (is.null(control[["mu0",exact=TRUE]])) control$mu0 <- list() if (is.null(control[["mu0",exact=TRUE]][["S"]])) control$mu0$S <- 1 if (is.null(control[["mu0",exact=TRUE]][["trend"]])) control$mu0$trend <- FALSE if (is.null(control[["mu0",exact=TRUE]][["refit"]])) control$mu0$refit <- FALSE control$mu0Model <- control$mu0 #Estimate using a hook function (lazy evaluation) control$mu0 <- estimateGLRNbHook()$pred mod[[1]] <- estimateGLRNbHook()$mod # if it is necessary to estimate alpha. Note: glm.nb uses a different # parametrization of the negative binomial distribution, i.e. the # variance is 'mu + mu^2/size' (?dnbinom). # Hence the correct alpha is 1/theta. But now it's the same every time. if(is.null(control[["alpha",exact=TRUE]])) control$alpha <- 1/mod[[1]]$theta } #The counts x <- observed[control$range] mu0 <- control$mu0 #Reserve space for the results # start with cusum[timePoint -1] = 0, i.e. set cusum[1] = 0 alarm <- matrix(data = 0, nrow = length(t), ncol = 1) upperbound <- matrix(data = 0, nrow = length(t), ncol = 1) #Setup counters for the progress doneidx <- 0 N <- 1 xm10 <- 0 noofalarms <- 0 noOfTimePoints <- length(t) #Loop as long as we are not through the sequence while (doneidx < noOfTimePoints) { # cat("Doneidx === ",doneidx,"\n") # Call the C-interface -- this should depend on the type if (control$change == "intercept") { #Generalized likelihood ratio vs.
ordinary CUSUM if (is.null(control[["theta",exact=TRUE]])) { if (control$alpha == 0) { #poisson if (control$M > 0 ){ # window limited res <- .C(C_glr_cusum_window,as.integer(x),as.double(mu0),length(x),as.integer(control$M),as.integer(control$Mtilde),as.double(control$c.ARL),N=as.integer(0),val=as.double(numeric(length(x))),cases=as.double(numeric(length(x))),as.integer(dir),as.integer(ret)) } else { # standard, not window limited res <- .C(C_glr_cusum,as.integer(x),as.double(mu0),length(x),as.integer(control$Mtilde),as.double(control$c.ARL),N=as.integer(0),val=as.double(numeric(length(x))),cases=as.double(numeric(length(x))),as.integer(dir),as.integer(ret)) } } else { #negbin. This is directly the window-limited version; does M=-1 work here? res <- .C(C_glr_nb_window,x=as.integer(x),mu0=as.double(mu0),alpha=as.double(control$alpha),lx=length(x),Mtilde=as.integer(control$Mtilde),M=as.integer(control$M),c.ARL=as.double(control$c.ARL),N=as.integer(0),val=as.double(numeric(length(x))),dir=as.integer(dir)) ##hoehle - 2016-01-17. Try out calculating upper bound in terms of cases if (control$ret == "cases") { ##Warn that this might be slow. message("Return of cases for the GLR detector based on the negative binomial distribution is currently\n only implemented by brute force and hence might be very slow!") ### browser() myx <- x res$cases <- rep(0,length(res$val)) for (pos in seq_len(min(length(x),res$N))) { myx <- x gotAlarm <- (res$N <= pos) #already got an alarm at the position? direction <- ifelse(gotAlarm, -1, 1) #go up or down? alarmChange <- FALSE #have we succeeded in changing x such that the alarm status changed? #Loop over values until one is such that an alarm at (or before!) the time point is given while (!alarmChange & (myx[pos] <= control$xMax) & (myx[pos] >=1)) { myx[pos] <- myx[pos] + direction ##cat("pos=",pos,"x=",myx[pos],"\n") tmpRes <- .C(C_glr_nb_window,x=as.integer(myx),mu0=as.double(mu0),alpha=as.double(control$alpha),lx=length(myx),Mtilde=as.integer(control$Mtilde),M=as.integer(control$M),c.ARL=as.double(control$c.ARL),N=as.integer(0),val=as.double(numeric(length(myx))),dir=as.integer(dir)) if (!gotAlarm & (tmpRes$N <= pos)) { alarmChange <- TRUE ; res$cases[pos] <- myx[pos]} if (gotAlarm & (tmpRes$N > pos)) { alarmChange <- TRUE ; res$cases[pos] <- myx[pos] + 1} } if (!alarmChange) { res$cases[pos] <- ifelse(gotAlarm,NA,1e99) } #didn't find alarm before control$xMax } } ##end new 2016 addition to calculate 'cases' for negbin glrnb } } else { ###################### !is.null(control$theta), i.e.
ordinary CUSUM if (control$alpha == 0) { #poisson res <- .C(C_lr_cusum,x=as.integer(x),mu0=as.double(mu0),lx=length(x),as.double(control$theta),c.ARL=as.double(control$c.ARL),N=as.integer(0),val=as.double(numeric(length(x))),cases=as.double(numeric(length(x))),as.integer(ret)) } else { #negbin res <- .C(C_lr_cusum_nb,x=as.integer(x),mu0=as.double(mu0),alpha=as.double(control$alpha),lx=length(x),as.double(control$theta),c.ARL=as.double(control$c.ARL),N=as.integer(0),val=as.double(numeric(length(x))),cases=as.double(numeric(length(x))),as.integer(ret)) } } } else { ################### Epidemic chart ####################### if (control$change == "epi") { if (control$alpha == 0) { #pois res <- .C(C_glr_epi_window,as.integer(x),as.double(mu0),length(x),as.integer(control$Mtilde),as.integer(control$M),as.double(xm10),as.double(control$c.ARL),N=as.integer(0),val=as.double(numeric(length(x)))) } else { res <- .C(C_glr_nbgeneral_window,as.integer(x),as.double(mu0),alpha=as.double(control$alpha),lx=length(x),Mtilde=as.integer(control$Mtilde),M=as.integer(control$M),xm10=as.double(xm10),c.ARL=as.double(control$c.ARL),N=as.integer(0),val=as.double(numeric(length(x))),dir=as.integer(dir)) } } } ##In case an alarm found log this and reset the chart at res$N+1 if (res$N <= length(x)) { #Put appropriate value in upperbound upperbound[1:res$N + doneidx] <- either(ret == 1, res$val[1:res$N] ,res$cases[1:res$N]) alarm[res$N + doneidx] <- TRUE #Chop & get ready for next round xm10 <- x[res$N] #put start value x_0 to last value x <- x[-(1:res$N)] ; t <- t[-(1:res$N)] #If no refitting is to be done things are easy if (!is.list(control$mu0Model) || (control$mu0Model$refit == FALSE)) { mu0 <- mu0[-(1:res$N)] } else { #Update the range (how to change back??) range <- range[-(1:res$N)] mu0 <- estimateGLRNbHook()$pred mod[[noofalarms+2]] <- estimateGLRNbHook()$mod control$mu0[(doneidx + res$N + 1):length(control$mu0)] <- mu0 #Note: No updating of alpha is currently done. } noofalarms <- noofalarms + 1 } doneidx <- doneidx + res$N } #fix of the problem that no upperbound-statistic is returned after #last alarm upperbound[(doneidx-res$N+1):nrow(upperbound)] <- either(ret == 1, res$val, res$cases) #fix of the problem that no upperbound-statistic is returned #in case of no alarm if (noofalarms == 0) { upperbound <- either(ret==1, res$val, res$cases) } # ensure upper bound is positive and not NaN upperbound[is.na(upperbound)] <- 0 upperbound[upperbound < 0] <- 0 # Add name and data name to control object algoName <- either(control$alpha == 0, "glrpois:", "glrnb:") control$name <- paste(algoName, control$change) control$data <- paste(deparse(substitute(disProgObj))) control$m <- m control$mu0Model$fitted <- mod # return alarm and upperbound vectors result <- list(alarm = alarm, upperbound = upperbound, disProgObj=disProgObj,control=control) class(result) = "survRes" # for surveillance system result return(result) } ##################################################################### ### Function to estimate a Poisson or glm.nb model on the fly - to be ### called within the algo.glrnb function. Experts can customize this ### function. 
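### (Sketch of the expected interface, derived from the default hook
### below: a custom hook is evaluated inside algo.glrnb(), fetches
### 'control', 'disProgObj' and 'range' from the parent frame, and
### must return
###   list(mod = <fitted model object>,
###        pred = <numeric vector of predicted means mu0 for 'range'>).)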
##################################################################### estimateGLRNbHook <- function() { #Fetch control object from parent control <- parent.frame()$control #The period p <- parent.frame()$disProgObj$freq #Current range to perform surveillance on range <- parent.frame()$range #Define phase1 & phase2 data set (phase2= the rest) train <- 1:(range[1]-1) test <- range #Perform an estimation based on all observations before timePoint #Even better - don't do this at all in the algorithm - force #user to do it himself - because it's a model selection problem data <- data.frame(y=parent.frame()$disProgObj$observed[train],t=train) #Build the model equation formula <- "y ~ 1 " if (control$mu0Model$trend) { formula <- paste(formula," + t",sep="") } for (s in seq_len(control$mu0Model$S)) { formula <- paste(formula,"+cos(2*",s,"*pi/p*t)+ sin(2*",s,"*pi/p*t)",sep="") } ##hoehle - 2016-01-16 -- problematic: a full model was fitted, but ##this implied a different alpha. Changed now such that a glm ##is fitted having the specified alpha (i.e. theta) fixed. ##Determine appropriate fitter function if (is.null(control[["alpha",exact=TRUE]])) { ##Fit while also estimating alpha (if possible!) m <- eval(substitute(glm.nb(form,data=data),list(form=as.formula(formula)))) } else { ##Fit the Poisson GLM if (control$alpha == 0) { message(paste0("glrnb: Fitting Poisson model because alpha == 0")) m <- eval(substitute(glm(form,family=poisson(),data=data),list(form=as.formula(formula)))) } else { message(paste0("glrnb: Fitting glm.nb model with alpha=",control$alpha)) m <- eval(substitute(glm(form,family=negative.binomial(theta=1/control$alpha),data=data),list(form=as.formula(formula)))) } } #Predict mu_{0,t} pred <- as.numeric(predict(m,newdata=data.frame(t=range),type="response")) return(list(mod=m,pred=pred)) } ###################################################################### # simple wrapper for the Poisson case ###################################################################### algo.glrpois <- function(disProgObj, control = list(range=range,c.ARL=5, mu0=NULL, Mtilde=1, M=-1, change="intercept", theta=NULL,dir=c("inc","dec"), ret=c("cases","value"),xMax=1e4)) { if (is.null(control$alpha)) { control$alpha <- 0 } else if (control$alpha != 0) { stop("algo.glrpois has to operate with control$alpha = 0") } algo.glrnb(disProgObj, control) } surveillance/R/stsplot_space.R0000644000176200001440000001772414024100031016151 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Snapshot map (spplot) of an sts-object or matrix of counts ### ### Copyright (C) 2013-2014,2016,2017,2020,2021 Sebastian Meyer ### $Revision: 2662 $ ### $Date: 2021-03-16 10:53:29 +0100 (Tue, 16. Mar 2021) $ ################################################################################ ## x: "sts" or (simulated) matrix of counts ## tps: one or more time points. The unit-specific _sum_ of time points "tps" is ## plotted. tps=NULL means cumulation over all time points in x. ## at: number of levels for the grouped counts or specific break points to ## use, or list(n, data, trafo) passed to getPrettyIntervals(), ## where data and trafo are optional. ## CAVE: intervals are closed on the left and open to the right.
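## For illustration: with at = c(0, 1, 5, 10), a region with 5 cases
## falls into the third class [5, 10), while a region with 10 cases
## would be left blank (NA) -- hence the need to ensure
## max(at) > max(counts), cf. the next comment.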
## From panel.levelplot: zcol[z >= at[i] & z < at[i + 1]] <- i ## i.e. at=0:1 will have NA (=also white) for counts=1, thus we have to ## ensure max(at) > max(counts) stsplot_space <- function (x, tps = NULL, map = x@map, population = NULL, main = NULL, labels = FALSE, at = 10, col.regions = NULL, colorkey = list(space="bottom", labels=list(at=at)), total.args = NULL, gpar.missing = list(col="darkgrey", lty=2, lwd=2), sp.layout = NULL, xlim = bbox(map)[1, ], ylim = bbox(map)[2, ], ...) { counts <- if (inherits(x, "sts")) observed(x) else x if (is.null(tps)) tps <- seq_len(nrow(counts)) if (length(map) == 0L) stop("no map") if (is.null(colnames(counts))) stop("need 'colnames(x)' (to be matched against 'row.names(map)')") if (!all(colnames(counts) %in% row.names(map))) stop("incomplete 'map'; ensure that 'all(colnames(x) %in% row.names(map))'") ## compute data to plot ncases <- getCumCounts(counts, tps) total <- sum(ncases) if (!is.null(population)) { # divide counts by region-specific population population <- parse_population_argument(population, x) # pop matrix populationByRegion <- population[tps[1L],] # pop at first time point ncases <- ncases / populationByRegion # (cumulative) incidence by region total <- total / sum(populationByRegion) } ## add ncases to map@data map <- as(map, "SpatialPolygonsDataFrame") map$ncases <- NA_real_ map$ncases[match(colnames(counts),row.names(map))] <- ncases ## default main title if (is.null(main) && inherits(x, "sts")) main <- stsTimeRange2text(x, tps) ## check/determine color break points 'at' at <- checkat(at, ncases, counts = is.null(population)) ## default color palette if (is.null(col.regions)) { separate0 <- is.null(population) && at[1] == 0 && at[2] <= 1 col.regions <- c(if (separate0) "white", .hcl.colors(length(at)-1-separate0)) } ## colorkey settings if (!missing(colorkey) && is.list(colorkey)) colorkey <- modifyList(eval(formals()$colorkey), colorkey) ## automatic additions to sp.layout (region labels and total) if (is.list(gpar.missing) && any(is.na(map$ncases))) { layout.missing <- c(list("sp.polygons", obj=map[is.na(map$ncases),]), gpar.missing) sp.layout <- c(sp.layout, list(layout.missing)) } if (!is.null(layout.labels <- layout.labels(map, labels))) { sp.layout <- c(sp.layout, list(layout.labels)) } if (is.list(total.args)) { total.args <- modifyList(list(label="Overall: ", x=1, y=0), total.args) if (is.null(total.args$just)) total.args$just <- with (total.args, if (all(c(x,y) %in% 0:1)) { c(c("left", "right")[1+x], c("bottom","top")[1+y]) } else "center") total.args$label <- paste0(total.args$label, round(total,1)) layout.total <- c(grid::grid.text, total.args) ## "grid.text" wouldn't work since package "sp" doesn't import it sp.layout <- c(sp.layout, list(layout.total)) } ## generate the spplot() args <- list(quote(map[!is.na(map$ncases),]), "ncases", main=main, col.regions=col.regions, at=at, colorkey=colorkey, sp.layout=sp.layout, xlim=xlim, ylim=ylim, quote(...)) do.call("spplot", args) } ####################################################### ### Auxiliary functions for the "sts" snapshot function ####################################################### ## sum of counts by unit over time points "tps" ## the resulting vector has no names getCumCounts <- function (counts, tps) { ntps <- length(tps) if (ntps == 1) { counts[tps,] } else { .colSums(counts[tps,,drop=FALSE], ntps, ncol(counts)) } } parse_population_argument <- function (population, x) { if (is.matrix(population)) { if (!identical(dim(population), dim(x))) 
stop("'dim(population)' does not match the data dimensions") } else if (isScalar(population)) { # a unit, e.g., per 1000 inhabitants if (!inherits(x, "sts")) stop("'", deparse(substitute(x)), "' is no \"sts\" object; ", "population numbers must be supplied") population <- population(x) / population } else { # region-specific population numbers (as in surveillance <= 1.12.2) stopifnot(is.vector(population, mode = "numeric")) if (length(population) != ncol(x)) stop("'length(population)' does not match the number of data columns") population <- rep(population, each = nrow(x)) dim(population) <- dim(x) } population } checkat <- function (at, data, counts = TRUE) { # for non-transformed "data" if (isScalar(at)) at <- list(n=at) if (is.list(at)) { at <- modifyList(list(n=10, data=data, counts=counts), at) do.call("getPrettyIntervals", at) } else { # manual breaks stopifnot(is.vector(at, mode = "numeric"), !anyNA(at)) at <- sort(at) r <- range(data, na.rm = TRUE) c(if (r[1L] < at[1L]) 0, at, if (r[2L] >= at[length(at)]) { ## round up max to 1 significant digit (including 0.1 to 0.2) .decs <- 10^floor(log10(r[2L])) ceiling(r[2L]/.decs + sqrt(.Machine$double.eps))*.decs }) } } getPrettyIntervals <- function (n, data, trafo=NULL, counts=TRUE, ...) { maxcount <- max(data, na.rm=TRUE) if (counts && maxcount < n) { # no aggregation of counts necessary at <- 0:ceiling(maxcount+sqrt(.Machine$double.eps)) # max(at) > maxcount } else { at <- if (is.null(trafo)) { # equivalent to trafo=scales::sqrt_trans() pretty(sqrt(data), n=n+1, ...)^2 } else { scales::trans_breaks(trafo$trans, trafo$inv, n=n+1, ...)(data) } ## { # alternative: quantile-based scale (esp. for incidence plots) ## quantile(data, probs=seq(0,1,length.out=n+1), na.rm=TRUE) ## } if (counts && at[1] == 0 && at[2] > 1) # we want 0 counts separately ("white") at <- sort(c(1, at)) if (at[length(at)] == maxcount) # ensure max(at) > max(data) at[length(at)] <- at[length(at)] + if (counts) 1 else 0.001*diff(range(at)) } at } stsTime2text <- function (stsObj, tps=TRUE, fmt=NULL) { if (is.null(fmt)) fmt <- switch(as.character(stsObj@freq), "1" = "%i", "52" = "%i-W%02i", "%i/%i") sprintf(fmt, year(stsObj)[tps], epochInYear(stsObj)[tps]) } stsTimeRange2text <- function (stsObj, tps, fmt=NULL, sep=" to ") { tpsRangeYW <- stsTime2text(stsObj, tps=range(tps), fmt=fmt) paste0(unique(tpsRangeYW), collapse=sep) } surveillance/R/twinstim_siaf_student.R0000644000176200001440000000762213165643423017736 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Student (t) kernel f(s) = (||s||^2+sigma^2)^-d ### This is a reparametrization of the t-kernel; For d=1, this is the kernel of ### the Cauchy density with scale sigma; in Geostatistics, a correlation ### function of this kind is known as the Cauchy model. ### ### Copyright (C) 2013-2014,2017 Sebastian Meyer ### $Revision: 1988 $ ### $Date: 2017-10-06 11:04:19 +0200 (Fri, 06. 
Oct 2017) $ ################################################################################ siaf.student <- function (nTypes = 1, validpars = NULL, engine = "C") { nTypes <- as.integer(nTypes) stopifnot(length(nTypes) == 1L, nTypes > 0L) engine <- match.arg(engine, c("C", "R")) ## for the moment we don't make this type-specific if (nTypes != 1) stop("type-specific shapes are not yet implemented") ## helper expression, note: logpars=c(logscale=logsigma, logd=logd) tmp <- expression( logsigma <- logpars[[1L]], # used "[[" to drop names logd <- logpars[[2L]], sigma <- exp(logsigma), d <- exp(logd) ) ## spatial kernel f <- function (s, logpars, types = NULL) {} body(f) <- as.call(c(as.name("{"), tmp, expression(s2 <- .rowSums(s^2, nrow(s), 2L)), expression((s2+sigma^2)^-d) )) environment(f) <- baseenv() ## numerically integrate f over a polygonal domain F <- siaf_F_polyCub_iso(intrfr_name = "intrfr.student", engine = engine) ## fast integration of f over a circular domain ## is not relevant for this heavy-tail kernel since we don't use ## 'effRange', and usually eps.s=Inf ##Fcircle <- function (r, logpars, type = NULL) {} ## derivative of f wrt logpars deriv <- f body(deriv)[[length(body(deriv))]] <- # assignment for return value of f substitute(fvals <- x, list(x=body(deriv)[[length(body(deriv))]])) body(deriv) <- as.call(c(as.list(body(deriv)), expression( derivlogsigma <- -2*d*sigma^2 * fvals / (s2+sigma^2), derivlogd <- log(fvals) * fvals, cbind(derivlogsigma, derivlogd, deparse.level = 0) ))) environment(deriv) <- baseenv() ## Numerical integration of 'deriv' over a polygonal domain Deriv <- siaf_Deriv_polyCub_iso( intrfr_names = c("intrfr.student.dlogsigma", "intrfr.student.dlogd"), engine = engine) ## simulation from the kernel (via polar coordinates) simulate <- siaf.simulatePC(intrfr.student) environment(simulate) <- getNamespace("surveillance") ## return the kernel specification list(f=f, F=F, deriv=deriv, Deriv=Deriv, simulate=simulate, npars=2L, validpars=validpars) } ## integrate x*f(x) from 0 to R (vectorized) intrfr.student <- function (R, logpars, types = NULL) { sigma <- exp(logpars[[1L]]) d <- exp(logpars[[2L]]) if (d == 1) { log(R^2+sigma^2) / 2 - log(sigma) } else { ( (R^2+sigma^2)^(-d+1) - (sigma^2)^(-d+1) ) / (2-2*d) } } ## integrate x * (df(x)/dlogsigma) from 0 to R (vectorized) intrfr.student.dlogsigma <- function (R, logpars, types = NULL) { sigma <- exp(logpars[[1L]]) d <- exp(logpars[[2L]]) sigma^2 * ( (R^2+sigma^2)^-d - sigma^(-2*d) ) } ## integrate x * (df(x)/dlogd) from 0 to R (vectorized) intrfr.student.dlogd <- function (R, logpars, types = NULL) { sigma <- exp(logpars[[1L]]) d <- exp(logpars[[2L]]) if (d == 1) { log(sigma)^2 - log(R^2+sigma^2)^2 / 4 } else { # thanks to Maple 17 primitive <- function (x) { x2ps2 <- x^2 + sigma^2 (d*(d-1)*log(x2ps2) + d) / (2*(d-1)^2 * (x2ps2)^(d-1)) } primitive(R) - primitive(0) } } surveillance/R/algo_twins.R0000644000176200001440000002130214013521730015432 0ustar liggesusers###################################################################### # Experimental version -- integrating the twins program into # the surveillance package ###################################################################### algo.twins <- function(disProgObj, control= list(burnin=1000, filter=10, sampleSize=2500, noOfHarmonics=1, alpha_xi=10, beta_xi=10, psiRWSigma=0.25, alpha_psi=1, beta_psi=0.1, nu_trend=FALSE, logFile="twins.log")) { if (inherits(disProgObj, "sts")) disProgObj <- sts2disProg(disProgObj) if (ncol(disProgObj$observed)>1) { 
stop("algo.twins() only handles univariate time series of counts") } ## Determine period from data T <- as.integer(disProgObj$freq) ## set default values (if not provided in control) if(is.null(control[["burnin",exact=TRUE]])) control$burnin <- 1000 if(is.null(control[["filter",exact=TRUE]])) control$filter <- 10 if(is.null(control[["sampleSize",exact=TRUE]])) control$sampleSize <- 2500 if(is.null(control[["alpha_xi",exact=TRUE]])) control$alpha_xi <- 10 if(is.null(control[["beta_xi",exact=TRUE]])) control$beta_xi <- 10 if(is.null(control[["psiRWSigma",exact=TRUE]])) control$psiRWSigma <- 0.25 if(is.null(control[["alpha_psi",exact=TRUE]])) control$alpha_psi <- 1 if(is.null(control[["beta_psi",exact=TRUE]])) control$beta_psi <- 0.1 if(is.null(control[["nu_trend",exact=TRUE]])) control$nu_trend <- FALSE if(is.null(control[["logFile",exact=TRUE]])) control$logFile <- "twins.log" if(is.null(control[["noOfHarmonics",exact=TRUE]])) control$noOfHarmonics <- 1 nfreq <- control$noOfHarmonics control$logFile2 <- paste(control$logFile,"2",sep="") ## Call the C code x <- disProgObj$observed n <- as.integer(dim(x)[1]) I <- as.integer(dim(x)[2]) with(control, .C(C_twins, x=as.integer(x), n=n, I=I, logFile=logFile, logFile2=logFile2, burnin=as.integer(burnin), filter=as.integer(filter), sampleSize=as.integer(sampleSize), alpha_xi=as.double(alpha_xi), beta_xi=as.double(beta_xi), T=as.integer(T), nfreq=as.integer(nfreq), psiRWSigma=as.double(0.25), alpha_psi=as.double(alpha_psi), beta_psi=as.double(beta_psi), nu_trend=as.integer(nu_trend))) ## Log files results <- read.table(control$logFile,header=T,na.strings=c("NaN","-NaN")) results2 <- read.table(control$logFile2,header=T,na.strings=c("NaN","-NaN")) acc <- read.table(paste(control$logFile,".acc",sep=""),col.names=c("name","RWSigma","acc")) rownames(acc) <- acc[,1] acc <- acc[,-1] ## Nothing is returned by the function - result is not a ## standard survObj result <- structure(list(control=control, disProgObj=disProgObj, logFile=results, logFile2=results2), class="atwins") return(result) } ###################################################################### # Adapted the functions form figures.R ###################################################################### ## Helper functions to make list of Z and the means of X,Y and omega make.pois <- function(obj) { n <- nrow(obj$disProgObj$observed) m<-list() m$n <- n m$Z <- obj$disProgObj$observed m$X <- numeric(n) m$Y <- numeric(n) m$omega <- numeric(n) ## Read means at each time instance Vars <- c("X","Y","omega") for (t in 1:n) { for (v in Vars) { m[[v]][t] <- obj$logFile2[,paste(v,".",t,".",sep="")] } } return(m) } pois.plot <- function(m.results,...) { plotorder <- c(expression(Z),expression(Y),expression(X)) plotcols <- c(1,"red","blue") lwd <- c(1,3,3) sts <- disProg2sts(m.results$disProgObj) ## Make default legend if nothing else is specified. if (!"legend.opts" %in% names(list(...))) { plot(sts,legend.opts=list(x="topleft",legend=paste(plotorder),lwd=lwd,col=plotcols,horiz=TRUE,y.intersp=0,lty=1,pch=NA),...) } else { plot(sts,...) } ## Add Y and X lines for (i in 2:length(plotorder)) { lines(1:(m.results$n)+0.5,m.results[[paste(plotorder[i])]][c(2:m.results$n,m.results$n)],type="s",col=plotcols[i],lwd=lwd[i]) } } ## makes list of gamma, zeta and nu make.nu <- function(obj) { n <- nrow(obj$disProgObj$observed) samplesize <- obj$control$sampleSize frequencies <- obj$control$noOfHarmonics # instead of just always "1" ! 
season <- obj$disProgObj$freq basefrequency <- 2 * pi / season ## optionally also get the linear time trend coefficient withTrend <- obj$control$nu_trend ## this list will be returned at the end m<-list() ## first get all the gamma's from the logFile matrix into nicer elements of ## the list m for (j in 0:(2*frequencies + withTrend)) { m$gamma[[j+1]] <- numeric(samplesize) m[["gamma"]][[j+1]] <- obj$logFile[,paste("gamma",".",j,".",sep="")] } ## zeta is a list which has one element for each time point (vector of samples) m$zeta<-list() ## for all time points: for (t in 1:n) { ## start with the intercept m$zeta[[t]]<-m$gamma[[1]] ## add all harmonic terms for(j in 1:frequencies){ m$zeta[[t]] <- m$zeta[[t]] + m$gamma[[2*j]]*sin(basefrequency*j*(t-1)) + m$gamma[[2*j+1]]*cos(basefrequency*j*(t-1)) } ## and (optionally) finally add the linear trend if(withTrend) { m$zeta[[t]] <- m$zeta[[t]] + m$gamma[[2*frequencies + 2]] * (t - n/2) } } ## nu is the analogous list with the exponentiated zeta's m$nu<-list() for (t in 1:n) { m$nu[[t]]<-exp(m$zeta[[t]]) } ## also copy the number of harmonics m$frequencies <- frequencies ## and return return(m) } ## Function to plot median, and quantiles over time for m.par (m.par is list of n vectors, x is time) tms.plot <-function(x,m.par,xlab="",ylab="",ylim=FALSE,...){ m<-list() n<-length(m.par) m$median<-numeric(n) for (t in 1:n) { m$median[t]<- median(m.par[[t]]) m$q025[t]<- quantile(m.par[[t]],0.025) m$q975[t]<- quantile(m.par[[t]],0.975) } if(!ylim){ ymin<-min(m$q025) ymax<-max(m$q975) ylim=c(ymin,ymax) } plot(x-1,m$q975[x],type="l",col="red",main="",xlab=xlab,ylab=ylab,ylim=ylim,...) lines(x-1,m$median[x],type="l") lines(x-1,m$q025[x],type="l",col="red") } ###################################################################### # Function to plot an atwins object -- currently not # properly documented ###################################################################### plot.atwins <- function(x, which=c(1,4,6,7), ask=TRUE,...) { ## Extract from the 3 dots if(is.null(which)) { which <- c(1,4,6,7) } if(is.null(ask)) { ask <- TRUE } ## Make list of X,Y,Z,omega means of results2 m.results <-make.pois(x) m.results$disProgObj <- x$disProgObj ## Make list of results of gamma, zeta and nu nu<-make.nu(x) ## Plots show <- rep(FALSE,7) show[which] <- TRUE par(ask=ask) if (show[1]) { par(mfcol=c(1,1)) pois.plot(m.results,...) 
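## (Panels selected via 'which' continue below: 2 = traces of the
## gamma coefficients, 3 = traces of K, xi and psi, 4 = ACFs of K and
## psi, 5 = pointwise quantiles of the time-varying nu, 6 = histograms
## of K and psi, 7 = posterior predictive histogram of Z[n+1].)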
} if (show[2]) { ## make room for 2 * (frequencies + 1) panels par(mfcol=c(2,nu$frequencies+1)) ## and plot all gamma coefficients (possibly including the linear time ## trend coef) for(j in seq_along(nu$gamma)) { plot(nu$gamma[[j]],type="l",ylab=paste("gamma",j - 1,sep="")) } } if (show[3]) { par(mfcol=c(1,1)) plot(x$logFile$K,type="l",ylab=expression(K)) plot(x$logFile$xilambda,type="l",ylab=expression(xi)) plot(x$logFile$psi,type="l",ylab=expression(psi)) } if (show[4]) { par(mfcol=c(1,2)) acf(x$logFile$K,lag.max = 500,main="",xlab=expression(K)) acf(x$logFile$psi,lag.max = 500,main="",xlab=expression(psi)) } if (show[5]) { par(mfcol=c(1,1)) tms.plot(2:m.results$n,nu$nu,xlab="time") } if (show[6]) { par(mfcol=c(1,2)) hist(x$logFile$K,main="",xlab=expression(K),prob=TRUE,breaks=seq(-0.5,max(x$logFile$K)+0.5,1)) hist(x$logFile$psi,main="",xlab=expression(psi),prob=TRUE,nclass=50) } if (show[7]) { par(mfcol=c(1,1)) hist(x$logFile$Znp1,main="",xlab=expression(Z[n+1]),prob=TRUE,breaks=seq(-0.5,max(x$logFile$Znp1)+0.5,1)) } } surveillance/R/hhh4_W_powerlaw.R0000644000176200001440000001310013375012563016333 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Parametric power-law specification for neighbourhood weights in hhh4() ### ### Copyright (C) 2012-2016,2018 Sebastian Meyer ### $Revision: 2244 $ ### $Date: 2018-11-20 15:08:51 +0100 (Tue, 20. Nov 2018) $ ################################################################################ ### Construct weight matrix wji according to the Zeta-distribution with respect ### to the orders of neighbourhood (in nbmat, as e.g. obtained from nbOrder()), ### optionally fulfilling rowSums(wji) = 1 ## As a formula (for j != i, otherwise wji = 0): ## wji = pzeta(oji; d, maxlag) / sum_k pzeta(ojk; d, maxlag) ## Here, oji = oij is the order of nb of i and j, ## and pzeta(o; d, m) = o^-d / sum_{r=1}^m r^-d is the Zeta-distribution ## on 1:m (also called Zipf's law). ## Special cases: maxlag >= max(nbmat) yields the weights ## wji = oji^-d / sum_k ojk^-d ## and maxlag=1 yields the classical weights wji=1/nj. zetaweights <- function (nbmat, d = 1, maxlag = max(nbmat), normalize = FALSE) { ## raw (non-normalized) zeta-distribution on 1:maxlag zeta <- c(0, seq_len(maxlag)^-d) # first 0 is for lag 0 (i.e., diag(nbmat)) ## replace order by zetaweight of that order wji <- zeta[nbmat + 1L] # results in vector wji[is.na(wji)] <- 0 # for lags > maxlag ## set dim and names dim(wji) <- dim(nbmat) dimnames(wji) <- dimnames(nbmat) if (normalize) normalizeW(wji) else wji } ### powerlaw weights ## in the non-truncated case, i.e. maxlag = max(nbmat), ## the raw powerlaw weights are defined as w_ji = o_ji^-d, o_ji >= 1 ## and with (row-)normalization we have w_ji = o_ji^-d / sum_k o_jk^-d ## from0 = TRUE results in a power-law for o_ji >= 0: w(o) = (o + 1)^-d W_powerlaw <- function (maxlag, normalize = TRUE, log = FALSE, initial = if (log) 0 else 1, from0 = FALSE) { if (missing(maxlag)) { stop("'maxlag' must be specified (e.g. maximum neighbourhood order)") ## specifying 'maxlag' in zetaweights is actually optional since it has ## the default value max(nbmat). however, repeatedly asking for this ## maximum would be really inefficient. 
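## A typical call (sketch; assumes an "sts" object 'stsObj' whose
## neighbourhood() slot contains neighbourhood orders, cf. nbOrder()):
## hhh4(stsObj, control = list(
##     ne = list(f = ~1, weights = W_powerlaw(maxlag = 5))))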
} else { stopifnot(isScalar(maxlag), maxlag >= 2 - from0) if (from0) maxlag <- maxlag + 1L } stopifnot(isScalar(initial)) ## main function which returns the weight matrix weights.call <- call("zetaweights", if (from0) quote(nbmat + 1L) else quote(nbmat), quote(d), maxlag, normalize) weights <- as.function(c(alist(d=, nbmat=, ...=), call("{", weights.call)), envir=getNamespace("surveillance")) if (log) { # the parameter d is interpreted on log-scale ## we prepend the necessary conversion d <- exp(d) body(weights) <- as.call(append(as.list(body(weights)), quote(d <- exp(d)), after=1)) } ## construct derivatives with respect to "d" (or log(d), respectively) dweights <- d2weights <- as.function(c(alist(d=, nbmat=, ...=), quote({})), envir=getNamespace("surveillance")) weights.call[[5L]] <- FALSE # normalize separately header <- c( if (log) quote(d <- exp(d)), # such that d is again on original scale substitute(Wraw <- weights.call, list(weights.call=weights.call)), if (normalize) expression( nUnits <- nrow(Wraw), norm <- .rowSums(Wraw, nUnits, nUnits) ), expression( # Wraw == 0 means o = 0 (diagonal) or o > maxlag => deriv = 0 is.na(Wraw) <- Wraw == 0, # set to NA since we will take the log logo <- -log(Wraw)/d # = log(nbmat) with NA's at Wraw == 0 ), if (normalize) quote(W <- Wraw / norm) else quote(W <- Wraw) ) footer <- expression(deriv[is.na(deriv)] <- 0, deriv) ## first derivative tmp1 <- expression( ## in surveillance < 1.9-0, 'norm' and 'tmpnorm' were based on 'nbmat', ## which is incorrect for the truncated case maxlag < max(nbmat) tmpnorm <- .rowSums(Wraw * -logo, nUnits, nUnits, na.rm=TRUE) / norm, tmp1 <- logo + tmpnorm ) deriv1 <- if (normalize) { expression(deriv <- W * -tmp1) } else expression(deriv <- W * -logo) body(dweights) <- as.call(c(as.name("{"), header, if (normalize) tmp1, deriv1, if (log) expression(deriv <- deriv * d), # this is the non-log d footer )) ## second derivative body(d2weights) <- as.call(c(as.name("{"), header, if (normalize) { c(tmp1, expression( tmp2 <- .rowSums(Wraw * logo^2, nUnits, nUnits, na.rm=TRUE) / norm - tmpnorm^2, deriv <- W * (tmp1^2 - tmp2) )) } else expression(deriv <- W * logo^2), if (log) c( do.call("substitute", list(deriv1[[1L]], list(deriv=as.name("deriv1")))), expression(deriv <- deriv * d^2 + deriv1 * d) # this is the non-log d ), footer )) ## return list of functions list(w=weights, dw=dweights, d2w=d2weights, initial=initial) } surveillance/R/stsNC.R0000644000176200001440000002537713653223255014350 0ustar liggesusers###################################################################### # initialize-method for "stsNC" objects ###################################################################### init.stsNC <- function(.Object, ..., reportingTriangle, predPMF, pi, truth, delayCDF, SR) { .Object <- callNextMethod() # use initialize,sts-method ## initialize defaults for extra stsNC-slots or check supplied values dimObserved <- dim(.Object@observed) if (missing(pi)) { .Object@pi <- array(NA_integer_, dim = c(dimObserved, 2L)) } else { dimPI <- dim(.Object@pi) if (length(dimPI) != 3 || any(dimPI != c(dimObserved, 2L))) stop("dim(pi) = (", paste0(dimPI, collapse=","), ")") } if (missing(SR)) { .Object@SR <- array(NA_real_, dim = c(nrow(.Object@observed),0L,0L)) } else { stopifnot(length(dim(.Object@SR)) == 3) } if (missing(truth)) .Object@truth <- as(.Object, "sts") return(.Object) } setMethod("initialize", "stsNC", init.stsNC) ###################################################################### # Special coerce method to account for 
consistent dimensions ###################################################################### setAs(from = "sts", to = "stsNC", function (from) { new("stsNC", from, pi = array(NA_real_, dim = c(dim(from@observed), 2L)), truth = from, SR = array(NA_real_, dim = c(nrow(from@observed), 0L, 0L))) }) ###################################################################### # plot-method for the "stsNC" class, which starts by # using the inherited method, but with some additional plotting # put into the .hookFunSpecial function. # # Parameters: # same as the for the plot method of sts objects. ###################################################################### setMethod(f="plot", signature=signature(x="stsNC", y="missing"), function (x, type = observed ~ time | unit, ...) { ## if special type "delay" (only applies for stsNC objects) if (type == "delay") { stsNC_plotDelay(x, ...) return(invisible()) } ## environment of hook function will be set to evaluation ## environment of stsplot_time1() and only then be called legend.opts <- lty <- lwd <- "accommodate tools:::.check_code_usage_in_package()" #Hook function specifically for nowcasting objects. nowcastPlotHook <- function() { #Define some colors for the plotting as well as some plot symbols color <- surveillance.options("colors") pchList <- c(nowSymbol=10) #Prolong line of last observation (this should go into the plot function idx <- nrow(x) - which.max(!is.na(rev(upperbound(x)))) + 1 #Continue line from plot - use same style as stsplot_time1 lines( idx+c(-0.5,0.5), rep(upperbound(x)[idx,],2),col=col[3],lwd=lwd[3],lty=lty[3]) #Add the prediction intervals as bars (where not NA). Conf level #is found in x@control$alpha idxt <- which(apply(x@pi[1:nrow(x),1,],1,function(x) all(!is.na(x)))) for (i in idxt) { lines( i+c(-0.3,0.3), rep(x@pi[i,,1],2),lty=1,col=color["piBars"]) lines( i+c(-0.3,0.3), rep(x@pi[i,,2],2),lty=1,col=color["piBars"]) lines( rep(i,each=2), x@pi[i,,],lty=2,col=color["piBars"]) } #Extract now date and date range of the plotting startDate <- epoch(x)[1] #Add "now" symbol on x-axis. Plotting now takes possible temporal aggregation into account. #points(x@control$now-startDate+1,0,pch=pchList["nowSymbol"],col=color["nowSymbol"],cex=1.5) points(x@control$timeDelay(startDate,x@control$now)+1,0,pch=pchList["nowSymbol"],col=color["nowSymbol"],cex=1.5) #Add this to the legend if (!is.null(legend.opts)) { legend(x="topright",c("Now"),pch=pchList["nowSymbol"],col=color["nowSymbol"],bg="white") } return(invisible()) } callNextMethod(x=x, type=type, ..., .hookFuncInheritance=nowcastPlotHook) }) ###################################### ## For plotting the delay distribution ###################################### ###################################################################### ## Convert discrete time hazards to PMF ## Parameters: ## haz - vector with entries for (0,...,Dmax) ###################################################################### haz2pmf <- function(haz) { PMF <- 0*haz for (i in 0:(length(haz)-1)) { PMF[i+1] <- haz[i+1] * (1-sum(PMF[seq(i)])) } return(PMF) } ###################################################################### # Find a quantile of a discrete random variable with support on # 0,...,D and which has a PMF given by the vector prob. We # define the q quantile as \min_{x} F(x) \geq q. 
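# For example, prob = c(0.2, 0.5, 0.3) on support 0:2 has
# cumsum(prob) = c(0.2, 0.7, 1.0), so pmfQuantile(prob, q=0.5) = 1
# since F(1) = 0.7 >= 0.5.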
# # Parameters: # prob - vector on 0,..,D containing the PMF # q - quantile to compute ###################################################################### pmfQuantile <- function(prob,q=0.5) { which.max(cumsum(prob) >= q)-1 } ###################################################################### ## Show empirical and, if available, model-based median of delay ## distribution as a function of occurrence time t. ## ## Parameters: ## nc - nowcast object ## rT.truth - reporting triangle as it would be at the end. Typically ## this is taken directly from the nc object. ## dates - vector of dates where to show the result ## w - half-width of moving window ## modelQuantiles - which model quantiles to show ###################################################################### stsNC_plotDelay <- function(nc, rT.truth=NULL, dates=NULL, w=1, modelQuantiles=0.5, epochUnit=NULL) { ##Extract reporting triangle from the nc object if (is.null(rT.truth)) { rT.truth <- reportingTriangle(nc) } ##Which dates to plot if (is.null(dates)) { dates <- epoch(nc) } ##Determine the appropriate unit of the delay if (is.null(epochUnit)) { epochUnit <- switch( as.character(nc@freq), "12" = "months", "%m" = "months", "52" = "weeks", "%V"="weeks", "%j"="days", "365" = "days") } ##Determine max delay from reporting triangle. D <- nc@control$D res <- matrix(NA, nrow=length(dates), ncol=D+1) ##which data variables are actually in rT.truth isThere <- !is.na(sapply(dates, function(date) pmatch(as.character(date),rownames(rT.truth)))) idx <- which(isThere) ##Loop over all time points. for (i in (w+min(idx)):(max(idx)-w)) { now <- dates[i] the_idx <- pmatch(as.character(now),rownames(rT.truth)) subset <- rT.truth[the_idx + c(-w:w),,drop=FALSE] res[i,] <- colSums(subset,na.rm=TRUE) / sum(subset,na.rm=TRUE) } ##A slightly modified function to determine quantiles, which can ##handle NAs (if there is no case at all) quantile <- function(q) { apply(res, 1, function(x) { if (all(is.na(x))) return(NA) else return(which.max(cumsum(x) >= q) - 1) }) } ##Find 10%, 50% and 90% quantiles quants <- sapply(c(0.1,0.5,0.9), quantile) ##Make a plot (use plot.Dates instead of matplot) plot(dates, quants[,2],xlab="Time of occurrence",ylab=paste0("Delay (",epochUnit,")"),ylim=c(0,15),col=1,lty=c(1),lwd=4,type="n") idxFirstTruncObs <- which(dates == (nc@control$now - D)) idxNow <- which(dates == nc@control$now) polygon( dates[c(idxFirstTruncObs,idxFirstTruncObs,idxNow,idxNow)], c(-1e99,1e99,1e99,-1e99), col=rgb(0.95,0.95,0.95),lwd=0.001) text( dates[round(mean(c(idxNow,idxFirstTruncObs)))], D, "right truncated\n observations",adj=c(0.5,0.5)) lines(dates, quants[,2],col=1,lty=c(1),lwd=4) matlines(dates, quants[,c(1,3)],type="l",col=1,lty=c(2,3),lwd=c(1,1)) legend_str <- c(expression(q[0.1](T)),expression(q[0.5](T)),expression(q[0.9](T))) legend_lty <- c(2,1,3) legend_col <- c(1,1,1) legend_lwd <- c(1,4,1) ##Which dates have been analysed in the nowcasts dates2show <- attr(reportingTriangle(nc),"t02s") ##Loop over all model based estimates model_CDF <- delayCDF(nc) if (length(model_CDF) > 0) { for (methodIdx in seq_len(length(model_CDF))) { ##browser() ##Fetch CDF from model (can be a vector or a matrix) theCDF <- delayCDF(nc)[[names(model_CDF)[methodIdx]]] if (!is.matrix(theCDF)) { theCDF <- matrix(theCDF, ncol=length(theCDF),nrow=length(dates2show),byrow=TRUE) } cdf <- cbind(0,theCDF) pmf <- t(apply(cdf,1,diff)) ##Determine model quantiles quants.model <- matrix(NA,
nrow=length(dates2show),ncol=length(modelQuantiles),dimnames=list(as.character(dates2show),modelQuantiles)) for (t in 1:length(dates2show)) { quants.model[t,] <- sapply(modelQuantiles, function(q) pmfQuantile( pmf[t,],q=q)) } ##Make sure the NAs in the beginning agree i <- 1 while (all(is.na(quants[i,]))) {quants.model[i,] <- NA ; i <- i + 1} legend_str <- c(legend_str,substitute(q[0.5]^methodName(T),list(methodName=names(model_CDF)[methodIdx]))) legend_lty <- c(legend_lty,3+methodIdx) legend_col <- c(legend_col,"gray") legend_lwd <- c(legend_lwd,2) ##only estimates up to 'now' are to be shown and which are within ##the moving window of m time points show <- (nc@control$now - dates2show <= nc@control$m) matlines(dates2show[show], quants.model[show,], col=tail(legend_col,n=1),lwd=ifelse(modelQuantiles==0.5,tail(legend_lwd,n=1),1),lty=ifelse(modelQuantiles==0.5,tail(legend_lty,n=1),2)) } ##Show lines for breakpoints (if available from the model) if ("bayes.trunc.ddcp" %in% names(model_CDF)) { ddcp.model <- attr(model_CDF[["bayes.trunc.ddcp"]], "model") changePoints <- as.Date(colnames(ddcp.model$W)) ## hoehle: changed, if ddcp.model contains weekend effects, these give NA dates. changePoints <- changePoints[!is.na(changePoints)] for (i in 1:length(changePoints)) { axis(1,at=changePoints[i], changePoints[i], las=1, cex.axis=0.7,line=-2.5) lines( rep(changePoints[i],2),c(0,1e99),lty=2) } } } ##Make a legend ##c(expression(q[0.1](T)),expression(q[0.5](T)),expression(q[0.9](T)),expression(q[0.5]^"ddcp"(T))) legend(x="bottomleft",legend_str,lty=legend_lty,col=legend_col,lwd=legend_lwd) ##Add title if (!is.null(nc)) { title(nc@control$now) } ##Done invisible() } surveillance/R/twinSIR_profile.R0000644000176200001440000002404714024100031016343 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### profile-method for class "twinSIR" to calculate the profile log-likelihood ### (normalized) as well as profile likelihood based confidence intervals ### ### Copyright (C) 2009 Michael Hoehle, 2014 Sebastian Meyer ### $Revision: 2662 $ ### $Date: 2021-03-16 10:53:29 +0100 (Tue, 16. Mar 2021) $ ################################################################################ ###################################################################### # Function to compute likelihood based confidence interval, basically # the two solutions to # f(\theta) = l(\theta) - l(\hat{\theta}) + 1/2 qchisq(1-alpha, df=1) = 0 # are found. # # # Parameters: # logliktilde - normalized likelihood function(theta, ...) # theta.hat - the MLE # lower - search interval [lower,theta.hat] for f=0 # upper - search interval [theta.hat,upper] for f=0 # alpha - confidence level, see Equation 2.6 in Pawitan (2003) # ... - additional arguments passed to function logliktilde ###################################################################### likelihood.ci <- function (logliktilde, theta.hat, lower, upper, alpha = 0.05, ...) { # Highest Likelihood interval -- target function f <- function(theta, ...) { logliktilde(theta, ...)
+ 1/2*qchisq(1-alpha, df=1) } # Compute upper and lower boundary numerically hl.lower <- uniroot(f, interval = c(lower, theta.hat), ...)$root hl.upper <- uniroot(f, interval = c(theta.hat, upper), ...)$root return(c(hl.lower,hl.upper)) } ###################################################################### # Function to compute estimated and profile likelihood based # confidence intervals. Heavy computations might be necessary! # #Params: # fitted - output from a fit with twinSIR # profile - list with length-4 vectors as entries - format: # c(index, lower, upper, grid size) # where index is the index in the coef vector # lower and upper are the parameter limits (can be NA) # grid size is the grid size of the equally spaced grid # between lower and upper (can be 0) # alpha - (1-alpha)*100% profile likelihood CIs are computed. # If alpha <= 0 then no CIs are computed # control - control object to use for optim in the profile loglik computations # # Returns: # list with profile loglikelihood evaluations on the grid # and highest likelihood and Wald confidence intervals ###################################################################### profile.twinSIR <- function (fitted, profile, alpha = 0.05, control = list(fnscale = -1, factr = 1e1, maxit = 100), ...) { ## Check that input is ok profile <- as.list(profile) if (length(profile) == 0L) { stop("nothing to do") } lapply(profile, function(one) { if (length(one) != 4L) { stop("each profile entry has to be of form ", "'c(index, lower, upper, grid size)'") }}) if (is.null(fitted[["model"]])) { stop("'fitted' must contain the model component") } px <- ncol(fitted$model$X) pz <- ncol(fitted$model$Z) ## Control of the optim procedure if (is.null(control[["fnscale",exact=TRUE]])) { control$fnscale <- -1 } if (is.null(control[["factr",exact=TRUE]])) { control$factr <- 1e1 } if (is.null(control[["maxit",exact=TRUE]])) { control$maxit <- 100 } ## Estimated normalized likelihood function ltildeestim <- function(thetai,i) { theta <- theta.ml theta[i] <- thetai with(fitted$model, .loglik(theta, X=X, Z=Z, survs=survs, weights=weights)) - loglik.theta.ml } ## Profile normalized likelihood function ltildeprofile <- function(thetai,i) { emptyTheta <- rep(0, length(theta.ml)) # Log-likelihood as a function of theta_{-i}, i.e. l(theta_i, theta_{-i}) with theta_i fixed ltildethetaminusi <- function(thetaminusi) { theta <- emptyTheta theta[-i] <- thetaminusi theta[i] <- thetai with(fitted$model, .loglik(theta, X=X, Z=Z, survs=survs, weights=weights)) - loglik.theta.ml } # Score function of all params except theta_i stildethetaminusi <- function(thetaminusi) { theta <- emptyTheta theta[-i] <- thetaminusi theta[i] <- thetai with(fitted$model, .score(theta, X=X, Z=Z, survs=survs, weights=weights))[-i] } # Call optim using L-BFGS-B.
For harder constraints we would need constrOptim(). lower <- if (fitted$method == "L-BFGS-B") { c(rep(0,px),rep(-Inf,pz))[-i] } else { -Inf } upper <- if (fitted$method == "L-BFGS-B") { c(rep(Inf,px),rep(Inf,pz))[-i] } else { Inf } resOthers <- tryCatch(with(fitted$model, optim(theta.ml[-i], fn = ltildethetaminusi, gr = stildethetaminusi, method = fitted$method, control = control, lower = lower, upper = upper)), warning = function(w) print(w), error = function(e) list(value=NA)) resOthers$value } ## Initialize theta.ml <- coef(fitted) loglik.theta.ml <- c(logLik(fitted)) se <- sqrt(diag(vcov(fitted))) resProfile <- list() ## Perform profile computations for all requested parameters cat("Evaluating the profile log-likelihood on a grid ...\n") for (i in 1:length(profile)) { cat("i= ",i,"/",length(profile),"\n") #Index of the parameter in the theta vector idx <- profile[[i]][1] #If no borders are given use those from Wald intervals (unconstrained) if (is.na(profile[[i]][2])) profile[[i]][2] <- theta.ml[idx] - 3*se[idx] if (is.na(profile[[i]][3])) profile[[i]][3] <- theta.ml[idx] + 3*se[idx] #Evaluate profile loglik on a grid (if requested) if (profile[[i]][4] > 0) { thetai.grid <- seq(profile[[i]][2],profile[[i]][3],length.out=profile[[i]][4]) resProfile[[i]] <- matrix(NA, nrow = length(thetai.grid), ncol = 4L, dimnames = list(NULL, c("grid","profile","estimated","wald"))) for (j in 1:length(thetai.grid)) { cat("\tj= ",j,"/",length(thetai.grid),"\n") resProfile[[i]][j,] <- c(thetai.grid[j], ltildeprofile(thetai.grid[j],idx), ltildeestim(thetai.grid[j],idx), #9 June 2009: Bug discovered by L. Held as part of paper revision, cf. Pawitan p.63 - 1/2*(1/se[idx]^2)*(thetai.grid[j] - theta.ml[idx])^2) } } } #9 June 2009. This did not work. # names(resProfile) <- names(theta.ml)[sapply(profile, function(x) x[4L]) > 0] names(resProfile) <- names(theta.ml)[sapply(profile, function(x) x[1L])] ## Profile likelihood intervals ciProfile <- matrix(NA, nrow = length(profile), ncol = 6L, dimnames = list(NULL, c("idx","hl.low","hl.up","wald.low","wald.up","mle"))) ciProfile[,"idx"] <- sapply(profile, "[", 1L) ciProfile[,"mle"] <- theta.ml[ciProfile[,"idx"]] rownames(ciProfile) <- names(theta.ml)[ciProfile[,"idx"]] if (alpha > 0) { cat("Computing profile likelihood-based confidence intervals ...\n") lower <- if (fitted$method == "L-BFGS-B") { c(rep(0,px),rep(-Inf,pz)) } else { -Inf } for (i in seq_along(profile)) { cat(i,"/", length(profile),"\n") #Index of the parameter in the theta vector idx <- profile[[i]][1] #Compute highest likelihood intervals ci.hl <- tryCatch( likelihood.ci(ltildeprofile, theta.hat = theta.ml[idx], lower = max(lower[idx], theta.ml[idx]-5*se[idx]), upper = theta.ml[idx]+5*se[idx], alpha = alpha, i = idx), warning = function(w) print(w), error = function(e) rep(NA,2)) #Wald intervals based on expected Fisher information ci.wald <- theta.ml[idx] + c(-1,1) * qnorm(1-alpha/2) * se[idx] ciProfile[i,2:5] <- c(ci.hl, ci.wald) } } res <- list(lp=resProfile, ci.hl=ciProfile, profileObj=profile) class(res) <- "profile.twinSIR" return(res) } ###################################################################### ## Plot the result of the profiler ## Parameters: ## x - the result of calling profile() on a "twinSIR" object ## which - names of selected parameters, NULL meaning all available ## conf.level - level for the horizontal line for -qchisq(,df=1)/2 ## legend - logical indicating whether to add a legend to the plot, ## or numeric vector of indexes of plots where to add the legend
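## Usage sketch (hypothetical; 'fit' is a "twinSIR" fit that kept its
## model component, e.g. on the 'hagelloch' epidata):
## prof <- profile(fit, list(c(1, NA, NA, 25)), alpha = 0.05)
## plot(prof, conf.level = 0.95)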
###################################################################### plot.profile.twinSIR <- function(x, which = NULL, conf.level = 0.95, xlab = which, ylab = "normalized log-likelihood", legend = TRUE, par.settings = list(), ...) { ## extract relevant components of 'x' lp <- x$lp[!vapply(X=x$lp, FUN=is.null, FUN.VALUE=FALSE, USE.NAMES=FALSE)] mle <- x$ci.hl[,"mle"] ## check arguments which <- if (is.null(which)) { names(lp) } else { match.arg(which, names(lp), several.ok = TRUE) } xlab <- rep_len(xlab, length(which)) if (is.logical(legend)) legend <- which(legend) if (is.list(par.settings)) { par.defaults <- list(mfrow = sort(n2mfrow(length(which))), mar = c(5,5,1,1)+.1, las = 1) par.settings <- modifyList(par.defaults, par.settings) opar <- do.call("par", par.settings) on.exit(par(opar)) } ## loop over parameters for (i in seq_along(which)) { coefname <- which[i] matplot(lp[[coefname]][,1L], lp[[coefname]][,-1L], type = "l", col = 1:3, lty = 1:3, xlab = xlab[i], ylab = ylab) if (i %in% legend) { legend(x = "bottomright", legend = c("profile","estimated","Wald"), col = 1:3, lty = 1:3) } ## some lines which help interpretation segments(x0=mle[coefname], y0=par("usr")[3L], y1=0, lty=2, col="darkgray") abline(h=-1/2*qchisq(conf.level, df=1), lty=2, col="darkgray") } } surveillance/R/epidata_plot.R0000644000176200001440000001567112420561350015747 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### The plot-method for "epidata" (via plot.summary.epidata) shows the evolution ### of the numbers of susceptible, infectious and recovered individuals. ### The extra function "stateplot" shows the event history of one individual. ### ### Copyright (C) 2008-2009, 2013-2014 Sebastian Meyer ### $Revision: 1080 $ ### $Date: 2014-10-19 00:00:08 +0200 (Sun, 19. Oct 2014) $ ################################################################################ plot.epidata <- function(x, ...) { sx <- summary(x) plot.summary.epidata(sx, ...) } plot.summary.epidata <- function (x, lty = c(2,1,3), lwd = 2, col = c("#1B9E77", "#D95F02", "#7570B3"), col.hor = col, col.vert = col, xlab = "Time", ylab = "Number of individuals", xlim = NULL, ylim = NULL, legend.opts = list(), do.axis4 = NULL, panel.first = grid(), rug.opts = list(), which.rug = c("infections", "removals", "susceptibility", "all"), ...) { counters <- x[["counters"]] type <- x[["type"]] n <- counters[1L,"nSusceptible"] m <- counters[1L,"nInfectious"] N <- n + m times <- counters[-1L,"time"] if (missing(lty)) { lty <- c(2, 1, 3 * (type %in% c("SIR","SIRS"))) } recycle3 <- function (xnam) assign(xnam, rep(get(xnam), length.out = 3), inherits = TRUE) for(varname in c("lty", "lwd", "col", "col.hor", "col.vert")) recycle3(varname) if (is.null(xlim)) { xlim <- attr(x, "timeRange") if (xlim[2] == Inf) xlim[2] <- times[length(times)] } if (is.null(ylim)) ylim <- c(0, max( (lty[1] > 0) * {if (type %in% c("SIRS", "SIS")) N else n}, (lty[2] > 0) * max(counters$nInfectious), (lty[3] > 0) * max(counters$nRemoved) )) # basic plotting frame plot(xlim, ylim, type = "n", xlab = xlab, ylab = ylab, panel.first = panel.first, ...) 
abline(h = c(0, N), col = "grey") # for real xlim in lines.stepfun (see 'dr' adjustment in plot.stepfun code) fakexlim <- c(1,2) * (xlim[2] + 2*xlim[1])/3 - c(0,xlim[1]) # this isn't nice, a user argument 'dr' in plot.stepfun would be appreciated # add #Susceptibles if (all(counters$nSusceptible == n)) { lines(x = xlim, y = c(n,n), lty = lty[1], lwd = lwd[1], col = col.hor[1], ...) } else { lines(stepfun(times, counters$nSusceptible), xlim = fakexlim, lty = lty[1], lwd = lwd[1], col.hor = col.hor[1], col.vert = col.vert[1], do.points = FALSE, ...) } # add #Infected if (all(counters$nInfectious == m)) { lines(x = xlim, y = c(m,m), lty = lty[2], lwd = lwd[2], col = col.hor[2], ...) } else { lines(stepfun(times, counters$nInfectious), xlim = fakexlim, lty = lty[2], lwd = lwd[2], col.hor = col.hor[2], col.vert = col.vert[2], do.points = FALSE, ...) } # add #Removed if (all(counters$nRemoved == 0)) { lines(x = xlim, y = c(0,0), lty = lty[3], lwd = lwd[3], col = col.hor[3], ...) } else { lines(stepfun(times, counters$nRemoved), xlim = fakexlim, lty = lty[3], lwd = lwd[3], col.hor = col.hor[3], col.vert = col.vert[3], do.points = FALSE, ...) } # add special annotations if (is.null(do.axis4)) do.axis4 <- type == "SIR" if (do.axis4) { finalvalues <- counters[nrow(counters), c("nSusceptible", "nRemoved")] axis(4, at = finalvalues[lty[c(1,3)] > 0], font = 2, ...) } if (is.list(rug.opts)) { if (is.null(rug.opts$ticksize)) rug.opts$ticksize <- 0.02 if (is.null(rug.opts$quiet)) rug.opts$quiet <- TRUE which.rug <- match.arg(which.rug) if (is.null(rug.opts$col)) rug.opts$col <- switch(which.rug, all = 1, infections = col.hor[2], removals = col.hor[3], susceptibility = col.hor[1]) rugLocations <- switch(which.rug, all = times, infections = attr(x, "eventTimes"), removals = counters$time[counters$type == "R"], susceptibility = counters$time[counters$type == "S"] ) if (length(rugLocations) > 0) { do.call(rug, c(list(x = rugLocations), rug.opts)) } } if (is.list(legend.opts)) { legend.opts <- modifyList( list(x = "topright", bty = "n", inset = c(0,0.02), legend = c("susceptible", "infectious", "removed")[lty>0], lty = lty[lty>0], lwd = lwd[lty>0], col = col.hor[lty>0]), legend.opts) do.call(legend, legend.opts) } invisible(as.matrix( counters[c("time", "nSusceptible", "nInfectious", "nRemoved")] )) } ################################################################################ # PLOT THE STATE CHANGES OF ONE INDIVIDUAL OF "epidata" # ... will be passed to the plot function (stepfun or curve), # e.g. add, xlim, ylim, main, xlab, ylab, ... ################################################################################ stateplot <- function(x, id, ...) { sx <- getSummary(x, class = "epidata") .id <- as.character(id) if (length(.id) != 1) { stop ("'id' must have length 1") } initiallyInfected <- sx[["initiallyInfected"]] if (! 
.id %in% levels(initiallyInfected)) { stop ("invalid 'id', does not exist in 'x'") } isInitiallyInfected <- .id %in% initiallyInfected counters <- sx[["counters"]] states <- levels(counters[["type"]]) path <- counters[which(counters$id == .id), c("time", "type")] # remove pseudo-R-events, which come before S-event directSevents <- which(duplicated(path[["time"]])) path_noPseudoR <- if (length(directSevents)) { path[-(directSevents-1), ] } else { path } pathfunc <- if (nrow(path_noPseudoR) > 0) { stepfun( x = path_noPseudoR[["time"]], y = c(1+isInitiallyInfected, unclass(path_noPseudoR[["type"]])), right = FALSE ) } else { function(t) rep(1+isInitiallyInfected, length(t)) } # plot it dotargs <- list(...) nms <- names(dotargs) if(! "xlab" %in% nms) dotargs$xlab <- "time" if(! "ylab" %in% nms) dotargs$ylab <- "state" if(! "main" %in% nms) dotargs$main <- "" if(! "xlim" %in% nms) dotargs$xlim <- attr(sx, "timeRange") if(! "xaxs" %in% nms) dotargs$xaxs <- "i" if(! "do.points" %in% nms && inherits(pathfunc, "stepfun")) { dotargs$do.points <- FALSE } do.call("plot", args = c(list(x = pathfunc, yaxt = "n"), dotargs)) axis(2, at = seq_along(states), labels = states) invisible(pathfunc) } surveillance/R/twinstim_tiaf_exponential.R0000644000176200001440000000523113165704240020564 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Exponential temporal interaction function g(t) = exp(-alpha*t) ### ### Copyright (C) 2009-2014,2017 Sebastian Meyer ### $Revision: 1994 $ ### $Date: 2017-10-06 15:44:00 +0200 (Fri, 06. Oct 2017) $ ################################################################################ ## nTypes: determines the number of parameters of the Exponential kernel. ## In a multitype epidemic, the different types may share ## the same temporal interaction function (type-invariant), in which case ## nTypes=1. Otherwise nTypes should equal the number of event types of the ## epidemic, in which case every type has its own alpha. 
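##
## Hedged usage sketch (not run; illustrative only): the constructor below is
## meant to be passed as the 'tiaf' argument of twinstim(), e.g.
##   fit <- twinstim(endemic = ~1, epidemic = ~1, tiaf = tiaf.exponential(),
##                   data = imdepi)
## where 'imdepi' stands for an "epidataCS" object and the formulas are mere
## placeholders, not recommendations.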
tiaf.exponential <- function (nTypes = 1, validpars = NULL) { nTypes <- as.integer(nTypes) stopifnot(length(nTypes) == 1L, nTypes > 0L) ## function definitions for nTypes = 1 (length(alpha) == 1) g <- function (t, alpha, types) { exp(-alpha*t) } G <- function (t, alpha, types) { if (alpha==0) t else -exp(-alpha*t)/alpha } deriv <- function (t, alpha, types) { as.matrix( -t*exp(-alpha*t) ) } Deriv <- function (t, alpha, types) { as.matrix( if (alpha==0) -t^2/2 else (t+1/alpha)*exp(-alpha*t)/alpha ) } ## adaptions for nTypes > 1 if (nTypes > 1) { ## time points vector t, length(types) = length(t) body(g) <- as.call(append(as.list(body(g)), quote(alpha <- alpha[types]), after=1)) body(G) <- quote({ alpha <- alpha[types] ifelse (alpha==0, t, -exp(-alpha*t)/alpha) }) body(deriv) <- quote({ L <- length(t) deriv <- matrix(0, L, length(alpha)) alpha <- alpha[types] deriv[cbind(1:L,types)] <- -t*exp(-alpha*t) deriv }) body(Deriv) <- quote({ L <- length(t) Deriv <- matrix(0, L, length(alpha)) alpha <- alpha[types] Deriv[cbind(1:L,types)] <- ifelse(alpha==0, -t^2/2, (t+1/alpha)*exp(-alpha*t)/alpha) Deriv }) } ## functions only need the base environment environment(g) <- environment(G) <- environment(deriv) <- environment(Deriv) <- baseenv() ## return the kernel specification list(g=g, G=G, deriv=deriv, Deriv=Deriv, npars=nTypes, validpars=validpars) } surveillance/R/hcl.colors.R0000644000176200001440000000205314024100031015321 0ustar liggesusers################################################################################ ### Generate a color palette via the colorspace package ### ### Copyright (C) 2007 Michael Hoehle, 2012-2014,2017,2019,2021 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ################################################################################ .hcl.colors <- function (ncolors=100, use.color=TRUE) { GYR <- if (requireNamespace("colorspace", quietly=TRUE)) { ## the Zeil-ice colors colorspace::heat_hcl(ncolors, h=c(0,120), c.=if (use.color) c(90,30) else c(0,0), l=c(50,90), power=c(0.75, 1.2)) } else if (use.color) { grDevices::hcl.colors(n = ncolors, palette = "Heat 2") ## this is the same as colorspace::heat_hcl(ncolors) } else { grey.colors(ncolors) } return(rev(GYR)) } surveillance/R/twinstim_siaf_polyCub_iso.R0000644000176200001440000001022414013521730020514 0ustar liggesusers################################################################################ ### C-Level Cubature of "siaf" over Polygonal Domains using 'polyCub_iso' ### ### Copyright (C) 2017,2020,2021 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at https://www.R-project.org/Licenses/. ################################################################################ ### construct a call using either .polyCub.iso or its C-version .call.polyCub.iso <- function (intrfr_name, engine = "C") { if (engine == "C") { call("siaf_polyCub_iso", quote(polydomain$bdry), intrfr_name, quote(siafpars), quote(list(...))) } else { call(".polyCub.iso", quote(polydomain$bdry), as.name(intrfr_name), quote(siafpars), center = c(0,0), control = quote(list(...))) } } ## construct siaf$F function siaf_F_polyCub_iso <- function (intrfr_name, engine = "C") { F <- function (polydomain, f, siafpars, type, ...) 
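    ## (intentionally empty body: the cubature call is spliced in below
    ## via body(F) <- .call.polyCub.iso(intrfr_name, engine))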
{} body(F) <- .call.polyCub.iso(intrfr_name, engine) environment(F) <- getNamespace("surveillance") return(F) } ## construct siaf$Deriv function siaf_Deriv_polyCub_iso <- function (intrfr_names, engine = "C") { Deriv <- function (polydomain, deriv, siafpars, type, ...) {} res_names <- paste0("res", seq_along(intrfr_names)) calls <- mapply( FUN = function (intrfr_name, res_name) call("<-", as.name(res_name), .call.polyCub.iso(intrfr_name, engine)), intrfr_name = intrfr_names, res_name = res_names, SIMPLIFY = FALSE, USE.NAMES = FALSE ) result <- as.call(c(as.name("c"), lapply(res_names, as.name))) body(Deriv) <- as.call(c(as.name("{"), calls, result)) environment(Deriv) <- getNamespace("surveillance") return(Deriv) } ## 'polys' is a list of polygons in the form of owin$bdry ## 'intrfr_name' identifies the function used in the integrand ## 'pars' is a vector of parameters for "intrfr" siaf_polyCub_iso <- function (polys, intrfr_name, pars, control = list()) { ## default control arguments for polyCub_iso / Rdqags ## similar to args(stats::integrate) control <- modifyList( list(subdivisions = 100L, rel.tol = .Machine$double.eps^0.25, stop.on.error = TRUE), control) if (is.null(control[["abs.tol"]])) control$abs.tol <- control$rel.tol ## integrate over each polygon ints <- lapply(X = polys, FUN = siaf_polyCub1_iso, intrfr_code = INTRFR_CODE[intrfr_name], pars = pars, subdivisions = control$subdivisions, rel.tol = control$rel.tol, abs.tol = control$abs.tol, stop.on.error = control$stop.on.error) sum(unlist(ints, recursive = FALSE, use.names = FALSE)) } ## 'xypoly' is a list(x, y) of vertex coordinates (open) siaf_polyCub1_iso <- function (xypoly, intrfr_code, pars, subdivisions = 100L, rel.tol = .Machine$double.eps^0.25, abs.tol = rel.tol, stop.on.error = TRUE) { if (length(xypoly[["y"]]) != (L <- length(xypoly[["x"]]))) stop("xypoly$x and xypoly$y must have equal length") .C(C_siaf_polyCub1_iso, as.double(xypoly$x), as.double(xypoly$y), as.integer(L), as.integer(intrfr_code), as.double(pars), as.integer(subdivisions), as.double(abs.tol), as.double(rel.tol), as.integer(stop.on.error), value = double(1L), abserr = double(1L), neval = integer(1L) )$value } ## integer codes are used to select the corresponding C-routine, ## see ../src/twinstim_siaf_polyCub_iso.c INTRFR_CODE <- c( "intrfr.powerlaw" = 10L, "intrfr.powerlaw.dlogsigma" = 11L, "intrfr.powerlaw.dlogd" = 12L, "intrfr.student" = 20L, "intrfr.student.dlogsigma" = 21L, "intrfr.student.dlogd" = 22L, "intrfr.powerlawL" = 30L, "intrfr.powerlawL.dlogsigma" = 31L, "intrfr.powerlawL.dlogd" = 32L, "intrfr.gaussian" = 40L, "intrfr.gaussian.dlogsigma" = 41L, "intrfr.exponential" = 50L, "intrfr.exponential.dlogsigma" = 51L ) surveillance/R/hhh4_W_np.R0000644000176200001440000001577413375534315015137 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Non-parametric specification of neighbourhood weights in hhh4() ### ### Copyright (C) 2014,2018 Sebastian Meyer ### $Revision: 2246 $ ### $Date: 2018-11-22 15:10:21 +0100 (Thu, 22. 
Nov 2018) $ ################################################################################ ### non-parametric estimation of weight function, i.e., provide each ### neighbourhood order (including 0 if from0=TRUE) up to 'maxlag' with its ### own (unconstrained) weight. For identifiability: ### - lowest order is fixed to weight=1 ### - usually maxlag < max(nborder) (since only few pairs with highest orders), ### and 'truncate' indicates if there should be zero weight for orders above ### 'maxlag' (default), or the same as for order 'maxlag' ### Thus, if from0, the parameters refer to lags 1:maxlag, otherwise 2:maxlag W_np <- function (maxlag, truncate = TRUE, normalize = TRUE, initial = log(zetaweights(2:(maxlag+from0))), from0 = FALSE, to0 = truncate) # 'to0' has been renamed to 'truncate' { if (missing(maxlag)) { stop("'maxlag' must be specified (usually < max. neighbourhood order)") } else { stopifnot(isScalar(maxlag), maxlag >= 2 - from0) # at least one parameter } stopifnot(is.vector(initial, mode = "numeric"), length(initial) == maxlag + from0 - 1) if (!missing(to0)) { .Deprecated(msg = "argument 'to0' has been renamed; use 'truncate'") truncate <- to0 } ## auxiliary expression used in 'dw' and 'd2w' below indicatormatrixExpr <- if (truncate) { quote(nbmat==nbOrder) } else { if (from0) { # maxlag = npars quote(if(nbOrder==npars) nbmat>=nbOrder else nbmat==nbOrder) } else { # maxlag = 1 + npars quote(if(nbOrder==1L+npars) nbmat>=nbOrder else nbmat==nbOrder) } } ## weights as a function of parameters and a matrix of neighbourhood orders w <- function (logweights, nbmat, ...) {} body(w) <- substitute( { weights <- exp(logweights) # values for orders (2-from0):maxlag npars <- length(weights) W <- .WEIGHTS[1L+nbmat] # substituted depending on 'from0' ## repeat last coefficient for higher orders without separate estimate W[is.na(W)] <- .HOWEIGHT # substituted depending on 'truncate' dim(W) <- dimW <- dim(nbmat) # nUnits x nUnits dimnames(W) <- dimnames(nbmat) .RETVAL # substituted depending on 'normalize' }, list( .WEIGHTS = if (from0) quote(c(1, weights)) else quote(c(0, 1, weights)), .HOWEIGHT = if (truncate) 0 else quote(weights[npars]), .RETVAL = if (normalize) quote(W / (norm <- .rowSums(W, dimW[1L], dimW[2L]))) else quote(W) )) ## version of w with assignment of its return value (for use in normalized ## versions of dw and d2w) .w <- w body(.w)[[length(body(.w))]] <- substitute(Wnorm <- x, list(x=body(.w)[[length(body(.w))]])) ## derivative of w(logweights) -> a list of matrices (one for each param.) if (normalize) { dw <- .w ## append code to calculate first derivatives body(dw) <- as.call(c(as.list(body(dw)), eval(substitute( expression( FUN <- function (nbOrder, weight) { ind <- .INDICATORMATRIX (ind - Wnorm*.rowSums(ind,dimW[1L],dimW[2L])) * weight/norm }, mapply(FUN, .LAGS, weights, SIMPLIFY=FALSE, USE.NAMES=FALSE) ), list(.INDICATORMATRIX = indicatormatrixExpr, .LAGS = if (from0) quote(seq_len(npars)) else quote(1L + seq_len(npars))) )))) } else { dw <- function (logweights, nbmat, ...) 
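        ## (intentionally empty body, assigned below via body(dw) <- substitute(...))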
{} body(dw) <- substitute( { weights <- exp(logweights) npars <- length(weights) FUN <- function (nbOrder, weight) weight * (.INDICATORMATRIX) mapply(FUN, .LAGS, weights, SIMPLIFY=FALSE, USE.NAMES=FALSE) }, list(.INDICATORMATRIX = indicatormatrixExpr, .LAGS = if (from0) quote(seq_len(npars)) else quote(1L + seq_len(npars)))) } ## result of d2w must be a list of matrices of length npars*(npars+1L)/2L if (normalize) { d2w <- .w body(d2w) <- as.call(c(as.list(body(d2w)), eval(substitute( expression( seqnpars <- seq_len(npars), inds <- lapply(.LAGS, function (nbOrder) { ind <- .INDICATORMATRIX indrs <- .rowSums(ind, dimW[1L], dimW[2L]) list(indterm = ind - Wnorm * indrs, indrs = indrs) }), k <- rep.int(seqnpars, npars), # row index l <- rep.int(seqnpars, rep.int(npars,npars)), # column index ##<- 12x faster than expand.grid(seqnpars,seqnpars) lowertri <- k >= l, ##<- and 2.5x faster than ##kl <- which(lower.tri(matrix(,npars,npars), diag=TRUE), arr.ind=TRUE) norm2 <- norm^2, mapply(function (k, l) weights[k] / norm2 * if (k==l) { inds[[k]][[1L]] * (norm - 2*weights[k]*inds[[k]][[2L]]) } else { -weights[l] * (inds[[k]][[1L]] * inds[[l]][[2L]] + inds[[l]][[1L]] * inds[[k]][[2L]]) }, k[lowertri], l[lowertri], # inds[k[lowertri]], inds[l[lowertri]], SIMPLIFY=FALSE, USE.NAMES=FALSE) ), list(.INDICATORMATRIX = indicatormatrixExpr, .LAGS = if (from0) quote(seqnpars) else quote(1L + seqnpars)) )))) } else { # for k=k', second derivative = first derivative, otherwise 0 d2w <- dw if (length(initial) > 1) { ## add assignment for the return value of dw body(d2w)[[length(body(d2w))]] <- substitute(dW <- x, list(x=body(d2w)[[length(body(d2w))]])) ## append code to generate the list of second derivatives body(d2w) <- as.call(c(as.list(body(d2w)), expression( d2wlength <- (npars^2+npars)/2, ## indices of diagonal elements in x[lower.tri(x,diag=TRUE)] d2wdiag <- c(1L,1L+cumsum(seq.int(npars,2L))), d2wlist <- rep.int(list(0*nbmat), d2wlength), d2wlist[d2wdiag] <- dW, d2wlist ))) } } ## Done environment(w) <- environment(dw) <- environment(d2w) <- .GlobalEnv list(w = w, dw = dw, d2w = d2w, initial = initial) } surveillance/R/twinstim.R0000644000176200001440000016627513514362332015173 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Maximum Likelihood inference for the two-component spatio-temporal intensity ### model described in Meyer et al (2012), DOI: 10.1111/j.1541-0420.2011.01684.x ### ### Copyright (C) 2009-2019 Sebastian Meyer ### $Revision: 2460 $ ### $Date: 2019-07-19 17:42:18 +0200 (Fri, 19. Jul 2019) $ ################################################################################ ## model.frame() evaluates 'subset' and '...' 
with 'data' utils::globalVariables(c("tile", "type", "BLOCK", ".obsInfLength", ".bdist", "area")) twinstim <- function ( endemic, epidemic, siaf, tiaf, qmatrix = data$qmatrix, data, subset, t0 = data$stgrid$start[1], T = tail(data$stgrid$stop,1), na.action = na.fail, start = NULL, partial = FALSE, epilink = "log", control.siaf = list(F=list(), Deriv=list()), optim.args = list(), finetune = FALSE, model = FALSE, cumCIF = FALSE, cumCIF.pb = interactive(), cores = 1, verbose = TRUE ) { #################### ### Preparations ### #################### ptm <- proc.time() cl <- match.call() partial <- as.logical(partial) finetune <- if (partial) FALSE else as.logical(finetune) ## (inverse) link function for the epidemic linear predictor of event marks epilink <- match.arg(epilink, choices = c("log", "identity")) epilinkinv <- switch(epilink, "log" = exp, "identity" = identity) ## Clean the model environment when exiting the function on.exit(suppressWarnings(rm(cl, cumCIF, cumCIF.pb, data, doHessian, eventsData, finetune, neghess, fisherinfo, fit, fixed, functions, globalEndemicIntercept, inmfe, initpars, ll, negll, loglik, msgConvergence, msgNotConverged, mfe, mfhEvents, mfhGrid, model, my.na.action, na.action, namesOptimUser, namesOptimArgs, nlminbControl, nlminbRes, nlmObjective, nlmControl, nlmRes, nmRes, optim.args, optimArgs, control.siaf, optimMethod, optimRes, optimRes1, optimValid, origenv.endemic, origenv.epidemic, partial, partialloglik, ptm, qmatrix, res, negsc, score, start, subset, tmpexpr, typeSpecificEndemicIntercept, useScore, verbose, whichfixed, inherits = FALSE))) ## also set fixed[st]iafpars to FALSE (for free posteriori evaluations, and ## to be defined for score function evaluation with optim.args=NULL) on.exit(fixedsiafpars <- fixedtiafpars <- FALSE, add = TRUE) ### Verify that 'data' inherits from "epidataCS" if (!inherits(data, "epidataCS")) { stop("'data' must inherit from class \"epidataCS\"") } ### Check time range if (!isScalar(t0) || !isScalar(T)) { stop("endpoints 't0' and 'T' must be single numbers") } if (T <= t0) { stop("'T' must be greater than 't0'") } if (!t0 %in% data$stgrid$start) { justBeforet0 <- match(TRUE, data$stgrid$start > t0) - 1L # if 't0' is beyond the time range covered by 'data$stgrid' if (is.na(justBeforet0)) justBeforet0 <- length(data$stgrid$start) # t0 was too big if (justBeforet0 == 0L) justBeforet0 <- 1L # t0 was too small t0 <- data$stgrid$start[justBeforet0] warning("replaced 't0' by the value ", t0, " (must be a 'start' time of 'data$stgrid')") } if (!T %in% data$stgrid$stop) { justAfterT <- match(TRUE, data$stgrid$stop > T) # if 'T' is beyond the time range covered by 'data$stgrid' if (is.na(justAfterT)) justAfterT <- length(data$stgrid$stop) # T was too big T <- data$stgrid$stop[justAfterT] warning("replaced 'T' by the value ", T, " (must be a 'stop' time of 'data$stgrid')") } ### Subset events eventsData <- if (missing(subset)) data$events@data else { do.call("subset.data.frame", args = list( x = quote(data$events@data), subset = cl$subset, drop = FALSE )) } ############################################################# ### Build up a model.frame for both components separately ### ############################################################# ########################## ### epidemic component ### ########################## ### Parse epidemic formula if (missing(epidemic)) { origenv.epidemic <- parent.frame() epidemic <- ~ 0 } else { origenv.epidemic <- environment(epidemic) environment(epidemic) <- environment() ## such that t0 and T are 
found in the subset expression below } epidemic <- terms(epidemic, data = eventsData, keep.order = TRUE) if (!is.null(attr(epidemic, "offset"))) { warning("offsets are not implemented for the 'epidemic' component") } ### Generate model frame # na.action mod such that for simulated epidataCS, where events of the # prehistory have missing 'BLOCK' indexes, those NA's do not matter. # ok because actually, 'eventBlocks' are only used in the partial likelihood # and there only eventBlocks[includes] is used (i.e. no prehistory events) my.na.action <- function (object, ...) { prehistevents <- row.names(object)[object[["(time)"]] <= t0] if (length(prehistevents) == 0L) return(na.action(object, ...)) origprehistblocks <- object[prehistevents, "(BLOCK)"] # all NA object[prehistevents, "(BLOCK)"] <- 0L # temporary set non-NA xx <- na.action(object, ...) xx[match(prehistevents,row.names(xx),nomatch=0L), "(BLOCK)"] <- origprehistblocks[prehistevents %in% row.names(xx)] xx } mfe <- model.frame(epidemic, data = eventsData, subset = time + eps.t > t0 & time <= T, # here we can have some additional rows (individuals) compared to mfhEvents, which is established below! # Namely those with time in (t0-eps.t; t0], i.e. still infective individuals, which are part of the prehistory of the process na.action = my.na.action, # since R 2.10.0 patched also works with epidemic = ~1 and na.action=na.fail (see PR#14066) drop.unused.levels = FALSE, time = time, tile = tile, type = type, eps.t = eps.t, eps.s = eps.s, BLOCK = BLOCK, obsInfLength = .obsInfLength, bdist = .bdist) ### Extract essential information from model frame # 'inmfe' indexes rows of data$events@data and is necessary for subsetting # influenceRegion (list incompatible with model.frame) and coordinates. # Note: model.frame() takes row.names from data inmfe <- which(row.names(data$events@data) %in% row.names(mfe)) N <- length(inmfe) # mfe also contains events of the prehistory eventTimes <- mfe[["(time)"]] # I don't use model.extract since it returns named vectors # Indicate events after t0, which are actually part of the process # (events in (-Inf;t0] only contribute in sum over infected individuals) includes <- which(eventTimes > t0) # this indexes mfe! 
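    # Note: events with time in (t0-eps.t, t0] remain in 'mfe' as prehistory;
    # they can act as infection sources but do not contribute to the dN part.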
Nin <- length(includes) if (Nin == 0L) { stop("none of the ", nrow(data$events@data), " supplied ", "events is in the model (check 'subset', 't0' and 'T')") } eventBlocks <- mfe[["(BLOCK)"]] # only necessary for partial log-likelihood eventTypes <- factor(mfe[["(type)"]]) # drop unused levels typeNames <- levels(eventTypes) nTypes <- length(typeNames) if (verbose && nTypes > 1L) cat("marked point pattern of", nTypes, "types\n") qmatrix <- checkQ(qmatrix, typeNames) # we only need the integer codes for the calculations eventTypes <- as.integer(eventTypes) ### Generate model matrix mme <- model.matrix(epidemic, mfe) xlevels_epidemic <- .getXlevels(epidemic, mfe) q <- ncol(mme) hase <- q > 0L ### Extract further model components (only if q > 0) if (hase) { eps.t <- mfe[["(eps.t)"]] removalTimes <- eventTimes + eps.t eps.s <- mfe[["(eps.s)"]] bdist <- mfe[["(bdist)"]] gIntUpper <- mfe[["(obsInfLength)"]] gIntLower <- pmax(0, t0-eventTimes) eventCoords <- coordinates(data$events)[inmfe,,drop=FALSE] influenceRegion <- data$events@data$.influenceRegion[inmfe] iRareas <- vapply(X = influenceRegion, FUN = attr, which = "area", FUN.VALUE = 0, USE.NAMES = FALSE) eventSources <- if (N == nobs(data) && identical(qmatrix, data$qmatrix)) { data$events@data$.sources } else { # re-determine because subsetting has invalidated row indexes if (verbose) cat("updating list of potential sources ...\n") determineSources(eventTimes = eventTimes, eps.t = eps.t, eventCoords = eventCoords, eps.s = eps.s, eventTypes = eventTypes, qmatrix = qmatrix) } ## calculate sum_{k=1}^K q_{kappa_j,k} for all j = 1:N qSum <- unname(rowSums(qmatrix)[eventTypes]) # N-vector } else if (verbose) { message("no epidemic component in model") } ### Drop "terms" and restore original formula environment epidemic <- formula(epidemic) if (epilink != "log") # set as attribute only if non-standard link function attr(epidemic, "link") <- epilink environment(epidemic) <- origenv.epidemic ## We keep the original formula environment since it will be used to ## evaluate the modified twinstim-call in drop1/add1 (with default ## enclos=baseenv()), and cl$data should be visible from there. ## Alternatively, we could set it to parent.frame(). ######################### ### endemic component ### ######################### ### Parse endemic formula if (missing(endemic)) { origenv.endemic <- parent.frame() endemic <- ~ 0 } else { origenv.endemic <- environment(endemic) environment(endemic) <- environment() ## such that t0 and T are found in the subset expressions below } endemic <- terms(endemic, data = data$stgrid, keep.order = TRUE) ## check for type-specific endemic intercept and remove it from the formula ## (will be handled separately) typeSpecificEndemicIntercept <- "1 | type" %in% attr(endemic, "term.labels") if (typeSpecificEndemicIntercept) { endemic <- update.formula(endemic, ~ . 
- (1|type)) # this drops the terms attributes endemic <- terms(endemic, data = data$stgrid, keep.order = TRUE) } globalEndemicIntercept <- if (typeSpecificEndemicIntercept) { attr(endemic, "intercept") <- 1L # we need this to ensure that we have correct contrasts FALSE } else attr(endemic, "intercept") == 1L nbeta0 <- globalEndemicIntercept + typeSpecificEndemicIntercept * nTypes ### Generate endemic model frame and model matrix on event data mfhEvents <- model.frame(endemic, data = eventsData[row.names(mfe),], subset = time>t0 & time<=T, na.action = na.fail, # since R 2.10.0 patched also works with # endemic = ~1 (see PR#14066) drop.unused.levels = FALSE) mmhEvents <- model.matrix(endemic, mfhEvents) xlevels_endemic <- .getXlevels(endemic, mfhEvents) # exclude intercept from endemic model matrix below, will be treated separately if (nbeta0 > 0) mmhEvents <- mmhEvents[,-1,drop=FALSE] #stopifnot(nrow(mmhEvents) == Nin) p <- ncol(mmhEvents) hash <- (nbeta0+p) > 0L ### Generate model frame and model matrix on grid data (only if p > 0) if (hash) { offsetEvents <- model.offset(mfhEvents) mfhGrid <- model.frame(endemic, data = data$stgrid, subset = start >= t0 & stop <= T, na.action = na.fail, # since R 2.10.0 patched also works with # endemic = ~1 (see PR#14066) drop.unused.levels = FALSE, BLOCK=BLOCK, tile=tile, dt=stop-start, ds=area) # 'tile' is redundant here for fitting but useful # for debugging & necessary for intensityplots gridBlocks <- mfhGrid[["(BLOCK)"]] histIntervals <- data$stgrid[!duplicated.default( data$stgrid$BLOCK, nmax = data$stgrid$BLOCK[length(data$stgrid$BLOCK)] ), c("BLOCK", "start", "stop")] # sorted row.names(histIntervals) <- NULL histIntervals <- histIntervals[histIntervals$start >= t0 & histIntervals$stop <= T,] gridTiles <- mfhGrid[["(tile)"]] # only needed for intensityplot mmhGrid <- model.matrix(endemic, mfhGrid) nGrid <- nrow(mmhGrid) # exclude intercept from endemic model matrix below, will be treated separately if (nbeta0 > 0) mmhGrid <- mmhGrid[,-1,drop=FALSE] # Extract endemic model components offsetGrid <- model.offset(mfhGrid) dt <- mfhGrid[["(dt)"]] ds <- mfhGrid[["(ds)"]] ## expression to calculate the endemic part on the grid -> .hIntTW() if (p > 0L) { hGridExpr <- quote(drop(mmhGrid %*% beta)) if (!is.null(offsetGrid)) hGridExpr <- call("+", quote(offsetGrid), hGridExpr) } else { hGridExpr <- if (is.null(offsetGrid)) quote(numeric(nGrid)) else quote(offsetGrid) } hGridExpr <- call("exp", hGridExpr) ## expression to calculate the endemic part for the events -> .hEvents() hEventsExpr <- if (p > 0L) { quote(drop(mmhEvents %*% beta)) } else { quote(numeric(Nin)) } if (nbeta0 == 1L) { # global intercept hEventsExpr <- call("+", quote(beta0), hEventsExpr) } else if (nbeta0 > 1L) { # type-specific intercept hEventsExpr <- call("+", quote(beta0[eventTypes[includes]]), hEventsExpr) } if (!is.null(offsetEvents)) hEventsExpr <- call("+", quote(offsetEvents), hEventsExpr) hEventsExpr <- call("exp", hEventsExpr) } else if (verbose) message("no endemic component in model") ### Drop "terms" and restore original formula environment endemic <- if (typeSpecificEndemicIntercept) { ## re-add it to the endemic formula update.formula(formula(endemic), ~ (1|type) + .) } else formula(endemic) environment(endemic) <- origenv.endemic ## We keep the original formula environment since it will be used to ## evaluate the modified twinstim-call in drop1/add1 (with default ## enclos=baseenv()), and cl$data should be visible from there. 
## Alternatively, we could set it to parent.frame(). ### Stop if model is degenerate if (!hash) { if (hase) { if (nEventsWithoutSources <- sum(lengths(eventSources[includes]) == 0)) stop("found ", nEventsWithoutSources, " events without .sources ", "(impossible in a purely epidemic model)") } else { stop("nothing to do: neither endemic nor epidemic parts were specified") } } ############################# ### Interaction functions ### ############################# if (hase) { ## Check interaction functions siaf <- do.call(".parseiaf", args = alist(siaf, "siaf", eps.s, verbose)) constantsiaf <- attr(siaf, "constant") nsiafpars <- siaf$npars tiaf <- do.call(".parseiaf", args = alist(tiaf, "tiaf", eps.t, verbose)) constanttiaf <- attr(tiaf, "constant") ntiafpars <- tiaf$npars ## Check control.siaf if (constantsiaf) { control.siaf <- NULL } else if (is.list(control.siaf)) { if (!is.null(control.siaf$F)) stopifnot(is.list(control.siaf$F)) if (!is.null(control.siaf$Deriv)) stopifnot(is.list(control.siaf$Deriv)) } else if (!is.null(control.siaf)) { stop("'control.siaf' must be a list or NULL") } ## should we compute siafInt in parallel? useParallel <- cores > 1L && requireNamespace("parallel") ## but do not parallelize for a memoised siaf.step (becomes slower) if (useParallel && !is.null(attr(siaf, "knots")) && !is.null(attr(siaf, "maxRange")) && requireNamespace("memoise", quietly = TRUE) && memoise::is.memoised(environment(siaf$f)$ringAreas)) { cores <- 1L useParallel <- FALSE } ## Define function that integrates the 'tiaf' function .tiafInt <- .tiafIntFUN() ## Define function that integrates the two-dimensional 'siaf' function ## over the influence regions of the events ..siafInt <- if (is.null(control.siaf[["siafInt"]])) { .siafInt <- .siafIntFUN(siaf = siaf, noCircularIR = all(eps.s > bdist), parallel = useParallel) ## Memoisation of .siafInt if (!constantsiaf && requireNamespace("memoise")) { memoise::memoise(.siafInt) ## => speed-up optimization since 'nlminb' evaluates the loglik and ## score for the same set of parameters at the end of each iteration } else { if (!constantsiaf && verbose) message("Continuing without memoisation of 'siaf$f' cubature ...") .siafInt } } else { ## predefined cubature results in epitest(..., fixed = TRUE), ## where siafInt is identical during all permutations (only permuted) stopifnot(is.vector(control.siaf[["siafInt"]], mode = "numeric"), length(control.siaf[["siafInt"]]) == N) local({ env <- new.env(hash = FALSE, parent = .GlobalEnv) env$siafInt <- control.siaf[["siafInt"]] as.function(alist(siafpars=, ...=, siafInt), envir = env) }) } .siafInt.args <- c(alist(siafpars), control.siaf$F) } else { if (!missing(siaf) && !is.null(siaf)) warning("'siaf' can only be modelled in conjunction with an 'epidemic' process") if (!missing(tiaf) && !is.null(tiaf)) warning("'tiaf' can only be modelled in conjunction with an 'epidemic' process") siaf <- tiaf <- NULL nsiafpars <- ntiafpars <- 0L control.siaf <- NULL } hassiafpars <- nsiafpars > 0L hastiafpars <- ntiafpars > 0L ## Can we calculate the score function? 
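    ## (requires analytical derivatives: 'siaf$deriv' if there are siaf
    ## parameters, and 'tiaf$deriv' + 'tiaf$Deriv' if there are tiaf
    ## parameters; the partial likelihood never uses an analytical score)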
useScore <- if (partial) FALSE else if (hase) { (!hassiafpars | !is.null(siaf$deriv)) & (!hastiafpars | (!is.null(tiaf$deriv)) & !is.null(tiaf$Deriv)) } else TRUE ## Define function that applies siaf$Deriv on all events (integrate the ## two-dimensional siaf$deriv function) if (useScore && hassiafpars) { .siafDeriv <- mapplyFUN( c(alist(siaf$Deriv, influenceRegion, type=eventTypes), list(MoreArgs=quote(list(siaf$deriv, siafpars, ...)), SIMPLIFY=TRUE, USE.NAMES=FALSE)), ##<- we explicitly quote() the ...-part instead of simply including ## it in the above alist() - only to make checkUsage() happy ## depending on nsiafpars, mapply() will return an N-vector ## or a nsiafpars x N matrix => transform to N x nsiafpars: after = quote(if (is.matrix(res)) t(res) else as.matrix(res)), parallel = useParallel) .siafDeriv.args <- c(alist(siafpars), control.siaf$Deriv) } ############################################################################ ### Log-likelihood function, score function, expected Fisher information ### ############################################################################ ### Total number of parameters (= length of 'theta') npars <- nbeta0 + p + q + nsiafpars + ntiafpars # REMINDER: # theta - parameter vector c(beta0, beta, gamma, siafpars, tiafpars), where # beta0 - endemic intercept (maybe type-specific) # beta - other parameters of the endemic component exp(offset + eta_h(t,s)) # gamma - coefficients of the epidemic predictor # siafpars- parameters of the epidemic spatial interaction function # tiafpars- parameters of the epidemic temporal interaction function # mmh[Events/Grid] - model matrix related to beta, i.e the endemic component, # either for events only or for the whole spatio-temporal grid # offset[Events/Grid] - offset vector related to the endemic component (can be NULL), # either for events only or for the whole spatio-temporal grid # dt, ds - columns of the spatio-temporal grid (dt = stop-start, ds = area) # mme - model matrix related to gamma in the epidemic component # siaf, tiaf - spatial/temporal interaction function (NULL, list or numeric) # eventTimes, eventCoords, eventSources, gIntLower, gIntUpper, influenceRegion - # columns of the events data frame if (hash) { ### Calculates the endemic component (for i in includes -> Nin-vector) ### h(t_i,s_i,kappa_i) = exp(offset_i + beta_{0,kappa_i} + eta_h(t_i,s_i)) .hEvents <- function (beta0, beta) {} body(.hEvents) <- hEventsExpr ### Integral of the endemic component over [0;uppert] x W .hIntTW <- function (beta, score = NULL, #matrix(1,nrow(mmhGrid),1L) uppert = NULL) {} body(.hIntTW) <- as.call(c(as.name("{"), expression( subtimeidx <- if (!is.null(uppert)) { # && isScalar(uppert) && t0 <= uppert && uppert < T if (uppert == t0) return(0) # actually never happens # since uppert %in% eventTimes[includes] > t0 idx <- match(TRUE, histIntervals$stop >= uppert) firstBlockBeyondUpper <- histIntervals$BLOCK[idx] newdt <- uppert - histIntervals$start[idx] dt[gridBlocks == firstBlockBeyondUpper] <- newdt which(gridBlocks <= firstBlockBeyondUpper) } else NULL ), substitute(hGrid <- hGridExpr, list(hGridExpr=hGridExpr)), expression(sumterms <- hGrid * ds * dt), expression(if (is.null(score)) { if (is.null(subtimeidx)) sum(sumterms) else sum(sumterms[subtimeidx]) } else { if (is.null(subtimeidx)) .colSums(score * sumterms, nGrid, ncol(score)) else .colSums((score * sumterms)[subtimeidx,,drop=FALSE], length(subtimeidx), ncol(score)) }) )) } if (hase) { ### Calculates the epidemic component for all events .eEvents <- 
function (gammapred, siafpars, tiafpars, ncolsRes = 1L, score = matrix(1,N,ncolsRes), f = siaf$f, g = tiaf$g) # second line arguments are for score functions with defaults for loglik { e <- vapply(X = includes, FUN = function (i) { sources <- eventSources[[i]] nsources <- length(sources) if (nsources == 0L) numeric(ncolsRes) else { scoresources <- score[sources,,drop=FALSE] predsources <- gammapred[sources] repi <- rep.int(i, nsources) sdiff <- eventCoords[repi,,drop=FALSE] - eventCoords[sources,,drop=FALSE] fsources <- f(sdiff, siafpars, eventTypes[sources]) tdiff <- eventTimes[repi] - eventTimes[sources] gsources <- g(tdiff, tiafpars, eventTypes[sources]) # if(length(predsources) != NROW(fsources) || NROW(fsources) != NROW(gsources)) browser() .colSums(scoresources * predsources * fsources * gsources, nsources, ncolsRes) } }, FUN.VALUE = numeric(ncolsRes), USE.NAMES = FALSE) ## return a vector if ncolsRes=1, otherwise a matrix (Nin x ncolsRes) if (ncolsRes == 1L) e else t(e) } } ### Calculates the two components of the integrated intensity function ### over [0;uppert] x W x K heIntTWK <- function (beta0, beta, gammapred, siafpars, tiafpars, uppert = NULL) {} body(heIntTWK) <- as.call(c(as.name("{"), if (hash) { # endemic component expression( hIntTW <- .hIntTW(beta, uppert = uppert), .beta0 <- rep_len(if (nbeta0==0L) 0 else beta0, nTypes), fact <- sum(exp(.beta0)), hInt <- fact * hIntTW ) } else { expression(hInt <- 0) }, if (hase) { # epidemic component c(expression(siafInt <- do.call("..siafInt", .siafInt.args)),#N-vector if (useParallel) expression( # print "try-catch"ed errors if (any(.nonfinitesiafint <- !is.finite(siafInt))) stop("invalid result of 'siaf$F' for 'siafpars=c(", paste(signif(siafpars, getOption("digits")), collapse=", "), ")':\n", paste(unique(siafInt[.nonfinitesiafint]), sep="\n"), call.=FALSE) ), expression( if (!is.null(uppert)) { # && isScalar(uppert) && t0 <= uppert && uppert < T gIntUpper <- pmin(uppert-eventTimes, eps.t) subtimeidx <- eventTimes < uppert tiafIntSub <- .tiafInt(tiafpars, from = gIntLower[subtimeidx], to = gIntUpper[subtimeidx], type = eventTypes[subtimeidx]) eInt <- sum(qSum[subtimeidx] * gammapred[subtimeidx] * siafInt[subtimeidx] * tiafIntSub) } else { tiafInt <- .tiafInt(tiafpars) eInt <- sum(qSum * gammapred * siafInt * tiafInt) } ) ) } else expression(eInt <- 0), expression(c(hInt, eInt)) )) ### Calculates the log-likelihood loglik <- function (theta) { # Extract parameters from theta beta0 <- theta[seq_len(nbeta0)] beta <- theta[nbeta0+seq_len(p)] gamma <- theta[nbeta0+p+seq_len(q)] siafpars <- theta[nbeta0+p+q+seq_len(nsiafpars)] tiafpars <- theta[nbeta0+p+q+nsiafpars+seq_len(ntiafpars)] # dN part of the log-likelihood hEvents <- if (hash) .hEvents(beta0, beta) else 0 eEvents <- if (hase) { gammapred <- drop(epilinkinv(mme %*% gamma)) # N-vector .eEvents(gammapred, siafpars, tiafpars) # Nin-vector! 
(only 'includes' here) } else 0 lambdaEvents <- hEvents + eEvents # Nin-vector llEvents <- sum(log(lambdaEvents)) # * llEvents is -Inf in case of 0-intensity at any event time # * If epilinkinv is 'identity', lambdaEvents < 0 if eEvents < -hEvents, # and llEvents is NaN with a warning (intensity must be positive) if (is.nan(llEvents)) # nlminb() does not like NA function values llEvents <- -Inf # lambda integral of the log-likelihood heInt <- heIntTWK(beta0, beta, gammapred, siafpars, tiafpars) # !hase => missing(gammapred), but lazy evaluation omits an error in this case because heIntTWK doesn't ask for gammapred llInt <- sum(heInt) # Return the log-likelihood ll <- llEvents - llInt ll } ### Calculates the score vector score <- function (theta) { # Extract parameters from theta beta0 <- theta[seq_len(nbeta0)] beta <- theta[nbeta0+seq_len(p)] gamma <- theta[nbeta0+p+seq_len(q)] siafpars <- theta[nbeta0+p+q+seq_len(nsiafpars)] tiafpars <- theta[nbeta0+p+q+nsiafpars+seq_len(ntiafpars)] if (hase) { gammapred <- drop(epilinkinv(mme %*% gamma)) # N-vector hEvents <- if (hash) .hEvents(beta0, beta) else 0 eEvents <- .eEvents(gammapred, siafpars, tiafpars) # Nin-vector! (only 'includes' here) lambdaEvents <- hEvents + eEvents # Nin-vector siafInt <- do.call("..siafInt", .siafInt.args) # N-vector tiafInt <- .tiafInt(tiafpars) # N-vector } # score vector for beta hScore <- if (hash) { score_beta0 <- if (nbeta0 == 1L) local({ # global intercept sEvents <- if (hase) { hEvents / lambdaEvents } else rep.int(1, Nin) sEventsSum <- sum(sEvents) sInt <- nTypes*exp(beta0) * .hIntTW(beta) sEventsSum - unname(sInt) }) else if (nbeta0 > 1L) local({ # type-specific intercepts ind <- sapply(seq_len(nTypes), function (type) eventTypes[includes] == type, simplify=TRUE, USE.NAMES=FALSE) # logical Nin x nTypes matrix sEvents <- if (hase) { ind * hEvents / lambdaEvents } else ind sEventsSum <- .colSums(sEvents, Nin, nTypes) sInt <- exp(beta0) * .hIntTW(beta) sEventsSum - unname(sInt) }) else numeric(0L) # i.e. 
nbeta0 == 0L score_beta <- if (p > 0L) local({ sEvents <- if (hase) { mmhEvents * hEvents / lambdaEvents } else mmhEvents sEventsSum <- .colSums(sEvents, Nin, p) fact <- if (nbeta0 > 1L) sum(exp(beta0)) else if (nbeta0 == 1L) nTypes*exp(beta0) else nTypes sInt <- fact * .hIntTW(beta, mmhGrid) sEventsSum - sInt }) else numeric(0L) c(score_beta0, score_beta) } else numeric(0L) # score vector for gamma, siafpars and tiafpars eScore <- if (hase) { score_gamma <- local({ nom <- .eEvents(switch(epilink, "log" = gammapred, "identity" = rep.int(1, N)), siafpars, tiafpars, ncolsRes=q, score=mme) # Nin-vector if q=1 sEventsSum <- .colSums(nom / lambdaEvents, Nin, q) # |-> dotted version also works for vector-arguments dgammapred <- switch(epilink, "log" = mme * gammapred, "identity" = mme) sInt <- .colSums(dgammapred * (qSum * siafInt * tiafInt), N, q) sEventsSum - sInt }) score_siafpars <- if (hassiafpars && !fixedsiafpars) local({ nom <- .eEvents(gammapred, siafpars, tiafpars, ncolsRes=nsiafpars, f=siaf$deriv) sEventsSum <- .colSums(nom / lambdaEvents, Nin, nsiafpars) derivInt <- do.call(".siafDeriv", .siafDeriv.args) # N x nsiafpars matrix ## if useParallel, derivInt may contain "try-catch"ed errors ## in which case we receive a one-column character or list matrix if (!is.numeric(derivInt)) # we can throw a helpful error message stop("invalid result of 'siaf$Deriv' for 'siafpars=c(", paste(signif(siafpars, getOption("digits")), collapse=", "), ")':\n", paste(unique(derivInt[sapply(derivInt, is.character)]), sep="\n"), call.=FALSE) sInt <- .colSums(derivInt * (qSum * gammapred * tiafInt), N, nsiafpars) sEventsSum - sInt }) else numeric(nsiafpars) # if 'fixedsiafpars', this part is unused score_tiafpars <- if (hastiafpars && !fixedtiafpars) local({ nom <- .eEvents(gammapred, siafpars, tiafpars, ncolsRes=ntiafpars, g=tiaf$deriv) sEventsSum <- .colSums(nom / lambdaEvents, Nin, ntiafpars) derivIntUpper <- tiaf$Deriv(gIntUpper, tiafpars, eventTypes) derivIntLower <- tiaf$Deriv(gIntLower, tiafpars, eventTypes) derivInt <- derivIntUpper - derivIntLower # N x ntiafpars matrix sInt <- .colSums(derivInt * (qSum * gammapred * siafInt), N, ntiafpars) sEventsSum - sInt }) else numeric(ntiafpars) # if 'fixedtiafpars', this part is unused c(score_gamma, score_siafpars, score_tiafpars) } else numeric(0L) # return the score vector scorevec <- c(hScore, eScore) scorevec } ### Estimates the expected Fisher information matrix ### by the "optional variation process" (Martinussen & Scheike, p. 64), ### or see Rathbun (1996, equation (4.7)) fisherinfo <- function (theta) { # Extract parameters from theta beta0 <- theta[seq_len(nbeta0)] beta <- theta[nbeta0+seq_len(p)] gamma <- theta[nbeta0+p+seq_len(q)] siafpars <- theta[nbeta0+p+q+seq_len(nsiafpars)] tiafpars <- theta[nbeta0+p+q+nsiafpars+seq_len(ntiafpars)] # only events (intdN) part of the score function needed zeromatrix <- matrix(0, Nin, 0) if (hase) { gammapred <- drop(epilinkinv(mme %*% gamma)) # N-vector hEvents <- if (hash) .hEvents(beta0, beta) else 0 eEvents <- .eEvents(gammapred, siafpars, tiafpars) # Nin-vector! 
(only 'includes' here)
            lambdaEvents <- hEvents + eEvents   # Nin-vector
        }

        # for beta
        hScoreEvents <- if (hash) {
            scoreEvents_beta0 <- if (nbeta0 > 1L) local({ # type-specific intercepts
                ind <- sapply(seq_len(nTypes), function (type)
                    eventTypes[includes] == type,
                    simplify=TRUE, USE.NAMES=FALSE)   # logical Nin x nTypes matrix
                if (hase) { ind * hEvents / lambdaEvents } else ind
            }) else if (nbeta0 == 1L) {   # global intercept
                if (hase) { hEvents / lambdaEvents } else matrix(1, Nin, 1L)
            } else zeromatrix
            scoreEvents_beta <- if (p > 0L) {
                if (hase) {
                    mmhEvents * hEvents / lambdaEvents
                } else mmhEvents   # Nin x p matrix
            } else zeromatrix
            unname(cbind(scoreEvents_beta0, scoreEvents_beta, deparse.level=0))
        } else zeromatrix

        # for gamma, siafpars and tiafpars
        eScoreEvents <- if (hase) {
            scoreEvents_gamma_nom <-
                .eEvents(switch(epilink, "log" = gammapred,
                                "identity" = rep.int(1, N)),
                         siafpars, tiafpars,
                         ncolsRes = q, score = mme)   # Ninxq matrix
            scoreEvents_siafpars_nom <- if (hassiafpars) {
                .eEvents(gammapred, siafpars, tiafpars,
                         ncolsRes = nsiafpars, f = siaf$deriv)   # Ninxnsiafpars matrix
            } else zeromatrix
            scoreEvents_tiafpars_nom <- if (hastiafpars) {
                .eEvents(gammapred, siafpars, tiafpars,
                         ncolsRes = ntiafpars, g = tiaf$deriv)   # Ninxntiafpars matrix
            } else zeromatrix
            eScoreEvents_nom <- cbind(scoreEvents_gamma_nom,
                                      scoreEvents_siafpars_nom,
                                      scoreEvents_tiafpars_nom, deparse.level=0)
            eScoreEvents_nom / lambdaEvents
        } else zeromatrix

        scoreEvents <- cbind(hScoreEvents, eScoreEvents, deparse.level=0)

        ## Build the optional variation process (Martinussen & Scheike, p64)
        ## info <- matrix(0, nrow = npars, ncol = npars,
        ##                dimnames = list(names(theta), names(theta)))
        ## for (i in 1:Nin) info <- info + crossprod(scoreEvents[i,,drop=FALSE])
        ## (equivalently and much faster: t(scoreEvents) %*% scoreEvents)
        crossprod(scoreEvents)
    }

    ### Calculates the partial log-likelihood for continuous space
    ### (Diggle et al., 2009)

    partialloglik <- function (theta)
    {
        # Extract parameters from theta
        beta0 <- theta[seq_len(nbeta0)]
        beta <- theta[nbeta0+seq_len(p)]
        gamma <- theta[nbeta0+p+seq_len(q)]
        siafpars <- theta[nbeta0+p+q+seq_len(nsiafpars)]
        tiafpars <- theta[nbeta0+p+q+nsiafpars+seq_len(ntiafpars)]

        # calculate the observed intensities
        hEvents <- if (hash) .hEvents(beta0, beta) else 0
        eEvents <- if (hase) {
            gammapred <- drop(epilinkinv(mme %*% gamma))   # N-vector
            .eEvents(gammapred, siafpars, tiafpars)   # Nin-vector!
(only 'includes' here) } else 0 lambdaEvents <- hEvents + eEvents # Nin-vector # calculate integral of lambda(t_i, s, kappa) over at-risk set = (observation region x types) hInts <- if (hash) { # endemic component hGrid <- eval(hGridExpr) # integral over W and types for each time block in mfhGrid fact <- if (nbeta0 > 1L) sum(exp(beta0)) else if (nbeta0 == 1L) nTypes*exp(beta0) else nTypes hInt_blocks <- fact * tapply(hGrid*ds, gridBlocks, sum, simplify=TRUE) .idx <- match(eventBlocks[includes], names(hInt_blocks)) unname(hInt_blocks[.idx]) # Nin-vector } else 0 eInts <- if (hase) { # epidemic component siafInt <- do.call("..siafInt", .siafInt.args) # N-vector gs <- gammapred * siafInt # N-vector sapply(includes, function (i) { timeSources <- determineSources1(i, eventTimes, removalTimes, 0, Inf, NULL) nSources <- length(timeSources) if (nSources == 0L) 0 else { repi <- rep.int(i, nSources) tdiff <- eventTimes[repi] - eventTimes[timeSources] gsources <- tiaf$g(tdiff, tiafpars, eventTypes[timeSources]) sum(qSum[timeSources] * gs[timeSources] * gsources) } }, simplify=TRUE, USE.NAMES=FALSE) # Nin-vector } else 0 lambdaEventsIntW <- hInts + eInts # Nin-vector # Calculate and return the partial log-likelihood p <- lambdaEvents / lambdaEventsIntW # Nin-vector pll <- sum(log(p)) pll } ################################ ### Prepare for optimization ### ################################ ll <- if (partial) partialloglik else loglik functions <- list(ll = ll, sc = if (useScore) score else NULL, fi = if (useScore) fisherinfo else NULL) ### Include check for validity of siafpars and tiafpars ('validpars') in ll if (!is.null(siaf$validpars)) { body(ll) <- as.call(append(as.list(body(ll)), as.list(expression( if (hassiafpars && !siaf$validpars(siafpars)) { if (!isTRUE(optimArgs$control$trace == 0)) # default: NULL cat("(invalid 'siafpars' in loglik)\n") return(-Inf) } )), after = grep("^siafpars <-", body(ll)))) } if (!is.null(tiaf$validpars)) { body(ll) <- as.call(append(as.list(body(ll)), as.list(expression( if (hastiafpars && !tiaf$validpars(tiafpars)) { if (!isTRUE(optimArgs$control$trace == 0)) # default: NULL cat("(invalid 'tiafpars' in loglik)\n") return(-Inf) } )), after = grep("^tiafpars <-", body(ll)))) } ### Check that optim.args is a list or NULL if (is.null(optim.args)) { # no optimisation requested setting <- functions on.exit(rm(setting), add = TRUE) # Append model information setting$npars <- c(nbeta0 = nbeta0, p = p, q = q, nsiafpars = nsiafpars, ntiafpars = ntiafpars) setting$qmatrix <- qmatrix # -> information about nTypes and typeNames setting$formula <- list(endemic = endemic, epidemic = epidemic, siaf = siaf, tiaf = tiaf) # Return settings setting$call <- cl environment(setting) <- environment() if (verbose) message("optimization skipped", " (returning functions in data environment)") return(setting) } else if (!is.list(optim.args)) stop("'optim.args' must be a list or NULL") ### Check initial value for theta initpars <- rep(0, npars) names(initpars) <- c( if (nbeta0 > 1L) { paste0("h.type",typeNames) } else if (nbeta0 == 1L) "h.(Intercept)", if (p > 0L) paste("h", colnames(mmhEvents), sep = "."), if (hase) paste("e", colnames(mme), sep = "."), if (hassiafpars) paste("e.siaf", seq_len(nsiafpars), sep="."), if (hastiafpars) paste("e.tiaf", seq_len(ntiafpars), sep=".") ) ## some naive defaults if (nbeta0 > 0) initpars[seq_len(nbeta0)] <- crudebeta0( nEvents = Nin, offset.mean = if (is.null(offsetGrid)) 0 else weighted.mean(offsetGrid, ds), W.area = 
sum(ds[gridBlocks==histIntervals[1,"BLOCK"]]), period = T-t0, nTypes = nTypes ) if (hase && "e.(Intercept)" %in% names(initpars) && epilink == "log") initpars["e.(Intercept)"] <- -9 # suitable value depends on [st]iafInt if (hassiafpars && identical(body(siaf$f)[[2L]], quote(sds <- exp(pars)))) { ## "detect" siaf.gaussian => use 10% of bbox diameter as initial sd initpars[paste0("e.siaf.", seq_len(nsiafpars))] <- round(log(0.1*sqrt(sum(apply(bbox(data$W), 1L, diff.default)^2)))) } ## manual par-specification overrides these defaults if (!is.null(optim.args[["par"]])) { if (!is.vector(optim.args$par, mode="numeric")) { stop("'optim.args$par' must be a numeric vector") } if (length(optim.args$par) != npars) { stop(gettextf(paste("'optim.args$par' (%d) does not have the same", "length as the number of unknown parameters (%d)"), length(optim.args$par), npars)) } initpars[] <- optim.args$par } ## values in "start" overwrite defaults and optim.args$par if (!is.null(start)) { start <- check_twinstim_start(start) start <- start[names(start) %in% names(initpars)] initpars[names(start)] <- start } ## warn if initial intercept is negative when the identity link is used if (epilink == "identity" && "e.(Intercept)" %in% names(initpars) && initpars["e.(Intercept)"] < 0) warning("identity link and negative start value for \"e.(Intercept)\"") ## update optim.args$par optim.args$par <- initpars ### Fixed parameters during optimization fixed <- optim.args[["fixed"]] optim.args[["fixed"]] <- NULL whichfixed <- if (is.null(fixed)) { integer(0L) } else if (isTRUE(fixed)) { seq_len(npars) } else { stopifnot(is.vector(fixed)) if (is.numeric(fixed)) { stopifnot(fixed %in% seq_len(npars)) fixed } else if (is.character(fixed)) { ## we silently ignore names of non-existent parameters intersect(fixed, names(initpars)) } else if (is.logical(fixed)) { stopifnot(length(fixed) == npars) which(fixed) } else { stop("'optim.args$fixed' must be a numeric, character or logical vector") } } fixed <- setNames(logical(npars), names(initpars)) # FALSE fixed[whichfixed] <- TRUE fixedsiafpars <- hassiafpars && all(fixed[paste("e.siaf", 1:nsiafpars, sep=".")]) fixedtiafpars <- hastiafpars && all(fixed[paste("e.tiaf", 1:ntiafpars, sep=".")]) ### Define negative log-likelihood (score, hessian) for minimization ### as a function of the non-fixed parameters negll <- ll body(negll)[[length(body(negll))]] <- call("-", body(negll)[[length(body(negll))]]) negsc <- if (useScore) { negsc <- score body(negsc)[[length(body(negsc))]] <- call("-", body(negsc)[[length(body(negsc))]]) negsc } else NULL neghess <- if (useScore) fisherinfo else NULL if (any(fixed)) { ## modify negll, negsc and neghess for subvector optimization optim.args$par <- initpars[!fixed] if (verbose) { if (all(fixed)) { cat("\nno numerical likelihood optimization, all parameters fixed:\n") } else cat("\nfixed parameters during optimization:\n") print(initpars[fixed]) } tmpexpr <- expression( initpars[!fixed] <- theta, theta <- initpars ) body(negll) <- as.call(append(as.list(body(negll)), as.list(tmpexpr), 1)) if (useScore) { body(negsc) <- as.call(append(as.list(body(negsc)), as.list(tmpexpr), 1)) body(neghess) <- as.call(append(as.list(body(neghess)), as.list(tmpexpr), 1)) # return non-fixed sub-vector / sub-matrix only body(negsc)[[length(body(negsc))]] <- call("[", body(negsc)[[length(body(negsc))]], quote(!fixed)) body(neghess)[[length(body(neghess))]] <- call("[", body(neghess)[[length(body(neghess))]], quote(!fixed), quote(!fixed), drop=FALSE) } ## if siafpars or 
tiafpars are fixed, pre-evaluate integrals if (fixedsiafpars) { if (verbose) cat("pre-evaluating 'siaf' integrals with fixed parameters ...\n") if (!"memoise" %in% loadedNamespaces()) cat("WARNING: Memoization of siaf integration not available!\n", " Repeated integrations with same parameters ", "are redundant and slow!\n", " Really consider installing package \"memoise\"!\n", sep="") siafInt <- local({ siafpars <- initpars[paste("e.siaf", 1:nsiafpars, sep=".")] do.call("..siafInt", .siafInt.args) # memoise()d }) } if (fixedtiafpars) { if (verbose) cat("pre-evaluating 'tiaf' integrals with fixed parameters ...\n") tiafInt <- .tiafInt(initpars[paste("e.tiaf", 1:ntiafpars, sep=".")]) ## re-define .tiafInt such that it just returns the pre-evaluated ## integrals if called with the default arguments .tiafInt.orig <- .tiafInt body(.tiafInt) <- expression( if (nargs() == 1L) tiafInt else .tiafInt.orig(tiafpars, from, to, type, G) ) ## restore the original function at the end on.exit({ .tiafInt <- .tiafInt.orig rm(.tiafInt.orig) }, add=TRUE) } } if (any(!fixed)) { #################### ### Optimization ### #################### ## Configure the optim procedure (check optim.args) # default arguments optimArgs <- list(par = NULL, # replaced by optim.args$par below fn = quote(negll), gr = quote(negsc), method = if (partial) "Nelder-Mead" else "nlminb", lower = -Inf, upper = Inf, control = list(), hessian = TRUE) # user arguments namesOptimArgs <- names(optimArgs) namesOptimUser <- names(optim.args) optimValid <- namesOptimUser %in% namesOptimArgs optimArgs[namesOptimUser[optimValid]] <- optim.args[optimValid] if (any(!optimValid)) { warning("unknown names in optim.args: ", paste(namesOptimUser[!optimValid], collapse = ", "), immediate. = TRUE) } doHessian <- optimArgs$hessian optimMethod <- optimArgs$method ## Call 'optim', 'nlminb', or 'nlm' with the above arguments if (verbose) { cat("\nminimizing the negative", if (partial) "partial", "log-likelihood", "using", if (optimMethod %in% c("nlm", "nlminb")) paste0("'",optimMethod,"()'") else { paste0("'optim()'s \"", optimMethod, "\"") }, "...\n") cat("initial parameters:\n") print(optimArgs$par) } optimRes1 <- if (optimMethod == "nlminb") { nlminbControl <- control2nlminb(optimArgs$control, defaults = list(trace=1L, rel.tol=1e-6)) ## sqrt(.Machine$double.eps) is the default reltol used in optim, ## which usually equals about 1.49e-08. ## The default rel.tol of nlminb (1e-10) seems too small ## (nlminb often does not finish despite no "relevant" change in loglik). ## I therefore use 1e-6, which is also the default in package nlme ## (see 'lmeControl'). 
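        ## A caller could override this tolerance, e.g. via the (hypothetical)
        ## call twinstim(..., optim.args = list(control = list(rel.tol = 1e-8))).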
if (nlminbControl$trace > 0L) { cat("negative log-likelihood and parameters ") if (nlminbControl$trace == 1L) cat("in each iteration") else { cat("every", nlminbControl$trace, "iterations") } cat(":\n") } nlminbRes <- nlminb(start = optimArgs$par, objective = negll, gradient = negsc, hessian = if (doHessian) neghess else NULL, control = nlminbControl, lower = optimArgs$lower, upper = optimArgs$upper) nlminbRes$value <- -nlminbRes$objective nlminbRes$counts <- nlminbRes$evaluations nlminbRes } else if (optimMethod == "nlm") { nlmObjective <- function (theta) { value <- negll(theta) grad <- negsc(theta) #hess <- neghess(theta) structure(value, gradient = grad)#, hessian = hess) } nlmControl <- optimArgs$control if (is.null(nlmControl[["print.level"]])) { nlmControl$print.level <- min(nlmControl$trace, 2L) } nlmControl$trace <- nlmControl$REPORT <- NULL if (is.null(nlmControl[["iterlim"]])) { nlmControl$iterlim <- nlmControl$maxit } nlmControl$maxit <- NULL nlmControl$check.analyticals <- FALSE ##<- we use the negative _expected_ Fisher information as the Hessian, ## which is of course different from the true Hessian (=neg. obs. Fisher info) nlmRes <- do.call("nlm", c(alist(f = nlmObjective, p = optimArgs$par, hessian = doHessian), nlmControl)) names(nlmRes)[names(nlmRes) == "estimate"] <- "par" nlmRes$value <- -nlmRes$minimum nlmRes$counts <- rep.int(nlmRes$iterations, 2L) nlmRes$convergence <- if (nlmRes$code %in% 1:2) 0L else nlmRes$code nlmRes } else { # use optim() optimArgs$control <- modifyList(list(trace=1L, REPORT=1L), optimArgs$control) if (finetune) optimArgs$hessian <- FALSE res <- do.call("optim", optimArgs) res$value <- -res$value res } ## Optional fine-tuning of ML estimates by robust Nelder-Mead optimRes <- if (finetune) { if (verbose) { cat("\nMLE from first optimization:\n") print(optimRes1$par) cat("loglik(MLE) =", optimRes1$value, "\n") cat("\nfine-tuning MLE using Nelder-Mead optimization ...\n") } optimArgs$par <- optimRes1$par optimArgs$method <- "Nelder-Mead" optimArgs$hessian <- doHessian optimArgs$control <- modifyList(list(trace=1L), optimArgs$control) nmRes <- do.call("optim", optimArgs) nmRes$value <- -nmRes$value nmRes$counts[2L] <- 0L # 0 gradient evaluations (replace NA for addition below) nmRes } else optimRes1 ## Convergence message msgConvergence <- if (finetune || optimMethod != "nlminb") { paste("code", optimRes$convergence) } else optimRes$message if (optimRes$convergence != 0) { msgNotConverged <- paste0("optimization routine did not converge (", msgConvergence, ")") warning(msgNotConverged) if (verbose) { cat("\nWARNING: ", msgNotConverged, "!\n", sep="") if ((finetune || optimMethod != "nlminb") && !is.null(optimRes$message) && nzchar(optimRes$message)) { cat("MESSAGE: \"", optimRes$message, "\"\n", sep="") } if (hase && useScore && !constantsiaf && grepl("false", msgNotConverged)) { cat("SOLUTION: increase the precision of 'siaf$Deriv' (and 'siaf$F')\n") if (optimMethod == "nlminb") { cat(" or nlminb's false convergence tolerance 'xf.tol'\n") } } } } if (verbose) { cat("\n", if (finetune) "final ", "MLE:\n", sep = "") print(optimRes$par) cat("loglik(MLE) =", optimRes$value, "\n") } } ############## ### Return ### ############## ### Set up list object to be returned fit <- list( coefficients = if (any(fixed)) { if (all(fixed)) initpars else unlist(modifyList(as.list(initpars), as.list(optimRes$par))) } else optimRes$par, loglik = structure(if (all(fixed)) ll(initpars) else optimRes$value, partial = partial), counts = if (all(fixed)) c("function"=1L, 
"gradient"=0L) else { optimRes1$counts + if (finetune) optimRes$counts else c(0L, 0L) }, converged = if (all(fixed) || (optimRes$convergence == 0)) TRUE else msgConvergence ) ### Add Fisher information matrices # estimation of the expected Fisher information matrix fit["fisherinfo"] <- list( if (useScore) structure( fisherinfo(fit$coefficients), dimnames = list(names(initpars), names(initpars)) ) ) # If requested, add observed fisher info (= negative hessian at maximum) fit["fisherinfo.observed"] <- list( if (any(!fixed) && !is.null(optimRes$hessian)) optimRes$hessian ## no "-" here because we optimized the negative log-likelihood ) ### Add fitted intensity values and integrated intensities at events # final coefficients theta <- fit$coefficients beta0 <- theta[seq_len(nbeta0)] beta <- theta[nbeta0+seq_len(p)] gamma <- theta[nbeta0+p+seq_len(q)] siafpars <- theta[nbeta0+p+q+seq_len(nsiafpars)] tiafpars <- theta[nbeta0+p+q+nsiafpars+seq_len(ntiafpars)] # final siaf and tiaf integrals over influence regions / periods # and final gammapred (also used by intensity.twinstim) if (hase) { gammapred <- drop(epilinkinv(mme %*% gamma)) # N-vector if (!fixedsiafpars) siafInt <- do.call("..siafInt", .siafInt.args) if (!fixedtiafpars) tiafInt <- .tiafInt(tiafpars) } # fitted intensities hEvents <- if (hash) .hEvents(unname(beta0), beta) else rep.int(0, Nin) eEvents <- if (hase) { .eEvents(gammapred, siafpars, tiafpars) # Nin-vector! (only 'includes' here) } else rep.int(0, Nin) fit$fitted <- hEvents + eEvents # = lambdaEvents # Nin-vector fit$fittedComponents <- cbind(h = hEvents, e = eEvents) rm(hEvents, eEvents) # calculate cumulative ground intensities at event times # Note: this function is also used by residuals.twinstim LambdagEvents <- function (cores = 1L, cumCIF.pb = interactive()) { if (cores != 1L) cumCIF.pb <- FALSE if (cumCIF.pb) pb <- txtProgressBar(min=0, max=Nin, initial=0, style=3) heIntEvents <- if (cores == 1L) { sapply(seq_len(Nin), function (i) { if (cumCIF.pb) setTxtProgressBar(pb, i) heIntTWK(beta0, beta, gammapred, siafpars, tiafpars, eventTimes[includes[i]]) }, simplify=TRUE, USE.NAMES=FALSE) } else { # cannot use progress bar simplify2array(parallel::mclapply( X=eventTimes[includes], FUN=heIntTWK, beta0=beta0, beta=beta, gammapred=gammapred, siafpars=siafpars,tiafpars=tiafpars, mc.preschedule=TRUE, mc.cores=cores ), higher=FALSE) } if (cumCIF.pb) close(pb) setNames(.colSums(heIntEvents, 2L, Nin), rownames(mmhEvents)) } fit["tau"] <- list( if (cumCIF) { if (verbose) cat("\nCalculating fitted cumulative intensities at events ...\n") LambdagEvents(cores, cumCIF.pb) }) # calculate observed R0's: mu_j = spatio-temporal integral of e_j(t,s) over # the observation domain (t0;T] x W (not whole R+ x R^2) fit$R0 <- if (hase) qSum * gammapred * siafInt * tiafInt else rep.int(0, N) names(fit$R0) <- row.names(mfe) ### Append model information fit$npars <- c(nbeta0 = nbeta0, p = p, q = q, nsiafpars = nsiafpars, ntiafpars = ntiafpars) fit$qmatrix <- qmatrix # -> information about nTypes and typeNames fit$bbox <- bbox(data$W) # for completeness and for iafplot fit$timeRange <- c(t0, T) # for simulate.twinstim's defaults fit$formula <- list(endemic = endemic, epidemic = epidemic, siaf = siaf, tiaf = tiaf) fit["xlevels"] <- list( if (length(xlevels_endemic) + length(xlevels_epidemic) > 0) { list(endemic = xlevels_endemic, epidemic = xlevels_epidemic) } else NULL) fit["control.siaf"] <- list(control.siaf) # might be NULL ### Append optimizer configuration optim.args$par <- initpars # reset to 
also include fixed coefficients if (any(fixed)) optim.args$fixed <- names(initpars)[fixed] # restore fit$optim.args <- optim.args fit["functions"] <- list( if (model) { environment(fit) <- environment() functions }) ### Return object of class "twinstim" if (verbose) cat("\nDone.\n") fit$call <- cl fit$runtime <- structure(proc.time() - ptm, cores=cores) class(fit) <- "twinstim" return(fit) } surveillance/R/algo_cdc.R0000644000176200001440000000677712003517525015042 0ustar liggesusers################################################### ### chunk number 1: ################################################### # Implementation of the CDC surveillance system. # The system evaluates specified timepoints and gives alarm if it recognizes # an outbreak for this timepoint. # algo.cdcLatestTimepoint <- function(disProgObj, timePoint = NULL, control = list(b = 5, m = 1, alpha=0.025)){ observed <- disProgObj$observed freq <- disProgObj$freq # If there is no value in timePoint, then take the last value in observed if(is.null(timePoint)){ timePoint = length(observed) } # check if the vector observed includes all necessary data. if((timePoint-(control$b*freq)-control$m*4) < 1){ stop("The vector of observed is too short!") } ###################################################################### #Find which weeks to take -- hoehle 27.3.2007 - fixed bug taking #things in the wrong time order (more recent values) ###################################################################### midx <- seq(-control$m*4-3,control$m*4) yidx <- ((-control$b):(-1))*freq baseidx <- sort(rep(yidx,each=length(midx)) + midx) months <- rep(1:((2*control$m+1)*control$b),each=4) basevec <- as.integer(by(observed[timePoint + baseidx ],months,sum)) # Create a normal distribution based upper confidence interval # (we will use the prediction interval described in # Farrington & Andrews (2003)) upCi <- mean(basevec)+qnorm(1-control$alpha/2)*sd(basevec)*sqrt(1+1/length(basevec)) #Counts for the current month yt0 <- sum(observed[timePoint:(timePoint-3)]) # Alarm if the actual value is larger than the upper limit. alarm <- yt0 > upCi # Save aggregated score for later visualisation. aggr <- yt0 result <- list(alarm=alarm, upperbound=upCi,aggr=aggr) class(result) = "survRes" # for surveillance system result return(result) } # 'algo.cdc' calls 'algo.cdcLatestTimepoint' for data points given by range. algo.cdc <- function(disProgObj, control = list(range = range, b=5, m=1, alpha=0.025)){ if(disProgObj$freq != 52) { stop("algo.cdc only works for weekly data.") } # initialize the necessary vectors alarm <- matrix(data = 0, nrow = length(control$range), ncol = 1) aggr <- matrix(data = 0, nrow = length(control$range), ncol = 1) upperbound <- matrix(data = 0, nrow = length(control$range), ncol = 1) #Set control options (standard CDC options) if (is.null(control$range)) { control$range <- (disProgObj$freq*control$b - control$m):length(disProgObj$observed) } if (is.null(control$b)) {control$b=5} if (is.null(control$m)) {control$m=1} #bug fixed if (is.null(control$alpha)) {control$alpha=0.025} count <- 1 for(i in control$range){ # call algo.cdcLatestTimepoint result <- algo.cdcLatestTimepoint(disProgObj, i,control=control) # store the results in the right order alarm[count] <- result$alarm aggr[count] <- result$aggr upperbound[count] <- result$upperbound count <- count + 1 } #Add name and data name to control object.
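## (added note: with the default settings b=5 and m=1, the paste() below yields the name "cdc(4*,0,5)")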
control$name <- paste("cdc(",control$m*4,"*,",0,",",control$b,")",sep="") control$data <- paste(deparse(substitute(disProgObj))) # Return the vectors- # as a special feature CDC objects contain an "aggr" identifier # containing the aggregated counts for each week. result <- list(alarm = alarm, upperbound = upperbound, disProgObj=disProgObj, control=control, aggr=aggr) class(result) = "survRes" # for surveillance system result return(result) } surveillance/R/functionTable.R0000644000176200001440000000711513122025572016072 0ustar liggesusers################################################################################ ### Categorize functions and methods for a specific class ### (this is an internal utility function used in some of the package vignettes) ### ### Copyright (C) 2014-2017 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ################################################################################ functionTable <- function (class, functions = list(), format = "\\texttt", format.nongenerics = "\\textit", horizontal = FALSE) { ## categorization of known generic functions KNOWNGENERICS <- list( Display = c("print", "summary", "xtable", "plot", "animate", "as.stepfun", "intensityplot"), Subset = c("[", "head", "tail", "subset"), Extract = c("nobs", "marks", "coef", "fixef", "ranef", "vcov", "confint", "coeflist", "logLik", "AIC", "extractAIC", "profile", "residuals", "terms", "formula", "R0"), Modify = c("update", "untie", "add1", "drop1"), Convert = c("as.epidata"), Other = c("predict", "simulate", "pit", "scores", "calibrationTest") ) if (is.null(names(functions))) # put all functions in category "Other" functions <- list(Other = unlist(functions, use.names=FALSE)) ## union known generics with specified functions categoryNames <- union(names(KNOWNGENERICS), names(functions)) knowngenerics <- mapply( FUN = union, setNames(KNOWNGENERICS[categoryNames], categoryNames), functions[categoryNames], SIMPLIFY = FALSE, USE.NAMES = TRUE) ## get registered methods and associated generics allmethods <- methods(class = class) allgenerics <- attr(allmethods, "info")$generic genericsList <- lapply(X = knowngenerics, FUN = intersect, allgenerics) genericsList$Other <- c(genericsList$Other, setdiff(allgenerics, unlist(genericsList, use.names=FALSE))) ## all extra 'functions' are not generic or without a method for 'class' nongenericsList <- lapply(X = functions, FUN = function (fnames) { res <- setdiff(fnames, allgenerics) ## note: we do not check if these functions actually exist() if (length(res)) paste0(format.nongenerics, "{", res, "}") else res }) ## merge generics and non-generics functionList <- mapply(FUN = c, genericsList, nongenericsList[names(genericsList)], SIMPLIFY = FALSE, USE.NAMES = TRUE) ## transform list into a matrix by filling with empty cells categoryLengths <- lengths(functionList, use.names = FALSE) nrows <- max(categoryLengths) functionTable <- if (horizontal) { as.matrix(vapply(X = functionList[categoryLengths > 0L], FUN = function (x) paste0(format, "{", x, "}", collapse = ", "), FUN.VALUE = character(1L), USE.NAMES = TRUE)) } else { vapply(X = functionList[categoryLengths > 0L], FUN = function (x) c(paste0(format, "{", x, "}"), rep.int(NA_character_, nrows-length(x))), FUN.VALUE = character(nrows), USE.NAMES = TRUE) } ## done functionTable #xtable::xtable(functionTable, ...) 
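  ## Example usage (an illustrative sketch, not part of the original file):
  ##   functionTable(class = "sts", functions = list(Other = "aggregate"))
  ## would return a character matrix of LaTeX-formatted method names by
  ## category (with "aggregate" added to the "Other" group), suitable for
  ## passing to xtable() in a vignette.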
} surveillance/R/boda.R0000644000176200001440000002533514004512307014203 0ustar liggesusers###################################################################### # An implementation of the Bayesian Outbreak Detection Algorithm (BODA) # described in Manitz and H{\"o}hle (2013), Biometrical Journal. # # Note: The algorithm requires the non-CRAN package INLA to run. # You can easily install this package as described at # https://www.r-inla.org/download-install # # # Author: # The initial code was written by J. Manitz, which was then later # adapted and modified for integration into the package by M. Hoehle. # Contributions by M. Salmon. # # Date: # Code continuously developed during 2010-2014 # # Changes: # MS@2015-02-18 # fixed problem that the posterior was drawn from the respective marginals # instead of the joint distribution. # MH@2014-02-05 # changed tcltk progress bar to text based one and modified code, # use S4 sts object (no wrapping wanted) and changed to new INLA # function name for calculating the transformed marginal. ###################################################################### boda <- function(sts, control=list(range=NULL, X=NULL, trend=FALSE, season=FALSE, prior=c('iid','rw1','rw2'), alpha=0.05, mc.munu=100, mc.y=10, verbose=FALSE,multicore=TRUE, samplingMethod=c('joint','marginals'), quantileMethod=c("MC","MM"))) { #Check if the INLA package is available. if (!requireNamespace("INLA", quietly = TRUE)) { stop("The boda function requires the INLA package to be installed.\n", " The package is not available on CRAN, but can be easily obtained\n", " from <https://www.r-inla.org/download-install>.") } #Possibly speed up the computations by using multiple cores. if (is.null(control[["multicore",exact=TRUE]])) { control$multicore <- TRUE } if (control$multicore) { INLA::inla.setOption("num.threads", parallel::detectCores(logical = TRUE)) } #Stop if the sts object is multivariate if (ncol(sts)>1) { stop("boda currently only handles univariate sts objects.") } # quantileMethod parameter if(is.null(control[["quantileMethod",exact=TRUE]])){ control$quantileMethod <- "MC" } else { control$quantileMethod <- match.arg(control$quantileMethod, c("MC","MM")) } # extract data observed <- as.vector(observed(sts)) state <- as.vector(sts@state) time <- 1:length(observed) # clean model data from given outbreaks -- this is now part of the modelling # observed[which(state==1)] <- NA ### define range # missing range if(is.null(control[["range",exact=TRUE]])){ warning('No range given.
Range is defined as time from second period until end of time series.') control$range <- (sts@freq+1):length(observed) } # check that range is subset of time series indices if(!all(control$range %in% time)){ stop("Evaluation period 'range' has to be a vector of time series indices.") } #set order of range control$range <- sort(control$range) ### show extra output from INLA if(is.null(control[["verbose",exact=TRUE]])) { control$verbose <- FALSE } ### setting for different models if(is.null(control[["trend",exact=TRUE]])){ control$trend <- FALSE } if(is.null(control[["season",exact=TRUE]])){ control$season <- FALSE } if(!is.logical(control$trend)||!is.logical(control$season)){ stop('trend and season are logical parameters.') } ### Prior prior <- match.arg(control$prior, c('iid','rw1','rw2')) if(is.vector(control$X)){ control$X <- as.matrix(control$X,ncol=1) } # sampling method for the parameters samplingMethod <- match.arg(control$samplingMethod, c('joint','marginals')) # setting for threshold calculation if(is.null(control[["alpha",exact=TRUE]])){ control$alpha <- 0.05 } if(control$alpha <= 0 | control$alpha >= 1){ stop("The significance level 'alpha' has to be a probability, and thus has to be between 0 and 1.") } # setting for Monte Carlo integration if(is.null(control[["mc.munu",exact=TRUE]])){ control$mc.munu <- 100 } if(is.null(control[["mc.y",exact=TRUE]])){ control$mc.y <- 10 } if(!control$mc.munu>0 || control$mc.munu!=round(control$mc.munu,0) || !control$mc.y>0 || control$mc.y!=round(control$mc.y,0)){ stop('Number of Monte Carlo trials has to be an integer larger than zero') } ### set model formula and data modelformula <- paste("observed ~ f(time, model='",prior,"', cyclic=FALSE)", sep="") dat <- data.frame(observed=observed, time=time) # outbreak id if(sum(state)>0){ modelformula <- paste(modelformula, "+ f(state, model='linear')", sep="") dat <- data.frame(dat, state=state) } # trend if(control$trend){ modelformula <- paste(modelformula, "+ f(timeT, model='linear')", sep="") dat <- data.frame(dat, timeT=time) } # season if(control$season){ modelformula <- paste(modelformula, "+ f(timeS, model='seasonal', season.length=",sts@freq,")", sep="") dat <- data.frame(dat, timeS=time) } # covariates X.formula <- NULL if(!is.null(control$X)){ if(nrow(control$X)!=length(observed)){ stop("Argument for covariates 'X' has to have the same length as the time series") } for(i in 1:ncol(control$X)){ X.formula <- (paste(X.formula ,'+', colnames(control$X)[i])) } modelformula <- paste(modelformula, X.formula, sep="") dat <- data.frame(dat, control$X) } modelformula <- as.formula(modelformula) ##### sequential steps ##### #If there is more than one time point in range, then set up a progress bar #(now text based.
Alternative: tcltk based) useProgressBar <- length(control$range)>1 if (useProgressBar) { pb <- txtProgressBar(min=min(control$range), max=max(control$range), initial=0,style=3) } #Allocate vector of thresholds xi <- rep(NA,length(observed)) #Loop over all time points in 'range' for(i in control$range){ # prepare data frame dati <- dat[1:i,] dati$observed[i] <- NA #current value to be predicted dati$state[i] <- 0 #current state to be predicted # fit model and calculate quantile using INLA & MC sampling xi[i] <- bodaFit(dat=dati, samplingMethod=samplingMethod, modelformula=modelformula, prior=prior, alpha=control$alpha, mc.munu=control$mc.munu, mc.y=control$mc.y, quantileMethod=control$quantileMethod) # update progress bar if (useProgressBar) setTxtProgressBar(pb, i) } # close progress bar if (useProgressBar) close(pb) # compare observed with threshold and trigger alarm: FALSE=no alarm sts@alarm[,1] <- observed > xi sts@upperbound[,1] <- xi control$name <- paste('boda(prior=',prior,')',sep='') sts@control <- control # return result as an sts object return(sts[control$range,]) } ####################################################################### # Helper function for fitting the Bayesian GAM using INLA and computing # the (1-alpha)*100% quantile for the posterior predictive of y[T1] # # Parameters: # dat - data.frame containing the data # modelformula - formula to use for fitting the model with inla # prior - what type of prior for the spline c('iid','rw1','rw2') # alpha - quantile to compute in the predictive posterior # mc.munu - no. of Monte Carlo samples for the mu/size param in the NegBin # mc.y - no. of samples for y. # # Returns: # (1-alpha)*100% quantile for the posterior predictive of y[T1] ###################################################################### bodaFit <- function(dat=dat, modelformula=modelformula,prior=prior,alpha=alpha, mc.munu=mc.munu, mc.y=mc.y, samplingMethod=samplingMethod,quantileMethod=quantileMethod,...) { # set time point T1 <- nrow(dat) ### fit model link <- 1 E <- mean(dat$observed, na.rm=TRUE) model <- INLA::inla(modelformula, data=dat, family='nbinomial',E=E, control.predictor=list(compute=TRUE,link=link), control.compute=list(cpo=FALSE,config=TRUE), control.inla = list(int.strategy = "grid",dz=1,diff.logdens = 10)) if(is.null(model)){ return(qi=NA) } if(samplingMethod=='marginals'){ # draw sample from marginal posterior of muT1 & etaT1 to determine predictive # quantile by sampling. hoehle: inla.marginal.transform does not exist anymore! # Since the observation corresponding to T1 is NA we manually need to transform # the fitted values (had there been an observation this is not necessary!!)
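# (added explanation: the idea is to draw mc.munu samples of the fitted mean
# from its posterior marginal and of the size parameter from the hyperparameter
# marginal, and then to sample y[T1] ~ NegBin(mu=E*mT1, size=theta); this
# ignores posterior dependence between the two parameters, which is why
# samplingMethod='joint' is the default, cf. the MS@2015-02-18 change note
# at the top of this file)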
marg <- try(INLA::inla.tmarginal(function(x) x,model$marginals.fitted.values[[T1]]), silent=TRUE) if(inherits(marg,'try-error')){ return(qi=NA) } mT1 <- try(INLA::inla.rmarginal(n=mc.munu,marg), silent=TRUE) if(inherits(mT1,'try-error')){ return(qi=NA) } # take variation in size hyperprior into account by also sampling from it mtheta <- model$internal.marginals.hyperpar[[1]] theta <- exp(INLA::inla.rmarginal(n=mc.munu,mtheta)) if(inherits(theta,'try-error')){ return(qi=NA) } } if (samplingMethod=='joint'){ # Sample from the posterior jointSample <- INLA::inla.posterior.sample(mc.munu,model, intern = TRUE) # take variation in size hyperprior into account by also sampling from it theta <- exp(t(sapply(jointSample, function(x) x$hyperpar[[1]]))) mT1 <- exp(t(sapply(jointSample, function(x) x$latent[[T1]]))) yT1 <- rnbinom(n=mc.y*mc.munu,size=theta,mu=E*mT1) } if(quantileMethod=="MC"){ #Draw (mc.munu \times mc.y) responses. Would be nice, if we could #determine the quantile of the predictive posterior in more direct form yT1 <- numeric(mc.munu*mc.y) #NULL idx <- seq(mc.y) for(j in seq(mc.munu)) { idx <- idx + mc.y yT1[idx] <- rnbinom(n=mc.y,size=theta[j],mu=E*mT1[j]) } qi <- quantile(yT1, probs=(1-alpha), type=3, na.rm=TRUE) } if(quantileMethod=="MM"){ mT1 <- mT1[mT1>=0&theta>0] theta <- theta[mT1>=0&theta>0] minBracket <- qnbinom(p=(1-alpha), mu=E*min(mT1), size=max(theta)) maxBracket <- qnbinom(p=(1-alpha), mu=E*max(mT1), size=min(theta)) qi <- qmix(p=(1-alpha), mu=E*mT1, size=theta, bracket=c(minBracket, maxBracket)) } return(qi) } #done bodaFit surveillance/R/hhh4_simulate_plot.R0000644000176200001440000003321413752335607017104 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Plots for an array "hhh4sims" of simulated counts from an "hhh4" model, ### or a list thereof as produced by different "hhh4" models (same period!) ### ### Copyright (C) 2013-2018,2020 Sebastian Meyer ### $Revision: 2590 $ ### $Date: 2020-11-09 22:58:31 +0100 (Mon, 09. Nov 2020) $ ################################################################################ plot.hhh4sims <- function (x, ...) { ## use the object name of x x <- eval(substitute(as.hhh4simslist(x)), envir = parent.frame()) plot.hhh4simslist(x, ...) } ## class for a list of "hhh4sims" arrays from different models ## (over the same period with same initial values) hhh4simslist <- function (x, initial, stsObserved) { ## drop attributes from every single hhh4sims object for (i in seq_along(x)) attr(x[[i]], "class") <- attr(x[[i]], "initial") <- attr(x[[i]], "stsObserved") <- NULL ## set as list attributes attr(x, "initial") <- initial attr(x, "stsObserved") <- stsObserved class(x) <- "hhh4simslist" x } ## converter functions as.hhh4simslist <- function (x, ...) UseMethod("as.hhh4simslist") as.hhh4simslist.hhh4sims <- function (x, ...) { ## we do not use x here, but construct a list() from the sys.call() ## such that as.hhh4simslist(name1 = model1, name2 = model2) works cl <- sys.call() cl[[1L]] <- as.name("list") xx <- eval(cl, envir = parent.frame()) objnames <- as.character(cl)[-1L] if (is.null(names(xx))) { names(xx) <- objnames } else { names(xx)[names(xx) == ""] <- objnames[names(xx) == ""] } as.hhh4simslist.list(xx) } as.hhh4simslist.list <- function (x, ...) 
{ ## verify class lapply(X = x, FUN = function (Xi) if (!inherits(Xi, "hhh4sims")) stop(sQuote("x"), " is not a list of ", dQuote("hhh4sims"))) hhh4simslist(x, initial = attr(x[[1L]], "initial"), stsObserved = attr(x[[1L]], "stsObserved")) } as.hhh4simslist.hhh4simslist <- function (x, ...) x ## 'x[i]': select models (elements of the list) ## 'x[i,j,]': subset simulations while keeping attributes in sync "[.hhh4simslist" <- function (x, i, j, ..., drop = FALSE) { ## case 1: select models if (nargs() == 2L) { ## select elements of the list xx <- NextMethod("[") ## restore class attributes xx <- hhh4simslist(xx, initial = attr(x, "initial"), stsObserved = attr(x, "stsObserved")) return(xx) } ## case 2: subset simulations, i.e., index individual arrays cl <- sys.call() cl[[1L]] <- as.name("[") cl[[2L]] <- quote(x) cl$drop <- drop subseti <- as.function(c(alist(x=), cl), envir = parent.frame()) x[] <- lapply(X = unclass(x), subseti) # unclass to use default [[ subset_hhh4sims_attributes(x, i, j) } ## select a specific "hhh4sims" from the list of simulations ## (the inverse of as.hhh4simslist.hhh4sims(xx)) "[[.hhh4simslist" <- function (x, i) { xx <- NextMethod("[[") a <- attributes(xx) attributes(xx) <- c(a[c("dim", "dimnames")], attributes(x)[c("initial", "stsObserved")], list(class = "hhh4sims"), a[c("call", "seed")]) xx } ## aggregate predictions over time and/or (groups of) units aggregate.hhh4simslist <- function (x, units = TRUE, time = FALSE, ..., drop = FALSE) { if (drop || time) { # unclass(x) to use default "[["-method in lapply lapply(X = unclass(x), FUN = aggregate.hhh4sims, units = units, time = time, ..., drop = TRUE) } else { as.hhh4simslist.list( lapply(X = x, FUN = aggregate.hhh4sims, units = units, time = time, ..., drop = FALSE) ) } } #################### ### plot methods ### #################### check_groups <- function (groups, units) { if (is.null(groups)) { factor(rep.int("overall", length(units))) } else if (isTRUE(groups)) { factor(units, levels = units) } else { stopifnot(length(groups) == length(units)) as.factor(groups) } } plot.hhh4simslist <- function (x, type = c("size", "time", "fan"), ..., groups = NULL, par.settings = list()) { FUN <- paste("plotHHH4sims", match.arg(type), sep = "_") groups <- check_groups(groups, colnames(attr(x, "stsObserved"), do.NULL=FALSE)) ngroups <- nlevels(groups) if (is.list(par.settings)) { par.defaults <- list(mar = c(4,4,2,0.5)+.1, las = 1) if (ngroups > 1) par.defaults$mfrow <- sort(n2mfrow(ngroups)) par.settings <- modifyList(par.defaults, par.settings) opar <- do.call("par", par.settings) on.exit(par(opar)) } if (ngroups == 1) { do.call(FUN, list(quote(x), ...)) } else { # stratified plots by groups of units invisible(sapply( X = levels(groups), FUN = function (group) { x_group <- x[, which(group == groups) , ] # [-method has drop=F do.call(FUN, list(quote(x_group), ..., main = group)) }, simplify = FALSE, USE.NAMES = TRUE)) } } ### simulated final size distribution as boxplots aggregated over all units plotHHH4sims_size <- function (x, horizontal = TRUE, trafo = NULL, observed = TRUE, names = base::names(x), ...) 
{ x <- as.hhh4simslist(x) if (horizontal) { names <- rev(names) x <- rev(x) } if (is.null(trafo)) #trafo <- scales::identity_trans() trafo <- list(name = "identity", transform = identity) if (isTRUE(observed)) observed <- list() nsims <- sapply(X = unclass(x), # simply use the default "[["-method FUN = colSums, dims = 2, # sum over 1:2 (time x unit) simplify = TRUE, USE.NAMES = TRUE) nsimstrafo <- trafo$transform(nsims) ## default boxplot arguments fslab <- "size" if (trafo$name != "identity") fslab <- paste0(fslab, " (", trafo$name, "-scale)") defaultArgs <- list(ylab=fslab, yaxt="n", las=1, cex.axis=1, border=1) if (horizontal) names(defaultArgs) <- sub("^y", "x", names(defaultArgs)) ## defaultArgs$mai <- par("mai") ## defaultArgs$mai[2] <- max(strwidth(boxplot.args$names, units="inches", ## cex=boxplot.args$cex.axis)) ## if (trafo$name != "identity") { ## ## ?bxp: 'yaxs' and 'ylim' are used 'along the boxplot' ## defaultArgs <- c(defaultArgs, ## list(ylim=c(0,max(nsimstrafo)*1.05), yaxs="i")) ## } ## generate boxplots boxplot.args <- modifyList(defaultArgs, list(...)) boxplot.args$horizontal <- horizontal boxplot.args$names <- names do.call("boxplot", c(list(x=nsimstrafo), boxplot.args)) ## add means if (horizontal) { points(x=colMeans(nsimstrafo), y=1:ncol(nsimstrafo), pch=8, col=boxplot.args$border) } else points(colMeans(nsimstrafo), pch=8, col=boxplot.args$border) ## add axis aty <- pretty(nsims, n=par("lab")[2-horizontal]) ##aty <- checkat(list(n=par("lab")[2], trafo=trafo), nsims) # linear on sqrt-scale axis(2-horizontal, at=trafo$transform(aty), labels=aty, las=boxplot.args$las) ## add line showing observed size if (is.list(observed)) { nObs <- sum(observed(attr(x, "stsObserved"))) observed <- modifyList( list(col = 1, lty = 2, lwd = 2, labels = nObs, font = 2, las = boxplot.args$las, mgp = if (horizontal) c(3, 0.4, 0)), observed) observed_line <- c( setNames(list(trafo$transform(nObs)), if (horizontal) "v" else "h"), observed[c("col", "lty", "lwd")]) do.call("abline", observed_line) if (!is.null(observed[["labels"]])) do.call("axis", c( list(side = 2-horizontal, at = trafo$transform(nObs)), observed)) } ## numeric summary mysummary <- function(x) c(mean=mean(x), quantile(x, probs=c(0.025, 0.5, 0.975))) nsum <- t(apply(nsims, 2, mysummary)) invisible(nsum) } ### Plot mean time series of the simulated counts plotHHH4sims_time <- function ( x, average = mean, individual = length(x) == 1, conf.level = if (individual) 0.95 else NULL, #score = "rps", matplot.args = list(), initial.args = list(), legend = length(x) > 1, xlim = NULL, ylim = NULL, add = FALSE, ...) 
{ x <- as.hhh4simslist(x) nModels <- length(x) ytInit <- rowSums(attr(x, "initial")) stsObserved <- attr(x, "stsObserved") ytObs <- rowSums(observed(stsObserved)) ytSim <- aggregate.hhh4simslist(x, units = TRUE, time = FALSE, drop = TRUE) average <- match.fun(average) ytMeans <- vapply( X = ytSim, FUN = function (x) apply(x, 1, average), FUN.VALUE = numeric(length(ytObs)), USE.NAMES = TRUE) ## axis range if (is.null(xlim) && is.list(initial.args)) xlim <- c(1 - length(ytInit) - 0.5, length(ytObs) + 0.5) if (is.null(ylim)) ylim <- c(0, max(ytObs, if (individual) unlist(ytSim, recursive = FALSE, use.names = FALSE) else ytMeans)) ## graphical parameters stopifnot(is.list(matplot.args)) matplot.args <- modifyList( list(y = ytMeans, type = "b", lty = 1, lwd = 3, pch = 20, col = rainbow(nModels)), matplot.args) col <- rep_len(matplot.args$col, nModels) ## observed time series data during simulation period if (!add) plot(stsObserved, type = observed ~ time, xlim = xlim, ylim = ylim, ...) ## add initial counts if (is.list(initial.args)) { initial.args <- modifyList( list(x = seq(to = 0, by = 1, length.out = length(ytInit)), y = ytInit, type = "h", lwd = 5), initial.args) do.call("lines", initial.args) } ## add counts of individual simulation runs if (individual) { for (i in seq_len(nModels)) matlines(ytSim[[i]], lty=1, col=adjustcolor(col[i], alpha.f=0.1)) col <- ifelse(colSums(col2rgb(col)) == 0, "grey", adjustcolor(col, transform=diag(c(.5, .5, .5, 1)))) } ## add means (or medians) matplot.args[["col"]] <- col do.call("matlines", matplot.args) ## add CIs if (isScalar(conf.level)) { alpha2 <- (1-conf.level)/2 ytQuant <- lapply(ytSim, function (sims) t(apply(sims, 1, quantile, probs=c(alpha2, 1-alpha2)))) matlines(sapply(ytQuant, "[", TRUE, 1L), col=col, lwd=matplot.args$lwd, lty=2) matlines(sapply(ytQuant, "[", TRUE, 2L), col=col, lwd=matplot.args$lwd, lty=2) } ## add scores ## if (length(score)==1) { ## scorestime <- simplify2array( ## simscores(x, by="time", scores=score, plot=FALSE), ## higher=FALSE) ## matlines(scales::rescale(scorestime, to=ylim), ## lty=2, lwd=1, col=col) ## } ## add legend if (!identical(FALSE, legend)) { xnames <- if (is.vector(legend, mode = "character")) { if (length(legend) != length(x)) warning("'length(legend)' should be ", length(x)) legend } else { names(x) } legendArgs <- list(x="topright", legend=xnames, bty="n", col=col, lwd=matplot.args$lwd, lty=matplot.args$lty) if (is.list(legend)) legendArgs <- modifyList(legendArgs, legend) do.call("legend", legendArgs) } ## Done ret <- cbind(observed = ytObs, ytMeans) ## if (length(score) == 1) ## attr(ret, score) <- scorestime invisible(ret) } ### Better for a single model: "fanplot" plotHHH4sims_fan <- function (x, which = 1, fan.args = list(), observed.args = list(), initial.args = list(), means.args = NULL, key.args = NULL, xlim = NULL, ylim = NULL, add = FALSE, xaxis = list(), ...) 
{ x <- as.hhh4simslist(x)[[which]] ytInit <- rowSums(attr(x, "initial")) stsObserved <- attr(x, "stsObserved") ytObs <- rowSums(observed(stsObserved)) ytSim <- aggregate.hhh4sims(x, units = TRUE, time = FALSE, drop = TRUE) ## graphical parameters if (is.null(xlim) && is.list(initial.args)) xlim <- c(1 - length(ytInit) - 0.5, length(ytObs) + 0.5) stopifnot(is.list(fan.args)) fan.args <- modifyList( list(probs = seq.int(0.01, 0.99, 0.01)), fan.args, keep.null = TRUE) ## compute the quantiles quantiles <- t(apply(ytSim, 1, quantile, probs = fan.args$probs)) ## create (or add) the fanplot fanplot(quantiles = quantiles, probs = fan.args$probs, means = rowMeans(ytSim), observed = ytObs, fan.args = fan.args, means.args = means.args, observed.args = observed.args, key.args = key.args, xlim = xlim, ylim = ylim, add = add, xaxt = if (is.list(xaxis)) "n" else "s", ...) ## add initial counts if (is.list(initial.args)) { initial.args <- modifyList( list(x = seq(to = 0, by = 1, length.out = length(ytInit)), y = ytInit, type = "p", pch = 19), initial.args) do.call("lines", initial.args) } ## add time axis if (is.list(xaxis)) { xaxis <- modifyList(list(epochsAsDate = TRUE), xaxis) do.call("addFormattedXAxis", c(list(x = stsObserved), xaxis)) } invisible(NULL) } surveillance/R/sts_creation.R0000644000176200001440000000641513430613231015771 0ustar liggesusers################################################################################ ### Simulate count time series with outbreaks (following Noufaily et al, 2012) ### ### Copyright (C) 2014-2015 Maelle Salmon ################################################################################ sts_creation <- function(theta,beta,gamma1,gamma2,m,overdispersion,dates, sizesOutbreak,datesOutbreak,delayMax,alpha, densityDelay) { lengthT <- length(dates) # Baseline observed <- rep(NA,lengthT) upperbound <- rep(NA,lengthT) state <- logical(length=lengthT) for (t in 1:lengthT) { if (m==0){season=0} if (m==1){season=gamma1*cos(2*pi*t/52)+ gamma2*sin(2*pi*t/52)} if (m==2){season=gamma1*cos(2*pi*t/52)+ gamma2*sin(2*pi*t/52)+gamma1*cos(4*pi*t/52)+ gamma2*sin(4*pi*t/52)} mu <- exp(theta + beta*t + season) observed[t] <- rnbinom(mu=mu,size=overdispersion,n=1) upperbound[t] <- qnbinom(mu=mu,size=overdispersion,p=(1-alpha)) } # Outbreaks nOutbreaks <- length(sizesOutbreak) if (nOutbreaks>1){ dens <- lognormDiscrete(Dmax=20,logmu=0,sigma=0.5) for (i in 1:nOutbreaks){ tOutbreak <- which(dates==datesOutbreak[i]) numberOfCases <- rpois(n=1,lambda=sizesOutbreak[i]*(mu*(1+mu/overdispersion))) cases <- rep(0,length(dens)) if (numberOfCases!=0){ for (case in 1:numberOfCases){ t <- sample(x=1:length(dens),size=1,prob=dens) cases[t] <- cases[t] + 1 } } cases <- cases[cases>0] if(sum(cases)>0){ observed[tOutbreak:(tOutbreak+length(cases)-1)] <- observed[tOutbreak:(tOutbreak+length(cases)-1)] + cases state[tOutbreak:(tOutbreak+length(cases)-1)] <- TRUE } } } observed <- observed[1:lengthT] # Reporting triangle if (!is.null(densityDelay)){ # use density delay n <- matrix(0, lengthT, delayMax + 1,dimnames=list(as.character(dates),NULL)) for (t in 1:lengthT){ if(observed[t]!=0){ for (case in 1:observed[t]){ delay <- sample(x=0:delayMax,size=1,prob=densityDelay) if (delay > delayMax) {delay <- delayMax} n[t, delay + 1] <- n[t, delay + 1] + 1 } } } } else{ # Using a Poisson as for the outbreaks because it looks good n <- matrix(0, lengthT, delayMax + 1,dimnames=list(as.character(dates),NULL)) for (t in 1:lengthT){ if(observed[t]!=0){ for (case in 1:observed[t]){ delay <- rpois(n=1, lambda=1.5) if
(delay > delayMax) {delay <- delayMax} n[t, delay + 1] <- n[t, delay + 1] + 1 } } } } # Create the sts start <- unlist(isoWeekYear(dates[1]), use.names = FALSE) newSts <- new("sts", epoch = as.numeric(dates), start = start, upperbound = as.matrix(upperbound), freq = 52, observed = observed, state = as.matrix(state), epochAsDate = TRUE) newSts@control$reportingTriangle$n <- n return(newSts) } ## FUNCTION FOR DISCRETIZING THE LOG-NORMAL DISTRIBUTION lognormDiscrete <- function(Dmax=20,logmu=0,sigma=0.5){ Fd <- plnorm(0:Dmax, meanlog = logmu, sdlog = sigma) FdDmax <- plnorm(Dmax, meanlog = logmu, sdlog = sigma) #Normalize prob <- diff(Fd)/FdDmax return(prob) } surveillance/R/algo_call.R0000644000176200001440000001511013566727577015226 0ustar liggesusers################################################### ### chunk number 1: ################################################### # 'algo.quality' calculates quality values # like specificity, sensitivity for a surveillance method # # Parameters: # survResObj: object of class survRes, which includes the state chain and # the computed alarm chain ###################################################################### ## Hot fix function fixing two issues in the algo.quality function. ## ## Author: Michael Hoehle ## Date: 2015-11-24 ## ## 1) The function does not work if state or alarms are coded as TRUE/FALSE ## instead of 0/1. ## 2) algo.quality doesn't work for sts objects. ## ## The function now branches on the appropriate thing to do depending on ## what class the argument is. This is not necessarily very good object ## oriented programming, but it works for now. ###################################################################### algo.quality <- function (sts, penalty = 20) { if (inherits(sts, "survRes")) { state <- sts$disProgObj$state[sts$control$range] * 1 alarm <- sts$alarm * 1 } else { if (inherits(sts, "sts")) { if (ncol(sts) > 1) { stop("Function only works for univariate objects.") } state <- sts@state*1 alarm <- alarms(sts)*1 } else { stop(paste0("Class ",class(sts)," not supported!")) } } state <- factor(state, levels = c(0, 1)) alarm <- factor(alarm, levels = c(0, 1)) confusionTable <- table(state, alarm) sens = confusionTable[2, 2]/(confusionTable[2, 2] + confusionTable[2, 1]) spec = confusionTable[1, 1]/(confusionTable[1, 2] + confusionTable[1, 1]) TP = confusionTable[2, 2] FN = confusionTable[2, 1] TN = confusionTable[1, 1] FP = confusionTable[1, 2] dist = sqrt(((1 - spec) - 0)^2 + (sens - 1)^2) if (!(is.element(1, state))) { lag = 0 } else { lag <- c() outbegins <- c() varA <- which(state == 1) outbegins <- c(outbegins, varA[1]) if (length(varA) > 1) { varB <- diff(varA) outbegins <- c(outbegins, varA[which(varB != 1) + 1]) } count <- 1 for (i in outbegins) { if (count < length(outbegins)) { pos <- match(1, alarm[i:min(i + penalty, (outbegins[count + 1] - 1))]) if (is.na(pos)) { lag <- c(lag, penalty) } else { lag <- c(lag, pos - 1) } } else { pos <- match(1, alarm[i:min(i + penalty, length(alarm))]) if (is.na(pos)) { lag <- c(lag, penalty) } else { lag <- c(lag, pos - 1) } } count <- count + 1 } lag <- mean(lag) } result <- list(TP = TP, FP = FP, TN = TN, FN = FN, sens = sens, spec = spec, dist = dist, mlag = lag) class(result) <- "algoQV" return(result) } ################################################### ### chunk number 2: ################################################### print.algoQV <- function(x,...)
{ qualityValues <- c("TP", "FP", "TN", "FN", "Sens", "Spec", "dist", "mlag" ) class(x) <- "list" result <- t(as.matrix(x)) #Give the result matrix names dimnames(result)[[2]] <- qualityValues #Print to screen print(result) invisible() } ################################################### ### chunk number 3: ################################################### xtable.algoQV <- function(x, caption = NULL, label = NULL, align = NULL, digits = NULL, display = NULL, ...) { n <- names(x) x <- matrix(x,nrow=1) dimnames(x)[[2]] <- n xtable(x,caption, label, align, digits, display, ...) } ################################################### ### chunk number 4: ################################################### # 'algo.call' calls the defined surveillance algorithms for # a specified observed vector. # # Parameters: # disProgObj: object of class survRes, which includes the state chain and the observed counts # control: specifies which surveillance systems should be used with their parameters. # The parameter funcName and range must be specified where funcName must be # the appropriate function (without 'algo.') # range (in control): positions in observed which should be computed algo.call <- function(disProgObj, control = list( list(funcName = "rki1", range = range), list(funcName = "rki", range = range, b = 2, w = 4, actY = TRUE), list(funcName = "rki", range = range, b = 2, w = 5, actY = TRUE) ) ) { #Function to apply one algorithm to the disProgObj onecall <- function(i) { do.call(paste("algo.",control[[i]]$funcName, sep=""), list(disProgObj = disProgObj, control = control[[i]])) } #Apply each algorithm in the control list to the disProgObj survResults <- lapply(1:length(control),onecall) #Create some fancy naming.. names(survResults) <- lapply(survResults,function(survObj) {survObj$control$name}) #Done return(survResults) } ################################################### ### chunk number 5: ################################################### algo.compare <- function(survResList){ return(t(sapply(survResList,algo.quality))) } ################################################### ### chunk number 6: ################################################### algo.summary <- function(compMatrices){ # check if the input is large enough for summing if(length(compMatrices) < 1){ stop("It's an empty list!") } if(length(compMatrices) == 1){ return(compMatrices[[1]]) } #Stupid conversion...
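## (added note: algo.compare() builds each quality matrix via sapply() over
## algo.quality(), so its entries are stored as a matrix of lists; the lapply()
## below coerces every entry to numeric while preserving the dimnames, so that
## the matrices can be summed element-wise afterwards)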
compMatrices <- lapply(compMatrices,function(one) { n <- dimnames(one) one <- matrix(as.numeric(one),nrow=dim(one)[[1]]) dimnames(one) <- n return(one) }) # Compute the whole result wholeResult = compMatrices[[1]] lag = matrix(0,length(compMatrices),length(wholeResult[,1])) lag[1,] = wholeResult[,8] for(i in 2:length(compMatrices)){ wholeResult = wholeResult + compMatrices[[i]] lag[i,] = compMatrices[[i]][,8] } # Sens (TP/(TP+FN)) wholeResult[,5] = wholeResult[,1]/(wholeResult[,1]+wholeResult[,4]) # Spec (TN/(TN+FP)) wholeResult[,6] = wholeResult[,3]/(wholeResult[,2]+wholeResult[,3]) # dist wholeResult[,7] = sqrt((wholeResult[,6]-1)^2 + (wholeResult[,5]-1)^2) # mean(lag) for(i in 1:length(wholeResult[,1])){ wholeResult[i,8] = mean(lag[,i]) } #class(wholeResult) <- "compMatrix" # comparison matrix return(wholeResult) } surveillance/R/hhh4_amplitudeShift.R0000644000176200001440000000452313627533641017175 0ustar liggesusers## convert between sin/cos and amplitude/shift formulation ################################################### # y = gamma*sin(omega*t)+delta*cos(omega*t) # = A*sin(omega*t + phi) # with Amplitude A= sqrt(gamma^2+delta^2) # and shift phi= arctan(delta/gamma) ################################################# sinCos2amplitudeShift <- function(params){ # number of sin+cos terms lengthParams <- length(params) if(lengthParams %% 2 != 0) stop("wrong number of params") index.sin <- seq(1,lengthParams,by=2) one <- function(i=1){ coef.sin <- params[i] coef.cos <- params[i+1] amplitude <- sqrt(coef.cos^2+coef.sin^2) shift <- atan2(coef.cos, coef.sin) return(c(amplitude,shift)) } return(c(sapply(index.sin,one))) } amplitudeShift2sinCos <- function(params){ lengthParams <- length(params) if (lengthParams %% 2 != 0) stop("wrong number of params") index.A <- seq(1, lengthParams, by = 2) one <- function(i = 1) { coef.A <- params[i] coef.shift <- params[i + 1] coef.cos <- -coef.A*tan(coef.shift)/sqrt(1+tan(coef.shift)^2) coef.sin <- -coef.A/sqrt(1+tan(coef.shift)^2) return(c(coef.sin,coef.cos)) } return(c(sapply(index.A, one))) } ############################################## # y = gamma*sin(omega*t)+delta*cos(omega*t) # g(gamma,delta) = [sqrt(gamma^2+delta^2), arctan(delta/gamma) ]' # compute jacobian (dg_i(x)/dx_j)_ij ############################################# jacobianAmplitudeShift <- function(params){ # number of sin+cos terms lengthParams <- length(params) if(lengthParams %% 2 != 0) stop("wrong number of params") index.sin <- seq(1,lengthParams,by=2) # function to compute jacobian of the transformation sinCos2AmplitudeShift() one <- function(i=1){ coef.sin <- params[i] coef.cos <- params[i+1] dAmplitude.dcoef.sin <- coef.sin/sqrt(coef.cos^2+coef.sin^2) dAmplitude.dcoef.cos <- coef.cos/sqrt(coef.cos^2+coef.sin^2) dShift.dcoef.sin <- - coef.cos/(coef.cos^2+coef.sin^2) dShift.dcoef.cos <- coef.sin/(coef.cos^2+coef.sin^2) return(c(dAmplitude.dcoef.sin,dShift.dcoef.sin,dAmplitude.dcoef.cos,dShift.dcoef.cos)) } jacobi<-sapply(index.sin,one) res <- matrix(0,nrow=lengthParams,ncol=lengthParams) j<-0 for (i in index.sin){ j<-j+1 res[i:(i+1),i:(i+1)] <- jacobi[,j] } return(res) } surveillance/R/hhh4_methods.R0000644000176200001440000005436513627544033015664 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/.
### ### Standard methods for hhh4-fits ### ### Copyright (C) 2010-2012 Michaela Paul, 2012-2020 Sebastian Meyer ### $Revision: 2534 $ ### $Date: 2020-03-03 22:11:55 +0100 (Tue, 03. Mar 2020) $ ################################################################################ ## NOTE: we also apply print.hhh4 in print.summary.hhh4() print.hhh4 <- function (x, digits = max(3, getOption("digits")-3), ...) { if (!x$convergence) { cat('Results are not reliable! Try different starting values.\n') return(invisible(x)) } if (!is.null(x$call)) { cat("\nCall: \n", paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n\n", sep = "") } if (x$dim["random"] > 0) { cat('Random effects:\n') .printREmat(if (is.null(x$REmat)) .getREmat(x) else x$REmat, digits = digits) cat("\nFixed effects:\n") } else if (x$dim["fixed"] > 0) { cat("Coefficients:\n") } if (x$dim["fixed"] > 0) { print.default( format(if (is.null(x$fixef)) fixef.hhh4(x, ...) else x$fixef, digits=digits), quote = FALSE, print.gap = 2) } else cat("No coefficients\n") cat("\n") invisible(x) } ## get estimated covariance matrix of random effects .getREmat <- function (object) { ## return NULL if model has no random effects if (is.null(REmat <- object$Sigma)) return(NULL) ## hhh4()$Sigma is named since r791 only -> derive names from Sigma.orig if (is.null(dimnames(REmat))) dimnames(REmat) <- rep.int( list(sub("^sd\\.", "", names(object$Sigma.orig)[seq_len(nrow(REmat))])), 2L) attr(REmat, "correlation") <- cov2cor(REmat) attr(REmat, "sd") <- sqrt(diag(REmat)) REmat } .printREmat <- function (REmat, digits = 4) { V <- round(diag(REmat), digits=digits) corr <- round(attr(REmat, "correlation"), digits=digits) corr[upper.tri(corr,diag=TRUE)] <- "" V.corr <- cbind(V, corr, deparse.level=0) colnames(V.corr) <- c("Var", "Corr", rep.int("", ncol(corr)-1L)) print.default(V.corr, quote=FALSE) } summary.hhh4 <- function (object, maxEV = FALSE, ...) { ## do not summarize results in case of non-convergence if (!object$convergence) { cat('Results are not reliable! Try different starting values.\n') return(invisible(object)) } ret <- c(object[c("call", "convergence", "dim", "loglikelihood", "margll", "lags", "nTime", "nUnit")], list(fixef = fixef.hhh4(object, se=TRUE, ...), ranef = ranef.hhh4(object, ...), REmat = .getREmat(object), AIC = AIC(object), BIC = BIC(object), maxEV_range = if (maxEV) unique(range(getMaxEV(object))))) class(ret) <- "summary.hhh4" return(ret) } print.summary.hhh4 <- function (x, digits = max(3, getOption("digits")-3), ...) { ## x$convergence is always TRUE if we have a summary print.hhh4(x) # also works for summary.hhh4-objects if (!is.null(x$maxEV_range)) cat("Epidemic dominant eigenvalue: ", paste(sprintf("%.2f", x$maxEV_range), collapse = " -- "), "\n\n") if(x$dim["random"]==0){ cat('Log-likelihood: ',round(x$loglikelihood,digits=digits-2),'\n') cat('AIC: ',round(x$AIC,digits=digits-2),'\n') cat('BIC: ',round(x$BIC,digits=digits-2),'\n\n') } else { cat('Penalized log-likelihood: ',round(x$loglikelihood,digits=digits-2),'\n') cat('Marginal log-likelihood: ',round(x$margll,digits=digits-2),'\n\n') } cat('Number of units: ', x$nUnit, '\n') cat('Number of time points: ', x$nTime, '\n') if (!is.null(x$lags)) { # only available since surveillance 1.8-0 if (!is.na(x$lags["ar"]) && x$lags["ar"] != 1) cat("Non-default autoregressive lag: ", x$lags[["ar"]], "\n") if (!is.na(x$lags["ne"]) && x$lags["ne"] != 1) cat("Non-default neighbor-driven lag: ", x$lags[["ne"]], "\n") } cat("\n") invisible(x) } terms.hhh4 <- function (x, ...) 
{ if (is.null(x$terms)) interpretControl(x$control,x$stsObj) else x$terms } nobs.hhh4 <- function (object, ...) { if (object$convergence) object$nObs else NA_real_ } logLik.hhh4 <- function(object, ...) { val <- if (object$convergence) object$loglikelihood else { warning("algorithm did not converge") NA_real_ } attr(val, "df") <- if (object$dim["random"]) NA_integer_ else object$dim[["fixed"]] # use "[[" to drop the name attr(val, "nobs") <- nobs.hhh4(object) class(val) <- "logLik" val } coef.hhh4 <- function(object, se=FALSE, reparamPsi=TRUE, idx2Exp=NULL, amplitudeShift=FALSE, ...) { if (identical(object$control$family, "Poisson")) reparamPsi <- FALSE coefs <- object$coefficients coefnames <- names(coefs) idx <- getCoefIdxRenamed(coefnames, reparamPsi, idx2Exp, amplitudeShift, warn=!se) ## transform and rename if (length(idx$Psi)) { coefs[idx$Psi] <- exp(-coefs[idx$Psi]) # -log(overdisp) -> overdisp coefnames[idx$Psi] <- names(idx$Psi) } if (length(idx$toExp)) { coefs[idx$toExp] <- exp(coefs[idx$toExp]) coefnames[idx$toExp] <- names(idx$toExp) } if (length(idx$AS)) { coefs[idx$AS] <- sinCos2amplitudeShift(coefs[idx$AS]) coefnames[idx$AS] <- names(idx$AS) } ## set new names names(coefs) <- coefnames if (se) { cov <- vcov.hhh4(object, reparamPsi=reparamPsi, idx2Exp=idx2Exp, amplitudeShift=amplitudeShift) cbind("Estimate"=coefs, "Std. Error"=sqrt(diag(cov))) } else coefs } vcov.hhh4 <- function (object, reparamPsi=TRUE, idx2Exp=NULL, amplitudeShift=FALSE, ...) { if (identical(object$control$family, "Poisson")) reparamPsi <- FALSE idx <- getCoefIdxRenamed(names(object$coefficients), reparamPsi, idx2Exp, amplitudeShift, warn=FALSE) newcoefs <- coef.hhh4(object, se=FALSE, reparamPsi=reparamPsi, idx2Exp=idx2Exp, amplitudeShift=amplitudeShift) ## Use multivariate Delta rule => D %*% vcov %*% t(D), D: Jacobian. ## For idx2Exp and reparamPsi, we only transform coefficients independently, ## i.e. 
D is diagonal (with elements 'd') d <- rep.int(1, length(newcoefs)) if (length(idx$Psi)) # h = exp(-psi), h' = -exp(-psi) d[idx$Psi] <- -newcoefs[idx$Psi] if (length(idx$toExp)) # h = exp(coef), h' = exp(coef) d[idx$toExp] <- newcoefs[idx$toExp] ## For the amplitude/shift-transformation, D is non-diagonal vcov <- if (length(idx$AS)) { D <- diag(d, length(d)) D[idx$AS,idx$AS] <- jacobianAmplitudeShift(newcoefs[idx$AS]) D %*% object$cov %*% t(D) } else t(t(object$cov*d)*d) # 30 times faster than via matrix products dimnames(vcov) <- list(names(newcoefs), names(newcoefs)) vcov } getCoefIdxRenamed <- function (coefnames, reparamPsi=TRUE, idx2Exp=NULL, amplitudeShift=FALSE, warn=TRUE) { ## indexes of overdispersion parameters idxPsi <- if (reparamPsi) { idxPsi <- grep("-log(overdisp", coefnames, fixed=TRUE) ## change labels from "-log(overdisp.xxx)" to "overdisp.xxx" names(idxPsi) <- substr(coefnames[idxPsi], start=6, stop=nchar(coefnames[idxPsi])-1L) if (length(idxPsi) == 0L) { # backward compatibility (internal psi coef # was named "overdisp" prior to r406) idxPsi <- grep("^overdisp", coefnames) names(idxPsi) <- coefnames[idxPsi] } idxPsi } else NULL ## indexes of *pairs* of sine-cosine coefficients idxAS <- if (amplitudeShift) { idx_sin <- grep(".sin(", coefnames, fixed=TRUE) idx_cos <- match(sub(".sin(", ".cos(", coefnames[idx_sin], fixed=TRUE), coefnames) if (anyNA(idx_cos)) stop("failed to detect sine-cosine pairs") idxAS <- c(rbind(idx_sin, idx_cos)) # pairwise coefficients names(idxAS) <- sub(".sin", ".A", coefnames[idxAS], fixed=TRUE) names(idxAS) <- sub(".cos", ".s", names(idxAS), fixed=TRUE) idxAS } else NULL ## indexes of coefficients to exp()-transform if (isTRUE(idx2Exp)) { idxLogCovar <- grep(".log(", coefnames, fixed = TRUE) idx2Exp <- setdiff(seq_along(coefnames), c(idxLogCovar, idxPsi, idxAS)) } else if (length(idx2Exp)) { stopifnot(is.vector(idx2Exp, mode = "numeric")) ## index sets must be disjoint if (length(idxOverlap <- intersect(c(idxPsi, idxAS), idx2Exp))) { if (warn) warning("following 'idx2Exp' were ignored due to overlap: ", paste(idxOverlap, collapse=", ")) idx2Exp <- setdiff(idx2Exp, idxOverlap) } } if (length(idx2Exp)) names(idx2Exp) <- paste0("exp(", coefnames[idx2Exp], ")") ## done list(Psi=idxPsi, AS=idxAS, toExp=idx2Exp) } fixef.hhh4 <- function (object,...) { if (object$dim[1L] > 0) { head(coef.hhh4(object, ...), object$dim[1L]) } else NULL } ranef.hhh4 <- function (object, tomatrix = FALSE, intercept = FALSE, ...) { if (object$dim[2L] > 0){ ranefvec <- tail(coef.hhh4(object, ...), object$dim[2L]) } else return(NULL) if (intercept) tomatrix <- TRUE if (!tomatrix) return(ranefvec) ## transform to a nUnits x c matrix (c %in% 1:3) model <- terms.hhh4(object) idxRE <- model$indexRE idxs <- unique(idxRE) mat <- vapply(X = idxs, FUN = function (idx) { RE <- ranefvec[idxRE==idx] Z <- model$terms["Z.intercept",][[idx]] "%m%" <- get(model$terms["mult",][[idx]]) c(Z %m% RE) }, FUN.VALUE = numeric(model$nUnits), USE.NAMES = FALSE) dimnames(mat) <- list( colnames(model$response), model$namesFE[match(idxs, model$indexFE)] ) if (intercept) { FE <- object$coefficients[colnames(mat)] mat <- t(t(mat) + FE) } return(mat) } ## adaption of stats::confint.default authored by the R Core Team confint.hhh4 <- function (object, parm, level = 0.95, reparamPsi=TRUE, idx2Exp=NULL, amplitudeShift=FALSE, ...) { cf <- coef.hhh4(object, se=TRUE, reparamPsi=reparamPsi, idx2Exp=idx2Exp, amplitudeShift=amplitudeShift, ...) 
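    ## (added note: 'cf' contains estimates and standard errors on the chosen,
    ## possibly transformed, scale; the code below computes Wald intervals,
    ## i.e. estimate + qnorm(c((1-level)/2, 1-(1-level)/2)) * se)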
## CAVE: random intercepts have no names (all "") if (missing(parm)) parm <- seq_len(nrow(cf)) pnames <- if (is.numeric(parm)) rownames(cf)[parm] else parm a <- (1 - level)/2 a <- c(a, 1 - a) pct <- paste(format(100*a, trim=TRUE, scientific=FALSE, digits=3), "%") fac <- qnorm(a) ci <- array(NA, dim = c(length(parm), 2L), dimnames = list(pnames, pct)) ses <- cf[parm,2] ci[] <- cf[parm,1] + ses %o% fac ci } ## mean predictions for a subset of 1:nrow(object$stsObj) predict.hhh4 <- function(object, newSubset = object$control$subset, type = "response", ...) { if (type == "response" && all((m <- match(newSubset, object$control$subset, nomatch=0L)) > 0)) { ## we can extract fitted means from object object$fitted.values[m,,drop=FALSE] } else { ## means for time points not fitted (not part of object$control$subset) predicted <- meanHHH(coef.hhh4(object, reparamPsi=FALSE), terms.hhh4(object), subset=newSubset) if (type=="response") predicted$mean else { type <- match.arg(type, names(predicted)) predicted[[type]] } } } ### refit hhh4-model ## ...: arguments modifying the original control list ## S: a named list to adjust the number of harmonics of the three components ## subset.upper: refit on a subset of the data up to that time point ## use.estimates: use fitted parameters as new start values update.hhh4 <- function (object, ..., S = NULL, subset.upper = NULL, use.estimates = object$convergence, evaluate = TRUE) { control <- object$control ## first modify the control list according to the components in ... extras <- list(...) control <- modifyList(control, extras) ## adjust start values control$start <- if (use.estimates) { # use parameter estimates hhh4coef2start(object) } else local({ # re-use previous 'start' specification ## for pre-1.8-2 "hhh4" objects, ## object$control$start is not necessarily a complete list: template <- eval(formals(hhh4)$control$start) template[] <- object$control$start[names(template)] template }) ## and update according to an extra 'start' argument if (!is.null(extras[["start"]])) { if (!is.list(extras$start) || is.null(names(extras$start))) { stop("'start' must be a named list, see 'help(\"hhh4\")'") } control$start[] <- mapply( FUN = function (now, extra) { if (is.null(names(extra))) { extra } else { # can retain non-extra values now[names(extra)] <- extra now } }, control$start, extras$start[names(control$start)], SIMPLIFY = FALSE, USE.NAMES = FALSE ) } ## update initial values of parametric weight function if (use.estimates && length(coefW <- coefW(object)) && ! 
"weights" %in% names(extras$ne)) { # only if function is unchanged control$ne$weights$initial <- coefW } ## adjust seasonality if (!is.null(S)) { stopifnot(is.list(S), !is.null(names(S)), names(S) %in% c("ar", "ne", "end")) control[names(S)] <- mapply(function (comp, S) { comp$f <- addSeason2formula(removeSeasonFromFormula(comp$f), period = object$stsObj@freq, S = S) comp }, control[names(S)], S, SIMPLIFY=FALSE, USE.NAMES=FALSE) } ## use a different time range of the data (only changing the end) ## Note: surveillance < 1.15.0 disallowed subset.upper > max(control$subset) if (isScalar(subset.upper)) { if (subset.upper < control$subset[1L]) stop("'subset.upper' is smaller than the lower bound of 'subset'") control$subset <- control$subset[1L]:subset.upper } ## fit the updated model or just return the modified control list if (evaluate) { hhh4(stsObj = object$stsObj, control = control) } else { control } } ## remove sine-cosine terms from a formula ## f: usually a model "formula", but can generally be of any class for which ## terms() and formula() apply removeSeasonFromFormula <- function (f) { fterms <- terms(f, keep.order = TRUE) ## search sine-cosine terms of the forms "sin(..." and "fe(sin(..." idxSinCos <- grep("^(fe\\()?(sin|cos)\\(", attr(fterms, "term.labels")) formula(if (length(idxSinCos)) fterms[-idxSinCos] else f) } ## remove all temporal terms from a formula removeTimeFromFormula <- function (f, timevar = "t") { fterms <- terms(f, keep.order = TRUE) containsTime <- vapply(attr(fterms, "variables")[-1L], FUN = function (x) timevar %in% all.vars(x), FUN.VALUE = TRUE, USE.NAMES = FALSE) formula(if (any(containsTime)) fterms[!containsTime] else f) } ## convert fitted parameters to a list suitable for control$start hhh4coef2start <- function (fit) { res <- list(fixed = fit$coefficients[seq_len(fit$dim[1L])], random = fit$coefficients[fit$dim[1L]+seq_len(fit$dim[2L])], sd.corr = fit$Sigma.orig) if (any(!nzchar(names(res$random)))) { # no names pre 1.8-2 names(res$random) <- NULL } res } ## extract coefficients in a list coeflist.hhh4 <- function (x, ...) { ## determine number of parameters by parameter group model <- terms.hhh4(x) dim.fe.group <- unlist(model$terms["dim.fe",], recursive = FALSE, use.names = FALSE) dim.re.group <- unlist(model$terms["dim.re",], recursive = FALSE, use.names = FALSE) nFERE <- lapply(X = list(fe = dim.fe.group, re = dim.re.group), FUN = function (dims) { nParByComp <- tapply( X = dims, INDEX = factor( unlist(model$terms["offsetComp",], recursive = FALSE, use.names = FALSE), levels = 1:3, labels = c("ar", "ne", "end")), FUN = sum, simplify = TRUE) nParByComp[is.na(nParByComp)] <- 0 # component not in model nParByComp }) ## extract coefficients in a list (by parameter group) coefs <- coef.hhh4(x, se = FALSE, ...) 
list(fixed = coeflist.default(coefs[seq_len(x$dim[1L])], c(nFERE$fe, "neweights" = model$nd, "overdisp" = model$nOverdisp)), random = coeflist.default(coefs[x$dim[1L] + seq_len(x$dim[2L])], nFERE$re), sd.corr = x$Sigma.orig) } ## extract estimated overdispersion in dnbinom() parametrization (and as matrix) psi2size.hhh4 <- function (object, subset = object$control$subset, units = NULL) { size <- sizeHHH(object$coefficients, terms.hhh4(object), subset = subset) if (!is.null(size) && !is.null(units)) { if (is.null(subset)) { warning("ignoring 'units' (not compatible with 'subset = NULL')") size } else { size[, units, drop = FALSE] } } else { size } } ## character vector of model components that are "inModel" componentsHHH4 <- function (object) names(which(sapply(object$control[c("ar", "ne", "end")], "[[", "inModel"))) ## deviance residuals residuals.hhh4 <- function (object, type = c("deviance", "response"), ...) { type <- match.arg(type) obs <- observed(object$stsObj)[object$control$subset,] fit <- fitted(object) if (type == "response") return(obs - fit) ## deviance residuals ## Cf. residuals.ah, it calculates: ## deviance = sign(y - mean) * sqrt(2 * (distr(y) - distr(mean))) ## pearson = (y - mean)/sqrt(variance) dev.resids <- if (identical(object$control$family, "Poisson")) { poisson()$dev.resids } else { size <- if (identical(object$control$family, "NegBin1")) { psi2size.hhh4(object, subset = NULL) } else { psi2size.hhh4(object) # CAVE: a matrix -> non-standard "size" } negative.binomial(size)$dev.resids } di2 <- dev.resids(y=obs, mu=fit, wt=1) sign(obs-fit) * sqrt(pmax.int(di2, 0)) } ## extract the formulae of the three log-linear predictors formula.hhh4 <- function (x, ...) { lapply(x$control[c("ar", "ne", "end")], "[[", "f") } ## decompose the fitted mean of a "hhh4" model returning an array ## with dimensions (t, i, j), where the first j index is "endemic" decompose.hhh4 <- function (x, coefs = x$coefficients, ...) 
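## Usage sketch (again with a hypothetical fit 'fit'): decompose.hhh4(fit)[t, i, ]
## gives, for time point t and unit i, the endemic mean ("endemic") followed by
## the epidemic contribution attributed to each unit j (j = i holds the
## autoregressive part).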
{ ## get three major components from meanHHH() function meancomps <- meanHHH(coefs, terms.hhh4(x)) ## this contains c("endemic", "epi.own", "epi.neighbours") ## but we really want the mean by neighbour neArray <- c(meancomps$ne.exppred) * neOffsetArray(x, coefW(coefs)) ##<- ne.exppred is (t, i) and recycled for (t, i, j) stopifnot(all.equal(rowSums(neArray, dims = 2), meancomps$epi.neighbours, check.attributes = FALSE)) ## add autoregressive part to neArray diagidx <- cbind(c(row(meancomps$epi.own)), c(col(meancomps$epi.own)), c(col(meancomps$epi.own))) ## usually: neArray[diagidx] == 0 neArray[diagidx] <- neArray[diagidx] + meancomps$epi.own ## add endemic component to the array res <- array(c(meancomps$endemic, neArray), dim = dim(neArray) + c(0, 0, 1), dimnames = with(dimnames(neArray), list(t=t, i=i, j=c("endemic",j)))) stopifnot(all.equal(rowSums(res, dims = 2), meancomps$mean, check.attributes = FALSE)) res } ## get the w_{ji} Y_{j,t-1} values from a hhh4() fit ## (i.e., before summing the neighbourhood component over j) ## in an array with dimensions (t, i, j) neOffsetArray <- function (object, pars = coefW(object), subset = object$control$subset) { ## initialize array ordered as (j, t, i) for apply() below res <- array(data = 0, dim = c(object$nUnit, length(subset), object$nUnit), dimnames = list( "j" = colnames(object$stsObj), "t" = rownames(object$stsObj)[subset], "i" = colnames(object$stsObj))) ## calculate array values if the fit has an NE component if ("ne" %in% componentsHHH4(object)) { W <- getNEweights(object, pars = pars) Y <- observed(object$stsObj) tm1 <- subset - object$control$ne$lag is.na(tm1) <- tm1 <= 0 tYtm1 <- t(Y[tm1,,drop=FALSE]) res[] <- apply(W, 2L, function (wi) tYtm1 * wi) offset <- object$control$ne$offset res <- if (length(offset) > 1L) { offset <- offset[subset,,drop=FALSE] res * rep(offset, each = object$nUnit) } else { res * offset } ## stopifnot(all.equal( ## colSums(res), # sum over j ## terms.hhh4(object)$offset$ne(pars)[subset,,drop=FALSE], ## check.attributes = FALSE)) } ## permute dimensions as (t, i, j) aperm(res, perm = c(2L, 3L, 1L), resize = TRUE) } ## compare two hhh4 fits ignoring at least the "runtime" and "call" elements all.equal.hhh4 <- function (target, current, ..., ignore = NULL) { if (!inherits(target, "hhh4")) return("'target' is not a \"hhh4\" object") if (!inherits(current, "hhh4")) return("'current' is not a \"hhh4\" object") ignore <- unique.default(c(ignore, "runtime", "call")) target[ignore] <- current[ignore] <- list(NULL) NextMethod("all.equal") } surveillance/R/epidata_animate.R0000644000176200001440000001377013433272425016414 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Two types of spatio-temporal animations of "epidata" are supported: ### - sequential plots regardless of time between events (i.e. only ordering) ### - chronological animation with timer ### ### Copyright (C) 2008-2009, 2012, 2014, 2019 Sebastian Meyer ### $Revision: 2357 $ ### $Date: 2019-02-20 16:41:09 +0100 (Wed, 20. Feb 2019) $ ################################################################################ animate.epidata <- function (object, ...) { s <- summary(object) animate.summary.epidata(s, ...) 
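    ## Usage sketch: for an "epidata" object 'epi' (e.g., created via
    ## as.epidata(); not defined here), animate(epi, time.spacing = 0.1)
    ## produces a chronological animation with a timer legend.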
} animate.summary.epidata <- function (object, main = "An animation of the epidemic", pch = 19, col = c(3, 2, gray(0.6)), time.spacing = NULL, sleep = quote(5/.nTimes), legend.opts = list(), timer.opts = list(), end = NULL, generate.snapshots = NULL, ...) { counters <- object[["counters"]] # remove pseudo-R-events, which come before S-event directSevents <- which(duplicated(counters[["time"]])) counters_noPseudoR <- if (length(directSevents)) { counters[-(directSevents-1), ] } else { counters } # remove initial row and keep essential columns eventTable <- counters_noPseudoR[-1, c("time", "type", "id")] eventTable[["type"]] <- unclass(eventTable[["type"]]) # get integer codes .nTimes <- nrow(eventTable) # extract initial individual information (id, at-risk, coordinates) coords <- object[["coordinates"]] d <- ncol(coords) if (d > 2L) { stop("spatial plotting in more than two dimensions is not implemented") } else if (d == 1L) { coords <- cbind(coords, 0) } else if (d == 0L) { stop ("'object' does not contain any defined coordinates") } # plot the initial state pch <- rep(pch, length.out = 3) col <- rep(col, length.out = 3) isInitiallyInfected <- rownames(coords) %in% object[["initiallyInfected"]] plot(coords, pch = ifelse(isInitiallyInfected, pch[2L], pch[1L]), col = ifelse(isInitiallyInfected, col[2L], col[1L]), main = main, ...) if (is.list(legend.opts)) { if (is.null(legend.opts[["x",exact=TRUE]])) legend.opts$x <- "topright" if (is.null(legend.opts$legend)) legend.opts$legend <- c("susceptible", "infectious", "removed") if (is.null(legend.opts$col)) legend.opts$col <- col if (is.null(legend.opts$pch)) legend.opts$pch <- pch do.call(legend, legend.opts) } # animate the epidemic by iteratively re-drawing points at the coordinates sleep <- eval(sleep) if (is.null(time.spacing)) { # plot events sequentially for(i in seq_len(.nTimes)) { if (dev.interactive()) Sys.sleep(sleep) tmp <- eventTable[i,] # c(time, type, id) points(coords[as.character(tmp[["id"]]),,drop=FALSE], pch = pch[tmp[["type"]]], col = col[tmp[["type"]]]) } } else { # plot events chronologically if (is.null(end)) end <- eventTable[.nTimes, "time"] + time.spacing timeGrid <- seq(from = time.spacing, to = end, by = time.spacing) timeWidth <- nchar(timeGrid[length(timeGrid)]) timeDigits <- if (grepl(".", as.character(time.spacing), fixed = TRUE)) { nchar(strsplit(as.character(time.spacing), split = ".", fixed = TRUE)[[1L]][2L]) } else 0 form <- paste("%", timeWidth, ".", timeDigits, "f", sep = "") if (is.list(timer.opts)) { if (is.null(timer.opts[["x",exact=TRUE]])) timer.opts$x <- "bottomright" if (is.null(timer.opts$title)) timer.opts$title <- "time" if (is.null(timer.opts$box.lty)) timer.opts$box.lty <- 0 if (is.null(timer.opts$adj)) timer.opts$adj <- c(0.5,0.5) if (is.null(timer.opts$inset)) timer.opts$inset <- 0.01 if (is.null(timer.opts$bg)) timer.opts$bg <- "white" do.call(legend, c(list(legend = sprintf(form, 0)), timer.opts)) } oldtp <- tp <- attr(object, "timeRange")[1L] i <- 1L # to be used in the file argument in dev.print if (is.vector(generate.snapshots, mode="character") && length(generate.snapshots) == 1L && requireNamespace("animation")) { img.name <- generate.snapshots ani.dev <- animation::ani.options("ani.dev") if (is.character(ani.dev)) ani.dev <- get(ani.dev) imgdir <- animation::ani.options("imgdir") imgtype <- animation::ani.options("ani.type") generate.snapshots <- list( device = ani.dev, file = quote(file.path(imgdir, paste0(img.name,i,".",imgtype))), width = animation::ani.options("ani.width"), height = 
animation::ani.options("ani.height") ) } if (is.list(generate.snapshots)) { do.call(dev.print, generate.snapshots) } for(i in 1L+seq_along(timeGrid)) { tp <- timeGrid[i-1L] if (dev.interactive()) Sys.sleep(sleep) timeIndex <- which(eventTable[["time"]] > oldtp & eventTable[["time"]] <= tp) if (length(timeIndex) > 0L) { tmp <- eventTable[timeIndex,] # c(time, type, id) points(coords[as.character(tmp[["id"]]),,drop=FALSE], pch = pch[tmp[["type"]]], col = col[tmp[["type"]]]) } if (is.list(timer.opts)) { do.call(legend, c(list(legend = sprintf(form,tp)), timer.opts)) } oldtp <- tp if (is.list(generate.snapshots)) { do.call(dev.print, generate.snapshots) } } } invisible(NULL) } surveillance/R/plapply.R0000644000176200001440000001070313621207062014753 0ustar liggesusers################################################################################ ### Parallelized lapply (wrapping around mclapply and parLapply) ### taking care of the random seed and printing progress information ### ### Copyright (C) 2015 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at https://www.R-project.org/Licenses/. ################################################################################ plapply <- function (X, FUN, ..., .parallel = 1, .seed = NULL, .verbose = TRUE) { if (!(useCluster <- inherits(.parallel, "cluster"))) { stopifnot(length(.parallel) == 1L, is.vector(.parallel, "numeric"), .parallel >= 1) .parallel <- as.vector(.parallel, mode = "integer") if (.Platform$OS.type == "windows" && .parallel > 1L) { useCluster <- TRUE .parallel <- parallel::makeCluster(.parallel) on.exit(parallel::stopCluster(.parallel)) } } FUN <- match.fun(FUN) .FUN <- if (useCluster || is.primitive(FUN)) { FUN # no support for reporting to the master || add.on.exit } else { # be verbose on.exit of FUN verboseExpr <- if (isTRUE(.verbose)) { ## progress bar or dots if (.parallel == 1L && interactive()) { env <- new.env(hash = FALSE, parent = environment(FUN)) environment(FUN) <- env # where the progress bar lives env$pb <- txtProgressBar(min = 0, max = length(X), initial = 0, style = 3) on.exit(close(env$pb), add = TRUE) quote(setTxtProgressBar(pb, pb$getVal() + 1L)) } else { on.exit(cat("\n"), add = TRUE) quote(cat(".")) } } else if (is.call(.verbose) || is.expression(.verbose)) { ## custom call or expression .verbose } else if (is.character(.verbose)) { ## custom progress symbol on.exit(cat("\n"), add = TRUE) substitute(cat(.verbose)) } # else NULL (no output) ## add on.exit(verboseExpr) to body(FUN) do.call(add.on.exit, list(FUN, verboseExpr)) } ## set random seed for reproducibility if (!is.null(.seed)) { if (useCluster) { parallel::clusterSetRNGStream(cl = .parallel, iseed = .seed) } else { if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) { set.seed(NULL) # initialize } .orig.seed <- get(".Random.seed", envir = .GlobalEnv) on.exit(assign(".Random.seed", .orig.seed, envir = .GlobalEnv), add = TRUE) if (.parallel == 1L) { set.seed(seed = .seed) } else { stopifnot(requireNamespace("parallel", quietly = TRUE)) ## Note @ R 3.1.3: this loading of package "parallel" ## before set.seed() is crucial; otherwise, the first run of ## plapply() would not be reproducible !!! set.seed(seed = .seed, kind = "L'Ecuyer-CMRG") parallel::mc.reset.stream() } } } ## rock'n'roll if (useCluster) { parallel::parLapply(cl = .parallel, X = X, fun = .FUN, ...) 
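        ## (this cluster branch also serves Windows, where the forking used
        ##  below is unavailable; a reproducible usage sketch:
        ##    plapply(1:3, function (i) rnorm(1), .parallel = 2, .seed = 1)
        ##  returns the same list of random numbers on every call)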
    } else if (.parallel == 1L) {
        lapply(X = X, FUN = .FUN, ...)
    } else { # use forking
        parallel::mclapply(X = X, FUN = .FUN, ...,
                           mc.preschedule = TRUE, mc.set.seed = TRUE,
                           mc.silent = FALSE, mc.cores = .parallel)
    }
}

## add an on.exit() statement at the beginning of a function
add.on.exit <- function (FUN, expr)
{
    FUN <- match.fun(FUN)
    if (is.null(expr <- substitute(expr))) {
        return(FUN)
    }
    if (is.primitive(FUN)) { # body(FUN) is NULL
        stop("not implemented for primitive functions")
    }
    onexitexpr <- substitute(on.exit(expr))
    obody <- body(FUN)
    body(FUN) <- if (is.call(obody) && identical(as.name("{"), obody[[1L]])) {
        ## body(FUN) is a braced expression (usual case)
        ## and we insert on.exit(expr) directly after "{"
        as.call(append(x = as.list(obody), values = onexitexpr, after = 1L))
    } else {
        ## body(FUN) is a symbol or a single call like UseMethod("print")
        as.call(c(as.name("{"), onexitexpr, obody))
    }
    FUN
}
surveillance/R/sim_pointSource.R0000644000176200001440000000423110662666102016463 0ustar liggesusers###################################################
### chunk number 1:
###################################################
# Programme to simulate epidemics which were
# introduced by point sources.
# The basis of this programme is a combination of
# a Hidden Markov Model (to get random dates
# for outbreaks) and a simple model to simulate
# the epidemic.
#
# Parameters:
# r - probability to get a new epidemic at time i if there was one
#     at time i-1
# p - probability to get no new epidemic at time i if there was none
#     at time i-1
# length - number of timesteps to visit
#
# Parameters for the background:
# A - Amplitude, default = 1.
# alpha - Incidence, default = 1.
# beta - time-dependent regression coefficient, default = 0.
# phi - weeks of seasonal shift, default = 0.
# frequency - frequency of the sine, default = 1.
# state - an optionally supplied Markov chain,
#         which defines the status at each time (outbreak or not)
# K - additional weight for an outbreak

sim.pointSource <- function(p = 0.99, r = 0.01, length = 400, A = 1,
                            alpha = 1, beta = 0, phi = 0,
                            frequency = 1, state = NULL, K){
  if(is.null(state)){
    # create a Markov chain
    state <- matrix(data = 0, ncol = 1, nrow = length)
    state[1] <- 0 #hoehle - fix: rbinom(1,1,0.5) # always begin with a zero

    # create the transition matrix
    transitionMatrix <- matrix(data = c(p, (1-r),(1-p), r), nrow = 2, ncol = 2)

    if(length(state) > 1){ # just do it if there is a preceding value
      for (i in 2:length){
        # look up the correct row of the transition matrix and take the
        # corresponding probability. The last value of state is the newest.
        state[i] <- rbinom(1,1,transitionMatrix[state[i-1] + 1, 2])
      }
    }
  }

  # make sure to have the right length as parameter
  length <- length(state)
  observed <- sim.seasonalNoise(A, alpha, beta, phi, length,
                                frequency, state, K)$seasonalBackground

  result <- list(observed = observed, state = state, A = A,
                 alpha = alpha, beta = beta, K = K, p = p, r = r,
                 freq=52, start=c(2001,1))
  class(result) = "disProg" # for disease progress
  return(result)
}
surveillance/R/hhh4_plot.R0000644000176200001440000010207713606037402015173 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
### ### Plot-method(s) for fitted hhh4() models ### ### Copyright (C) 2010-2012 Michaela Paul, 2012-2020 Sebastian Meyer ### $Revision: 2511 $ ### $Date: 2020-01-10 10:08:18 +0100 (Fri, 10. Jan 2020) $ ################################################################################ plot.hhh4 <- function (x, type = c("fitted", "season", "maxEV", "maps", "ri", "neweights"), ...) { stopifnot(x$convergence) cl <- sys.call() # not match.call() because plotHHH4_season() has no 'x' ## remove the type argument from the call if (is.null(names(cl)) && nargs() > 1L) { # unnamed call plot(x, type) cl[[3L]] <- NULL # remove the second argument } else { cl$type <- NULL } cl[[1L]] <- as.name(paste("plotHHH4", match.arg(type), sep="_")) eval(cl, envir = parent.frame()) } ### ### Time series of fitted component means and observed counts for selected units ### plotHHH4_fitted <- function (x, units = 1, names = NULL, col = c("grey85", "blue", "orange"), pch = 19, pt.cex = 0.6, pt.col = 1, par.settings = list(), legend = TRUE, legend.args = list(), legend.observed = FALSE, decompose = NULL, total = FALSE, meanHHH = NULL, ...) { if (total) { units <- "Overall" # only used as a label } else if (is.null(units)) { units <- seq_len(x$nUnit) } if (!is.null(names)) stopifnot(length(units) == length(names)) if (isTRUE(decompose)) decompose <- colnames(x$stsObj) ## get decomposed mean => no need to compute it in each plotHHH4_fitted1() if (is.null(meanHHH)) { meanHHH <- if (is.null(decompose)) { meanHHH(x$coefficients, terms.hhh4(x)) } else { decompose.hhh4(x) } } ## check color vector col <- if (is.null(decompose) && length(col) == 4) { ## compatibility with surveillance < 1.10-0 pt.col <- col[4L] rev(col[-4L]) } else { plotHHH4_fitted_check_col_decompose(col, decompose) } ## setup graphical parameters if (is.list(par.settings)) { par.defaults <- list(mfrow = sort(n2mfrow(length(units))), mar = c(4,4,2,0.5)+.1, las = 1) par.settings <- modifyList(par.defaults, par.settings) opar <- do.call("par", par.settings) on.exit(par(opar)) } ## legend options if (is.logical(legend)) legend <- which(legend) if (!is.list(legend.args)) { if (length(legend) > 0) warning("ignored 'legend' since 'legend.args' is not a list") legend <- integer(0L) } if (length(legend) > 0) { legendidx <- 1L + c( if (legend.observed && !is.na(pch)) 0L, if (is.null(decompose)) { which(c("ne","ar","end") %in% componentsHHH4(x)) } else seq_along(col)) default.args <- list( x="topright", col=c(pt.col,rev(col))[legendidx], lwd=6, lty=c(NA,rep.int(1,length(col)))[legendidx], pch=c(pch,rep.int(NA,length(col)))[legendidx], pt.cex=pt.cex, pt.lwd=1, bty="n", inset=0.02, legend=if (is.null(decompose)) { c("observed","spatiotemporal","autoregressive","endemic")[legendidx] } else c("observed", rev(decompose), "endemic")[legendidx] ) legend.args <- modifyList(default.args, legend.args) } ## plot fitted values region by region meanHHHunits <- vector(mode="list", length=length(units)) names(meanHHHunits) <- if (is.character(units)) units else colnames(x$stsObj)[units] for(i in seq_along(units)) { meanHHHunits[[i]] <- plotHHH4_fitted1(x, unit=units[i], main=names[i], col=col, pch=pch, pt.cex=pt.cex, pt.col=pt.col, decompose=decompose, total=total, meanHHH=meanHHH, ...) 
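        ## Usage sketch (hypothetical fit from hhh4(), not defined here):
        ##   plot(fit, type = "fitted", units = 1:2, hide0s = TRUE)
        ## dispatches here and invisibly returns the fitted component means.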
if (i %in% legend) do.call("legend", args=legend.args) } invisible(meanHHHunits) } plotHHH4_fitted_check_col_decompose <- function (col, decompose) { if (is.null(decompose)) { stopifnot(length(col) == 3L) } else { nUnit <- length(decompose) if (length(col) == nUnit) { col <- c("grey85", col) # first color is for "endemic" } else if (length(col) != 1L + nUnit) { warning("'col' should be of length ", 1L + nUnit) col <- c(col[1L], rep_len(col[-1L], nUnit)) } } col } ### plot estimated component means for a single region plotHHH4_fitted1 <- function(x, unit=1, main=NULL, col=c("grey85", "blue", "orange"), pch=19, pt.cex=0.6, pt.col=1, border=col, start=x$stsObj@start, end=NULL, xaxis=NULL, xlim=NULL, ylim=NULL, xlab="", ylab="No. infected", hide0s=FALSE, decompose=NULL, total=FALSE, meanHHH=NULL) { stsObj <- x$stsObj if (!total && is.character(unit) && is.na(unit <- match(.unit <- unit, colnames(stsObj)))) stop("region '", .unit, "' does not exist") if (is.null(main)) main <- if (total) "Overall" else colnames(stsObj)[unit] if (isTRUE(decompose)) decompose <- colnames(stsObj) ## get observed counts obs <- if (total) rowSums(observed(stsObj)) else observed(stsObj)[,unit] ## time range for plotting start0 <- yearepoch2point(stsObj@start, stsObj@freq, toleft=TRUE) start <- yearepoch2point(start, stsObj@freq) tp <- start0 + seq_along(obs)/stsObj@freq # all observation time points if (start < start0 || start > tp[length(tp)]) stop("'start' is not within the time range of 'x$stsObj'") end <- if(is.null(end)) tp[length(tp)] else yearepoch2point(end,stsObj@freq) stopifnot(start < end) tpInRange <- which(tp >= start & tp <= end) # plot only those tpInSubset <- intersect(x$control$subset, tpInRange) # fitted time points ## use time indexes as x-values for use of addFormattedXAxis() if (is.list(xaxis)) { tp <- seq_along(obs) start <- tpInRange[1L] end <- tpInRange[length(tpInRange)] } ## get fitted component means if (is.null(meanHHH)) { meanHHH <- if (is.null(decompose)) { meanHHH(x$coefficients, terms.hhh4(x)) } else { decompose.hhh4(x) } } meanHHHunit <- if (is.null(decompose)) { if (total) { sapply(meanHHH, rowSums) } else { sapply(meanHHH, "[", i=TRUE, j=unit) } } else { if (!setequal(decompose, dimnames(meanHHH)[[3L]][-1L])) stop("'decompose' must be (a permutation of) the fitted units") if (total) { apply(meanHHH[,,c("endemic",decompose)], c(1L, 3L), sum) } else { meanHHH[,unit,c("endemic",decompose)] } } stopifnot(is.matrix(meanHHHunit), !is.null(colnames(meanHHHunit)), nrow(meanHHHunit) == length(x$control$subset)) meanHHHunit <- meanHHHunit[x$control$subset %in% tpInRange,,drop=FALSE] if (any(is.na(meanHHHunit))) { # -> polygon() would be wrong ## could be due to wrong x$control$subset wrt the epidemic lags ## a workaround is then to set 'start' to a later time point stop("predicted mean contains missing values") } ## check color vector col <- if (is.null(decompose) && length(col) == 4L) { ## compatibility with surveillance < 1.10-0 pt.col <- col[4L] rev(col[-4L]) } else { plotHHH4_fitted_check_col_decompose(col, decompose) } ## establish basic plot window if (is.null(ylim)) ylim <- c(0, max(obs[tpInRange],na.rm=TRUE)) plot(c(start,end), ylim, xlim=xlim, xlab=xlab, ylab=ylab, type="n", xaxt = if (is.list(xaxis)) "n" else "s") if (is.list(xaxis)) do.call("addFormattedXAxis", c(list(x = stsObj), xaxis)) title(main=main, line=0.5) ## draw polygons if (is.null(decompose)) { non0 <- which(c("end", "ar", "ne") %in% componentsHHH4(x)) plotComponentPolygons( x = tp[tpInSubset], y = 
meanHHHunit[,c("endemic", "epi.own", "epi.neighbours")[non0],drop=FALSE], col = col[non0], border = border[non0], add = TRUE) } else { non0 <- apply(X = meanHHHunit > 0, MARGIN = 2L, FUN = any) plotComponentPolygons(x = tp[tpInSubset], y = meanHHHunit[, non0, drop = FALSE], col = col[non0], border = border[non0], add = TRUE) } ## add observed counts within [start;end] ptidx <- if (hide0s) intersect(tpInRange, which(obs > 0)) else tpInRange points(tp[ptidx], obs[ptidx], col=pt.col, pch=pch, cex=pt.cex) ## invisibly return the fitted component means for the selected region invisible(meanHHHunit) } ### function which does the actual plotting of the polygons plotComponentPolygons <- function (x, y, col = 1:6, border = col, add = FALSE) { if (!is.vector(x, mode = "numeric") || is.unsorted(x, strictly = TRUE)) stop("'x' must be a strictly increasing sequence of time points") stopifnot(nrow(y <- as.matrix(y)) == (nTime <- length(x))) # y >= 0 yc <- if ((nPoly <- ncol(y)) > 1L) { apply(X = y, MARGIN = 1L, FUN = cumsum) # nPoly x nTime } else t(y) if (!add) { ## establish basic plot window plot(range(x), range(yc[nPoly,]), type = "n") } ## recycle graphical parameters col <- rep_len(col, nPoly) border <- rep_len(border, nPoly) ## draw polygons xpoly <- c(x[1L], x, x[length(x)]) for (poly in nPoly:1) { polygon(x = xpoly, y = c(0, yc[poly, ], 0), col = col[poly], border = border[poly]) } } ### ### Maps of the fitted mean components averaged over time ### plotHHH4_maps <- function (x, which = c("mean", "endemic", "epi.own", "epi.neighbours"), prop = FALSE, main = which, zmax = NULL, col.regions = NULL, labels = FALSE, sp.layout = NULL, ..., map = x$stsObj@map, meanHHH = NULL) { which <- match.arg(which, several.ok = TRUE) if (is.null(col.regions)) col.regions <- .hcl.colors(10) ## extract district-specific mean components if (is.null(meanHHH)) { meanHHH <- meanHHH(x$coefficients, terms.hhh4(x)) } ## select relevant components and convert to an array meanHHH <- simplify2array( meanHHH[c("mean", "endemic", "epi.own", "epi.neighbours")], higher = TRUE) ## convert to proportions if (prop) { meanHHH[,,-1L] <- meanHHH[,,-1L,drop=FALSE] / c(meanHHH[,,1L]) } ## select only 'which' components meanHHH <- meanHHH[,,which,drop=FALSE] ## check map map <- as(map, "SpatialPolygonsDataFrame") if (!all(dimnames(meanHHH)[[2L]] %in% row.names(map))) { stop("'row.names(map)' do not cover all fitted districts") } ## average over time comps <- as.data.frame(colMeans(meanHHH, dims = 1)) ## attach to map data map@data <- cbind(map@data, comps[row.names(map),,drop=FALSE]) ## color key range if (is.null(zmax)) { zmax <- if (prop) { ceiling(10*sapply(comps, max))/10 } else ceiling(sapply(comps, max)) ## sub-components should have the same color range .idxsub <- setdiff(seq_along(zmax), match("mean", names(zmax))) zmax[.idxsub] <- suppressWarnings(max(zmax[.idxsub])) } ## add sp.layout item for district labels if (!is.null(layout.labels <- layout.labels(map, labels))) { sp.layout <- c(sp.layout, list(layout.labels)) } ## produce maps grobs <- mapply( FUN = function (zcol, main, zmax) if (is.na(zmax)) { # automatic color breaks over range of values spplot(map, zcol = zcol, main = main, cuts = length(col.regions) - 1L, col.regions = col.regions, sp.layout = sp.layout, ...) } else { # breakpoints from 0 to zmax spplot(map, zcol = zcol, main = main, at = seq(0, zmax, length.out = length(col.regions) + 1L), col.regions = col.regions, sp.layout = sp.layout, ...) 
}, zcol = names(comps), main = main, zmax = zmax, SIMPLIFY = FALSE, USE.NAMES = FALSE) if (length(grobs) == 1L) { grobs[[1L]] } else { mfrow <- sort(n2mfrow(length(grobs))) gridExtra::grid.arrange(grobs = grobs, nrow = mfrow[1L], ncol = mfrow[2L]) } } ### ### Map of estimated random intercepts of a specific component ### plotHHH4_ri <- function (x, component, exp = FALSE, at = list(n = 10), col.regions = cm.colors(100), colorkey = TRUE, labels = FALSE, sp.layout = NULL, gpar.missing = list(col="darkgrey", lty=2, lwd=2), ...) { ranefmatrix <- ranef.hhh4(x, tomatrix=TRUE) if (is.null(ranefmatrix)) stop("model has no random effects") stopifnot(length(component) == 1L) if (is.na(comp <- pmatch(component, colnames(ranefmatrix)))) stop("'component' must (partially) match one of ", paste(dQuote(colnames(ranefmatrix)), collapse=", ")) map <- as(x$stsObj@map, "SpatialPolygonsDataFrame") if (length(map) == 0L) stop("'x$stsObj' has no map") map$ranef <- ranefmatrix[,comp][row.names(map)] .range <- c(-1, 1) * max(abs(map$ranef), na.rm = TRUE) # 0-centered if (exp) { map$ranef <- exp(map$ranef) .range <- exp(.range) } if (is.list(at)) { at <- modifyList(list(n = 10, range = .range), at) at <- if (exp) { stopifnot(at$range[1] > 0) scales::log_breaks(n = at$n)(at$range) } else { seq(at$range[1L], at$range[2L], length.out = at$n) } if (exp && isTRUE(colorkey)) colorkey <- list(at = log(at), labels = list(at = log(at), labels = at)) } if (is.list(gpar.missing) && any(is.na(map$ranef))) { sp.layout <- c(sp.layout, c(list("sp.polygons", map[is.na(map$ranef),]), gpar.missing)) } if (!is.null(layout.labels <- layout.labels(map, labels))) { sp.layout <- c(sp.layout, list(layout.labels)) } spplot(map[!is.na(map$ranef),], zcol = "ranef", sp.layout = sp.layout, col.regions = col.regions, at = at, colorkey = colorkey, ...) 
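    ## Usage sketch: plot(fit, type = "ri", component = "end.ri(iid)",
    ## exp = TRUE) would map exp-transformed endemic random intercepts of a
    ## hypothetical fit; valid component names are the column names of
    ## ranef(fit, tomatrix = TRUE), so "end.ri(iid)" is only an example.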
} ### ### Plot the course of the dominant eigenvalue of one or several hhh4-fits ### plotHHH4_maxEV <- function (..., matplot.args = list(), refline.args = list(), legend.args = list()) { objnams <- unlist(lapply(match.call(expand.dots=FALSE)$..., deparse)) objects <- getHHH4list(..., .names = objnams) ## get time points epoch <- attr(objects, "epoch") start <- attr(objects, "start") freq <- attr(objects, "freq") start0 <- yearepoch2point(start, freq, toleft=TRUE) tp <- start0 + seq_along(epoch) / freq ## compute course of dominant eigenvalue for all models maxEV <- sapply(objects, getMaxEV, simplify=TRUE, USE.NAMES=TRUE) ## line style matplot.args <- modifyList( list(type="l", col=c(1,2,6,3), lty=c(1,3,2,4), lwd=1.7, cex=1, pch=NULL, xlab="", ylab="dominant eigenvalue", ylim=c(0,max(2,maxEV))), matplot.args) ## main plot do.call("matplot", c(list(x=tp, y=maxEV), matplot.args)) ## add reference line if (is.list(refline.args)) do.call("abline", modifyList(list(h=1, lty=3, col="grey"), refline.args)) ## add legend if (missing(legend.args) && length(objects) == 1) legend.args <- NULL # omit legend if (is.list(legend.args)) { legend.args <- modifyList( c(list(x="topright", inset=0.02, legend=names(objects), bty="n"), matplot.args[c("col", "lwd", "lty", "pch")], with(matplot.args, list(pt.cex=cex, text.col=col))), legend.args) do.call("legend", legend.args) } ## done invisible(maxEV) } getMaxEV <- function (x) { Lambda <- createLambda(x) if (identical(type <- attr(Lambda, "type"), "zero")) { rep.int(0, nrow(x$stsObj)) } else { diagonal <- identical(type, "diagonal") vapply(X = seq_len(nrow(x$stsObj)), FUN = function (t) maxEV(Lambda(t), symmetric = FALSE, diagonal = diagonal), FUN.VALUE = 0, USE.NAMES = FALSE) } } ## generate a function that computes the Lambda_t matrix createLambda <- function (object) { nTime <- nrow(object$stsObj) nUnit <- object$nUnit if (identical(componentsHHH4(object), "end")) { # no epidemic components zeromat <- matrix(0, nUnit, nUnit) Lambda <- function (t) zeromat attr(Lambda, "type") <- "zero" return(Lambda) } exppreds <- get_exppreds_with_offsets(object) W <- getNEweights(object) Wt <- if (is.null(W)) { NULL } else if (is.matrix(W)) { function (t) W } else { function (t) W[,,t] } type <- NULL Lambda <- if (is.null(Wt)) { # no neighbourhood component type <- "diagonal" function (t) { stopifnot(isScalar(t) && t > 0 && t <= nTime) diag(exppreds$ar[t,], nUnit, nUnit) } } else { function (t) { stopifnot(isScalar(t) && t > 0 && t <= nTime) Lambda <- exppreds$ne[t,] * t(Wt(t)) diag(Lambda) <- diag(Lambda) + exppreds$ar[t,] Lambda } } attr(Lambda, "type") <- type Lambda } ## extract exppreds multiplied with offsets ## note: theta = coef(object) would also work since psi is not involved here get_exppreds_with_offsets <- function (object, subset = seq_len(nrow(object$stsObj)), theta = object$coefficients) { model <- terms.hhh4(object) means <- meanHHH(theta, model, subset = subset) res <- sapply(X = c("ar", "ne", "end"), FUN = function (comp) { exppred <- means[[paste0(comp, ".exppred")]] offset <- object$control[[comp]]$offset if (length(offset) > 1) offset <- offset[subset,,drop=FALSE] exppred * offset }, simplify = FALSE, USE.NAMES = TRUE) res } ## determine the dominant eigenvalue of the Lambda matrix maxEV <- function (Lambda, symmetric = isSymmetric.matrix(Lambda), diagonal = FALSE) { maxEV <- if (diagonal) { max(Lambda) # faster than max(diag(Lambda)) } else { eigen(Lambda, symmetric = symmetric, only.values = TRUE)$values[1L] } ## dominant eigenvalue may be complex 
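    ## (this can happen since Lambda_t need not be symmetric, e.g., for
    ##  asymmetric neighbourhood weights; the modulus is then reported below)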
if (is.complex(maxEV)) { if (Im(maxEV) == 0) { # if other eigenvalues are complex Re(maxEV) } else { warning("dominant eigenvalue is complex, using its absolute value") abs(maxEV) } } else { maxEV } } ### ### Plot estimated seasonality (sine-cosine terms) of one or several hhh4-fits ### either as multiplicative effect on the 'components' (intercept=FALSE) ### or with intercept=TRUE, which only makes sense if there are no further ### non-centered covariates and offsets. ### plotHHH4_season <- function (..., components = NULL, intercept = FALSE, xlim = NULL, ylim = NULL, xlab = NULL, ylab = "", main = NULL, par.settings = list(), matplot.args = list(), legend = NULL, legend.args = list(), refline.args = list(), unit = 1) { objnams <- unlist(lapply(match.call(expand.dots=FALSE)$..., deparse)) objects <- getHHH4list(..., .names = objnams) freq <- attr(objects, "freq") components <- if (is.null(components)) { intersect(c("end", "ar", "ne"), unique(unlist( lapply(objects, componentsHHH4), use.names = FALSE))) } else { match.arg(components, choices = c("ar", "ne", "end", "maxEV"), several.ok = TRUE) } ## x-axis if (is.null(xlim)) xlim <- c(1,freq) if (is.null(xlab)) xlab <- if (freq==52) "week" else if (freq==12) "month" else "time" ## auxiliary function for an argument list "x" with named "defaults" list withDefaults <- function(x, defaults) { if (is.null(x)) defaults else if (is.list(x)) { if (is.null(names(x))) { # x must be complete stopifnot(length(x) == length(defaults)) setNames(x, names(defaults)) } else modifyList(defaults, x) # x might be a subset of parameters } else if (is.atomic(x)) { setNames(rep(list(x), length(defaults)), names(defaults)) } else stop("'", deparse(substitute(x)), "' is not suitably specified") } ## component-specific arguments ylim <- withDefaults(ylim, list(ar=NULL, ne=NULL, end=NULL, maxEV=NULL)) ylab <- withDefaults(ylab, list(ar=expression(hat(lambda)), ne=expression(hat(phi)), end=expression(hat(nu)), maxEV="dominant eigenvalue")) main <- withDefaults(main, list(ar="autoregressive component", ne="spatiotemporal component", end="endemic component", maxEV="dominant eigenvalue")) anyMain <- any(unlist(lapply(main, nchar), recursive=FALSE, use.names=FALSE) > 0) ## basic graphical settings if (is.list(par.settings)) { par.defaults <- list(mfrow=sort(n2mfrow(length(components))), mar=c(4,5,if(anyMain) 2 else 1,1)+.1, las=1) par.settings <- modifyList(par.defaults, par.settings) opar <- do.call("par", par.settings) on.exit(par(opar)) } ## line style matplot.args <- modifyList(list(type="l", col=c(1,2,6,3), lty=c(1,3,2,4), lwd=1.7, cex=1, pch=NULL), matplot.args) ## legend options if (is.null(legend)) legend <- length(objects) > 1 if (is.logical(legend)) legend <- which(legend) if (!is.list(legend.args)) { if (length(legend) > 0) warning("ignored 'legend' since 'legend.args' is not a list") legend <- integer(0L) } if (length(legend) > 0) { default.args <- c( list(x="topright", inset=0.02, legend=names(objects), bty="n"), matplot.args[c("col", "lwd", "lty", "pch")], with(matplot.args, list(pt.cex=cex, text.col=col)) ) legend.args <- modifyList(default.args, legend.args) } ## plot seasonality in individual model components seasons <- list() for(comp in setdiff(components, "maxEV")){ s2 <- lapply(objects, getSeason, component = comp, unit = unit) seasons[[comp]] <- exp(vapply(s2, FUN = if (intercept) { function (intseas) do.call("+", intseas) } else { function (intseas) intseas$season # disregard intercept }, FUN.VALUE = numeric(freq), USE.NAMES = TRUE)) 
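        ## each column of seasons[[comp]] holds one model's multiplicative
        ## seasonal effect by time of year (or exp(intercept + season) if
        ## intercept=TRUE), evaluated at t = 1:freq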
do.call("matplot", # x defaults to 1:freq c(list(seasons[[comp]], xlim=xlim, ylim=ylim[[comp]], xlab=xlab, ylab=ylab[[comp]], main=main[[comp]]), matplot.args)) if (is.list(refline.args) && !intercept && any(seasons[[comp]] != 1)) do.call("abline", modifyList(list(h=1, lty=3, col="grey"), refline.args)) if (match(comp, components) %in% legend) do.call("legend", legend.args) } ## plot seasonality of dominant eigenvalue if ("maxEV" %in% components) { seasons[["maxEV"]] <- vapply(objects, FUN = function (obj) { getMaxEV_season(obj)$maxEV.season }, FUN.VALUE = numeric(freq), USE.NAMES = TRUE) do.call("matplot", c(list(seasons[["maxEV"]], xlim=xlim, ylim=if (is.null(ylim[["maxEV"]])) c(0,max(2,seasons[["maxEV"]])) else ylim[["maxEV"]], xlab=xlab, ylab=ylab[["maxEV"]], main=main[["maxEV"]]), matplot.args)) if (is.list(refline.args)) do.call("abline", modifyList(list(h=1, lty=3, col="grey"), refline.args)) if (4 %in% legend) do.call("legend", legend.args) } ## invisibly return the data that has been plotted invisible(seasons) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # get estimated intercept and seasonal pattern in the different components # CAVE: other covariates and offsets are ignored #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ getSeason <- function(x, component = c("end", "ar", "ne"), unit = 1) { stopifnot(inherits(x, "hhh4")) component <- match.arg(component) startseason <- getSeasonStart(x) freq <- x$stsObj@freq if (is.character(unit)) unit <- match(unit, colnames(x$stsObj)) ## return -Inf is component is not in the model (-> exp(-Inf) = 0) if (!component %in% componentsHHH4(x)) return(list(intercept=-Inf, season=rep.int(-Inf, freq))) ## get the intercept est <- fixef.hhh4(x, reparamPsi=FALSE) intercept <- unname(est[grep(paste0("^", component, "\\.(1|ri)"), names(est))]) if (length(intercept) == 0) { intercept <- 0 # no intercept (not standard) } else if (length(intercept) > 1) { # unit-specific intercepts if (length(intercept) != ncol(x$stsObj)) stop(component,"-component has incomplete unit-specific intercepts") intercept <- intercept[unit] if (is.na(intercept)) stop("the specified 'unit' does not exist") } ## get seasonality terms (relying on sin(2*pi*t/52)-kind coefficient names) coefSinCos <- est[grep(paste0("^",component, "\\.(sin|cos)\\("), names(est))] if (unitspecific <- length(grep(").", names(coefSinCos), fixed=TRUE))) { if (unitspecific < length(coefSinCos)) stop("cannot handle partially unit-specific seasonality") coefSinCos <- coefSinCos[grep(paste0(").",colnames(x$stsObj)[unit]), names(coefSinCos), fixed=TRUE)] ## drop .unitname-suffix since non-syntactic (cannot reformulate()) names(coefSinCos) <- sub("\\)\\..+$", ")", names(coefSinCos)) } if (length(coefSinCos)==0) return(list(intercept=intercept, season=rep.int(0,freq))) fSinCos <- reformulate( sub(paste0("^",component,"\\."), "", names(coefSinCos)), intercept=FALSE) mmSinCos <- model.matrix(fSinCos, data=data.frame(t=startseason-1 + seq_len(freq))) ## Done list(intercept=intercept, season=as.vector(mmSinCos %*% coefSinCos)) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # compute dominant eigenvalue of Lambda_t # CAVE: no support for Lambda_it #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ getMaxEV_season <- function (x) { stopifnot(inherits(x, "hhh4")) nUnits <- x$nUnit freq <- x$stsObj@freq components <- componentsHHH4(x) ## CAVE: this function ignores epidemic covariates/offsets ## and unit-specific seasonality if (nUnits > 
1L && any(c("ar", "ne") %in% components)) { compOK <- vapply(x$control[c("ar","ne")], FUN = function (comp) { terms <- terms(x)$terms epiterms <- terms[,terms["offsetComp",] %in% seq_len(2L),drop=FALSE] identical(as.numeric(comp$offset), 1) && length(all.vars(removeTimeFromFormula(comp$f))) == 0L && all(!unlist(epiterms["unitSpecific",])) }, FUN.VALUE = TRUE, USE.NAMES = FALSE) if (any(!compOK)) warning("epidemic components have (unit-specific) ", "covariates/offsets not accounted for;\n", " use getMaxEV() or plotHHH4_maxEV()") } ## global intercepts and seasonality s2.lambda <- getSeason(x, "ar") s2.phi <- getSeason(x, "ne") ## unit-specific intercepts ris <- ranef.hhh4(x, tomatrix=TRUE) ri.lambda <- ris[,pmatch("ar.ri", colnames(ris), nomatch=0L),drop=TRUE] if (length(ri.lambda) == 0L) ri.lambda <- rep.int(0, nUnits) ri.phi <- ris[,pmatch("ne.ri", colnames(ris), nomatch=0L),drop=TRUE] if (length(ri.phi) == 0L) ri.phi <- rep.int(0, nUnits) ## get neighbourhood weights as a function of time W <- getNEweights(x) # NULL, matrix or 3-dim array if (!is.null(W) && !is.matrix(W)) stop("neighbourhood weights are time-varying; ", # and thus probably changing within or across seasons "use getMaxEV() or plotHHH4_maxEV()") ## create the Lambda_t matrix createLambda <- function (t) { Lambda <- if ("ne" %in% components) { exp(s2.phi$intercept + ri.phi + if(t==0) 0 else s2.phi$season[t]) * t(W) } else matrix(0, nUnits, nUnits) if ("ar" %in% components) { diag(Lambda) <- diag(Lambda) + exp(s2.lambda$intercept + ri.lambda + if(t==0) 0 else s2.lambda$season[t]) } Lambda } ## do this for t in 0:freq diagonal <- !("ne" %in% components) .maxEV <- function (t) { maxEV(createLambda(t), symmetric = FALSE, diagonal = diagonal) } maxEV.const <- .maxEV(0) maxEV.season <- if (all(c(s2.phi$season, s2.lambda$season) %in% c(-Inf, 0))) { rep.int(maxEV.const, freq) } else { vapply(X = seq_len(freq), FUN = .maxEV, FUN.VALUE = 0, USE.NAMES = FALSE) } ## Done list(maxEV.season = maxEV.season, maxEV.const = maxEV.const, Lambda.const = createLambda(0)) } ## Determine the time point t of the start of a season in a hhh4() fit. ## If \code{object$stsObj@start[2] == 1}, it simply equals ## \code{object$control$data$t[1]}. Otherwise, the \code{stsObj} time series ## starts within a year (at sample \code{s}, say) and the beginning of ## the next season is ## \code{object$control$data$t[1] + object$stsObj@freq - s + 1}. getSeasonStart <- function (object) { if ((startsample <- object$stsObj@start[2]) == 1) { object$control$data$t[1L] } else { object$control$data$t[1L] + object$stsObj@freq-startsample + 1 } } ### ### plot neighbourhood weight as a function of distance (neighbourhood order) ### plotHHH4_neweights <- function (x, plotter = boxplot, ..., exclude = 0, maxlag = Inf) { plotter <- match.fun(plotter) ## orders of neighbourhood (o_ji) nbmat <- neighbourhood(x$stsObj) if (all(nbmat %in% 0:1)) { message("'neighbourhood(x$stsObj)' is binary; ", "computing neighbourhood orders ...") nbmat <- nbOrder(nbmat, maxlag=maxlag) } ## extract (estimated) weight matrix (w_ji) W <- getNEweights(x) if (is.null(W)) { # if no spatio-temporal component in the model W <- nbmat W[] <- 0 } ## draw the boxplot Distance <- factor(nbmat, exclude = exclude) notexcluded <- which(!is.na(Distance)) Distance <- Distance[notexcluded] Weight <- W[notexcluded] plotter(Weight ~ Distance, ...) 
}

###
### auxiliary functions
###

yearepoch2point <- function (yearepoch, frequency, toleft=FALSE)
    yearepoch[1L] + (yearepoch[2L] - toleft) / frequency

getHHH4list <- function (..., .names = NA_character_)
{
    objects <- list(...)
    if (length(objects) == 1L && is.list(objects[[1L]])
        && inherits(objects[[1L]][[1L]], "hhh4")) {
        ## ... is a single list of fits
        objects <- objects[[1L]]
        if (is.null(names(objects))) names(objects) <- seq_along(objects)
    } else {
        names(objects) <- if (is.null(names(objects))) .names else {
            ifelse(nzchar(names(objects)), names(objects), .names)
        }
    }
    if (!all(sapply(objects, inherits, what="hhh4")))
        stop("'...' must consist of hhh4()-fits only")

    ## check common epoch, start and frequency and append them as attributes
    epoch <- unique(t(sapply(objects, function(x) x$stsObj@epoch)))
    if (nrow(epoch) > 1) stop("supplied hhh4-models obey different 'epoch's")
    attr(objects, "epoch") <- drop(epoch)
    start <- unique(t(sapply(objects, function(x) x$stsObj@start)))
    if (nrow(start) > 1) stop("supplied hhh4-models obey different start times")
    attr(objects, "start") <- drop(start)
    freq <- unique(sapply(objects, function(x) x$stsObj@freq))
    if (length(freq)>1) stop("supplied hhh4-models obey different frequencies")
    attr(objects, "freq") <- freq

    ## done
    return(objects)
}
surveillance/R/epidataCS_plot.R0000644000176200001440000003467713263671176016206 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
###
### plot-method for "epidataCS" objects
###
### Copyright (C) 2009-2015 Sebastian Meyer
### $Revision: 2100 $
### $Date: 2018-04-12 16:51:42 +0200 (Thu, 12. Apr 2018) $
################################################################################

plot.epidataCS <- function (x, aggregate = c("time", "space"), subset, by = type, ...)
{
    aggregate <- match.arg(aggregate)
    FUN <- paste("epidataCSplot", aggregate, sep = "_")
    do.call(FUN, args = list(x = quote(x), subset = substitute(subset),
                             by = substitute(by), ...))
}

### plot.epidataCS(x, aggregate = "time") -> number of cases over time
## in case t0.Date is specified, hist.Date() is used and 'breaks' must be set
## in ... (e.g. "months")

epidataCSplot_time <- function (x, subset, by = type,
    t0.Date = NULL, breaks = "stgrid", freq = TRUE,
    col = rainbow(nTypes), cumulative = list(),
    add = FALSE, mar = NULL, xlim = NULL, ylim = NULL,
    xlab = "Time", ylab = NULL, main = NULL,
    panel.first = abline(h=axTicks(2), lty=2, col="grey"),
    legend.types = list(), ...)
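## Usage sketch, mirroring the example noted further below in this file:
##   data("imdepi"); plot(imdepi, t0.Date = "2002-01-15", breaks = "months")
## plots monthly counts of the events in the 'imdepi' example data.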
{ timeRange <- with(x$stgrid, c(start[1L], stop[length(stop)])) ## subset event marks eventMarks <- if (missing(subset)) { marks.epidataCS(x, coords = FALSE) } else { do.call(base::subset, list( x = quote(marks.epidataCS(x, coords = FALSE)), subset = substitute(subset) )) } if (nrow(eventMarks) == 0L) stop("no events left after 'subset'") ## extract the data to plot by <- substitute(by) eventTimesTypes <- eventMarks[c("time", "type")] eventTimesTypes$type <- if (is.null(by)) { # disregard event types factor("all") } else { # stratification of counts (default is to stack bars by event type) as.factor(eval(by, envir = eventMarks)) } typeNames <- levels(eventTimesTypes$type) nTypes <- length(typeNames) if (!freq && nTypes > 1L) warning("a stacked barplot of multiple event types only makes sense for 'freq=TRUE'") ## default breaks at stop times of stgrid if (identical(breaks, "stgrid")) { breaks <- c(timeRange[1L], unique.default(x$stgrid$stop)) if (any(eventTimesTypes$time < timeRange[1L])) { message("Note: ignoring events of the prehistory (before \"stgrid\")") eventTimesTypes <- base::subset(eventTimesTypes, time >= timeRange[1L]) if (nrow(eventTimesTypes) == 0L) stop("no events left to plot") } } ## calculate cumulative numbers if requested if (is.list(cumulative)) { csums <- tapply(eventTimesTypes$time, eventTimesTypes["type"], function (t) cumsum(table(t)), simplify=FALSE) if (!is.null(cumulative[["offset"]])) { stopifnot(is.vector(cumulative$offset, mode="numeric"), length(cumulative$offset) == nTypes) csums <- mapply(FUN="+", csums, cumulative$offset, SIMPLIFY=FALSE, USE.NAMES=TRUE) } if (is.null(cumulative[["axis"]])) cumulative[["axis"]] <- TRUE } eventTimesTypes$type <- as.integer(eventTimesTypes$type) typesEffective <- sort(unique(eventTimesTypes$type)) col <- rep_len(col, nTypes) if (!is.null(t0.Date)) { stopifnot(length(t0.Date) == 1L) t0.Date <- as.Date(t0.Date) t0 <- timeRange[1L] if (is.numeric(breaks) && length(breaks) > 1L) # transform to Date breaks <- t0.Date + (breaks - t0) if (is.null(xlim)) xlim <- t0.Date + (timeRange - t0) if (missing(xlab) && is.character(breaks)) xlab <- paste0("Time (", breaks, ")") eventTimesTypes$time <- t0.Date + as.integer(eventTimesTypes$time - t0) ## we need integer dates here because otherwise, if the last event ## occurs on the last day of a month, year, etc. (depending on ## 'breaks') with a fractional date (e.g. as.Date("2009-12-31") + 0.5), ## then the automatic 'breaks' (e.g., breaks = "months") will not cover ## the data (in the example, it will only reach until ## as.Date("2009-12-31")). The following would fail: ## data("imdepi"); plot(imdepi, t0.Date = "2002-01-15", breaks = "months") } gethistdata <- function (breaks, types = seq_len(nTypes)) { times <- eventTimesTypes$time[eventTimesTypes$type %in% types] if (is.null(t0.Date)) { hist(times, breaks=breaks, plot=FALSE, warn.unused=FALSE, ...) } else { hist(times, breaks=breaks, plot=FALSE, ...) 
            ## warn.unused=FALSE is hard-coded in hist.Date
        }
    }
    histdata <- gethistdata(breaks=breaks)
    if (!is.null(t0.Date)) {
        ## hist.Date() drops the Date class, but we need it for later re-use
        class(histdata$breaks) <- "Date"
    }

    ## establish the basic plot window
    if (!add) {
        if (is.null(xlim)) xlim <- timeRange
        if (is.null(ylim)) {
            ylim <- range(0, histdata[[if (freq) "counts" else "density"]])
        }
        if (is.null(ylab)) {
            ylab <- if (freq) "Number of cases" else "Density of cases"
        }
        if (is.null(mar)) {
            mar <- par("mar")
            if (is.list(cumulative) && cumulative$axis) mar[4L] <- mar[2L]
        }
        opar <- par(mar = mar); on.exit(par(opar))
        plot(x=xlim, y=ylim, xlab=xlab, ylab=ylab, main=main,
             type="n", bty="n")
        force(panel.first)
    }

    ## plot histogram (over all types)
    suppressWarnings( # about wrong AREAS if breaks are non-equidistant
        plot(histdata, freq = freq, add = TRUE,
             col = col[typesEffective[1L]], ...)
    )
    if (!add) # doesn't work as expected when adding to plot with cumulative axis
        box() # because white filling of bars might overdraw the initial box

    ## add type-specific sub-histograms
    for (typeIdx in seq_along(typesEffective)[-1L]) {
        .histdata <- gethistdata(
            breaks = histdata$breaks, # have to use same breaks
            types = typesEffective[typeIdx:length(typesEffective)]
        )
        suppressWarnings( # about wrong AREAS if breaks are non-equidistant
            plot(.histdata, freq = freq, add = TRUE,
                 col = col[typesEffective[typeIdx]], ...)
        )
    }

    ## optionally add cumulative number of cases
    if (is.list(cumulative)) {
        aT2 <- axTicks(2)
        div <- length(aT2) - 1L
        darken <- function (col, f = 0.6)
            apply(X = col2rgb(col, alpha = TRUE), MARGIN = 2L,
                  FUN = function (x) rgb(f*x[1L], f*x[2L], f*x[3L], x[4L],
                                         maxColorValue = 255))
        cumulative <- modifyList(
            list(maxat = ceiling(max(unlist(csums))/div)*div,
                 col = darken(col), lwd = 3, axis = TRUE,
                 lab = "Cumulative number of cases"),
            cumulative)
        csum2y <- function (x) x / cumulative$maxat * aT2[length(aT2)]
        for (typeIdx in typesEffective) {
            .times <- as.numeric(names(csums[[typeIdx]]))
            lines(if (is.null(t0.Date)) .times else t0.Date + .times - t0,
                  csum2y(csums[[typeIdx]]),
                  lwd=cumulative$lwd, col=cumulative$col[typeIdx])
        }
        if (cumulative$axis) {
            axis(4, at=aT2, labels=aT2/aT2[length(aT2)]*cumulative$maxat)
            mtext(cumulative$lab, side=4, line=3, las=0)
        }
    }

    ## optionally add legend
    if (is.list(legend.types) && length(typesEffective) > 1) {
        legend.types <- modifyList(
            list(x="topleft", legend=typeNames[typesEffective],
                 title=deparse(by, nlines = 1),
                 fill=col[typesEffective]),
            legend.types)
        do.call("legend", legend.types)
    }

    invisible(histdata)
}

### plot.epidataCS(x, aggregate = "space") -> spatial point pattern

epidataCSplot_space <- function (x, subset, by = type, tiles = x$W, pop = NULL,
    cex.fun = sqrt, points.args = list(), add = FALSE,
    legend.types = list(), legend.counts = list(),
    sp.layout = NULL, ...)
{
    ## extract the points to plot
    events <- if (missing(subset)) {
        x$events
    } else { # calls sp:::subset.Spatial
        eval(substitute(base::subset(x$events, subset=.subset),
                        list(.subset=substitute(subset))))
    }
    ## should the plot distinguish between different event types?
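    ## (by = type, the default, stratifies by event type; by = NULL pools all
    ##  events; any expression evaluating to a factor over the marks works)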
by <- substitute(by) events@data$type <- if (is.null(by)) { # disregard event types factor("all") } else { # default is to distinguish points by event type as.factor(eval(by, envir = events@data)) } typeNames <- levels(events$type) nTypes <- length(typeNames) eventCoordsTypes <- data.frame( coordinates(events), type = as.integer(events$type), row.names = NULL, check.rows = FALSE, check.names = FALSE) ## count events by location and type eventCoordsTypesCounts <- if (is.null(pop)) { countunique(eventCoordsTypes) } else { ## work with "SpatialPolygons" -> spplot() events$COUNT <- multiplicity(eventCoordsTypes) events[!duplicated(eventCoordsTypes), c("type", "COUNT")] } pointCounts <- eventCoordsTypesCounts$COUNT countsLegend <- unique(round(10^(do.call("seq", c( as.list(log10(range(pointCounts))), list(length.out=5) ))))) typesEffective <- sort(unique(eventCoordsTypesCounts$type)) ## point style colTypes <- list(...)[["colTypes"]] # backwards compatibility for < 1.8 if (is.null(colTypes)) { colTypes <- rainbow(nTypes) } else warning("argument 'colTypes' is deprecated; ", "use 'points.args$col' instead") points.args <- modifyList(list(pch=1, col=colTypes, lwd=1, cex=0.5), points.args) styleArgs <- c("pch", "col", "lwd") points.args[styleArgs] <- lapply(points.args[styleArgs], rep_len, length.out=nTypes) ## select style parameters according to the events' types points.args_pointwise <- points.args points.args_pointwise[styleArgs] <- lapply( points.args_pointwise[styleArgs], "[", eventCoordsTypesCounts$type) points.args_pointwise$cex <- points.args_pointwise$cex * cex.fun(pointCounts) ## plot if (is.null(pop)) { ## classical plotting system if (!add) plot(tiles, ...) do.call("points", c(alist(x=eventCoordsTypesCounts[,1:2,drop=FALSE]), points.args_pointwise)) ## optionally add legends if (is.list(legend.types) && length(typesEffective) > 1) { legend.types <- modifyList( list(x="topright", legend=typeNames[typesEffective], title=deparse(by, nlines = 1), #pt.cex=points.args$cex, # better use par("cex") pch=points.args$pch[typesEffective], col=points.args$col[typesEffective], pt.lwd=points.args$lwd[typesEffective]), legend.types) do.call("legend", legend.types) } if (is.list(legend.counts) && any(pointCounts > 1)) { if (!is.null(legend.counts[["counts"]])) { countsLegend <- as.vector(legend.counts[["counts"]], mode="integer") legend.counts[["counts"]] <- NULL } legend.counts <- modifyList( list(x="bottomright", bty="n", legend=countsLegend, pt.cex=points.args$cex * cex.fun(countsLegend), pch=points.args$pch[1L], col=if(length(unique(points.args$col)) == 1L) points.args$col[1L] else 1, pt.lwd=points.args$lwd[1L]), legend.counts) do.call("legend", legend.counts) } invisible() } else { if (!is(tiles, "SpatialPolygonsDataFrame")) { stop("'pop' requires 'tiles' to be a \"SpatialPolygonsDataFrame\"") } ## grid plotting system -> spplot() layout.points <- c(list("sp.points", eventCoordsTypesCounts), points.args_pointwise) ## optional legend definitions legend.types <- if (is.list(legend.types) && length(typesEffective) > 1) { legend.types <- modifyList( list(corner = c(1, 1), # "topright" title = deparse(by, nlines = 1), cex.title = 1, border = TRUE, points = list( pch = points.args$pch[typesEffective], col = points.args$col[typesEffective], lwd = points.args$lwd[typesEffective] ), text = list(typeNames[typesEffective])), legend.types ) corner.types <- legend.types$corner legend.types$corner <- NULL list(inside = list(fun = lattice::draw.key(legend.types), corner = corner.types)) } legend.counts <- if 
(is.list(legend.counts) && any(pointCounts > 1)) { if (!is.null(legend.counts[["counts"]])) { countsLegend <- as.vector(legend.counts[["counts"]], mode="integer") legend.counts[["counts"]] <- NULL } legend.counts <- modifyList( list(corner = c(1,0), # "bottomright" points = list( cex = points.args$cex * cex.fun(countsLegend), pch = points.args$pch[1L], col = if(length(unique(points.args$col)) == 1L) points.args$col[1L] else 1, lwd = points.args$lwd[1L] ), text = list(as.character(countsLegend)), padding.text=2, between=0), legend.counts ) corner.counts <- legend.counts$corner legend.counts$corner <- NULL list(inside = list(fun = lattice::draw.key(legend.counts), corner = corner.counts)) } ## create the plot spplot(obj = tiles, zcol = pop, sp.layout = c(list(layout.points), sp.layout), legend = c(legend.types, legend.counts), ...) } } surveillance/R/twinSIR.R0000644000176200001440000005254713203323012014633 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Function 'twinSIR' performs (penalized) maximum likelihood inference ### for the Hoehle (2009) model. Now with REML estimation of smoothing ### parameter lambda. ### ### Copyright (C) 2008-2009 Michael Hoehle ### Copyright (C) 2008-2009,2014,2017 Sebastian Meyer ### $Revision: 2046 $ ### $Date: 2017-11-16 15:51:54 +0100 (Thu, 16. Nov 2017) $ ################################################################################ ## ATTENTION: the .loglik and .score functions assume atRiskY == 1 data ###################################################################### # Log-Likelihood function # # PARAMS: # theta - parameter vector c(alpha,beta), where # beta also contains the baseline coefficients in the first place # X - covariate matrix related to alpha, i.e. the epidemic component # Z - covariate matrix related to beta, i.e. the Cox-like endemic component # survs - data.frame with columns id, start, stop and event # weights - vector of length nrow(X) indicating the number of individuals # with the same covariates. weights are allowed to change over time. 
# Note: it is assumed that none of the individuals covered by # "weights" can have an actual event, if so they need to have their # own row ###################################################################### .loglik <- function(theta, X, Z, survs, weights) { # Calculate epidemic (e) and endemic (h) component of the infection intensity eh <- .eh(theta, X, Z) # Calculate infection intensity assuming atRiskY == 1 for all rows lambdaNoY <- rowSums(eh) # dN Part of the loglik isEvent <- survs$event == 1 events <- which(isEvent) intdN <- numeric(length(isEvent)) # zeros intdN[events] <- weights[events] * log(lambdaNoY[events]) # here one might have got -Inf values in case of 0-intensity at an event time # lambda integral of the log-likelihood dt <- survs$stop - survs$start intlambda <- weights * lambdaNoY * dt # Return the log-likelihood loglik <- sum( intdN - intlambda ) return(loglik) } ###################################################################### # Penalized log-likelihood function # Additional Params: # lambda.smooth - smoothing parameter # K - penalty matrix on the beta component ###################################################################### .ploglik <- function(theta, X, Z, survs, weights, lambda.smooth, K) { loglik <- .loglik(theta, X, Z, survs, weights) if (lambda.smooth == 0) { return(loglik) } # Add penalty term and return the penalized log-likelihood beta <- theta[ncol(X) + seq_len(ncol(Z))] penalty <- lambda.smooth/2 * drop(t(beta) %*% K %*% beta) return(loglik - penalty) } ###################################################################### # Score function # Params: see .loglik ###################################################################### .score <- function(theta, X, Z, survs, weights) { dimX <- dim(X) nRows <- dimX[1] px <- dimX[2] pz <- ncol(Z) isEvent <- survs$event == 1 # event indicator for the dN integral events <- which(isEvent) dt <- survs$stop - survs$start # for the dt integral # Calculate epidemic (e) and endemic (h) component of the infection intensity eh <- .eh(theta, X, Z) h <- eh[,2,drop=TRUE] # Calculate infection intensity at event times lambdaEvents <- rowSums(eh[events,,drop=FALSE]) score <- if (px > 0L) { wX <- X * weights part1intdN <- matrix(0, nrow = nRows, ncol = px, dimnames = dimnames(X)) part1intdN[events,] <- wX[events,] / lambdaEvents part1intlambda <- wX * dt colSums(part1intdN - part1intlambda) } else NULL if (pz > 0L) { wZh <- Z * (h * weights) part2intdN <- matrix(0, nrow = nRows, ncol = pz, dimnames = dimnames(Z)) part2intdN[events,] <- wZh[events,] / lambdaEvents part2intlambda <- wZh * dt part2 <- colSums(part2intdN - part2intlambda) score <- c(score, part2) } return(score) } ###################################################################### # Penalized Score function # Additional Params: see .ploglik ###################################################################### .pscore <- function(theta, X, Z, survs, weights, lambda.smooth, K, ...) 
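## i.e. the penalized score U_p(theta) = U(theta) - lambda.smooth * (0', (K beta)')',
## so the penalty only enters the beta (endemic) block of theta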
{ score <- .score(theta, X, Z, survs, weights) if (lambda.smooth == 0) { return(score) } # Add penalty term and return the penalized Score function beta <- theta[ncol(X) + seq_len(ncol(Z))] penalty <- c(rep.int(0, ncol(X)), lambda.smooth * K %*% beta) return(score - penalty) } ###################################################################### # Fisher information matrix function # Params: see .loglik ###################################################################### .fisherinfo <- function(theta, X, Z, survs, weights) { px <- ncol(X) pz <- ncol(Z) isEvent <- survs$event == 1 # event indicator events <- which(isEvent) # Fisher matrix calculation only incorporates data at event times! Xevents <- X[events,,drop = FALSE] Zevents <- Z[events,,drop = FALSE] # Calculate epidemic (e) and endemic (h) component of the infection intensity eh <- .eh(theta, Xevents, Zevents) h <- eh[,2,drop=TRUE] # Calculate infection intensity lambda <- rowSums(eh) # calculate intdN of d/dtheta log(lambda_i(t)) for all individuals with events wpl <- weights[events] / lambda dloglambda <- if (px > 0L) Xevents * wpl else NULL if (pz > 0L) { dloglambda <- cbind(dloglambda, Zevents * (h * wpl)) } # Build the optional variation process (Martinussen & Scheike, p64) fisherinfo <- matrix(0, nrow=px+pz, ncol=px+pz) for (i in seq_len(nrow(dloglambda))) { x <- dloglambda[i,,drop=FALSE] # single-ROW matrix fisherinfo <- fisherinfo + crossprod(x) # t(x) %*% x } return(fisherinfo) } ###################################################################### # Fisher information matrix function # Additional Params: see .ploglik ###################################################################### .pfisherinfo <- function(theta, X, Z, survs, weights, lambda.smooth, K) { fisherinfo <- .fisherinfo(theta, X, Z, survs, weights) if (lambda.smooth == 0) { return(fisherinfo) } # Add penalty term and return the penalized Fisher information matrix penalty <- matrix(0, ncol=ncol(fisherinfo), nrow=nrow(fisherinfo)) zIndex <- ncol(X) + seq_len(ncol(Z)) penalty[zIndex,zIndex] <- lambda.smooth * K return(fisherinfo + penalty) } ###################################################################### # Marginal likelihood of the log(smoothing) parameter as given # by a Laplace approximation c.f. Kneib & Fahrmeir (2006), p.9. # or Cai et al (2002) # # Params: # log.lambda.smooth - log parametrization to ensure positive value of # lambda.smooth # theta - fixed regression parameters # X - design matrix of additive part # Z - design matrix of multiplicative part # survs - the data.frame containing the data in survs format # weights - for weighting individual entries # K - smoother matrix # # Returns: # value of lmarg ###################################################################### .lmarg.lambda <- function(log.lambda.smooth, theta, X, Z, survs, weights, K) { #Contribution of the penalized likelihood loglik <- .ploglik(theta, X, Z, survs, weights, exp(log.lambda.smooth), K) #Laplace approximation using TP representation H <- .pfisherinfo(theta, X, Z, survs, weights, exp(log.lambda.smooth), K) beta <- theta[ncol(X) + seq_len(ncol(Z))] #[Q]: Extract baseline terms from model and translate into #TP-spline setting, i.e. a B-spline of 0th order is assumed baselineIdx <- grep("cox\\(logbaseline.*\\)",dimnames(Z)[[2]]) b <- diff(beta[baselineIdx]) laplace <- 1/2*(length(b)-1)*log.lambda.smooth - 1/2*log(det(H)) return(loglik + laplace) } ###################################################################### # Model fitter. 
Prepares everything and uses optim's (L-)BFGS(-B) to # maximize the (penalized) log-likelihood. ###################################################################### twinSIR <- function (formula, data, weights, subset, knots = NULL, nIntervals = 1, lambda.smooth = 0, penalty = 1, optim.args = list(), model = TRUE, keep.data = FALSE) { cl <- match.call() ## Verify that 'data' inherits from "epidata" data <- eval(cl$data, parent.frame()) if (!inherits(data, "epidata")) { stop("'data' must inherit from class \"epidata\"") } ## Extract the time range of the epidemic timeRange <- attr(data, "timeRange") minTime <- timeRange[1L] maxTime <- timeRange[2L] # ## NOTE: modification of 'data' has no effect with the current evaluation # ## of model.frame in the parent.frame() as the original 'data' will # ## be used. # ## Impute blocks for 'knots', which are not existing stop times # if (is.vector(knots, mode = "numeric")) { # insideKnot <- (knots > minTime) & (knots < maxTime) # if (any(!insideKnot)) { # warning("only 'knots' inside the observation period are considered") # } # knots <- sort(knots[insideKnot]) # data <- intersperse(data, knots) # } ############################ ### Build up model.frame ### (this is derived from the coxph function) ############################ mfnames <- c("", "formula", "data", "weights", "subset") mf <- cl[match(mfnames, names(cl), nomatch = 0L)] mf$id <- as.name("id") mf$atRiskY <- as.name("atRiskY") mf$subset <- if (is.null(mf$subset)) { call("==", mf$atRiskY, 1) } else { call("&", mf$subset, call("==", mf$atRiskY, 1)) } if(length(formula) == 2L) { # i.e. no response specified formula[3L] <- formula[2L] formula[[2L]] <- quote(cbind(start, stop, event)) } mf$na.action <- as.name("na.fail") special <- c("cox") Terms <- terms(formula, specials = special, data = data, keep.order = FALSE) mf$formula <- Terms mf[[1]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) ########################################################### ### Check arguments and extract components of the model ### ########################################################### ## Extract and check 'weights' weights <- model.extract(mf, "weights") if (is.null(weights)) { weights <- rep(1, nrow(mf)) names(weights) <- attr(mf, "row.names") } else { if (!is.vector(weights, mode="numeric")) { stop("'weights' must be a numeric vector") } if (any(weights < 0)) { stop("negative 'weights' not allowed") } } ## Extract the response response <- model.response(mf) survs <- data.frame(id = model.extract(mf, "id"), start = response[,1L], stop = response[,2L], event = response[,3L], check.names = FALSE, stringsAsFactors = FALSE) attr(survs, "eventTimes") <- survs$stop[survs$event == 1] ##<- equals attr(data, "eventTimes") if missing(subset) attr(survs, "timeRange") <- timeRange ## Check that we have events if (length(attr(survs, "eventTimes")) == 0) warning("no events in data", if (!missing(subset)) " (subject to 'subset')") ## Check specified baseline intervals if (is.null(knots) && isScalar(nIntervals)) { knots <- if (nIntervals == 1) { numeric(0) } else if (nIntervals > 1) { quantile(attr(survs, "eventTimes"), probs = seq(from=0, to=1, length.out=nIntervals+1)[-c(1,nIntervals+1)], type = 1, names = FALSE) } else { stop("'nIntervals' must be a single number >= 1") } } else if (is.vector(knots, mode = "numeric")) { isInsideKnot <- (knots > minTime) & (knots < maxTime) if (any(!isInsideKnot)) { warning("only 'knots' inside the observation period are considered") knots <- knots[isInsideKnot] } isStopKnot <- knots 
%in% unique(survs$stop) if (any(!isStopKnot)) { stop("'knots' must be a subset of 'unique(data$stop[data$atRiskY==1])'", if (!missing(subset)) ",\n where 'data' is subject to 'subset'") } knots <- sort(knots) } else { stop("'knots' (a numeric vector) or 'nIntervals' (a single number) ", "must be specified") } intervals <- c(minTime, knots, maxTime) nIntervals <- length(intervals) - 1L message( sprintf(ngettext(nIntervals, "Initialized %d log-baseline interval: ", "Initialized %d log-baseline intervals: "), nIntervals), paste(format(intervals, trim = TRUE), collapse=" ") ) ## Extract the two parts of the design matrix: ## Z contains the Cox part, X contains the epidemic part, there's no intercept des <- read.design(mf, Terms) X <- des$X; px <- ncol(X) Z <- des$Z ## Add variables for the piecewise constant baseline to Z (if requested) if (nIntervals == 1L) { nEvents <- length(attr(survs, "eventTimes")) if (attr(Terms, "intercept") == 1) Z <- cbind("cox(logbaseline)" = 1, Z) } else { # we have more than one baseline interval/parameter intervalIndices <- findInterval(survs$start, intervals, rightmost.closed = FALSE) intervalNumbers <- seq_len(nIntervals) baselineVars <- sapply(intervalNumbers, function(i) intervalIndices == i) dimnames(baselineVars) <- list(NULL, paste("cox(logbaseline.", intervalNumbers, ")", sep="")) Z <- cbind(baselineVars, Z) nEvents <- as.vector(table(factor(intervalIndices[survs$event == 1], levels = seq_len(nIntervals)))) } pz <- ncol(Z) ## Check that we have at least one parameter if (pz == 0L && px == 0L) { stop("nothing to do: neither a baseline nor covariates have been specified") } ## Check lambda.smooth if (!isScalar(lambda.smooth)) { stop("'lambda.smooth' must be scalar") } if (lambda.smooth != 0 && pz == 0L) { lambda.smooth <- 0 message("Note: 'lambda.smooth' was set to 0, because there was no endemic ", "component in the formula.") } ## Setup penalty matrix if (isScalar(penalty)) { K <- matrix(0, ncol = pz, nrow = pz) if (lambda.smooth != 0 && nIntervals > 1L) { # do we have equidistant knots? knotSpacings <- diff(intervals) #equidistant <- all(sapply(knotSpacings[-1], function(x) isTRUE(all.equal(x,knotSpacings[1])))) equidistant <- isTRUE(all.equal(diff(knotSpacings), rep.int(0,nIntervals-1))) if (equidistant) { # K = D'D only works for equidistant knots # difference matrix of order 'penalty' D <- diff(diag(nIntervals), differences=penalty) K[intervalNumbers,intervalNumbers] <- crossprod(D) # t(D) %*% D } else { # special weighting scheme for the non-equidistant case if (penalty != 1) { stop("ATM, non-equidistant knots only work for 1st order penalty") } #Use Fahrmeir & Lang (2001), p.206 invdelta <- 1/diff(intervals) * mean(diff(intervals)) #Use Fahrmeir & Lang (2001), p.206 for (i in seq_len(nIntervals)) { idx2 <- cbind(j=c(-1,1) + i, deltaidx=i+c(-1,0),fac=c(-1,-1)) idx2 <- idx2[idx2[,"j"] > 0 & idx2[,"j"] <= nIntervals,,drop=FALSE] #Off diagonal elements K[i, idx2[,"j"]] <- invdelta[idx2[,"deltaidx"]] * idx2[,"fac"] #Diagonal element K[i, i] <- sum(invdelta[idx2[,"deltaidx"]]) } message("Note: non-equidistant knots. 
Using penalization matrix ", "correcting for distance between knots.\n") # print(K) # browser() } } } else if (is.matrix(penalty) && ncol(penalty) == pz && nrow(penalty) == pz) { K <- penalty } else { stop("'penalty' must either be a single number or a square matrix of ", "dimension ", pz, "x", pz, ", fitting the number of unknown ", "parameters in the endemic component (baseline and covariates)") } ## Check that optim.args is a list if (!is.list(optim.args)) { stop("'optim.args' must be a list") } ## Check start value for theta if (!is.null(optim.args[["par"]])) { if (!is.vector(optim.args$par, mode="numeric")) { stop("'optim.args$par' must be a numeric vector or NULL") } if (length(optim.args$par) != px + pz) { stop(gettextf(paste("'optim.args$par' (%d) does not have the same length", "as the number of unknown parameters (%d + %d = %d)"), length(optim.args$par), px, pz, px + pz)) } } else { optim.args$par <- c(rep.int(1, px), rep.int(0, pz)) } message("Initial parameter vector: ", paste(optim.args$par, collapse=" ")) ## Set names for theta names(optim.args$par) <- c(colnames(X), colnames(Z)) #################### ### Optimization ### #################### ## Configuring the optim procedure (check optim.args) optimControl <- list(trace = 1, fnscale = -1, maxit = 300, factr = 1e7) optimControl[names(optim.args[["control"]])] <- optim.args[["control"]] optim.args$control <- optimControl optimArgs <- list(par = optim.args$par, fn = .ploglik, gr = .pscore, X = X, Z = Z, survs = survs, weights = weights, lambda.smooth = lambda.smooth, K = K, method = "L-BFGS-B", lower = c(rep(0,px), rep(-Inf,pz)), upper = rep(Inf,px+pz), control = list(), hessian = FALSE) namesOptimArgs <- names(optimArgs) namesOptimUser <- names(optim.args) optimValid <- namesOptimUser %in% namesOptimArgs optimArgs[namesOptimUser[optimValid]] <- optim.args[optimValid] if (any(!optimValid)) warning("unknown names in optim.args: ", paste(namesOptimUser[!optimValid], collapse = ", ")) if (! "method" %in% namesOptimUser && px == 0L) { optimArgs$method <- "BFGS" } if (optimArgs$method != "L-BFGS-B") { optimArgs$lower <- -Inf optimArgs$upper <- Inf } #Fit model using fixed smoothing parameter or use mixed model #representation to estimate lambda.smooth using marginal likelihood if (lambda.smooth == -1) { if (isScalar(penalty) && penalty == 1) { ################################################################### ##TODO: Need to check for B-spline (?). Move options into ctrl obj ################################################################### #Iterative procedure where we change between optimizing regression #parameters given fixed smoothing parameter and optimizing the #smoothing parameter given fixed regression parameters (Gauss-Seidel) #procedure. The tuning parameters (5) could go into the control object. lambda.smooth <- 5 reltol <- 1e-2 maxit <- 25 #Parameters for keeping track of the iterations lambda.smoothOld <- 1e99 iter <- 0 #Loop until relative convergence or max-iteration reached while ((abs(lambda.smooth-lambda.smoothOld)/lambda.smoothOld > reltol) & (iter < maxit)) { #Iteration begins iter <- iter + 1 if (optimControl$trace > 0) { cat("==> Iteration ",iter," of Gauss-Seidel maximization. 
lambda.smooth = ",lambda.smooth,"\n") } #Step 1 - maximize (alpha,beta) with fixed lambda optimArgs$lambda.smooth <- lambda.smooth optimRes <- do.call("optim", optimArgs) theta <- optimRes$par optimArgs$par <- theta #better start value the next time #Step 2 - maximize log(lambda) with fixed (alpha,beta) optimLambda <- optim(log(lambda.smooth), .lmarg.lambda, control=list(fnscale=-1,trace=1),method="BFGS", theta=theta, X=X, Z=Z, survs=survs, weights=weights, K=K) lambda.smoothOld <- lambda.smooth lambda.smooth <- exp(optimLambda$par) } #Done, update optimArgs with new smoothing parameter optimArgs$lambda.smooth <- lambda.smooth } else { stop("REML estimation using TP-splines only works for 1st order differences.") } } ## Call optim with the arguments above (including the news smoothing param) optimRes <- do.call("optim", optimArgs) ############## ### Return ### ############## ## Set up list object to be returned fit <- list(coefficients = optimRes$par, lambda.smooth = lambda.smooth, loglik = optimRes$value, counts = optimRes$counts, converged = (optimRes$convergence == 0)) ## If requested, add observed fisher info (= negative hessian at maximum) if (!is.null(optimRes$hessian)) { fit$fisherinfo.observed <- -optimRes$hessian } ## Add own (exact) fisher info computation fit$fisherinfo <- .pfisherinfo(theta = fit$coefficients, X = X, Z = Z, survs = survs, weights = weights, lambda.smooth = lambda.smooth, K = K) ## Add 'method' fit$method <- optimArgs$method ## Append further information fit$intervals <- intervals fit$nEvents <- nEvents if (model) { fit$model <- list( survs = survs, X = X, Z = Z, weights = weights, lambda.smooth = lambda.smooth, K = K, f = attr(data, "f")[match(colnames(X), names(attr(data, "f")), nomatch=0)], w = attr(data, "w")[match(colnames(X), names(attr(data, "w")), nomatch=0)] ) } if (keep.data) { fit$data <- data } fit$call <- cl fit$formula <- formula(Terms) fit$terms <- Terms ## Return object of class "twinSIR" class(fit) <- "twinSIR" return(fit) } surveillance/R/fanplot.R0000644000176200001440000000715413325600040014735 0ustar liggesusers################################################################################ ### Wrapper function for fanplot::fan() ### ### Copyright (C) 2017-2018 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ################################################################################ fanplot <- function (quantiles, probs, means = NULL, observed = NULL, start = 1, fan.args = list(), means.args = list(), observed.args = list(), key.args = NULL, xlim = NULL, ylim = NULL, log = "", xlab = "Time", ylab = "No. infected", add = FALSE, ...) 
{ if (!requireNamespace("fanplot", quietly = TRUE)) stop("package ", sQuote("fanplot"), " is missing; ", "do 'install.packages(\"fanplot\")'") stopifnot(is.matrix(quantiles), length(probs) == ncol(quantiles), is.null(means) || length(means) == nrow(quantiles), is.null(observed) || length(observed) == nrow(quantiles), isScalar(start)) ## axis range ylog <- grepl("y", log) if (is.null(xlim)) xlim <- c(1 - 0.5, nrow(quantiles) + 0.5) + (start-1) if (is.null(ylim)) { ylim <- range(quantiles, observed) if (!ylog && ylim[1L] > 0) { ylim[1L] <- 0 } } ## graphical parameters stopifnot(is.list(fan.args)) fan.args <- modifyList( list(data = t(quantiles), data.type = "values", probs = probs, start = start, fan.col = heat.colors, ln = NULL), fan.args, keep.null = TRUE) ## initialize empty plot if (!add) plot.default(xlim, ylim, type = "n", log = log, xlab = xlab, ylab = ylab, ...) ## add fan do.call(fanplot::fan, fan.args) ## add point predictions if (!is.null(means) && is.list(means.args)) { means.args <- modifyList( list(x = seq_along(means) + (start-1), y = means, type = "l", lwd = 2, col = "white"), means.args) do.call("lines", means.args) } ## add observed time series if (!is.null(observed) && is.list(observed.args)) { observed.args <- modifyList( list(x = seq_along(observed) + (start-1), y = observed, type = "b", lwd = 2), observed.args) do.call("lines", observed.args) } ## add color key if (is.list(key.args)) { defaultyrange <- local({ if (ylog) ylim <- log(ylim) {if (ylog) exp else identity}(c(ylim[1L] + mean(ylim), ylim[2L])) }) key.args <- modifyList( list(start = xlim[2L] - 1, ylim = defaultyrange, data.type = "values", style = "boxfan", probs = fan.args$probs, fan.col = fan.args$fan.col, ln = NULL, space = 0.9, rlab = quantile(fan.args$probs, names = FALSE, type = 1)), key.args) ## convert ylim to data yvals <- if (ylog) { exp(seq.int(from = log(key.args$ylim[1L]), to = log(key.args$ylim[2L]), length.out = length(fan.args$probs))) } else { seq.int(from = key.args$ylim[1L], to = key.args$ylim[2L], length.out = length(fan.args$probs)) } key.args$data <- matrix(yvals) key.args$ylim <- NULL tryCatch(do.call(fanplot::fan, key.args), error = function (e) warning("color key could not be drawn, probably due to non-standard 'probs'", call. = FALSE)) } invisible(NULL) } surveillance/R/calibration.R0000644000176200001440000000511013350442732015561 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Calibration tests for count data based on proper scoring rules ### Reference: Wei and Held (2014), Test, 23, 787-805 ### ### Copyright (C) 2015,2018 Sebastian Meyer ### $Revision: 2223 $ ### $Date: 2018-09-19 14:49:30 +0200 (Wed, 19. Sep 2018) $ ################################################################################ ## perform a calibration test given observations x ## with Poisson (size = NULL) or NegBin predictions calibrationTest.default <- function (x, mu, size = NULL, which = c("dss", "logs", "rps"), tolerance = 1e-4, method = 2, ...) 
{ stopifnot(x >= 0, mu > 0, is.null(size) || all(size > 0)) ## calculate scores which <- match.arg(which) score <- do.call(which, args = alist(x = x, mu = mu, size = size)) ## calculate z-statistic z <- calibrationZ(score, mu, size, which, tolerance, method) ## calculate two-sided p-value p <- 2 * pnorm(-abs(z)) ## construct an object of class "htest" res <- list( method = paste0("Calibration Test for Count Data (based on ", toupper(which), ")"), data.name = deparse(substitute(x)), statistic = c("z" = z), parameter = c("n" = length(x)), p.value = p ) class(res) <- "htest" res } ## compute the calibration z-statistic given the computed scores calibrationZ <- function (score, mu, size = NULL, which = c("dss", "logs", "rps"), tolerance = 1e-4, method = 2) { stopifnot(method %in% 1:2) ## expectation and variance of score for given predictive distribution EV <- score_EV(mu, size, tolerance, which) ## calculate the z-statistic z <- do.call(paste0("zScore", method), args = alist(score, EV[[1L]], EV[[2L]])) z } ## compute the calibration z-statistic and p-value ## from a set of scores and their null expectations and variances zScore1 <- function (score, E0, V0) { n <- length(score) ## emean <- mean(E0) ## varmean <- sum(V0) / n^2 ## (mean(score) - emean) / sqrt(varmean) sum(score - E0) / sqrt(sum(V0)) } ## alternative z-statistic Z* zScore2 <- function (score, E0, V0) { n <- length(score) sum((score - E0) / sqrt(V0)) / sqrt(n) } surveillance/R/sts_observation.R0000644000176200001440000000233113346465003016521 0ustar liggesusers################################################################################ ### Function for creating an "sts" object with a given observation date ### ### Copyright (C) 2014-2015 Maelle Salmon ################################################################################ sts_observation <- function(sts,dateObservation,cut=TRUE) { # The sts object we shall return stsSub <- sts # Index of the observation date line1 <- which(epoch(sts)==dateObservation) # Maximal delay D <- dim(stsSub@control$reportingTriangle$n)[2]-1 # Number of dates theEnd <- dim(stsSub@control$reportingTriangle$n)[1] # Nothing observed after the observation date (I am a genius) stsSub@control$reportingTriangle$n[(line1+1):theEnd,] <- NA stsSub@observed[(line1+1):theEnd] <- 0 # Not everything observed before the observation date for (i in 1:D){ stsSub@control$reportingTriangle$n[line1+1-i,(i+1):(D+1)] <- NA stsSub@observed[line1+1-i] <- sum(stsSub@control$reportingTriangle$n[line1+1-i,],na.rm=T) } stsSub@control$reportingTriangle$n <- stsSub@control$reportingTriangle$n[1:line1,] # Return the new sts object if (cut){return(stsSub[1:line1])} else{return(stsSub)} } surveillance/R/options.R0000644000176200001440000001052612375650445015004 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Description: Set up surveillance.options. ### The code below is inspired by the options management of the ### spatstat package authored by Adrian Baddeley and Rolf Turner, which is ### available under GPL-2 from http://CRAN.R-project.org/package=spatstat ### ### Copyright (C) 2012 Sebastian Meyer ### $Revision: 960 $ ### $Date: 2014-08-22 16:18:13 +0200 (Fri, 22. 
Aug 2014) $ ################################################################################ .Options <- new.env() ## Specify options .Options$gpclib <- list( default = FALSE, # no gpclib due to license restrictions check = function(x) { if (!is.logical(x) || length(x) != 1L) return(FALSE) if (x && !requireNamespace("gpclib")) { warning("cannot set gpclib=TRUE") return(FALSE) } TRUE }, valid = "a single logical value" ) .Options$allExamples <- list( default = TRUE, # maybe disabled by .onAttach() check = function(x) is.logical(x) && length(x) == 1L, valid = "a single logical value" ) #Tick sizes of sts xaxis relative to par()$tcl .Options$stsTickFactors <- list( default = c("%d"=0.33,"%W"=0.33,"%V"=0.33,"%m"=1,"%Q"=1.25,"%Y"=1.5,"%G"=1.5), check = function(x) is.vector(x, mode="numeric") && !is.null(names(x)), valid = "a named vector of relative tick sizes" ) #Colors for the prediction intervals in nowcast plots .Options$colors <- list( default = c(nowSymbol="springgreen4",piBars="orange"), check = function(x) is.character(x), valid = "a vector of color names" ) ## Function to activate the defaults reset.surveillance.options <- function () { opts <- sapply(ls(.Options, all.names=TRUE), function (option) { .Options[[option]]$value <- .Options[[option]]$default }, simplify=FALSE, USE.NAMES=TRUE) invisible(opts) } ## Internal function to query options get.surveillance.options <- function (x, drop = TRUE) { opts <- lapply(.Options, "[[", "value") if (drop && !missing(x) && length(x) == 1L) opts[[x]] else opts[x] } ## Exported function to modify and query options surveillance.options <- function (...) { knownOptions <- ls(.Options, all.names=TRUE) called <- list(...) if (length(called) == 0) return(get.surveillance.options()) if (is.null(names(called)) && length(called)==1) { x <- called[[1]] if (is.null(x)) return(get.surveillance.options()) if (is.list(x)) called <- x } if (is.null(names(called))) # case: surveillance.options("par1","par2",...) { ischar <- unlist(lapply(called, is.character)) if(all(ischar)) { choices <- unlist(called) ok <- choices %in% knownOptions if(!all(ok)) stop("unrecognised option(s): ", called[!ok]) return(get.surveillance.options(choices)) } else { wrong <- called[!ischar] offending <- unlist(lapply(wrong, deparse, nlines=1, control="delayPromises")) offending <- paste(offending, collapse=",") stop("unrecognised mode of argument(s) [", offending, "]:", "\n should be character string or name=value pair") } } else { # case: surveillance.options(name=value, name2=value2, ...) 
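        ## e.g., surveillance.options(allExamples = FALSE) takes this branch;
        ## the old value(s) are returned invisibly (see below), so they can be
        ## restored later via surveillance.options(oldopts)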
assignto <- names(called)
    if (!all(nzchar(assignto)))
      stop("options must all be identified by name=value")
    recog <- assignto %in% knownOptions
    if(!all(recog))
      stop("unrecognised option(s): ", assignto[!recog])
    ## validate and assign new values
    oldopts <- get.surveillance.options(assignto, drop=FALSE)
    for(i in seq_along(assignto)) {
      nama <- assignto[i]
      valo <- called[[i]]
      entry <- .Options[[nama]]
      if (!entry$check(valo))
        stop("option ", dQuote(nama), " should be ", entry$valid)
      .Options[[nama]]$value <- valo
    }
    ## done
    invisible(oldopts)
  }
}
surveillance/R/algo_cusum.R0000644000176200001440000002060312237174420015433 0ustar liggesusers###################################################
### chunk number 1:
###################################################
algo.cusum <- function(disProgObj, control = list(range=range, k=1.04, h=2.26,
                       m=NULL, trans="standard", alpha=NULL)){
  # Set the default values if not yet set
  if(is.null(control$k)) control$k <- 1.04
  if(is.null(control$h)) control$h <- 2.26
  if(is.null(control$trans)) control$trans <- "standard"
  if(is.null(control$alpha)) control$alpha <- 0.1
  alpha <- control$alpha

  observed <- disProgObj$observed
  timePoint <- control$range[1]

  # Estimate m (the expected number of cases), i.e. parameter lambda of a
  # Poisson distribution, based on time points 1:(t-1)
  if(is.null(control$m)) {
    m <- mean(observed[1:(timePoint-1)])
  } else if (is.numeric(control$m)) {
    m <- control$m
  } else if (control$m == "glm") {
    #Fit a glm to the first observations
    training <- 1:(timePoint-1)
    #Set the time index
    t <- disProgObj$start[2] + training - 1
    #Set the observations
    x <- observed[training]
    #Set period
    p <- disProgObj$freq
    df <- data.frame(x=x, t=t)
    control$m.glm <- glm(x ~ 1 + cos(2*pi/p*t) + sin(2*pi/p*t),
                         family=poisson(), data=df)
    #predict the values in range
    t.new <- disProgObj$start[2] + control$range - 1
    m <- predict(control$m.glm, newdata=data.frame(t=t.new), type="response")
  }

  #No transformation
  #standObs <- observed[control$range]
  x <- observed[control$range]
  standObs <- switch(control$trans,
    # compute standardized variables z3 (proposed by Rossi)
    "rossi" = (x - 3*m + 2*sqrt(x*m))/(2*sqrt(m)),
    # compute standardized variables z1 (based on asymptotic normality)
    "standard" = (x - m)/sqrt(m),
    # anscombe residuals
    "anscombe" = 3/2*(x^(2/3)-m^(2/3))/m^(1/6),
    # anscombe residuals as in Pierce & Schafer, based on a 2nd order approx of E(X)
    "anscombe2nd" = (x^(2/3)-(m^(2/3)-m^(-1/3)/9))/(2/3*m^(1/6)),
    # compute Pearson residuals for NegBin
    "pearsonNegBin" = (x - m)/sqrt(m+alpha*m^2),
    # anscombe residuals for NegBin
    "anscombeNegBin" = anscombeNB(x, mu=m, alpha=alpha),
    # don't do anything
    "none" = x,
    stop("invalid 'trans'formation")
  )

  # initialize the necessary vectors:
  # start with cusum[timePoint-1] = 0, i.e. set cusum[1] = 0
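  # CUSUM recursion with reference value k and decision interval h:
  #   S_0 = 0,   S_t = max(0, S_{t-1} + (z_t - k)),   alarm as soon as S_t >= h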
  cusum <- matrix(0, nrow=(length(control$range)+1), ncol=1)
  alarm <- matrix(data = 0, nrow = (length(control$range)+1), ncol = 1)

  for (t in 1:length(control$range)){
    # compute cumulated sums of standardized observations corrected with the
    # reference value k for all time points in range
    cusum[t+1] <- max(0, cusum[t]+(standObs[t]-control$k))
    # give alarm if the cusum is larger than the decision boundary h
    alarm[t+1] <- cusum[t+1] >= control$h
  }

  #Backtransform
  h <- control$h
  k <- control$k
  Ctm1 <- cusum[1:length(control$range)]
  upperbound <- switch(control$trans,
    # standardized variables z3 (proposed by Rossi)
    "rossi" = 2*h*m^(1/2)+2*k*m^(1/2)-2*Ctm1*m^(1/2)+5*m-2*(4*m^2+2*m^(3/2)*h+2*m^(3/2)*k-2*m^(3/2)*Ctm1)^(1/2),
    # standardized variables z1 (based on asymptotic normality)
    "standard" = ceiling(sqrt(m)*(h+k-Ctm1) + m),
    # anscombe residuals
    "anscombe" = ifelse( ((2/3)*m^(1/6)*(h+k-Ctm1)+m^(2/3))<0,
                         0,
                         (2/3*m^(1/6)*(h+k-Ctm1)+m^(2/3))^(3/2) ),
    # anscombe residuals ?
    "anscombe2nd" = ifelse( ((2/3)*m^(1/6)*(h+k-Ctm1)+(m^(2/3)-m^(1/3)/9))<0,
                            0,
                            (2/3*m^(1/6)*(h+k-Ctm1)+(m^(2/3)-m^(1/3)/9))^(3/2) ),
    # Pearson residuals for NegBin
    "pearsonNegBin" = sqrt(m+alpha*m^2)*(h+k-Ctm1) + m,
    # anscombe residuals for NegBin ?
    "anscombeNegBin" = h-cusum[-1],
    # don't do anything
    "none" = h-cusum[-1]
  )
  # ensure upper bound is positive and not NaN
  upperbound[is.na(upperbound)] <- 0
  upperbound[upperbound < 0] <- 0

  # discard cusum[1] and alarm[1]
  cusum <- cusum[-1]
  alarm <- alarm[-1]

  #Add name and data name to control object.
  control$name <- paste("cusum:", control$trans)
  control$data <- paste(deparse(substitute(disProgObj)))
  control$m <- m

  # return alarm and upperbound vectors
  result <- list(alarm = alarm, upperbound = upperbound,
                 disProgObj=disProgObj, control=control, cusum=cusum)
  class(result) = "survRes" # for surveillance system result
  return(result)
}

###################################################
### chunk number 2:
###################################################
######################################################################
# Program to test the transformation of NegBin variables
# using a transformation similar to Anscombe residuals
######################################################################

#####################################################################
# function to evaluate hypgeom_2F1(1/3, 2/3, 5/3, x)
# "exact" values for x = -(0:10) and linear interpolation for x = -(10:100)
####################################################################
hypgeom2F1special <- function(x) {
  #Return the z (the approximation grid) which is closest to x
  idx <- which.min(abs(surveillance.gvar.z-x))
  if(x >= -10) {
    return(surveillance.gvar.hyp[idx])
  } else {
    # find out which interval contains x
    if((x-surveillance.gvar.z[idx]) < 0){
      idxLow <- idx + 1
      idxUp <- idx
    } else {
      idxLow <- idx
      idxUp <- idx - 1
    }
    #linear interpolation: f(x) = f(x0) + (f(x1)-f(x0))/1 * (x-x0)
    return(surveillance.gvar.hyp[idxLow] +
           (surveillance.gvar.hyp[idxUp]-surveillance.gvar.hyp[idxLow]) *
           (x-surveillance.gvar.z[idxLow]))
  }
}

#####################################################################
# compute Anscombe residuals for Y ~ NegBin(mu, alpha) using the
# hypgeom2F1 function; E(Y) = \mu, Var(Y) = \mu + \alpha*\mu^2
#################################################################
anscombeNB <- function(y, mu, alpha=0.1) {
  hypgeom.mu <- 3/2*mu^(2/3)*hypgeom2F1special(-alpha*mu)
  one <- function(y){
    up <- 3/2*y^(2/3) * hypgeom2F1special(-alpha*y) - hypgeom.mu
    down <- (mu+alpha*mu^2)^(1/6)
    return(up/down)
  }
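  ## apply the scalar transformation to each observation in y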
  return(sapply(y, one))
}

###################################################
### chunk number 3:
###################################################
######################################################################
# Given a specification of the average run length in the (a)cceptance
# and (r)ejection setting, determine the k and h values in a standard
# normal setting.
#
# Description:
# Functions from the spc package are used in a simple univariate
# root finding problem.
#
# Params:
#  ARLa - average run length in acceptance setting (i.e. number before
#         a false alarm)
#  ARLr - average run length in rejection state (i.e. number before
#         an increase is detected, i.e. the detection delay)
#  method - optim method to use, see ?optim
#
# Returns:
#  list(k - reference value, h - decision interval)
######################################################################
find.kh <- function(ARLa=500, ARLr=7, sided="one", method="BFGS", verbose=FALSE)
{
  if (!requireNamespace("spc"))
    stop("find.kh() requires package ", dQuote("spc"))

  #Small helper function which is to be minimized
  fun <- function(k) {
    if (k>0) {
      #Compute decision interval
      h <- spc::xcusum.crit(L0=ARLa, k=k, r=50, sided=sided)
      #Check if xcusum.crit managed to find a solution
      if (is.nan(h))
        stop("spc::xcusum.crit was not able to find a h corresponding to ",
             "ARLa=", ARLa, " and k=", k)
      if (h > 0) {
        #Compute ARLr given the above computed h
        arlr <- spc::xcusum.arl(k, h, mu=2*k, r=50, sided=sided)
        #Deviation from the requested ARLr
        if (verbose) {
          cat("k=", k, " score = ", (arlr-ARLr)^2, "\n")
        }
        return( (arlr-ARLr)^2 )
      } else {
        return(1e99)
      }
    } else {
      return(1e99)
    }
  }

  k <- optim(1, fun, method=method)$par
  return(list(k=k, h=spc::xcusum.crit(L0=ARLa, k=k, r=50, sided=sided)))
}
surveillance/R/catCUSUM.R0000644000176200001440000002135613432625235014662 0ustar liggesusers#########################################################################
# Categorical CUSUM for y_t \sim M_k(n_t, \pi_t) for t=1,...,tmax
# Workhorse function doing the actual computations - no semantic checks
# are performed here, we expect "proper" input.
#
# Params:
#  y    - (k) \times tmax observation matrix for all categories
#  pi0  - (k) \times tmax in-control prob vector for all categories
#  pi1  - (k) \times tmax out-of-control prob vector for all categories
#  dfun - PMF function of the categorical response, i.e. multinomial,
#         binomial, beta-binomial, etc.
#  n    - vector of dim tmax containing the varying sizes
#  h    - decision threshold of the categorical CUSUM
#  calc.at -
#########################################################################
catcusum.LLRcompute <- function(y, pi0, pi1, h, dfun, n, calc.at=TRUE, ...) {
  #Initialize variables
  t <- 0
  stopped <- FALSE
  S <- numeric(ncol(y)+1)
  U <- numeric(ncol(y)+1)

  ##Check if dfun is the binomial
  isBinomialPMF <- isTRUE(attr(dfun, which="isBinomialPMF"))

  #Run the categorical LR CUSUM
  while (!stopped) {
    #Increase time
    t <- t+1
    #Compute log likelihood ratio
    llr <- dfun(y=y[,t,drop=FALSE], size=n[t], mu=pi1[,t,drop=FALSE], log=TRUE, ...) -
           dfun(y=y[,t,drop=FALSE], size=n[t], mu=pi0[,t,drop=FALSE], log=TRUE, ...)
    #Add to CUSUM
    S[t+1] <- max(0, S[t] + llr)

    #For binomial data it is also possible to compute how many cases it would
    #take to sound an alarm given the past.
    if ((nrow(y) == 2) & calc.at) {
      ##For the binomial PMF the number needed for an alarm can be computed
      ##exactly
      if (isBinomialPMF) {
        ##Calculations in ../maple/numberneededbeforealarm.mw.
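        ##i.e. solve S[t] + llr(a_t) = h for the number of cases a_t: for the
        ##binomial PMF the log-LR is linear in a_t, hence the closed form below.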
        at <- (h - S[t] - n[t] * ( log(1 - pi1[1,t]) - log(1-pi0[1,t]))) /
              (log(pi1[1,t]) - log(pi0[1,t]) - log(1-pi1[1,t]) + log(1-pi0[1,t]))
        U[t+1] <- ceiling(max(0, at))
        ##Note: U[t+1] can be higher than the corresponding n_t.
        if (U[t+1] > n[t]) U[t+1] <- NA
      } else {
        #Compute the value at by trying all values between 0 and n_t. If
        #no alarm, then we know the value for an alarm must be larger than y_t
        if (S[t+1] > h) {
          ay <- rbind(seq(0, y[1,t], by=1), n[t]-seq(0, y[1,t], by=1))
        } else {
          ay <- rbind(seq(y[1,t], n[t], by=1), n[t]-seq(y[1,t], n[t], by=1))
        }
        llr <- dfun(ay, size=n[t], mu=pi1[,t,drop=FALSE], log=TRUE, ...) -
               dfun(ay, size=n[t], mu=pi0[,t,drop=FALSE], log=TRUE, ...)
        alarm <- llr > h-S[t]
        ##Is any a_t==TRUE, i.e. does such a y_t exist, or is the set over which
        ##to take the minimum empty?
        if (any(alarm)) {
          U[t+1] <- ay[1, which.max(alarm)]
        } else {
          U[t+1] <- NA
        }
      }
    }

    ##Only run to the first alarm. Then reset.
    if ((S[t+1] > h) | (t==ncol(y))) { stopped <- TRUE }
  }

  ##If no alarm at the end, put rl to the end (it's censored! hoehle: Actually
  ##it should be length+1, but the chopping is written such that copying occurs
  ##until the final index (hence we can't just do ncol(pi0)+1).
  ##Hence, N is more like the last index investigated.
  if (any(S[-1] > h)) {
    t <- which.max(S[-1] > h)
  } else {
    t <- ncol(pi0) ##Last one
  }

  ##Missing: cases needs to be returned!
  return(list(N=t, val=S[-1], cases=U[-1]))
}

######################################################################
## Wrap function to process sts object by categoricalCUSUM (new S4
## style). The time-varying number of counts is found in slot
## populationFrac.
##
## Params:
##  control - list with the following components
##   * range - vector of indices in disProgObj to monitor
##   * h     - threshold, once CUSUM > h we have an alarm
##   * pi0   - (k-1) \times tmax in-control prob vector for all but ref cat
##   * pi1   - (k-1) \times tmax out-of-control prob vector for all but ref cat
##   * dfun  - PMF to use for the computations, dmultinom, dbinom, dBB, etc.
##  ...      - further parameters to be sent to dfun
######################################################################
categoricalCUSUM <- function(stsObj, control = list(range=NULL, h=5,
                             pi0=NULL, pi1=NULL, dfun=NULL,
                             ret=c("cases","value")), ...) {
  ##Set the default values if not yet set
  if(is.null(control[["pi0"]])) {
    stop("no specification of in-control proportion vector pi0")
  }
  if(is.null(control[["pi1"]])) {
    stop("no specification of out-of-control proportion vector pi1")
  }
  if(is.null(control[["dfun"]])) {
    stop("no specification of the distribution to use, e.g. dbinom, dmultinom or similar")
  }
  if(is.null(control[["h"]])) control$h <- 5
  if(is.null(control[["ret"]])) control$ret <- "value"

  ##Extract the important parts from the arguments
  if (is.numeric(control[["range"]])) {
    range <- control$range
  } else {
    stop("the range needs to be an index vector")
  }
  stsObj <- stsObj[range,]
  y <- t(stsObj@observed)
  pi0 <- control[["pi0"]]
  pi1 <- control[["pi1"]]
  dfun <- control[["dfun"]]
  control$ret <- match.arg(control$ret, c("value","cases"))

  ##Total number of objects that are investigated. Note this
  ##can't be deduced from the observed y, because only (c-1) columns
  ##are reported so using: n <- apply(y, 2, sum) is wrong!
  ##Assumption: all populationFrac's contain n_t and we can take just one
  n <- stsObj@populationFrac[,1]

  ##Semantic checks
  if ( ((ncol(y) != ncol(pi0)) | (ncol(pi0) != ncol(pi1))) |
       ((nrow(y) != nrow(pi0)) | (nrow(pi0) != nrow(pi1)))) {
    stop("dimensions of y, pi0 and pi1 have to match")
  }
  if ((control$ret == "cases") & nrow(pi0) != 2) {
    stop("cases can only be returned in case k=2")
  }
  if (length(n) != ncol(y)) {
    stop("length of n has to be equal to number of columns in y")
  }
  ##Check if all n entries are the same
  if (!all(apply(stsObj@populationFrac, 1, function(x)
                 all.equal(as.numeric(x), rev(as.numeric(x)))))) {
    stop("all entries for n have to be the same in populationFrac")
  }

  ##Reserve space for the results
  alarm <- matrix(data = FALSE, nrow = length(range), ncol = nrow(y))
  upperbound <- matrix(data = 0, nrow = length(range), ncol = nrow(y))

  ##Small helper function to be used along the way --> move to other file!
  either <- function(cond, whenTrue, whenFalse) {
    if (cond) return(whenTrue) else return(whenFalse)
  }

  ##Setup counters for the progress
  doneidx <- 0
  N <- 1
  noofalarms <- 0
  noOfTimePoints <- length(range)

  #######################################################
  ##Loop as long as we are not through the entire sequence
  #######################################################
  while (doneidx < noOfTimePoints) {
    ##Run the categorical CUSUM until the next alarm
    res <- catcusum.LLRcompute(y=y, pi0=pi0, pi1=pi1, n=n, h=control$h,
                               dfun=dfun, calc.at=(control$ret=="cases"), ...)

    ##Note: res$N is the last index investigated in the updated y vector.
    ##If res$N == ncol(y) no alarm was found in the last segment.
    ##In case an alarm is found, put it into the log and reset the chart at
    ##res$N+1.
    if (res$N < ncol(y)) {
      ##Put appropriate value in upperbound
      upperbound[1:res$N + doneidx,] <- matrix(rep(either(control$ret == "value",
                                                          res$val[1:res$N],
                                                          res$cases[1:res$N]),
                                                   each=ncol(upperbound)),
                                               ncol=ncol(upperbound), byrow=TRUE)
      alarm[res$N + doneidx,] <- TRUE

      ##Chop & get ready for next round
      y <- y[,-(1:res$N),drop=FALSE]
      pi0 <- pi0[,-(1:res$N),drop=FALSE]
      pi1 <- pi1[,-(1:res$N),drop=FALSE]
      n <- n[-(1:res$N)]

      ##Add to the number of alarms
      noofalarms <- noofalarms + 1
    }
    ##cat("doneidx = ",doneidx, "\t res$N =", res$N,"\n")
    ##Update index of how far we are in the time series
    doneidx <- doneidx + res$N
  }

  ##Add upperbound-statistic of the last segment (note: an alarm might or might
  ##not be reached here)
  upperbound[(doneidx-res$N+1):nrow(upperbound),] <- matrix(
    rep(either(control$ret == "value", res$val, res$cases), each=ncol(upperbound)),
    ncol=ncol(upperbound), byrow=TRUE)
  ##Inherit alarms as well (last time point might contain an alarm!)
  alarm[(doneidx-res$N+1):nrow(upperbound),] <- matrix(
    rep(res$val > control$h, each=ncol(alarm)),
    ncol=ncol(alarm), byrow=TRUE)

  # Add name and data name to control object
  control$name <- "categoricalCUSUM"
  control$data <- NULL #not supported anymore

  #store results in the sts object
  stsObj@alarm <- alarm
  stsObj@upperbound <- upperbound
  stsObj@control <- control

  #Ensure dimnames in the new object
  stsObj <- fix.dimnames(stsObj)

  #Done
  return(stsObj)
}
surveillance/R/plot.survRes.R0000644000176200001440000001342313566727577015744 0ustar liggesusersplot.survRes.one <- function(x, method=x$control$name, disease=x$control$data,
  domany=FALSE, ylim=NULL, xaxis.years=TRUE, startyear = 2001, firstweek = 1,
  xlab="time", ylab="No.
infected", main=NULL, type="hhs",lty=c(1,1,2),col=c(1,1,4), outbreak.symbol = list(pch=3, col=3),alarm.symbol=list(pch=24, col=2),legend.opts=list(x="top",legend=c("Infected", "Upperbound", "Alarm", "Outbreak"),lty=NULL,col=NULL,pch=NULL), ...) { ################## Handle the NULL arguments ######################################################## if (is.null(main)) main = paste("Analysis of ", as.character(disease), " using ", as.character(method),sep="") #No titles are drawn when more than one is plotted. if (domany) main = "" survResObj <- x observed <- survResObj$disProgObj$observed[survResObj$control$range] state <- survResObj$disProgObj$state[survResObj$control$range] #print(list(...)) # width of the column tab <- 0.5 # left/right help for constructing the columns observedxl <- (1:length(observed))-tab observedxr <- (1:length(observed))+tab upperboundx <- (1:length(survResObj$upperbound)) #-0.5 # control where the highest value is max <- max(max(observed),max(survResObj$upperbound)) #if ylim is not specified #if(is.null(ylim)){ # ylim <- c(-1/20*max, max) #} #~~~~~~~~~~~~~~~~~~~~~~~~~~ if (is.null(ylim)) { max <- max(max(observed), max(survResObj$upperbound)) ylim <- c(-1/20 * max, max) } else { max <- ylim[2] } #ensure that there is enough space for the alarm/outbreak symbols if(ylim[1]>=0) ylim[1] <- -1/20*max #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #Generate the matrices to plot xstuff <- cbind(observedxl, observedxr, upperboundx) #no adjusting + min(x$control$range) - 1 ystuff <- cbind(observed, observed, survResObj$upperbound) #Plot the results using one Large plot call (we do this by modifying #the call). matplot(x=xstuff,y=ystuff,xlab=xlab,ylab=ylab,main=main,ylim=ylim,axes = !(xaxis.years),type=type,lty=lty,col=col,...) if (!is.null(survResObj$aggr)) { points(upperboundx+tab,survResObj$aggr,col=1) } for(i in 1:length(observed)){ matlines( c(i-tab, i+tab), c(observed[i],observed[i]),col=col[1]) if(survResObj$alarm[i] == 1) matpoints( i, -1/40*max, pch=alarm.symbol$pch, col=alarm.symbol$col) if(state[i] == 1) matpoints( i, -1/20*max, pch=outbreak.symbol$pch, col=outbreak.symbol$col) } # check where to place the legend. 
If the left upper side is free place it there if (max * 2/3 >= max( max(observed[1:floor(1/4 * length(observed))]), max(survResObj$upperbound[1:floor(1/4 * length(survResObj$upperbound))]) )) { xlegpos <- 0 } #Label of x-axis if(xaxis.years){ # get the number of quarters lying in range for getting the year and quarter order myat.week <- seq(ceiling((52-firstweek+1)/13) * 13 + 1, length(observed)+(floor((52-firstweek + 1)/13) * 13 +1), by=13) # get the right year order year <- (myat.week - 52) %/% 52 + startyear # function to define the quarter order quarterFunc <- function(i) { switch(i+1,"I","II","III","IV")} # get the right number and order of quarter labels quarter <- sapply( (myat.week-1) %/% 13 %% 4, quarterFunc) # get the positions for the axis labels myat.week <- myat.week - (52 - firstweek + 1) # construct the computed axis labels #cex <- par()$cex.axis #if (cex == 1) { mylabels.week <- paste(year,"\n\n",quarter,sep="") #} else { # mylabels.week <- paste(year,"\n",quarter,sep="") #} axis( at=myat.week , labels=mylabels.week , side=1, line = 1 ) axis( side=2 ) } if(is.list(legend.opts)) { #Fill empty (mandatory) slots in legend.opts list if (is.null(legend.opts$lty)) legend.opts$lty = c(lty[1],lty[3],NA,NA) if (is.null(legend.opts$col)) legend.opts$col = c(col[1],col[3],alarm.symbol$col,outbreak.symbol$col) if (is.null(legend.opts$pch)) legend.opts$pch = c(NA,NA,alarm.symbol$pch,outbreak.symbol$pch) if (is.null(legend.opts$x)) legend.opts$x = "top" if (is.null(legend.opts$legend)) legend.opts$legend = c("Infected", "Upperbound", "Alarm", "Outbreak") do.call("legend",legend.opts) } invisible() } #the main function -- cant we do better than this? plot.survRes <- function(x, method=x$control$name, disease=x$control$data, xaxis.years=TRUE,startyear = 2001, firstweek = 1, same.scale=TRUE,...) { observed <- x$disProgObj$observed state <- x$disProgObj$state alarm <- x$alarm #univariate timeseries ? if(is.vector(observed)) observed <- matrix(observed,ncol=1) if(is.vector(state)) state <- matrix(state,ncol=1) if(is.vector(alarm)) alarm <- matrix(alarm,ncol=1) nAreas <- ncol(observed) max <- max(max(observed),max(x$upperbound)) #multivariate time series if(nAreas > 1){ #all areas in one plot #set window size par(mfrow=magic.dim(nAreas),mar=c(2,1,1,1)) if(same.scale) { ylim <- c(-1/20*max, max) } else { ylim <- NULL } #plot areas k <- 1:nAreas sapply(k, function(k) { #Create the survRes dP <- create.disProg(x$disProgObj$week, observed[,k], state[,k],start=x$start) obj <- list(alarm=alarm[,k],disProgObj=dP,control=x$control,upperbound=x$upperbound[,k]) class(obj) <- "survRes" plot.survRes.one(obj,startyear = startyear, firstweek = firstweek, xaxis.years=xaxis.years, ylim=ylim, legend.opts=NULL,domany=TRUE,... ) mtext(colnames(observed)[k],line=-1.3) }) #reset graphical params par(mfrow=c(1,1), mar=c(5, 4, 4, 2)+0.1) } else { #univariate time series plot.survRes.one(x=x, startyear = startyear, firstweek = firstweek, xaxis.years=xaxis.years, domany=FALSE,...) } invisible() } surveillance/R/twinstim_siaf_powerlaw.R0000644000176200001440000001327713165643423020113 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. 
### ### Power-law kernel f(s) = (||s||+sigma)^-d ### This is the pure kernel of the Lomax density (the density requires d>1, but ### for the siaf specification we only want d to be positive) ### ### Copyright (C) 2013-2014,2017 Sebastian Meyer ### $Revision: 1988 $ ### $Date: 2017-10-06 11:04:19 +0200 (Fri, 06. Oct 2017) $ ################################################################################ siaf.powerlaw <- function (nTypes = 1, validpars = NULL, engine = "C") { nTypes <- as.integer(nTypes) stopifnot(length(nTypes) == 1L, nTypes > 0L) engine <- match.arg(engine, c("C", "R")) ## for the moment we don't make this type-specific if (nTypes != 1) stop("type-specific shapes are not yet implemented") ## helper expression, note: logpars=c(logscale=logsigma, logd=logd) tmp <- expression( logsigma <- logpars[[1L]], # used "[[" to drop names logd <- logpars[[2L]], sigma <- exp(logsigma), d <- exp(logd) ) ## spatial kernel f <- function (s, logpars, types = NULL) {} body(f) <- as.call(c(as.name("{"), tmp, expression(sLength <- sqrt(.rowSums(s^2, nrow(s), 2L))), expression((sLength+sigma)^-d) )) environment(f) <- baseenv() ## numerically integrate f over a polygonal domain F <- siaf_F_polyCub_iso(intrfr_name = "intrfr.powerlaw", engine = engine) ## fast integration of f over a circular domain Fcircle <- function (r, logpars, type = NULL) {} body(Fcircle) <- as.call(c(as.name("{"), tmp, expression( fofr <- (r+sigma)^-d, fof0 <- sigma^-d, ## calculate cylinder volume up to height f(r) basevolume <- if (is.infinite(r)) 0 else pi * r^2 * fofr, ## r=Inf is used in R0(,trimmed=F), Fcircle(Inf) is finite if d>2 Ifinvsq <- function (z) { if (d == 1) { -1/z - 2*sigma*log(z) + sigma^2*z } else if (d == 2) { log(z) - 4*sigma*sqrt(z) + sigma^2*z } else { z^(1-2/d) * d / (d-2) - z^(1-1/d) * 2*sigma*d/(d-1) + sigma^2*z } }, intfinvsq <- Ifinvsq(fof0) - Ifinvsq(fofr), basevolume + pi * intfinvsq ) )) environment(Fcircle) <- baseenv() ## derivative of f wrt logpars deriv <- function (s, logpars, types = NULL) {} body(deriv) <- as.call(c(as.name("{"), tmp, expression( sLength <- sqrt(.rowSums(s^2, nrow(s), 2L)), rsigma <- sLength + sigma, rsigmad <- rsigma^d, derivlogsigma <- -d*sigma / rsigmad / rsigma, derivlogd <- -d*log(rsigma) / rsigmad, cbind(derivlogsigma, derivlogd) ) )) environment(deriv) <- baseenv() ## Numerical integration of 'deriv' over a polygonal domain Deriv <- siaf_Deriv_polyCub_iso( intrfr_names = c("intrfr.powerlaw.dlogsigma", "intrfr.powerlaw.dlogd"), engine = engine) ## Simulation function (via polar coordinates) simulate <- siaf.simulatePC(intrfr.powerlaw) ## if (!is.finite(ub)) normconst <- { ## ## for sampling on [0;Inf] the density is only proper if d > 2 ## if (d <= 2) stop("improper density for d<=2, 'ub' must be finite") ## 1/(sigma^(d-2) * (d-2)*(d-1)) # = intrfr.powerlaw(Inf) ## } environment(simulate) <- getNamespace("surveillance") ## return the kernel specification list(f=f, F=F, Fcircle=Fcircle, deriv=deriv, Deriv=Deriv, simulate=simulate, npars=2L, validpars=validpars) } ## integrate x*f(x) from 0 to R (vectorized) intrfr.powerlaw <- function (R, logpars, types = NULL) { sigma <- exp(logpars[[1L]]) d <- exp(logpars[[2L]]) if (d == 1) { R - sigma * log(R/sigma + 1) } else if (d == 2) { log(R/sigma + 1) - R/(R+sigma) } else { (R*(R+sigma)^(1-d) - ((R+sigma)^(2-d) - sigma^(2-d))/(2-d)) / (1-d) } } ## local({ # validation via numerical integration -> tests/testthat/test-siafs.R ## p <- function (r, sigma, d) r * (r+sigma)^-d ## Pnum <- function (r, sigma, d) sapply(r, 
function (.r) { ## integrate(p, 0, .r, sigma=sigma, d=d)$value ## }) ## r <- c(1,2,5,10,20,50,100) ## dev.null <- sapply(c(1,2,1.6), function(d) stopifnot(isTRUE( ## all.equal(intrfr.powerlaw(r, log(c(3, d))), Pnum(r, 3, d))))) ## }) ## integrate x * (df(x)/dlogsigma) from 0 to R (vectorized) intrfr.powerlaw.dlogsigma <- function (R, logpars, types = NULL) { pars <- exp(logpars) -prod(pars) * intrfr.powerlaw(R, log(pars+c(0,1)), types) } ## integrate x * (df(x)/dlogd) from 0 to R (vectorized) ## (thanks to Maple 17) -> validated in tests/testthat/test-siafs.R intrfr.powerlaw.dlogd <- function (R, logpars, types = NULL) { sigma <- exp(logpars[[1L]]) d <- exp(logpars[[2L]]) if (d == 1) { sigma * logpars[[1L]] * (1-logpars[[1L]]/2) - log(R+sigma) * (R+sigma) + sigma/2 * log(R+sigma)^2 + R } else if (d == 2) { (-log(R+sigma) * ((R+sigma)*log(R+sigma) + 2*sigma) + (R+sigma)*logpars[[1L]]*(logpars[[1L]]+2) + 2*R) / (R+sigma) } else { (sigma^(2-d) * (logpars[[1L]]*(-d^2 + 3*d - 2) - 2*d + 3) + (R+sigma)^(1-d) * (log(R+sigma)*(d-1)*(d-2) * (R*(d-1) + sigma) + R*(d^2+1) + 2*d*(sigma-R) - 3*sigma) ) * d / (d-1)^2 / (d-2)^2 } } surveillance/R/sts_ggplot.R0000644000176200001440000000366214026737231015473 0ustar liggesusers################################################################################ ### Plot a surveillance time series ("sts") object using ggplot2 ### ### Copyright (C) 2018,2021 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ################################################################################ autoplot.sts <- function (object, population = FALSE, units = NULL, as.one = FALSE, scales = "fixed", width = NULL, ...) { stopifnot(is(object, "sts")) data <- tidy.sts(object) if (all(is.na(data$date))) stop("could not derive date index via 'epoch(object, as.Date=TRUE)';", "\n attach dates using 'epoch(object) <- DATE_OBJECT' first") ## sensible default width for weekly/daily data if (is.null(width)) { if (object@freq == 52) width <- 7 if (object@freq == 365) width <- 1 } ## select subset of units to plot if (!is.null(units)) { ## ensure that 'units' are labels, not indices units <- unname(setNames(nm = levels(data$unit))[units]) data <- data[data$unit %in% units, , drop=FALSE] } ## scale counts by population if (doInc <- isScalar(population) || isTRUE(population)) data$observed <- data$observed / (data$population / population) p <- ggplot2::ggplot( data = data, mapping = ggplot2::aes_(x = ~date, y = ~observed, group = ~unit), environment = parent.frame() ) if (as.one) { p <- p + ggplot2::geom_line(ggplot2::aes_(colour = ~unit)) } else { p <- p + ggplot2::geom_col(width = width) + ggplot2::facet_wrap(~unit, scales = scales, drop = TRUE) } p + ggplot2::labs(x = "Time", y = if(doInc) "Incidence" else "No. infected") } surveillance/R/twinSIR_simulation.R0000644000176200001440000006451013557773606017126 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. 
### ### Simulate from a "twinSIR" model as described in Hoehle (2009) ### ### Copyright (C) 2009 Michael Hoehle, 2009, 2012, 2014, 2019 Sebastian Meyer ### $Revision: 2497 $ ### $Date: 2019-11-04 11:03:50 +0100 (Mon, 04. Nov 2019) $ ################################################################################ ## Apart from simulation of SIR data, it is possible to simulate ## - SI: infPeriod = function(ids) rep(Inf, length(ids) ## - SIS: remPeriod = function(ids) rep(0, length(ids) ## - SIRS: remPeriod in (0;Inf) ## ## One can even simulate from a Cox model with the following settings: ## + no removal (i.e. infPeriod = function(ids) rep(Inf, length(ids)) ## + no epidemic component (i.e. no alpha, no f, no w). simEpidata <- function (formula, data, id.col, I0.col, coords.cols, subset, beta, h0, f = list(), w = list(), alpha, infPeriod, remPeriod = function(ids) rep(Inf, length(ids)), end = Inf, trace = FALSE, .allocate = NULL) { stopifnot(inherits(formula, "formula"), is.data.frame(data)) cl <- match.call() ####################### ### Check arguments ### ####################### ### Build up model.frame mfnames <- c("", "formula", "data", "subset") mf <- cl[match(mfnames, names(cl), nomatch = 0L)] if (!"subset" %in% names(mf)) { # subset can be missing ## need explicit argument to avoid partial matching with coords.cols mf["subset"] <- list(NULL) } mf$na.action <- as.name("na.fail") mf$drop.unused.levels <- FALSE mf$xlev <- list() ## additional columns for the model frame if (inherits(data, "epidata")) { id.col <- "id" I0.col <- "atRiskY" # but we need !atRiskY (will be considered below) coords.cols <- names(data)[attr(data, "coords.cols")] if(length(formula) == 2L) { # i.e. no response specified formula[3L] <- formula[2L] formula[[2L]] <- quote(cbind(start, stop)) } } else { for(colarg in c("id.col", "I0.col", "coords.cols")) { colidx <- get(colarg, inherits = FALSE) if (is.numeric(colidx)) { tmp <- names(data)[colidx] if (any(is.na(tmp))) { stop("'", colarg, " = ", deparse(cl[[colarg]]), "': ", "column index must be in [1; ", ncol(data), "=ncol(data)]") } assign(colarg, tmp, inherits = FALSE) } } } mf$I0 <- if (is.null(I0.col)) { substitute(rep(0, N), list(N=nrow(data))) } else as.name(I0.col) mf$id <- as.name(id.col) for(coords.col in coords.cols) { mf[[coords.col]] <- as.name(coords.col) } special <- c("cox") Terms <- terms(formula, specials = special, data = data, keep.order = TRUE, simplify = FALSE) mf$formula <- Terms mf[[1]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) ### Convert id to a factor (also removing unused levels if it was a factor) mf[["(id)"]] <- factor(mf[["(id)"]]) ids <- levels(mf[["(id)"]]) nObs <- length(ids) if (nObs == 0L) { stop("nothing to do: no individuals in 'data'") } idsInteger <- seq_len(nObs) ### Check start/stop consistency (response) .startstop <- model.response(mf) if (NCOL(.startstop) != 2L || !is.numeric(.startstop)) { stop("the lhs of 'formula' must be a numeric matrix with two columns ", "like 'cbind(start, stop)'") } timeIntervals <- unique(.startstop) timeIntervals <- timeIntervals[order(timeIntervals[,1L]), , drop = FALSE] nBlocks <- nrow(timeIntervals) if (any(timeIntervals[,2L] <= timeIntervals[,1L])) { stop("stop times must be greater than start times") } if (any(timeIntervals[-1L,1L] != timeIntervals[-nBlocks,2L])) { stop("inconsistent start/stop times: time intervals not consecutive") } ### Check .allocate if (is.null(.allocate)) { .allocate <- max(500, ceiling(nBlocks/100)*100) } else { if (!isScalar(.allocate) || .allocate 
< nBlocks) { stop("'.allocate' must be >= ", nBlocks) } } ### Check that all blocks are complete (all id's present) .blockidx <- match(.startstop[,1L], timeIntervals[,1L]) if (any(table(.blockidx) != nObs)) { stop("all time intervals must be present for all id's") } ### Define a vector containing the time points where covariates change # unique 'start' time points (=> includes beginning of observation period) externalChangePoints <- as.vector(timeIntervals[,1L]) ### SORT THE MODEL.FRAME BY BLOCK AND ID !!! mf <- mf[order(.blockidx, mf[["(id)"]]),] ### Extract the coordinates coords <- as.matrix(mf[idsInteger, tail(1:ncol(mf),length(coords.cols))]) colnames(coords) <- coords.cols rownames(coords) <- ids ### Extract the endemic part Z of the design matrix (no intercept) des <- read.design(mf, Terms) Z <- des$Z nPredCox <- ncol(Z) # number of endemic (cox) predictor terms ### Only include basic endemic variables in the event history output basicCoxNames <- rownames(attr(Terms,"factors"))[attr(Terms,"specials")$cox] basicVarNames <- sub("cox\\(([^)]+)\\)", "\\1", basicCoxNames) nBasicVars <- length(basicCoxNames) # this is necessary if some variables in 'formula' do not have main effects extraBasicVars <- as.matrix(mf[setdiff(basicCoxNames, colnames(Z))]) ### Build up 3-dim array [id x time x var] of endemic terms coxArray <- array(cbind(Z, extraBasicVars), dim = c(nObs, nBlocks, ncol(Z) + ncol(extraBasicVars)), dimnames = list(ids, NULL, c(colnames(Z), colnames(extraBasicVars)))) idxPredVars <- seq_len(nPredCox) idxBasicVars <- match(basicCoxNames, dimnames(coxArray)[[3]]) ### Check simulation parameters ## endemic (cox) part if (nPredCox > 0L) { if(missing(beta) || length(beta) != nPredCox || !is.numeric(beta)) { stop(gettextf(paste("'beta', a numeric vector of length %d", "(number of endemic terms), must be specified"), nPredCox)) } } else { beta <- numeric(0L) } ## epidemic part nPredEpi <- length(f) + length(w) if (nPredEpi > 0L) { ## check f if (length(f) > 0L) { if (ncol(coords) == 0L) { stop("need coordinates for distance-based epidemic covariates 'f'") } if (!is.list(f) || is.null(names(f)) || any(!sapply(f, is.function))) { stop("'f' must be a named list of functions") } distmat <- as.matrix(dist(coords, method = "euclidean")) } ## check w if (length(w) > 0L) { if (!is.list(w) || is.null(names(w)) || any(!sapply(w, is.function))) { stop("'w' must be a named list of functions") } wijlist <- compute_wijlist(w = w, data = mf[idsInteger, ]) } ## check alpha (coefficients for all of f and w) if (missing(alpha) || !is.numeric(alpha) || is.null(names(alpha))) { stop(gettextf(paste("'alpha', a named numeric vector of length %d", "(number of epidemic terms), must be specified"), nPredEpi)) } alpha <- alpha[c(names(f), names(w))] if (any(is.na(alpha))) { stop("'alpha' is incomplete for 'f' or 'w'") } stopifnot(alpha >= 0) } else { alpha <- numeric(0L) } ### Parse the generator function for the infectious periods if (missing(infPeriod)) { stop("argument 'infPeriod' is missing (with no default)") } infPeriod <- match.fun(infPeriod) ### Parse the generator function for the removal periods remPeriod <- match.fun(remPeriod) ### Parse the log baseline function h0spec <- paste("'h0' must be a single number or a list of functions", "\"exact\" and \"upper\"") if (missing(h0)) { stop(h0spec) } if (is.list(h0)) { if (!all(is.function(h0[["exact"]]), is.function(h0[["upper"]]))) { stop(h0spec) } if (!inherits(h0$upper, "stepfun")) { stop("function 'h0$upper' must be a 'stepfun'") } h0ChangePoints <- 
knots(h0$upper)
    } else if (isScalar(h0)) {
        h0func <- eval(parse(text = paste("function (t)", h0)))
        environment(h0func) <- parent.frame()
        h0 <- list(exact = h0func, upper = h0func)
        h0ChangePoints <- numeric(0L)
    } else {
        stop(h0spec)
    }
    if (!isScalar(h0$exact(0))) {
        stop("'h0$exact' must return a scalar")
    }

    ### Define function which decides whether to reject a proposal during simulation
    exactEqualsUpper <- identical(h0$exact, h0$upper)
    mustReject <- if (exactEqualsUpper) {
        function () FALSE
    } else {
        function () lambdaStar/lambdaStarMax < runif(1)
    }

    ### Check simulation ending time
    if (!isScalar(end) || end <= 0) {
        stop("'end' must be a single positive numeric value")
    }

    ###################
    ### Preparation ###
    ###################

    ### Initialize set of infected and susceptible individuals
    infected <- which(
        mf[idsInteger,"(I0)"] == as.numeric(!inherits(data, "epidata"))
    ) # in case of "epidata", mf$(I0) equals data$atRiskY => infected = I0==0
    susceptibles <- which(! idsInteger %in% infected)

    ### Initialize tables of planned R-events and S-events
    Revents <- if (length(infected) > 0) {
        cbind(infected, infPeriod(ids[infected]))
    } else {
        matrix(numeric(0), ncol = 2)
    }
    Sevents <- matrix(numeric(0), ncol = 2)

    ### Small hook to subsequently update the (time-dependent) Cox predictor
    ### based on the current time (ct) during the simulation loop
    if (nPredCox > 0L) {
        coxUpdate <- expression(
            predCox <- as.matrix(
                coxArray[,which(externalChangePoints == ct),idxPredVars]
            ) %*% beta
        )
    } else {
        predCox <- numeric(nObs) # zeros
    }

    ### 'lambdaCalc' is the main expression for the calculation of the intensity
    ### values IMMEDIATELY AFTER the current time 'ct'.
    ### It will be evaluated during the simulation loop below.
    lambdaCalc <- expression(
        # Endemic Cox predictor (no h0 here!) of susceptibles
        predCoxS <- predCox[susceptibles],
        # Epidemic component of susceptibles
        lambdaEpidemic <- numeric(length(susceptibles)), # zeros
        if (nPredEpi > 0L && length(infected) > 0L) {
            fCovars <- if (length(f) > 0L) {
                u <- distmat[,infected, drop = FALSE]
                vapply(X = f, FUN = function (B) rowSums(B(u)),
                       FUN.VALUE = numeric(nObs), USE.NAMES = FALSE)
            } else NULL
            wCovars <- if (length(w) > 0L) {
                vapply(X = wijlist, FUN = function (wij) {
                    rowSums(wij[, infected, drop = FALSE])
                }, FUN.VALUE = numeric(nObs), USE.NAMES = FALSE)
            } else NULL
            epiCovars <- cbind(fCovars, wCovars, deparse.level=0)
            # epiCovars is a matrix [nObs x nPredEpi] also used by updateNextEvent
            if (length(susceptibles) > 0L) {
                lambdaEpidemic <- epiCovars[susceptibles,,drop=FALSE] %*% alpha
            }
        },
        # Combined intensity
        lambdaS <- lambdaEpidemic + exp(h0$exact(ct) + predCoxS),
        # Ground intensity (sum of all lambdaS's)
        lambdaStar <- sum(lambdaS),
        # Upper bound on ground intensity
        lambdaStarMax <- if (exactEqualsUpper) {
            lambdaStar
        } else {
            sum(lambdaEpidemic) + sum(exp(h0$upper(ct) + predCoxS))
        }
    )
    # the following initializations are for R CMD check only ("visible binding")
    lambdaS <- numeric(length(susceptibles))
    lambdaStarMax <- lambdaStar <- numeric(1L)

    # At current time (ct) we have:
    # lambdaS is a _vector of length the current number of susceptibles_
    # containing the intensity of infection for each susceptible individual.
    # lambdaStar is the overall infection rate.
    # lambdaStarMax is the upper bound for lambdaStar regarding baseline.
    # 'susceptibles' and 'infected' are the corresponding sets of individuals
    # immediately AFTER the last event

    # in theory, if a covariate changes in point t, then the intensity changes
    # at t+0 only. intensities are left-continuous functions.
time interval of # constant intensity is (start;stop]. but in the implementation we need at # time ct the value of the log-baseline at ct+0, especially for # ct %in% h0ChangePoints, thus h0$upper should be a stepfun with right=FALSE ### Create a history object alongside the simulation epiCovars0 <- matrix(0, nrow = nObs, ncol = nPredEpi, dimnames = list(NULL, c(names(f), names(w)))) basicVars0 <- matrix(0, nrow = nObs, ncol = nBasicVars, dimnames = list(NULL, basicVarNames)) emptyEvent <- cbind(BLOCK = 0, id = idsInteger, start = 0, stop = 0, atRiskY = 0, event = 0, Revent = 0, coords, basicVars0, epiCovars0) # WARNING: if you change the column order, you have to adjust the # hard coded column indexes everywhere below, also in getModel.simEpidata ! .epiIdx <- tail(seq_len(ncol(emptyEvent)), nPredEpi) .basicIdx <- 7L + ncol(coords) + seq_len(nBasicVars) .nrowsEvHist <- .allocate * nObs # initial size of the event history evHist <- matrix(NA_real_, nrow = .nrowsEvHist, ncol = ncol(emptyEvent), dimnames = list(NULL, colnames(emptyEvent))) ## Hook - create new event and populate it with appropriate covariates updateNextEvent <- expression( nextEvent <- emptyEvent, # populate epidemic covariates if (nPredEpi > 0L && length(infected) > 0L) { nextEvent[,.epiIdx] <- epiCovars # was calculated in lambdaCalc }, # Which time is currently appropriate in (time varying) covariates tIdx <- match(TRUE, c(externalChangePoints,Inf) > ct) - 1L, if (nBasicVars > 0L) { nextEvent[,.basicIdx] <- coxArray[,tIdx,idxBasicVars] }, # At-risk indicator if (length(susceptibles) > 0) { nextEvent[susceptibles,5L] <- 1 }, # Block index nextEvent[,1L] <- rep.int(block, nObs), # Start time nextEvent[,3L] <- rep.int(ct, nObs) ) ## Hook function to add the event to the history addNextEvent <- expression( nextEvent[,4L] <- rep.int(ct, nObs), # stop time if (block*nObs > .nrowsEvHist) { # enlarge evHist if not big enough if (trace > 0L) { cat("Enlarging the event history @ block", block, "...\n") } evHist <- rbind(evHist, matrix(NA_real_, nrow = .allocate * nObs, ncol = ncol(emptyEvent)) ) .nrowsEvHist <- .nrowsEvHist + .allocate * nObs }, evHistIdx <- idsInteger + nObs * (block-1), # = seq.int(from = 1 + nObs*(block-1), to = nObs*block) evHist[evHistIdx,] <- nextEvent, block <- block + 1 ) ####################################################################### ### MAIN PART: sequential simulation of infection and removal times ### ####################################################################### ### Some indicators ct <- timeIntervals[1L,1L] # = externalChangePoints[1] # current time block <- 1 pointRejected <- FALSE loopCounter <- 0L trace <- as.integer(trace) hadNumericalProblemsInf <- hadNumericalProblems0 <- FALSE eventTimes <- numeric(0) ### Update (time depending) endemic covariates (if there are any) if (nPredCox > 0L) { eval(coxUpdate) } ### Let's rock 'n roll repeat { loopCounter <- loopCounter + 1L if (trace > 0L && loopCounter %% trace == 0L) { cat(loopCounter, "@t =", ct, ":\t|S| =", length(susceptibles), " |I| =", length(infected), "\trejected?", pointRejected, "\n") } if (!pointRejected) { ## Compute current conditional intensity eval(lambdaCalc) ## Update event history (uses epiCovars from lambdaCalc) eval(updateNextEvent) } pointRejected <- FALSE ## Determine time of next external change point changePoints <- c(externalChangePoints, h0ChangePoints, Revents[,2], Sevents[,2]) .isPendingChangePoint <- changePoints > ct nextChangePoint <- if (any(.isPendingChangePoint)) { 
min(changePoints[.isPendingChangePoint])
        } else Inf

        ## Simulate waiting time for the subsequent infection
        T <- tryCatch(rexp(1, rate = lambdaStarMax),
                      warning = function(w) {
                          if (!is.na(lambdaStarMax) && lambdaStarMax < 1) {
                              # rate was too small for rexp
                              if (length(susceptibles) > 0L) {
                                  assign("hadNumericalProblems0", TRUE,
                                         inherits = TRUE)
                              }
                              if (nextChangePoint == Inf) NULL else Inf
                          } else {
                              # rate was too big for rexp
                              0 # since R-2.7.0 rexp(1, Inf) returns 0 with no warning!
                          }
                      })

        ## Stop if lambdaStarMax too small AND no more changes in rate
        if (is.null(T)) {
            ct <- end
            eval(addNextEvent)
            break
        }

        ## Stop if lambdaStarMax too big, meaning T == 0 (=> concurrent events)
        if (T == 0) {
            hadNumericalProblemsInf <- TRUE
            break
        }

        ## Stop at all costs if end of simulation time [0; end) has been reached
        if (isTRUE(min(ct+T, nextChangePoint) >= end)) {
            # ">=" because we don't want an event at "end"
            ct <- end
            eval(addNextEvent)
            break
        }

        if (ct + T > nextChangePoint) {
            ## Simulated time point is beyond the next time of intensity change
            ## (removal or covariate or upper baseline change point)
            ct <- nextChangePoint
            if (nPredCox > 0L && ct %in% externalChangePoints) {
                # update endemic covariates
                eval(coxUpdate)
            }
            if (.Reventidx <- match(ct, Revents[,2L], nomatch = 0L)) {
                # removal (I->R), thus update set of infected
                remover <- Revents[.Reventidx,1L]
                .remPeriod <- remPeriod(ids[remover])
                Sevents <- rbind(Sevents, c(remover, ct + .remPeriod))
                infected <- infected[-match(remover, infected)]
                nextEvent[remover,7L] <- 1
            }
            if (.Seventidx <- match(ct, Sevents[,2L], nomatch = 0L)) {
                # this will also be TRUE if above .remPeriod == 0 (SIS-like with pseudo-R-event)
                # re-susceptibility (R->S), thus update set of susceptibles
                resusceptible <- Sevents[.Seventidx,1L]
                susceptibles <- c(susceptibles, resusceptible)
            }
            # update event history
            eval(addNextEvent)
        } else {
            ## Simulated time point lies within the thinning period
            ## => rejection sampling step
            ct <- ct + T
            if (length(h0ChangePoints) > 0L) { # i.e. if non-constant baseline
                # Update intensities for rejection probability at new ct
                eval(lambdaCalc)
            }
            if (mustReject()) {
                pointRejected <- TRUE
                next
            }
            # At this point, we have an actual event!
=> # Sample the individual who becomes infected with probabilities # according to the intensity proportions victimSindex <- sample(length(susceptibles), 1L, prob = lambdaS/lambdaStar) victim <- susceptibles[victimSindex] eventTimes <- c(eventTimes, ct) Revents <- rbind(Revents, c(victim, ct + infPeriod(ids[victim]))) susceptibles <- susceptibles[-victimSindex] infected <- c(infected, victim) # Add to history nextEvent[victim,6L] <- 1 eval(addNextEvent) } } ############## ### Return ### ############## if (hadNumericalProblemsInf) { warning("simulation ended due to an infinite overall infection rate") } if (hadNumericalProblems0) { warning("occasionally, the overall infection rate was numerically ", "equal to 0 although there were individuals at risk") } if (trace > 0L) { cat("Converting the event history into a data.frame (\"epidata\") ...\n") } epi <- as.data.frame(evHist[seq_len(nObs*(block-1)),,drop=FALSE]) epi$id <- factor(ids[epi$id], levels = ids) rownames(epi) <- NULL attr(epi, "eventTimes") <- eventTimes attr(epi, "timeRange") <- c(timeIntervals[1L,1L], ct) attr(epi, "coords.cols") <- 7L + seq_len(ncol(coords)) attr(epi, "f") <- f attr(epi, "w") <- w attr(epi, "config") <- list(h0 = h0$exact, beta = beta, alpha = alpha) attr(epi, "call") <- cl attr(epi, "terms") <- Terms class(epi) <- c("simEpidata", "epidata", "data.frame") if (trace > 0L) { cat("Done.\n") } return(epi) } ### We define no plot-method for simEpidata (as a wrapper for intensityPlot), ### because we want plot(simEpidataObject) to use the inherited method plot.epidata ### which shows the evolution of the numbers of individuals in states S, I, and R ################################################################################ # A 'simulate' method for objects of class "twinSIR". ################################################################################ simulate.twinSIR <- function (object, nsim = 1, seed = 1, infPeriod = NULL, remPeriod = NULL, end = diff(range(object$intervals)), trace = FALSE, .allocate = NULL, data = object$data, ...) { theta <- coef(object) px <- ncol(object$model$X) pz <- ncol(object$model$Z) nh0 <- attr(object$terms, "intercept") * length(object$nEvents) f <- object$model$f # contains only the f's used in the model formula w <- object$model$w # contains only the w's used in the model formula if (any(missingf <- !names(f) %in% colnames(object$model$X))) { stop("simulation requires distance functions 'f', missing for: ", paste(colnames(object$model$X)[missingf], collapse=", ")) } if (any(missingw <- !names(w) %in% colnames(object$model$X))) { stop("simulation requires functions 'w', missing for: ", paste(colnames(object$model$X)[missingw], collapse=", ")) } formulaLHS <- "cbind(start, stop)" formulaRHS <- paste(c(as.integer(nh0 > 0), # endemic intercept? 
names(theta)[px+nh0+seq_len(pz-nh0)]), collapse = " + ") formula <- formula(paste(formulaLHS, formulaRHS, sep="~"), env = environment(formula(object))) h0 <- if (nh0 == 0L) { if (pz == 0L) { -Inf # no endemic component at all (exp(-Inf) == 0) } else { 0 # endemic covariates act on 0-log-baseline hazard } } else { .h0 <- stepfun(x = object$intervals[1:nh0], y = c(0,theta[px+seq_len(nh0)]), right = FALSE) list(exact = .h0, upper = .h0) } if (!inherits(data, "epidata")) { stop("invalid 'data' argument: use function 'twinSIR' with ", "'keep.data = TRUE'") } if (is.null(infPeriod) || is.null(remPeriod)) { s <- summary(data) eventsByID <- s$byID if (is.null(infPeriod)) { infPeriod <- if (s$type == "SI") { function (ids) rep.int(Inf, length(ids)) } else { # SIR, SIRS or SIS eventsByID$infPeriod <- eventsByID$time.R - eventsByID$time.I meanInfPeriodByID <- if (s$type %in% c("SIRS", "SIS")) { c(tapply(eventsByID$infPeriod, list(eventsByID$id), mean, na.rm = TRUE, simplify = TRUE)) } else { structure(eventsByID$infPeriod, names = eventsByID$id) } meanInfPeriod <- mean(meanInfPeriodByID, na.rm = TRUE) if (is.na(meanInfPeriod)) { stop("'infPeriod = NULL' invalid: ", "no infection periods observed") } function (ids) { infPeriods <- meanInfPeriodByID # named vector infPeriods[is.na(infPeriods)] <- meanInfPeriod infPeriods[ids] } } } if (is.null(remPeriod)) { remPeriod <- if (s$type == "SIRS") { eventsByID$remPeriod <- eventsByID$time.S - eventsByID$time.R meanRemPeriodByID <- c(tapply(eventsByID$remPeriod, list(eventsByID$id), mean, na.rm = TRUE, simplify = TRUE)) meanRemPeriod <- mean(meanRemPeriodByID, na.rm = TRUE) function (ids) { remPeriods <- meanRemPeriodByID # named vector remPeriods[is.na(remPeriods)] <- meanRemPeriod remPeriods[ids] } } else if (s$type == "SIS") { function (ids) rep.int(0, length(ids)) } else { # SIR or SI function (ids) rep.int(Inf, length(ids)) } } } set.seed(seed) res <- replicate(nsim, simEpidata(formula, data = data, beta = theta[px + nh0 + seq_len(pz-nh0)], h0 = h0, f = f, w = w, alpha = theta[seq_len(px)], infPeriod = infPeriod, remPeriod = remPeriod, end = end, trace = trace, .allocate = .allocate), simplify = FALSE ) if (nsim == 1L) res[[1L]] else res } surveillance/R/twinstim_siaf_exponential.R0000644000176200001440000000603013612577730020572 0ustar liggesusers################################################################################ ### Exponential kernel f(s) = exp(-||s||/sigma) ### ### Copyright (C) 2020 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at https://www.R-project.org/Licenses/. 
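###
### Numerical sanity check (an illustrative sketch, not part of the package
### API): the closed form used in intrfr.exponential() below,
### sigma * (sigma - (R+sigma)*exp(-R/sigma)), should agree with numerical
### integration of r*f(r) from 0 to R; the values of 'R' and 'sigma' are
### arbitrary example choices.
if (FALSE) {
    R <- 5; sigma <- 2  # arbitrary example values
    closedForm <- sigma * (sigma - (R + sigma) * exp(-R/sigma))
    numerical <- integrate(function (r) r * exp(-r/sigma),
                           lower = 0, upper = R)$value
    stopifnot(isTRUE(all.equal(closedForm, numerical, tolerance = 1e-6)))
}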
################################################################################ siaf.exponential <- function (nTypes = 1, validpars = NULL, engine = "C") { nTypes <- as.integer(nTypes) stopifnot(length(nTypes) == 1L, nTypes > 0L) engine <- match.arg(engine, c("C", "R")) ## for the moment we don't make this type-specific if (nTypes != 1) stop("type-specific shapes are not yet implemented") ## spatial kernel f <- function (s, logsigma, types = NULL) { sigma <- exp(logsigma) sLength <- sqrt(.rowSums(s^2, nrow(s), 2L)) exp(-sLength/sigma) } environment(f) <- baseenv() ## numerically integrate f over a polygonal domain F <- siaf_F_polyCub_iso(intrfr_name = "intrfr.exponential", engine = engine) ## fast integration of f over a circular domain Fcircle <- function (r, logsigma, type = NULL) { sigma <- exp(logsigma) fofr <- exp(-r/sigma) ## f(r) approaches 0, r=Inf is used in R0(,trimmed=F) if (fofr == 0) return(2*pi*sigma^2) ## calculate cylinder volume up to height f(r) basevolume <- pi * r^2 * fofr ## integration via f^-1 Ifinvsq <- function (z) sigma^2 * z * ((log(z)-1)^2 + 1) ##intfinvsq <- Ifinvsq(fof0) - Ifinvsq(fofr) # fof0 = 1 intfinvsq <- 2*sigma^2 - Ifinvsq(fofr) basevolume + pi * intfinvsq } environment(Fcircle) <- baseenv() ## derivative of f wrt logsigma deriv <- function (s, logsigma, types = NULL) { sigma <- exp(logsigma) sLength <- sqrt(.rowSums(s^2, nrow(s), 2L)) z <- sLength/sigma matrix(z * exp(-z)) } environment(deriv) <- baseenv() ## Numerical integration of 'deriv' over a polygonal domain Deriv <- siaf_Deriv_polyCub_iso(intrfr_names = "intrfr.exponential.dlogsigma", engine = engine) ## simulation from the kernel (via polar coordinates) simulate <- siaf.simulatePC(intrfr.exponential) environment(simulate) <- getNamespace("surveillance") ## return the kernel specification list(f = f, F = F, Fcircle = Fcircle, deriv = deriv, Deriv = Deriv, simulate = simulate, npars = 1L, validpars = validpars) } ## integrate x*f(x) from 0 to R (vectorized) intrfr.exponential <- function (R, logsigma, types = NULL) { sigma <- exp(logsigma) sigma * (sigma - (R+sigma)*exp(-R/sigma)) } ## integrate x * (df(x)/dlogsigma) from 0 to R (vectorized) ## Note: df(x)/dlogsigma = x * exp(-(x/sigma)-logsigma) intrfr.exponential.dlogsigma <- function (R, logsigma, types = NULL) { sigma <- exp(logsigma) 2*sigma^2 - ((R+sigma)^2 + sigma^2)*exp(-R/sigma) } surveillance/R/twinstim_tiaf_step.R0000644000176200001440000001267712273015471017225 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Step function implementation for temporal interaction ### ### Copyright (C) 2014 Sebastian Meyer ### $Revision: 735 $ ### $Date: 2014-01-31 22:52:57 +0100 (Fri, 31. 
Jan 2014) $ ################################################################################ tiaf.step <- function (knots, maxRange = Inf, nTypes = 1, validpars = NULL) { knots <- sort(unique(as.vector(knots,mode="numeric"))) stopifnot(knots > 0, is.finite(knots), isScalar(maxRange), maxRange > knots) nknots <- length(knots) # = number of parameters (per type) knotsmax <- c(knots, maxRange) nknotsmax <- nknots + 1L allknots <- c(0, knots, maxRange) nallknots <- length(allknots) allknotsInf <- unique(c(allknots, Inf)) # ensure Inf as last element stopifnot(isScalar(nTypes <- as.integer(nTypes)), nTypes > 0L) npars <- nknots * nTypes .parintwidths <- rep.int(c(diff.default(knotsmax)), nTypes) .parintwidths[is.infinite(.parintwidths)] <- -1 ##<- in case maxRange=Inf, last interval width will always be multiplied by ## 0 and should give 0, but Inf would produce NaN, so we just set it to -1 ## the step function is right-continuous, intervals are [a,b) g <- if (nTypes > 1) { heights <- function (logvals) { # get matrix of type-specific heights dim(logvals) <- c(nknots, nTypes) rbind(1, exp(logvals), 0, deparse.level=0) } function (t, logvals, types) heights(logvals)[(types-1)*nallknots + .bincode(t, allknotsInf, right=FALSE)] } else { function (t, logvals, types = NULL) c(1,exp(logvals),0)[.bincode(t, allknotsInf, right=FALSE)] } G <- if (nTypes > 1) { typeheights <- function (logvals, type) # vector of type-specific heights c(1, exp(logvals[(type-1)*nknots+seq_len(nknots)])) as.function(c(alist(t=, logvals=, types=), substitute({ mapply(function (t, type) { knots2t <- c(0, pmin.int(knots, t), TMAX) sum(typeheights(logvals, type) * diff.default(knots2t)) }, t, types, SIMPLIFY=TRUE, USE.NAMES=FALSE) }, list(TMAX = if (is.finite(maxRange)) quote(min(t,maxRange)) else quote(t))))) } else { ## function (t, logvals, types = NULL) { ## vapply(t, function (t) { ## knots2t <- c(0, pmin.int(knots, t), min(t, maxRange)) ## sum(c(1,exp(logvals)) * diff.default(knots2t)) ## }, 0, USE.NAMES=FALSE) # vapply is faster than sapply ## } as.function(c(alist(t=, logvals=, types = NULL), substitute({ ##omtk <- outer(t, knots, pmin.int), bare-bone implementation: omtk <- pmin.int(rep.int(knots, rep.int(L <- length(t), nknots)), t) dim(omtk) <- c(L, nknots) .colSums(apply(cbind(0, omtk, TMAX, deparse.level=0), 1L, diff.default) * c(1,exp(logvals)), nknotsmax, L) }, list(TMAX = if (is.finite(maxRange)) quote(pmin.int(t,maxRange)) else quote(t))))) } ## the derivative is simply the height corresponding to (t, type) and is 0 ## outside this interval/type deriv <- function (t, logvals, types) { whichvals <- .bincode(t, knotsmax, right=FALSE) fixedheight <- is.na(whichvals) ##<- intervals number 1 and 'nallknots' don't correspond to parameters whichvals <- whichvals + (types-1)*nknots # select type parameter whichvals[fixedheight] <- 0 ## we do a bare-bone implementation of relevant parts of ## deriv <- outer(whichvals, seq_len(npars), "==") * rep(exp(logvals), each=L) repL <- rep.int(L <- length(t), npars) Y <- rep.int(seq_len(npars), repL) # column index Z <- rep.int(exp(logvals), repL) # value ##<- 6x faster than rep(..., each=L) res <- (Y == whichvals) * Z dim(res) <- c(L, npars) res } ## only tiny modification necessary for nTypes == 1 if (nTypes == 1) { body(deriv)[[grep("types", body(deriv))]] <- NULL formals(deriv)["types"] <- list(NULL) } Deriv <- deriv body(Deriv) <- as.call(append(as.list(body(Deriv)), expression( partwidth <- t - knots[whichvals] ), after=2L)) body(Deriv)[[grep("whichvals[fixedheight]", 
body(Deriv), fixed=TRUE)]] <- quote(whichvals[fixedheight] <- partwidth[fixedheight] <- 0) body(Deriv) <- as.call(append(as.list(body(Deriv)), expression( W <- rep.int(.parintwidths, repL) ), after=grep("Z <-", body(Deriv)))) body(Deriv)[[grep("res <-", body(Deriv))]] <- if (nTypes == 1) { quote(res <- ((Y < whichvals | t >= maxRange) * W + (Y == whichvals) * partwidth) * Z) } else { quote(res <- ((Y > (types-1)*nknots & (Y < whichvals | t >= maxRange)) * W + (Y == whichvals) * partwidth) * Z) } ## Done res <- list(g = g, G = G, deriv = deriv, Deriv = Deriv, ## FIXME: simulate = simulate, npars = npars, validpars = validpars) attr(res, "knots") <- knots attr(res, "maxRange") <- maxRange res } surveillance/R/epidataCS_methods.R0000644000176200001440000003416313570226676016677 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Standard S3-methods for "epidataCS" objects, which represent ### CONTINUOUS SPATIO-temporal infectious disease case data ### ### Copyright (C) 2009-2015,2017-2019 Sebastian Meyer ### $Revision: 2505 $ ### $Date: 2019-11-29 15:39:58 +0100 (Fri, 29. Nov 2019) $ ################################################################################ ### Number of events (including prehistory) nobs.epidataCS <- function (object, ...) length(object$events) ### UPDATE eps.s, eps.t, qmatrix OR nCircle2Poly IN AN EXISTING epidataCS OBJECT # all arguments but 'object' are optional, the ... argument is unused update.epidataCS <- function (object, eps.t, eps.s, qmatrix, nCircle2Poly, ...) { nEvents <- nobs(object) # Check and update eps.t if (!missing(eps.t)) { stopifnot(is.numeric(eps.t), eps.t > 0) object$events$eps.t <- eps.t } # Initialise indicator of which influenceRegions to update ir2update <- logical(nEvents) # all FALSE # Check and update eps.s if (!missing(eps.s)) { stopifnot(is.numeric(eps.s), eps.s > 0) oldeps.s <- object$events$eps.s object$events$eps.s <- eps.s ir2update <- oldeps.s != object$events$eps.s } # Check nCircle2Poly nCircle2Poly <- if (missing(nCircle2Poly)) { attr(object$events$.influenceRegion, "nCircle2Poly") } else { stopifnot(isScalar(nCircle2Poly)) ir2update <- rep.int(TRUE, nEvents) as.integer(nCircle2Poly) } # Update influenceRegions of events if (any(ir2update)) { clipper <- attr(object$events$.influenceRegion, "clipper") if (is.null(clipper)) # epidataCS < 1.8-1 clipper <- "polyclip" object$events$.influenceRegion[ir2update] <- .influenceRegions(object$events[ir2update,], object$W, nCircle2Poly, clipper = clipper) attr(object$events$.influenceRegion, "nCircle2Poly") <- nCircle2Poly } # Check qmatrix if (!missing(qmatrix)) object$qmatrix <- checkQ(qmatrix, levels(object$events$type)) #hoehle @ 16 Apr 2011 - bug fix. .obsInfLength was not handled # Update length of infection time, i.e. length = min(T-time, eps.t) if (!missing(eps.t)) { timeRange <- with(object$stgrid, c(start[1], stop[length(stop)])) object$events$.obsInfLength <- with(object$events@data, pmin(timeRange[2]-time, eps.t)) } # Update .sources if (!missing(eps.t) || !missing(eps.s) || !missing(qmatrix)) { object$events$.sources <- determineSources.epidataCS(object) } # Done update. return(object) } ### subsetting epidataCS, i.e. select only part of the events, ### but retain stgrid and W. 
If any event types disappear due to subsetting, ### these types will be dropped from the factor levels and from qmatrix "[.epidataCS" <- function (x, i, j, ..., drop = TRUE) { ## rescue attributes of .influenceRegion (dropped when indexing) iRattr <- attributes(x$events$.influenceRegion) ## apply [,SpatialPointsDataFrame-method (where "drop" is ignored) cl <- sys.call() cl[[1]] <- as.name("[") cl[[2]] <- substitute(x$events) x$events <- eval(cl, envir=parent.frame()) ## assure valid epidataCS after subsetting if (!missing(j)) { # only epidemic covariates may be selected endemicVars <- setdiff(names(x$stgrid), c( reservedColsNames_stgrid, obligColsNames_stgrid)) if (!all(c(reservedColsNames_events, obligColsNames_events, endemicVars) %in% names(x$events))) { stop("only epidemic covariates may be removed from 'events'") } } if (!missing(i)) { ## update .sources x$events$.sources <- determineSources.epidataCS(x) if (drop) { ## update type levels and qmatrix (a type could have disappeared) x$events$type <- x$events$type[drop=TRUE] typeNames <- levels(x$events$type) if (!identical(rownames(x$qmatrix), typeNames)) { message("Note: dropped type(s) ", paste0("\"", setdiff(rownames(x$qmatrix), typeNames), "\"", collapse = ", ")) x$qmatrix <- checkQ(x$qmatrix, typeNames) } } } ## restore attributes of .influenceRegion attributes(x$events$.influenceRegion) <- iRattr ## done return(x) } ## The subset method for epidataCS-objects is adapted from ## base::subset.data.frame (authored by Peter ## Dalgaard and Brian Ripley, Copyright (C) 1995-2012 ## The R Core Team) with slight modifications only ## (we just replace 'x' by 'x$events@data' for evaluation of subset and select) subset.epidataCS <- function (x, subset, select, drop = TRUE, ...) { if (missing(subset)) r <- TRUE else { e <- substitute(subset) r <- eval(e, x$events@data, parent.frame()) # HERE IS A MOD if (!is.logical(r)) stop("'subset' must evaluate to logical") r <- r & !is.na(r) } if (missing(select)) vars <- TRUE else { nl <- as.list(seq_along(x$events@data)) # HERE IS A MOD names(nl) <- names(x$events@data) # HERE IS A MOD vars <- eval(substitute(select), nl, parent.frame()) } x[r, vars, drop = drop] # this calls the [.epidataCS-method from above } ## Subset epidataCS object using head and tail methods (which use [.epidataCS) ## adapted from the corresponding matrix-methods, which have ## Copyright (C) 1995-2012 The R Core Team head.epidataCS <- function (x, n = 6L, ...) { stopifnot(isScalar(n)) n <- if (n < 0L) max(nobs(x) + n, 0L) else min(n, nobs(x)) x[seq_len(n), , drop = FALSE] } tail.epidataCS <- function (x, n = 6L, ...) { stopifnot(isScalar(n)) nrx <- nobs(x) n <- if (n < 0L) max(nrx + n, 0L) else min(n, nrx) x[seq.int(to = nrx, length.out = n), , drop = FALSE] } ### extract marks of the events (actually also including time and tile) idxNonMarks <- function (x) { endemicCovars <- setdiff(names(x$stgrid), c( reservedColsNames_stgrid, obligColsNames_stgrid)) match(c(reservedColsNames_events, endemicCovars), names(x$events@data)) } marks.epidataCS <- function (x, coords = TRUE, ...) { if (coords) { # append coords (cp. 
as.data.frame.SpatialPointsDataFrame) data.frame(x$events@data[-idxNonMarks(x)], x$events@coords) } else { # return marks without coordinates x$events@data[-idxNonMarks(x)] } } ## extract the events point pattern as.SpatialPointsDataFrame.epidataCS <- function (from) { stopifnot(inherits(from, "epidataCS")) events <- from$events events@data <- marks.epidataCS(from, coords = FALSE) events } setOldClass("epidataCS") setAs(from = "epidataCS", to = "SpatialPointsDataFrame", def = as.SpatialPointsDataFrame.epidataCS) ### permute event times and/or locations holding remaining columns fixed permute.epidataCS <- function (x, what = c("time", "space"), keep) { stopifnot(inherits(x, "epidataCS")) what <- match.arg(what) ## permutation index perm <- if (missing(keep)) { sample.int(nobs.epidataCS(x)) } else { # some events should not be relabeled keep <- eval(substitute(keep), envir = x$events@data, enclos = parent.frame()) stopifnot(is.logical(keep), !is.na(keep)) which2permute <- which(!keep) howmany2permute <- length(which2permute) if (howmany2permute < 2L) { message("Note: data unchanged ('keep' all)") return(x) } perm <- seq_len(nobs.epidataCS(x)) perm[which2permute] <- which2permute[sample.int(howmany2permute)] perm } ## rescue attributes of .influenceRegion (dropped when indexing) iRattr <- attributes(x$events@data$.influenceRegion) ## permute time points and/or locations PERMVARS <- if (what == "time") { c("time", "BLOCK", "start", ".obsInfLength") } else { x$events@coords <- x$events@coords[perm,,drop=FALSE] c("tile", ".bdist", ".influenceRegion") } x$events@data[PERMVARS] <- x$events@data[perm, PERMVARS] ## re-sort on time if necessary if (what == "time") { x$events <- x$events[order(x$events@data$time), ] } ## .sources and endemic variables need an update x$events@data$.sources <- determineSources.epidataCS(x) ENDVARS <- setdiff(names(x$stgrid), c(reservedColsNames_stgrid, obligColsNames_stgrid)) gridcellsOfEvents <- match( do.call("paste", c(x$events@data[c("BLOCK", "tile")], sep = "\r")), do.call("paste", c(x$stgrid[c("BLOCK", "tile")], sep = "\r")) ) x$events@data[ENDVARS] <- x$stgrid[gridcellsOfEvents, ENDVARS] ## restore attributes of .influenceRegion attributes(x$events@data$.influenceRegion) <- iRattr ## done x } ### printing methods print.epidataCS <- function (x, n = 6L, digits = getOption("digits"), ...) { print.epidataCS_header( timeRange = c(x$stgrid$start[1L], x$stgrid$stop[nrow(x$stgrid)]), bbox = bbox(x$W), nBlocks = length(unique(x$stgrid$BLOCK)), nTiles = nlevels(x$stgrid$tile), digits = digits ) cat("Types of events: ") str(levels(x$events$type), give.attr = FALSE, give.head = FALSE, width = getOption("width") - 17L) cat("Overall number of events:", nEvents <- nobs(x)) if (npre <- sum(x$events$time <= x$stgrid$start[1L])) cat(" (prehistory: ", npre, ")", sep = "") cat("\n\n") visibleCols <- grep("^\\..+", names(x$events@data), invert = TRUE) if (nEvents == 0L) { # not handled by [,SpatialPointsDataFrame-method # and thus actually not supported by "epidataCS" ## display header only print(data.frame(coordinates = character(0L), x$events@data[visibleCols])) } else { ## 2014-03-24: since sp 1.0-15, print.SpatialPointsDataFrame() ## appropriately passes its "digits" argument to print.data.frame() print(head.matrix(x$events[visibleCols], n = n), digits = digits, ...) 
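        ## signal that only the first n of nEvents events are shown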
if (n < nEvents) cat("[....]\n") } invisible(x) } print.epidataCS_header <- function (timeRange, bbox, nBlocks, nTiles, digits = getOption("digits")) { bboxtxt <- paste( apply(bbox, 1, function (int) paste0( "[", paste(format(int, trim=TRUE, digits=digits), collapse=", "), "]" )), collapse = " x ") cat("Observation period:", paste(format(timeRange, trim=TRUE, digits=digits), collapse = " - "), "\n") cat("Observation window (bounding box):", bboxtxt, "\n") cat("Spatio-temporal grid (not shown):", nBlocks, ngettext(nBlocks, "time block,", "time blocks"), "x", nTiles, ngettext(nTiles, "tile", "tiles"), "\n") } ### SUMMARY # the epidemic is summarized by the following returned components: # timeRange, nEvents, eventTimes, eventCoords, nSources, as well as # - tile/typetable: number of events per tile/type # - counter: number of infective individuals as stepfun summary.epidataCS <- function (object, ...) { res <- list( timeRange = with(object$stgrid, c(start[1], stop[length(stop)])), bbox = bbox(object$W), nBlocks = length(unique(object$stgrid$BLOCK)), nEvents = nobs(object), nTypes = nlevels(object$events$type), eventTimes = object$events$time, eventCoords = coordinates(object$events), eventTypes = object$events$type, eventRanges = object$events@data[c("eps.t", "eps.s")], eventMarks = marks.epidataCS(object), tileTable = c(table(object$events$tile)), typeTable = c(table(object$events$type)), counter = as.stepfun.epidataCS(object), nSources = lengths(object$events$.sources, use.names = FALSE) ) class(res) <- "summary.epidataCS" res } print.summary.epidataCS <- function (x, ...) { print.epidataCS_header(timeRange = x$timeRange, bbox = x$bbox, nBlocks = x$nBlocks, nTiles = length(x$tileTable)) cat("Overall number of events: ", x$nEvents, " (", if (x$nTypes==1) "single type" else paste(x$nTypes, "types"), if (npre <- sum(x$eventTimes <= x$timeRange[1L])) paste(", prehistory:", npre), ")\n", sep = "") cat("\nSummary of event marks and number of potential sources:\n") print(summary(cbind(x$eventMarks, "|.sources|"=x$nSources)), ...) invisible(x) } as.stepfun.epidataCS <- function (x, ...) 
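## step function counting the number of currently infectious individuals:
## an event is counted from its event time until eps.t time units later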
{ eventTimes <- x$events$time removalTimes <- eventTimes + x$events$eps.t tps <- sort(unique(c(eventTimes, removalTimes[is.finite(removalTimes)]))) nInfectious <- sapply(tps, function(t) sum(eventTimes <= t & removalTimes > t)) stepfun(tps, c(0,nInfectious), right = TRUE) # no ties, 'tps' is unique } ################################################### ### Distances from potential (eps.s, eps.t) sources ################################################### getSourceDists <- function (object, dimension = c("space", "time")) { dimension <- match.arg(dimension) ## extract required info from "epidataCS"-object distmat <- as.matrix(dist( if (dimension == "space") { coordinates(object$events) } else object$events$time )) .sources <- object$events$.sources ## number of sources nsources <- lengths(.sources, use.names = FALSE) hasSources <- nsources > 0 cnsources <- c(0, cumsum(nsources)) ## generate vector of distances of events to their potential sources sourcedists <- numeric(sum(nsources)) for (i in which(hasSources)) { .sourcesi <- .sources[[i]] .sourcedists <- distmat[i, .sourcesi] .idx <- cnsources[i] + seq_len(nsources[i]) sourcedists[.idx] <- .sourcedists names(sourcedists)[.idx] <- paste(i, .sourcesi, sep="<-") } ## Done sourcedists } surveillance/R/hhh4_simulate.R0000644000176200001440000002716013377012440016037 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Simulate from a HHH4 model ### ### Copyright (C) 2012 Michaela Paul, 2013-2016,2018 Sebastian Meyer ### $Revision: 2249 $ ### $Date: 2018-11-26 16:45:36 +0100 (Mon, 26. Nov 2018) $ ################################################################################ ### Simulate-method for hhh4-objects simulate.hhh4 <- function (object, # result from a call to hhh4 nsim=1, # number of replicates to simulate seed=NULL, y.start=NULL, # initial counts for epidemic components subset=1:nrow(object$stsObj), coefs=coef(object), # coefficients used for simulation components=c("ar","ne","end"), # which comp to include simplify=nsim>1, # counts array only (no full sts) ...) { ## Determine seed (this part is copied from stats:::simulate.lm with ## Copyright (C) 1995-2012 The R Core Team) if(!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) # initialize the RNG if necessary if(is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } ## END seed cl <- match.call() theta <- if (missing(coefs)) coefs else checkCoefs(object, coefs) stopifnot(subset >= 1, subset <= nrow(object$stsObj)) ## lags lag.ar <- object$control$ar$lag lag.ne <- object$control$ne$lag maxlag <- max(lag.ar, lag.ne) ## initial counts nUnits <- object$nUnit if (is.null(y.start)) { # set starting value to mean observed (in subset!) 
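        ## default: unit-wise means of the observed counts in 'subset',
        ## rounded up and used for all 'maxlag' initial time points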
y.means <- ceiling(colMeans(observed(object$stsObj)[subset,,drop=FALSE])) y.start <- matrix(y.means, maxlag, nUnits, byrow=TRUE) } else { if (is.vector(y.start)) y.start <- t(y.start) if (ncol(y.start) != nUnits) stop(sQuote("y.start"), " must have nUnits=", nUnits, " columns") if (nrow(y.start) < maxlag) stop("need 'y.start' values for lag=", maxlag, " initial time points") } ## store model terms in the hhh4 object because we request them repeatedly ## (within get_exppreds_with_offsets() and directly afterwards) ## CAVE: for an ri()-model, building the terms affects the .Random.seed, ## so doing that twice would yield different simulations than pre-1.16.2 if (is.null(object$terms)) object$terms <- terms.hhh4(object) ## get fitted exppreds nu_it, phi_it, lambda_it (incl. offsets, t in subset) exppreds <- get_exppreds_with_offsets(object, subset = subset, theta = theta) ## extract overdispersion parameters (simHHH4 assumes psi->0 means Poisson) model <- terms.hhh4(object) psi <- splitParams(theta,model)$overdisp if (length(psi) > 1) # "NegBinM" or shared overdispersion parameters psi <- psi[model$indexPsi] ## weight matrix/array of the ne component neweights <- getNEweights(object, coefW(theta)) ## set predictor to zero if not included ('components' argument) stopifnot(length(components) > 0, components %in% c("ar", "ne", "end")) getComp <- function (comp) { exppred <- exppreds[[comp]] if (comp %in% components) exppred else "[<-"(exppred, value = 0) } ar <- getComp("ar") ne <- getComp("ne") end <- getComp("end") ## simulate simcall <- quote( simHHH4(ar, ne, end, psi, neweights, y.start, lag.ar, lag.ne) ) if (!simplify) { ## result template res0 <- object$stsObj[subset,] setObserved <- function (observed) { res0@observed[] <- observed res0 } simcall <- call("setObserved", simcall) } res <- if (nsim==1) eval(simcall) else replicate(nsim, eval(simcall), simplify=if (simplify) "array" else FALSE) if (simplify) { dimnames(res)[1:2] <- list(subset, colnames(model$response)) attr(res, "initial") <- y.start attr(res, "stsObserved") <- object$stsObj[subset,] class(res) <- "hhh4sims" } ## Done attr(res, "call") <- cl attr(res, "seed") <- RNGstate res } ### Internal auxiliary function, which performs the actual simulation simHHH4 <- function(ar, # lambda_it (nTime x nUnits matrix) ne, # phi_it (nTime x nUnits matrix) end, # nu_it (nTime x nUnits matrix, offset included) psi, # overdisp param(s) or numeric(0) (psi->0 = Poisson) neW, # weight matrix/array for neighbourhood component start, # starting counts (vector of length nUnits, or # matrix with nUnits columns if lag > 1) lag.ar = 1, lag.ne = lag.ar ) { nTime <- nrow(end) nUnits <- ncol(end) ## check and invert psi since rnbinom() uses different parametrization size <- if (length(psi) == 0 || isTRUE(all.equal(psi, 0, check.attributes=FALSE))) { NULL # Poisson } else { if (!length(psi) %in% c(1, nUnits)) stop("'length(psi)' must be ", paste(unique(c(1, nUnits)), collapse = " or "), " (number of units)") 1/psi } ## simulate from Poisson or NegBin model rdistr <- if (is.null(size)) { rpois } else { ## unit-specific 'mean's and variance = mean + psi*mean^2 ## where 'size'=1/psi and length(psi) == 1 or length(mean) function(n, mean) rnbinom(n, mu = mean, size = size) } ## if only endemic component -> simulate independently if (all(ar + ne == 0)) { if (!is.null(size)) size <- matrix(size, nTime, nUnits, byrow = TRUE) return(matrix(rdistr(length(end), end), nTime, nUnits)) } ## weighted sum of counts of other (neighbouring) regions ## params: y - vector 
with (lagged) counts of regions ## W - nUnits x nUnits adjacency/weight matrix (0=no neighbour) wSumNE <- if (is.null(neW) || all(neW == 0)) { # includes the case nUnits==1 function (y, W) numeric(nUnits) } else function (y, W) .colSums(W * y, nUnits, nUnits) ## initialize matrices for means mu_i,t and simulated data y_i,t mu <- y <- matrix(0, nTime, nUnits) y <- rbind(start, y) nStart <- nrow(y) - nrow(mu) # usually just 1 for lag=1 ## simulate timeDependentWeights <- length(dim(neW)) == 3 if (!timeDependentWeights) neWt <- neW for(t in seq_len(nTime)){ if (timeDependentWeights) neWt <- neW[,,t] ## mean mu_i,t = lambda*y_i,t-1 + phi*sum_j wji*y_j,t-1 + nu_i,t mu[t,] <- ar[t,] * y[nStart+t-lag.ar,] + ne[t,] * wSumNE(y[nStart+t-lag.ne,], neWt) + end[t,] ## Sample from Poisson/NegBin with that mean y[nStart+t,] <- rdistr(nUnits, mu[t,]) } ## return simulated data without initial counts y[-seq_len(nStart),,drop=FALSE] } ### check compatibility of a user-specified coefficient vector with model checkCoefs <- function (object, coefs, reparamPsi=TRUE) { theta <- coef(object, reparamPsi=reparamPsi) if (length(coefs) != length(theta)) stop(sQuote("coefs"), " must be of length ", length(theta)) names(coefs) <- names(theta) coefs } ### subset simulations and keep attributes in sync "[.hhh4sims" <- function (x, i, j, ..., drop = FALSE) { xx <- NextMethod("[", drop = drop) if (nargs() == 2L) # x[i] call -> hhh4sims class is lost return(xx) ## otherwise we were subsetting the array and attributes are lost attr(xx, "initial") <- attr(x, "initial") attr(xx, "stsObserved") <- attr(x, "stsObserved") subset_hhh4sims_attributes(xx, i, j) } subset_hhh4sims_attributes <- function (x, i, j) { if (!missing(i)) attr(x, "stsObserved") <- attr(x, "stsObserved")[i,] if (!missing(j)) { attr(x, "stsObserved") <- suppressMessages(attr(x, "stsObserved")[, j]) is.na(attr(x, "stsObserved")@neighbourhood) <- TRUE attr(x, "initial") <- attr(x, "initial")[, j, drop = FALSE] } x } ### aggregate predictions over time and/or (groups of) units aggregate.hhh4sims <- function (x, units = TRUE, time = FALSE, ..., drop = FALSE) { ax <- attributes(x) if (time) { ## sum counts over the whole simulation period res <- colSums(x) ## -> a nUnits x nsim matrix -> will no longer be "hhh4sims" if (isTRUE(units)) { # sum over all units res <- colSums(res) # now a vector of length nsim } else if (!identical(FALSE, units)) { # sum over groups of units stopifnot(length(units) == dim(x)[2]) res <- t(rowSumsBy.matrix(t(res), units)) } } else { if (isTRUE(units)) { # sum over all units res <- apply(X = x, MARGIN = c(1L, 3L), FUN = sum) if (!drop) { ## restore unit dimension conforming to "hhh4sims" class dim(res) <- c(ax$dim[1L], 1L, ax$dim[3L]) dnres <- ax$dimnames dnres[2L] <- list(NULL) dimnames(res) <- dnres ## restore attributes attr(res, "initial") <- as.matrix(rowSums(ax$initial)) attr(res, "stsObserved") <- aggregate(ax$stsObserved, by = "unit") class(res) <- "hhh4sims" } } else if (!identical(FALSE, units)) { # sum over groups of units stopifnot(length(units) == dim(x)[2]) groupnames <- names(split.default(seq_along(units), units)) res <- apply(X = x, MARGIN = 3L, FUN = rowSumsBy.matrix, by = units) dim(res) <- c(ax$dim[1L], length(groupnames), ax$dim[3L]) dnres <- ax$dimnames dnres[2L] <- list(groupnames) dimnames(res) <- dnres if (!drop) { ## restore attributes attr(res, "initial") <- rowSumsBy.matrix(ax$initial, units) attr(res, "stsObserved") <- rowSumsBy.sts(ax$stsObserved, units) class(res) <- "hhh4sims" } } else { return(x) } } ## 
done res } rowSumsBy.matrix <- function (x, by, na.rm = FALSE) { dn <- dim(x) res <- vapply(X = split.default(x = seq_len(dn[2L]), f = by), FUN = function (idxg) .rowSums(x[, idxg, drop = FALSE], dn[1L], length(idxg), na.rm = na.rm), FUN.VALUE = numeric(dn[1L]), USE.NAMES = TRUE) if (dn[1L] == 1L) t(res) else res } rowSumsBy.sts <- function (x, by, na.rm = FALSE) { ## map, neighbourhood, upperbound, control get lost by aggregation of units .sts(epoch = x@epoch, freq = x@freq, start = x@start, observed = rowSumsBy.matrix(x@observed, by, na.rm), state = rowSumsBy.matrix(x@state, by, na.rm) > 0, alarm = rowSumsBy.matrix(x@alarm, by, na.rm) > 0, populationFrac = rowSumsBy.matrix(x@populationFrac, by, na.rm), epochAsDate = x@epochAsDate, multinomialTS = x@multinomialTS) } surveillance/R/twinstim_siaf_step.R0000644000176200001440000001202713276305560017216 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### twinstim's spatial interaction function as a step function ### ### Copyright (C) 2014,2018 Sebastian Meyer ### $Revision: 2143 $ ### $Date: 2018-05-14 15:13:20 +0200 (Mon, 14. May 2018) $ ################################################################################ siaf.step <- function (knots, maxRange = Inf, nTypes = 1, validpars = NULL) { knots <- sort(unique(as.vector(knots,mode="numeric"))) stopifnot(knots > 0, is.finite(knots), isScalar(maxRange), maxRange > knots) nknots <- length(knots) # = number of parameters (per type) allknots <- c(0, knots, unique(c(maxRange,Inf))) allknotsPos <- c(0,knots,maxRange) # pos. domain, =allknots if maxRange=Inf nTypes <- as.integer(nTypes) stopifnot(length(nTypes) == 1L, nTypes > 0L) ## for the moment we don't make this type-specific if (nTypes != 1) stop("type-specific shapes are not yet implemented") npars <- nknots * nTypes ## ## auxiliary function to get the type-specific values (heights) from logvals ## logvals4type <- function (logvals, type) ## logvals[(type-1)*nknots + seq_len(nknots)] ## auxiliary function calculating the areas of the "rings" of the ## two-dimensional step function intersected with a polydomain. ## Returns a numeric vector of length ## length(allknotsPos)-1 == nknots+1 (i.e. 
not appending the area of the
    ## 0-height ring from maxRange to Inf in case maxRange < Inf)
    .ringAreas <- function (polydomain, npoly = 256) {
        polyvertices <- vertices(polydomain)
        polyarea <- area.owin(polydomain)
        bdist <- bdist(cbind(0,0), polydomain)
        ## distance to farthest vertex (-> later steps not relevant)
        R <- sqrt(max(polyvertices[["x"]]^2 + polyvertices[["y"]]^2))
        sliceAreas <- vapply(X = allknotsPos[-1L], FUN = function (r) {
            if (r <= bdist) pi * r^2 else if (r >= R) polyarea else
            area.owin(intersectPolyCircle.owin(polydomain,c(0,0),r,npoly=npoly))
        }, FUN.VALUE = 0, USE.NAMES = FALSE)
        diff.default(c(0,sliceAreas))
    }
    ## since this is the most cumbersome task, use memoization (result does not
    ## depend on the parameters being optimized, but on influenceRegions only)
    ringAreas <- if (requireNamespace("memoise")) {
        memoise::memoise(.ringAreas)
    } else {
        warning("siaf.step() is much slower without memoisation", immediate.=TRUE)
        .ringAreas
    }

    f <- function (s, logvals, types = NULL) {
        sLength <- sqrt(.rowSums(s^2, length(s)/2, 2L))
        ## step function is right-continuous, intervals are [a,b)
        c(1, exp(logvals), 0)[.bincode(sLength, allknots, right=FALSE)]
    }

    F <- function (polydomain, f, logvals, type = NULL, npoly = 256) {
        ## sum of the volumes of the intersections of "rings" with 'polydomain'
        sum(c(1, exp(logvals)) * ringAreas(polydomain, npoly=npoly))
    }

    Fcircle <- function (r, logvals, type = NULL) { # exact integration on disc
        ## this is just the sum of the "ring" volumes
        sum(c(1, exp(logvals)) * pi * diff(pmin.int(allknotsPos, r)^2))
    }

    deriv <- function (s, logvals, types = NULL) {
        sLength <- sqrt(.rowSums(s^2, L <- length(s)/2, 2L))
        whichvals <- .bincode(sLength, allknots, right=FALSE) - 1L
        ## NOTE: sLength >= maxRange => whichvals > nknots (=> f=0)
        ## we do a bare-bone implementation of relevant parts of
        ## deriv <- outer(whichvals, seq_len(nknots), "==")
        Y <- rep.int(seq_len(nknots), rep.int(L,nknots)) # column index
        Z <- rep.int(exp(logvals), rep.int(L,nknots))    # value
        ##<- 6x faster than rep(..., each=L)
        #X <- rep.int(whichvals, nknots)
        deriv <- (Y == whichvals) * Z
        dim(deriv) <- c(L, nknots)
        deriv
    }

    Deriv <- function (polydomain, deriv, logvals, type = NULL, npoly = 256) {
        ringAreas <- ringAreas(polydomain, npoly=npoly)
        exp(logvals) * ringAreas[-1L]
    }

    simulate <- function (n, logvals, type = NULL, ub) {
        upper <- min(maxRange, ub)
        knots2upper <- c(knots[knots < upper], upper)
        heights <- c(1,exp(logvals))[seq_along(knots2upper)]
        ## first, sample the "rings" of the points
        rings <- sample.int(length(heights), size=n, replace=TRUE,
                            prob=heights*diff.default(c(0,knots2upper^2)))
        ## sample points from these rings
        runifdisc(n, knots2upper[rings], c(0,knots2upper)[rings])
    }

    ## Done
    res <- list(f = f, F = F, Fcircle = Fcircle, deriv = deriv, Deriv = Deriv,
                simulate = simulate, npars = npars, validpars = validpars)
    attr(res, "knots") <- knots
    attr(res, "maxRange") <- maxRange
    res
}
surveillance/R/twinstim_step.R0000644000176200001440000001431412213074366016207 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
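###
### Illustrative call of stepComponent() as defined below (a sketch;
### 'myfit' stands for a previously fitted "twinstim" object and is not
### defined here):
if (FALSE) {
    fitSel <- stepComponent(myfit, component = "endemic", direction = "both")
}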
### ### Functions and methods to make step() work for twinstim objects ### (restricted to one component at a time) ### ### Copyright (C) 2013 Sebastian Meyer ### $Revision: 645 $ ### $Date: 2013-09-08 15:17:42 +0200 (Sun, 08. Sep 2013) $ ################################################################################ ### To make step() work, we are dealing with modified twinstim objects: ### object$formula is replaced by the result of terms(object), which selects only ### one of the two components! The original full formula specification is ### retained in the new "formulae" component. ### We let this special class inherit from "twinstim" such that, e.g., ### extractAIC.twinstim is used for its objects. However, this is tricky since ### the classes are actually incompatible in the formula specification. Only ### methods which don't use the $formula part work, but this constraint holds ### for what is needed to run step(), if we define some additional specific ### methods for this class. twinstim_stependemic <- twinstim_stepepidemic <- function (object) { stepClass <- grep("twinstim_step", sys.call()[[1L]], value=TRUE) ##<- since sys.call()[[1L]] may also be surveillance:::... if (identical(class(object), "twinstim")) { component <- sub("twinstim_step", "", stepClass) object$formulae <- object$formula object$formula <- object$formulae[[component]] class(object) <- c(stepClass, "twinstim") } else if (!inherits(object, stepClass)) stop("unintended use") object } ## In the first step() loop, object$call$formula is set to terms(object). Since ## there is no "formula" argument to twinstim(), we must remove it from the call ## before update()ing. We also have to convert object$formula to the complete ## formula specification (a named list) and remove the original one ($formulae). .step2twinstim <- function (object) { ##if (identical(class(object), "twinstim")) return(object) component <- sub("^twinstim_step", "", class(object)[1]) stopifnot(component %in% c("endemic", "epidemic")) object$call$formula <- NULL object$formula <- modifyList( object$formulae, setNames(list(formula(object$formula)), component) ) object$formulae <- NULL class(object) <- "twinstim" object } ### special update- and terms-methods for use through stepComponent() below update.twinstim_stependemic <- function (object, endemic, ..., evaluate = TRUE) { object <- .step2twinstim(object) res <- NextMethod("update") # use update.twinstim() ## we need to keep the special class such that step() will keep invoking ## the special update- and terms-methods on the result stepClass <- sub("update.", "", .Method, fixed=TRUE) ##<- or: .Class[1L], or: grep("step", class(object), value=TRUE) if (evaluate) { do.call(stepClass, alist(res)) } else { as.call(list(call(":::", as.name("surveillance"), as.name(stepClass)), res)) ## the call will only be evaluated within stats:::drop1.default() or ## stats:::add1.default, where the "stepClass" constructor function ## (twinstim_stependemic or twinstim_stepepidemic) is not visible; ## we thus have to use ":::". } } update.twinstim_stepepidemic <- function (object, epidemic, ..., evaluate = TRUE) {} body(update.twinstim_stepepidemic) <- body(update.twinstim_stependemic) terms.twinstim_stependemic <- terms.twinstim_stepepidemic <- function (x, ...) 
terms(x$formula) ### Function to perform AIC-based model selection (component-specific) ### This is essentially a wrapper around stats::step() stepComponent <- function (object, component = c("endemic", "epidemic"), scope = list(upper=object$formula[[component]]), direction = "both", trace = 2, verbose = FALSE, ...) { component <- match.arg(component) ## Convert to special twinstim class where $formula is the component formula object_step <- do.call(paste0("twinstim_step", component), alist(object)) ## silent optimizations if (trace <= 2) object_step$call$optim.args$control$trace <- object_step$optim.args$control$trace <- 0 object_step$call$verbose <- verbose ## Run the selection procedure res <- step(object_step, scope = scope, direction = direction, trace = trace, ...) ## Restore original trace and verbose arguments if (trace <= 2) { res$call$optim.args$control <- object$call$optim.args$control res$optim.args$control <- object$optim.args$control } res$call$verbose <- object$call$verbose ## Convert back to original class .step2twinstim(res) } ### add1.default and drop1.default work without problems through the above ### implementation of stepComponent() using the tricky twinstim classes, ### where object$formula is replaced by the requested component's formula. ### However, for stand-alone use of add1 and drop1, we need specialised methods. add1.twinstim <- drop1.twinstim <- function (object, scope, component = c("endemic", "epidemic"), trace = 2, ...) { component <- match.arg(component) ## Convert to special twinstim class where $formula is the component formula object <- do.call(paste0("twinstim_step", component), alist(object)) ## Call the default method (unfortunately not exported from stats) ## Note that the next method chosen is "unchanged if the class of the ## dispatching argument is changed" (see ?NextMethod) ## (the "component" argument will be part of "..." and passed further on to ## extractAIC.twinstim() where it is unused) NextMethod(trace=trace) } add1.twinstim_stependemic <- drop1.twinstim_stependemic <- function (object, scope, ...) NextMethod(component="endemic") add1.twinstim_stepepidemic <- drop1.twinstim_stepepidemic <- function (object, scope, ...) NextMethod(component="epidemic") surveillance/R/sts.R0000644000176200001440000004361014026737661014123 0ustar liggesusers################################################################################ ### Initialization and other basic methods for the S4 class "sts" ### ### Copyright (C) 2007-2014 Michael Hoehle, 2012-2019,2021 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. 
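###
### Illustrative construction of an "sts" object via the user-level
### constructor sts() defined below (a sketch; the weekly counts are
### made up for two hypothetical units):
if (FALSE) {
    counts <- matrix(rpois(104 * 2, lambda = 5), nrow = 104, ncol = 2,
                     dimnames = list(NULL, c("unit1", "unit2")))
    mysts <- sts(observed = counts, start = c(2000, 1), frequency = 52)
}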
################################################################################ ###################################################################### # initialize-method -- see ../man/sts-class.Rd for class information ###################################################################### #Ensure that all matrix slots have the same dimnames, which are #always taken from the observed matrix fix.dimnames <- function(x) { dn <- dimnames(x@observed) #Make sure all arrays have the same dimnames dimnames(x@alarm) <- dimnames(x@state) <- dimnames(x@upperbound) <- dimnames(x@populationFrac) <- dn #Special for neighbourhood dimnames(x@neighbourhood) <- dn[c(2L,2L)] return(x) } ## a user-level constructor function, ## which calls the standard generator function .sts(), ## which calls initialize() on the "sts" prototype - see init.sts() below ## NOTE: using sts() is the preferred approach since surveillance 1.10-0 ## NOTE: NULL arguments are ignored => default slot values sts <- function (observed, start = c(2000, 1), frequency = 52, # prototype values epoch = NULL, # defaults to 1:nrow(observed), can be Date population = NULL, # an alias for "populationFrac" ...) # further named arguments representing "sts" slots { slots <- list(observed = observed, start = start, freq = frequency, epoch = epoch, ...) if (!is.null(population)) { if ("populationFrac" %in% names(slots)) warning("'population' takes precedence over 'populationFrac'") slots$populationFrac <- population } # else "populationFrac" is a possible element of ... if (inherits(epoch, "Date")) { ## FIXME: guess missing start value similar to linelist2sts ## if (missing(start) && frequency == 52) ## slots$start <- unlist(isoWeekYear(epoch[1L]), use.names = FALSE) slots$epoch <- as.integer(epoch) slots$epochAsDate <- TRUE } ## call the standard generator function with explicitly set slots isNULL <- vapply(X = slots, FUN = is.null, FUN.VALUE = FALSE, USE.NAMES = FALSE) do.call(.sts, slots[!isNULL]) } ## initialize-method called by new("sts", ...), ## the long-standing default way of creating "sts" objects. ## For backward-compatibility, we keep this customized initialize-method, ## although it would be cleaner to put things into the generator function ## and use the default initialize-method. init.sts <- function(.Object, ..., # also for slots of classes extending "sts" observed, # use copy constructor if missing(observed) ## the following default arguments depend on dim(observed) epoch = seq_len(nTime), state = matrix(FALSE, nTime, nUnit), alarm = matrix(NA, nTime, nUnit), upperbound = matrix(NA_real_, nTime, nUnit), neighbourhood = matrix(NA, nUnit, nUnit), populationFrac = matrix(1/nUnit, nTime, nUnit), ## FIXME: change default to a matrix of NA_real_ ? ## the map slot needs special treatment (see below) map = .Object@map # old/prototype value ## the remaining slots have useful prototype values ## and are handled as part of ... ##start = c(2000, 1), freq = 52, ##epochAsDate = FALSE, multinomialTS = FALSE, ##control = .Object@control ) { if (nargs() < 2) # nothing to do return(.Object) if (missing(observed)) { # use default initialize-method ## such that, e.g., initialize(stsObj, map=newMap) will set a new map ## and copy other slots from stsObj instead of (re-)setting to defaults, ## as well as to support new("stsBP", stsObj, ci=ci, lambda=lambda). ## CAVE: automatic dimension correction of matrix slots is not done. 
.Object <- callNextMethod() ## Drawback: .Object@map has been forced to "SpatialPolygons" if (!missing(map)) # restore the supplied map .Object@map <- map ## If missing(map), .Object@map = as(stsObj@map, "SpatialPolygons"), ## i.e., data will be lost => map=stsObj@map must be passed explicitly .Object <- fix.dimnames(.Object) return(.Object) } ## Ensure matrix form (auto-conversion is useful for single time series) observed <- as.matrix(observed) nUnit <- ncol(observed) nTime <- nrow(observed) state <- as.matrix(state) alarm <- as.matrix(alarm) upperbound <- as.matrix(upperbound) ## clear rownames and set colnames for the matrix of observed counts if (is.null(namesObs <- colnames(observed))){ namesObs <- paste0("observed", seq_len(nUnit)) } dimnames(observed) <- list(NULL, namesObs) ## if there is only one state-vector for more than one area, repeat it if (nUnit > 1 && ncol(state) == 1 && length(state) == nTime) { state <- rep.int(state, nUnit) dim(state) <- c(nTime, nUnit) } ## time-constant population fractions can be provided as a single vector if (is.vector(populationFrac, mode="numeric") && length(populationFrac) == nUnit) { populationFrac <- matrix(populationFrac, nTime, nUnit, byrow=TRUE) } ## we need to set the map manually since the initialize,ANY-method called ## next would coerce a "SpatialPolygonsDataFrame" to "SpatialPolygons" if (!missing(map)) .Object@map <- map ## set all other slots (including for classes extending this class) ## using the default initialize-method .Object <- callNextMethod(.Object, ..., observed=observed, epoch=epoch, state=state, alarm=alarm, upperbound=upperbound, neighbourhood=neighbourhood, populationFrac=populationFrac) ## this also checks validObject(.Object) ## for nUnit > 1, it will catch if any column names differ from namesObs ## use dimnames(observed) for all matrix slots (could be unnamed) .Object <- fix.dimnames(.Object) return(.Object) } setMethod("initialize", "sts", init.sts) ########################################################################### # Conversion between old "disProg" and new "sts" classes ########################################################################### ## transform a "disProg" object to the new "sts" class disProg2sts <- function(disProgObj, map=NULL) { disProgObj$map <- map ## NOTE: we cannot trust disProgObj$week to be a valid "epoch" specification, ## e.g., the week in data("ha") refers to the week number _within_ a year. ## CAVE: in "disProg" objects, several elements may be missing or NULL, ## and there could be further elements not matching any "sts" slot, ## e.g., in "disProg" objects generated by sim.pointSource() validElements <- names(disProgObj) %in% slotNames("sts") & !vapply(X=disProgObj, FUN=is.null, FUN.VALUE=FALSE, USE.NAMES=FALSE) ## initialize an "sts" object using the valid "disProg" elements stsObj <- do.call(.sts, disProgObj[validElements]) return(stsObj) } ## The reverse action sts2disProg <- function(sts) { disProgObj <- create.disProg(week=sts@epoch, start=sts@start, freq=sts@freq, observed=sts@observed, state=sts@state, neighbourhood=sts@neighbourhood, populationFrac=sts@populationFrac, epochAsDate=sts@epochAsDate) #For survRes: alarm=sts@alarm, upperbound=sts@upperbound) return(disProgObj) } ########################################################################### #Method to aggregate over all units, either the time series is aggregated #so a new sampling frequency of nfreq units per time slot is obtained. #The other alternative is to aggregate all units. 
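#
# Example (sketch): for a hypothetical weekly "sts" object 'mysts',
#   aggregate(mysts, by="time", nfreq=13) # sum into 13 four-week periods/year
#   aggregate(mysts, by="unit")           # sum the counts over all units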
#
# Note: The function is not 100% consistent with what the generic
# aggregate does.
#
# Warning: In case the aggregation is by unit, the upperbound slot is
# set to NA and the map slot is dropped, since both no longer
# match the aggregated time series.
#
# Params:
# by - a string being either "time" or "unit"
# nfreq - new sampling frequency if by=="time". If "all" then all
# time instances are summed.
###########################################################################
aggregate.sts <- function(x, by="time", nfreq="all", ...)
{
  by <- match.arg(by, choices = c("time", "unit"))

  #Aggregate time
  if (by == "time") {
    if (nfreq == "all") {
      howmany <- dim(x@observed)[1]
    } else if (nfreq == x@freq) { # nothing to do
      return(x)
    } else { # nfreq != x@freq
      howmany <- x@freq / nfreq
      if (howmany - ceiling(howmany) != 0)
        stop("x@freq has to be a multiple of nfreq.")
    }
    n <- dim(x@observed)[1]
    m <- ceiling(n/howmany)
    new <- rep(1:m,each=howmany)[1:n]
    x@freq <- ifelse(nfreq == "all", howmany, nfreq)
    x@epoch <- 1:m
    x@observed <- as.matrix(aggregate(x@observed,by=list(new),sum)[,-1])
    x@state <- as.matrix(aggregate(x@state,by=list(new),sum)[,-1])>0
    x@alarm <- as.matrix(aggregate(x@alarm,by=list(new),sum)[,-1]) # number of alarms
    x@upperbound <- as.matrix(aggregate(x@upperbound,by=list(new),sum)[,-1])
    ## summing population (fractions) over time
    had_fractions <- !x@multinomialTS && all(rowSums(x@populationFrac) == 1)
    x@populationFrac <- as.matrix(aggregate(x@populationFrac,by=list(new),sum)[,-1])
    if (isTRUE(had_fractions)) { # population fractions need to be recomputed
      x@populationFrac <- x@populationFrac / rowSums(x@populationFrac)
    }
  }

  #Aggregate units
  if (by == "unit") {
    x@observed <- as.matrix(rowSums(x@observed))
    x@state <- as.matrix(rowSums(x@state))>0
    x@alarm <- as.matrix(rowSums(x@alarm))>0 # contrary to counting for by="time"!
    #There is no clever way to aggregate the upperbounds
    x@upperbound <- matrix(NA_real_,ncol=ncol(x@alarm),nrow=nrow(x@alarm))
    x@populationFrac <- as.matrix(rowSums(x@populationFrac))
    x@neighbourhood <- matrix(NA, 1, 1) # consistent with default for new("sts")
    ## we have lost colnames
    colnames(x@observed) <- "overall"
    x <- fix.dimnames(x)
    ## drop the map (set to empty prototype)
    x@map <- new(getSlots("sts")[["map"]])
  }

  #validObject(x) #just a check
  return(x)
}

setMethod("aggregate", signature(x="sts"), aggregate.sts)


#####################################################################
# Miscellaneous access methods
####################################################################

setMethod("dim", "sts", function (x) dim(x@observed))
setMethod("dimnames", "sts", function (x) dimnames(x@observed))

#Extract which observation within year we have
setMethod("epochInYear", "sts", function(x,...) {
  if (x@epochAsDate && x@freq %in% c(12, 52, 365)) {
    epochStr <- switch(as.character(x@freq), "12" = "%m", "52" = "%V", "365" = "%j")
    as.numeric(strftime(epoch(x), epochStr))
  } else {
    index <- if (x@epochAsDate) { # non-standard frequency
      seq_along(x@epoch)
    } else x@epoch # should always be 1:nrow(x) actually
    (index-1 + x@start[2]-1) %% x@freq + 1
  }
})

#Extract the corresponding year for each observation
setMethod("year", "sts", function(x,...)
{ if (x@epochAsDate) { as.numeric(strftime(epoch(x), if (x@freq == 52) "%G" else "%Y")) } else { ((x@epoch-1 + x@start[2]-1) + (x@freq*x@start[1])) %/% x@freq } }) ##################################################################### #[-method for truncating the time series and/or selecting units ##################################################################### setMethod("[", "sts", function(x, i, j, ..., drop) { nTimeOriginal <- nrow(x@observed) if (missing(i)) { # set default value i <- seq_len(nTimeOriginal) } else if (anyNA(i)) { stop("missing row index values are not supported") } else if (is.logical(i)) { # convert to integer index i <- which(rep_len(i, nTimeOriginal)) } else if (is.character(i)) { stop("character row indices are not supported") } else if (any(i < 0)) { # convert to (positive) indices if (any(i > 0)) stop("only 0's may be mixed with negative subscripts") i <- setdiff(seq_len(nTimeOriginal), -i) } else if (any(i0 <- i == 0)) { # drop 0's (for the diff check below) i <- i[!i0] } ## if(missing(j)) j <- seq_len(ncol(x@observed)) # redundant if (!missing(j) && anyNA(j)) stop("missing column index values are not supported") ## check if i is a regular integer sequence (not invalidating freq) if (any(diff(i) != 1)) warning("irregular row index could invalidate \"freq\"") x@epoch <- x@epoch[i] x@observed <- x@observed[i,j,drop=FALSE] x@state <- x@state[i,j,drop=FALSE] x@alarm <- x@alarm[i,j,drop=FALSE] recompute_fractions <- !missing(j) && !x@multinomialTS && all(rowSums(x@populationFrac) == 1) x@populationFrac <- x@populationFrac[i,j,drop=FALSE] if (isTRUE(recompute_fractions)) { x@populationFrac <- x@populationFrac / rowSums(x@populationFrac) } x@upperbound <- x@upperbound[i,j,drop=FALSE] #Neighbourhood matrix if (ncol(x@observed) != ncol(x@neighbourhood) && # selected units !all(x@neighbourhood %in% c(NA,0,1))) { # no adjacency matrix message("Note: selection of units could invalidate the 'neighbourhood'") ## e.g., if 'neighbourhood' specifies neighbourhood orders } x@neighbourhood <- x@neighbourhood[j,j,drop=FALSE] #Fix the "start" and "epoch" entries (if necessary) if (any(i != 0) && i[1] != 1) { #Note: This code does not work if we have week 53s! i.min <- min(i) # in regular use, this should actually be i[1] new.sampleNo <- x@start[2] + i.min - 1 start.year <- x@start[1] + (new.sampleNo - 1) %/% x@freq start.sampleNo <- (new.sampleNo - 1) %% x@freq + 1 x@start <- c(start.year, start.sampleNo) if (!x@epochAsDate) { ## we also have to update epoch since it is relative to start ## and actually it should always equal 1:nrow(observed) x@epoch <- x@epoch - i.min + 1L } ## if (x@epochAsDate && x@freq == 52) { ## ## FIXME: should we derive start from the first date? ## ISO <- isoWeekYear(as.Date(x@epoch[1], origin = "1970-01-01")) ## x@start <- c(ISO$ISOYear, ISO$ISOWeek) ## } } ## Note: We do not automatically subset the map according to j, since ## identical(row.names(map), colnames(observed)) ## is not a property of the sts-class; Unmonitored regions are allowed. #Done return(x) }) ######################################################################### ## Plot method ... 
the type argument specifies what type of plot to make
##
## plot as multivariate time series:  type = observed ~ time | unit
## plot as map object aggregated over time: type = observed ~ 1 | unit
## new map implementation via: type = observed ~ unit
## the specific plot functions are in separate files (stsplot_*.R)
########################################################################

plot.sts <- function (x, type = observed ~ time | unit, ...)
{
  # catch new implementation of time-aggregate map plot
  if (isTRUE(all.equal(observed ~ unit, type)))
      return(stsplot_space(x, ...))

  #Valid formula?
  valid <- lapply(as.list(type[[3]]), function(i)
      is.na(pmatch(i,c("1","unit","|","time","*","+"))))
  valid <- all(!unlist(valid))
  obsOk <- (type[[2]] == "observed")
  alarmOk <- (type[[2]] == "alarm")
  if (!valid || !(obsOk | alarmOk))
      stop("Not a valid plot type")

  #Parse the formula, i.e. extract components
  map <- (length(type[[3]])==3) && (type[[3]][[1]] == "|") && (type[[3]][[2]] == "1")
  time <- pmatch("time",type[[3]]) > 0
  #All-in-one if type=time+unit -> no, use argument "as.one" for stsplot_time
  #as.one <- all(!is.na(pmatch(c("time","unit"),type[[3]] ))) && is.na(pmatch("|",type[[3]]))

  #No unit dimension?
  justTime <- type[[3]] == "time"

  #space-time plots
  if (map) {
    stsplot_spacetime(x, type, ...)
    return(invisible())
  }

  #time plots
  if (time) {
    if (obsOk) {
      #In case observed ~ time, the units are aggregated
      stsplot_time(if(justTime) aggregate(x,by="unit") else x, ...)
      return(invisible())
    }
    if (alarmOk) {
      stsplot_alarm(x, ...)
      return(invisible())
    }
  }
}

setMethod("plot", signature(x="sts", y="missing"), plot.sts)


## define how "sts" objects get printed
setMethod( "show", "sts", function( object ){
  cat( "-- An object of class ", class(object), " -- \n", sep = "" )
  if (!object@epochAsDate) {
    cat( "freq:\t\t", object@freq,"\n" )
  } else {
    epochStr <- switch( as.character(object@freq), "12" = "%m","52" = "%V","365" = "%j")
    cat( "freq:\t\t", paste(object@freq," with strptime format string ",epochStr,"\n",sep=""))
  }
  if (!object@epochAsDate) {
    cat( "start:\t\t",object@start,"\n" )
  } else {
    cat( "start:\t\t",paste(epoch(object)[1]),"\n" )
  }
  cat( "dim(observed):\t", dim(object@observed), "\n\n")

  n <- 1
  cat("Head of observed:\n")
  print(head(object@observed,n))

  if (npoly <- length(object@map)) {
    cat("\nmap:\n")
    print(modifyList(summary(object@map), list(data=NULL))) # no data summary
    cat("Features :", npoly, "\n")
    if (inherits(object@map, "SpatialPolygonsDataFrame"))
        cat("Data slot :", ncol(object@map), "variables\n")
  }

  if (ncol(object@observed) > 1) {
    cat("\nhead of neighbourhood:\n")
    print( head(object@neighbourhood,n))
  }
} )
surveillance/R/LRCUSUM.runlength.R0000644000176200001440000001260512712141044016431 0ustar liggesusers######################################################################
# Compute the log-likelihood ratio for a univariate or multivariate
# categorical distribution
#
# Params:
#  outcomes - a data frame with all possible configurations of the (k-1)
#             variables not being the reference category
#  mu  - proportions under which the probability of each outcome is computed
#  mu0 - null model. A vector of length (k-1)
#  mu1 - alternative model. A vector of length (k-1)
######################################################################
LLR.fun <- function(outcomes, mu, mu0, mu1, dfun, ...) {
  #Compute likelihood ratios. Both univariate and the multivariate
  #values are computed
  llr.res <- t(apply(outcomes,1, function(y) {
    llr <- dfun(y, mu=mu1, log=TRUE,...) - dfun(y, mu=mu0, log=TRUE, ...)
    p <- dfun(y, mu=mu, ...)
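    #each row thus pairs the outcome's log-likelihood ratio with its
    #probability under mu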
    return(c(llr=llr,p=p))
  }))
  res <- cbind(outcomes,llr.res)
  colnames(res) <- c(paste("y",1:ncol(outcomes),sep=""),"llr","p")
  return(res)
}

######################################################################
# Function to compute all possible outcomes for the categorical time
# series. This is needed for the LLR computations
#
# Parameters:
#  k - dimension of the problem, i.e. the number (k-1) of categories
#      not being the reference category
#  n - number of items arranged (i.e. number of experiments), an integer
#
# Returns:
#  matrix of size (number of configs) \times (k-1)
#  containing all possible states
######################################################################
outcomeFunStandard <- function(k,n) {
  #Enumerate all possible states.
  #Note: Currently all states are investigated. This might be way too
  #much work as de facto many states have an occurrence probability near 0!!
  args <- list() ; for (j in seq_len(k)) args[[j]] <- 0:n
  outcomes <- as.matrix(do.call("expand.grid", args))
  #Take only valid outcomes (might reduce drastically the number of cells)
  if (!is.null(n)) {
    outcomes <- outcomes[apply(outcomes,1,sum) <= n,,drop=FALSE]
  }
  return(outcomes)
}

######################################################################
# Compute run length for CUSUM based on Markov representation of the
# Likelihood ratio based CUSUM
#
# Parameters:
#  mu  - (k-1 \times T) matrix with true proportions, i.e. equal to mu0 or
#        mu1 if one wants to compute e.g. ARL_0 or ARL_1
#  mu0 - (k-1 \times T) matrix with in-control proportions
#  mu1 - (k-1 \times T) matrix with out-of-control proportions
#  n   - vector of length T containing the total number of experiments for
#        each time point
#  h   - the threshold h which is used for the CUSUM
#  g   - the number of levels to cut the state space into, i.e. the number M
#        of grid points of the Markov chain approximation
######################################################################
LRCUSUM.runlength <- function(mu,mu0,mu1,h,dfun, n, g=5,outcomeFun=NULL,...) {
  #Semantic checks
  if ( ((ncol(mu) != ncol(mu0)) | (ncol(mu0) != ncol(mu1))) |
       ((nrow(mu) != nrow(mu0)) | (nrow(mu0) != nrow(mu1)))) {
    stop("dimensions of mu, mu0 and mu1 have to match")
  }
  if (missing(h)) {
    stop("No threshold specified!")
  }
  #If no specific way for computing the outcomes is given
  #use the standard way.
  if (is.null(outcomeFun)) {
    outcomeFun <- outcomeFunStandard
  }
  #Discretize the possible states of the CUSUM
  S <- c(-Inf,seq(0,h,length=g))
  names <- c(levels(cut(1,S,right=TRUE)),">=h")
  #Time variable
  t <- 1:ncol(mu)
  #Dimension of the problem (k-1)
  km1 <- nrow(mu)

  #Create transition matrix for CUSUM control chart
  P <- array(0, dim=c(length(t),g+1,g+1),dimnames=list(t,names,names))
  #Once in the absorbing state stay there!
  P[,g+1,g+1] <- 1

  #Loop over all P[t,,] and compute probabilities
  for (i in seq_len(length(t))) {
    cat("Looking at t=",i," out of ",length(t),"\n")
    #Determine all possible outcomes
    outcomes <- outcomeFun(km1,n[i])
    #Compute all possible likelihood ratios and their probability under mu
    llr <- LLR.fun(outcomes,mu=mu[,i],mu0=mu0[,i],mu1=mu1[,i],dfun=dfun,size=n[i],...)
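    #(the size=n[i] argument is passed through LLR.fun's '...' on to dfun)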
    #Exact CDF of the LLR for this time
    F <- stepfun(sort(llr[,"llr"]),c(0,cumsum(llr[order(llr[,"llr"]),"p"])))

    #Compute probability going from c <= S_{t-1} < d to a <= S_{t} < b
    for (j in 1:g) { #from index
      for (k in 1:g) { #to index
        a <- S[k] ; b <- S[k+1] ; c <- S[j] ; d <- S[j+1] ; m <- (c+d)/2
        #From zero to new state
        if (j == 1) {
          P[i,j,k] <- F(b) - F(a)
        } else {
          #Riemann integral, assuming as in Brook & Evans (1972) that S_{t-1}
          #sits at the midpoint of its interval
          #P[i,j,k] <- F(b-m) - F(a-m)
          #Slightly better approximation by Hawkins (1992), which uses Simpson's rule
          P[i,j,k] <- (F(b-c) + 4*F(b-m) + F(b-d) - F(a-c) - 4*F(a-m) - F(a-d))/6
        }
      }
    }
    #Whatever is missing goes to the >=h category (take care of rounding errors)
    P[i,-(g+1),(g+1)] <- pmax(0,1-apply(P[i,-(g+1),-(g+1)],1,sum))
  }

  #Use the transition matrices to compute the run-length distribution
  Ppower <- P[1,,]
  alarmUntilTime <- numeric(ncol(mu0))
  alarmUntilTime[1] <- Ppower[1,ncol(P)]
  for (time in t[-1]) { #from 2 to length of t
    Ppower <- Ppower %*% P[time,,]
    alarmUntilTime[time] <- Ppower[1,ncol(P)]
  }
  pRL <- c(alarmUntilTime[1],diff(alarmUntilTime))

  mom <- NA
  #If the Markov chain is homogeneous then compute the ARL by inverting I-R
  if (length(t) == 1) {
    R <- P[,1:g,1:g]
    I <- diag(nrow=g)
    mom <- rowSums(solve(I-R))
  }

  return(list(P=P,pmf=pRL,cdf=alarmUntilTime,arl=mom[1]))
}
surveillance/R/graphs.R0000644000176200001440000000662713174076737014574 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
###
### Functions concerning graphs: neighbourhood order, adjacency matrix
### These are wrappers around functionality from package "spdep" by Roger Bivand
###
### Copyright (C) 2009-2013,2017 Sebastian Meyer
### $Revision: 2022 $
### $Date: 2017-10-25 14:04:47 +0200 (Wed, 25. Oct 2017) $
################################################################################


### Determine the matrix of neighbourhood orders
### given the binary matrix of first-order neighbours.
### Working horse: spdep::nblag()

nbOrder <- function (neighbourhood, maxlag = 1)
{
    if (!requireNamespace("spdep"))
        stop("package ", dQuote("spdep"),
             " is required to determine neighbourhood orders")
    stopifnot(isScalar(maxlag), maxlag > 0)
    checkNeighbourhood(neighbourhood)
    neighbourhood <- neighbourhood == 1 # convert to binary matrix
    nregions <- nrow(neighbourhood)
    maxlag <- as.integer(min(maxlag, nregions-1)) # upper bound of nb order

    if (maxlag == 1L) {
        storage.mode(neighbourhood) <- "integer"
        return(neighbourhood)
    }

    ## manually convert to spdep's "nb" class
    ## region.idxs <- seq_len(nregions)
    ## nb <- lapply(region.idxs, function(i) {
    ##     nbs <- which(neighbourhood[i,])
    ##     if (length(nbs) > 0L) nbs else 0L
    ## })
    ## class(nb) <- "nb"

    ## convert first-order neighbourhood to spdep's "nb" class
    nb <- spdep::mat2listw(neighbourhood)$neighbours
    attr(nb, "region.id") <- NULL

    ## compute higher order neighbours using spdep::nblag()
    nb.lags <- spdep::nblag(nb, maxlag=maxlag)

    ## Side note: fast method to determine neighbours _up to_ specific order:
    ## crossprod(neighbourhood) > 0 # up to second order neighbours (+set diag to 0)
    ## (neighbourhood %*% neighbourhood %*% neighbourhood) > 0 # up to order 3
    ## and so on...
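    ## (e.g., in a path graph 1-2-3, crossprod() would additionally mark
    ## units 1 and 3 as second-order neighbours via their common neighbour 2)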
## convert to a single matrix nbmat <- neighbourhood # logical first-order matrix storage.mode(nbmat) <- "numeric" for (lag in 2:maxlag) { if (any(spdep::card(nb.lags[[lag]]) > 0L)) { # any neighbours of this order nbmat.lag <- spdep::nb2mat(nb.lags[[lag]], style="B", zero.policy=TRUE) nbmat <- nbmat + lag * nbmat.lag } } attr(nbmat, "call") <- NULL storage.mode(nbmat) <- "integer" ## message about maximum neighbour order by region maxlagbyrow <- apply(nbmat, 1, max) message("Note: range of maximum neighbour order by region is ", paste0(range(maxlagbyrow), collapse="-"), if (max(maxlagbyrow) == maxlag) " ('maxlag' reached)") ## Done nbmat } ### Derive adjacency structure from a SpatialPolygons object ### Working horse: spdep::poly2nb poly2adjmat <- function (SpP, ..., zero.policy = TRUE) { if (!requireNamespace("spdep")) stop("package ", dQuote("spdep"), " is required to derive adjacencies from SpatialPolygons") nb <- spdep::poly2nb(SpP, ...) adjmat <- spdep::nb2mat(nb, style="B", zero.policy=zero.policy) attr(adjmat, "call") <- NULL colnames(adjmat) <- rownames(adjmat) adjmat } surveillance/R/hhh4.R0000644000176200001440000023771714020242007014135 0ustar liggesusers################################################################################ ### Endemic-epidemic modelling for univariate or multivariate ### time series of infectious disease counts (data class "sts") ### ### Copyright (C) 2010-2012 Michaela Paul, 2012-2016,2019-2021 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at https://www.R-project.org/Licenses/. ### ### $Revision: 2657 $ ### $Date: 2021-03-04 21:33:11 +0100 (Thu, 04. Mar 2021) $ ################################################################################ ## Error message issued in loglik, score and fisher functions upon NA parameters ADVICEONERROR <- "\n Try different starting values, more iterations, or another optimizer.\n" ### Main function to be called by the user hhh4 <- function (stsObj, control = list( ar = list(f = ~ -1, # a formula "exp(x'lamba)*y_t-lag" (ToDo: matrix) offset = 1, # multiplicative offset lag = 1), # autoregression on y_i,t-lag ne = list(f = ~ -1, # a formula "exp(x'phi) * sum_j w_ji * y_j,t-lag" offset = 1, # multiplicative offset lag = 1, # regression on y_j,t-lag weights = neighbourhood(stsObj) == 1, # weights w_ji scale = NULL, # such that w_ji = scale * weights normalize = FALSE), # w_ji -> w_ji / rowSums(w_ji), after scaling end = list(f = ~ 1, # a formula "exp(x'nu) * n_it" offset = 1), # optional multiplicative offset e_it family = c("Poisson", "NegBin1", "NegBinM"), # or a factor of length nUnit subset = 2:nrow(stsObj), # epidemic components require Y_{t-lag} optimizer = list(stop = list(tol = 1e-5, niter = 100), # control arguments regression = list(method = "nlminb"), # for optimization variance = list(method = "nlminb")), # <- or "Nelder-Mead" verbose = FALSE, # level of reporting during optimization start = list(fixed = NULL, # list of start values, replacing initial random = NULL, # values from fe() and ri() in 'f'ormulae sd.corr = NULL), data = list(t = stsObj@epoch - min(stsObj@epoch)), # named list of covariates keep.terms = FALSE # whether to keep interpretControl(control, stsObj) ), check.analyticals = FALSE) { ptm <- proc.time() ## Convert old disProg class to new sts class if (inherits(stsObj, "disProg")) { stsObj <- disProg2sts(stsObj) } else { stopifnot(inherits(stsObj, 
"sts")) } ## check control and set default values (for missing arguments) control <- setControl(control, stsObj) ## get model terms model <- interpretControl(control, stsObj) dimFixedEffects <- model$nFE + model$nd + model$nOverdisp dimRandomEffects <- model$nRE ## starting values #* -> better default values possible theta.start <- model$initialTheta Sigma.start <- model$initialSigma ## check if initial values are valid ## CAVE: there might be NA's in mu if there are missing values in Y mu <- meanHHH(theta.start, model, total.only=TRUE) if(any(mu==0, na.rm=TRUE) || any(is.infinite(mu))) stop("some mean is degenerate (0 or Inf) at initial values") ## check score vector and fisher information at starting values check.analyticals <- if (isTRUE(check.analyticals)) { if (length(theta.start) > 50) "maxLik" else "numDeriv" } else if (is.character(check.analyticals)) { match.arg(check.analyticals, c("numDeriv", "maxLik"), several.ok=TRUE) } else NULL if (length(check.analyticals) > 0L) { resCheck <- checkAnalyticals(model, theta.start, Sigma.start, methods=check.analyticals) return(resCheck) } ## maximize loglikelihood (penalized and marginal) myoptim <- fitHHH(theta=theta.start,sd.corr=Sigma.start, model=model, cntrl.stop = control$optimizer$stop, cntrl.regression = control$optimizer$regression, cntrl.variance = control$optimizer$variance, verbose=control$verbose) ## extract parameter estimates convergence <- myoptim$convergence == 0 thetahat <- myoptim$theta if (dimRandomEffects>0) { Sigma.orig <- myoptim$sd.corr Sigma.trans <- getSigmai(head(Sigma.orig,model$nVar), tail(Sigma.orig,model$nCorr), model$nVar) dimnames(Sigma.trans) <- rep.int(list(sub("^sd\\.", "", names(Sigma.orig)[seq_len(model$nVar)])), 2L) } else { Sigma.orig <- Sigma.trans <- NULL } ## compute covariance matrices of regression and variance parameters cov <- try(solve(myoptim$fisher), silent=TRUE) Sigma.cov <- if(dimRandomEffects>0) try(solve(myoptim$fisherVar), silent=TRUE) ## check for degenerate fisher info if(inherits(cov, "try-error")){ # fisher info is singular if (control$verbose) cat("WARNING: Final Fisher information matrix is singular!\n") convergence <- FALSE } else if(any(!is.finite(diag(cov))) || any(diag(cov)<0)){ if (control$verbose) cat("WARNING: non-finite or negative covariance of regression parameters!\n") convergence <- FALSE } if (!convergence) { if (control$verbose) { cat("Penalized loglikelihood =", myoptim$loglik, "\n") thetastring <- paste(round(thetahat,2), collapse=", ") thetastring <- strwrap(thetastring, exdent=10, prefix="\n", initial="") cat("theta = (", thetastring, ")\n") } warning("Results are not reliable!", if (any(splitParams(thetahat, model)$overdisp > 10)) { # FALSE for Poisson "\n Overdispersion parameter close to zero; maybe try a Poisson model.\n" } else ADVICEONERROR) } ## gather results in a list -> "hhh4" object result <- list(coefficients=thetahat, se=if (convergence) sqrt(diag(cov)), cov=cov, Sigma=Sigma.trans, # estimated covariance matrix of ri's Sigma.orig=Sigma.orig, # variance parameters on original scale Sigma.cov=Sigma.cov, # covariance matrix of Sigma.orig call=match.call(), dim=c(fixed=dimFixedEffects,random=dimRandomEffects), loglikelihood=myoptim$loglik, margll=myoptim$margll, convergence=convergence, fitted.values=meanHHH(thetahat, model, total.only=TRUE), control=control, terms=if(control$keep.terms) model else NULL, stsObj=stsObj, lags=sapply(control[c("ar","ne")], function (comp) if (comp$inModel) comp$lag else NA_integer_), nObs=sum(!model$isNA[control$subset,]), 
nTime=length(model$subset), nUnit=ncol(stsObj), ## CAVE: nTime is not nrow(stsObj) as usual! runtime=proc.time()-ptm) if (!convergence) { ## add (singular) Fisher information for further investigation result[c("fisher","fisherVar")] <- myoptim[c("fisher","fisherVar")] } class(result) <- "hhh4" return(result) } ## set default values for model specifications in control setControl <- function (control, stsObj) { stopifnot(is.list(control)) nTime <- nrow(stsObj) nUnit <- ncol(stsObj) if(nTime <= 2) stop("too few observations") ## arguments in 'control' override any corresponding default arguments defaultControl <- eval(formals(hhh4)$control) environment(defaultControl$ar$f) <- environment(defaultControl$ne$f) <- environment(defaultControl$end$f) <- .GlobalEnv control <- modifyList(defaultControl, control) ## check that component specifications are list objects for (comp in c("ar", "ne", "end")) { if(!is.list(control[[comp]])) stop("'control$", comp, "' must be a list") } ## check lags in "ar" and "ne" components for (comp in c("ar", "ne")) { if (!isScalar(control[[comp]]$lag) || control[[comp]]$lag < (comp=="ar")) stop("'control$", comp, "$lag' must be a ", if (comp=="ar") "positive" else "non-negative", " integer") control[[comp]]$lag <- as.integer(control[[comp]]$lag) } ### check AutoRegressive component if (control$ar$isMatrix <- is.matrix(control$ar$f)) { ## this form is not implemented -> will stop() in interpretControl() if (any(dim(control$ar$f) != nUnit)) stop("'control$ar$f' must be a square matrix of size ", nUnit) if (is.null(control$ar$weights)) { # use identity matrix control$ar$weights <- diag(nrow=nUnit) } else if (!is.matrix(control$ar$weights) || any(dim(control$ar$weights) != nUnit)) { stop("'control$ar$weights' must be a square matrix of size ", nUnit) } control$ar$inModel <- TRUE } else if (inherits(control$ar$f, "formula")) { if (!is.null(control$ar$weights)) { warning("argument 'control$ar$weights' is not used") control$ar$weights <- NULL } # check if formula is valid control$ar$inModel <- isInModel(control$ar$f) } else { stop("'control$ar$f' must be either a formula or a matrix") } ### check NEighbourhood component if (!inherits(control$ne$f, "formula")) stop("'control$ne$f' must be a formula") control$ne$inModel <- isInModel(control$ne$f) if (control$ne$inModel) { if (nUnit == 1) stop("\"ne\" component requires a multivariate 'stsObj'") ## if ar$f is a matrix it includes neighbouring units => no "ne" component if (control$ar$isMatrix) stop("there must not be an extra \"ne\" component ", "if 'control$ar$f' is a matrix") ## check ne$weights specification checkWeights(control$ne$weights, nUnit, nTime, neighbourhood(stsObj), control$data, check0diag = control$ar$inModel) ## check optional scaling of weights if (!is.null(control$ne$scale)) { stopifnot(is.numeric(control$ne$scale)) if (is.vector(control$ne$scale)) { stopifnot(length(control$ne$scale) == 1L || length(control$ne$scale) %% nUnit == 0, !is.na(control$ne$scale)) } else { checkWeightsArray(control$ne$scale, nUnit, nTime) } } } else { control$ne[c("weights", "scale", "normalize")] <- list(NULL, NULL, FALSE) } ### check ENDemic component if (!inherits(control$end$f, "formula")) stop("'control$end$f' must be a formula") control$end$inModel <- isInModel(control$end$f) ### check offsets for (comp in c("ar", "ne", "end")) { if (is.matrix(control[[comp]]$offset) && is.numeric(control[[comp]]$offset)){ if (!identical(dim(control[[comp]]$offset), dim(stsObj))) stop("'control$",comp,"$offset' must be a numeric matrix of size 
", nTime, "x", nUnit) if (any(is.na(control[[comp]]$offset))) stop("'control$",comp,"$offset' must not contain NA values") } else if (!identical(as.numeric(control[[comp]]$offset), 1)) { stop("'control$",comp,"$offset' must either be 1 or a numeric ", nTime, "x", nUnit, " matrix") } } ### stop if no component is included in the model if (length(comps <- componentsHHH4(list(control=control))) == 0L) stop("none of the components 'ar', 'ne', 'end' is included in the model") ### check remaining components of the control list if (is.factor(control$family)) { stopifnot(length(control$family) == nUnit) ## guard against misuse as family = factor("Poisson"), e.g., if taken ## from a data.frame of control options with "stringsAsFactors" if (nUnit == 1 && as.character(control$family) %in% defaultControl$family) { control$family <- as.character(control$family) warning("'family = factor(\"", control$family, "\")' is interpreted ", "as 'family = \"", control$family, "\"'") } else { control$family <- droplevels(control$family) names(control$family) <- colnames(stsObj) } } else { control$family <- match.arg(control$family, defaultControl$family) } if (!is.vector(control$subset, mode="numeric") || !all(control$subset %in% seq_len(nTime))) stop("'control$subset' must be %in% 1:", nTime) lags <- c(ar = control$ar$lag, ne = control$ne$lag) maxlag <- suppressWarnings(max(lags[names(lags) %in% comps])) # could be -Inf if (control$subset[1L] <= maxlag) { warning("'control$subset' should be > ", maxlag, " due to epidemic lags") } if (!is.list(control$optimizer) || any(! sapply(c("stop", "regression", "variance"), function(x) is.list(control$optimizer[[x]])))) stop("'control$optimizer' must be a list of lists") control$verbose <- as.integer(control$verbose) if (length(control$verbose) != 1L || control$verbose < 0) stop("'control$verbose' must be a logical or non-negative numeric value") stopifnot(is.list(control$start)) control$start <- local({ defaultControl$start[] <- control$start[names(defaultControl$start)] defaultControl$start }) if (!all(vapply(X = control$start, FUN = function(x) is.null(x) || is.vector(x, mode="numeric"), FUN.VALUE = TRUE, USE.NAMES = FALSE))) stop("'control$start' must be a list of numeric start values") stopifnot(length(control$keep.terms) == 1L, is.logical(control$keep.terms)) ## Done return(control) } # check whether or not one of the three components is included in the model isInModel <- function(formula, name=deparse(substitute(formula))) { term <- terms.formula(formula) if(attr(term,"response") > 0) stop(name, " cannot contain a response") attr(term, "intercept") + length(attr(term, "term.labels")) > 0 } # used to incorporate covariates and unit-specific effects fe <- function(x, # covariate unitSpecific = FALSE, # TRUE means which = rep.int(TRUE, nUnits) which=NULL, # NULL = overall, vector with booleans = unit-specific initial=NULL) # vector of inital values for parameters { stsObj <- get("stsObj", envir=parent.frame(1), inherits=TRUE) #checkFormula() nTime <- nrow(stsObj) nUnits <- ncol(stsObj) if(!is.numeric(x)){ stop("Covariate \'",deparse(substitute(x)),"\' is not numeric\n") } lengthX <- length(x) if(lengthX == 1){ terms <- matrix(x, nTime, nUnits, byrow=FALSE) mult <- "*" } else if(lengthX == nTime){ terms <- matrix(x, nTime, nUnits, byrow=FALSE) mult <- "*" } else if(lengthX == nTime*nUnits){ if(!is.matrix(x)){ stop("Covariate \'",deparse(substitute(x)),"\' is not a matrix\n") } # check dimensions of covariate if((ncol(x) != nUnits) | (nrow(x) != nTime)){ stop("Dimension 
of covariate \'",deparse(substitute(x)),"\' is not suitably specified\n")
    }
    terms <- x
    mult <- "*"
  } else {
    stop("Covariate \'",deparse(substitute(x)),"\' is not suitably specified\n")
  }
  intercept <- all(terms==1)

  # overall or unit-specific effect?
  unitSpecific <- unitSpecific || !is.null(which)
  if (unitSpecific) {
    if (is.null(which)) {
      which <- rep.int(TRUE, nUnits)
    } else {
      stopifnot(is.vector(which, mode="logical"), length(which) == nUnits)
    }
    terms[,!which] <- 0
  }

  # get dimension of parameter
  dim.fe <- if (unitSpecific) sum(which) else 1

  # check length of initial values + set default values
  if (is.null(initial)) {
    initial <- rep.int(0,dim.fe)
  } else if (length(initial) != dim.fe) {
    stop("initial values for '",deparse(substitute(x)),"' must be of length ",dim.fe)
  }

  name <- deparse(substitute(x))
  if (unitSpecific)
      name <- paste(name, colnames(stsObj)[which], sep=".")

  result <- list(terms=terms,
                 name=name,
                 Z.intercept=NULL,
                 which=which,
                 dim.fe=dim.fe,
                 initial.fe=initial,
                 dim.re=0,
                 dim.var=0,
                 initial.var=NULL,
                 initial.re=NULL,
                 intercept=intercept,
                 unitSpecific=unitSpecific,
                 random=FALSE,
                 corr=FALSE,
                 mult=mult
                 )
  return(result)
}

# random intercepts
ri <- function(type=c("iid","car"), corr=c("none","all"),
               initial.fe=0, initial.var=-.5, initial.re=NULL)
{
  stsObj <- get("stsObj", envir=parent.frame(1), inherits=TRUE) #checkFormula()
  if (ncol(stsObj) == 1)
      stop("random intercepts require a multivariate 'stsObj'")
  type <- match.arg(type)
  corr <- match.arg(corr)
  corr <- switch(corr, "none"=FALSE, "all"=TRUE)

  if(type=="iid"){
    Z <- 1
    dim.re <- ncol(stsObj)
    mult <- "*"
  } else if(type=="car"){
    # construct penalty matrix K
    K <- neighbourhood(stsObj)
    checkNeighbourhood(K)
    K <- K == 1 # indicate first-order neighbours
    ne <- colSums(K) # number of first-order neighbours
    K <- -1*K
    diag(K) <- ne
    dimK <- nrow(K)

    # check the rank of the neighbourhood matrix; only connected neighbourhoods are allowed
    if(qr(K)$rank != dimK-1) stop("neighbourhood matrix contains islands")

    # singular-value decomposition of K
    svdK <- svd(K)
    # just use the positive eigenvalues of K in descending order
    # for the factorisation of the penalty matrix K = LL'
    L <- svdK$u[,-dimK] %*% diag(sqrt(svdK$d[-dimK])) #* only use non-zero eigenvalues
    # Z = L(L'L)^-1, which can't be simplified to Z=(L')^-1 as L is not square
    Z <- L %*% solve(t(L)%*%L)
    dim.re <- dimK - 1L
    mult <- "%*%"
  }

  # check length of initial values + set default values
  stopifnot(length(initial.fe) == 1, length(initial.var) == 1)
  if (is.null(initial.re)) {
    initial.re <- rnorm(dim.re,0,sd=sqrt(0.001))
  } else if (length(initial.re) != dim.re) {
    stop("'initial.re' must be of length ", dim.re)
  }

  result <- list(terms=1,
                 name=paste("ri(",type,")",sep=""),
                 Z.intercept=Z,
                 which=NULL,
                 dim.fe=1,
                 initial.fe=initial.fe,
                 dim.re=dim.re,
                 dim.var=1,
                 initial.var=initial.var,
                 initial.re=initial.re,
                 intercept=TRUE,
                 unitSpecific=FALSE,
                 random=TRUE,
                 corr=corr,
                 mult=mult
                 )
  return(result)
}


### check specification of formula
## f: one of the component formulae (ar$f, ne$f, or end$f)
## component: 1, 2, or 3, corresponding to the ar/ne/end component, respectively
## data: the data-argument of hhh4()
## stsObj: the stsObj is not used directly in checkFormula, but in fe() and ri()

checkFormula <- function(f, component, data, stsObj)
{
  term <- terms.formula(f, specials=c("fe","ri"))

  # check if there is an overall intercept
  intercept.all <- attr(term, "intercept") == 1

  # list of variables in the component
  vars <- as.list(attr(term,"variables"))[-1] # first element is "list"
  nVars <- length(vars)

  # begin with intercept
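  # (e.g., the default endemic formula ~1 gives intercept.all=TRUE so that
  # fe(1) below is the only term; a hypothetical f = ~1 + t + ri("iid") would
  # additionally yield a time trend and i.i.d. random intercepts)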
  res <- if (intercept.all) {
      c(fe(1), list(offsetComp=component))
  } else {
      if (nVars==0)
          stop("formula ", deparse(substitute(f)), " contains no variables")
      NULL
  }

  # find out fixed effects without "fe()" specification
  # (only if there are variables in addition to an intercept "1")
  fe.raw <- setdiff(seq_len(nVars), unlist(attr(term, "specials")))

  # evaluate covariates
  for(i in fe.raw)
      res <- cbind(res, c(
          eval(substitute(fe(x), list(x=vars[[i]])), envir=data),
          list(offsetComp=component)
      ))

  # fixed effects
  for(i in attr(term, "specials")$fe)
      res <- cbind(res, c(
          eval(vars[[i]], envir=data),
          list(offsetComp=component)
      ))

  res <- cbind(res, deparse.level=0) # ensure res has matrix dimensions

  # random intercepts
  RI <- attr(term, "specials")$ri
  if (sum(unlist(res["intercept",])) + length(RI) > 1)
      stop("There can only be one intercept in the formula ", deparse(substitute(f)))
  for(i in RI)
      res <- cbind(res, c(
          eval(vars[[i]], envir=data),
          list(offsetComp=component)
      ))

  return(res)
}


## Create function (pars, type = "response") which
## returns the weighted sum of time-lagged counts of neighbours
## (or its derivatives, if type = "gradient" or type = "hessian").
## For type="response", this is a nTime x nUnits matrix (like Y),
## otherwise a list of such matrices,
## which for the gradient has length length(pars) and
## length(pars)*(length(pars)+1)/2 for the hessian.
## If neweights=NULL (i.e. no NE component in model), the result is always 0.
## offset is a multiplicative offset for \phi_{it}, e.g., the population.
## scale is a nUnit-vector or a nUnit x nUnit matrix scaling neweights.

neOffsetFUN <- function (Y, neweights, scale, normalize,
                         nbmat, data, lag = 1, offset = 1)
{
    if (is.null(neweights)) { # no neighbourhood component
        as.function(alist(...=, 0), envir=.GlobalEnv)
        ## dimY <- dim(Y)
        ## as.function(c(alist(...=),
        ##               substitute(matrix(0, r, c), list(r=dimY[1], c=dimY[2]))),
        ##             envir=.GlobalEnv)
    } else if (is.list(neweights)) { # parametric weights
        wFUN <- scaleNEweights.list(neweights, scale, normalize)
        function (pars, type = "response") {
            name <- switch(type, response="w", gradient="dw", hessian="d2w")
            weights <- wFUN[[name]](pars, nbmat, data)
            ## gradient and hessian are lists if length(pars$d) > 1L
            ## but can be single matrices/arrays if == 1 => _c_onditional lapply
            res <- clapply(weights, function (W)
                           offset * weightedSumNE(Y, W, lag))
            ##<- clapply always returns a list (possibly of length 1)
            if (type=="response") res[[1L]] else res
        }
    } else { # fixed (known) weight structure (0-length pars)
        weights <- scaleNEweights.default(neweights, scale, normalize)
        env <- new.env(hash = FALSE, parent = emptyenv()) # small -> no hash
        env$initoffset <- offset * weightedSumNE(Y, weights, lag)
        as.function(c(alist(...=), quote(initoffset)), envir=env)
    }
}


# interpret and check the specifications of each component
# control must contain all arguments, i.e.
setControl was used interpretControl <- function (control, stsObj) { nTime <- nrow(stsObj) nUnits <- ncol(stsObj) Y <- observed(stsObj) ########################################################################## ## get the model specifications for each of the three components ########################################################################## ar <- control$ar ne <- control$ne end <- control$end ## for backwards compatibility with surveillance < 1.8-0, where the ar and ne ## components of the control object did not have an offset if (is.null(ar$offset)) ar$offset <- 1 if (is.null(ne$offset)) ne$offset <- 1 ## for backward compatibility with surveillance < 1.9-0 if (is.null(ne$normalize)) ne$normalize <- FALSE ## create list of offsets of the three components Ym1 <- rbind(matrix(NA_integer_, ar$lag, nUnits), head(Y, nTime-ar$lag)) Ym1.ne <- neOffsetFUN(Y, ne$weights, ne$scale, ne$normalize, neighbourhood(stsObj), control$data, ne$lag, ne$offset) offsets <- list(ar=ar$offset*Ym1, ne=Ym1.ne, end=end$offset) ## -> offset$ne is a function of the parameter vector 'd', which returns a ## nTime x nUnits matrix -- or 0 (scalar) if there is no NE component ## -> offset$end might just be 1 (scalar) ## Initial parameter vector 'd' of the neighbourhood weight function initial.d <- if (is.list(ne$weights)) ne$weights$initial else numeric(0L) dim.d <- length(initial.d) names.d <- if (dim.d == 0L) character(0L) else { paste0("neweights.", if (is.null(names(initial.d))) { if (dim.d==1L) "d" else paste0("d", seq_len(dim.d)) } else names(initial.d)) } ## determine all NA's isNA <- is.na(Y) if (ar$inModel) isNA <- isNA | is.na(offsets[[1L]]) if (ne$inModel) isNA <- isNA | is.na(offsets[[2L]](initial.d)) ## get terms for all components all.term <- NULL if(ar$isMatrix) stop("matrix-form of 'control$ar$f' is not implemented") if(ar$inModel) # ar$f is a formula all.term <- cbind(all.term, checkFormula(ar$f, 1, control$data, stsObj)) if(ne$inModel) all.term <- cbind(all.term, checkFormula(ne$f, 2, control$data, stsObj)) if(end$inModel) all.term <- cbind(all.term, checkFormula(end$f,3, control$data, stsObj)) dim.fe <- sum(unlist(all.term["dim.fe",])) dim.re.group <- unlist(all.term["dim.re",], use.names=FALSE) dim.re <- sum(dim.re.group) dim.var <- sum(unlist(all.term["dim.var",])) dim.corr <- sum(unlist(all.term["corr",])) if(dim.corr>0){ if(dim.var!=dim.corr) stop("Use corr=\'all\' or corr=\'none\' ") dim.corr <- switch(dim.corr,0,1,3) } # the vector with dims of the random effects must be equal if they are correlated if(length(unique(dim.re.group[dim.re.group>0]))!=1 & dim.corr>0){ stop("Correlated effects must have same penalty") } n <- c("ar","ne","end")[unlist(all.term["offsetComp",])] names.fe <- names.var <- names.re <- character(0L) for(i in seq_along(n)){ .name <- all.term["name",i][[1]] names.fe <- c(names.fe, paste(n[i], .name, sep=".")) if(all.term["random",i][[1]]) { names.var <- c(names.var, paste("sd", n[i], .name, sep=".")) names.re <- c(names.re, paste(n[i], .name, if (.name == "ri(iid)") { colnames(stsObj) } else { seq_len(all.term["dim.re",i][[1]]) }, sep = ".")) } } index.fe <- rep(1:ncol(all.term), times=unlist(all.term["dim.fe",])) index.re <- rep(1:ncol(all.term), times=unlist(all.term["dim.re",])) # poisson or negbin model if(identical(control$family, "Poisson")){ ddistr <- function(y,mu,size){ dpois(y, lambda=mu, log=TRUE) } dim.overdisp <- 0L index.overdisp <- names.overdisp <- NULL } else { # NegBin ddistr <- function(y,mu,size){ dnbinom(y, mu=mu, size=size, log=TRUE) } ## version 
that can handle size = Inf (i.e. the Poisson special case): ## ddistr <- function (y,mu,size) { ## poisidx <- is.infinite(size) ## res <- y ## res[poisidx] <- dpois(y[poisidx], lambda=mu[poisidx], log=TRUE) ## res[!poisidx] <- dnbinom(y[!poisidx], mu=mu[!poisidx], ## size=size[!poisidx], log=TRUE) ## res ## } index.overdisp <- if (is.factor(control$family)) { control$family } else if (control$family == "NegBinM") { factor(colnames(stsObj), levels = colnames(stsObj)) ## do not sort levels (for consistency with unitSpecific effects) } else { # "NegBin1" factor(character(nUnits)) } names(index.overdisp) <- colnames(stsObj) dim.overdisp <- nlevels(index.overdisp) names.overdisp <- if (dim.overdisp == 1L) { "-log(overdisp)" } else { paste0("-log(", paste("overdisp", levels(index.overdisp), sep = "."), ")") } } environment(ddistr) <- getNamespace("stats") # function is self-contained # parameter start values from fe() and ri() calls via checkFormula() initial <- list( fixed = c(unlist(all.term["initial.fe",]), initial.d, rep.int(2, dim.overdisp)), random = as.numeric(unlist(all.term["initial.re",])), # NULL -> numeric(0) sd.corr = c(unlist(all.term["initial.var",]), rep.int(0, dim.corr)) ) # set names of parameter vectors names(initial$fixed) <- c(names.fe, names.d, names.overdisp) names(initial$random) <- names.re names(initial$sd.corr) <- c(names.var, head(paste("corr",1:3,sep="."), dim.corr)) # modify initial values according to the supplied 'start' values initial[] <- mapply( FUN = function (initial, start, name) { if (is.null(start)) return(initial) if (is.null(names(initial)) || is.null(names(start))) { if (length(start) == length(initial)) { initial[] <- start } else { stop("initial values in 'control$start$", name, "' must be of length ", length(initial)) } } else { ## we match by name and silently ignore additional start values start <- start[names(start) %in% names(initial)] initial[names(start)] <- start } return(initial) }, initial, control$start[names(initial)], names(initial), SIMPLIFY = FALSE, USE.NAMES = FALSE ) # Done result <- list(response = Y, terms = all.term, nTime = nTime, nUnits = nUnits, nFE = dim.fe, nd = dim.d, nOverdisp = dim.overdisp, nRE = dim.re, rankRE = dim.re.group, nVar = dim.var, nCorr = dim.corr, nSigma = dim.var+dim.corr, nGroups = ncol(all.term), namesFE = names.fe, indexFE = index.fe, indexRE = index.re, initialTheta = c(initial$fixed, initial$random), initialSigma = initial$sd.corr, offset = offsets, family = ddistr, indexPsi = index.overdisp, subset = control$subset, isNA = isNA ) return(result) } splitParams <- function(theta, model){ fixed <- theta[seq_len(model$nFE)] d <- theta[model$nFE + seq_len(model$nd)] overdisp <- theta[model$nFE + model$nd + seq_len(model$nOverdisp)] random <- theta[seq.int(to=length(theta), length.out=model$nRE)] list(fixed=fixed, random=random, overdisp=overdisp, d=d) } ### compute predictor meanHHH <- function(theta, model, subset=model$subset, total.only=FALSE) { ## unpack theta pars <- splitParams(theta, model) fixed <- pars$fixed random <- pars$random ## unpack model term <- model$terms offsets <- model$offset offsets[[2L]] <- offsets[[2L]](pars$d) # evaluate at current parameter value nGroups <- model$nGroups comp <- unlist(term["offsetComp",]) idxFE <- model$indexFE idxRE <- model$indexRE toMatrix <- function (x, r=model$nTime, c=model$nUnits) matrix(x, r, c, byrow=TRUE) unitNames <- dimnames(model$response)[[2L]] setColnames <- if (is.null(unitNames)) identity else function(x) "dimnames<-"(x, list(NULL, unitNames)) ## 
go through groups of parameters and compute predictor of each component, ## i.e. lambda_it, phi_it, nu_it, EXCLUDING the multiplicative offset terms, ## as well as the resulting component mean (=exppred * offset) computePartMean <- function (component) { pred <- nullMatrix <- toMatrix(0) if(!any(comp==component)) { # component not in model -> return 0-matrix zeroes <- setColnames(pred[subset,,drop=FALSE]) return(list(exppred = zeroes, mean = zeroes)) } for(i in seq_len(nGroups)[comp==component]){ fe <- fixed[idxFE==i] if(term["unitSpecific",i][[1]]){ fe <- nullMatrix which <- term["which",i][[1]] fe[,which] <- toMatrix(fixed[idxFE==i],c=sum(which)) } if(term["random",i][[1]]){ re <- random[idxRE==i] "%m%" <- get(term["mult",i][[1]]) Z.re <- toMatrix(term["Z.intercept",i][[1]] %m% re) } else { Z.re <- 0 } X <- term["terms",i][[1]] pred <- pred + X*fe + Z.re } exppred <- setColnames(exp(pred[subset,,drop=FALSE])) offset <- offsets[[component]] if (length(offset) > 1) offset <- offset[subset,,drop=FALSE] ##<- no subsetting if offset is scalar (time- and unit-independent) list(exppred = exppred, mean = exppred * offset) } ## compute component means ar <- computePartMean(1) ne <- computePartMean(2) end <- computePartMean(3) ## Done epidemic <- ar$mean + ne$mean endemic <- end$mean if (total.only) epidemic + endemic else list(mean=epidemic+endemic, epidemic=epidemic, endemic=endemic, epi.own=ar$mean, epi.neighbours=ne$mean, ar.exppred=ar$exppred, ne.exppred=ne$exppred, end.exppred=end$exppred) } ### compute dispersion in dnbinom (mu, size) parametrization sizeHHH <- function (theta, model, subset = model$subset) { if (model$nOverdisp == 0L) # Poisson case return(NULL) ## extract dispersion in dnbinom() parametrization pars <- splitParams(theta, model) size <- exp(pars$overdisp) # = 1/psi, pars$overdisp = -log(psi) ## return either a vector or a time x unit matrix of dispersion parameters if (is.null(subset)) { unname(size) # no longer is "-log(overdisp)" } else { matrix(data = size[model$indexPsi], nrow = length(subset), ncol = model$nUnits, byrow = TRUE, dimnames = list(NULL, names(model$indexPsi))) } } ## auxiliary function used in penScore and penFisher ## it sums colSums(x) within the groups defined by f (of length ncol(x)) ## and returns these sums in the order of levels(f) .colSumsGrouped <- function (x, f, na.rm = TRUE) { nlev <- nlevels(f) if (nlev == 1L) { # all columns belong to the same group ("NegBin1") sum(x, na.rm = na.rm) } else { dimx <- dim(x) colsums <- .colSums(x, dimx[1L], dimx[2L], na.rm = na.rm) if (nlev == dimx[2L]) { # each column separately ("NegBinM" or factor) colsums[order(f)] # for NegBinM, order(f)==1:nlev, not in general } else { # sum colsums within groups unlist(lapply( X = split.default(colsums, f, drop = FALSE), FUN = sum ), recursive = FALSE, use.names = FALSE) } } } ############################################ penLogLik <- function(theta, sd.corr, model, attributes=FALSE) { if(any(is.na(theta))) stop("NAs in regression parameters.", ADVICEONERROR) ## unpack model subset <- model$subset Y <- model$response[subset,,drop=FALSE] dimPsi <- model$nOverdisp dimRE <- model$nRE ## unpack random effects if (dimRE > 0) { pars <- splitParams(theta, model) randomEffects <- pars$random sd <- head(sd.corr, model$nVar) corr <- tail(sd.corr, model$nCorr) dimBlock <- model$rankRE[model$rankRE>0] Sigma.inv <- getSigmaInv(sd, corr, model$nVar, dimBlock) } ############################################################ ## evaluate dispersion psi <- sizeHHH(theta, model, subset = if 
(dimPsi > 1L) subset) # else scalar or NULL
    #psi might be numerically equal to 0 or Inf, in which case dnbinom
    #(called below via model$family) would return NaN (with a warning).
    #The case size=Inf rarely happens and
    #corresponds to a Poisson distribution. Currently this case is not handled
    #in order to have the usual non-degenerate case operate faster.
    #For size=0, log(dnbinom) equals -Inf for positive x or if (x=0 and mu=0), and
    #zero if x=0 and mu>0 and mu<Inf. Assuming the data contain at least one
    #positive count (and mu > 0, which is always true), we have that
    #sum(ll.units) = -Inf, hence:
    if (any(psi == 0)) return(-Inf)

    ## evaluate mean
    mu <- meanHHH(theta, model, total.only=TRUE)
    # if, numerically, mu=Inf, log(dnbinom) or log(dpois) both equal -Inf, hence:
    #if (any(is.infinite(mu))) return(-Inf)
    # however, since mu=Inf does not produce warnings below and this is a rare
    # case, it is faster to not include this conditional expression

    ## penalization term for random effects
    lpen <- if (dimRE==0) 0 else { # there are random effects
        ##-.5*(t(randomEffects)%*%Sigma.inv%*%randomEffects)
        ## the following implementation takes ~85% less computing time !
        -0.5 * c(crossprod(randomEffects, Sigma.inv) %*% randomEffects)
    }

    ## log-likelihood
    ll.units <- .colSums(model$family(Y,mu,psi),
                         length(subset), model$nUnits, na.rm=TRUE)

    ## penalized log-likelihood
    ll <- sum(ll.units) + lpen

    ## Done
    if (attributes) {
        attr(ll, "loglik") <- ll.units
        attr(ll, "logpen") <- lpen
    }
    ll
}

penScore <- function(theta, sd.corr, model)
{
  if(any(is.na(theta))) stop("NAs in regression parameters.", ADVICEONERROR)

  ## unpack model
  subset <- model$subset
  Y <- model$response[subset,,drop=FALSE]
  isNA <- model$isNA[subset,,drop=FALSE]
  dimPsi <- model$nOverdisp
  dimRE <- model$nRE
  term <- model$terms
  nGroups <- model$nGroups
  dimd <- model$nd

  ## unpack parameters
  pars <- splitParams(theta, model)
  if (dimRE > 0) {
    randomEffects <- pars$random
    sd <- head(sd.corr, model$nVar)
    corr <- tail(sd.corr, model$nCorr)
    dimBlock <- model$rankRE[model$rankRE>0]
    Sigma.inv <- getSigmaInv(sd, corr, model$nVar, dimBlock)
  }

  ## evaluate dispersion
  psi <- sizeHHH(theta, model, subset = if (dimPsi > 1L) subset) # else scalar or NULL

  ## evaluate mean
  mu <- meanHHH(theta, model)
  meanTotal <- mu$mean

  ############################################################

  ## helper function for derivatives
  derivHHH.factor <- if(dimPsi > 0L){ # NegBin
    psiPlusMu <- psi + meanTotal # also used below for calculation of grPsi
    psiYpsiMu <- (psi+Y) / psiPlusMu
    Y/meanTotal - psiYpsiMu
  } else { # Poisson
    Y/meanTotal - 1
  }
  derivHHH <- function (dmu) derivHHH.factor * dmu

  ## go through groups of parameters and compute the gradient of each component
  computeGrad <- function(mean.comp){
    grad.fe <- numeric(0L)
    grad.re <- numeric(0L)

    for(i in seq_len(nGroups)){
      comp <- term["offsetComp",i][[1]]
      Xit <- term["terms",i][[1]] # either 1 or a matrix of covariate values
      if(is.matrix(Xit)){
        Xit <- Xit[subset,,drop=FALSE]
      }
      dTheta <- derivHHH(mean.comp[[comp]]*Xit)
      dTheta[isNA] <- 0 # dTheta must not contain NA's (set NA's to 0)

      if(term["unitSpecific",i][[1]]){
        which <- term["which",i][[1]]
        dimi <- sum(which)
        if(dimi < model$nUnits)
            dTheta <- dTheta[,which,drop=FALSE]
        dTheta <- .colSums(dTheta, length(subset), dimi)
        grad.fe <- c(grad.fe,dTheta)
      } else if(term["random",i][[1]]){
        Z <- term["Z.intercept",i][[1]]
        "%m%" <- get(term["mult",i][[1]])
        dRTheta <- .colSums(dTheta %m% Z, length(subset), term["dim.re",i][[1]])
        grad.re <- c(grad.re, dRTheta)
        grad.fe <- c(grad.fe, sum(dTheta))
      } else{
        grad.fe <- c(grad.fe, sum(dTheta))
      }
    }
    list(fe=grad.fe, re=grad.re)
  }

  gradients <-
computeGrad(mu[c("epi.own","epi.neighbours","endemic")]) ## gradient for parameter vector of the neighbourhood weights grd <- if (dimd > 0L) { dneOffset <- model$offset[[2L]](pars$d, type="gradient") ##<- this is always a list (of length dimd) of matrices onescore.d <- function (dneoff) { dmudd <- mu$ne.exppred * dneoff[subset,,drop=FALSE] grd.terms <- derivHHH(dmudd) sum(grd.terms, na.rm=TRUE) } unlist(clapply(dneOffset, onescore.d), recursive=FALSE, use.names=FALSE) } else numeric(0L) ## gradient for overdispersion parameter psi grPsi <- if(dimPsi > 0L){ dPsiMat <- psi * (digamma(Y+psi) - digamma(psi) + log(psi) + 1 - log(psiPlusMu) - psiYpsiMu) .colSumsGrouped(dPsiMat, model$indexPsi) } else numeric(0L) ## add penalty to random effects gradient s.pen <- if(dimRE > 0) c(Sigma.inv %*% randomEffects) else numeric(0L) if(length(gradients$re) != length(s.pen)) stop("oops... lengths of s(b) and Sigma.inv %*% b do not match") grRandom <- c(gradients$re - s.pen) ## Done res <- c(gradients$fe, grd, grPsi, grRandom) res } penFisher <- function(theta, sd.corr, model, attributes=FALSE) { if(any(is.na(theta))) stop("NAs in regression parameters.", ADVICEONERROR) ## unpack model subset <- model$subset Y <- model$response[subset,,drop=FALSE] isNA <- model$isNA[subset,,drop=FALSE] dimPsi <- model$nOverdisp dimRE <- model$nRE term <- model$terms nGroups <- model$nGroups dimd <- model$nd dimFE <- model$nFE idxFE <- model$indexFE idxRE <- model$indexRE indexPsi <- model$indexPsi ## unpack parameters pars <- splitParams(theta, model) if (dimRE > 0) { randomEffects <- pars$random sd <- head(sd.corr, model$nVar) corr <- tail(sd.corr, model$nCorr) dimBlock <- model$rankRE[model$rankRE>0] Sigma.inv <- getSigmaInv(sd, corr, model$nVar, dimBlock) } ## evaluate dispersion psi <- sizeHHH(theta, model, subset = if (dimPsi > 1L) subset) # else scalar or NULL ## evaluate mean mu <- meanHHH(theta, model) meanTotal <- mu$mean ############################################################ ## helper functions for derivatives: if (dimPsi > 0L) { # negbin psiPlusY <- psi + Y psiPlusMu <- psi + meanTotal psiPlusMu2 <- psiPlusMu^2 psiYpsiMu <- psiPlusY / psiPlusMu psiYpsiMu2 <- psiPlusY / psiPlusMu2 deriv2HHH.fac1 <- psiYpsiMu2 - Y / (meanTotal^2) deriv2HHH.fac2 <- Y / meanTotal - psiYpsiMu ## psi-related derivatives dThetadPsi.fac <- psi * (psiYpsiMu2 - 1/psiPlusMu) dThetadPsi <- function(dTheta){ dThetadPsi.fac * dTheta } dPsiMat <- psi * (digamma(psiPlusY) - digamma(psi) + log(psi) + 1 - log(psiPlusMu) - psiYpsiMu) # as in penScore() dPsidPsiMat <- psi^2 * ( trigamma(psiPlusY) - trigamma(psi) + 1/psi - 1/psiPlusMu - (meanTotal-Y)/psiPlusMu2) + dPsiMat } else { # poisson deriv2HHH.fac1 <- -Y / (meanTotal^2) deriv2HHH.fac2 <- Y / meanTotal - 1 } deriv2HHH <- function(dTheta_l, dTheta_k, dTheta_lk){ dTheta_l * dTheta_k * deriv2HHH.fac1 + dTheta_lk * deriv2HHH.fac2 } ## go through groups of parameters and compute the hessian of each component computeFisher <- function(mean.comp){ # initialize hessian hessian.FE.FE <- matrix(0,dimFE,dimFE) hessian.FE.RE <- matrix(0,dimFE,dimRE) hessian.RE.RE <- matrix(0,dimRE,dimRE) hessian.FE.Psi <- matrix(0,dimFE,dimPsi) hessian.Psi.RE <- matrix(0,dimPsi,dimPsi+dimRE) # CAVE: contains PsiPsi and PsiRE hessian.FE.d <- matrix(0,dimFE,dimd) hessian.d.d <- matrix(0,dimd,dimd) hessian.d.Psi <- matrix(0,dimd,dimPsi) hessian.d.RE <- matrix(0,dimd,dimRE) ## derivatives wrt neighbourhood weight parameters d if (dimd > 0L) { phi.doff <- function (dneoff) { mu$ne.exppred * dneoff[subset,,drop=FALSE] } ## 
for type %in% c("gradient", "hessian"), model$offset[[2L]] always ## returns a list of matrices. It has length(pars$d) elements for the ## gradient and length(pars$d)*(length(pars$d)+1)/2 for the hessian. dneOffset <- model$offset[[2L]](pars$d, type="gradient") dmudd <- lapply(dneOffset, phi.doff) d2neOffset <- model$offset[[2L]](pars$d, type="hessian") d2mudddd <- lapply(d2neOffset, phi.doff) ## d l(theta,x) /dd dd (fill only upper triangle, BY ROW) ij <- 0L for (i in seq_len(dimd)) { for (j in i:dimd) { ij <- ij + 1L #= dimd*(i-1) + j - (i-1)*i/2 # for j >= i ## d2mudddd contains upper triangle by row (=lowertri by column) d2ij <- deriv2HHH(dmudd[[i]], dmudd[[j]], d2mudddd[[ij]]) hessian.d.d[i,j] <- sum(d2ij, na.rm=TRUE) } } } if (dimPsi > 0L) { ## d l(theta,x) /dpsi dpsi dPsidPsi <- .colSumsGrouped(dPsidPsiMat, indexPsi) hessian.Psi.RE[,seq_len(dimPsi)] <- if (dimPsi == 1L) { dPsidPsi } else { diag(dPsidPsi) } ## d l(theta) / dd dpsi for (i in seq_len(dimd)) { # will not be run if dimd==0 ## dPsi.i <- colSums(dThetadPsi(dmudd[[i]]),na.rm=TRUE) ## hessian.d.Psi[i,] <- if(dimPsi==1L) sum(dPsi.i) else dPsi.i[order(indexPsi)] hessian.d.Psi[i,] <- .colSumsGrouped(dThetadPsi(dmudd[[i]]), indexPsi) } } ## i.fixed <- function(){ if(random.j){ Z.j <- term["Z.intercept",j][[1]] "%mj%" <- get(term["mult",j][[1]]) hessian.FE.RE[idxFE==i,idxRE==j] <<- colSums(didj %mj% Z.j) ##<- didj must not contain NA's (all NA's set to 0) dIJ <- sum(didj,na.rm=TRUE) # fixed on 24/09/2012 } else if(unitSpecific.j){ dIJ <- colSums(didj,na.rm=TRUE)[ which.j ] } else { dIJ <- sum(didj,na.rm=TRUE) } hessian.FE.FE[idxFE==i,idxFE==j] <<- dIJ } ## i.unit <- function(){ if(random.j){ Z.j <- term["Z.intercept",j][[1]] "%mj%" <- get(term["mult",j][[1]]) dIJ <- colSums(didj %mj% Z.j) # didj must not contain NA's (all NA's set to 0) hessian.FE.RE[idxFE==i,idxRE==j] <<- diag(dIJ)[ which.i, ] # FIXME: does not work if type="car" dIJ <- dIJ[ which.i ] # added which.i subsetting in r432 } else if(unitSpecific.j){ dIJ <- diag(colSums(didj))[ which.i, which.j ] } else { dIJ <- colSums(didj)[ which.i ] } hessian.FE.FE[idxFE==i,idxFE==j] <<- dIJ } ## i.random <- function(){ if(random.j){ Z.j <- term["Z.intercept",j][[1]] "%mj%" <- get(term["mult",j][[1]]) hessian.FE.RE[idxFE==i,idxRE==j] <<- colSums(didj %mj% Z.j) if (j != i) # otherwise redundant (duplicate) hessian.FE.RE[idxFE==j,idxRE==i] <<- colSums(didj %m% Z.i) if(length(Z.j)==1 & length(Z.i)==1){ # both iid Z <- Z.i*Z.j hessian.RE.RE[which(idxRE==i),idxRE==j] <<- diag(colSums( didj %m% Z)) } else if(length(Z.j)==1 & length(Z.i)>1){ #* Z.j <- diag(nrow=model$nUnits) for(k in seq_len(ncol(Z.j))){ Z <- Z.i*Z.j[,k] hessian.RE.RE[idxRE==i,which(idxRE==j)[k]] <<- colSums( didj %m% Z) } } else if(length(Z.j)>1 & length(Z.i)==1){ #* Z.i <- diag(nrow=model$nUnits) for(k in seq_len(ncol(Z.i))){ Z <- Z.i[,k]*Z.j hessian.RE.RE[which(idxRE==i)[k],idxRE==j] <<- colSums( didj %mj% Z) } } else { # both CAR for(k in seq_len(ncol(Z.j))){ Z <- Z.i*Z.j[,k] hessian.RE.RE[which(idxRE==i)[k],idxRE==j] <<- colSums( didj %m% Z) } } dIJ <- sum(didj) } else if(unitSpecific.j){ dIJ <- colSums(didj %m% Z.i) hessian.FE.RE[idxFE==j,idxRE==i] <<- diag(dIJ)[ which.j, ] dIJ <- dIJ[ which.j ] } else { hessian.FE.RE[idxFE==j,idxRE==i] <<- colSums(didj %m% Z.i) dIJ <- sum(didj) } hessian.FE.FE[idxFE==i,idxFE==j] <<- dIJ } ##---------------------------------------------- for(i in seq_len(nGroups)){ #go through rows of hessian # parameter group belongs to which components comp.i <- term["offsetComp",i][[1]] # get 
covariate value Xit <- term["terms",i][[1]] # either 1 or a matrix with values if(is.matrix(Xit)){ Xit <- Xit[subset,,drop=FALSE] } m.Xit <- mean.comp[[comp.i]] * Xit random.i <- term["random",i][[1]] unitSpecific.i <- term["unitSpecific",i][[1]] ## fill psi-related entries and select fillHess function if (random.i) { Z.i <- term["Z.intercept",i][[1]] # Z.i and %m% (of i) determined here "%m%" <- get(term["mult",i][[1]]) # will also be used in j's for loop fillHess <- i.random if (dimPsi > 0L) { dThetadPsiMat <- dThetadPsi(m.Xit) hessian.FE.Psi[idxFE==i,] <- .colSumsGrouped(dThetadPsiMat, indexPsi) dThetadPsi.i <- .colSums(dThetadPsiMat %m% Z.i, length(subset), term["dim.re",i][[1]], na.rm=TRUE) if (dimPsi==1L) { hessian.Psi.RE[,dimPsi + which(idxRE==i)] <- dThetadPsi.i } else { hessian.Psi.RE[cbind(indexPsi,dimPsi + which(idxRE==i))] <- dThetadPsi.i ## FIXME: does not work with type="car" } } } else if (unitSpecific.i) { which.i <- term["which",i][[1]] fillHess <- i.unit if (dimPsi > 0L) { dThetadPsi.i <- .colSums(dThetadPsi(m.Xit), length(subset), model$nUnits, na.rm=TRUE) if (dimPsi==1L) { hessian.FE.Psi[idxFE==i,] <- dThetadPsi.i[which.i] } else { hessian.FE.Psi[cbind(which(idxFE==i),indexPsi[which.i])] <- dThetadPsi.i[which.i] } } } else { fillHess <- i.fixed if (dimPsi > 0L) { ## dPsi <- colSums(dThetadPsi(m.Xit),na.rm=TRUE) ## hessian.FE.Psi[idxFE==i,] <- if (dimPsi==1L) sum(dPsi) else dPsi[order(indexPsi)] hessian.FE.Psi[idxFE==i,] <- .colSumsGrouped(dThetadPsi(m.Xit), indexPsi) } } ## fill pars$d-related entries for (j in seq_len(dimd)) { # will not be run if dimd==0 didd <- deriv2HHH(dTheta_l = m.Xit, dTheta_k = dmudd[[j]], dTheta_lk = if (comp.i == 2) dmudd[[j]] * Xit else 0) didd[isNA] <- 0 hessian.FE.d[idxFE==i,j] <- if (unitSpecific.i) { colSums(didd,na.rm=TRUE)[which.i] } else sum(didd) if (random.i) hessian.d.RE[j,idxRE==i] <- colSums(didd %m% Z.i) } ## fill other (non-psi, non-d) entries (only upper triangle, j >= i!) 
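## (only the upper triangle, j >= i, is filled in the loop below; the lower
## triangle is completed at the end of computeFisher() via
## fisher <- -(hessian + t(hessian)) with the diagonal restored)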
for(j in i:nGroups){ comp.j <- term["offsetComp",j][[1]] Xjt <- term["terms",j][[1]] # either 1 or a matrix with values if(is.matrix(Xjt)){ Xjt <- Xjt[subset,,drop=FALSE] } # if param i and j do not belong to the same component, d(i)d(j)=0 m.Xit.Xjt <- if (comp.i != comp.j) 0 else m.Xit * Xjt didj <- deriv2HHH(dTheta_l = m.Xit, dTheta_k = mean.comp[[comp.j]]*Xjt, dTheta_lk = m.Xit.Xjt) didj[isNA] <- 0 random.j <- term["random",j][[1]] unitSpecific.j <- term["unitSpecific",j][[1]] which.j <- term["which",j][[1]] fillHess() } } ######################################################### ## fill lower triangle of hessians and combine them ######################################################## hessian <- rbind(cbind(hessian.FE.FE,hessian.FE.d,hessian.FE.Psi,hessian.FE.RE), cbind(matrix(0,dimd,dimFE),hessian.d.d,hessian.d.Psi,hessian.d.RE), cbind(matrix(0,dimPsi,dimFE+dimd),hessian.Psi.RE), cbind(matrix(0,dimRE,dimFE+dimd+dimPsi),hessian.RE.RE)) hessian[lower.tri(hessian)] <- 0 # CAR blocks in hessian.RE.RE were fully filled diagHessian <- diag(hessian) fisher <- -(hessian + t(hessian)) diag(fisher) <- -diagHessian return(fisher) } fisher <- computeFisher(mu[c("epi.own","epi.neighbours","endemic")]) ## add penalty for random effects pen <- matrix(0, length(theta), length(theta)) Fpen <- if(dimRE > 0){ thetaIdxRE <- seq.int(to=length(theta), length.out=dimRE) pen[thetaIdxRE,thetaIdxRE] <- Sigma.inv fisher + pen } else fisher ## Done if(attributes){ attr(Fpen, "fisher") <- fisher attr(Fpen, "pen") <- pen } Fpen } ################################################# sqrtOf1pr2 <- function(r){ sqrt(1+r^2) } getSigmai <- function(sd, # vector of length dim with log-stdev's correlation, # vector of length dim with correlation # parameters, 0-length if uncorrelated dim ){ if(dim==0) return(NULL) Sigma.i <- if (length(correlation) == 0L) diag(exp(2*sd), dim) else { D <- diag(exp(sd), dim) L <- diag(nrow=dim) L[2,1:2] <- c(correlation[1],1)/sqrtOf1pr2(correlation[1]) if (dim==3) { L[3,] <- c(correlation[2:3],1)/sqrtOf1pr2(correlation[2]) L[3,2:3] <- L[3,2:3]/sqrtOf1pr2(correlation[3]) } D %*% tcrossprod(L) %*% D # ~75% quicker than D %*% L %*% t(L) %*% D } return(Sigma.i) } getSigmaiInv <- function(sd, # vector of length dim with log-stdev's correlation, # vector of length dim with correlation # parameters, 0-length if uncorrelated dim ){ if(dim==0) return(NULL) Sigma.i.inv <- if (length(correlation) == 0L) diag(exp(-2*sd), dim) else { r <- correlation Dinv <- diag(exp(-sd), dim) L <- diag(nrow=dim) L[2,1:2] <- c(-r[1],sqrtOf1pr2(r[1])) if(dim==3){ L[3,1] <- r[1]*r[3]-r[2]*sqrtOf1pr2(r[3]) L[3,2] <- -L[2,2]*r[3] L[3,3] <- sqrtOf1pr2(r[2])*sqrtOf1pr2(r[3]) } Dinv %*% crossprod(L) %*% Dinv # ~75% quicker than Dinv %*% t(L) %*% L %*% Dinv } return(Sigma.i.inv) } #* allow blockdiagonal matrix blockdiag(A,B), with A=kronecker product, B=diagonal matrix? 
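## A quick sanity check of the parametrization above (not run; hypothetical
## parameter values): getSigmai() and getSigmaiInv() should be inverses of
## each other, e.g., for dim = 2 with log-sd's log(c(0.5, 2)) and a single
## correlation parameter 0.3:
##   all.equal(getSigmai(log(c(0.5, 2)), 0.3, 2) %*%
##             getSigmaiInv(log(c(0.5, 2)), 0.3, 2), diag(2))  # TRUE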
getSigmaInv <- function(sd, correlation, dimSigma, dimBlocks, SigmaInvi=NULL){ if(is.null(SigmaInvi)){ SigmaInvi <- getSigmaiInv(sd,correlation,dimSigma) } if(length(unique(dimBlocks))==1){ # kronecker product formulation possible kronecker(SigmaInvi,diag(nrow=dimBlocks[1])) # the result is a symmetric matrix if SigmaInvi is symmetric } else { # kronecker product not possible -> correlation=0 diag(rep.int(diag(SigmaInvi),dimBlocks)) } } getSigma <- function(sd, correlation, dimSigma, dimBlocks, Sigmai=NULL){ if(is.null(Sigmai)){ Sigmai <- getSigmai(sd,correlation,dimSigma) } if(length(unique(dimBlocks))==1){ # kronecker product formulation possible kronecker(Sigmai,diag(nrow=dimBlocks[1])) # the result is a symmetric matrix if Sigmai is symmetric } else { # kronecker product not possible -> correlation=0 diag(rep.int(diag(Sigmai),dimBlocks)) } } ## Approximate marginal likelihood for variance components ## Parameter and model unpacking at the beginning (up to the ###...-line) is ## identical in marScore() and marFisher() marLogLik <- function(sd.corr, theta, model, fisher.unpen=NULL, verbose=FALSE){ dimVar <- model$nVar dimCorr <- model$nCorr dimSigma <- model$nSigma if(dimSigma == 0){ return(-Inf) } if(any(is.na(sd.corr))){ # in order to avoid nlminb from running into an infinite loop (cf. bug # report #15052), we have to emergency stop() in this case. # As of R 2.15.2, nlminb() throws an error if it receives NA from # any of the supplied functions. stop("NAs in variance parameters.", ADVICEONERROR) } sd <- head(sd.corr,dimVar) corr <- tail(sd.corr,dimCorr) pars <- splitParams(theta,model) randomEffects <- pars$random dimRE <- model$nRE dimBlocks <- model$rankRE[model$rankRE>0] Sigma.inv <- getSigmaInv(sd, corr, dimVar, dimBlocks) # if not given, calculate unpenalized part of fisher info if(is.null(fisher.unpen)){ fisher.unpen <- attr(penFisher(theta, sd.corr, model,attributes=TRUE), "fisher") } # add penalty to fisher fisher <- fisher.unpen thetaIdxRE <- seq.int(to=length(theta), length.out=dimRE) fisher[thetaIdxRE,thetaIdxRE] <- fisher[thetaIdxRE,thetaIdxRE] + Sigma.inv ############################################################ # penalized part of likelihood # compute -0.5*log(|Sigma|) - 0.5*RE' %*% Sigma.inv %*% RE # where -0.5*log(|Sigma|) = -dim(RE_i)*[Sum(sd_i) -0.5*log(1+corr_i^2)] ##lpen <- -0.5*(t(randomEffects)%*%Sigma.inv%*%randomEffects) ## the following implementation takes ~85% less computing time ! 
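## (crossprod(b, S) %*% b computes the same quadratic form b'Sb as
## t(b) %*% S %*% b but avoids the explicit transpose; a toy check with
## hypothetical values: b <- rnorm(5); S <- diag(5);
## all.equal(c(t(b) %*% S %*% b), c(crossprod(b, S) %*% b)) gives TRUE)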
lpen <- -0.5 * c(crossprod(randomEffects, Sigma.inv) %*% randomEffects) loglik.pen <- sum(-dimBlocks*sd) + lpen if(dimCorr >0){ loglik.pen <- loglik.pen + 0.5*dimBlocks[1]*sum(log(1+corr^2)) } ## approximate marginal likelihood logdetfisher <- determinant(fisher,logarithm=TRUE)$modulus lmarg <- loglik.pen -0.5*c(logdetfisher) return(lmarg) } marScore <- function(sd.corr, theta, model, fisher.unpen=NULL, verbose=FALSE){ dimVar <- model$nVar dimCorr <- model$nCorr dimSigma <- model$nSigma if(dimSigma == 0){ return(numeric(0L)) } if(any(is.na(sd.corr))) stop("NAs in variance parameters.", ADVICEONERROR) sd <- head(sd.corr,dimVar) corr <- tail(sd.corr,dimCorr) pars <- splitParams(theta,model) randomEffects <- pars$random dimRE <- model$nRE dimBlocks <- model$rankRE[model$rankRE>0] Sigma.inv <- getSigmaInv(sd, corr, dimVar, dimBlocks) # if not given, calculate unpenalized part of fisher info if(is.null(fisher.unpen)){ fisher.unpen <- attr(penFisher(theta, sd.corr, model,attributes=TRUE), "fisher") } # add penalty to fisher fisher <- fisher.unpen thetaIdxRE <- seq.int(to=length(theta), length.out=dimRE) fisher[thetaIdxRE,thetaIdxRE] <- fisher[thetaIdxRE,thetaIdxRE] + Sigma.inv # inverse of penalized fisher info F.inv <- try(solve(fisher),silent=TRUE) if(inherits(F.inv,"try-error")){ if(verbose) cat(" WARNING (in marScore): penalized Fisher is singular!\n") #return(rep.int(0,dimSigma)) ## continuing with the generalized inverse often works, otherwise we would ## have to stop() here, because nlminb() cannot deal with NA's F.inv <- ginv(fisher) } F.inv.RE <- F.inv[thetaIdxRE,thetaIdxRE] ############################################################ ## compute marginal score and fisher for each variance component # initialize score and fisher info marg.score <- rep.int(NA_real_,dimSigma) ## specify functions for derivatives deriv1 <- switch(dimVar, dSigma1, dSigma2, dSigma3) d1Sigma <- deriv1(sd, corr) Sigmai.inv <- getSigmaiInv(sd, corr, dimVar) # derivation of log determinant # -.5*tr(Sigma^-1 %*% dSigma/ds) = -R (for sd.i) # = R*corr.i/(corr.i^2+1) (for corr.i) d1logDet <- c(-dimBlocks,dimBlocks[1]*corr/(corr^2+1)) # go through all variance parameters for(i in seq_len(dimSigma)){ dSi <- -Sigmai.inv %*% d1Sigma[,,i] %*% Sigmai.inv # CAVE: sign dS.i <- getSigma(dimSigma=dimVar,dimBlocks=dimBlocks,Sigmai=dSi) #dlpen.i <- -0.5* t(randomEffects) %*% dS.i %*% randomEffects # ~85% faster implementation using crossprod() avoiding "slow" t(): dlpen.i <- -0.5 * c(crossprod(randomEffects, dS.i) %*% randomEffects) #tr.d1logDetF <- sum(diag(F.inv.RE %*% dS.i)) tr.d1logDetF <- sum(F.inv.RE * dS.i) # since dS.i is symmetric #<- needs 1/100 (!) 
of the computation time of sum(diag(F.inv.RE %*% dS.i)) marg.score[i] <- d1logDet[i] + dlpen.i - 0.5 * tr.d1logDetF } return(marg.score) } marFisher <- function(sd.corr, theta, model, fisher.unpen=NULL, verbose=FALSE){ dimVar <- model$nVar dimCorr <- model$nCorr dimSigma <- model$nSigma if(dimSigma == 0){ return(matrix(numeric(0L),0L,0L)) } if(any(is.na(sd.corr))) stop("NAs in variance parameters.", ADVICEONERROR) sd <- head(sd.corr,dimVar) corr <- tail(sd.corr,dimCorr) pars <- splitParams(theta,model) randomEffects <- pars$random dimRE <- model$nRE dimBlocks <- model$rankRE[model$rankRE>0] Sigma.inv <- getSigmaInv(sd, corr, dimVar, dimBlocks) # if not given, calculate unpenalized part of fisher info if(is.null(fisher.unpen)){ fisher.unpen <- attr(penFisher(theta, sd.corr, model,attributes=TRUE), "fisher") } # add penalty to fisher fisher <- fisher.unpen thetaIdxRE <- seq.int(to=length(theta), length.out=dimRE) fisher[thetaIdxRE,thetaIdxRE] <- fisher[thetaIdxRE,thetaIdxRE] + Sigma.inv # inverse of penalized fisher info F.inv <- try(solve(fisher),silent=TRUE) if(inherits(F.inv,"try-error")){ if(verbose) cat(" WARNING (in marFisher): penalized Fisher is singular!\n") #return(matrix(Inf,dimSigma,dimSigma)) ## continuing with the generalized inverse often works, otherwise we would ## have to stop() here, because nlminb() cannot deal with NA's F.inv <- ginv(fisher) } F.inv.RE <- F.inv[thetaIdxRE,thetaIdxRE] ## declare F.inv.RE as a symmetric matrix? ##F.inv.RE <- new("dsyMatrix", Dim = dim(F.inv.RE), x = c(F.inv.RE)) ## -> no, F.inv.RE %*% dS.i becomes actually slower (dS.i is a "sparseMatrix") ############################################################ marg.hesse <- matrix(NA_real_,dimSigma,dimSigma) ## specify functions for derivatives deriv1 <- switch(dimVar,dSigma1, dSigma2, dSigma3) deriv2 <- switch(dimVar,d2Sigma1, d2Sigma2, d2Sigma3) d1Sigma <- deriv1(sd, corr) d2Sigma <- deriv2(sd, corr, d1Sigma) Sigmai.inv <- getSigmaiInv(sd, corr, dimVar) # 2nd derivatives of log determinant d2logDet <- diag(c(rep.int(0,dimVar),-dimBlocks[1]*(corr^2-1)/(corr^2+1)^2),dimSigma) # function to convert dS.i and dS.j matrices to sparse matrix objects dS2sparse <- if (dimCorr > 0) function (x) { forceSymmetric(as(x, "sparseMatrix")) # dS.i & dS.j are symmetric } else function (x) { #as(x, "diagonalMatrix") new("ddiMatrix", Dim = dim(x), diag = "N", x = diag(x)) } # go through all variance parameters for(i in seq_len(dimSigma)){ # compute first derivative of the penalized Fisher info (-> of Sigma^-1) # with respect to the i-th element of Sigma (= kronecker prod. of Sigmai and identity matrix) # Harville Ch15, Eq. 
8.15: (d/d i)S^-1 = - S^-1 * (d/d i) S * S^-1 SigmaiInv.d1i <- Sigmai.inv %*% d1Sigma[,,i] dSi <- -SigmaiInv.d1i %*% Sigmai.inv dS.i <- getSigma(dimSigma=dimVar,dimBlocks=dimBlocks,Sigmai=dSi) dS.i <- dS2sparse(dS.i) # compute second derivatives for(j in i:dimSigma){ # compute (d/d j) S^-1 SigmaiInv.d1j <- Sigmai.inv %*% d1Sigma[,,j] dSj <- -SigmaiInv.d1j %*% Sigmai.inv dS.j <- getSigma(dimSigma=dimVar,dimBlocks=dimBlocks,Sigmai=dSj) dS.j <- dS2sparse(dS.j) # compute (d/di dj) S^-1 #dS.ij <- getSigma(dimSigma=dimVar,dimBlocks=dimBlocks, # Sigmai=d2Sigma[[i]][,,j]) # compute second derivatives of Sigma^-1 (Harville Ch15, Eq 9.2) d2S <- (- Sigmai.inv %*% d2Sigma[[i]][,,j] + SigmaiInv.d1i %*% SigmaiInv.d1j + SigmaiInv.d1j %*% SigmaiInv.d1i) %*% Sigmai.inv dSij <- getSigma(dimSigma=dimVar,dimBlocks=dimBlocks,Sigmai=d2S) #d2lpen.i <- -0.5* t(randomEffects) %*% dSij %*% randomEffects # ~85% faster implementation using crossprod() avoiding "slow" t(): d2lpen.i <- -0.5 * c(crossprod(randomEffects, dSij) %*% randomEffects) # compute second derivative of log-determinant of penFisher mpart1 <- dS.j %*% F.inv.RE # 3 times as fast as the other way round mpart2 <- dS.i %*% F.inv.RE mpart <- mpart1 %*% mpart2 ## speed-ups: - tr(F.inv.RE %*% dSij) simply equals sum(F.inv.RE * dSij) ## - accelerate matrix product by sparse matrices dS.i and dS.j ## - use cyclic permutation of trace: ## tr(F.inv.RE %*% dS.j %*% F.inv.RE %*% dS.i) = ## tr(dS.j %*% F.inv.RE %*% dS.i %*% F.inv.RE) tr.d2logDetF <- -sum(Matrix::diag(mpart)) + sum(F.inv.RE * dSij) marg.hesse[i,j] <- marg.hesse[j,i] <- d2logDet[i,j] + d2lpen.i - 0.5 * tr.d2logDetF } } marg.Fisher <- as.matrix(-marg.hesse) return(marg.Fisher) } ## first and second derivatives of the covariance matrix dSigma1 <- function(sd,corr){ derivs <- array(2*exp(2*sd), c(1,1,1)) return(derivs) } #d1: result of dSigma1 d2Sigma1 <- function(sd,corr,d1){ return(list(dsd1=2*d1)) } dSigma2 <- function(sd,corr){ derivs <- array(0,c(2,2,3)) dSigma <- diag(2*exp(2*sd)) if(length(corr)>0){ dSigma[1,2] <- dSigma[2,1] <- exp(sum(sd[1:2]))*corr[1]/sqrtOf1pr2(corr[1]) # derivative of corr_1 derivs[2,1,3] <- derivs[1,2,3] <- exp(sum(sd[1:2]))/(sqrtOf1pr2(corr[1])^3) } derivs[,,1:2] <- dSigma # derivative of sd_1 derivs[2,2,1] <- 0 # derivative of sd_2 derivs[1,1,2] <- 0 return(derivs) } d2Sigma2 <- function(sd,corr, d1){ derivs <- array(0,c(2,2,3)) result <- list(dsd1=d1, dsd2=derivs, dcorr1=derivs) result$dsd1[1,1,1] <- 2*d1[1,1,1] result$dsd1[2,2,2] <- 0 result$dsd2[,,2:3]<- d1[,,2:3] result$dsd2[2,2,2] <- 2*d1[2,2,2] if(length(corr)>0){ result$dcorr1[2,1,3] <- result$dcorr1[1,2,3] <- -(3*corr[1]*exp(sum(sd[1:2])))/(sqrtOf1pr2(corr[1])^5) } return(result) } dSigma3 <- function(sd,corr){ derivs <- array(0,c(3,3,6)) dSigma <- diag(2*exp(2*sd)) # if(length(corr)>0){ dSigma[1,2] <- dSigma[2,1] <- exp(sum(sd[1:2]))*corr[1]/sqrtOf1pr2(corr[1]) # dSigma[1,3] <- dSigma[3,1] <- exp(sum(sd[c(1,3)]))*corr[2]/sqrtOf1pr2(corr[2]) # dSigma[2,3] <- dSigma[3,2] <- exp(sum(sd[c(2,3)]))*(corr[1]*corr[2]*sqrtOf1pr2(corr[3])+corr[3])/prod(sqrtOf1pr2(corr[1:3]))# # derivative of corr_1 derivs[2,1,4] <- derivs[1,2,4] <- exp(sum(sd[1:2]))/(sqrtOf1pr2(corr[1])^3) derivs[3,2,4] <- derivs[2,3,4] <-(exp(sum(sd[2:3]))*(corr[2]*sqrtOf1pr2(corr[3])-prod(corr[c(1,3)])))/ (prod(sqrtOf1pr2(corr[2:3]))*(sqrtOf1pr2(corr[1])^3))# # derivative of corr_2 derivs[3,1,5] <- derivs[1,3,5] <- exp(sum(sd[c(3,1)]))/(sqrtOf1pr2(corr[2])^3)# derivs[3,2,5] <- derivs[2,3,5] <- 
(exp(sum(sd[2:3]))*(corr[1]*sqrtOf1pr2(corr[3])-prod(corr[c(2,3)])))/ (prod(sqrtOf1pr2(corr[c(1,3)]))*(sqrtOf1pr2(corr[2])^3)) # # derivative of corr_3 derivs[3,2,6] <- derivs[2,3,6] <- exp(sum(sd[2:3]))/ (prod(sqrtOf1pr2(corr[c(1,2)]))*(sqrtOf1pr2(corr[3])^3)) } derivs[,,1:3] <- dSigma # derivative of sd_1 derivs[2:3,2:3,1] <- 0 # derivative of sd_2 derivs[1,c(1,3),2] <- derivs[3,c(1,3),2] <- 0 # derivative of sd_3 derivs[1:2,1:2,3] <- 0 return(derivs) } d2Sigma3 <- function(sd,corr, d1) { derivs <- array(0,c(3,3,6)) result <- list(dsd1=d1, dsd2=derivs, dsd3=derivs, dcorr1=derivs, dcorr2=derivs, dcorr3=derivs) result$dsd1[1,1,1] <- 2*d1[1,1,1] result$dsd1[2,2:3,2] <- result$dsd1[3,2,2] <- 0 result$dsd1[2:3,2:3,3] <- 0 # result$dsd2[,,2]<- d1[,,2] result$dsd2[2,2,2] <- 2*d1[2,2,2] result$dsd2[3,2,3] <- result$dsd2[2,3,3] <- d1[3,2,3]# result$dsd3[,,3]<- d1[,,3] result$dsd3[3,3,3] <- 2*d1[3,3,3]# if (length(corr)>0) { result$dsd1[2:3,2:3,4] <- 0 result$dsd1[2:3,2:3,5] <- 0 result$dsd1[,,6] <- 0 result$dsd2[,,c(4,6)] <- d1[,,c(4,6)] result$dsd2[3,2,5] <- result$dsd2[2,3,5] <- d1[3,2,5] result$dsd3[3,2,4] <- result$dsd3[2,3,4] <- d1[3,2,4] result$dsd3[,,c(5,6)] <- d1[,,c(5,6)] # derivative of corr_1 result$dcorr1[2,1,4] <- result$dcorr1[1,2,4] <- -(exp(sum(sd[1:2]))*3*corr[1])/(sqrtOf1pr2(corr[1])^5) # result$dcorr1[3,2,4] <- result$dcorr1[2,3,4] <- -(exp(sum(sd[2:3]))*(corr[1]*(3*corr[2]*sqrtOf1pr2(corr[3])-2*prod(corr[c(1,3)])) + corr[3]) )/ (prod(sqrtOf1pr2(corr[2:3]))*(sqrtOf1pr2(corr[1])^5)) # result$dcorr1[3,2,5] <- result$dcorr1[2,3,5] <- (exp(sum(sd[2:3]))*(sqrtOf1pr2(corr[3])+prod(corr[1:3])))/ (prod(sqrtOf1pr2(corr[c(1,2)])^3)*sqrtOf1pr2(corr[3])) result$dcorr1[3,2,6] <- result$dcorr1[2,3,6] <- -(exp(sum(sd[2:3]))*corr[1])/ (prod(sqrtOf1pr2(corr[c(1,3)])^3)*sqrtOf1pr2(corr[2])) # derivative of corr_2 result$dcorr2[3,1,5] <- result$dcorr2[1,3,5] <- -(exp(sum(sd[c(3,1)]))*3*corr[2])/(sqrtOf1pr2(corr[2])^5) result$dcorr2[3,2,5] <- result$dcorr2[2,3,5] <- -(exp(sum(sd[2:3]))*(corr[2]*(3*corr[1]*sqrtOf1pr2(corr[3])-2*prod(corr[c(2,3)])) + corr[3]) )/ (prod(sqrtOf1pr2(corr[c(1,3)]))*(sqrtOf1pr2(corr[2])^5)) result$dcorr2[3,2,6] <- result$dcorr2[2,3,6] <- -exp(sum(sd[2:3]))*corr[2] / # SM @ 14/05/13: formula fixed, marFisher() # and hhh4()$Sigma.cov[5,6] are now correct (prod(sqrtOf1pr2(corr[c(2,3)])^3)*sqrtOf1pr2(corr[1])) # derivative of corr_3 result$dcorr3[3,2,6] <- result$dcorr3[2,3,6] <- -(exp(sum(sd[2:3]))*3*corr[3])/ (prod(sqrtOf1pr2(corr[c(1,2)]))*sqrtOf1pr2(corr[3])^5) } return(result) } ### Various optimizers updateParams_nlminb <- function (start, ll, sc, fi, ..., control) { lower <- control[["lower"]]; control$lower <- NULL upper <- control[["upper"]]; control$upper <- NULL scale <- control[["scale"]]; control$scale <- NULL negll <- function (x, ...) -ll(x, ...) negsc <- function (x, ...) -sc(x, ...) ## run the optimization res <- nlminb(start, negll, gradient=negsc, hessian=fi, ..., scale=scale, control=control, lower=lower, upper=upper) if (any(is.finite(c(lower, upper)))) checkParBounds(res$par, lower, upper) ## Done list(par=res$par, ll=-res$objective, rel.tol=getRelDiff(res$par, start), convergence=res$convergence, message=res$message) } updateParams_nr <- function (start, ll, sc, fi, ..., control) { ## objective function llscfi <- function (x, ...) { loglik <- ll(x, ...) attr(loglik, "score") <- sc(x, ...) attr(loglik, "fisher") <- fi(x, ...) 
loglik } ## run the optimization res <- newtonRaphson(start, llscfi, ..., control=control, verbose=control$verbose) ## Done list(par=res$coefficients, ll=res$loglikelihood, rel.tol=getRelDiff(res$coefficients, start), convergence=res$convergence, message=res$message) } updateParams_nlm <- function (start, ll, sc, fi, ..., control) { ## objective function negllscfi <- function (x, ...) { negloglik <- -ll(x, ...) attr(negloglik, "gradient") <- -sc(x, ...) attr(negloglik, "hessian") <- fi(x, ...) negloglik } ## run the optimization res <- do.call("nlm", args=c(alist(p=start, f=negllscfi, ...), control)) ## Done list(par=res$estimate, ll=-res$minimum, rel.tol=getRelDiff(res$estimate, start), convergence=as.numeric(res$code>2), message=res$message) ## nlm returns convergence status in $code, 1-2 indicate convergence, ## 3-5 indicate non-convergence } updateParams_optim <- function (start, ll, sc, fi, ..., control) { ## Note: "fi" is not used in optim method <- control[["method"]]; control$method <- NULL lower <- control[["lower"]]; control$lower <- NULL upper <- control[["upper"]]; control$upper <- NULL res <- optim(start, ll, sc, ..., # Note: control$fnscale is negative method=method, lower=lower, upper=upper, control=control) if (any(is.finite(c(lower, upper)))) checkParBounds(res$par, lower, upper) ## Done list(par=res$par, ll=res$value, rel.tol=getRelDiff(res$par, start), convergence=res$convergence, message=res$message) } ## Calculate relative parameter change criterion. ## We use a weaker criterion than the maximum relative parameter change ## max(abs(sd.corr.new/sd.corr - 1)) getRelDiff <- function (final, start) max(abs(final - start)) / max(abs(start)) checkParBounds <- function (par, lower, upper) { if (is.null(names(par))) names(par) <- seq_along(par) if (any(atl <- par <= lower)) cat(" WARNING: parameters reached lower bounds:", paste(names(par)[atl], par[atl], sep="=", collapse=", "), "\n") if (any(atu <- par >= upper)) cat(" WARNING: parameters reached upper bounds:", paste(names(par)[atu], par[atu], sep="=", collapse=", "), "\n") } ## default control arguments for updates defaultOptimControl <- function (method = "nlminb", lower = -Inf, upper = Inf, iter.max = NULL, verbose = 0) { if (is.null(iter.max)) iter.max <- 20 + 280*(method=="Nelder-Mead") lowVerbose <- verbose %in% 0:2 luOptimMethod <- method %in% c("Brent", "L-BFGS-B") defaults.nr <- list(scoreTol=1e-5, paramTol=1e-7, F.inc=0.01, stepFrac=0.5, niter=iter.max, verbose=verbose) defaults.nlminb <- list(iter.max=iter.max, scale=1, lower=lower, upper=upper, trace=if(lowVerbose) c(0,0,5)[verbose+1] else 1) defaults.nlm <- list(iterlim=iter.max, check.analyticals=FALSE, print.level=if(lowVerbose) c(0,0,1)[verbose+1] else 2) defaults.optim <- list(maxit=iter.max, fnscale=-1, trace=max(0,verbose-1), lower=if (luOptimMethod) lower else -Inf, upper=if (luOptimMethod) upper else Inf) switch(method, "nr" = defaults.nr, "nlm" = defaults.nlm, "nlminb" = defaults.nlminb, defaults.optim) } setOptimControl <- function (method, control, ...) { defaults <- defaultOptimControl(method, ...) 
cntrl <- modifyList(defaults, control) ## ensure fnscale < 0 (optim performs minimization) if (!is.null(cntrl$fnscale)) { # i.e., using optim() cntrl$method <- method # append method to control list if (cntrl$fnscale > 0) cntrl$fnscale <- -cntrl$fnscale } cntrl } ## fitHHH is the main workhorse where the iterative optimization is performed fitHHH <- function(theta, sd.corr, model, cntrl.stop=list(tol=1e-5, niter=100), cntrl.regression=list(method="nlminb"), cntrl.variance=list(method="nlminb"), verbose=0, shrinkage=FALSE) { dimFE.d.O <- model$nFE + model$nd + model$nOverdisp dimRE <- model$nRE getUpdater <- function (cntrl, start, ...) { method <- cntrl$method; cntrl$method <- NULL if (length(start) == 1 && method == "Nelder-Mead") { method <- "Brent" message("Switched optimizer from \"Nelder-Mead\" to \"Brent\"", " (dim(", deparse(substitute(start)), ")=1)") } list(paste("updateParams", if (method %in% c("nlminb", "nlm", "nr")) method else "optim", sep="_"), control = setOptimControl(method, cntrl, ...)) } ## ## artificial lower bound on intercepts of epidemic components ## reg.lower <- rep.int(-Inf, length(theta)) ## reg.lower[grep("^(ar|ne)\\.(1|ri)", model$namesFE)] <- -20 ## set optimizer for regression parameters updateRegressionControl <- getUpdater(cntrl.regression, theta, ## lower=reg.lower, iter.max=if(dimRE==0) 100, verbose=verbose+(dimRE==0)) updateRegression <- function (theta, sd.corr) do.call(updateRegressionControl[[1]], alist(theta, penLogLik, penScore, penFisher, sd.corr=sd.corr, model=model, control=updateRegressionControl[[2]])) ## set optimizer for variance parameters updateVarianceControl <- getUpdater(cntrl.variance, sd.corr, lower=-5, upper=5, verbose=verbose) updateVariance <- function (sd.corr, theta, fisher.unpen) do.call(updateVarianceControl[[1]], alist(sd.corr, marLogLik, marScore, marFisher, theta=theta, model=model, fisher.unpen=fisher.unpen, verbose=verbose>1, control=updateVarianceControl[[2]])) ## Let's go if (verbose>0) { cat(as.character(Sys.time()), ":", if (dimRE == 0) "Optimization of regression parameters" else "Iterative optimization of regression & variance parameters", "\n") } if (dimRE == 0) { # optimization of regression coefficients only parReg <- updateRegression(theta, sd.corr) theta <- parReg$par if ((convergence <- parReg$convergence) != 0 && !is.null(parReg$message)) cat("! Non-convergence message from optimizer:", parReg$message, "\n") } else { # swing between updateRegression & updateVariance convergence <- 99 i <- 0 while(convergence != 0 && (i < cntrl.stop$niter)){ i <- i+1 if (verbose>0) cat("\n") ## update regression coefficients parReg <- updateRegression(theta, sd.corr) theta <- parReg$par fisher.unpen <- attr(penFisher(theta, sd.corr, model, attributes=TRUE), "fisher") if(verbose>0) cat("Update of regression parameters: ", "max|x_0 - x_1| / max|x_0| =", parReg$rel.tol, "\n") if(parReg$convergence != 0) { if (!is.null(parReg$message)) cat("! 
Non-convergence message from optimizer:", parReg$message, "\n") cat("Update of regression coefficients in iteration ", i, " unreliable\n") } if(parReg$convergence > 20 && shrinkage){ cat("\n\n***************************************\nshrinkage", 0.1*theta[abs(theta)>10],"\n") theta[abs(theta)>10] <- 0.1*theta[abs(theta)>10] diag(fisher.unpen) <- diag(fisher.unpen)+1e-2 } ## update variance parameters parVar <- updateVariance(sd.corr, theta, fisher.unpen) if(verbose>0) cat("Update of variance parameters: max|x_0 - x_1| / max|x_0| =", parVar$rel.tol, "\n") if(parVar$convergence!=0) { if (!is.null(parVar$message)) print(parVar$message) cat("Update of variance parameters in iteration ", i, " unreliable\n") } ## NA values in sd.corr cause a stop() already in marLogLik() ## if(any(is.na(parVar$par))){ ## updateVarianceControl[[1]] <- "updateParams_optim" ## updateVarianceControl[[2]]$method <- ## if (length(sd.corr) == 1L) "Brent" else "Nelder-Mead" ## cat(" WARNING: at least one updated variance parameter is not a number\n", ## "\t-> NO UPDATE of variance\n", ## "\t-> SWITCHING to robust", dQuote(updateVarianceControl[[2]]$method), ## "for variance updates\n") ## } else sd.corr <- parVar$par ## overall convergence ? if( (parReg$rel.tol < cntrl.stop$tol) && (parVar$rel.tol < cntrl.stop$tol) && (parReg$convergence==0) && (parVar$convergence==0) ) convergence <- 0 ## exit loop if no more change in parameters (maybe false convergence) if (parReg$rel.tol == 0 && parVar$rel.tol == 0) break } } if(verbose > 0) { cat("\n") cat(as.character(Sys.time()), ":", if (convergence==0) "Optimization converged" else "Optimization DID NOT CONVERGE", "\n\n") } ll <- penLogLik(theta, sd.corr, model) fisher <- penFisher(theta, sd.corr, model, attributes = TRUE) dimnames(fisher) <- list(names(theta), names(theta)) margll <- marLogLik(sd.corr, theta, model, attr(fisher, "fisher")) fisher.var <- marFisher(sd.corr, theta, model, attr(fisher, "fisher")) dimnames(fisher.var) <- list(names(sd.corr), names(sd.corr)) list(theta=theta, sd.corr=sd.corr, loglik=ll, margll=margll, fisher=fisher, fisherVar=fisher.var, convergence=convergence, dim=c(fixed=dimFE.d.O,random=dimRE)) } ## check analytical score functions and Fisher informations for ## a given model (the result of interpretControl(control, stsObj)) ## and given parameters theta (regression par.) and sd.corr (variance par.). ## This is a wrapper around functionality of the numDeriv and maxLik packages. 
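## Example (not run; 'fit' denotes a hypothetical hhh4() fit):
##   model <- terms(fit)  # the model environment stored with the fit
##   checkAnalyticals(model, theta = fit$coefficients, sd.corr = fit$Sigma.orig)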
checkAnalyticals <- function (model, theta = model$initialTheta, sd.corr = model$initialSigma, methods = c("numDeriv","maxLik")) { cat("\nPenalized log-likelihood:\n") resCheckPen <- sapply(methods, function(derivMethod) { if (requireNamespace(derivMethod)) { do.call(paste("checkDerivatives", derivMethod, sep="."), args=alist(penLogLik, penScore, penFisher, theta, sd.corr=sd.corr, model=model)) } }, simplify=FALSE, USE.NAMES=TRUE) if (length(resCheckPen) == 1L) resCheckPen <- resCheckPen[[1L]] resCheckMar <- if (length(sd.corr) == 0L) list() else { cat("\nMarginal log-likelihood:\n") fisher.unpen <- attr(penFisher(theta, sd.corr, model, attributes=TRUE), "fisher") resCheckMar <- sapply(methods, function(derivMethod) { if (requireNamespace(derivMethod)) { do.call(paste("checkDerivatives", derivMethod, sep="."), args=alist(marLogLik, marScore, marFisher, sd.corr, theta=theta, model=model, fisher.unpen=fisher.unpen)) } }, simplify=FALSE, USE.NAMES=TRUE) if (length(resCheckMar) == 1L) resCheckMar[[1L]] else resCheckMar } list(pen = resCheckPen, mar = resCheckMar) } surveillance/R/epidataCS_animate.R0000644000176200001440000001533312424415000016622 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### animate-method for "epidataCS" objects ### It respects the ani.options() "interval" and "nmax" of the animation ### package, and it is advisable to use it within saveHTML() or similar ### ### Copyright (C) 2009-2014 Sebastian Meyer ### $Revision: 1096 $ ### $Date: 2014-10-30 11:59:12 +0100 (Thu, 30. Oct 2014) $ ################################################################################ ## three types: ## time.spacing=NULL: sequential snapshots at all event times ## time.spacing=scalar: snapshots with given time step (and timer) ## time.spacing=NA: time step is determined such that "nmax" snapshots result animate.epidataCS <- function (object, interval = c(0,Inf), time.spacing = NULL, nmax = NULL, sleep = NULL, legend.opts = list(), timer.opts = list(), pch = 15:18, col.current = "red", col.I = "#C16E41", col.R = "#B3B3B3", col.influence = NULL, main = NULL, verbose = interactive(), ...) 
{ stopifnot(is.numeric(interval), length(interval) == 2L) with.animation <- requireNamespace("animation", quietly = TRUE) if (is.null(sleep)) { sleep <- if (with.animation) animation::ani.options("interval") else 0.1 ## we cannot set this as default function argument, because we don't ## want to depend on package "animation" (surveillance only suggests it) } if (is.null(nmax)) { nmax <- if (with.animation) animation::ani.options("nmax") else Inf } s <- summary(object) removalTimes <- s$eventTimes + object$events$eps.t eventCoordsTypes <- cbind(s$eventCoords, type = s$eventTypes) pch <- rep_len(pch, s$nTypes) typeNames <- names(s$typeTable) multitype <- length(typeNames) > 1L # set default legend options doLegend <- if (is.list(legend.opts)) { if (is.null(legend.opts[["x"]])) legend.opts$x <- "topright" if (is.null(legend.opts$title)) legend.opts$title <- if (multitype) "type" else "state" if (is.null(legend.opts$legend)) { legend.opts$legend <- if (multitype) typeNames else c("infectious", if (!is.na(col.R)) "removed") } if (is.null(legend.opts$col)) { legend.opts$col <- if (multitype) col.current else c(col.I, if (!is.na(col.R)) col.R) } if (is.null(legend.opts$pch)) legend.opts$pch <- pch TRUE } else FALSE # set default timer options doTimer <- if (is.list(timer.opts)) { if (is.null(timer.opts[["x"]])) timer.opts$x <- "bottomright" if (is.null(timer.opts$title)) timer.opts$title <- "time" if (is.null(timer.opts$box.lty)) timer.opts$box.lty <- 0 if (is.null(timer.opts$adj)) timer.opts$adj <- c(0.5,0.5) if (is.null(timer.opts$inset)) timer.opts$inset <- 0.01 if (is.null(timer.opts$bg)) timer.opts$bg <- "white" TRUE } else FALSE # wrapper for 'points' with specific 'cex' for multiplicity multpoints <- function (tableCoordsTypes, col) { tableMult <- countunique(tableCoordsTypes) points(tableMult[,1:2,drop=FALSE], pch = pch[tableMult[,"type"]], col = col, cex = sqrt(1.5*tableMult[,"COUNT"]/pi) * par("cex")) } # functions returning if events are in status I or R at time t I <- function (t) s$eventTimes <= t & removalTimes >= t R <- function (t) removalTimes < t sequential <- is.null(time.spacing) # plot observed infections sequentially if (!sequential) stopifnot(length(time.spacing) == 1L) timeGrid <- if (sequential) unique(s$eventTimes) else { start <- max(s$timeRange[1], interval[1]) end <- min(interval[2], s$timeRange[2], max(removalTimes) + if (is.na(time.spacing)) 0 else time.spacing) if (is.na(time.spacing)) { if (!is.finite(nmax)) { stop("with 'time.spacing=NA', 'nmax' must be finite") } seq(from = start, to = end, length.out = nmax) } else { tps <- seq(from = start, to = end, by = time.spacing) if (length(tps) > nmax) { message("Generating only the first ", sQuote(if (with.animation) "ani.options(\"nmax\")" else "nmax"), " (=", nmax, ") snapshots") head(tps, nmax) } else tps } } .info <- format.info(timeGrid) timerformat <- paste0("%", .info[1], ".", .info[2], "f") # animate loopIndex <- if (!sequential) timeGrid else { idxs <- which(s$eventTimes >= interval[1] & s$eventTimes <= interval[2]) if (length(idxs) > nmax) { message("Generating only the first ", sQuote(if (with.animation) "ani.options(\"nmax\")" else "nmax"), " (=", nmax, ") events") head(idxs, nmax) } else idxs } told <- -Inf if (verbose) pb <- txtProgressBar(min=0, max=max(loopIndex), initial=0, style=3) for(it in loopIndex) { t <- if (sequential) s$eventTimes[it] else it infectious <- I(t) removed <- R(t) plot(object$W, ...) 
# FIXME: use default lwd = 2 title(main = main) if (doLegend) do.call(legend, legend.opts) if (doTimer) { ttxt <- sprintf(timerformat, t) do.call(legend, c(list(legend = ttxt), timer.opts)) } if (!is.null(col.influence)) { iRids <- which(infectious) if (sequential) iRids <- setdiff(iRids, it) # exclude the current event for(j in iRids) { iR <- shift.owin(object$events@data$.influenceRegion[[j]], s$eventCoords[j,]) plot(iR, add = TRUE, col = col.influence, border = NA) } } rTable <- eventCoordsTypes[removed,,drop=FALSE] if (nrow(rTable) > 0L) multpoints(rTable, col = col.R) iTable <- eventCoordsTypes[infectious,,drop=FALSE] if (nrow(iTable) > 0L) multpoints(iTable, col = col.I) infectiousNew <- if (sequential) it else infectious & !I(told) iTableNew <- eventCoordsTypes[infectiousNew,,drop=FALSE] if (nrow(iTableNew) > 0L) multpoints(iTableNew, col = col.current) told <- t if (verbose) setTxtProgressBar(pb, it) if (dev.interactive()) Sys.sleep(sleep) } if (verbose) close(pb) ## if (dev.interactive()) ## message("Note: use facilities of the \"animation\" package, e.g.,\n", ## " saveHTML() to view the animation in a web browser.") invisible(NULL) } surveillance/R/stsplot_spacetime.R0000644000176200001440000001333013507131507017036 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Old implementation of (animated) maps of an sts-object ### ### Copyright (C) 2007-2013 Michael Hoehle, 2016 Sebastian Meyer ### $Revision: 2434 $ ### $Date: 2019-07-03 15:53:11 +0200 (Wed, 03. Jul 2019) $ ################################################################################ stsplot_spacetime <- function( x, type, legend=NULL, opts.col=NULL, labels=TRUE, wait.ms=250, cex.lab=0.7, verbose=FALSE, dev.printer=NULL, ...) { #Extract the mappoly if (length(x@map) == 0) stop("The sts object has an empty map.") map <- x@map maplim <- list(x=bbox(map)[1,],y=bbox(map)[2,]) #Check colnames, otherwise no need to continue if (is.null(colnames(x@observed))) stop("The sts observed slot does not have any colnames to match with the shapefile.") #Check for legend options if (is.null(legend)) { legend <- list(dx=0.4,dy=0.04,x=maplim$x[1],y=maplim$y[1],once=TRUE) } #Extract the data o <- x@observed alarm <- x@alarm #Formula is of type "observed ~ 1|unit" (i.e. 
no time) aggregate <- type[[3]][[3]] == "unit" if (aggregate) { o <- t(as.matrix(apply(o,MARGIN=2,sum))) alarm <- t(as.matrix(apply(alarm,MARGIN=2,sum)))>0 } #Number of time points maxt <- dim(o)[1] #Process dev.printer options if (is.list(dev.printer)) { dev.printer <- modifyList( list(device = png, extension = ".png", width = 640, height = 480, name = "Rplot"), dev.printer) #filename format (padding with zeroes) fnfmt <- paste0("%s-%0", nchar(maxt), "i%s") } #Get color vector opts.col_default <- list(ncolors=length(o), use.color=TRUE) gyr <- do.call(".hcl.colors", if (is.list(opts.col)) modifyList(opts.col_default, opts.col) else opts.col_default) theCut <- cut(o, length(gyr)) #Cut into specified number of colors o.cut <- matrix(as.numeric(theCut),nrow=nrow(o),ncol=ncol(o)) o.col <- matrix(gyr[o.cut],ncol=ncol(o.cut)) o.col[is.na(o.col)] <- gray(1) dimnames(o.col) <- dimnames(o) #Sort the o according to the names in the map region.id <- row.names(map) o.col.id <- dimnames(o.col)[[2]] #Make the columns of o as in the map object o.col <- o.col[,pmatch(region.id,o.col.id),drop=FALSE] alarm.col <- alarm[,pmatch(region.id,o.col.id),drop=FALSE] #Screen processing screen.matrix <- matrix(c(0,1,0,1,0,1,0.8,1),2,4,byrow=TRUE) split.screen(screen.matrix) #Loop over all time slices for (t in 1:maxt) { #Status information if (verbose) { cat(paste("Processing slice",t,"of",maxt,"\n")) } #Clean screen (title area) screen(n=2) par(bg=gray(1)) erase.screen() par(bg="transparent") #Plot the map on screen 1 screen(n=1) plot(map,col=o.col[t,],xlab="",ylab="",...) #Indicate alarms as shaded overlays if (!all(is.na(alarm.col))) { #Plotting using density "NA" does not appear to work #anymore in the new sp versions alarm.col[is.na(alarm.col)] <- 0 plot(map,dens=alarm.col*15,add=TRUE) } if (labels) #getSpPPolygonsLabptSlots is deprecated. Use coordinates method instead text(coordinates(map), labels=as.character(region.id), cex.lab=cex.lab) if (!aggregate) { title(paste(t,"/",maxt,sep="")) } #In case a legend is requested if (is.list(legend) && !(legend$once & t>1) | (t==1)) { add.legend(legend, maplim, list(col=gyr, min=min(o), max=max(o), trans=identity)) } #Is writing to files requested? if (is.list(dev.printer)) { #Create filename fileName <- sprintf(fnfmt, dev.printer$name, t, dev.printer$extension) cat("Creating ",fileName,"\n") #Save the current device using dev.print if (inherits(try( dev.print(dev.printer$device, file=fileName, width=dev.printer$width, height=dev.printer$height) ), "try-error")) { warning("disabling dev.print()", immediate. = TRUE) dev.printer <- NULL } } wait(wait.ms) } close.screen(all.screens = TRUE) } ####################### ### auxiliary functions ####################### ### wait a given number of milliseconds (via "while" and "proc.time") wait <- function (wait.ms) # number of milliseconds to wait { #Initialize start.time <- proc.time()[3]*1000 elapsed <- proc.time()[3]*1000 - start.time #Loop as long as required. while (elapsed < wait.ms) { elapsed <- proc.time()[3]*1000 - start.time } } ### add the color key add.legend <- function(legend, maplim, theColors) { #Preproc dy <- diff(maplim$y) * legend$dy dx <- diff(maplim$x) * legend$dx #Add legend -- i.e. 
a slider xlu <- xlo <- legend$x xru <- xro <- xlu + dx yru <- ylu <- legend$y yro <- ylo <- yru + dy step <- (xru - xlu)/length(theColors$col) for (i in 0:(length(theColors$col) - 1)) { polygon(c(xlo + step * i, xlo + step * (i + 1), xlu + step * (i + 1), xlu + step * i), c(ylo, yro, yru, ylu), col = theColors$col[i + 1], border = theColors$col[i + 1]) } #Write info about min and max on the slider. black <- grey(0) lines(c(xlo, xro, xru, xlu, xlo), c(ylo, yro, yru, ylu, ylo), col = black) #Transformation function for data values, e.g., exp or identity trans <- theColors$trans text(xlu, ylu - 0.5*dy, formatC(trans(theColors$min)), cex = 1, col = black,adj=c(0,1)) text(xru, yru - 0.5*dy, formatC(trans(theColors$max)), cex = 1, col = black,adj=c(1,1)) } surveillance/R/linelist2sts.R0000644000176200001440000000546212471147162015745 0ustar liggesusers###################################################################### # Takes a data frame with dates of individual # cases and creates an aggregated sts time series object for these # data with aggregation occurring at the desired scale. # # Parameters: # linelist - a data frame containing individual case information, one per line # dateCol - a character string denoting the column name in case containing # the relevant date variable to aggregate # aggregate.by - aggregation block length given as a string compatible with # seq.Date -- see \link{seq.Date} for further details. # # Author: Michael Hoehle # Date LaMo: 04 Jan 2014 ###################################################################### linelist2sts <- function(linelist,dateCol,aggregate.by=c("1 day", "1 week", "7 day", "1 week", "1 month", "3 month", "1 year"),dRange=NULL, epochInPeriodStr=switch(aggregate.by, "1 day"="1","1 week"="%u", "1 month"="%d","3 month"="%q","1 year"="%j"), startYearFormat=switch(aggregate.by,"1 day"="%Y","7 day"="%G","1 week"="%G","1 month"="%Y","3 month"="%Y","1 year"="%Y"), startEpochFormat=switch(aggregate.by,"1 day"="%j","7 day"="%V","1 week"="%V","1 month"="%m","3 month"="%Q","1 year"="1") ) { ##Check aggregate.by argument aggregate.by <- match.arg(aggregate.by, c("1 day", "1 week", "7 day", "1 week", "1 month", "3 month", "1 year")) #If no dRange let it be the range of the dateCol if (is.null(dRange)) { dRange <- range(linelist[,dateCol],na.rm=TRUE) } if (aggregate.by != "1 day") { ##Move dates back to first of each epoch unit dRange <- dRange - as.numeric(formatDate(dRange,epochInPeriodStr)) + 1 } #Add exactly one time step to dRange to ensure that cut #contains the last level as well. We use 'seq' to ensure #that even weeks/days with no data are present in the factor. maxDate <- seq(max(dRange),length.out=2,by=aggregate.by)[-1] dates <- seq(min(dRange), maxDate, by=aggregate.by) #Make a table containing the specific number of cases. 
Note that this #needs to occur using a cut statement lvl <- cut(linelist[,dateCol], breaks=dates,right=FALSE) observed <- table(lvl) epoch <- as.Date(names(observed)) #Translate "by" to freq string freq <- switch(aggregate.by,"1 day"=365,"7 day"=52,"1 week"=52,"1 month"=12,"3 month"=4,"1 year"=1) startYear <- as.numeric(formatDate(min(dates),startYearFormat)) startEpoch <- as.numeric(formatDate(min(dates),startEpochFormat)) observed <- matrix(observed,ncol=1) #Create S4 object sts <- new("sts",epoch=as.numeric(epoch),observed=observed, alarm=0*observed, epochAsDate=TRUE,freq=freq,start=c(startYear,startEpoch)) #Return return(sts) } surveillance/R/hhh4_oneStepAhead.R0000644000176200001440000002530213231640220016540 0ustar liggesusers################################################################################ ### Compute one-step-ahead predictions at a series of time points ### ### Copyright (C) 2011-2012 Michaela Paul, 2012-2018 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ################################################################################ oneStepAhead <- function(result, # hhh4-object (i.e. a hhh4 model fit) tp, # scalar: one-step-ahead predictions for time # points (tp+1):nrow(stsObj), or tp=c(from, to) type = c("rolling", "first", "final"), which.start = c("current", "final"), #if type="rolling" keep.estimates = FALSE, verbose = TRUE, # verbose-1 is used as verbose setting # for sequentially refitted hhh4 models cores = 1) # if which.start="final", the predictions # can be computed in parallel { stopifnot(inherits(result, "hhh4")) type <- match.arg(type) if (type == "rolling" && !is.list(which.start)) { ## new in surveillance 1.10-0: if 'which.start' is a list, it is ## directly used as the 'start' argument for hhh4() in all time steps which.start <- match.arg(which.start) if (cores > 1 && which.start == "current") stop("no parallelization for 'type=\"rolling\"' ", "if 'which.start=\"current\"'") } ## get model terms model <- result[["terms"]] if (is.null(model)) model <- result$terms <- terms(result) nTime <- model$nTime # = nrow(result$stsObj) nUnits <- model$nUnits # = ncol(result$stsObj) dimPsi <- model$nOverdisp withPsi <- dimPsi > 0L psiIdx <- model$nFE + model$nd + seq_len(dimPsi) ## check that tp is within the time period of the data stopifnot(length(tp) %in% 1:2, tp >= 0) tpRange <- c(model$subset[1L], nTime-1L) # supported range if (any(tp > tpRange[2L]) || (type != "final" && any(tp < tpRange[1L]))) { stop("the time range defined by 'tp' must be a subset of ", tpRange[1L], ":", tpRange[2L]) } if (length(tp) == 1) { tp <- c(tp, max(model$subset)-1L) # historical default if (tp[1L] > tp[2L]) # probably unintended stop("'tp' larger than the default upper limit (", tp[2L], ")") } tps <- tp[1L]:tp[2L] # this function actually works if tp[1] > tp[2] ntps <- length(tps) observed <- model$response[tps+1,,drop=FALSE] rownames(observed) <- tps+1 ## adjust verbosity for model refitting verbose <- as.integer(verbose) result$control$verbose <- max(0, verbose - (ntps>1)) if (type != "rolling" && verbose > 1L) verbose <- 1L do_pb <- verbose == 1L && interactive() ## initial fit fit <- if (type == "first") { if (do_pb) cat("\nRefitting model at first time point t =", tps[1L], "...\n") update.hhh4(result, subset.upper = tps[1L], use.estimates = TRUE, keep.terms = TRUE) # need "model" -> $terms } 
else result if (!fit$convergence) stop("initial fit did not converge") ## result templates (named and filled with NA's) pred <- matrix(NA_real_, nrow=ntps, ncol=nUnits, dimnames=list(tps+1, colnames(observed))) if (withPsi) psi <- matrix(NA_real_, nrow=ntps, ncol=dimPsi, dimnames=list(tps, names(model$initialTheta)[psiIdx])) if (keep.estimates) { coefficients <- matrix(NA_real_, nrow=ntps, ncol=length(model$initialTheta), dimnames=list(tps, names(model$initialTheta))) Sigma.orig <- matrix(NA_real_, nrow=ntps, ncol=model$nSigma, dimnames=list(tps, names(result$Sigma.orig))) logliks <- matrix(NA_real_, nrow=ntps, ncol=2L, dimnames=list(tps, c("loglikelihood", "margll"))) } ## extract predictions and stuff for specific tp from fit getPreds <- function (fit, tp) { coefs <- unname(fit$coefficients) c(list(pred = as.vector( meanHHH(coefs, fit$terms, subset=tp+1L, total.only=TRUE))), if (withPsi) list(psi = coefs[psiIdx]), if (keep.estimates) list( coefficients=coefs, Sigma.orig=unname(fit$Sigma.orig), logliks=c(fit$loglikelihood, fit$margll)) ) } ## compute the predictions and save ## pred, psi, coefficients, Sigma.orig, and logliks if (cores > 1L) { ## return value template (unnamed NA vectors) resTemplate <- lapply(getPreds(fit, tps[1L]), "is.na<-", TRUE) ## run parallel res <- parallel::mclapply(tps, function (tp) { if (verbose) cat("One-step-ahead prediction @ t =", tp, "...\n") if (type == "rolling") { # update fit fit <- update.hhh4(result, subset.upper=tp, use.estimates=TRUE, start=if (is.list(which.start)) which.start, verbose=FALSE, # chaotic in parallel keep.terms=TRUE) # need "model" -> $terms if (!fit$convergence) { cat("WARNING: No convergence @ t =", tp, "!\n") return(resTemplate) } } getPreds(fit, tp) }, mc.preschedule=TRUE, mc.cores=cores) ## gather results .extractFromList <- function (what) t(vapply(res, "[[", resTemplate[[what]], what, USE.NAMES=FALSE)) pred[] <- .extractFromList("pred") if (withPsi) psi[] <- .extractFromList("psi") if (keep.estimates) { coefficients[] <- .extractFromList("coefficients") Sigma.orig[] <- .extractFromList("Sigma.orig") logliks[] <- .extractFromList("logliks") } } else { ## sequential one-step ahead predictions if (do_pb) pb <- txtProgressBar(min=0, max=ntps, initial=0, style=3) for(i in seq_along(tps)) { if (verbose > 1L) { cat("\nOne-step-ahead prediction @ t =", tps[i], "...\n") } else if (do_pb) setTxtProgressBar(pb, i) if (type == "rolling") { # update fit fit.old <- fit # backup start <- if (is.list(which.start)) { which.start } else if (which.start == "current") hhh4coef2start(fit) ## else NULL fit <- update.hhh4(result, subset.upper=tps[i], start=start, # takes precedence use.estimates=TRUE, keep.terms=TRUE) # need "model" -> $terms if (!fit$convergence) { if (do_pb) cat("\n") cat("WARNING: No convergence @ t =", tps[i], "!\n") ## FIXME: do a grid search ? 
fit <- fit.old next } } res <- getPreds(fit, tps[i]) ## gather results pred[i,] <- res$pred if (withPsi) psi[i,] <- res$psi if (keep.estimates) { coefficients[i,] <- res$coefficients Sigma.orig[i,] <- res$Sigma.orig logliks[i,] <- res$logliks } } if (do_pb) close(pb) } ## with shared overdispersion parameters we need to expand psi to ncol(pred) if (dimPsi > 1L && dimPsi != nUnits) { psi <- psi[,model$indexPsi,drop=FALSE] } ## done res <- c(list(pred = pred, observed = observed, psi = if (withPsi) psi else NULL, allConverged = all(!is.na(pred))), if (keep.estimates) list(coefficients = coefficients, Sigma.orig = Sigma.orig, logliks = logliks) ) class(res) <- "oneStepAhead" res } ## extract estimated overdispersion in dnbinom() parametrization, as full matrix psi2size.oneStepAhead <- function (object) { if (is.null(object$psi)) # Poisson model return(NULL) size <- exp(object$psi) # a matrix with 1 or nUnit columns ## ensure that we always have a full 'size' matrix with nUnit columns dimpred <- dim(object$pred) if (ncol(size) != dimpred[2L]) { # => ncol(size)=1, unit-independent psi size <- rep.int(size, dimpred[2L]) dim(size) <- dimpred } dimnames(size) <- list(rownames(object$psi), colnames(object$pred)) size } ## quantiles of the one-step-ahead forecasts quantile.oneStepAhead <- function (x, probs = c(2.5, 10, 50, 90, 97.5)/100, ...) { stopifnot(is.vector(probs, mode = "numeric"), probs >= 0, probs <= 1, (np <- length(probs)) > 0) names(probs) <- paste(format(100*probs, trim=TRUE, scientific=FALSE, digits=3), "%") size <- psi2size.oneStepAhead(x) qs <- if (is.null(size)) { vapply(X = probs, FUN = qpois, FUN.VALUE = x$pred, lambda = x$pred) } else { vapply(X = probs, FUN = qnbinom, FUN.VALUE = x$pred, mu = x$pred, size = size) } ## one tp, one unit -> qs is a vector of length np ## otherwise, 'qs' has dimensions ntps x nUnit x np ## if nUnit==1, we return an ntps x np matrix, otherwise an array if (is.vector(qs)) { qs <- t(qs) rownames(qs) <- rownames(x$pred) qs } else if (dim(qs)[2L] == 1L) { matrix(qs, dim(qs)[1L], dim(qs)[3L], dimnames = dimnames(qs)[c(1L,3L)]) } else qs } ## confidence intervals for one-step-ahead predictions confint.oneStepAhead <- function (object, parm, level = 0.95, ...) { quantile.oneStepAhead(object, (1+c(-1,1)*level)/2, ...) } ## simple plot of one-step-ahead forecasts plot.oneStepAhead <- function (x, unit = 1, probs = 1:99/100, start = NULL, means.args = NULL, ...) { stopifnot(length(unit) == 1, length(probs) > 1) ## select unit obs <- x$observed[,unit] ms <- x$pred[,unit] qs <- quantile.oneStepAhead(x, probs = probs) if (!is.matrix(qs)) # multi-unit predictions qs <- matrix(qs[,unit,], dim(qs)[1L], dim(qs)[3L], dimnames = dimnames(qs)[c(1L,3L)]) ## produce fanplot if (is.null(start)) start <- as.integer(rownames(qs)[1L]) fanplot(quantiles = qs, probs = probs, means = ms, observed = obs, start = start, means.args = means.args, ...) } surveillance/R/twinstim_siaf.R0000644000176200001440000003213613777353271016176 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Spatial interaction functions for twinstim's epidemic component. ### Specific implementations are in separate files (e.g.: Gaussian, power law). 
### ### Copyright (C) 2009-2015,2017 Sebastian Meyer ### $Revision: 2601 $ ### $Date: 2021-01-12 18:04:57 +0100 (Tue, 12. Jan 2021) $ ################################################################################ ##################### ### "Constructor" ### ##################### siaf <- function (f, F, Fcircle, effRange, deriv, Deriv, simulate, npars, validpars = NULL) { npars <- as.integer(npars) if (length(npars) != 1 || npars < 0L) { stop("'siaf$npars' must be a single nonnegative number") } f <- .checknargs3(f, "siaf$f") F <- if (missing(F) || is.null(F)) siaf.fallback.F else { F <- match.fun(F) if (length(formals(F)) < 4L) stop("siaf$F() must accept >=4 arguments ", "(polydomain, f, pars, type)") F } haspars <- npars > 0L if (!haspars || missing(deriv)) deriv <- NULL if (!is.null(deriv)) deriv <- .checknargs3(deriv, "siaf$deriv") if (missing(effRange)) effRange <- NULL if (missing(Fcircle) || is.null(Fcircle)) { Fcircle <- NULL if (!is.null(effRange)) { message("'siaf$effRange' only works in conjunction with 'siaf$Fcircle'") effRange <- NULL } } if (!is.null(Fcircle)) Fcircle <- .checknargs3(Fcircle, "siaf$Fcircle") if (!is.null(effRange)) { effRange <- match.fun(effRange) if (length(formals(effRange)) < 1L) { stop("the 'siaf$effRange' function must accept a parameter vector") } } Deriv <- if (is.null(deriv)) NULL else if (missing(Deriv) || is.null(Deriv)) siaf.fallback.Deriv else { Deriv <- match.fun(Deriv) if (length(formals(Deriv)) < 4L) stop("siaf$Deriv() must accept >=4 arguments ", "(polydomain, deriv, pars, type)") Deriv } ## Check if simulation function has proper format if (missing(simulate)) simulate <- NULL if (!is.null(simulate)) { simulate <- .checknargs3(simulate, "siaf$simulate") if (length(formals(simulate)) == 3L) formals(simulate) <- c(formals(simulate), alist(ub=)) } ## Check if the validpars are of correct form validpars <- if (!haspars || is.null(validpars)) NULL else match.fun(validpars) ## Done, return result. list(f = f, F = F, Fcircle = Fcircle, effRange = effRange, deriv = deriv, Deriv = Deriv, simulate = simulate, npars = npars, validpars = validpars) } ########################################## ### Constant spatial interaction/dispersal ########################################## siaf.constant <- function () { res <- list( ## use explicit quote()ing to prevent notes from codetools::checkUsage f = as.function(c(alist(s=, pars=NULL, types=NULL), quote(rep.int(1, length(s)/2))), ##<- nrow() would take extra time in standardGeneric() envir = .GlobalEnv), ## integration over polydomains (F) is handled specially in twinstim Fcircle = as.function(c(alist(r=, pars=NULL, type=NULL), quote(pi*r^2)), envir = .GlobalEnv), simulate = as.function(c(alist(n=, pars=NULL, type=NULL, ub=), quote(runifdisc(n, ub))), envir = getNamespace("surveillance")), npars = 0L ) attr(res, "constant") <- TRUE res } ########################################## ### Naive defaults for the siaf primitives ########################################## ## numerical integration of f over a polygonal domain (single "owin" and type) siaf.fallback.F <- function (polydomain, f, pars, type, method = "SV", ...) { if (identical(method,"SV")) { polyCub.SV(polyregion = polydomain, f = f, pars, type, alpha = 0, ...) # since max at origin } else { polyCub(polyregion = polydomain, f = f, method = method, pars, type, ...) 
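## (cubature methods other than "SV" are simply delegated to the generic ## polyCub() wrapper above)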
} } ## numerical integration of f over a circular domain getFcircle <- function (siaf, control.F = list()) { if (is.null(siaf$Fcircle)) { function (r, pars, type) { disc <- discpoly(c(0,0), r, npoly = 64, class = "owin") do.call(siaf$F, c(alist(disc, siaf$f, pars, type), control.F)) } } else { siaf$Fcircle } } ## numerical integration of deriv over a polygonal domain siaf.fallback.Deriv <- function (polydomain, deriv, pars, type, method = "SV", ...) { deriv1 <- function (s, paridx) deriv(s, pars, type)[,paridx,drop=TRUE] intderiv1 <- function (paridx) polyCub(polyregion = polydomain, f = deriv1, method = method, paridx = paridx, ...) vapply(X = seq_along(pars), FUN = intderiv1, FUN.VALUE = 0, USE.NAMES = FALSE) } #################################### ### Simulation via polar coordinates (used, e.g., for siaf.powerlaw) #################################### ## Simulate from an isotropic spatial interaction function ## f_{2D}(s) \propto f(||s||), ||s|| <= ub. ## within a maximum distance 'ub' via polar coordinates and the inverse ## transformation method: ## p_{2D}(r,theta) = r * f_{2D}(x,y) \propto r*f(r) ## => angle theta ~ U(0,2*pi) and sample r according to r*f(r) siaf.simulatePC <- function (intrfr) # e.g., intrfr.powerlaw { as.function(c(alist(n=, siafpars=, type=, ub=), substitute({ ## Note: in simEpidataCS, simulation is always bounded to eps.s and to ## the largest extent of W, thus, 'ub' is finite stopifnot(is.finite(ub)) ## Normalizing constant of r*f(r) on [0;ub] normconst <- intrfr(ub, siafpars, type) ## => cumulative distribution function CDF <- function (q) intrfr(q, siafpars, type) / normconst ## For inversion sampling, we need the quantile function CDF^-1 ## However, this is not available in closed form, so we use uniroot ## (which requires a finite upper bound) QF <- function (p) uniroot(function(q) CDF(q)-p, lower=0, upper=ub)$root ## Now sample r as QF(U), where U ~ U(0,1) r <- vapply(X=runif(n), FUN=QF, FUN.VALUE=0, USE.NAMES=FALSE) ## Check simulation of r via kernel estimate: ## plot(density(r, from=0, to=ub)); curve(p(x)/normconst,add=TRUE,col=2) ## now rotate each point by a random angle to cover all directions theta <- runif(n, 0, 2*pi) r * cbind(cos(theta), sin(theta)) })), envir=parent.frame()) } ################################################ ### Check F, Fcircle, deriv, Deriv, and simulate ################################################ checksiaf <- function (siaf, pargrid, type = 1, tolerance = 1e-5, method = "SV", ...) { stopifnot(is.list(siaf), is.numeric(pargrid), !is.na(pargrid), length(pargrid) > 0) pargrid <- as.matrix(pargrid) stopifnot(siaf$npars == ncol(pargrid)) ## Check 'F' if (!is.null(siaf$F)) { cat("'F' vs. cubature using method = \"", method ,"\" ... ", sep="") comp.F <- checksiaf.F(siaf$F, siaf$f, pargrid, type=type, method=method, ...) cat(attr(comp.F, "all.equal") <- all.equal(comp.F[,1], comp.F[,2], check.attributes=FALSE, tolerance=tolerance), "\n") } ## Check 'Fcircle' if (!is.null(siaf$Fcircle)) { cat("'Fcircle' vs. cubature using method = \"",method,"\" ... ", sep="") comp.Fcircle <- checksiaf.Fcircle(siaf$Fcircle, siaf$f, pargrid, type=type, method=method, ...) cat(attr(comp.Fcircle, "all.equal") <- all.equal(comp.Fcircle[,1], comp.Fcircle[,2], check.attributes=FALSE, tolerance=tolerance), "\n") } ## Check 'deriv' if (!is.null(siaf$deriv)) { cat("'deriv' vs. numerical derivative ... 
") if (requireNamespace("maxLik", quietly=TRUE)) { maxRelDiffs.deriv <- checksiaf.deriv(siaf$deriv, siaf$f, pargrid, type=type) cat(attr(maxRelDiffs.deriv, "all.equal") <- if (any(maxRelDiffs.deriv > tolerance)) paste("maxRelDiff =", max(maxRelDiffs.deriv)) else TRUE, "\n") } else cat("Failed: need package", sQuote("maxLik"), "\n") } ## Check 'Deriv' if (!is.null(siaf$Deriv)) { cat("'Deriv' vs. cubature using method = \"", method ,"\" ... ", sep="") comp.Deriv <- checksiaf.Deriv(siaf$Deriv, siaf$deriv, pargrid, type=type, method=method, ...) if (siaf$npars > 1) cat("\n") attr(comp.Deriv, "all.equal") <- sapply(seq_len(siaf$npars), function (j) { if (siaf$npars > 1) cat("\tsiaf parameter ", j, ": ", sep="") ae <- all.equal(comp.Deriv[,j], comp.Deriv[,siaf$npars+j], check.attributes=FALSE, tolerance=tolerance) cat(ae, "\n") ae }) } ## Check 'simulate' if (interactive() && !is.null(siaf$simulate)) { cat("Simulating ... ") checksiaf.simulate(siaf$simulate, siaf$f, pargrid[1,], type=type) cat("(-> check the plot)\n") } ## invisibly return check results invisible(mget(c("comp.F", "comp.Fcircle", "maxRelDiffs.deriv", "comp.Deriv"), ifnotfound=list(NULL), inherits=FALSE)) } checksiaf.F <- function (F, f, pargrid, type=1, method="SV", ...) { res <- t(apply(pargrid, 1, function (pars) { given <- F(LETTERR, f, pars, type) num <- siaf.fallback.F(polydomain = LETTERR, f = f, pars = pars, type = type, method = method, ...) c(given, num) })) colnames(res) <- c("F", method) res } checksiaf.Fcircle <- function (Fcircle, f, pargrid, type=1, rs=c(1,5,10,50,100), method="SV", ...) { pargrid <- pargrid[rep(1:nrow(pargrid), each=length(rs)),,drop=FALSE] rpargrid <- cbind(rs, pargrid, deparse.level=0) res <- t(apply(rpargrid, 1, function (x) { disc <- discpoly(c(0,0), x[1L], npoly = 128, class = "owin") c(ana = Fcircle(x[1L], x[-1L], type), num = siaf.fallback.F(polydomain = disc, f = f, pars = x[-1L], type = type, method = method, ...)) })) res } checksiaf.deriv <- function (deriv, f, pargrid, type=1, rmax=100) { rgrid <- seq(-rmax,rmax,len=21) / sqrt(2) rgrid <- rgrid[rgrid != 0] # some siafs are always 1 at (0,0) (deriv=0) sgrid <- cbind(rgrid, rgrid) apply(pargrid, 1, function (pars) { maxLik::compareDerivatives(f, deriv, t0=pars, s=sgrid, print=FALSE)$maxRelDiffGrad ## Note: numDeriv::grad() would only allow one location s at a time }) } checksiaf.Deriv <- function (Deriv, deriv, pargrid, type=1, method="SV", ...) { res <- t(apply(pargrid, 1, function (pars) { given <- Deriv(LETTERR, deriv, pars, type) num <- siaf.fallback.Deriv(polydomain = LETTERR, deriv = deriv, pars = pars, type = type, method = method, ...) c(given, num) })) paridxs <- seq_len(ncol(pargrid)) colnames(res) <- c(paste("Deriv",paridxs,sep="."), paste(method,paridxs,sep=".")) res } checksiaf.simulate <- function (simulate, f, pars, type=1, B=3000, ub=10, plot=interactive()) { ## Simulate B points on the disc with radius 'ub' simpoints <- simulate(B, pars, type=type, ub=ub) if (plot) { ## Graphical check in 2D opar <- par(mfrow=c(2,1), mar=c(4,3,2,1)); on.exit(par(opar)) plot(as.im.function(function(x,y,...) 
f(cbind(x,y), pars, type), W=discpoly(c(0,0), ub, class="owin")), axes=TRUE, main="Simulation from the spatial kernel") points(simpoints, cex=0.2) kdens <- kde2d(simpoints[,1], simpoints[,2], n=100) contour(kdens, add=TRUE, col=2, lwd=2, labcex=1.5, vfont=c("sans serif", "bold")) ##x11(); image(kdens, add=TRUE) ## Graphical check of distance distribution truehist(sqrt(rowSums(simpoints^2)), xlab="Distance") rfr <- function (r) r*f(cbind(r,0), pars, type) rfrnorm <- integrate(rfr, 0, ub)$value do.call("curve", list(quote(rfr(x)/rfrnorm), add=TRUE, col=2, lwd=2)) ##<- use do.call-construct to prevent codetools::checkUsage from noting "x" } ## invisibly return simulated points invisible(simpoints) } surveillance/R/twinstim_intensity.R0000644000176200001440000003117012625070115017257 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Plot the temporal or spatial evolution of the estimated intensity ### ### Copyright (C) 2012-2015 Sebastian Meyer ### $Revision: 1520 $ ### $Date: 2015-11-24 15:12:29 +0100 (Tue, 24. Nov 2015) $ ################################################################################ intensity.twinstim <- function (x, aggregate = c("time", "space"), types = 1:nrow(x$qmatrix), tiles, tiles.idcol = NULL) { modelenv <- environment(x) ## check arguments if (is.null(modelenv)) stop("'x' is missing the model environment\n", " -- re-fit or update() with 'model=TRUE'") aggregate <- match.arg(aggregate) stopifnot(is.vector(types, mode="numeric"), types %in% seq_len(modelenv$nTypes), !anyDuplicated(types)) ## remove (big) x object from current evaluation environment qmatrix <- x$qmatrix # not part of modelenv force(types) # evaluate types before rm(x) rm(x) # don't need this anymore ##thisenv <- environment() ##parent.env(thisenv) <- modelenv # objects of modelenv become visible ## Instead of the above, we do cheap and nasty model unpacking! ## safer than the parent.env<- hack (R manual: "extremely dangerous"), and ## cleaner than running code inside with(modelenv,...) 
since assignments ## then would take place in modelenv, which would produce garbage t0 <- modelenv$t0 T <- modelenv$T histIntervals <- modelenv$histIntervals eventTimes <- modelenv$eventTimes eventCoords <- modelenv$eventCoords eventTypes <- modelenv$eventTypes removalTimes <- modelenv$removalTimes gridTiles <- modelenv$gridTiles gridBlocks <- modelenv$gridBlocks ds <- modelenv$ds tiaf <- modelenv$tiaf tiafpars <- modelenv$tiafpars eps.s <- modelenv$eps.s siaf <- modelenv$siaf siafpars <- modelenv$siafpars ## endemic component on the spatial or temporal grid hInt <- if (modelenv$hash) { eta <- drop(modelenv$mmhGrid %*% modelenv$beta) if (!is.null(modelenv$offsetGrid)) eta <- modelenv$offsetGrid + eta expeta <- exp(unname(eta)) .beta0 <- rep_len(if (modelenv$nbeta0==0L) 0 else modelenv$beta0, modelenv$nTypes) fact <- sum(exp(.beta0[types])) if (aggregate == "time") { # int over W and types by BLOCK fact * c(tapply(expeta * modelenv$ds, gridBlocks, sum, simplify = TRUE)) } else { # int over T and types by tile fact * c(tapply(expeta * modelenv$dt, gridTiles, sum, simplify = TRUE)) } } else { ## the endemic intensity is 0 ## but a non-endemic "twinstim" holds no information on 'stgrid': ## 'gridBlocks' and 'gridTiles', respectively, are undefined NULL } ## endemic component as a function of time or location hIntFUN <- if (modelenv$hash) { if (aggregate == "time") { function (tp) { stopifnot(isScalar(tp)) if (tp == t0) { hInt[1L] } else { starts <- histIntervals$start idx <- match(TRUE, c(starts,T) >= tp) - 1L if (identical(idx, 0L)) { # tp <= t0 NA_real_ } else { # idx is NA if tp > T block <- histIntervals$BLOCK[idx] hInt[as.character(block)] } } } } else { if (!is.null(tiles.idcol)) { stopifnot(is(tiles, "SpatialPolygonsDataFrame")) row.names(tiles) <- tiles@data[[tiles.idcol]] } tileLevels <- levels(gridTiles) tiles <- check_tiles(tiles, tileLevels, areas.stgrid = ds[seq_along(tileLevels)], keep.data = FALSE) # drop data for over-method tilesIDs <- row.names(tiles) # = sapply(tiles@polygons, slot, "ID") function (xy) { # works with a whole coordinate matrix points <- SpatialPoints(xy, proj4string=tiles@proj4string) polygonidxOfPoints <- over(points, tiles) tilesOfPoints <- tilesIDs[polygonidxOfPoints] hInt[tilesOfPoints] # index by name } } } else function (...) 0 ## epidemic component eInt <- if (modelenv$hase) { qSum_types <- rowSums(qmatrix[,types,drop=FALSE])[eventTypes] fact <- qSum_types * modelenv$gammapred if (aggregate == "time") { # as a function of time (int over W & types) factS <- fact * modelenv$siafInt function (tp) { stopifnot(isScalar(tp)) tdiff <- tp - eventTimes infectivity <- qSum_types > 0 & (tdiff > 0) & (removalTimes >= tp) if (any(infectivity)) { gsources <- tiaf$g(tdiff[infectivity], tiafpars, eventTypes[infectivity]) intWj <- factS[infectivity] * gsources sum(intWj) } else 0 } } else { # as a function of location (int over time and types) factT <- fact * modelenv$tiafInt nEvents <- nrow(eventCoords) function (xy) { stopifnot(is.vector(xy, mode="numeric"), length(xy) == 2L) point <- matrix(xy, nrow=nEvents, ncol=2L, byrow=TRUE) sdiff <- point - eventCoords proximity <- qSum_types > 0 & .rowSums(sdiff^2, nEvents, 2L) <= eps.s^2 if (any(proximity)) { fsources <- siaf$f(sdiff[proximity,,drop=FALSE], siafpars, eventTypes[proximity]) intTj <- factT[proximity] * fsources sum(intTj) } else 0 } } } else function (...) 
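## no epidemic component in the model: the epidemic intensity is identically zero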
0 ## return component functions list(hGrid = hInt, hFUN = hIntFUN, eFUN = eInt, aggregate = aggregate, types = types) } intensityplot.twinstim <- function (x, which = c("epidemic proportion", "endemic proportion", "total intensity"), aggregate, types, tiles, tiles.idcol, # arguments of intensity.twinstim; # defaults are set below plot = TRUE, add = FALSE, tgrid = 101, rug.opts = list(), sgrid = 128, polygons.args = list(), points.args = list(), cex.fun = sqrt, ...) { which <- match.arg(which) ## set up desired intensities cl <- match.call() cl <- cl[c(1L, match(names(formals(intensity.twinstim)), names(cl), 0L))] cl[[1]] <- as.name("intensity.twinstim") components <- eval(cl, envir = parent.frame()) aggregate <- components$aggregate types <- components$types ## define function to plot FUN <- function (tmp) {} names(formals(FUN)) <- if (aggregate == "time") "times" else "coords" body1 <- if (aggregate == "time") expression( hGrid <- sapply(times, components$hFUN, USE.NAMES=FALSE), eGrid <- sapply(times, components$eFUN, USE.NAMES=FALSE) ) else expression( hGrid <- unname(components$hFUN(coords)), # takes whole coord matrix eGrid <- apply(coords, 1, components$eFUN) ) body2 <- switch(which, "epidemic proportion" = expression(eGrid / (hGrid + eGrid)), "endemic proportion" = expression(hGrid / (hGrid + eGrid)), "total intensity" = expression(hGrid + eGrid)) body(FUN) <- as.call(c(as.name("{"), c(body1, body2))) if (!plot) return(FUN) ## plot the FUN modelenv <- environment(x) dotargs <- list(...) nms <- names(dotargs) if (aggregate == "time") { ## set up grid of x-values (time points where 'which' will be evaluated) tgrid <- if (isScalar(tgrid)) { seq(modelenv$t0, modelenv$T, length.out=tgrid) } else { stopifnot(is.vector(tgrid, mode="numeric")) sort(tgrid) } ## calculate 'which' on tgrid yvals <- FUN(tgrid) ## plot it if(! "xlab" %in% nms) dotargs$xlab <- "time" if(! "ylab" %in% nms) dotargs$ylab <- which if(! "type" %in% nms) dotargs$type <- "l" if(! "ylim" %in% nms) dotargs$ylim <- { if (which == "total intensity") c(0,max(yvals)) else c(0,1) } do.call(if (add) "lines" else "plot", args=c(alist(x=tgrid, y=yvals), dotargs)) if (is.list(rug.opts)) { if (is.null(rug.opts$ticksize)) rug.opts$ticksize <- 0.02 if (is.null(rug.opts$quiet)) rug.opts$quiet <- TRUE eventTimes.types <- modelenv$eventTimes[modelenv$eventTypes %in% types] do.call("rug", args = c(alist(x=eventTimes.types), rug.opts)) } invisible(FUN) } else { tiles <- as(tiles, "SpatialPolygons") # remove potential data for over() ## set up grid of coordinates where 'which' will be evaluated if (isScalar(sgrid)) { sgrid <- maptools::Sobj_SpatialGrid(tiles, n = sgrid)$SG ## ensure that sgrid has exactly the same proj4string as tiles ## since CRS(proj4string(tiles)) might have modified the string sgrid@proj4string <- tiles@proj4string } sgrid <- as(sgrid, "SpatialPixels") ## only select grid points inside W (tiles) sgridTileIdx <- over(sgrid, tiles) sgrid <- sgrid[!is.na(sgridTileIdx),] ## calculate 'which' on sgrid yvals <- FUN(coordinates(sgrid)) sgridy <- SpatialPixelsDataFrame(sgrid, data=data.frame(yvals=yvals), proj4string=tiles@proj4string) ## define sp.layout lobjs <- list() if (is.list(polygons.args)) { nms.polygons <- names(polygons.args) if(! 
"col" %in% nms.polygons) polygons.args$col <- "darkgrey" lobjs <- c(lobjs, list(c(list("sp.polygons", tiles, first=FALSE), polygons.args))) } if (is.list(points.args)) { eventCoords.types <- modelenv$eventCoords[modelenv$eventTypes %in% types,,drop=FALSE] ## eventCoords as Spatial object with duplicates counted and removed eventCoords.types <- SpatialPoints(eventCoords.types, proj4string = tiles@proj4string, bbox = tiles@bbox) eventCoords.types <- SpatialPointsDataFrame(eventCoords.types, data.frame(mult = multiplicity.Spatial(eventCoords.types))) eventCoords.types <- eventCoords.types[!duplicated(coordinates(eventCoords.types)),] points.args <- modifyList(list(pch=1, cex=0.5), points.args) pointcex <- cex.fun(eventCoords.types$mult) pointcex <- pointcex * points.args$cex points.args$cex <- NULL lobjs <- c(lobjs, list(c(list("sp.points", eventCoords.types, first=FALSE, cex=pointcex), points.args))) } if ("sp.layout" %in% nms) { if (!is.list(dotargs$sp.layout[[1]])) { # let sp.layout be a list of lists dotargs$sp.layout <- list(dotargs$sp.layout) } lobjs <- c(lobjs, dotargs$sp.layout) dotargs$sp.layout <- NULL } ## plotit if (add) message("'add'ing is not possible with 'aggregate=\"space\"'") if (! "xlim" %in% nms) dotargs$xlim <- bbox(tiles)[1,] if (! "ylim" %in% nms) dotargs$ylim <- bbox(tiles)[2,] if (! "scales" %in% nms) dotargs$scales <- list(draw = TRUE) do.call("spplot", args=c(alist(sgridy, zcol="yvals", sp.layout=lobjs, checkEmptyRC=FALSE), dotargs)) } } ## set default arguments for intensityplot.twinstim from intensity.twinstim formals(intensityplot.twinstim)[names(formals(intensity.twinstim))] <- formals(intensity.twinstim) surveillance/R/hhh4_W.R0000644000176200001440000002514313741600116014417 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Helper functions for neighbourhood weight matrices in hhh4() ### ### Copyright (C) 2012-2016,2020 Sebastian Meyer ### $Revision: 2567 $ ### $Date: 2020-10-14 15:42:38 +0200 (Wed, 14. Oct 2020) $ ################################################################################ checkNeighbourhood <- function (neighbourhood) { ## setValidity() in sts.R only guarantees correct 'dim' and 'dimnames' ## we also assert numeric or logical matrix with non-NA entries stopifnot(is.matrix(neighbourhood), nrow(neighbourhood) == ncol(neighbourhood), is.numeric(neighbourhood) | is.logical(neighbourhood), is.finite(neighbourhood)) invisible(TRUE) } ### calculate the weighted sum of counts of adjacent (or all other) regions ### i.e. 
the nTime x nUnit matrix with elements (ti): sum_j w_jit * y_j(t-lag) ## W is either a nUnits x nUnits matrix of time-constant weights w_ji ## or a nUnits x nUnits x nTime array of time-varying weights w_jit weightedSumNE <- function (observed, weights, lag) { dimY <- dim(observed) nTime <- dimY[1L] nUnits <- dimY[2L] if (length(dim(weights)) == 2L) { # fast track for time-constant weights if (any(isNA <- is.na(observed))) observed[isNA] <- 0 # keep original na.rm = TRUE behaviour (for now) rbind(matrix(NA_real_, lag, nUnits), observed[seq_len(nTime-lag),,drop=FALSE] %*% weights) } else { tYlagged <- t(observed[seq_len(nTime-lag),,drop=FALSE]) apply(weights[,,(lag+1L):nTime,drop=FALSE], 2L, function (wi) ## wi and tYlagged are matrices of size nUnits x (nTime-lag) c(rep(NA_real_, lag), .colSums(tYlagged * wi, nUnits, nTime-lag, na.rm=TRUE))) } } ### normalize weight matrix such that each row sums to 1 (at each time point) normalizeW <- function (W) { dimW <- dim(W) if (length(dimW) == 2L) { W / .rowSums(W, dimW[1L], dimW[2L]) } else { # time-varying weights res <- apply(W, 3L, normalizeW) dim(res) <- dimW res } } ### scale and/or normalize a weight matrix/array scaleNEweights.default <- function (weights, scale = NULL, normalize = FALSE) { if (!is.null(scale)) weights <- scale * weights if (normalize) weights <- normalizeW(weights) weights } ## update parametric weights functions w, dw, d2w scaleNEweights.list <- function (weights, scale = NULL, normalize = FALSE) { if (is.null(scale) && !normalize) return(weights) if (normalize) { dprod <- function (u, v, du, dv) du * v + u * dv dfrac <- function (u, v, du, dv) (du * v - u * dv) / v^2 w <- function (...) scaleNEweights.default(weights$w(...), scale, TRUE) dw <- function (...) { W <- scaleNEweights.default(weights$w(...), scale) dW <- clapply(X = weights$dw(...), # matrix or list thereof FUN = scaleNEweights.default, scale = scale) # always returns a list dimW <- dim(W) normW <- .rowSums(W, dimW[1L], dimW[2L]) normdW <- lapply(X = dW, FUN = .rowSums, m = dimW[1L], n = dimW[2L]) mapply(FUN = dfrac, du = dW, dv = normdW, MoreArgs = list(u = W, v = normW), SIMPLIFY = FALSE, USE.NAMES = FALSE) } ## for d2w() we need all the stuff from dw() -> substitute d2w <- as.function(c(alist(...=), substitute({ dWnorm <- DWBODY d2W <- clapply(X = weights$d2w(...), # matrix or list thereof FUN = scaleNEweights.default, scale = scale) # always returns a list normd2W <- lapply(X = d2W, FUN = .rowSums, m = dimW[1L], n = dimW[2L]) ## order of d2w is upper triangle BY ROW dimd <- length(dW) ri <- rep.int(seq_len(dimd), rep.int(dimd, dimd)) # row index ci <- rep.int(seq_len(dimd), dimd) # column index uppertri <- ci >= ri mapply(FUN = function (k, l, d2W, normd2W) { dfrac(dW[[k]], normW, d2W, normdW[[l]]) - dprod(W/normW, normdW[[k]]/normW, dWnorm[[l]], dfrac(normdW[[k]], normW, normd2W, normdW[[l]])) }, k = ri[uppertri], l = ci[uppertri], d2W = d2W, normd2W = normd2W, SIMPLIFY = FALSE, USE.NAMES = FALSE) }, list(DWBODY = body(dw))))) } else { w <- function (...) scaleNEweights.default(weights$w(...), scale) dw <- function (...) clapply(X = weights$dw(...), FUN = scaleNEweights.default, scale = scale) d2w <- function (...) 
clapply(X = weights$d2w(...), FUN = scaleNEweights.default, scale = scale) } ## return list with updated functions list(w = w, dw = dw, d2w = d2w, initial = weights$initial) } ################################## ### check ne$weights specification ################################## ### checks for a fixed matrix/array checkWeightsArray <- function (W, nUnits, nTime, name = deparse(substitute(W)), check0diag = FALSE, islands = FALSE) { if (!is.array(W) || !(length(dim(W)) %in% 2:3)) stop("'", name, "' must return a matrix or 3-dim array") if (any(dim(W)[1:2] != nUnits) || isTRUE(dim(W)[3] != nTime)) stop("'", name, "' must conform to dimensions ", nUnits, " x ", nUnits, " (x ", nTime, ")") if (any(is.na(W))) { if (islands) # normalization of parametric weights yields division by 0 warning("neighbourhood structure contains islands") stop("missing values in '", name, "' are not allowed") } if (check0diag) { diags <- if (is.matrix(W)) diag(W) else apply(W, 3, diag) if (any(diags != 0)) warning("'", name, "' has nonzeros on the diagonal", if (!is.matrix(W)) "s") } } ### check parametric weights specification consisting of a list of: ## - three functions: w, dw, and d2w ## - a vector of initial parameter values checkWeightsFUN <- function (object) { fnames <- paste0(c("","d","d2"), "w") if (any(!sapply(object[fnames], is.function))) stop("parametric weights require functions ", paste0("'", fnames, "'", collapse=", ")) if (any(!sapply(object[fnames], function(FUN) length(formals(FUN)) >= 3L))) stop("parametric weights functions must accept (not necessarily use)", "\n at least 3 arguments (parameter vector, ", "neighbourhood order matrix, data)") if (!is.vector(object$initial, mode="numeric") || length(object$initial) == 0L) stop("parametric weights require initial parameter values") TRUE } ### entry function for checks in hhh4() checkWeights <- function (weights, nUnits, nTime, nbmat, data, # only used for parametric weights check0diag = FALSE) { name <- deparse(substitute(weights)) # "control$ne$weights" ## check specification testweights <- if (is.array(weights)) weights else { if (is.list(weights) && checkWeightsFUN(weights) && checkNeighbourhood(nbmat)) { if (all(nbmat %in% 0:1)) warning("'", deparse(substitute(nbmat)), "' is binary (should contain", " general neighbourhood orders)") weights$w(weights$initial, nbmat, data) } else { stop("'", name, "' must be a matrix/array or a list of functions") } } ## apply matrix/array checks if (is.list(weights)) { # parametric weights if (length(dim(testweights)) > 2L) warning("time-varying parametric weights are not fully supported") checkWeightsArray(testweights, nUnits, nTime, name = paste0(name, "$w"), check0diag = check0diag, islands = any(.rowSums(nbmat, nUnits, nUnits) == 0)) dim.d <- length(weights$initial) dw <- weights$dw(weights$initial, nbmat, data) d2w <- weights$d2w(weights$initial, nbmat, data) if (dim.d == 1L && !is.list(dw) && !is.list(d2w)) { checkWeightsArray(dw, nUnits, nTime, name=paste0(name, "$dw")) checkWeightsArray(d2w, nUnits, nTime, name=paste0(name, "$d2w")) } else { if (!is.list(dw) || length(dw) != dim.d) stop("'", name, "$dw' must return a list (of matrices/arrays)", " of length ", dim.d) if (!is.list(d2w) || length(d2w) != dim.d*(dim.d+1)/2) stop("'", name, "$d2w' must return a list (of matrices/arrays)", " of length ", dim.d*(dim.d+1)/2) lapply(dw, checkWeightsArray, nUnits, nTime, name=paste0(name, "$dw[[i]]")) lapply(d2w, checkWeightsArray, nUnits, nTime, name=paste0(name, "$d2w[[i]]")) } } else 
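## fixed weight matrix/array supplied by the user: only the basic ## dimension and missingness checks below apply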
checkWeightsArray(testweights, nUnits, nTime, name = name, check0diag = check0diag) ## Done invisible(TRUE) } ############################################# ### Utility functions for fitted hhh4-objects ############################################# ### extract the (final) weight matrix/array from a fitted hhh4 object getNEweights <- function (object, pars = coefW(object), scale = ne$scale, normalize = ne$normalize) { ne <- object$control$ne weights <- if (is.list(ne$weights)) { # parametric weights nd <- length(ne$weights$initial) if (length(pars) != nd) stop("'pars' must be of length ", nd) ne$weights$w(pars, neighbourhood(object$stsObj), object$control$data) } else { # NULL or fixed weight structure ne$weights } if (is.null(normalize)) normalize <- FALSE # backward compatibility < 1.9-0 scaleNEweights.default(weights, scale, normalize) } ### extract parameters of neighbourhood weights from hhh4-object or coef vector coefW <- function (object) { coefs <- if (inherits(object, "hhh4")) object$coefficients else object coefW <- coefs[grep("^neweights", names(coefs))] names(coefW) <- sub("^neweights\\.", "", names(coefW)) coefW } surveillance/R/twinstim_epitest.R0000644000176200001440000002643113352704405016716 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Monte Carlo Permutation Test for Space-Time Interaction in "twinstim" ### ### Copyright (C) 2015-2016,2018 Sebastian Meyer ### $Revision: 2226 $ ### $Date: 2018-09-26 15:26:29 +0200 (Wed, 26. Sep 2018) $ ################################################################################ epitest <- function (model, data, tiles, method = "time", B = 199, eps.s = NULL, eps.t = NULL, fixed = NULL, verbose = TRUE, compress = FALSE, ...) { ## check input stopifnot(inherits(model, "twinstim"), inherits(data, "epidataCS"), model$converged, isScalar(B), B >= 1) B <- as.integer(B) method <- match.arg(method, choices = c("LRT", "simulate", "time", "space")) # eval(formals(permute.epidataCS)$what) if (model$npars["q"] == 0L) { stop("no epidemic component in 'model'") } if (.epilink(model) == "log") { warning("boundary issues with epidemic log-link; ", "refit with epilink=\"identity\"", immediate. 
= TRUE) } if (isTRUE(fixed)) { fixed <- setdiff(grep("^e\\.", names(coef(model)), value = TRUE), "e.(Intercept)") } else { stopifnot(is.null(fixed) || is.character(fixed)) } t0 <- model$timeRange[1L] # will not permute events before t0 T <- model$timeRange[2L] ## auxiliary function to compute the LRT statistic lrt <- function (m0, m1) { l0 <- m0$loglik l1 <- m1$loglik c(l0 = l0, l1 = l1, D = 2 * (l1 - l0), converged = isTRUE(m1$converged) && isTRUE(m0$converged)) } ## observed test statistic m0 <- update.twinstim(model, data = data, epidemic = ~0, siaf = NULL, tiaf = NULL, control.siaf = NULL, model = method == "simulate", cumCIF = FALSE, cores = 1, verbose = FALSE, optim.args = list(fixed = fixed, control = list(trace = 0))) if (!isTRUE(m0$converged)) { stop("endemic-only model did not converge") } LRT <- lrt(m0 = m0, m1 = model) STATISTIC_D <- structure(LRT["D"], l0 = LRT[["l0"]], l1 = LRT[["l1"]]) STATISTIC_R0 <- c("simpleR0" = simpleR0(model, eps.s = eps.s, eps.t = eps.t)) ## LRT p-value (CAVE: invalid for the default log-link models) DF <- length(coef(model)) - length(coef(m0)) # number of epidemic parameters PVAL_LRT <- pchisq(as.vector(STATISTIC_D), # drop attributes df = DF, lower.tail = FALSE) ## result template res <- list( method = "Likelihood Ratio Test for Space-Time Interaction", data.name = paste0(deparse(substitute(data)), "\ntwinstim: ", deparse(substitute(model))), statistic = STATISTIC_D, parameter = c("df" = DF), p.value = PVAL_LRT ) class(res) <- c("epitest", "htest") if (method == "LRT") { ## we are done return(res) } ## otherwise: determine the null distribution via permutation or simulation res$method <- if (method == "simulate") { paste("Test for Space-Time Interaction (based on", B, "endemic simulations)") } else { "Monte Carlo Permutation Test for Space-Time Interaction" } if (model$npars["q"] > 1L) { warning("epidemic covariate effects might not be identifiable for null data", immediate. = TRUE) } if (!is.finite(STATISTIC_R0)) { warning("observed 'simpleR0' test statistic is infinite; ", "maybe specify 'eps.*'", # or use D-based p.value ... immediate. = TRUE) } ## define a function which generates data under the null generateNullData <- if (method == "simulate") { if (missing(tiles)) stop("'tiles' is required for 'method = \"simulate\"'") rmarks <- .rmarks(data, t0 = t0, T = T) function() { events <- simEndemicEvents(m0, tiles = tiles) events@data <- cbind(events@data, rmarks(n = length(events))) as.epidataCS(events = events, stgrid = data$stgrid[,-1L], W = data$W, qmatrix = data$qmatrix, nCircle2Poly = attr(data$events$.influenceRegion, "nCircle2Poly"), clipper = "polyclip", verbose = FALSE) } } else { function() permute.epidataCS(data, what = method, keep = time <= t0) } ## interpret 'verbose' level .verbose <- if (is.numeric(verbose)) { if (verbose >= 2) { ## create '.verbose' expression to print test statistics stats2string <- function (lrt, simpleR0) paste0(c(names(lrt)[1:3], "simpleR0"), " = ", sprintf(paste0("%4.", c(0,0,1,2), "f"), c(lrt[1:3], simpleR0)), collapse = " | ") cat("Endemic/Epidemic log-likelihoods, LRT statistic, and simple R0:\n", stats2string(LRT, STATISTIC_R0), "\n", "\nResults from B=", B, if (method == "simulate") " endemic simulations" else paste0(" permutations of ", method), ## will actually not be printed if parallelized using clusters ... 
":\n", sep = "") substitute({ cat(STATS2STRING) if (!lrt["converged"]) { msg <- c(m0 = m0$converged, m1 = m1$converged) msg <- msg[msg != "TRUE"] cat(" | WARNING (", paste0(names(msg), collapse = " and "), "): ", paste0(unique(msg), collapse = " and "), sep = "") } cat("\n") }, list(STATS2STRING = body(stats2string))) } else { verbose <- verbose == 1 } } else verbose siafInt <- NULL if (method != "simulate") { ## if siafpars are fixed, determine siafInt for use in all permutations siafpars <- coeflist(model)$siaf if (length(siafpars) > 0L && all(names(siafpars) %in% fixed) && is.null(siafInt <- environment(model)$siafInt)) { if (!identical(FALSE, verbose)) cat("pre-evaluating 'siaf' integrals with fixed parameters ...\n") setup <- update.twinstim(model, data = data, optim.args = NULL, verbose = FALSE) assign("siafpars", siafpars, envir = environment(setup)) siafInt <- with(environment(setup), do.call("..siafInt", .siafInt.args)) } } ## define the function to be replicated B times: ## permute/simulate data, update epidemic model, compute endemic-only model, ## and compute test statistics permfits1 <- function (...) { ## depends on 'data', 'model', 'lrt', 'eps.s', 'eps.t', and 'fixed' .permdata <- generateNullData() .siafInt <- if (!is.null(siafInt)) { siafInt[match(row.names(.permdata$events), row.names(data$events))] } # else NULL ## sink(paste0("/tmp/trace_", Sys.getpid()), append = TRUE) m1 <- update.twinstim(model, data = .permdata, control.siaf = list(siafInt = .siafInt), model = FALSE, cumCIF = FALSE, cores = 1, verbose = FALSE, optim.args = list(fixed = fixed, control = list(trace = is.numeric(verbose) && verbose >= 3))) ## sink() m0 <- update.twinstim(m1, epidemic = ~0, siaf = NULL, tiaf = NULL, control.siaf = NULL, optim.args = list(control = list(trace = 0))) lrt <- lrt(m0, m1) simpleR0 <- simpleR0(m1, eps.s = eps.s, eps.t = eps.t) if (isTRUE(compress)) { # save memory m0[c("fitted", "fittedComponents", "R0")] <- m1[c("fitted", "fittedComponents", "R0")] <- list(NULL) } list(m0 = m0, m1 = m1, stats = c(lrt[1:3], simpleR0 = simpleR0, lrt["converged"])) } ## rock'n'roll (the computationally intensive part) permfits <- plapply(X = integer(B), FUN = permfits1, .verbose = .verbose, ...) ## if parallelized using forking with insufficient memory available, ## part of the replications in 'permfits' may be left unassigned (NULL) permIsNull <- vapply(X = permfits, FUN = is.null, FUN.VALUE = logical(1L), USE.NAMES = FALSE) if (npermIsNull <- sum(permIsNull)) { warning(npermIsNull, "/", B, " replications did not return (insufficient memory?)") permfits <- permfits[!permIsNull] } ## extract the statistics permstats <- as.data.frame(t(vapply( X = permfits, FUN = "[[", "stats", FUN.VALUE = numeric(5L), USE.NAMES = TRUE ))) permstats$converged <- as.logical(permstats$converged) ## compute permutation-based p-value PVAL_D <- mean(c(STATISTIC_D, permstats[permstats$converged, "D"]) >= STATISTIC_D) PVAL_R0 <- mean(c(STATISTIC_R0, permstats[permstats$converged, "simpleR0"]) >= STATISTIC_R0) ## set results res$statistic <- structure(STATISTIC_R0, "D" = unname(STATISTIC_D)) res$parameter <- c("B" = sum(permstats$converged)) res$p.value <- structure(PVAL_R0, "D-based" = PVAL_D, "LRT" = PVAL_LRT) res$permfits <- permfits res$permstats <- permstats res } coef.epitest <- function (object, which = c("m1", "m0"), ...) 
{ which <- match.arg(which) permcoefs <- vapply(X = object$permfits, FUN = function (x) coef(x[[which]]), FUN.VALUE = coef(object$permfits[[1L]][[which]]), USE.NAMES = TRUE) t(permcoefs) } plot.epitest <- function (x, teststat = c("simpleR0", "D"), ...) { teststat <- match.arg(teststat) defaultArgs <- switch(teststat, "simpleR0" = list( permstats = x$permstats$simpleR0, xmarks = setNames(x$statistic, "observed"), xlab = expression("Simple " * R[0]) ), "D" = list( permstats = x$permstats$D, xmarks = setNames(attr(x$statistic, "D"), "observed"), xlab = expression(D == 2 %.% log(L[full]/L[endemic])) ) ) args <- modifyList(defaultArgs, list(...)) if (is.null(args[["permstats"]])) stop("nothing to plot (no 'permstats' available)") do.call("permtestplot", args) } ## auxiliary function also used by plot.knox(), permutationTest(), ... permtestplot <- function (permstats, xmarks = NULL, xlab = "test statistic", ...) { defaultArgs <- list( data = permstats, xlab = xlab, col = "lavender", main = "Monte Carlo permutation test for space-time interaction", xlim = extendrange(c(permstats, xmarks)) ) do.call("truehist", modifyList(defaultArgs, list(...), keep.null = TRUE)) if (!is.null(xmarks)) { abline(v = xmarks, lwd = 2) axis(3, at = xmarks, labels = names(xmarks), # if NULL the value is used tick = FALSE, line = -1, font = 2) } invisible(NULL) } surveillance/R/farringtonFlexible.R0000644000176200001440000007617714024124757017135 0ustar liggesusers# ____________________________ # |\_________________________/|\ # || || \ # || algo.farrington || \ # || new version || | # || || | # || || | # || || | # || || | # || || / # ||_________________________|| / # |/_________________________\|/ # __\_________________/__/|_ # |_______________________|/ ) # ________________________ (__ # /oooo oooo oooo oooo /| _ )_ # /ooooooooooooooooooooooo/ / (_)_(_) # /ooooooooooooooooooooooo/ / (o o) #/C=_____________________/_/ ==\o/== # Version of 26.06.2013 # M.Salmon, M.Hoehle ################################################################################ # CONTENTS ################################################################################ # # MAIN FUNCTION # Function that manages input and output. # # RESIDUALS FUNCTION # Function that calculates Anscombe residuals. # # WEIGHTS FUNCTION # Function that calculates weights based on these residuals. # # FORMULA FUNCTION # Function that writes a formula for the glm using Booleans from control. # # FIT GLM FUNCTION # Function that fits a GLM. If it does not converge this function tries to fit it without time trend. # # THRESHOLD FUNCTION # Function that calculates the lower and upper threshold, the probability of observing a count that is >= observed, and the score. # There are two versions of this function depending on the method chosen. # # BLOCKS FUNCTION # Function that creates the factor variable for the glm. # # DATA GLM FUNCTION # Function that prepares data for the glm. # # GLM FUNCTION # Function that calls fit glm, checks the time trend and calculates the prediction for the current timepoint.
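# # USAGE SKETCH # Hedged, illustrative control settings only ("salmonella.agona" is a "disProg" # dataset shipped with the package): # data("salmonella.agona") # salm <- disProg2sts(salmonella.agona) # convert to an "sts" object # res <- farringtonFlexible(salm, control = list(b = 4, w = 3, alpha = 0.01, noPeriods = 10)) # plot(res)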
################################################################################ # END OF CONTENTS ################################################################################ ################################################################################ # MAIN FUNCTION ################################################################################ farringtonFlexible <- function(sts, control = list( range = NULL, # range of time points to be monitored b = 5, # how many years to go back in time? w = 3, # half-window length reweight = TRUE, # reweighting past outbreaks? weightsThreshold = 2.58, # with which threshold? verbose = FALSE, # printing information? glmWarnings = TRUE, # printing warning from glm.fit? alpha = 0.05, # (one-sided) (1-alpha)% prediction interval trend = TRUE, # include a time trend when possible? pThresholdTrend = 0.05, # which pvalue for the time trend is significant? limit54 = c(5,4), # ignore if <5 reports during the past 4 weeks powertrans = "2/3", # power transformation for the data fitFun = "algo.farrington.fitGLM.flexible", # which function to use? populationOffset = FALSE, # use a population offset in the model? noPeriods = 1, # how many periods between windows around reference weeks? pastWeeksNotIncluded = NULL, # how many past weeks not to take into account? thresholdMethod = "delta" # which method for calculating the threshold? )) { ###################################################################### # Use special Date class mechanism to find reference months/weeks/days ###################################################################### epochAsDate <- sts@epochAsDate ###################################################################### # Fetch observed and population ###################################################################### # Fetch observed observed <- observed(sts) freq <- sts@freq if (epochAsDate) { epochStr <- switch( as.character(freq), "12" = "month","52" = "week", "365" = "day") } else { epochStr <- "none" } # Fetch population population <- population(sts) ###################################################################### # Fix missing control options ###################################################################### defaultControl <- eval(formals()$control) control <- modifyList(defaultControl, control, keep.null = TRUE) if (is.null(control$range)) { control$range <- (freq*control$b + control$w + 1):nrow(observed) ## NOTE: this default is different from algo.farrington() } # Use factors in the model? Depends on noPeriods, no input from the user. control$factorsBool <- control$noPeriods != 1 # How many past weeks not to take into account? if (is.null(control$pastWeeksNotIncluded)) { control$pastWeeksNotIncluded <- control$w } # there is only one fitFun at the moment control$fitFun <- match.arg(control$fitFun, c("algo.farrington.fitGLM.flexible")) # extract the threshold method thresholdMethod <- match.arg(control$thresholdMethod, c("delta", "nbPlugin", "muan")) # Adapt the argument for the glm function control$typePred <- switch(thresholdMethod, "delta" = "response", "nbPlugin" = "link", "muan" = "link") # Which threshold function? 
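# ("delta" implements the Farrington et al. (1996) approach, i.e. a prediction # interval on a power-transformed scale derived via the delta method, whereas # "nbPlugin" and "muan" use negative binomial quantiles as in the variant of # Noufaily et al. (2013); see the threshold functions below)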
control$thresholdFunction <- switch(thresholdMethod, "delta" = "algo.farrington.threshold.farrington", "nbPlugin" = "algo.farrington.threshold.noufaily", "muan" = "algo.farrington.threshold.noufaily") # check options if (!((control$limit54[1] >= 0) && (control$limit54[2] > 0))) { stop("The limit54 arguments are out of bounds: cases >= 0 and period > 0.") } ###################################################################### # Initialize the necessary vectors ###################################################################### score <- trend <- pvalue <- expected <- mu0Vector <- phiVector <- trendVector <- matrix(data = 0, nrow = length(control$range), ncol = ncol(sts)) # Define objects n <- control$b*(2*control$w+1) # loop over columns of sts for (j in 1:ncol(sts)) { #Vector of dates if (epochAsDate) { vectorOfDates <- as.Date(sts@epoch, origin="1970-01-01") } else { vectorOfDates <- seq_len(length(observed[,j])) } # Loop over control$range for (k in control$range) { ###################################################################### # Prepare data for the glm ###################################################################### dayToConsider <- vectorOfDates[k] diffDates <- diff(vectorOfDates) dataGLM <- algo.farrington.data.glm(dayToConsider=dayToConsider, b=control$b, freq=freq, epochAsDate=epochAsDate, epochStr=epochStr, vectorOfDates=vectorOfDates,w=control$w, noPeriods=control$noPeriods, observed=observed[,j],population=population, verbose=control$verbose, pastWeeksNotIncluded=control$pastWeeksNotIncluded,k) ###################################################################### # Fit the model ###################################################################### finalModel <- algo.farrington.glm(dataGLM,timeTrend=control$trend,populationOffset=control$populationOffset, factorsBool=control$factorsBool,reweight=control$reweight, weightsThreshold=control$weightsThreshold, pThresholdTrend=control$pThresholdTrend,b=control$b, noPeriods=control$noPeriods,typePred=control$typePred, fitFun=control$fitFun,glmWarnings=control$glmWarnings, epochAsDate=epochAsDate,dayToConsider=dayToConsider, diffDates=diffDates,populationNow=population[k,j],k, verbose=control$verbose) if (is.null(finalModel)) { #Do we have an alarm -- i.e. is observation beyond CI?? 
#upperbound only relevant if we can have an alarm (enoughCases) sts@alarm[k,j] <- NA sts@upperbound[k,j] <- NA mu0Vector[(k-min(control$range)+1),j] <- NA # Get overdispersion phiVector[(k-min(control$range)+1),j] <- NA # Get score score[(k-min(control$range)+1),j] <- NA #Compute bounds of the predictive pvalue[(k-min(control$range)+1),j] <- NA # Time trend trendVector[(k-min(control$range)+1),j] <- NA trend[(k-min(control$range)+1),j] <- NA warning(paste("The model could not converge with nor without time trend at timepoint ", k," so no result can be given for timepoint ", k,".\n")) } else { pred <- finalModel$pred doTrend <- finalModel$doTrend coeffTime <- finalModel$coeffTime ###################################################################### # Calculate lower and upper threshold ###################################################################### argumentsThreshold <- list(predFit=pred$fit,predSeFit=pred$se.fit, phi=finalModel$phi, skewness.transform=control$powertrans, alpha=control$alpha, y=observed[k,j], method=control$thresholdMethod ) lu <- do.call(control$thresholdFunction, args=argumentsThreshold) ###################################################################### # Postprocessing steps & output ###################################################################### #Compute exceedance score unless less than 5 reports during last 4 weeks. #Changed in version 0.9-7 - current week is included now enoughCases <- (sum(observed[(k-control$limit54[2]+1):k,j]) >=control$limit54[1]) #18 May 2006: Bug/unexpected feature found by Y. Le Strat. #the okHistory variable meant to protect against zero count problems, #but instead it resulted in exceedance score == 0 for low counts. #Now removed to be concordant with the Farrington 1996 paper. X <- ifelse(enoughCases,lu$score,NA) #Do we have an alarm -- i.e. is observation beyond CI?? #upperbound only relevant if we can have an alarm (enoughCases) sts@alarm[k,j] <- !is.na(X) && (X>1) && observed[k,j]!=0 sts@upperbound[k,j] <- ifelse(enoughCases,lu$upper,NA) # Possible bug alarm although upperbound <- 0? 
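# Note: the exceedance score is X = (y - fitted) / (upperbound - fitted), so # X > 1 corresponds to an observation above the upper bound; alarms moreover # require y > 0 and sufficiently many cases in the recent past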
# Calculate expected value from glm if (is.na(lu$upper)==FALSE) { if ( control$typePred=="response"){ expected[(k-min(control$range)+1),j] <- ifelse(enoughCases,pred$fit,NA) } else{ expected[(k-min(control$range)+1),j] <- ifelse(enoughCases,exp(pred$fit),NA) } } else { expected[(k-min(control$range)+1),j] <- NA } # Calculate mean of the negbin distribution of the observation # Use linear predictor mean and sd eta0 <- pred$fit seEta0 <- pred$se.fit # deduce the quantile for mu0 from eta0 which is normally distributed if (control$thresholdMethod=='nbPlugin'){ mu0Vector[(k-min(control$range)+1),j] <- exp(eta0) } else { mu0Vector[(k-min(control$range)+1),j] <- exp(qnorm(1-control$alpha, mean=eta0, sd=seEta0)) } # Get overdispersion phiVector[(k-min(control$range)+1),j] <- finalModel$phi # Get score score[(k-min(control$range)+1),j] <- lu$score #Compute bounds of the predictive pvalue[(k-min(control$range)+1),j] <- lu$prob # Time trend if(doTrend) { trendVector[(k-min(control$range)+1),j] <- coeffTime trend[(k-min(control$range)+1),j] <- 1 } else { trendVector[(k-min(control$range)+1),j] <- NA } } }#done looping over all time points } #end of loop over cols in sts sts@control$score <- score sts@control$pvalue <- pvalue sts@control$expected <- expected sts@control$mu0Vector <- mu0Vector sts@control$phiVector <- phiVector sts@control$trendVector <- trendVector sts@control$trend <- trend #Done return(sts[control$range,]) } ################################################################################ # END OF MAIN FUNCTION ################################################################################ ################################################################################ # REFERENCE TIME POINTS FUNCTION ################################################################################ algo.farrington.referencetimepoints <- function(dayToConsider,b=control$b,freq=freq,epochAsDate,epochStr){ if (epochAsDate) { referenceTimePoints <- as.Date(seq(as.Date(dayToConsider, origin="1970-01-01"), length.out=(b+1), by="-1 year")) } else { referenceTimePoints <- seq(dayToConsider, length.out=(b+1), by=-freq) if (referenceTimePoints[b+1]<=0){ warning("Some reference values did not exist (index<1).") } } if (epochStr == "week") { # get the date of the Mondays/Tuesdays/etc so that it compares to # the reference data # (Mondays for Mondays for instance) # Vectors of same days near the date (usually the same week) # dayToGet dayToGet <- as.numeric(format(dayToConsider, "%w")) actualDay <- as.numeric(format(referenceTimePoints, "%w")) referenceTimePointsA <- referenceTimePoints - (actualDay - dayToGet) # Find the other "same day", which is either before or after referenceTimePoints referenceTimePointsB <- referenceTimePointsA + ifelse(referenceTimePointsA>referenceTimePoints,-7,7) # For each year choose the closest Monday/Tuesday/etc # The order of referenceTimePoints is NOT important AB <- cbind(referenceTimePointsA,referenceTimePointsB) ABnumeric <- cbind(as.numeric(referenceTimePointsA),as.numeric(referenceTimePointsB)) distMatrix <- abs(ABnumeric-as.numeric(referenceTimePoints)) idx <- (distMatrix[,1]>distMatrix[,2])+1 referenceTimePoints <- as.Date(AB[cbind(1:dim(AB)[1],idx)],origin="1970-01-01") } return(referenceTimePoints) } ################################################################################ # END OF REFERENCE TIME POINTS FUNCTION ################################################################################ 
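# Hedged illustration (weekly data with epochAsDate=TRUE): # algo.farrington.referencetimepoints(as.Date("2013-06-26"), b=2, freq=52, # epochAsDate=TRUE, epochStr="week") # returns the Wednesday closest to 26 June in each of 2013, 2012 and 2011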
################################################################################ # RESIDUALS FUNCTION # anscombe.residuals(m,phi) # is defined in algo_farrington.R ################################################################################ ################################################################################ # WEIGHTS FUNCTION # algo.farrington.assign.weights(s,weightsThreshold) # is defined in algo_farrington.R ################################################################################ ################################################################################ # FORMULA FUNCTION ################################################################################ # Function for writing the appropriate formula depending on timeTrend, # populationOffset and factorsBool formulaGLM <- function(populationOffset=FALSE,timeBool=TRUE,factorsBool=FALSE){ # Description # Args: # populationOffset, timeBool, factorsBool: logical switches indicating # whether to include the corresponding term in the model # Returns: # The model formula as a string # Smallest formula formulaString <- "response ~ 1" # With time trend? if (timeBool){ formulaString <- paste(formulaString,"+wtime",sep ="")} # With population offset? if(populationOffset){ formulaString <- paste(formulaString,"+offset(log(population))",sep ="")} # With factors? if(factorsBool){ formulaString <- paste(formulaString,"+seasgroups",sep ="")} # Return formula as a string return(formulaString) } ################################################################################ # END OF FORMULA FUNCTION ################################################################################ ################################################################################ # FIT GLM FUNCTION ################################################################################ algo.farrington.fitGLM.flexible <- function(dataGLM, timeTrend,populationOffset,factorsBool,reweight,weightsThreshold,glmWarnings,verbose,control,...) { # Model formula depends on whether to include a time trend or not. theModel <- formulaGLM(populationOffset,timeBool=timeTrend,factorsBool) # Fit it -- this is slow. An improvement would be to use glm.fit here. # This would change the syntax, however. if (glmWarnings) { model <- glm(formula(theModel),data=dataGLM,family = quasipoisson(link="log")) } else { model <- suppressWarnings(glm(formula(theModel),data=dataGLM,family = quasipoisson(link="log"))) } #Check convergence - if no convergence we return empty handed.
if (!model$converged) { #Try without time dependence if (timeTrend) { theModel <- formulaGLM(populationOffset,timeBool=F,factorsBool) if (glmWarnings) { model <- glm(as.formula(theModel), data=dataGLM, family = quasipoisson(link="log")) } else { model <- suppressWarnings(glm(as.formula(theModel), data=dataGLM, family = quasipoisson(link="log"))) } if (verbose) {cat("Warning: No convergence with timeTrend -- trying without.\n")} } if (!model$converged) { if (verbose) {cat("Warning: No convergence in this case.\n")} if (verbose) {print(dataGLM[,c("response","wtime"),exact=TRUE])} return(NULL) } } #Overdispersion parameter phi phi <- max(summary(model)$dispersion,1) #In case reweighting using Anscombe residuals is requested if (reweight) { s <- anscombe.residuals(model,phi) omega <- algo.farrington.assign.weights(s,weightsThreshold) if (glmWarnings) { model <- glm(as.formula(theModel),data=dataGLM, family=quasipoisson(link="log"), weights=omega) } else { model <- suppressWarnings(glm(as.formula(theModel),data=dataGLM, family=quasipoisson(link="log"), weights=omega)) } #Here, the overdispersion often becomes small, so we use the max #to ensure we don't operate with quantities less than 1. phi <- max(summary(model)$dispersion,1) } # end of refit. #Add wtime, response and phi to the model model$phi <- phi model$wtime <- dataGLM$wtime model$response <- dataGLM$response model$population <- dataGLM$population if (reweight) { model$weights <- omega } else{ model$weights <- model$weights } #Done return(model) } ################################################################################ # END OF FIT GLM FUNCTION ################################################################################ ################################################################################ # THRESHOLD FUNCTION FARRINGTON ################################################################################ algo.farrington.threshold.farrington <- function(predFit,predSeFit,phi, skewness.transform, alpha,y,method){ #Fetch mu0 and var(mu0) from the prediction object mu0 <- predFit tau <- phi + (predSeFit^2)/mu0 #Standard deviation of prediction, i.e. sqrt(var(h(Y_0)-h(\mu_0))) switch(skewness.transform, "none" = { se <- sqrt(mu0*tau); exponent <- 1}, "1/2" = { se <- sqrt(1/4*tau); exponent <- 1/2}, "2/3" = { se <- sqrt(4/9*mu0^(1/3)*tau); exponent <- 2/3}, { stop("No proper exponent in algo.farrington.threshold.")}) #Note that lu can contain NA's if e.g.
(-1.47)^(3/2) lu <- sort((mu0^exponent + c(-1,1)*qnorm(1-alpha)*se)^(1/exponent), na.last=FALSE) #Ensure that lower bound is non-negative lu[1] <- max(0,lu[1],na.rm=TRUE) # probability associated to the observed value as quantile # hoehle 2018-09-12: fixed p-value bug detected by Lore Merdrignac q <- pnorm( y^(exponent), mean=mu0^exponent, sd=se,lower.tail=FALSE) # calculate score x <- ifelse(is.na(lu[2])==FALSE,(y - predFit) / (lu[2] - predFit),NA) return(list(lower=lu[1],upper=lu[2],prob=q,score=x)) } ################################################################################ # END OF THRESHOLD FUNCTION FARRINGTON ################################################################################ ################################################################################ # THRESHOLD FUNCTION NOUFAILY ################################################################################ algo.farrington.threshold.noufaily <- function(predFit,predSeFit,phi, skewness.transform, alpha,y,method){ # method of Angela Noufaily with modifications # Use linear predictor mean and sd eta0 <- predFit seEta0 <- predSeFit # deduce the quantile for mu0 from eta0 which is normally distributed if (method=='nbPlugin'){ mu0Quantile <- exp(eta0) } else { mu0Quantile <- exp(qnorm(1-alpha, mean=eta0, sd=seEta0)) } if (mu0Quantile==Inf){ lu <- c(NA,NA) q <- NA # else is when the method is "muan" } else{ # Two cases depending on phi value if (phi>1){ lu<-c(qnbinom(alpha,mu0Quantile/(phi-1),1/phi), qnbinom(1-alpha,mu0Quantile/(phi-1),1/phi)) } else { lu<-c(qpois(alpha,mu0Quantile),qpois(1-alpha,mu0Quantile)) } # cannot be negative lu[1]=max(0,lu[1]) # probability associated to the observed value as quantile if (phi!=1){ q <- pnbinom(q= y-1 ,size=mu0Quantile/(phi-1),prob=1/phi,lower.tail=FALSE) } else{ q <- ppois(y-1,mu0Quantile,lower.tail=FALSE) } } # calculate score x <- ifelse(is.na(lu[2])==FALSE,(y - predFit) / (lu[2] - predFit),NA) return(list(lower=lu[1],upper=lu[2],prob=q,score=x)) } ################################################################################ # END OF THRESHOLD FUNCTION NOUFAILY ################################################################################ ################################################################################ # BLOCKS FUNCTION ################################################################################ blocks <- function(referenceTimePoints,vectorOfDates,freq,dayToConsider,b,w,p, epochAsDate) { ## INPUT # freq: are we dealing with daily/weekly/monthly data? # b: how many years to go back in time # w: half window length around the reference timepoints # p: number of noPeriods one wants the year to be split into ## VECTOR OF ABSOLUTE NUMBERS # Very useful to write the code! 
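# (simply the running index 1:length(vectorOfDates) of the monitored series)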
  vectorOfAbsoluteNumbers <- seq_len(length(vectorOfDates))

  # logical vector indicating where the referenceTimePoints
  # are in the vectorOfDates
  referenceTimePointsOrNot <- vectorOfDates %in% referenceTimePoints

  ## VECTOR OF FACTORS
  vectorOfFactors <- rep(NA,length(vectorOfDates))

  ## SETTING THE FACTORS
  # Current week
  if (epochAsDate==FALSE){
    now <- which(vectorOfDates==dayToConsider)
  } else {
    now <- which(vectorOfDates==as.Date(dayToConsider))
  }
  vectorOfFactors[(now-w):now] <- p

  # Reference weeks
  referenceWeeks <- rev(as.numeric(
    vectorOfAbsoluteNumbers[referenceTimePointsOrNot]))

  for (i in 1:b) {
    # reference week
    refWeek <- referenceWeeks[i+1]
    vectorOfFactors[(refWeek-w):(refWeek+w)] <- p
    # The rest is only useful if one wants factors; otherwise we only have
    # reference time points as in the old algo.farrington
    if (p!=1){
      # Number of time points to be shared between the windows
      period <- referenceWeeks[i] - 2 * w - 1 - refWeek
      # Check that p is not too big
      if (period < (p-(2*w+1))){ stop('Number of factors too big!') }
      # Determine the length of the blocks
      lengthOfBlocks <- period %/% (p-1)
      rest <- period %% (p-1)
      vectorLengthOfBlocks <- rep(lengthOfBlocks,p-1)
      # share the remainder of the Euclidean division among the first blocks
      add <- seq_len(rest)
      vectorLengthOfBlocks[add] <- vectorLengthOfBlocks[add]+1
      # slight transformation necessary for the upcoming code with cumsum
      vectorLengthOfBlocks <- c(0,vectorLengthOfBlocks)
      # fill the vector
      for (j in 1:(p-1)) {
        vectorOfFactors[(refWeek+w+1+cumsum(vectorLengthOfBlocks)[j]):
                        (refWeek+w+1+cumsum(vectorLengthOfBlocks)[j+1]-1)] <- j
      }
    }
  }
  ## DONE!
  return(vectorOfFactors)
}
################################################################################
# END OF BLOCKS FUNCTION
################################################################################

################################################################################
# DATA GLM FUNCTION
################################################################################
algo.farrington.data.glm <- function(dayToConsider, b, freq,
                                     epochAsDate,epochStr,
                                     vectorOfDates,w,noPeriods,
                                     observed,population,
                                     verbose,pastWeeksNotIncluded,k){

  # Identify reference time points
  # Same date but with one year, two years, etc. of lag
  # b+1 because we need to have the current week in the vector
  referenceTimePoints <- algo.farrington.referencetimepoints(dayToConsider,b=b,
                                                             freq=freq,
                                                             epochAsDate=epochAsDate,
                                                             epochStr=epochStr)

  # The earliest reference time point must exist in the observed time range
  if (!(min(referenceTimePoints) %in% vectorOfDates)){
    stop("Some reference values did not exist (index<1).")
  }

  if (verbose) { cat("k=", k,"\n") }

  # Create the blocks for the noPeriods between the windows (including windows)
  # If noPeriods=1 this is just a way of identifying the windows.
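  # Illustration (hypothetical numbers, not run): with w=3 and noPeriods=8,
  # each 7-point window around a reference time point gets the factor level
  # p=8, and every gap between consecutive windows is split into p-1=7 blocks
  # (levels 1..7) of near-equal length; e.g. a gap of 70 time points yields
  # seven blocks of length 10, and any remainder of the integer division is
  # spread over the first blocks.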
  blocks <- blocks(referenceTimePoints,vectorOfDates,epochStr,dayToConsider,
                   b,w,noPeriods,epochAsDate)

  # Option for not taking the pastWeeksNotIncluded most recent weeks into
  # account, to avoid adaptation of the model to emerging outbreaks
  blocksID <- blocks
  blocksID[(k-pastWeeksNotIncluded):k] <- NA

  # Extract values for the time points of interest only
  blockIndexes <- which(is.na(blocksID)==FALSE)

  # Time
  # if epochAsDate, make sure wtime has an increment of 1
  if (epochAsDate){
    wtime <- (as.numeric(vectorOfDates[blockIndexes])-
              as.numeric(vectorOfDates[blockIndexes][1]))/as.numeric(diff(vectorOfDates))[1]
  } else {
    wtime <- as.numeric(vectorOfDates[blockIndexes])
  }

  # Factors
  seasgroups <- as.factor(blocks[blockIndexes])

  # Observed
  response <- observed[blockIndexes]

  # Population
  pop <- population[blockIndexes]

  if (verbose) { print(response) }

  dataGLM <- data.frame(response=response,wtime=wtime,population=pop,
                        seasgroups=seasgroups,
                        vectorOfDates=vectorOfDates[blockIndexes])
  dataGLM <- dataGLM[is.na(dataGLM$response)==FALSE,]

  return(dataGLM)
}
################################################################################
# END OF DATA GLM FUNCTION
################################################################################

################################################################################
# GLM FUNCTION
################################################################################
algo.farrington.glm <- function(dataGLM,timeTrend,populationOffset,factorsBool,
                                reweight,weightsThreshold,pThresholdTrend,b,
                                noPeriods,typePred,fitFun,glmWarnings,epochAsDate,
                                dayToConsider,diffDates,populationNow,k,verbose) {

  arguments <- list(dataGLM=dataGLM,
                    timeTrend=timeTrend,
                    populationOffset=populationOffset,
                    factorsBool=factorsBool,reweight=reweight,
                    weightsThreshold=weightsThreshold,glmWarnings=glmWarnings,
                    verbose=verbose,control=control)
  model <- do.call(fitFun, args=arguments)

  #Pass on NULL values from the algo.farrington.fitGLM procedure.
  if (is.null(model)) return(model)

  ######################################################################
  #Time trend
  ######################################################################
  #Check whether to include a time trend; for this we need to check whether
  #1) wtime is significant at the chosen level (pThresholdTrend),
  #2) the predicted value does not exceed the maximum observed value, and
  #3) the historical data span at least 3 years.
  doTrend <- NULL

  # if the model converged with a time trend
  if ("wtime" %in% names(coef(model))){
    # get the prediction for k
    if(epochAsDate){
      wtime <- (as.numeric(dayToConsider)-as.numeric(dataGLM$vectorOfDates[1]))/as.numeric(diffDates)[1]
    } else {
      wtime <- c(k)
    }
    pred <- predict.glm(model,newdata=data.frame(wtime=wtime,
                                                 population=populationNow,
                                                 seasgroups=factor(noPeriods),
                                                 dispersion=model$phi),
                        se.fit=TRUE,type="response")
    # check whether the three criteria hold
    #is the p-value for the trend below pThresholdTrend?
    significant <- (summary.glm(model)$coefficients["wtime",4] < pThresholdTrend)
    #have to use at least three years of data to allow for a trend
    atLeastThreeYears <- (b>=3)
    #no extrapolation beyond the maximum observed value
    noExtrapolation <- (pred$fit <= max(dataGLM$response,na.rm=TRUE))
    #All 3 criteria have to be met in order to include the trend. Otherwise
    #it is removed. Only necessary to check this if a trend is requested.
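    # (If any criterion fails, 'arguments$timeTrend' is set to FALSE below
    #  and the model is refitted without the trend term.)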
doTrend <- (atLeastThreeYears && significant && noExtrapolation) # if not then refit if (doTrend==FALSE) { arguments$timeTrend=FALSE model <- do.call(fitFun, args=arguments) } } else { doTrend <- FALSE } #done with time trend ###################################################################### ###################################################################### # Calculate prediction # ###################################################################### #Predict value if(epochAsDate){ wtime=(as.numeric(dayToConsider)-as.numeric(dataGLM$vectorOfDates[1]))/as.numeric(diffDates)[1] } else { wtime <- c(k) } pred <- predict.glm(model,newdata=data.frame(wtime=wtime, population=populationNow, seasgroups=factor(noPeriods), dispersion=model$phi),se.fit=TRUE,type=typePred) coeffTime=ifelse(doTrend,summary.glm(model)$coefficients["wtime",1],NA) finalModel <- list (pred,doTrend,coeffTime,model$phi) names(finalModel) <- c("pred","doTrend","coeffTime","phi") return(finalModel) } ################################################################################ # END OF GLM FUNCTION ################################################################################ surveillance/R/algo_outbreakP.R0000644000176200001440000001274313607336043016244 0ustar liggesusers###################################################################### # Workhorse computing the OutbreakP statistic. # Alarm statistic at end time n is returned. # # Author: # Michael Hoehle # # R port of the Java code by Marianne Frisen & Linus Schioler from # the CASE project. See https://smisvn.smi.se/case/ # # For a manual on how to use the method see also # http://www.hgu.gu.se/item.aspx?id=16857 # # Date: # 25 May 2010 # # Parameters: # x -- the series with the counts # # Returns: # value of the alarm statistic at the end of the series x. ###################################################################### calc.outbreakP.statistic <- function(x) { #Length of the monitored series n <- length(x) #Index problem when converting java arrays to R arrays x <- c(0,x) #Initialization (not all parts might be needed) leftl <- numeric(n+1) y <- numeric(n+1) yhat <- numeric(n+1) sumwy <- numeric(n+1) sumwys <- numeric(n+1) sumw <- numeric(n+1) w <- numeric(n+1) meanl <- numeric(n+1) xbar <- 0 meanl[1] = -Inf leftl[1] = 0 for (i in 1:n) { #Initialize yhat[i+1] <- x[i+1] sumwy[i+1] <- x[i+1] sumw[i+1] <- 1 meanl[i+1] <- x[i+1] leftl[i+1] <- i #Calculate mean (this is a sequential formula to calculate mean(x[1:i])) xbar=xbar+(x[i+1]-xbar)/i #Create plateaus while (meanl[i+1] <= meanl[ (leftl[i+1] - 1) + 1]) { #merge sets sumwy[i+1] = sumwy[i+1] + sumwy[(leftl[i+1] - 1)+1] sumw[i+1] = sumw[i+1] + sumw[(leftl[i+1] - 1)+1] meanl[i+1] = sumwy[i+1] / sumw[i+1] leftl[i+1] = leftl[(leftl[i+1] - 1)+1] } #calculate yhat for (j in leftl[i+1]:i) { yhat[j+1] = meanl[i+1] } } #Compute the statistic in case of a Poisson distribution alarm.stat <- 1 for (j in seq_len(n)) { #Ensure 0/0 = 1 so we don't get NaNs div <- ifelse(yhat[j+1]==0 & xbar==0, 1, yhat[j+1]/xbar) alarm.stat <- alarm.stat * (div)^x[j+1] } return(alarm.stat) ## The above might cause NaN's in case of large numbers. ## logalarm <- 0 ## for (j in 1:n) { ## #Eqn (5) in Frisen et al paper in log form. However: it is undefined ## #if \hat{\mu}^D(t) == 0 (it is a division by zero). 
## #We fix 0/0 = 1 ## if (xbar != 0) { ## if (yhat[j+1] != 0) { #if \hat{\mu}^{C1} == 0 then ## logalarm = logalarm + x[j+1] * (log(yhat[j+1]) - log(xbar)) ## } ## } else { ## if (yhat[j+1] != 0) { ## stop("Division by zero in Eqn (5) of Frisen paper!") ## } ## } ## } ## #Done, return the value ## return(exp(logalarm)) } ###################################################################### # The detection function in S3 style ###################################################################### algo.outbreakP <- function(disProgObj, control = list(range = range, k=100, ret=c("cases","value"),maxUpperboundCases=1e5)) { #Set threshold to some fixed value, i.e. 100 if(is.null(control[["k",exact=TRUE]])) control$k <- 100 #Set largest observed value to try as upperbound when numerically searching #for NNBA in case ret = "cases" if(is.null(control[["maxUpperboundCases",exact=TRUE]])) control$maxUpperboundCases <- 1e5 #Which value to return in upperbound? control$ret <- match.arg(control$ret, c("value","cases")) #Initialize the necessary vectors alarm <- matrix(data = 0, nrow = length(control$range), ncol = 1) upperbound <- matrix(data = 0, nrow = length(control$range), ncol = 1) observed <- disProgObj$observed #Store results count <- 1 for(i in control$range) { statistic <- calc.outbreakP.statistic( observed[seq_len(i)] ) # store the results in the right order alarm[count] <- statistic > control$k #Find NNBA or just return value of the test statistic (faster) if (control$ret == "cases") { #If length is 1 no alarm can be given unless k<1 if (i<=1) { upperbound[count] <- ifelse(control$k>=1, NA, 0) } else { if (is.nan(statistic)) { #if no decent statistic was computed. upperbound[count] <- NA } else { #Go up or down delta <- ifelse(alarm[count], -1, 1) #Initialize observedi <- observed[i] foundNNBA <- FALSE #Loop with modified last observation until alarm is caused (dx=1) #or until NO alarm is caused anymore (dx=-1) while ( ((delta == -1 & observedi > 0) | (delta == 1 & observedi < control$maxUpperboundCases)) & (!foundNNBA)) { observedi <- observedi + delta newObserved <- c(observed[seq_len(i-1)],observedi) statistic <- calc.outbreakP.statistic( newObserved ) if (is.nan(statistic)) { #statistic produced a numeric overflow. observedi <- control$maxUpperboundCases } else { foundNNBA <- (statistic > control$k) == ifelse(alarm[count],FALSE,TRUE) } } upperbound[count] <- ifelse( foundNNBA, observedi + ifelse(alarm[count],1,0), NA) } } } else { upperbound[count] <- statistic } #Advance time index count <- count + 1 } #Add name and data name to control object. control$name <- paste("outbreakP(",control$k,")",sep="") control$data <- paste(deparse(substitute(disProgObj))) # return alarm and upperbound vectors result <- list(alarm = alarm, upperbound = upperbound, disProgObj=disProgObj, control=control) class(result) = "survRes" # for surveillance system result return(result) } surveillance/R/ks.plot.unif.R0000644000176200001440000001321414013521730015621 0ustar liggesusers################# # Plot the empirical distribution function of a sample from U(0,1) # together with a confidence band of the corresponding K-S-test. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#
# Parts of the code are taken from stats::ks.test, which has
# copyright 1995-2012 by The R Core Team under GPL-2 (or later).
# Furthermore, the C function calls are taken from
# http://svn.r-project.org/R/trunk/src/library/stats/src/ks.c (as at 2012-08-16),
# which similarly is Copyright (C) 1999-2009 by the R Core Team
# and available under GPL-2. Their code notes that parts of it
# use code published in
# George Marsaglia and Wai Wan Tsang and Jingbo Wang (2003),
# "Evaluating Kolmogorov's distribution".
# Journal of Statistical Software, Volume 8, 2003, Issue 18.
# URL: http://www.jstatsoft.org/v08/i18/.
#
#
# Parameters:
#  U - numeric vector containing the sample (NA's are silently removed)
#  conf.level - confidence level for the K-S-test,
#               can also be a vector of multiple levels
#  exact - see ks.test
#  col.conf - colour of the confidence band
#  col.ref - colour of the reference line
#################

ks.plot.unif <- function (U, conf.level = 0.95, exact = NULL,
                          col.conf = "gray", col.ref = "gray",
                          xlab = expression(u[(i)]),
                          ylab = "Cumulative distribution") {
  stopifnot(is.vector(U, mode="numeric"))
  U <- U[!is.na(U)]
  n <- length(U)
  TIES <- FALSE
  if (anyDuplicated(U)) {
    warning("ties should not be present for the Kolmogorov-Smirnov test")
    TIES <- TRUE
  }
  if (is.null(exact)) exact <- (n < 100) && !TIES

  ## Helper function to invert the K-S test. The function
  ## pkolmogorov2x is the CDF of the Kolmogorov test statistic
  ## and is taken from the R project sources, which
  ## is (C) 1995-2009 by The R Core Team under GPL-2
  f <- if (exact) {
    function (x, p) { # x is the test statistic
      PVAL <- 1 - .C(C_pkolmogorov2x, p = as.double(x), as.integer(n))$p
      PVAL - p
    }
  } else {
    pkstwo <- function(x, tol = 1e-06) { # x is the test statistic
      ## stopifnot(length(x) == 1L)
      #Same copyright as above applies to the C code.
      if (is.na(x)) NA_real_
      else if (x == 0) 0
      else {
        .C(C_pkstwo, 1L, p = as.double(x), as.double(tol))$p
      }
    }
    function (x, p) {
      PVAL <- 1 - pkstwo(sqrt(n) * x)
      PVAL - p
    }
  }

  ## Test inversion
  Dconf <- sapply(conf.level, function (level) {
    uniroot(f, lower=0, upper=1, p=1-level)$root
  })

  ## Small helper function to draw a line
  myabline <- function (a, b, x.grid = seq(0,1,length.out=101), ...) {
    lines(x.grid, a + b * x.grid, ...)
  }

  ## Figure 10 in Ogata (1988)
  plot(c(0,1), c(0,1), type="n", xlab=xlab, ylab=ylab)
  myabline(a=0, b=1, col=col.ref, lwd=2)
  rug(U)
  lines(ecdf(U), verticals=TRUE, do.points=FALSE)
  sapply(Dconf, function (D) {
    myabline(a=D, b=1, col=col.conf, lty=2)
    myabline(a=-D, b=1, col=col.conf, lty=2)
  })
  #legend(x="topleft", col=col.conf, lty=2,
  #       legend=paste(100*conf.level,"% KS error bounds", sep=""))
  invisible()
}


######################################################################
# Check the residual process of a fitted twinstim or twinSIR
# using ks.plot.unif on 1-exp(-diff(tau))
# and a scatterplot of u_i vs. u_{i+1} to inspect serial correlation
#
# Parameters:
#  object - a fitted twinSIR or twinstim model
#
# Draws the ECDF of the transformed residuals together with backtransformed
# 95% Kolmogorov-Smirnov error bounds.
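#
# A minimal usage sketch (assuming 'fit' is a fitted "twinstim"; not run):
#   ret <- checkResidualProcess(fit)   # ECDF plot + lag-1 scatterplot of U
#   ret$ks                             # K-S test of U against U(0,1)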
###################################################################### checkResidualProcess <- function (object, plot = 1:2, mfrow = c(1,length(plot)), ...) { stopifnot(inherits(object, c("twinSIR", "twinstim", "simEpidataCS"))) ## check plot argument if (is.logical(plot)) plot <- which(rep(plot, length.out = 2)) else { stopifnot(is.vector(plot, mode="numeric"), plot %in% 1:2) } ## extract residual process tau <- do.call("residuals", args = list(substitute(object)), envir = parent.frame()) ## Transform to uniform variable Y <- diff(c(0,tau)) U <- 1 - exp(-Y) ## Calculate KS test ks <- ks.test(U, "punif", alternative = "two.sided", exact = match.call()[["exact"]]) ## return value ret <- list(tau=tau, U=U, ks=ks) ## 2 types of plots plotcalls <- alist( ## Investigate uniform distribution of U ks.plot.unif(U, ...), ## Investigate serial correlation between U_t and U_{t+1} which ## corresponds to Figure 11 in Ogata (1988) plot(tail(U,n=-1), head(U,n=-1), xlab=expression(u[i]), ylab=expression(u[i+1])) ) ## eval selected plot calls if (length(plot) > 0L) { opar <- par(mfrow = mfrow); on.exit(par(opar)) for (i in plot) eval(plotcalls[[i]]) invisible(ret) } else { ret } } surveillance/R/disProg.R0000644000176200001440000002375413566727577014745 0ustar liggesusers################################################### ### chunk number 1: ################################################### create.disProg <- function(week, observed, state, start=c(2001,1), freq=52, neighbourhood=NULL, populationFrac=NULL,epochAsDate=FALSE){ namesObs <-colnames(observed) # check whether observed contains only numbers if(!all(sapply(observed, is.numeric))){ stop("\'observed\' must be a matrix with numbers\n") } #univariate timeseries ? if(is.vector(observed)){ observed <- matrix(observed,ncol=1) namesObs <- deparse(quote(observed)) } else { # ensure we have a matrix observed <- as.matrix(observed) } if(missing(state)){ state <- 0*observed } else if(is.vector(state)){ state <- matrix(state,ncol=1) } else { state <- as.matrix(state) } #check number of columns of observed and state nAreas <- ncol(observed) nObs <- nrow(observed) if(ncol(observed) != ncol(state)){ #if there is only one state-vector for more than one area, repeat it if(ncol(state)==1) { state <- matrix(rep(state,nAreas),ncol=nAreas,byrow=FALSE) } else { cat('wrong dimensions of observed and state \n') return(NULL) } } #check neighbourhood matrix # neighbourhood can be a matrix or an array of dimension c(nAreas,nAreas, nrow(observed)) if(!is.null(neighbourhood) ) { dimNhood <- dim(neighbourhood) if(length(dimNhood)==2 & any(dimNhood != nAreas)) { cat('wrong dimensions of neighbourhood matrix \n') return(NULL) } else if (length(dimNhood)==3 & (any(dimNhood[1:2] != nAreas) | (dimNhood[3] != nrow(observed)) )){ cat('wrong dimensions of neighbourhood matrix \n') return(NULL) } } else { # no neighbourhood specified neighbourhood <- matrix(NA,nrow=nAreas,ncol=nAreas) } if(is.null(populationFrac)) { populationFrac <- matrix(1/ncol(observed),nrow=nObs, ncol=ncol(observed)) } else { # make sure populationFrac is a matrix populationFrac <- as.matrix(populationFrac) # check dimensions if(nrow(populationFrac)!= nObs | ncol(populationFrac)!= nAreas) stop("dimensions of \'populationFrac\' and \'observed\' do not match\n") # check whether populationFrac contains only numbers if(!all(sapply(populationFrac, is.numeric))){ stop("\'populationFrac\' must be a matrix with real numbers\n") } } #labels for observed and state if(is.null(namesObs)){ namesObs <- 
paste(deparse(quote(observed)),1:nAreas,sep="") } colnames(observed) <- namesObs colnames(state) <- namesObs res <- list("week"=week, "observed"=observed, "state"=state, "start"=start, "freq"=freq, "neighbourhood"=neighbourhood, "populationFrac"=populationFrac,"epochAsDate"=epochAsDate) class(res) <- "disProg" return(res) } print.disProg <- function(x, ...) { cat( "-- An object of class disProg -- \n" ) cat( "freq:\t\t", x$freq,"\n" ) cat( "start:\t\t", x$start,"\n" ) cat( "dim(observed):\t", dim(x$observed), "\n\n") n <- 1 cat("Head of observed:\n") print(head(x$observed,n)) #cat("\nhead of neighbourhood:\n") #print( head(x$neighbourhood,n)) } ################################################### ### chunk number 3: ################################################### aggregate.disProg <- function(x,...){ #aggregate observed counts observed <- apply(x$observed,MARGIN=1,sum) #aggregate states state <- apply(x$state,MARGIN=1,sum) state[state > 1] <- 1 #create univariate disProg object x <- create.disProg(week=x$week, observed=observed, state=state, freq=x$freq,start=x$start) return(x) } ################################################### ### chunk number 4: ################################################### plot.disProg.one <- function(x, title = "", xaxis.years=TRUE, quarters=TRUE, startyear = x$start[1], firstweek = x$start[2], ylim=NULL, xlab="time", ylab="No. infected",type="hh",lty=c(1,1),col=c(1,1), outbreak.symbol = list(pch=3, col=3),legend.opts=list(x="top", legend=c("Infected", "Outbreak"),lty=NULL,pch=NULL,col=NULL),...) { observed <- x$observed state <- x$state # width of the column tab <- 0.5 # left/right help for constructing the columns observedxl <- (1:length(observed))-tab observedxr <- (1:length(observed))+tab # control where the highest value is max <- max(observed) #if ylim is not specified if(is.null(ylim)){ ylim <- c(-1/20*max, max) } #Plot the results using one Large plot call matplot(x=cbind(observedxl, observedxr),y=cbind(observed, observed),xlab=xlab,ylab=ylab, type=type,lty=lty, col=col, ylim=ylim,axes = !(xaxis.years),...) #Show the outbreaks if (!is.null(outbreak.symbol)) { for(i in 1:length(observed)){ matlines( c(i-tab, i+tab), c(observed[i],observed[i]) ) if(state[i] == 1) matpoints( i, ylim[1], pch=outbreak.symbol$pch, col=outbreak.symbol$col) } } title(title) cex <- par()$cex.axis #Label of x-axis if(xaxis.years){ # get the number of quarters lying in range for getting the year and quarter order obsPerYear <- x$freq obsPerQuarter <- x$freq/4 myat.week <- seq(ceiling((obsPerYear-firstweek+1)/obsPerQuarter) * obsPerQuarter + 1, length(observed)+(floor((obsPerYear-firstweek + 1)/obsPerQuarter) * obsPerQuarter +1), by=obsPerQuarter) # get the right year order year <- (myat.week - obsPerYear) %/% obsPerYear + startyear # function to define the quarter order quarterFunc <- function(i) { switch(i+1,"I","II","III","IV")} # get the right number and order of quarter labels quarter <- sapply( (myat.week-1) %/% obsPerQuarter %% 4, quarterFunc) # get the positions for the axis labels myat.week <- myat.week - (obsPerYear - firstweek + 1) # construct the computed axis labels if (quarters) { if (cex == 1) { mylabels.week <- paste(year,"\n\n",quarter,sep="") } else { mylabels.week <- paste(year,"\n",quarter,sep="") } } else { mylabels.week <- paste(year,sep="") } axis( at=myat.week , labels=mylabels.week , side=1, line = 1 ) axis( side=2 ) } #should there be a legend? 
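  # (Setting legend.opts=NULL suppresses the legend entirely, since only a
  #  list enters the branch below; missing entries are filled with defaults.)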
if(is.list(legend.opts)) { #Fill empty (mandatory) slots in legend.opts list if (is.null(legend.opts$lty)) legend.opts$lty = c(lty[1],NA) if (is.null(legend.opts$col)) legend.opts$col = c(col[1],outbreak.symbol$col) if (is.null(legend.opts$pch)) legend.opts$pch = c(NA,outbreak.symbol$pch) if (is.null(legend.opts$x)) legend.opts$x = "top" if (is.null(legend.opts$legend)) legend.opts$legend = c("Infected", "Outbreak") #Create the legend do.call("legend",legend.opts) } invisible() } plot.disProg <- function(x, title = "", xaxis.years=TRUE, startyear = x$start[1], firstweek = x$start[2], as.one=TRUE, same.scale=TRUE, ...){ if (xaxis.years && isTRUE(x[["epochAsDate"]])) warning("plot.disProg can't handle Date entries; axis labels are based on 'start'") observed <- x$observed state <- x$state #univariate timeseries ? if(is.vector(observed)) observed <- matrix(observed,ncol=1) if(is.vector(state)) state <- matrix(state,ncol=1) nAreas <- ncol(observed) max <- max(observed) #check if x is multivariate or univariate #multivariate time series if(nAreas > 1){ #all areas in one plot -- not supported in sts if(as.one){ matplot(observed,type="l",lty=1:nAreas,col=1:nAreas,ylim=c(0, 1.1*max),xlab="time",ylab="No. of Infected", axes=!xaxis.years) #If no legend.opts is specified or not set to null if ((is.na(pmatch("legend.opts",names(list(...))))) | (!is.na(pmatch("legend.opts",names(list(...)))) & (!is.null(list(...)$legend.opts)))) { legend.opts <- list(...)$legend.opts if (is.null(legend.opts$x)) legend.opts$x = "topleft" if (is.null(legend.opts$legend)) legend.opts$legend = colnames(observed) if (is.null(legend.opts$col)) legend.opts$col = 1:nAreas if (is.null(legend.opts$lty)) legend.opts$lty = 1:nAreas if (is.null(legend.opts$ncol)) legend.opts$ncol = 5 if (is.null(legend.opts$bty)) legend.opts$bty = "n" do.call("legend",legend.opts) } title(title) if(xaxis.years){ #todo: move this as output of ONE function # get the number of quarters lying in range for getting the year and quarter order myat.week <- seq(ceiling((52-firstweek+1)/13) * 13 + 1, length(observed)+(floor((52-firstweek + 1)/13) * 13 +1), by=13) # get the right year order year <- (myat.week - 52) %/% 52 + startyear # function to define the quarter order quarterFunc <- function(i) { switch(i+1,"I","II","III","IV")} # get the right number and order of quarter labels quarter <- sapply( (myat.week-1) %/% 13 %% 4, quarterFunc) # get the positions for the axis labels myat.week <- myat.week - (52 - firstweek + 1) # construct the computed axis labels cex <- par()$cex.axis if (cex == 1) { mylabels.week <- paste(year,"\n\n",quarter,sep="") } else { mylabels.week <- paste(year,"\n",quarter,sep="") } axis( at=myat.week , labels=mylabels.week , side=1, line = 1 ) axis( side=2 ) } } else { #plot each area #set window size par(mfrow=magic.dim(nAreas),mar=c(2,1,1,1)) if(same.scale) ylim <- c(-1/20*max, max) else ylim <- NULL #plot areas k <- 1:nAreas sapply(k, function(k) { plot.disProg.one(create.disProg(x$week, observed[,k], state[,k], freq=x$freq,start=x$start), title = "", startyear = startyear, firstweek = firstweek, xaxis.years=xaxis.years, ylim=ylim, legend.opts=NULL, ... ) mtext(colnames(observed)[k],line=-1.3) }) #reset graphical params par(mfrow=c(1,1), mar=c(5, 4, 4, 2)+0.1) } } else { #univariate time series plot.disProg.one(x=x, title = title, startyear = startyear, firstweek = firstweek, xaxis.years=xaxis.years, ...) 
} invisible() } surveillance/R/algo_rogerson.R0000644000176200001440000004024113276237331016142 0ustar liggesusers################################################### ### chunk number 1: ################################################### ################################################################### # Average Run Lengths for CUSUMs using Markov Chain approach # # based on the program of Hawkins, D. M. (1992) # "Evaluation of Average Run Lengths of Cumulative Sum Charts # for an Arbitrary Data Distribution" # Communications in Statistics--Simulation. 21(4) 1001-1020. #--------------------------------------------------------------- # # for discrete distributions (i.e. Pois, Bin) # and upward CUSUMS (increasing rate,probability) # # Parameters: # h - decision interval h # k - reference value k # distr - "poisson" or "binomial" # theta - distribution parameter for cdf distr, e.g. lambda for ppois, p for pbinomial # W - winsorizing value W (for robust CUSUM) # to get a nonrobust CUSUM set W > k+h # digits - k and h are rounded to digits decimal places # ... - further arguments for distribution # i.e. number of trials n for binomial (defaults to n=1) # # Returns: # ARL - one-sided ARL of the regular (no-head-start) CUSUM ################################################################### arlCusum <- function(h=10, k=3, theta=2.4, distr=c("poisson","binomial"), W=NULL,digits=1,...){ h <- round(h,digits) k <- round(k,digits) #cat("h",h,"k",k,"\n") distr <- match.arg(distr,c("poisson","binomial")) ############## # cdf of a binomial variate with fixed sample size pbinomial <- function(x,p,n=1){ pbinom(x,size=n,prob=p) } ######## distribution <- switch(distr, "poisson" = ppois, "binomial" = pbinomial ) #no winsorization if(is.null(W)) W <- ceiling(h+k+1) # common denominator of h and k denrat <- commonDenom(h,k,digits=digits) #cat("h =",h,"k =",k,"denrat",denrat,"\n") # check parameters for feasibility if(h <=0) stop("Nonpositive decision interval\n") if(W <= k) stop("Winsorization value less than reference value\n") K <- as.integer(k*denrat+0.5) N <- as.integer(denrat) M <- as.integer(h*denrat -0.5) w <- as.integer(W*denrat+0.5) deviat <- abs(K-k*denrat)+abs(M-h*denrat+1)+abs(w-W*denrat) if(deviat > .01) stop("h, k or W not a multiple of 1/denrat\n") # determine probabilities x <- seq(((-M-1)+K)/N,(M+K)/N,by=(1/denrat)) probs <- distribution(x, theta,...) 
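  # 'probs[j]' now holds P(X <= x[j]) on the lattice of CUSUM states (grid
  # width 1/denrat); after winsorizing, differences of these cdf values form
  # the transition matrix R of the Markov chain, and the ARLs are read off
  # from (I-R)^{-1} %*% 1 below (zero-start ARL is the first entry).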
  # Winsorization: any observation exceeding the threshold value W is
  # replaced by W; the CUSUM is then: S_n = max{0, S_{n-1} + min(X_n,W) - k}
  probs[x>=W] <- 1

  #construct the transition matrix
  transition <- matrix(NA,M+1,M+1)
  transition[1,1] <- probs[(M+2)]                               #Pr(X <= k)
  transition[-1,1] <- probs[(M+2)-(1:M)]                        #Pr(X <= -j+k), j=1,2,...,h-1
  transition[1,-1] <- probs[(M+2)+(1:M)]- probs[(M+2)+(1:M)-1]  #Pr(X = j+k), j=1,2,...,h-1
  idx <- rep((M+2):((M+2)+M-1),M)-rep(0:(M-1),each=M)
  transition[-1,-1] <- matrix(probs[idx]-probs[idx-1],nrow=M,ncol=M,byrow=TRUE)
  #print(transition)

  # I - transition matrix R
  IminusR <- diag(1,M+1) - transition

  #Solve might work poorly in some cases
  res <- try(solve(IminusR)%*%rep(1,M+1),silent=TRUE)
  # res <- try(qr.solve(IminusR)%*%rep(1,M+1),silent=TRUE)
  if(inherits(res, "try-error")){
    warning("I-R singular\n")
    return(NA)
  }
  ARL <- res[1]
  #FIRARL - one-sided ARL of the FIR CUSUM with head start 0.5h
  FIRARL <- res[(M+1)/2+1]
  return(list(ARL=ARL,FIR.ARL=FIRARL))
}

#################################################################
# find the smallest common denominator of x and y,
# i.e. find an integer N so that x=X/N and y=Y/N (with X,Y integer)
#################################################################
commonDenom <- function(x,y,digits=1){
  x <- round(x,digits)
  y <- round(y,digits)
  which.max( ( round((x+y)*1:(10^digits),digits)%%1 == 0 )  # (x+y)*N is integer
           & ( round(x*1:(10^digits),digits)%%1 == 0 )      # x*N is integer
           & ( round(y*1:(10^digits),digits)%%1 == 0 ) )    # y*N is integer
}

###################################################
### chunk number 2:
###################################################
#################################################################
# find the reference value k for a Poisson/Binomial CUSUM
# designed to detect a change from theta0 to theta1
#
# digits - k is rounded to digits decimal places if roundK=TRUE
# ...    - extra arguments for the distribution,
#          i.e. number of trials n for binomial, set to 1 if not specified
##################################################################
findK <- function(theta0,theta1,distr=c("poisson","binomial"),roundK=FALSE,digits=1,...){
  n <- list(...)$n
  if(is.null(n)) n <- 1
  distr <- match.arg(distr,c("poisson","binomial"))
  k <- switch(distr,
              "poisson" = (theta1 - theta0)/(log(theta1)-log(theta0)),
              "binomial" = -n*(log(1-theta1)-log(1-theta0))/(log(theta1*(1-theta0))-log(theta0*(1-theta1)))
              )
  # for discrete data the CUSUM values are of the form
  # integer - integer multiple of k,
  # so there is a limited set of possible values of h (and ARL)
  if(roundK){
    # add/subtract 0.5*10^(-digits) (e.g. 0.05 for digits=1) to/from k so that
    # k isn't an integer or a multiple of 0.5 when rounded
    # (then the Markov chain has more states)
    if(round(k,digits=digits)%%1 == 0.5 | round(k,digits=digits)%%1 == 0){
      round.k <- ((k-floor(k))*10^digits)%%1
      #print(roundK)
      if(round.k < .5 ) k <- k+0.5*10^(-digits)
      else k <- k-0.5*10^(-digits)
    }
    k <- round(k,digits=digits)
  }
  return(k)
}

###################################################
### chunk number 3:
###################################################
##################################################################
# function to find the decision limit h so that the
# average run length for a Poisson/Binomial CUSUM with in-control
# parameter theta0 is (approx.)
ARL0 # # Params: # ARL0 - desired in-control ARL # theta0 - in-control parameter # s - change to detect (in stdev) # rel.tol - (relative) tolerance (if attainable) # roundK - should k be rounded up to digits decimal place (avoiding integers, multiples of 0.5) # digits - h is rounded to digits decimal places # distr - "poisson" or "binomial" # ... - further arguments for distribution (i.e number of trials n for "binomial") # # Returns: # vector c(theta0, h, k, ARL, rel.tol) ################################################################# findH <- function(ARL0,theta0,s=1, rel.tol=0.03,roundK=TRUE,distr=c("poisson","binomial"),digits=1,FIR=FALSE,...){ distr <- match.arg(distr,c("poisson","binomial")) #FIR-ARL or zero-start ARL? fir.arl <- ifelse(FIR,2,1) theta1 <- getTheta1(theta0,s=s,distr=distr) k <- findK(theta0,theta1,roundK=roundK,distr=distr,digits=digits,...) # compute ARL for two (arbitrary) points (h1, h2) h1 <- min(12,4*k) arl1 <- arlCusum(h=h1,k=k,theta=theta0,distr=distr,digits=digits,...)[[fir.arl]] nEval <- 1 #ensure h1 and arl1 are not too small (-> log. interpolation is better) while(arl1 < 100){ h1 <- 2*h1 arl1 <- arlCusum(h=h1,k=k,theta=theta0,distr=distr,digits=digits,...)[[fir.arl]] nEval <- nEval + 1 } h2 <- h1*2^(sign(ARL0-arl1)) arl2 <- arlCusum(h=h2,k=k,theta=theta0,distr=distr,digits=digits,...)[[fir.arl]] nEval <- nEval + 1 # determine h (that leads to an ARL of ARL0) using logarithmic interpolation h.hat <- round(logInterpolation(ARL0,h1,h2,arl1,arl2),digits) # what's the actual ARL for h arl <- arlCusum(h=h.hat,k=k,theta=theta0,distr=distr,digits=digits,...)[[fir.arl]] nEval <- nEval + 1 relTol <- abs((arl-ARL0)/ARL0) #cat("theta0:", theta0,"k:", k,"h:", h.hat,"ARL:",arl,"relTol:", relTol,"\n") i<-0 signs <- sign(ARL0-arl) convergence <- relTol < rel.tol if(convergence){ # print(nEval) return(c("theta0"=theta0,"h"=h.hat,"k"=k,"ARL"=arl,"rel.tol"=relTol)) } # find hLow so that the target ARL0 is in interval c(ARL(hLow), ARL(h.hat)) denrat <- 1/commonDenom(1,k,digits=digits) steps <- denrat #max(0.1,denrat) # cat("denrat",denrat,"steps",steps,"\n") hLow <- round(h.hat+signs*steps,digits) arlLow <- arlCusum(h=hLow,k=k,theta=theta0,distr=distr,digits=digits,...)[[fir.arl]] nEval <- nEval + 1 relTol.Low <- abs((arlLow-ARL0)/ARL0) if(relTol.Low < rel.tol){ # print(nEval) return(c("theta0"=theta0,"h"=hLow,"k"=k,"ARL"=arlLow,"rel.tol"=relTol.Low)) } while(sign(ARL0-arl)*sign(ARL0-arlLow)>0){ # cat("steps:",nEval,"\n") h.hat <- hLow arl <-arlLow relTol <- relTol.Low signs <- sign(ARL0-arl) hLow <- round(h.hat+signs*steps,digits) arlLow <- arlCusum(h=hLow,k=k,theta=theta0,distr=distr,digits=digits,...)[[fir.arl]] nEval <- nEval + 1 relTol.Low <- abs((arlLow-ARL0)/ARL0) if(relTol.Low < rel.tol){ # print(nEval) return(c("theta0"=theta0,"h"=hLow,"k"=k,"ARL"=arlLow,"rel.tol"=relTol.Low)) } # cat("hLow:", hLow,"ARL:",arlLow,"relTol:",relTol.Low,"\n") } # cat("hLow:", hLow,"ARL:",arlLow,"relTol:",relTol.Low,"\n") # return the ARL which is at least the target ARL0 if(sign(ARL0-arlLow)<0){ h.hat <- hLow arl <- arlLow relTol <- relTol.Low } #print(nEval) return(c("theta0"=theta0,"h"=h.hat,"k"=k,"ARL"=arl,"rel.tol"=relTol)) } ################################################################## # find h for various values theta0 # # Params: # theta0 - vector of in control parameter # ARL0 - desired in-control ARL # # Returns: # matrix with columns c(theta0, h, k, ARL, rel.Tol) ################################################################## hValues <- 
function(theta0,ARL0,rel.tol=0.02,s=1,roundK=TRUE,digits=1,distr=c("poisson","binomial"),FIR=FALSE,...){ distr <- match.arg(distr,c("poisson","binomial")) n <- list(...)$n hVals <- t(sapply(theta0,findH,ARL0=ARL0,rel.tol=rel.tol,s=s,roundK=roundK,digits=digits,distr=distr,FIR=FIR,...)) res <- list(hValues=hVals,ARL0=ARL0,s=s,rel.tol=rel.tol,distribution=distr,firARL=FIR) res$n <- n return(res) } ################################################################## # get decision interval h and reference value k for CUSUM with # in-control parameter theta using a "table" of h values # # theta - in-control parameter # hValues - matrix with columns c(theta, h) ################################################################## getHK <- function(theta,hValues){ one<- function(theta){ theta.diff <- abs(hValues[,1]-theta) idx <- which.min(theta.diff) hk <- hValues[idx,2:3] if(theta.diff[idx] > 0.05) warning("table doesn't contain h value for theta = ",theta,"\n") return(hk) } t(sapply(theta,one)) } ################################################################# # get out-of-control parameter theta1 # # X ~ Po(lambda0): theta1 = lambda0 + s*sqrt(lambda0) # theta1 corresponds to a s*stdev increase in mean # # X ~Bin(n,pi) # H0: Odds of failure =pi/(1-pi) vs H1: Odds = s*pi/(1-pi) # prob of failure under H1 is then pi1 = s*pi0/(1+(s-1)*pi0) ################################################################# getTheta1 <- function(theta0,s=1,distr=c("poisson","binomial")){ distr <- match.arg(distr,c("poisson","binomial")) theta1 <- switch(distr, "poisson" = theta0 + s*sqrt(theta0), "binomial" = s*theta0/(1-theta0+s*theta0) ) return(theta1) } ################################################################# # logarithmic interpolation, i.e. linear interpolation of ln(f(x)) # in points (x0,ln(f0)), (x1,ln(f1)) # # (ln(f)-ln(f0))/(ln(f1)-ln(f0)) = (x-x0)/(x1-x0) # # returns: x # # to find decision limit h for given ARL0 set x = h, f(h) = ARL0(h,k) # and solve equation for x ################################################################# logInterpolation <- function(f,x0,x1,f0,f1){ x0 + ((x1-x0)*(log(f)-log(f0)))/(log(f1)-log(f0)) } ################################################### ### chunk number 4: ################################################### # control - list with # range - vector of indices in the observed matrix to monitor # theta0t - matrix with in-control parameter, needs to be specified # ARL0 - desired average run length for each one of the univariate CUSUMs # s - change to detect # hValues - matrix with decision intervals for theta0_t # reset - if TRUE, the CUSUM is reset to zero after an alarm # nt - time varying sample sizes (for Binomial), # matrix of same dimension as theta0t algo.rogerson <- function(disProgObj, control=list(range=range, theta0t=NULL, ARL0=NULL, s=NULL, hValues=NULL, distribution=c("poisson","binomial"), nt=NULL, FIR=FALSE,limit=NULL, digits=1)){ if (is.null(control$s)) { stop("Error: the s value is not specified") } if (is.null(control$hValues)) { stop("Error: the hValues are not specified") } # if (is.null(control$ARL0)) { stop("Error: no ARL0 value specified") } #Default value is poisson control$distribution <- match.arg(control$distribution,c("poisson","binomial")) if(is.null(control$FIR)){ control$FIR <- FALSE } if(is.null(control$limit)) control$limit <- -1 if(is.null(control$digits)) control$digits <- 1 x <- as.matrix(disProgObj$observed[control$range,]) if (is.null(control$theta0t)) { stop("Error: no theta0t vector specified") } else { theta0t <- 
as.matrix(control$theta0t) } #theta0 <- colMeans(theta0t) #size = length of process size <- nrow(x) nAreas <- ncol(theta0t) theta0 <- rep(mean(theta0t),nAreas) #check dimensions of x, theta0t if(size !=nrow(theta0t) | (ncol(x)%%nAreas)!=0) stop("wrong dimensions\n") reps <- ncol(x)/nAreas #time-varying size n for Binomial nt<-control$nt if(control$distribution=="binomial"){ if(is.null(nt)) nt <- matrix(rep(control$n,size),ncol=1) else nt<-as.matrix(nt) } theta1 <- getTheta1(theta0,s=control$s,distr=control$distribution) theta1t <- getTheta1(theta0t,s=control$s,distr=control$distribution) hk <- getHK(theta0,hValues=control$hValues) k <- hk[,"k"] h <- hk[,"h"] #cat("k =",k,"h =",h,"\n") if(control$FIR){ control$limit <- 0.5 fir <- h/2 } else { fir <- 0 } #cat("fir",fir,"\n") # initialize the necessary vectors # start with cusum[1] = 0 cusum <- matrix(0,nrow=(size+1), ncol=nAreas*reps) cusum[1,] <- fir alarm <- matrix(data = 0, nrow = (size+1), ncol = nAreas*reps) upperbound <- matrix(0,nrow=(size+1),ncol=reps) #CUSUM as in Rogerson (2004) for(t in 1:size){ #choose k_t based upon theta_0t and theta_1t hkt <- getHK(theta0t[t,],hValues=control$hValues) #kt <- hkt[,"k"] kt <- findK(theta0t[t,],theta1t[t,],distr=control$distribution,roundK=TRUE, digits=control$digits, n=nt[t,]) # #for given k_t (theta0t) and ARL_0 choose h_t ht <- hkt[,"h"] ct <- h/ht # compute cumulative sums of observations x corrected with the # reference value kt, scaled by factor ct # cusum[t+1,]<- pmax(0, cusum[t,] + ct*(x[t,]-kt)) # reset CUSUM to zero if an alarm is given at time t if((control$limit >= 0) & any(alarm[t,]==1)){ cusum.t <- cusum[t,] cusum.t[alarm[t,]==1] <- pmin(cusum[t,], control$limit*h)[alarm[t,]==1] cusum[t+1,]<- pmax(0, cusum.t + ct*(x[t,]-kt)) } else { cusum[t+1,]<- pmax(0, cusum[t,] + ct*(x[t,]-kt)) } # give alarm if the cusum is larger than h alarm[t+1,] <- cusum[t+1,] >= h # in case speed is premium then one might want to comment this line if((control$limit >= 0) & any(alarm[t,]==1)) { upperbound[t+1,] <- ceiling( (h-cusum.t)/ct + kt) } else { upperbound[t+1,] <- ceiling( (h-cusum[t,])/ct + kt) } #Ensure upperbound is positive (this should always be the case) if (upperbound[t+1,] < 0) { upperbound[t+1,] <- 0} } # discard cusum[1] and alarm[1] cusum <- as.matrix(cusum[-1,]) alarm <- as.matrix(alarm[-1,]) upperbound <- as.matrix(upperbound[-1,]) #Add name and data name to control object. control$name <- paste("CUSUM Rogerson:",control$distribution) control$data <- paste(deparse(substitute(disProgObj))) # return alarm and upperbound vectors result <- list(alarm = alarm, upperbound = upperbound, disProgObj=disProgObj,control=c(control,list(h=h))) class(result) = "survRes" # for surveillance system result return(result) } surveillance/R/sts_animate.R0000644000176200001440000001435113746247075015624 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Animated map (and time series chart) of an sts-object (or matrix of counts) ### ### Copyright (C) 2013-2016,2018,2020 Sebastian Meyer ### $Revision: 2570 $ ### $Date: 2020-10-28 11:55:57 +0100 (Wed, 28. 
Oct 2020) $ ################################################################################ ### Corresponding to the S3-generic function animate(), ### we define a method for the S4-class "sts" and omit the recommended ### setGeneric("animate"); setMethod("animate", "sts", animate.sts) ### [see Section "Methods for S3 Generic Functions" in help("Methods")] animate.sts <- function (object, tps = NULL, cumulative = FALSE, population = NULL, at = 10, ..., timeplot = list(pos = 1, size = 0.3, fill = TRUE), sleep = 0.5, verbose = interactive(), draw = TRUE) { if (draw && dev.interactive()) message("Advice: use facilities of the \"animation\" package, e.g.,\n", " saveHTML() to view the animation in a web browser.") if (is.null(tps)) tps <- seq_len(nrow(object)) if (!is.null(population)) { # get population matrix population <- parse_population_argument(population, object) } ## determine color breaks (checkat() is defined in stsplot_space.R) at <- checkat(at, data=.rangeOfDataToPlot(object, tps, cumulative, population), counts=is.null(population)) ## style of the additional temporal plot if (is.list(timeplot)) { timeplot <- modifyList(eval(formals()$timeplot), timeplot) if (!is.null(timeplot[["height"]])) { # surveillance <= 1.18.0 timeplot$pos <- 1 timeplot$size <- timeplot$height timeplot$height <- NULL } stopifnot(timeplot$pos %in% 1:4, timeplot$size > 0, timeplot$size < 1) ## disentangle arguments not for stsplot_timeSimple() timeplot_pos <- timeplot$pos timeplot_size <- timeplot$size timeplot_fill <- timeplot$fill timeplot$pos <- timeplot$size <- timeplot$fill <- NULL } if (verbose) pb <- txtProgressBar(min=0, max=length(tps), initial=0, style=3) grobs <- vector(mode = "list", length = length(tps)) for(i in seq_along(tps)) { cti <- if (cumulative) seq_len(i) else i ls <- stsplot_space(object, tps=tps[cti], population=population, at=at, ...) if (is.list(timeplot) && requireNamespace("gridExtra")) { stopifnot(packageVersion("gridExtra") >= "2.0.0") lt <- do.call("stsplot_timeSimple", c( list(x=object, tps=tps, highlight=cti), timeplot)) if (!isTRUE(timeplot_fill)) { # see ?trellis.object lt$aspect.fill <- FALSE lt$aspect.ratio <- ls$aspect.ratio * if (timeplot_pos %in% c(1,3)) timeplot_size / (1-timeplot_size) else (1-timeplot_size) / timeplot_size } grobs[[i]] <- switch(timeplot_pos, gridExtra::arrangeGrob(ls, lt, heights=c(1-timeplot_size, timeplot_size)), gridExtra::arrangeGrob(lt, ls, widths=c(timeplot_size, 1-timeplot_size)), gridExtra::arrangeGrob(lt, ls, heights=c(timeplot_size, 1-timeplot_size)), gridExtra::arrangeGrob(ls, lt, widths=c(1-timeplot_size, timeplot_size))) if (draw) { grid::grid.newpage() grid::grid.draw(grobs[[i]]) } } else { grobs[[i]] <- ls if (draw) print(ls) } if (verbose) setTxtProgressBar(pb, i) if (dev.interactive()) Sys.sleep(sleep) } if (verbose) close(pb) invisible(grobs) } ### additional time plot below the map stsplot_timeSimple <- function (x, tps = NULL, highlight = integer(0), inactive = list(col="gray", lwd=2), active = list(col=1, lwd=4), as.Date = x@epochAsDate, ...) { observed <- if (inherits(x, "sts")) observed(x) else x if (is.null(tps)) { tps <- seq_len(nrow(observed)) } else { observed <- observed[tps,,drop=FALSE] } epoch <- if (inherits(x, "sts")) epoch(x, as.Date = as.Date)[tps] else tps if (anyNA(observed)) warning("ignoring NA counts in time series plot") ## build highlight-specific style vectors (col, lwd, ...) 
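    ## e.g., with the default 'inactive'/'active' settings, col becomes
    ## rep("gray", length(tps)) with col[highlight] <- 1, so highlighted
    ## time points are drawn black (and with lwd 4 instead of 2)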
stopifnot(is.list(inactive), is.list(active)) stylepars <- intersect(names(inactive), names(active)) styleargs <- sapply(stylepars, function (argname) { res <- rep.int(inactive[[argname]], length(tps)) res[highlight] <- active[[argname]] res }, simplify=FALSE, USE.NAMES=TRUE) par_no_top_padding <- list( layout.heights = list(top.padding = 0, main.key.padding = 0, key.axis.padding = 0) ) xyplot.args <- modifyList( c(list(x = rowSums(observed, na.rm = TRUE) ~ epoch, type = "h", grid = "h", ylab = "", xlab = "", ylim = c(0, NA), scales = list(x = list(tck = c(1, 0))), par.settings = par_no_top_padding), styleargs), list(...)) do.call(lattice::xyplot, xyplot.args) } ### determine data range for automatic color breaks 'at' .rangeOfDataToPlot <- function (object, tps, cumulative = FALSE, population = NULL) { observed <- if (inherits(object, "sts")) observed(object) else object observed <- observed[tps,,drop=FALSE] if (!is.null(population)) { # compute (cumulative) incidence observed <- if (cumulative) { observed / rep(population[tps[1L],], each = nrow(observed)) } else { observed / population[tps,,drop=FALSE] } } range(if (cumulative) c(observed[1L,], colSums(observed)) else observed, na.rm = TRUE) } surveillance/R/scores.R0000644000176200001440000001423213430631466014600 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Scoring rules as discussed in: ### Predictive model assessment for count data ### Czado, C., Gneiting, T. & Held, L. (2009) ### Biometrics 65:1254-1261 ### ### Copyright (C) 2010-2012 Michaela Paul, 2014-2015,2017-2019 Sebastian Meyer ### $Revision: 2279 $ ### $Date: 2019-02-12 21:57:26 +0100 (Tue, 12. Feb 2019) $ ################################################################################ ## logarithmic score ## logs(P,x) = -log(P(X=x)) .logs <- function (px) -log(px) logs <- function (x, mu, size=NULL) { if (is.null(size)) { - dpois(x, lambda=mu, log=TRUE) } else { - dnbinom(x, mu=mu, size=size, log=TRUE) } } ## squared error score ## ses(P,x) = (x-mu_p)^2 ses <- function (x, mu, size=NULL) { (x-mu)^2 } ## normalized squared error score (IMPROPER) ## nses(P,x) = ((x-mu_p)/sigma_p)^2 nses <- function (x, mu, size=NULL) { sigma2 <- if (is.null(size)) mu else mu * (1 + mu/size) ((x-mu)^2) / sigma2 } ## Dawid-Sebastiani score ## dss(P,x) = ((x-mu_p)/sigma_p)^2 + 2*log(sigma_p) .dss <- function (meanP, varP, x) (x-meanP)^2 / varP + log(varP) dss <- function (x, mu, size=NULL) .dss(meanP = mu, varP = if (is.null(size)) mu else mu * (1 + mu/size), x = x) ## ranked probability score ## rps(P,x) = sum_0^Kmax {P(X<=k) - 1(x<=k)}^2 ## for a single prediction (general formulation) .rps <- function (P, ..., x, kmax, tolerance = sqrt(.Machine$double.eps)) { ## compute P(X<=k) k <- 0:kmax Pk <- P(k, ...) 
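    ## Pk[j] = P(X <= j-1) for j = 1,...,kmax+1; the truncation error of the
    ## finite sum is gauged by (1 - P(X <= kmax))^2, which is compared
    ## against 'tolerance' below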
## check precision if ((1 - Pk[length(Pk)])^2 > tolerance) warning("finite sum approximation error larger than tolerance=", format(tolerance)) ## compute the RPS sum((Pk - (x <= k))^2) } ## for a single Poisson prediction rps_1P <- function (x, mu, k=40, tolerance=sqrt(.Machine$double.eps)) { ## return NA for non-convergent fits (where mu=NA) if (is.na(mu)) return(NA_real_) ## determine the maximum number of summands as Kmax=mean+k*sd kmax <- ceiling(mu + k*sqrt(mu)) ## compute the RPS .rps(P = ppois, lambda = mu, x = x, kmax = kmax, tolerance = tolerance) } ## for a single NegBin prediction rps_1NB <- function (x, mu, size, k=40, tolerance=sqrt(.Machine$double.eps)) { ## return NA for non-convergent fits (where mu=NA) if (is.na(mu)) return(NA_real_) ## determine the maximum number of summands as Kmax=mean+k*sd sigma2 <- mu * (1 + mu/size) kmax <- ceiling(mu + k*sqrt(sigma2)) ## compute the RPS .rps(P = pnbinom, mu = mu, size = size, x = x, kmax = kmax, tolerance = tolerance) } ## vectorized version rps <- function (x, mu, size=NULL, k=40, tolerance=sqrt(.Machine$double.eps)) { res <- if (is.null(size)) { mapply(rps_1P, x=x, mu=mu, MoreArgs=list(k=k, tolerance=tolerance), SIMPLIFY=TRUE, USE.NAMES=FALSE) } else { mapply(rps_1NB, x=x, mu=mu, size=size, MoreArgs=list(k=k, tolerance=tolerance), SIMPLIFY=TRUE, USE.NAMES=FALSE) } attributes(res) <- attributes(x) # set dim and dimnames res } ### apply a set of scoring rules at once scores.default <- function(x, mu, size = NULL, which = c("logs", "rps", "dss", "ses"), sign = FALSE, ...) { ## compute individual scores (these have the same dimensions as x) scorelist <- lapply(X = setNames(nm = which), FUN = do.call, args = alist(x = x, mu = mu, size = size), envir = environment()) ## append sign of x-mu if (sign) scorelist <- c(scorelist, list("sign" = sign(x-mu))) ## gather scores in an array simplify2array(scorelist, higher = TRUE) } ### apply scoring rules to a set of oneStepAhead() forecasts scores.oneStepAhead <- function (x, which = c("logs","rps","dss","ses"), units = NULL, sign = FALSE, individual = FALSE, reverse = FALSE, ...) { y <- x$observed # observed counts during the prediction window mu <- x$pred # predicted counts (same dim as y) ## transform overdispersion to dnbinom() parameterization size <- psi2size.oneStepAhead(x) # -> NULL or full dim(y) matrix ## select units if (!is.null(units)) { y <- y[,units,drop=FALSE] mu <- mu[,units,drop=FALSE] size <- size[,units,drop=FALSE] # works with size = NULL } nUnits <- ncol(y) if (nUnits == 1L) individual <- TRUE # no need to apply rowMeans() below result <- scores.default(x = y, mu = mu, size = size, which = which, sign = sign) ## reverse order of the time points (historically) if (reverse) { result <- result[nrow(result):1L,,,drop=FALSE] } ## average over units if requested if (individual) { drop(result) } else { apply(X=result, MARGIN=3L, FUN=rowMeans) ## this gives a nrow(y) x (5L+sign) matrix (or a vector in case nrow(y)=1) } } ## calculate scores with respect to fitted values scores.hhh4 <- function (x, which = c("logs","rps","dss","ses"), subset = x$control$subset, units = seq_len(x$nUnit), sign = FALSE, ...) 
{ ## slow implementation via "fake" oneStepAhead(): ##fitted <- oneStepAhead(x, tp = subset[1L] - 1L, type = "final", ## keep.estimates = FALSE, verbose = FALSE) ##scores.oneStepAhead(fitted, which = which, units = units, sign = sign, ## individual = TRUE, reverse = FALSE) result <- scores.default( x = x$stsObj@observed[subset, units, drop = FALSE], mu = x$fitted.values[match(subset, x$control$subset), units, drop = FALSE], size = psi2size.hhh4(x, subset, units), which = which, sign = sign) rownames(result) <- subset drop(result) } surveillance/R/twinstim_siaf_gaussian.R0000644000176200001440000002062213165636121020052 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Gaussian spatial interaction function for twinstim's epidemic component ### ### Copyright (C) 2009-2014,2017 Sebastian Meyer ### $Revision: 1986 $ ### $Date: 2017-10-06 10:18:25 +0200 (Fri, 06. Oct 2017) $ ################################################################################ ## nTypes: determines the number of parameters=(log-)standard deviations of the ## Gaussian kernel. In a multitype epidemic, the different types may share the ## same spatial interaction function (type-invariant), in which case nTypes=1. ## Otherwise nTypes should equal the number of event types of the epidemic, in ## which case every type has its own variance parameter. ## logsd: logical indicating if the gaussian kernel should be reparametrized ## such that the log-standard deviation is the parameter in question. This ## avoids constrained optimisation (L-BFGS-B) or the use of 'validpars'. ## density: logical. If TRUE, the isotropic Gaussian density (on R^2) will not ## be scaled to have maximum value of 1 at the mean c(0,0). ## effRangeMult: determines the effective range for numerical integration in ## terms of multiples of the parameter, i.e. with effRangeMult=6 numerical ## integration only considers the 6-sigma area around the event instead of the ## whole observation region W. ## validpars: If logsd = FALSE, you should either use ## constrained optimisation (L-BFGS-B) or set 'validpars' to function (pars) ## pars > 0. 
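## In the default parametrization (logsd = TRUE, density = FALSE), the kernel
## built below is f(s) = exp(-||s||^2 / (2*sigma^2)) with sigma = exp(pars),
## so f(0) = 1; 'Fcircle' then equals 2*pi*sigma^2 * pchisq((r/sigma)^2, df=2),
## i.e. the integral of f over a disc of radius r around the origin.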
siaf.gaussian <- function (nTypes = 1, logsd = TRUE, density = FALSE, F.adaptive = FALSE, F.method = "iso", effRangeMult = 6, validpars = NULL) { if (!logsd || density) .Deprecated(msg = "non-default parametrizations of siaf.gaussian() are deprecated") nTypes <- as.integer(nTypes) stopifnot(length(nTypes) == 1L, nTypes > 0L) if (isScalar(F.adaptive)) { adapt <- F.adaptive F.adaptive <- TRUE } else adapt <- 0.1 if (F.adaptive && !missing(F.method)) warning("ignoring 'F.method' since 'F.adaptive=TRUE' (adaptive midpoint cubature)") f <- function (s, pars, types) {} # coordinate matrix s, length(types) = 1 or nrow(s) F <- if (F.adaptive) { as.function(c(alist(polydomain=, f=, pars=, type=), list(adapt=adapt), quote({}))) } else if (F.method == "iso") { if (!logsd || density) stop("only the default parametrization is implemented for 'F.method=\"iso\"'") if (nTypes > 1L) stop("only the single-type kernel is implemented for 'F.method=\"iso\"'") siaf_F_polyCub_iso(intrfr_name = "intrfr.gaussian", engine = "C") } else { formals(siaf.fallback.F)$method <- F.method siaf.fallback.F } Fcircle <- function (r, pars, type) {} # single radius and type effRange <- function (pars) {} deriv <- function (s, pars, types) {} # coordinate matrix s, length(types) = 1 or nrow(s) Deriv <- if (F.adaptive || F.method != "iso") { function (polydomain, deriv, pars, type, nGQ = 20L) {} # single "owin" and type } else { siaf_Deriv_polyCub_iso(intrfr_names = "intrfr.gaussian.dlogsigma", engine = "C") } simulate <- function (n, pars, type, ub) {} # n=size of the sample, # type=single type, # ub=upperbound (unused here) ## if there is only one type, we set the default type(s) argument to 1 ## (it is actually unused inside the functions) if (nTypes == 1L) { formals(f)$types <- formals(F)$type <- formals(Fcircle)$type <- formals(deriv)$types <- formals(Deriv)$type <- formals(simulate)$type <- 1L } # helper expressions tmp1 <- if (logsd) expression(sds <- exp(pars)) else expression(sds <- pars) tmp1.1 <- if (nTypes==1L) expression(sd <- sds) else expression(sd <- sds[type]) tmp2 <- c( expression(sLengthSquared <- .rowSums(s^2, L <- nrow(s), 2L)), if (nTypes == 1L) expression(sdss <- sds) else expression( types <- rep_len(types, L), sdss <- sds[types] ) ) # spatial interaction function body(f) <- as.call(c(as.name("{"), tmp1, tmp2, expression(fvals <- exp(-sLengthSquared/2/sdss^2)), if (density) expression(fvals / (2*pi*sdss^2)) else expression(fvals) )) environment(f) <- baseenv() # numerically integrate f over a polygonal domain if (F.adaptive) { body(F) <- as.call(c(as.name("{"), tmp1, tmp1.1, expression( eps <- adapt * sd, intf <- polyCub.midpoint(polydomain, f, pars, type, eps=eps), intf ) )) environment(F) <- getNamespace("surveillance") } # calculate the integral of f over a circular domain around 0 body(Fcircle) <- as.call(c(as.name("{"), tmp1, tmp1.1, expression(val <- pchisq((r/sd)^2, 2)), # cf. Abramowitz&Stegun formula 26.3.24 if (!density) expression(val <- val * 2*pi*sd^2), expression(val) )) environment(Fcircle) <- getNamespace("stats") # effective integration range of f as a function of sd if (isScalar(effRangeMult)) { body(effRange) <- as.call(c(as.name("{"), tmp1, substitute(effRangeMult*sds) )) environment(effRange) <- baseenv() } else effRange <- NULL # derivative of f wrt pars derivexpr <- if (logsd) { # derive f wrt psi=log(sd) !! 
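        ## chain rule: with frac = ||s||^2/(2*sigma^2) and sigma = exp(psi),
        ## d/dpsi exp(-frac) = exp(-frac) * 2*frac, as coded below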
if (density) { quote(deriv[cbind(seq_len(L),colidx)] <- exp(-frac) / pi/sdss^2 * (frac-1)) } else { quote(deriv[cbind(seq_len(L),colidx)] <- exp(-frac) * 2*frac) } } else { # derive f wrt sd !! if (density) { quote(deriv[cbind(seq_len(L),colidx)] <- exp(-frac) / pi/sdss^3 * (frac-1)) } else { quote(deriv[cbind(seq_len(L),colidx)] <- exp(-frac) * 2*frac/sdss) } } derivexpr <- do.call("substitute", args=list(expr=derivexpr, env=list(colidx=if (nTypes==1L) 1L else quote(types)))) body(deriv) <- as.call(c(as.name("{"), tmp1, tmp2, expression( deriv <- matrix(0, L, length(pars)), frac <- sLengthSquared/2/sdss^2 ), derivexpr, expression(deriv) )) environment(deriv) <- baseenv() # integrate 'deriv' over a polygonal domain if (F.adaptive || F.method != "iso") { body(Deriv) <- as.call(c(as.name("{"), ## Determine a = argmax(abs(deriv(c(x,0)))) if (density) { # maximum absolute value is at 0 expression(a <- 0) } else { c(tmp1, tmp1.1, expression( xrange <- polydomain$xrange, # polydomain is a "owin" a <- min(max(abs(xrange)), sqrt(2)*sd), # maximum absolute value if (sum(xrange) < 0) a <- -a # is more of the domain left of 0? )) }, if (nTypes == 1L) { expression(deriv.type <- function (s) deriv(s, pars, 1L)[,1L,drop=TRUE]) } else { # d f(s|type_i) / d sigma_{type_j} is 0 for i != j expression(deriv.type <- function (s) deriv(s, pars, type)[,type,drop=TRUE]) }, expression(int <- polyCub.SV(polydomain, deriv.type, nGQ=nGQ, alpha=a)), if (nTypes == 1L) expression(int) else expression( res <- numeric(length(pars)), # zeros res[type] <- int, res ) )) environment(Deriv) <- getNamespace("surveillance") } ## sampler (does not obey the 'ub' argument!!) body(simulate) <- as.call(c(as.name("{"), tmp1, tmp1.1, expression(matrix(rnorm(2*n, mean=0, sd=sd), nrow=n, ncol=2L)) )) environment(simulate) <- getNamespace("stats") ## return the kernel specification list(f=f, F=F, Fcircle=Fcircle, effRange=effRange, deriv=deriv, Deriv=Deriv, simulate=simulate, npars=nTypes, validpars=validpars) } surveillance/R/gd.R0000644000176200001440000000474112625315364013701 0ustar liggesusers###################################################################### # This file contains utility functions for the generalized Dirichlet # distribution described in the article by T.-T. Wong et al. (1998), # Generalized Dirichlet distribution in Bayesian analysis. Applied # Mathematics and Computation, volume 97, pp 165-181. # # This includes: # rgd - sample from the generalized Dirichlet distribution # Egd - expectation of the generalized Dirichlet distribution # # Author: Michael Höhle # Date: LaMo Apr 2014. ###################################################################### ###################################################################### # Sample from the generalized dirichlet distribution, i.e. # (X_1,...,X_{k+1})' ~ GD(alpha,beta) # This is the algorithm described by Wong (1998), p. 174. # # Parameters: # alpha - vector of length k # beta - vector of length k # # Note: The alpha and beta vectors are for the first k categories. # The element in k+1 is automatically given as 1-sum_{i=1}^k X_i. ###################################################################### rgd <- function(n,alpha,beta) { #Check that alpha and beta are of the same length. 
  if (length(alpha) != length(beta)) {
    stop("alpha and beta not of same length")
  }
  if (!all(alpha>0) | !all(beta>0)) {
    stop("Assumption alpha>0 and beta>0 is violated.")
  }

  #Prepare result and sample the first step as in Wong (1998), p.174
  res <- matrix(NA,nrow=n,ncol=length(alpha)+1)
  res[,1] <- rbeta(n,alpha[1],beta[1])
  sum <- res[,1]
  for (j in 2:(length(alpha))) {
    xj <- rbeta(n, alpha[j], beta[j])
    #Adjust for previous samples
    res[,j] <- xj * (1-sum)
    sum <- sum + res[,j]
  }
  #Last cell is fixed.
  res[,length(alpha)+1] <- 1-sum
  return(res)
}

######################################################################
#Compute analytically the expectation of a GD(alpha,beta) distributed
#variable using the expression of Wong (1998).
#
# Parameters:
#  alpha - vector of alpha parameters of the distribution
#  beta - vector of beta parameters of the distribution
#
# Returns:
#  Expectation vector of the GD(alpha,beta) distribution
######################################################################
Egd <- function(alpha, beta) {
  mu <- alpha/(alpha+beta)
  mean <- NULL
  for (j in 1:length(mu)) {
    mean[j] <- mu[j] * prod(1-mu[seq_len(j-1)])
  }
  return(c(mean,prod(1-mu)))
}
surveillance/R/sts_toLatex.R0000644000176200001440000001142312672242502015604 0ustar liggesusers################################################################################
### toLatex-method for "sts" objects
###
### Copyright (C) 2014 Dirk Schumacher, 2014 Maelle Salmon
###
### This file is part of the R package "surveillance",
### free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
################################################################################

toLatex.sts <- function(object, caption = "",label=" ", columnLabels = NULL,
                        subset = NULL,
                        alarmPrefix = "\\textbf{\\textcolor{red}{",
                        alarmSuffix = "}}", ubColumnLabel = "UB", ...) {
  # Args:
  #   object: A single sts object; must not be NULL or empty.
  #   caption: A caption for the table. Default is the empty string.
  #   label: A label for the table. Default is a blank string.
  #   columnLabels: A list of labels for each column of the resulting table.
  #   subset: A range of values which should be displayed. If NULL, then all
  #           data in the sts object will be displayed. Else only a subset of
  #           data. Therefore range needs to be a numerical vector of indexes
  #           from 1 to length(@observed).
  #   alarmPrefix: A latex compatible prefix string wrapped around a table cell
  #                iff there is an alarm; i.e. alarm = TRUE
  #   alarmSuffix: A latex compatible suffix string wrapped around a table cell
  #                iff there is an alarm; i.e. alarm[i,j] = TRUE
  #   ubColumnLabel: The label of the upper bound column; default is "UB".
  #   ...: Variable arguments passed to toLatex.xtable
  # Returns:
  #  An object of class Latex

  # Error Handling
  isEmpty <- function(o) is.null(o)
  if (isEmpty(object))
    stop("object must not be null or NA.")

  if (is.list(object))
    stop("supplying a list of sts has been removed from the api.
Sorry.") if (!isS4(object) || !is(object, "sts")) stop("object must be of type sts from the surveillance package.") if (!is.character(caption)) stop("caption must be a character.") if (!isEmpty(labels) && length(labels) != length(object)) stop("number of labels differ from the number of sts objects.") # derive default values tableLabels <- colnames(object@observed) if (!is.null(columnLabels) && length(columnLabels) != ncol(object@observed) * 2 + 2) { stop("the number of labels must match the number of columns in the resulting table; i.e. 2 * columns of sts + 2.") } tableCaption <- caption tableLabel <- label vectorOfDates <- epoch(object, as.Date = TRUE) yearColumn <- Map(function(d)isoWeekYear(d)$ISOYear, vectorOfDates) if (object@freq == 12 ) monthColumn <- Map(function(d) as.POSIXlt(d)$mon, vectorOfDates) if (object@freq == 52 ) weekColumn <- Map(function(d)isoWeekYear(d)$ISOWeek, vectorOfDates) dataTable <- data.frame(unlist(yearColumn)) colnames(dataTable) <- "year" if (object@freq == 12 ) { dataTable$month <- unlist(monthColumn) } if (object@freq == 52 ) { dataTable$week <- unlist(weekColumn) } if (object@freq == 365 ) { dataTable$day <- unlist(vectorOfDates) dataTable <- dataTable[c(2)] } noCols <- ncol(dataTable) j <- 1 + noCols tableLabelsWithUB <- c() # I know it is imperative - shame on me for (k in 1:(ncol(object@observed))) { upperbounds <- round(object@upperbound[,k], 2) observedValues <- object@observed[,k] alarms <- object@alarm[,k] ubCol <- c() for (l in 1:length(upperbounds)) { if (is.na(upperbounds[l])) { ubCol <- c(ubCol, NA) } else { ubCol <- c(ubCol, upperbounds[l]) if (!is.na(alarms[l]) && alarms[l]) { observedValues[l] <- paste0(alarmPrefix, observedValues[l], alarmSuffix) } } } dataTable[,(j)] <- observedValues dataTable[,(j + 1)] <- ubCol tableLabelsWithUB <- c(tableLabelsWithUB, tableLabels[k]) tableLabelsWithUB <- c(tableLabelsWithUB, ubColumnLabel) j <- j + 2 } # remove rows which should not be displayed if (is.null(subset)) subset <- 1:nrow(dataTable) else if (min(subset) < 1 || max(subset) > nrow(dataTable)) stop("'subset' must be a subset of 1:nrow(observed), i.e., 1:", nrow(dataTable)) dataTable <- dataTable[subset,] # prepare everything for xtable newColNames <- c(colnames(dataTable)[1:noCols], tableLabelsWithUB) if (!is.null(columnLabels)) { colnames(dataTable) <- columnLabels } else { colnames(dataTable) <- newColNames } xDataTable <- xtable(dataTable, label = tableLabel, caption = tableCaption, digits = c(0)) toLatex(xDataTable, ...) } setMethod("toLatex", "sts", toLatex.sts) surveillance/R/calibration_null.R0000644000176200001440000001572412616616447016641 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Expectation and variance of proper scoring rules for Poisson and NegBin ### Reference: Wei and Held (2014), Test, 23, 787-805 ### ### Copyright (C) 2013-2014 Wei Wei, 2015 Sebastian Meyer ### $Revision: 1512 $ ### $Date: 2015-11-05 10:11:03 +0100 (Thu, 05. 
Nov 2015) $ ################################################################################ ## wrapper function calling the necessary "EV" function for the selected score score_EV <- function (mu, size = NULL, tolerance = 1e-4, which = c("dss", "logs", "rps")) { which <- match.arg(which) if (which == "dss") return(dss_EV(mu, size)) ## for "logs" and "rps", the EV function only works with a single prediction ## -> apply to each mu (size) res <- if (is.null(size)) { # Poisson vapply(X = mu, FUN = paste0(which, "_EV_1P"), FUN.VALUE = c(E = 0, V = 0), tolerance = tolerance, USE.NAMES = FALSE) } else { # NegBin mapply(FUN = paste0(which, "_EV_1NB"), mu = mu, size = size, MoreArgs = list(tolerance = tolerance), SIMPLIFY = TRUE, USE.NAMES = FALSE) } ## 'res' has dimension 2 x length(mu) list(E = res[1L,], V = res[2L,]) } ########################## ### Dawid-Sebastiani Score ########################## dss_EV <- function (mu, size = NULL) { sigma2 <- if (is.null(size)) mu else mu * (1 + mu/size) E <- 1 + log(sigma2) V <- if (is.null(size)) { 2 + 1/sigma2 } else { 2 + 6/size + 1/sigma2 } list(E = E, V = V) } ##################### ### Logarithmic Score ##################### ## for a single Poisson prediction logs_EV_1P <- function (mu, tolerance = 1e-4) # tolerance is in absolute value { ## use the same kmax for expectation and variance -> shared computations ## K2 is always a bit larger than K1, so we use K2 kmax <- if (mu^3 < tolerance/.Machine$double.eps/2) { ## we can calculate K2 from Theorem 1 (b) qpois(1 - tolerance/(mu^3 + 6*mu^2 + 7*mu + 1), lambda = mu) + 3 } else { # very high quantile (e.g., 1 - 1e-16) would yield Inf mu + 10 * sqrt(mu) } kseq <- seq_len(kmax) ## compute values required by both E and V fseq <- dpois(kseq, lambda = mu) logfactseq <- lfactorial(kseq) ## expectation E <- if (mu > tolerance^(-1/4)) { # fast version for "large" mu ## approximation error is of order 1/mu^4 0.5 + 0.5*log(2*pi*mu) - 1/12/mu - 1/24/mu^2 - 19/360/mu^3 } else { ##kmax1 <- qpois(1 - tolerance/(mu^2 + 3*mu + 1), lambda = mu) + 2 seqq1 <- fseq * logfactseq mu * (1-log(mu)) + sum(seqq1) } ## variance (does it converge to 0.5 as mu -> Inf ?) 
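    ## LogS(P,k) = -k*log(mu) + mu + log(k!), so Var(LogS) equals
    ## Var(log(k!) - k*log(mu)); the mean of that term is E - mu,
    ## which is the centering constant used below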
seqq2 <- (logfactseq - kseq * log(mu))^2 * fseq V <- sum(seqq2) - (E - mu)^2 c(E = E, V = V) } ## for a single NegBin prediction logs_EV_1NB <- function (mu, size, tolerance = 1e-4) { ## TODO: replace simple kmax by formulae from the paper kmax <- qnbinom(1-tolerance/10, mu = mu, size = size) + 5 kseq <- 0:kmax ## compute values required by both E and V fseq <- dnbinom(kseq, mu = mu, size = size) lgammaseq <- lbeta(kseq + 1L, size) + log(kseq + size) ## expectation seqq1 <- lgammaseq * fseq E <- sum(seqq1) - size*log(size) - mu*log(mu) + (mu+size)*log(mu+size) ## variance con2 <- E - size * log(1 + mu/size) seqq2 <- (lgammaseq + kseq * log(1 + size/mu))^2 * fseq V <- sum(seqq2) - con2^2 ## check against formulation in the paper (Equation 11): ## con2paper <- E + size*log(size) - size*log(size+mu) - lgamma(size) ## seqq2paper <- (-lgamma(kseq+size) + lgamma(kseq+1L) + kseq*log(1+size/mu))^2 * fseq ## Vpaper <- sum(seqq2paper) - con2paper^2 ## => V and Vpaper are only identical for kmax -> Inf c(E = E, V = V) } ############################ ### Ranked Probability Score ############################ ## for a single Poisson prediction rps_EV_1P <- function (mu, tolerance = 1e-4) # tolerance is in absolute value { ## expectation if (requireNamespace("gsl", quietly = TRUE)) { ## faster and more accurate implementation (works for larger mu) E <- mu * gsl::bessel_I0_scaled(2*mu, give=FALSE, strict=TRUE) + mu * gsl::bessel_I1_scaled(2*mu, give=FALSE, strict=TRUE) } else { E <- mu * besselI(2*mu, 0, expon.scaled = TRUE) + mu * besselI(2*mu, 1, expon.scaled = TRUE) if (identical(E, 0)) { ## R's besselI() works fine for mu <= 50000 (on my .Machine) ## but returns 0 (buffer overflow) for larger arguments warning("'mu' is too large for besselI(), install package \"gsl\"") return(c(E = NA_real_, V = NA_real_)) } } ## variance kmax <- max(qpois(1 - tolerance/(10*mu^2 + mu), lambda = mu) + 2, 8) # cf. Theorem 2 (a) kseq <- 0:kmax fseq <- dpois(kseq, lambda = mu) Fseq <- cumsum(fseq) # = ppois(kseq, lambda = mu) psiseq <- (kseq - mu) * (2*Fseq - 1) + 2*mu * fseq seqq <- psiseq^2 * fseq V <- sum(seqq) - 4 * E^2 c(E = E, V = V) } ## for a single NegBin prediction rps_EV_1NB <- function (mu, size, tolerance = 1e-4) { ## determine kmax for Var0(RPS), which is always > kmax for E0(RPS), ## cf. Theorem 2 (c), here corrected (1-) and simplified l5 <- (mu + 1)^2 + 1 kmax2 <- max(qnbinom(1-tolerance/l5, mu = mu*(1+2/size), size = size+2) + 2, 8) ## the other listed terms seem to be always smaller than the first one: ## qnbinom(1-tolerance/l5, mu = mu, size = size) ## qnbinom(1-tolerance/l5, mu = mu*(1+1/size), size = size+1) + 1 kseq2 <- 0:kmax2 fseq2 <- dnbinom(kseq2, mu = mu, size = size) Fseq2 <- cumsum(fseq2) # = pnbinom(kseq2, mu = mu, size = size) ## expectation ghgz_part <- mu * (1 + mu/size) ghgz <- 4 * ghgz_part / size E <- if (ghgz < 1 && requireNamespace("gsl", quietly = TRUE)) { ghgz_part * gsl::hyperg_2F1(1+size, 0.5, 2, -ghgz, give = FALSE, strict = TRUE) } else { kmax1 <- max(qnbinom(1-tolerance/mu, mu = mu*(1+1/size), size = size+1) + 1, 8) # cf. 
Theorem 2 (b)
        kseq1 <- seq_len(kmax1)
        seqq1 <- vapply(
            X = kseq1, # we could use kmax2 (> kmax1) also here
            FUN = function (i) fseq2[i+1L] * sum((i:1) * fseq2[seq_len(i)]),
            FUN.VALUE = 0, USE.NAMES = FALSE)
        sum(seqq1)
    }

    ## variance
    psiseq <- kseq2 * (2 * Fseq2 - 1) +
        mu * (1 - 2 * pnbinom(kseq2 - 1, mu = mu + mu/size, size = size + 1))
    seqq <- psiseq^2 * fseq2
    V <- sum(seqq) - 4 * E^2

    c(E = E, V = V)
}
surveillance/R/qlomax.R0000644000176200001440000000145313275247767014612 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
###
### Simple implementation of the quantile function of the Lomax distribution
### (we could also use VGAM::qlomax, but this would be slightly slower)
###
### Copyright (C) 2012-2013 Sebastian Meyer
### $Revision: 2124 $
### $Date: 2018-05-11 10:10:31 +0200 (Fri, 11. May 2018) $
################################################################################

qlomax <- function (p, scale, shape)
{
    .Deprecated("VGAM::qlomax", package = "surveillance")
    scale * ((1-p)^(-1/shape) - 1)
}
surveillance/R/algo_bayes.R0000644000176200001440000000755112600466365015417 0ustar liggesusers###################################################
### chunk number 1:
###################################################
# Implementation of the Bayes system.
# The system evaluates specified timepoints and gives alarm if it recognizes
# an outbreak for this timepoint.
#
# Features:
#  Choice between different Bayes sub-systems (difference in reference values).

algo.bayesLatestTimepoint <- function(disProgObj, timePoint = NULL,
                control = list(b = 0, w = 6, actY = TRUE, alpha=0.05)){

  observed <- disProgObj$observed
  freq <- disProgObj$freq

  # If there is no value in timePoint, then take the last value in observed
  if(is.null(timePoint)){
        timePoint = length(observed)
  }

  # check if the vector observed includes all necessary data.
  if((timePoint-(control$b*freq)-control$w) < 1){
        stop("The vector of observed is too short!")
  }

  # construct the reference values
  basevec <- c()
  # if actY == TRUE use also the values of the year of timepoint
  if(control$actY){
        basevec <- observed[(timePoint - control$w):(timePoint - 1)]
  }
  # check if you need more reference values of the past
  if(control$b >= 1){
        for(i in 1:control$b){
                basevec <- c(basevec, observed[(timePoint-(i*freq)-control$w):(timePoint-(i*freq)+control$w)])
        }
  }

  # get the parameter for the negative binomial distribution
  # Modification on 13 July 2009 after comment by C. W. Ryan on NAs in the
  # time series
  sumBasevec <- sum(basevec, na.rm=TRUE)
  lengthBasevec <- sum(!is.na(basevec))

  # compute the upper limit of a one-sided (1-alpha)*100% prediction interval.
  upPI <- qnbinom(1-control$alpha, sumBasevec + 1/2, (lengthBasevec)/(lengthBasevec + 1))

  # give alarm if the actual value is larger than the upper limit.
  alarm <- observed[timePoint] > upPI

  result <- list(alarm=alarm, upperbound=upPI, disProgObj=disProgObj)
  class(result) = "survRes" # for surveillance system result
  return(result)
}

# 'algo.bayes' calls 'algo.bayesLatestTimepoint' for data points given by range.
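# Worked sketch of the predictive bound computed above (numbers are made up
# for illustration): with sum(basevec) = 42 over lengthBasevec = 18 non-NA
# reference values and alpha = 0.05, the bound is
# qnbinom(0.95, 42 + 1/2, 18/19), i.e. the 95% quantile of the negative
# binomial posterior predictive distribution.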
algo.bayes <- function(disProgObj, control = list(range = range, b = 0, w = 6, actY = TRUE,alpha=0.05)){

  # Set the default values if not yet set
  if(is.null(control$b)){ # value from bayes 1
        control$b <- 0
  }
  if(is.null(control$w)){ # value from bayes 1
        control$w <- 6
  }
  if(is.null(control$alpha)){ # value from bayes 1
        control$alpha <- 0.05
  }
  if(is.null(control$actY)){ # value from bayes 1
        control$actY <- TRUE
  }

  # initialize the necessary vectors
  alarm <- matrix(data = 0, nrow = length(control$range), ncol = 1)
  upperbound <- matrix(data = 0, nrow = length(control$range), ncol = 1)

  count <- 1
  for(i in control$range){
        # call algo.bayesLatestTimepoint
        result <- algo.bayesLatestTimepoint(disProgObj, i, control = control)
        # store the results in the right order
        alarm[count] <- result$alarm
        upperbound[count] <- result$upperbound
        count <- count + 1
  }

  #Add name and data name to control object.
  control$name <- paste("bayes(",control$w,",",control$w*control$actY,",",control$b,")",sep="")
  control$data <- paste(deparse(substitute(disProgObj)))

  # return alarm and upperbound vectors
  result <- list(alarm = alarm, upperbound = upperbound,
                 disProgObj=disProgObj, control=control)
  class(result) = "survRes" # for surveillance system result
  return(result)
}

algo.bayes1 <- function(disProgObj, control = list(range = range)){
  algo.bayes(disProgObj, control = list(range = control$range, b = 0, w = 6, actY = TRUE,alpha=0.05))
}
algo.bayes2 <- function(disProgObj, control = list(range = range)){
  algo.bayes(disProgObj, control = list(range = control$range, b = 1, w = 6, actY = TRUE,alpha=0.05))
}
algo.bayes3 <- function(disProgObj, control = list(range = range)){
  algo.bayes(disProgObj, control = list(range = control$range, b = 2, w = 4, actY = FALSE,alpha=0.05))
}
surveillance/R/twinstim_tiaf.R0000644000176200001440000000353412272751567016172 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
###
### Temporal interaction functions for twinstim's epidemic component.
### Specific implementations are in separate files (e.g.: exponential, step).
###
### Copyright (C) 2009-2014 Sebastian Meyer
### $Revision: 733 $
### $Date: 2014-01-31 17:46:47 +0100 (Fri, 31.
Jan 2014) $ ################################################################################ ##################### ### "Constructor" ### ##################### tiaf <- function (g, G, deriv, Deriv, npars, validpars = NULL) { npars <- as.integer(npars) if (length(npars) != 1 || npars < 0L) { stop("'tiaf'/'npars' must be a single nonnegative number") } haspars <- npars > 0L g <- .checknargs3(g, "tiaf$g") G <- .checknargs3(G, "tiaf$G") if (!haspars || missing(deriv)) deriv <- NULL if (!haspars || missing(Deriv)) Deriv <- NULL if (!is.null(deriv)) deriv <- .checknargs3(deriv, "tiaf$deriv") if (!is.null(Deriv)) Deriv <- .checknargs3(Deriv, "tiaf$Deriv") validpars <- if (!haspars || is.null(validpars)) NULL else match.fun(validpars) list(g = g, G = G, deriv = deriv, Deriv = Deriv, npars = npars, validpars = validpars) } ################################# ### Constant temporal interaction ################################# tiaf.constant <- function () { res <- list( g = as.function(alist(t=, pars=, types=, rep.int(1, length(t))), envir = .GlobalEnv), G = as.function(alist(t=, pars=, types=, t), envir = .GlobalEnv), npars = 0L ) attr(res, "constant") <- TRUE res } surveillance/R/permutationTest.R0000644000176200001440000000351612532032517016506 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Permutation test to compare the means of paired samples ### ### Copyright (C) 2011-2012 Michaela Paul, 2013-2015 Sebastian Meyer ### $Revision: 1347 $ ### $Date: 2015-05-29 11:45:51 +0200 (Fri, 29. May 2015) $ ################################################################################ permutationTest <- function(score1, score2, nPermutation = 9999, plot = FALSE, verbose = FALSE) { stopifnot((nTime <- length(score1)) == length(score2), !is.na(score1), !is.na(score2)) meanScore1 <- mean(score1) meanScore2 <- mean(score2) diffObserved <- meanScore1 - meanScore2 diffMean <- replicate(nPermutation, { sel <- rbinom(nTime, size=1, prob=0.5) g1 <- (sum(score1[sel==0]) + sum(score2[sel==1]))/nTime g2 <- (sum(score1[sel==1]) + sum(score2[sel==0]))/nTime g1 - g2 }) if (isTRUE(plot)) plot <- list() if (is.list(plot)) { do.call("permtestplot", args = modifyList( list(permstats = diffMean, xmarks = c("observed" = diffObserved), xlab = "Difference between means", ylab = "Density", main = ""), plot)) } pVal <- (1+sum(abs(diffMean)>=abs(diffObserved))) / (nPermutation+1) pTtest <- t.test(score1, score2, paired=TRUE)$p.value if (verbose) cat("mean difference =", diffObserved, "\tp(permutation) =", pVal, "\tp(paired t-test) =", pTtest, "\n") list(diffObs=diffObserved, pVal.permut=pVal, pVal.t=pTtest) } surveillance/R/algo_rki.R0000644000176200001440000001056511770114750015073 0ustar liggesusers### R code from vignette source 'Rnw/algo_rki.Rnw' ### Encoding: ISO8859-1 ################################################### ### code chunk number 1: algo_rki.Rnw:96-214 ################################################### # Implementation of the Robert-Koch Institute (RKI) surveillance system. # The system evaluates specified timepoints and gives alarm if it recognizes # an outbreak for this timepoint. # # Features: # Choice between the different RKI sub-systems (difference in reference values). 
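# Usage sketch for the convenience wrappers defined at the end of this file
# ('myDisProg' is a hypothetical disProg object, not package data):
#   algo.rki1(myDisProg, control = list(range = 250:290)) # b=0, w=6, incl. current year
#   algo.rki2(myDisProg, control = list(range = 250:290)) # b=1, w=6, incl. current year
#   algo.rki3(myDisProg, control = list(range = 250:290)) # b=2, w=4, past years only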
algo.rkiLatestTimepoint <- function(disProgObj, timePoint = NULL,
                control = list(b = 2, w = 4, actY = FALSE)){

  observed <- disProgObj$observed
  freq <- disProgObj$freq

  # If there is no value in timePoint, then take the last value in observed
  if(is.null(timePoint)){
        timePoint = length(observed)
  }

  # check if the vector observed includes all necessary data.
  if((timePoint-(control$b*freq)-control$w) < 1){
        stop("The vector of observed is too short!")
  }

  # Extract the reference values from the historic time series
  basevec <- c()
  # if actY == TRUE use also the values of the year of timepoint
  if(control$actY){
        basevec <- observed[(timePoint - control$w):(timePoint - 1)]
  }
  # check if you need more reference values of the past
  if(control$b >= 1){
        for(i in 1:control$b){
                basevec <- c(basevec, observed[(timePoint-(i*freq)-control$w):(timePoint-(i*freq)+control$w)])
        }
  }

  # compute the mean.
  mu <- mean(basevec)

  if(mu > 20){ # use the normal distribution.
        # compute the standard deviation.
        sigma <- sqrt(var(basevec))
        # compute the upper limit of the 95% CI.
        upCi <- mu + 2 * sigma
  }
  else{ # use the Poisson distribution.
        # take the upper limit of the 95% CI from the table CIdata.txt.
        #data("CIdata", envir=environment()) # only local assignment -> SM: however, should not use data() here
        #CIdata <- read.table(system.file("data", "CIdata.txt", package="surveillance"), header=TRUE)
        #SM: still better: use R/sysdata.rda (internal datasets being lazy-loaded into the namespace environment)
        # for the table-lookup mu must be rounded down.
        mu <- floor(mu)
        # we need the third column in the row mu + 1
        upCi <- CIdata[mu + 1, 3]
  }

  # give alarm if the actual value is larger than the upper limit.
  alarm <- observed[timePoint] > upCi

  result <- list(alarm=alarm, upperbound=upCi)
  class(result) = "survRes" # for surveillance system result
  return(result)
}

# 'algo.rki' calls 'algo.rkiLatestTimepoint' for data points given by range.
algo.rki <- function(disProgObj, control = list(range = range, b = 2, w = 4, actY = FALSE)){

  # Set the default values if not yet set
  if(is.null(control$b)){ # value from rki 3
        control$b <- 2
  }
  if(is.null(control$w)){ # value from rki 3
        control$w <- 4
  }
  if(is.null(control$actY)){ # value from rki 3
        control$actY <- FALSE
  }

  # initialize the necessary vectors
  alarm <- matrix(data = 0, nrow = length(control$range), ncol = 1)
  upperbound <- matrix(data = 0, nrow = length(control$range), ncol = 1)

  count <- 1
  for(i in control$range){
        #hoehle Debug:
        #print(i)
        # call algo.rkiLatestTimepoint
        result <- algo.rkiLatestTimepoint(disProgObj, i, control = control)
        # store the results in the right order
        alarm[count] <- result$alarm
        upperbound[count] <- result$upperbound
        count <- count + 1
  }

  #Add name and data name to control object.
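  # e.g. the defaults w=4, actY=FALSE, b=2 yield the name "rki(4,0,2)"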
control$name <- paste("rki(",control$w,",",control$w*control$actY,",",control$b,")",sep="") control$data <- paste(deparse(substitute(disProgObj))) # return alarm and upperbound vectors result <- list(alarm = alarm, upperbound = upperbound, disProgObj=disProgObj, control=control) class(result) = "survRes" # for surveillance system result return(result) } algo.rki1 <- function(disProgObj, control = list(range = range)) { algo.rki(disProgObj, control = list(range = control$range, b = 0, w = 6, actY = TRUE)) } algo.rki2 <- function(disProgObj, control = list(range = range)){ algo.rki(disProgObj, control = list(range = control$range, b = 1, w = 6, actY = TRUE)) } algo.rki3 <- function(disProgObj, control = list(range = range)){ algo.rki(disProgObj, control = list(range = control$range, b = 2, w = 4, actY = FALSE)) } surveillance/R/twinstim_methods.R0000644000176200001440000010005513514363214016676 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Methods for objects of class "twinstim", specifically: ### vcov, logLik, print, summary, plot, R0, residuals, update, terms, all.equal ### ### Copyright (C) 2009-2019 Sebastian Meyer ### $Revision: 2461 $ ### $Date: 2019-07-19 17:49:32 +0200 (Fri, 19. Jul 2019) $ ################################################################################ ## extract the link function used for the epidemic predictor (default: log-link) .epilink <- function (x) { link <- attr(x$formula$epidemic, "link") if (is.null(link)) "log" else link } ### don't need a specific coef-method (identical to stats:::coef.default) ## coef.twinstim <- function (object, ...) ## { ## object$coefficients ## } ## list coefficients by component coeflist.twinstim <- coeflist.simEpidataCS <- function (x, ...) { coeflist <- coeflist.default(x$coefficients, x$npars) ## rename elements and union "nbeta0" and "p" as "endemic" coeflist <- c(list(c(coeflist[[1L]], coeflist[[2L]])), coeflist[-(1:2)]) names(coeflist) <- c("endemic", "epidemic", "siaf", "tiaf") coeflist } ## asymptotic variance-covariance matrix (inverse of expected fisher information) vcov.twinstim <- function (object, ...) { if (!is.null(object[["fisherinfo"]])) { solve(object$fisherinfo) } else if (!is.null(object[["fisherinfo.observed"]])) { solve(object$fisherinfo.observed) } else { stop("Fisher information not available; use, e.g., -optimHess()") } } ## Extract log-likelihood of the model (which also enables the use of AIC()) logLik.twinstim <- function (object, ...) { r <- object$loglik attr(r, "df") <- length(coef(object)) attr(r, "nobs") <- nobs(object) class(r) <- "logLik" r } ## Also define an extractAIC-method to make step() work extractAIC.twinstim <- function (fit, scale, k = 2, ...) { loglik <- logLik(fit) edf <- attr(loglik, "df") penalty <- k * edf c(edf = edf, AIC = -2 * c(loglik) + penalty) } ## Number of events (excluding the prehistory) nobs.twinstim <- function (object, ...) length(object$fitted) ## print-method print.twinstim <- function (x, digits = max(3, getOption("digits") - 3), ...) 
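## prints the call, the coefficient vector, and the log-likelihood,
## plus a warning if the optimization routine did not converge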
{ cat("\nCall:\n") print.default(x$call) cat("\nCoefficients:\n") print.default(format(coef(x), digits=digits), print.gap = 2, quote = FALSE) cat("\nLog-likelihood: ", format(logLik(x), digits=digits), "\n", sep = "") if (!isTRUE(x$converged)) { cat("\nWARNING: OPTIMIZATION ROUTINE DID NOT CONVERGE!", paste0("(",x$converged,")"), "\n") } cat("\n") invisible(x) } summary.twinstim <- function (object, test.iaf = FALSE, correlation = FALSE, symbolic.cor = FALSE, runtime = FALSE, ...) { ans <- unclass(object)[c("call", "converged", if (runtime) "counts")] npars <- object$npars nbeta0 <- npars[1]; p <- npars[2]; nbeta <- nbeta0 + p q <- npars[3] nNotIaf <- nbeta + q niafpars <- npars[4] + npars[5] est <- coef(object) ans$cov <- tryCatch(vcov(object), error = function (e) { warning(e) matrix(NA_real_, length(est), length(est)) }) se <- sqrt(diag(ans$cov)) zval <- est/se pval <- 2 * pnorm(abs(zval), lower.tail = FALSE) coefficients <- cbind(est, se, zval, pval) dimnames(coefficients) <- list(names(est), c("Estimate", "Std. Error", "z value", "Pr(>|z|)")) ans$coefficients.beta <- coefficients[seq_len(nbeta),,drop=FALSE] ans$coefficients.gamma <- structure( coefficients[nbeta+seq_len(q),,drop=FALSE], link = .epilink(object) ) ans$coefficients.iaf <- coefficients[nNotIaf+seq_len(niafpars),,drop=FALSE] if (!test.iaf) { ## usually, siaf and tiaf parameters are strictly positive, ## or parametrized on the logscale. In this case the usual wald test ## with H0: para=0 is invalid or meaningless. is.na(ans$coefficients.iaf[,3:4]) <- TRUE } # estimated parameter correlation if (correlation) { ans$correlation <- cov2cor(ans$cov) ans$symbolic.cor <- symbolic.cor } ans$loglik <- logLik(object) ans$aic <- AIC(object) if (runtime) { ans$runtime <- object$runtime } class(ans) <- "summary.twinstim" ans } ## additional methods to make confint.default work for summary.twinstim vcov.summary.twinstim <- function (object, ...) object$cov coef.summary.twinstim <- function (object, ...) with(object, { coeftab <- rbind(coefficients.beta, coefficients.gamma, coefficients.iaf) structure(coeftab[,1], names=rownames(coeftab)) }) ## print-method for summary.twinstim print.summary.twinstim <- function (x, digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor, signif.stars = getOption("show.signif.stars"), ...) { nbeta <- nrow(x$coefficients.beta) # = nbeta0 + p q <- nrow(x$coefficients.gamma) niafpars <- nrow(x$coefficients.iaf) cat("\nCall:\n") print.default(x$call) if (nbeta > 0L) { cat("\nCoefficients of the endemic component:\n") printCoefmat(x$coefficients.beta, digits = digits, signif.stars = signif.stars, signif.legend = (q==0L) && signif.stars, ...) } else cat("\nNo coefficients in the endemic component.\n") if (q + niafpars > 0L) { cat("\nCoefficients of the epidemic component", if (attr(x$coefficients.gamma, "link") != "log") paste0(" (LINK FUNCTION: ", attr(x$coefficients.gamma, "link"), ")"), ":\n", sep = "") printCoefmat(rbind(x$coefficients.gamma, x$coefficients.iaf), digits = digits, signif.stars = signif.stars, ...) 
} else cat("\nNo epidemic component.\n") cat("\nAIC: ", format(x$aic, digits=max(4, digits+1))) cat("\nLog-likelihood:", format(x$loglik, digits = digits)) runtime <- x$runtime if (!is.null(runtime)) { cat("\nNumber of log-likelihood evaluations:", x$counts[1L]) cat("\nNumber of score function evaluations:", x$counts[2L]) cores <- attr(runtime, "cores") elapsed <- if (length(runtime) == 1L) { # surveillance < 1.6-0 runtime } else { runtime[["elapsed"]] } cat("\nRuntime", if (!is.null(cores) && cores > 1) paste0(" (", cores, " cores)"), ": ", format(elapsed, digits = max(4, digits+1)), " seconds", sep = "") } cat("\n") correl <- x$correlation if (!is.null(correl)) { p <- NCOL(correl) if (p > 1L) { cat("\nCorrelation of Coefficients:\n") if (is.logical(symbolic.cor) && symbolic.cor) { correl <- symnum(correl, abbr.colnames = NULL) correlcodes <- attr(correl, "legend") attr(correl, "legend") <- NULL print(correl) cat("---\nCorr. codes: ", correlcodes, "\n", sep="") } else { correl <- format(round(correl, 2), nsmall = 2) correl[!lower.tri(correl)] <- "" colnames(correl) <- substr(colnames(correl), 1, 5) print(correl[-1, -p, drop = FALSE], quote = FALSE) } } } if (!isTRUE(x$converged)) { cat("\nWARNING: OPTIMIZATION ROUTINE DID NOT CONVERGE!", paste0("(",x$converged,")"), "\n") } cat("\n") invisible(x) } ### 'cat's the summary in LaTeX code toLatex.summary.twinstim <- function ( object, digits = max(3, getOption("digits") - 3), eps.Pvalue = 1e-4, align = "lrrrr", booktabs = getOption("xtable.booktabs", FALSE), withAIC = FALSE, ...) { ret <- capture.output({ cat("\\begin{tabular}{", align, "}\n", if (booktabs) "\\toprule" else "\\hline", "\n", sep="") cat(" & Estimate & Std. Error & $z$ value & $P(|Z|>|z|)$ \\\\\n", if (!booktabs) "\\hline\n", sep="") tabh <- object$coefficients.beta tabe <- rbind(object$coefficients.gamma, object$coefficients.iaf) for (tabname in c("tabh", "tabe")) { tab <- get(tabname) if (nrow(tab) > 0L) { rownames(tab) <- gsub(" ", "", rownames(tab)) tab_char <- capture.output( printCoefmat(tab, digits=digits, signif.stars=FALSE, eps.Pvalue = eps.Pvalue, na.print="NA") )[-1] ## remove extra space (since used as column sep in read.table) tab_char <- sub("< ", "<", tab_char, fixed=TRUE) # small p-values ## replace scientific notation by corresponding LaTeX code tab_char <- sub("( xtable.summary.twinstim must be exported } formals(xtable.twinstim) <- formals(xtable.summary.twinstim) ### Plot method for twinstim (wrapper for iafplot and intensityplot) plot.twinstim <- function (x, which, ...) { cl <- match.call() which <- match.arg(which, choices = c(eval(formals(intensityplot.twinstim)$which), eval(formals(iafplot)$which))) FUN <- if (which %in% eval(formals(intensityplot.twinstim)$which)) "intensityplot" else "iafplot" cl[[1]] <- as.name(FUN) if (FUN == "iafplot") names(cl)[names(cl) == "x"] <- "object" eval(cl, envir = parent.frame()) } ### Calculates the basic reproduction number R0 for individuals ### with marks given in 'newevents' R0.twinstim <- function (object, newevents, trimmed = TRUE, newcoef = NULL, ...) 
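## For event j, R0_j = qSum_j * gammapred_j * siafInt_j * tiafInt_j, i.e. the
## epidemic predictor scaled by the integrals of the spatial and temporal
## interaction functions -- trimmed to the observation region and period by
## default, or taken over the full eps.s-disc and eps.t-interval if trimmed=FALSE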
{ ## check for epidemic component npars <- object$npars if (npars["q"] == 0L) { message("no epidemic component in model, returning 0-vector") if (missing(newevents)) return(object$R0) else { return(structure(rep.int(0, nrow(newevents)), names = rownames(newevents))) } } ## update object for use of new parameters if (!is.null(newcoef)) { object <- update.twinstim(object, optim.args = list(par=newcoef, fixed=TRUE), cumCIF = FALSE, cores = 1L, verbose = FALSE) } ## extract model information t0 <- object$timeRange[1L] T <- object$timeRange[2L] typeNames <- rownames(object$qmatrix) nTypes <- length(typeNames) types <- seq_len(nTypes) form <- formula(object) siaf <- form$siaf tiaf <- form$tiaf coefs <- coef(object) tiafpars <- coefs[sum(npars[1:4]) + seq_len(npars["ntiafpars"])] siafpars <- coefs[sum(npars[1:3]) + seq_len(npars["nsiafpars"])] if (missing(newevents)) { ## if no newevents are supplied, use original events if (trimmed) { # already calculated by 'twinstim' return(object$R0) } else { # untrimmed version (spatio-temporal integral over R+ x R^2) ## extract relevant data from model environment if (is.null(modelenv <- environment(object))) { stop("need model environment for untrimmed R0 of fitted events\n", " -- re-fit or update() with 'model=TRUE'") } eventTypes <- modelenv$eventTypes eps.t <- modelenv$eps.t eps.s <- modelenv$eps.s gammapred <- modelenv$gammapred names(gammapred) <- names(object$R0) # for names of the result } } else { # use newevents stopifnot(is.data.frame(newevents)) eps.t <- newevents[["eps.t"]] eps.s <- newevents[["eps.s"]] if (is.null(eps.s) || is.null(eps.t)) { stop("missing \"eps.s\" or \"eps.t\" columns in 'newevents'") } if (is.null(newevents[["type"]])) { if (nTypes == 1) { newevents$type <- factor(rep.int(typeNames, nrow(newevents)), levels = typeNames) } else { stop("missing event \"type\" column in 'newevents'") } } else { newevents$type <- factor(newevents$type, levels = typeNames) if (anyNA(newevents$type)) { stop("unknown event type in 'newevents'; must be one of: ", paste0("\"", typeNames, "\"", collapse = ", ")) } } ## subset newevents to timeRange if (trimmed) { eventTimes <- newevents[["time"]] if (is.null(eventTimes)) { stop("missing event \"time\" column in 'newevents'") } .N <- nrow(newevents) newevents <- subset(newevents, time + eps.t > t0 & time <= T) if (nrow(newevents) < .N) { message("subsetted 'newevents' to only include events infectious ", "during 'object$timeRange'") } } ## calculate gammapred for newevents epidemic <- terms(form$epidemic, data = newevents, keep.order = TRUE) mfe <- model.frame(epidemic, data = newevents, na.action = na.pass, drop.unused.levels = FALSE, xlev = object$xlevels$epidemic) # sync factor levels mme <- model.matrix(epidemic, mfe) gamma <- coefs[sum(npars[1:2]) + seq_len(npars["q"])] if (ncol(mme) != length(gamma)) { stop("epidemic model matrix has the wrong number of columns ", "(check the variable types in 'newevents' (factors, etc))") } gammapred <- drop(mme %*% gamma) # identity link if (.epilink(object) == "log") gammapred <- exp(gammapred) names(gammapred) <- rownames(newevents) ## now, convert types of newevents to integer codes eventTypes <- as.integer(newevents$type) } ## qSum qSumTypes <- rowSums(object$qmatrix) qSum <- unname(qSumTypes[eventTypes]) ## calculate remaining factors of the R0 formula, i.e. 
siafInt and tiafInt if (trimmed) { # trimmed R0 for newevents ## integral of g over the observed infectious periods .tiafInt <- .tiafIntFUN() gIntUpper <- pmin(T - eventTimes, eps.t) gIntLower <- pmax(0, t0 - eventTimes) tiafInt <- .tiafInt(tiafpars, from=gIntLower, to=gIntUpper, type=eventTypes, G=tiaf$G) ## integral of f over the influenceRegion bdist <- newevents[[".bdist"]] influenceRegion <- newevents[[".influenceRegion"]] if (is.null(influenceRegion)) { stop("missing \".influenceRegion\" component in 'newevents'") } noCircularIR <- if (is.null(bdist)) FALSE else all(eps.s > bdist) if (attr(siaf, "constant")) { iRareas <- sapply(influenceRegion, area.owin) ## will be used by .siafInt() } else if (! (is.null(siaf$Fcircle) || (is.null(siaf$effRange) && noCircularIR))) { if (is.null(bdist)) { stop("missing \".bdist\" component in 'newevents'") } } .siafInt <- .siafIntFUN(siaf, noCircularIR=noCircularIR) .siafInt.args <- c(alist(siafpars), object$control.siaf$F) siafInt <- do.call(".siafInt", .siafInt.args) } else { # untrimmed R0 for original events or newevents ## integrals of interaction functions for all combinations of type and ## eps.s/eps.t in newevents typeTcombis <- expand.grid(type=types, eps.t=unique(eps.t), KEEP.OUT.ATTRS=FALSE) typeTcombis$gInt <- with(typeTcombis, tiaf$G(eps.t, tiafpars, type)) - tiaf$G(rep.int(0,nTypes), tiafpars, types)[typeTcombis$type] Fcircle <- getFcircle(siaf, object$control.siaf$F) typeScombis <- expand.grid(type=types, eps.s=unique(eps.s), KEEP.OUT.ATTRS=FALSE) typeScombis$fInt <- apply(typeScombis, MARGIN=1, FUN=function (type_eps.s) { type <- type_eps.s[1L] eps.s <- type_eps.s[2L] Fcircle(eps.s, siafpars, type) }) ## match combinations to rows of original events or 'newevents' eventscombiidxS <- match(paste(eventTypes,eps.s,sep="."), with(typeScombis,paste(type,eps.s,sep="."))) eventscombiidxT <- match(paste(eventTypes,eps.t,sep="."), with(typeTcombis,paste(type,eps.t,sep="."))) siafInt <- typeScombis$fInt[eventscombiidxS] tiafInt <- typeTcombis$gInt[eventscombiidxT] if (any(is.infinite(eps.t) & !is.finite(tiafInt), is.infinite(eps.s) & !is.finite(siafInt))) { message("infinite interaction ranges yield non-finite R0 values ", "because 'trimmed = FALSE'") } } ## return R0 values R0s <- qSum * gammapred * siafInt * tiafInt R0s } ## calculate simple R0 (over circular domain, without epidemic covariates, ## for type-invariant siaf/tiaf) simpleR0 <- function (object, eta = coef(object)[["e.(Intercept)"]], eps.s = NULL, eps.t = NULL, newcoef = NULL) { stopifnot(inherits(object, c("twinstim", "simEpidataCS"))) if (object$npars[["q"]] == 0L) return(0) if (any(rowSums(object$qmatrix) != 1)) warning("'simpleR0' is not correct for type-specific epidemic models") if (!is.null(newcoef)) { # use alternative coefficients object$coefficients <- newcoef } coeflist <- coeflist(object) siaf <- object$formula$siaf tiaf <- object$formula$tiaf ## default radii of interaction if (is.null(eps.s)) { eps.s <- attr(siaf, "eps") if (length(eps.s) > 1L) stop("found non-unique 'eps.s'; please set one") } else stopifnot(isScalar(eps.s)) if (is.null(eps.t)) { eps.t <- attr(tiaf, "eps") if (length(eps.t) > 1L) stop("found non-unique 'eps.t'; please set one") } else stopifnot(isScalar(eps.t)) ## integral of siaf over a disc of radius eps.s Fcircle <- getFcircle(siaf, object$control.siaf$F) siafInt <- unname(Fcircle(eps.s, coeflist$siaf)) ## integral of tiaf over a period of length eps.t tiafInt <- unname(tiaf$G(eps.t, coeflist$tiaf) - tiaf$G(0, coeflist$tiaf)) ## calculate basic 
R0 (if (.epilink(object) == "log") exp(eta) else eta) * siafInt * tiafInt } ### Extract the "residual process" (cf. Ogata, 1988) of a twinstim, i.e. the ### fitted cumulative intensity of the ground process at the event times. ### "generalized residuals similar to those discussed in Cox and Snell (1968)" residuals.twinstim <- function (object, ...) { res <- object$tau if (is.null(res)) { if (is.null(modelenv <- environment(object))) { stop("residuals not available; re-fit the model with 'cumCIF = TRUE'") } else { message("'", substitute(object), "' was fit with disabled 'cumCIF'", " -> calculate it now ...") res <- with(modelenv, LambdagEvents(cumCIF.pb = interactive())) try({ objname <- deparse(substitute(object)) object$tau <- res assign(objname, object, envir = parent.frame()) message("Note: added the 'tau' component to object '", objname, "' for future use.") }, silent = TRUE) } } return(res) } ###################################################################### # Function to compute estimated and profile likelihood based # confidence intervals. Heavy computations might be necessary! # #Params: # fitted - output from a fit with twinstim # profile - list with 4D vector as entries - format: # c(index, lower, upper, grid size) # where index is the index in the coef vector # lower and upper are the parameter limits (can be NA) # grid size is the grid size of the equally spaced grid # between lower and upper (can be 0) # alpha - (1-alpha)% profile likelihood CIs are computed. # If alpha <= 0 then no CIs are computed # control - control object to use for optim in the profile loglik computations # # Returns: # list with profile loglikelihood evaluations on the grid # and highest likelihood and wald confidence intervals ###################################################################### profile.twinstim <- function (fitted, profile, alpha = 0.05, control = list(fnscale = -1, maxit = 100, trace = 1), do.ltildeprofile=FALSE, ...) 
{ warning("the profile likelihood implementation is experimental") ## the implementation below is not well tested, simply uses optim (ignoring ## optimizer settings from the original fit), and does not store the complete ## set of coefficients ## Check that input is ok profile <- as.list(profile) if (length(profile) == 0L) { stop("nothing to do") } lapply(profile, function(one) { if (length(one) != 4L) { stop("each profile entry has to be of form ", "'c(index, lower, upper, grid size)'") }}) if (is.null(fitted[["functions"]])) { stop("'fitted' must contain the component 'functions' -- fit using the option model=TRUE") } ## Control of the optim procedure if (is.null(control[["fnscale",exact=TRUE]])) { control$fnscale <- -1 } if (is.null(control[["maxit",exact=TRUE]])) { control$maxit <- 100 } if (is.null(control[["trace",exact=TRUE]])) { control$trace <- 1 } ## Estimated normalized likelihood function ltildeestim <- function(thetai,i) { theta <- theta.ml theta[i] <- thetai fitted$functions$ll(theta) - loglik.theta.ml } ## Profile normalized likelihood function ltildeprofile <- function(thetai,i) { #cat("Investigating theta[",i,"] = ",thetai,"\n") emptyTheta <- rep(0, length(theta.ml)) # Likelihood l(theta_{-i}) = l(theta_i, theta_i) ltildethetaminusi <- function(thetaminusi) { theta <- emptyTheta theta[-i] <- thetaminusi theta[i] <- thetai #cat("Investigating theta = ",theta,"\n") res <- fitted$functions$ll(theta) - loglik.theta.ml #cat("Current ltildethetaminusi value: ",res,"\n") return(res) } # Score function of all params except thetaminusi stildethetaminusi <- function(thetaminusi) { theta <- emptyTheta theta[-i] <- thetaminusi theta[i] <- thetai res <- fitted$functions$sc(theta)[-i] #cat("Current stildethetaminusi value: ",res,"\n") return(res) } # Call optim -- currently not adapted to arguments of control arguments # used in the fit resOthers <- tryCatch( optim(par=theta.ml[-i], fn = ltildethetaminusi, gr = stildethetaminusi, method = "BFGS", control = control), error = function(e) list(value=NA)) resOthers$value } ## Initialize theta.ml <- coef(fitted) loglik.theta.ml <- c(logLik(fitted)) se <- sqrt(diag(vcov(fitted))) resProfile <- list() ## Perform profile computations for all requested parameters cat("Evaluating the profile logliks on a grid...\n") for (i in 1:length(profile)) { cat("i= ",i,"/",length(profile),"\n") #Index of the parameter in the theta vector idx <- profile[[i]][1] #If no borders are given use those from wald intervals (unconstrained) if (is.na(profile[[i]][2])) profile[[i]][2] <- theta.ml[idx] - 3*se[idx] if (is.na(profile[[i]][3])) profile[[i]][3] <- theta.ml[idx] + 3*se[idx] #Evaluate profile loglik on a grid (if requested) if (profile[[i]][4] > 0) { thetai.grid <- seq(profile[[i]][2],profile[[i]][3],length=profile[[i]][4]) resProfile[[i]] <- matrix(NA, nrow = length(thetai.grid), ncol = 4L, dimnames = list(NULL, c("grid","profile","estimated","wald"))) #Loop over all gridpoints for (j in 1:length(thetai.grid)) { cat("\tj= ",j,"/",length(thetai.grid),"\n") resProfile[[i]][j,] <- c(thetai.grid[j], #Do we need to compute ltildeprofile (can be quite time consuming) if (do.ltildeprofile) ltildeprofile(thetai.grid[j],idx) else NA_real_, ltildeestim(thetai.grid[j],idx), - 1/2*(1/se[idx]^2)*(thetai.grid[j] - theta.ml[idx])^2) } } } names(resProfile) <- names(theta.ml)[sapply(profile, function(x) x[1L])] ############################### ## Profile likelihood intervals ############################### # Not done, yet ciProfile <- NULL ####Done, return 
return(list(lp=resProfile, ci.hl=ciProfile, profileObj=profile)) } ### update-method for the twinstim-class ## stats::update.default would also work but is not adapted to the specific ## structure of twinstim: optim.args (use modifyList), two formulae, model, ... ## However, this specific method is inspired by and copies small parts of the ## update.default method from the stats package developed by The R Core Team update.twinstim <- function (object, endemic, epidemic, control.siaf, optim.args, model, ..., use.estimates = TRUE, evaluate = TRUE) { call <- object$call thiscall <- match.call(expand.dots=FALSE) extras <- thiscall$... if (!missing(model)) { call$model <- model ## Special case: update model component ONLY if (evaluate && all(names(thiscall)[-1] %in% c("object", "model", "evaluate"))) { return(.update.twinstim.model(object, model)) } } ## Why we no longer use call$endemic but update object$formula$endemic: ## call$endemic would be an unevaluated expression eventually receiving the ## parent.frame() as environment, cp.: ##(function(e) {ecall <- match.call()$e; eval(call("environment", ecall))})(~1+start) ## This could cause large files if the fitted model is saved. ## Furthermore, call$endemic could refer to some object containing ## the formula, which is no longer visible. call$endemic <- if (missing(endemic)) object$formula$endemic else update.formula(object$formula$endemic, endemic) call$epidemic <- if (missing(epidemic)) object$formula$epidemic else update.formula(object$formula$epidemic, epidemic) ## Note: update.formula uses terms.formula(...,simplify=TRUE), but ## the principle order of terms is retained. Offsets will be moved to ## the end and a missing intercept will be denoted by a final -1. if (!missing(control.siaf)) { if (is.null(control.siaf)) { call$control.siaf <- NULL # remove from call, i.e., use defaults } else { call$control.siaf <- object$control.siaf # =NULL if constantsiaf call$control.siaf[names(control.siaf)] <- control.siaf } } call["optim.args"] <- if (missing(optim.args)) object["optim.args"] else { list( # use list() to enable optim.args=NULL if (is.list(optim.args)) { modifyList(object$optim.args, optim.args) } else optim.args # = NULL ) } ## Set initial values (will be appropriately subsetted and/or extended with ## zeroes inside twinstim()) call$start <- if (missing(optim.args) || (!is.null(optim.args) && !"par" %in% names(optim.args))) { ## old optim.args$par probably doesn't match updated model, ## thus we set it as "start"-argument call$optim.args$par <- NULL if (use.estimates) coef(object) else object$optim.args$par } else NULL if ("start" %in% names(extras)) { newstart <- check_twinstim_start(eval.parent(extras$start)) call$start[names(newstart)] <- newstart extras$start <- NULL } ## CAVE: the remainder is copied from stats::update.default (as at R-2.15.0) if(length(extras)) { existing <- !is.na(match(names(extras), names(call))) ## do these individually to allow NULL to remove entries. 
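        ## (assigning NULL into a call removes that argument entirely)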
for (a in names(extras)[existing]) call[[a]] <- extras[[a]] if(any(!existing)) { call <- c(as.list(call), extras[!existing]) call <- as.call(call) } } if(evaluate) eval(call, parent.frame()) else call } .update.twinstim.model <- function (object, model) { call <- object$call call$model <- model if (model) { # add model environment call$start <- coef(object) call$optim.args$fixed <- TRUE call$cumCIF <- FALSE call$verbose <- FALSE ## evaluate in the environment calling update.twinstim() message("Setting up the model environment ...") objectWithModel <- eval(call, parent.frame(2L)) ## add the model "functions" and environment object$functions <- objectWithModel$functions environment(object) <- environment(objectWithModel) } else { # remove model environment object["functions"] <- list(NULL) environment(object) <- NULL } object$call$model <- model object } ## a terms-method is required for stepComponent() terms.twinstim <- function (x, component=c("endemic", "epidemic"), ...) { component <- match.arg(component) terms.formula(x$formula[[component]], keep.order=TRUE) } ## compare two twinstim fits ignoring at least the "runtime" and the "call" ## just like all.equal.hhh4() all.equal.twinstim <- function (target, current, ..., ignore = NULL) { if (!inherits(target, "twinstim")) return("'target' is not a \"twinstim\" object") if (!inherits(current, "twinstim")) return("'current' is not a \"twinstim\" object") ignore <- unique.default(c(ignore, "runtime", "call")) target[ignore] <- current[ignore] <- list(NULL) NextMethod("all.equal") } surveillance/R/stsplot_time.R0000644000176200001440000004006614024124757016034 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Time series plot for sts-objects ### ### Copyright (C) 2007-2014 Michael Hoehle, 2013-2016,2021 Sebastian Meyer ### $Revision: 2663 $ ### $Date: 2021-03-16 13:51:59 +0100 (Tue, 16. Mar 2021) $ ################################################################################ ###################################################################### # stsplot_time() sets the scene and calls either stsplot_time_as1() # or stsplot_time1() for each unit ###################################################################### stsplot_time <- function(x, units = NULL, as.one = FALSE, same.scale = TRUE, par.list = list(), ...) { observed <- x@observed if (is.null(units)) # plot all units units <- seq_len(ncol(observed)) nUnits <- length(units) #graphical parameters if (is.list(par.list)) { if (nUnits > 1 && !as.one) { par.list <- modifyList( #default: reduced margins and mfrow panels list(mar = c(5,4,1,1), mfrow = magic.dim(nUnits)), par.list) } else { par.list$mfrow <- NULL #no mf formatting.. } if (length(par.list) > 0) { oldpar <- par(par.list) on.exit(par(oldpar)) } } if (nUnits == 1L) { # a single time-series plot stsplot_time1(x = x, k = units, ...) } else { # multiple time series if (as.one) { # all time series in one plot stsplot_time_as1(x, units = units, ...) } else { # each time series in a separate plot args <- list(...) 
if(same.scale) { # compute suitable ylim if not specified if (is.null(args[["ylim"]])) { ymax <- if (x@multinomialTS) { max(0, pmax(observed,x@upperbound,na.rm=TRUE)/x@populationFrac, na.rm=TRUE) } else { max(observed,x@upperbound,na.rm=TRUE) } args$ylim <- c(-1/20*ymax, ymax) } } else { args$ylim <- NULL } #plot areas for (k in units) { argsK <- modifyList(args, list(x=x, k=k, main="", legend.opts=NULL), keep.null = TRUE) do.call("stsplot_time1",args=argsK) title(main=if (is.character(k)) k else colnames(observed)[k], line=-1) } } } invisible() } ## a simple matplot of observed counts from all/selected units, with a legend stsplot_time_as1 <- function (x, units = NULL, type = "l", lty = 1:5, lwd = 1, col = 1:6, epochsAsDate = x@epochAsDate, xaxis.tickFreq = list("%Q"=atChange), xaxis.labelFreq = xaxis.tickFreq, xaxis.labelFormat = "%G\n\n%OQ", xlab = "time", ylab = "No. infected", legend.opts = list(), ...) { observed <- x@observed if (x@multinomialTS) { observed <- ifelse(x@populationFrac != 0, observed/x@populationFrac, 0) } if (!is.null(units)) observed <- observed[, units, drop = FALSE] ## basic plot opar <- par(bty = "n", xaxt = "n") # a formatted time axis is added below matplot(observed, type = type, lty = lty, lwd = lwd, col = col, xlab = xlab, ylab = ylab, ...) par(opar) ## add time axis xaxis.line <- !epochsAsDate || grepl("\n", xaxis.labelFormat) addFormattedXAxis(x = x, epochsAsDate = epochsAsDate, xaxis.tickFreq = xaxis.tickFreq, xaxis.labelFreq = xaxis.labelFreq, xaxis.labelFormat = xaxis.labelFormat) # line = 1 ## add legend if (is.list(legend.opts)) { legend.opts <- modifyList( list(x = "top", legend = colnames(observed), lty = lty, lwd = lwd, col = col, ncol = magic.dim(ncol(observed))[2L], bty = "n"), legend.opts) do.call("legend", legend.opts) } invisible() } ### work-horse which produces a single time series plot with formatted x-axis stsplot_time1 <- function( x, k=1, ylim=NULL, axes=TRUE, xaxis.tickFreq=list("%Q"=atChange), xaxis.labelFreq=xaxis.tickFreq, xaxis.labelFormat="%G\n\n%OQ", epochsAsDate=x@epochAsDate, xlab="time", ylab="No. infected", main=NULL, type="s", lty=c(1,1,2), col=c(NA,1,4), lwd=c(1,1,1), outbreak.symbol=list(pch=3, col=3, cex=1, lwd=1), alarm.symbol=list(pch=24, col=2, cex=1, lwd=1), legend.opts=list(), dx.upperbound=0L, hookFunc=function(){}, .hookFuncInheritance=function() {}, ...) { stopifnot(length(k) == 1, is.character(k) || k != 0) #Extract slots -- depending on the algorithms: x@control$range observed <- x@observed[,k] state <- x@state[,k] alarm <- x@alarm[,k] upperbound <- x@upperbound[,k] population <- x@populationFrac[,k] binaryTS <- x@multinomialTS #Control what axis style is used xaxis.dates <- !is.null(xaxis.labelFormat) if (binaryTS) { observed <- ifelse(population!=0,observed/population,0) upperbound <- ifelse(population!=0,upperbound/population,0) if (ylab == "No. 
infected") { ylab <- "Proportion infected" } } ##### Handle the NULL arguments ###################################### if (is.null(main) && length(x@control) > 0) { #a surveillance algorithm has been run action <- switch(class(x), "sts" = "surveillance", "stsNC" = "nowcasting","stsBP" = "backprojection") method <- x@control$name main <- paste0(action, " using ", method) } # control where the highest value is max <- max(c(observed,upperbound),na.rm=TRUE) #if ylim is not specified, give it a default value if(is.null(ylim) ){ ylim <- c(-1/20*max, max) } # left/right help for constructing the columns dx.observed <- 0.5 upperboundx <- (1:length(upperbound)) - (dx.observed - dx.upperbound) #Generate the matrices to plot (values,last value) xstuff <- cbind(c(upperboundx,length(observed) + min(1-(dx.observed - dx.upperbound),0.5))) ystuff <-cbind(c(upperbound,upperbound[length(observed) ])) #Plot the results matplot(x=xstuff,y=ystuff,xlab=xlab,ylab=ylab,main=main,ylim=ylim,axes = !(xaxis.dates),type=type,lty=lty[-c(1:2)],col=col[-c(1:2)],lwd=lwd[-c(1:2)],...) #This draws the polygons containing the number of counts (sep. by NA) i <- rep(1:length(observed),each=5) dx <- rep(dx.observed * c(-1,-1,1,1,NA), times=length(observed)) x.points <- i + dx y.points <- as.vector(t(cbind(0, observed, observed, 0, NA))) polygon(x.points,y.points,col=col[1],border=col[2],lwd=lwd[1]) #Draw upper bound once more in case the polygons are filled if (!is.na(col[1])) { lines(x=xstuff,y=ystuff,type=type,lty=lty[-c(1:2)],col=col[-c(1:2)],lwd=lwd[-c(1:2)],...) } #Draw alarm symbols alarmIdx <- which(!is.na(alarm) & (alarm == 1)) if (length(alarmIdx)>0) { matpoints( alarmIdx, rep(-1/40*ylim[2],length(alarmIdx)), pch=alarm.symbol$pch, col=alarm.symbol$col, cex= alarm.symbol$cex, lwd=alarm.symbol$lwd) } #Draw outbreak symbols stateIdx <- which(state == 1) if (length(stateIdx)>0) { matpoints( stateIdx, rep(-1/20*ylim[2],length(stateIdx)), pch=outbreak.symbol$pch, col=outbreak.symbol$col,cex = outbreak.symbol$cex,lwd=outbreak.symbol$lwd) } #Label x-axis if(xaxis.dates & axes) { addFormattedXAxis(x = x, epochsAsDate = epochsAsDate, xaxis.tickFreq = xaxis.tickFreq, xaxis.labelFreq = xaxis.labelFreq, xaxis.labelFormat = xaxis.labelFormat, ...) } #Label y-axis if (axes) { axis( side=2 ,...)#cex=cex, cex.axis=cex.axis) } doLegend <- if (missing(legend.opts)) { length(stateIdx) + length(alarmIdx) > 0 || any(upperbound > 0, na.rm = TRUE) } else { is.list(legend.opts) } if(doLegend) { legend.opts <- modifyList( list(x = "top", lty = c(lty[1],lty[3],NA,NA), col = c(col[2],col[3],outbreak.symbol$col,alarm.symbol$col), pch = c(NA,NA,outbreak.symbol$pch,alarm.symbol$pch), legend = c("Infected", "Threshold", "Outbreak", "Alarm")), legend.opts) #Make the legend do.call("legend",legend.opts) } #Call hook function for user customized action using the current environment environment(hookFunc) <- environment() hookFunc() #Extra hook functions for inheritance plotting (see e.g. plot function of stsNC objects) environment(.hookFuncInheritance) <- environment() .hookFuncInheritance() invisible() } ############## ### alarm plot ############## stsplot_alarm <- function( x, lvl=rep(1,nrow(x)), ylim=NULL, xaxis.tickFreq=list("%Q"=atChange), xaxis.labelFreq=xaxis.tickFreq, xaxis.labelFormat="%G\n\n%OQ", epochsAsDate=x@epochAsDate, xlab="time", main=NULL, type="hhs", lty=c(1,1,2), col=c(1,1,4), outbreak.symbol=list(pch=3, col=3, cex=1, lwd=1), alarm.symbol=list(pch=24, col=2, cex=1, lwd=1), cex=1, cex.yaxis=1, ...) 
{
    k <- 1
    #Extract slots -- depending on the algorithms: x@control$range
    observed <- x@observed[,k]
    state <- x@state[,k]
    alarm <- x@alarm[,k]
    upperbound <- x@upperbound[,k]
    ylim <- c(0.5, ncol(x))

    ##### Handle the NULL arguments ######################################
    if (is.null(main) && length(x@control) > 0) {
        #a surveillance algorithm has been run
        action <- switch(class(x), "sts" = "surveillance", "stsNC" = "nowcasting","stsBP" = "backprojection")
        method <- x@control$name
        main <- paste0(action, " using ", method)
    }

    #Control what axis style is used
    xaxis.dates <- !is.null(xaxis.labelFormat)

    # left/right help for constructing the columns
    dx.observed <- 0.5
    observedxl <- (1:length(observed))-dx.observed
    observedxr <- (1:length(observed))+dx.observed
    upperboundx <- (1:length(upperbound)) #-0.5

    # control where the highest value is
    max <- max(c(observed,upperbound),na.rm=TRUE)

    #if ylim is not specified
    if(is.null(ylim)){
        ylim <- c(-1/20*max, max)
    }

    #Generate the matrices to plot
    xstuff <- cbind(observedxl, observedxr, upperboundx)
    ystuff <- cbind(observed, observed, upperbound)

    #Plot the results using one large plot call (we do this by modifying
    #the call). Move this into a special function!
    matplot(x=xstuff,y=ystuff,xlab=xlab,ylab="",main=main,ylim=ylim,axes = FALSE,type="n",lty=lty,col=col,...)

    #Label of x-axis
    if(xaxis.dates){
        addFormattedXAxis(x = x, epochsAsDate = epochsAsDate,
                          xaxis.tickFreq = xaxis.tickFreq,
                          xaxis.labelFreq = xaxis.labelFreq,
                          xaxis.labelFormat = xaxis.labelFormat, ...)
    }
    axis( side=2, at=1:ncol(x),cex.axis=cex.yaxis, labels=colnames(x),las=2)

    #Draw all alarms
    for (i in 1:nrow(x)) {
        idx <- (1:ncol(x))[x@alarm[i,] > 0]
        for (j in idx) {
            points(i,j,pch=alarm.symbol$pch,col=alarm.symbol$col[lvl[j]],cex=alarm.symbol$cex,lwd=alarm.symbol$lwd)
        }
    }

    #Draw lines separating the levels
    m <- c(-0.5,cumsum(as.numeric(table(lvl))))
    sapply(m, function(i) lines(c(0.5,nrow(x@alarm)+0.5),c(i+0.5,i+0.5),lwd=2))

    invisible()
}

#####################################
### Utilities to set up the time axis
#####################################

#Every unit change
atChange <- function(x,xm1) { which(diff(c(xm1,x)) != 0) }

#Median index of factor
atMedian <- function(x,xm1) { as.integer(tapply(seq_along(x), INDEX=x, quantile, probs=0.5, type=3)) }

#Only every second unit change
at2ndChange <- function(x,xm1) {
    idxAtChange <- atChange(x,xm1)
    idxAtChange[seq(idxAtChange) %% 2 == 1]
}

#Helper function to format the x-axis of the time series
addFormattedXAxis <- function(x, epochsAsDate = FALSE,
                              xaxis.tickFreq = list("%Q"=atChange),
                              xaxis.labelFreq = xaxis.tickFreq,
                              xaxis.labelFormat = "%G\n\n%OQ", ...)
{
    #Old style if there are no Date objects
    if (!epochsAsDate) {
        #Declare commonly used variables.
        nTime <- nrow(x)
        startyear <- x@start[1]
        firstweek <- x@start[2]
        if (x@freq ==52) { #Weekly epochs are the most supported
            # At which indices to put the "at" tick label. This will
            # be exactly those week numbers where the new quarter begins: 1, 14, 27 and 40 + i*52.
            # Note that week number and index is not the same due to the "firstweek" argument
            weeks <- seq_len(nTime) + (firstweek-1)
            noYears <- ceiling(max(weeks)/52)
            quarterStarts <- rep( (0:(noYears))*52, each=4) + rep( c(1,14,27,40), noYears+1)
            weeks <- subset(weeks, !is.na(match(weeks,quarterStarts)))
            weekIdx <- weeks - (firstweek-1)
            # get the right year for each week
            year <- weeks %/% 52 + startyear
            # function to define the quarter order
            quarterFunc <- function(i) { switch(i+1,"I","II","III","IV") } #nicer:as.roman, but changes class.
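            ## Worked example of the quarter arithmetic used below: week 14
            ## gives (14-1) %/% 13 %% 4 = 1, so quarterFunc(1) yields "II";
            ## weeks 1, 14, 27 and 40 thus map to quarters I-IV, in line with
            ## quarterStarts above.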
# get the right number and order of quarter labels quarter <- sapply( (weeks-1) %/% 13 %% 4, quarterFunc) #Computed axis labels -- add quarters (this is the old style) labels.week <- paste(year,"\n\n",quarter,sep="") #Make the line. Use lwd.ticks to get full line but no marks. axis( side=1,labels=FALSE,at=c(1,nTime),lwd.ticks=0,line=1,...) axis( at=weekIdx[which(quarter != "I")] , labels=labels.week[which(quarter != "I")] , side=1, line = 1 ,...) #Bigger tick marks at the first quarter (i.e. change of the year) at <- weekIdx[which(quarter == "I")] axis( at=at, labels=rep(NA,length(at)), side=1, line = 1 ,tcl=2*par("tcl")) } else { ##other frequency (not really supported) #A label at each unit myat.unit <- seq(firstweek,length.out=nTime) # get the right year order month <- (myat.unit-1) %% x@freq + 1 year <- (myat.unit - 1) %/% x@freq + startyear #construct the computed axis labels -- add quarters if xaxis.units is requested mylabels.unit <- paste(year,"\n\n", (myat.unit-1) %% x@freq + 1,sep="") #Add axis axis( at=seq_len(nTime), labels=NA, side=1, line = 1, ...) axis( at=seq_len(nTime)[month==1], labels=mylabels.unit[month==1] , side=1, line = 1 ,...) #Bigger tick marks at the first unit at <- seq_len(nTime)[(myat.unit - 1) %% x@freq == 0] axis( at=at, labels=rep(NA,length(at)), side=1, line = 1 ,tcl=2*par("tcl")) } } else { ################################################################ #epochAsDate -- experimental functionality to handle ISO 8601 ################################################################ dates <- epoch(x, as.Date = TRUE) #make one which has one extra element at beginning with same spacing datesOneBefore <- c(dates[1]-(dates[2]-dates[1]),dates) #Make the line. Use lwd.ticks to get full line but no marks. axis( side=1,labels=FALSE,at=c(1,length(dates)),lwd.ticks=0,...) ###Make the ticks (depending on the selected level).### tcl <- par("tcl") tickFactors <- surveillance.options("stsTickFactors") #Loop over all pairs in the xaxis.tickFreq list for (i in seq_along(xaxis.tickFreq)) { format <- names(xaxis.tickFreq)[i] xm1x <- as.numeric(formatDate(datesOneBefore,format)) idx <- xaxis.tickFreq[[i]](x=xm1x[-1],xm1=xm1x[1]) #Find tick size by table lookup tclFactor <- tickFactors[pmatch(format, names(tickFactors))] if (is.na(tclFactor)) { warning("no \"tcl\" factor found for \"", format ,"\" -> setting it to 1") tclFactor <- 1 } axis(1,at=idx, labels=NA,tcl=tclFactor*tcl,...) } ###Make the labels (depending on the selected level)### if (!is.null(xaxis.labelFormat)) { labelIdx <- NULL for (i in seq_along(xaxis.labelFreq)) { format <- names(xaxis.labelFreq)[i] xm1x <- as.numeric(formatDate(datesOneBefore,format)) labelIdx <- c(labelIdx,xaxis.labelFreq[[i]](x=xm1x[-1],xm1=xm1x[1])) } #Format labels (if any) for the requested subset if (length(labelIdx)>0) { labels <- rep(NA,nrow(x)) labels[labelIdx] <- formatDate(dates[labelIdx],xaxis.labelFormat) axis(1,at=1:nrow(x), labels=labels,tick=FALSE,...) } } }#end epochAsDate #Done invisible() } surveillance/R/twinstim_simulation.R0000644000176200001440000015274714006033103017422 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Simulate a point pattern according to a spatio-temporal intensity model of ### class "twinstim". 
The function basically uses Ogata's modified thinning ### algorithm (cf. Daley & Vere-Jones, 2003, Algorithm 7.5.V.). ### ### Copyright (C) 2010-2018,2021 Sebastian Meyer ### $Revision: 2640 $ ### $Date: 2021-02-01 18:11:31 +0100 (Mon, 01. Feb 2021) $ ################################################################################ ### CAVE: ### - the type of contrasts for factor variables has to be set through options("contrasts") ### - if epidemic-only process (!hash), we actually don't need stgrid, but we ### want to have valid epidataCS at the end, which requires stgrid ## model.frame() evaluates '...' with 'data' utils::globalVariables(c("BLOCK", "tile", "area")) simEpidataCS <- function (endemic, epidemic, siaf, tiaf, qmatrix, rmarks, events, stgrid, tiles, beta0, beta, gamma, siafpars, tiafpars, epilink = "log", t0 = stgrid$start[1], T = tail(stgrid$stop,1), nEvents = 1e5, control.siaf = list(F=list(), Deriv=list()), W = NULL, trace = 5, nCircle2Poly = 32, gmax = NULL, .allocate = 500, .skipChecks = FALSE, .onlyEvents = FALSE) { ptm <- proc.time()[[3]] cl <- match.call() ####################### ### Check arguments ### (this takes many lines of code ...) ####################### cat("\nChecking the supplied arguments ...\n") ### Some simple input checks if (missing(endemic)) endemic <- ~ 0 else stopifnot(inherits(endemic, "formula")) if (missing(epidemic)) epidemic <- ~ 0 else stopifnot(inherits(epidemic, "formula")) if (length(trace) != 1L) stop("'trace' must be a single integer or logical value") trace <- as.integer(trace) if (!isScalar(nCircle2Poly)) stop("'nCircle2Poly' must be scalar") nCircle2Poly <- as.integer(nCircle2Poly) if (!isScalar(.allocate)) stop("'.allocate' must be scalar") .allocate <- as.integer(.allocate) .skipChecks <- as.logical(.skipChecks) .onlyEvents <- as.logical(.onlyEvents) ### Check qmatrix if (missing(qmatrix)) qmatrix <- diag(1) nTypes <- nrow(qmatrix) if (is.null(typeNames <- rownames(qmatrix))) { if (nTypes > length(LETTERS)) stop("'qmatrix' needs dimnames") typeNames <- LETTERS[seq_len(nTypes)] } qmatrix <- checkQ(qmatrix, typeNames) qSumTypes <- rowSums(qmatrix) # how many types can be triggered by each type ### Check other "epidataCS" components (events, stgrid, tiles, and W) if (!missing(events) && !is.null(events)) { events <- events[!names(events) %in% reservedColsNames_events] if (!.skipChecks) { cat("Checking 'events':\n") events <- check_events(events, dropTypes = FALSE) # epscols are obligatory in 'check_events', which is also appropriate here } ## check event types events@data$type <- factor(events@data$type, levels=typeNames) if (any(.typeIsNA <- is.na(events@data$type))) { warning("ignored some 'events' of unknown type") events <- events[!.typeIsNA,] } } if (!.skipChecks) { cat("Checking 'stgrid':\n") stgrid <- check_stgrid(stgrid[grep("^BLOCK$", names(stgrid), invert=TRUE)]) } if (!is.null(W)) { W <- check_W(W) # does as(W, "SpatialPolygons") } if (!.skipChecks) { cat("Checking 'tiles' ...\n") ## we always check 'tiles', but quietly in the simulate-method } tileLevels <- levels(stgrid$tile) tiles <- check_tiles(tiles, tileLevels, areas.stgrid = stgrid[["area"]][seq_along(tileLevels)], W = W, keep.data = FALSE) if (is.null(W)) { cat("Building 'W' as the union of 'tiles' ...\n") W <- unionSpatialPolygons(tiles) } ## empty CRS to avoid costly intermediate CRS checks (and rgdal warnings) if (!missing(events) && !is.null(events)) stopifnot(identicalCRS(tiles, events)) tiles@proj4string <- events@proj4string <- new("CRS") ## Transform W to class 
"owin" Wowin <- SpP2owin(W) maxExtentOfW <- diameter.owin(Wowin) ### Check parameters beta0 <- if (missing(beta0)) numeric(0L) else as.vector(beta0, mode="numeric") beta <- if (missing(beta)) numeric(0L) else as.vector(beta, mode="numeric") gamma <- if (missing(gamma)) numeric(0L) else as.vector(gamma, mode="numeric") siafpars <- if (missing(siafpars)) numeric(0L) else as.vector(siafpars, mode="numeric") tiafpars <- if (missing(tiafpars)) numeric(0L) else as.vector(tiafpars, mode="numeric") nbeta0 <- length(beta0) if (nbeta0 > 1L && nbeta0 != nTypes) { stop("'beta0' must have length 0, 1, or 'nrow(qmatrix)'") } p <- length(beta) q <- length(gamma) nsiafpars <- length(siafpars) ntiafpars <- length(tiafpars) hase <- q > 0L hassiafpars <- nsiafpars > 0L hastiafpars <- ntiafpars > 0L if (!hase && (hassiafpars | hastiafpars)) { stop("'siafpars' and 'tiafpars' require 'gamma'") } ### Check time range if (is.null(t0)) t0 <- eval(formals()$t0) if (is.null(T)) T <- eval(formals()$T) if (!isScalar(t0) || !isScalar(T)) { stop("endpoints 't0' and 'T' must be single numbers") } if (T <= t0) { stop("'T' must be greater than 't0'") } stopifnot(t0 >= stgrid$start[1], T <= tail(stgrid$stop,1)) ### Subset stgrid to include actual time range only # BLOCK in stgrid such that start time is equal to or just before t0 block_t0 <- stgrid$BLOCK[match(TRUE, c(stgrid$start,Inf) > t0) - 1L] # BLOCK in stgrid such that stop time is equal to or just after T block_T <- stgrid$BLOCK[match(TRUE, stgrid$stop >= T)] stgrid <- stgrid[stgrid$BLOCK>=block_t0 & stgrid$BLOCK<=block_T,,drop=FALSE] stgrid$start[stgrid$BLOCK == block_t0] <- t0 stgrid$stop[stgrid$BLOCK == block_T] <- T # matrix of BLOCKS and start times (used later) blockstarts <- with(stgrid, cbind(block_t0:block_T, start[match(block_t0:block_T, BLOCK)], deparse.level = 0L) ) ### Check mark-generating function # eps.t and eps.s are also unpredictable marks (generated by rmarks) unpredMarks <- unique(c("eps.t", "eps.s", if (hase) { setdiff(all.vars(epidemic), c("type", names(stgrid))) })) rmarks <- match.fun(rmarks) sampleCoordinate <- coordinates(spsample(tiles, n=1L, type="random")) sampleMarks <- rmarks(t0, sampleCoordinate) # should be a one-row data.frame if (!is.data.frame(sampleMarks) || nrow(sampleMarks) != 1L) { stop("'rmarks' must return a one-row data.frame of marks") } markNames <- names(sampleMarks) if (.idx <- match(FALSE, unpredMarks %in% markNames, nomatch=0L)) { stop("the unpredictable mark '", unpredMarks[.idx], "' is not returned by 'rmarks'") } if (!all(sapply(sampleMarks[unpredMarks], function(x) inherits(x, c("integer","numeric","logical","factor"), which=FALSE)))) warning("'rmarks' should return \"numeric\", \"logical\", or", " \"factor\" ('epidemic') variables only") ### Check prehistory of the process Nout <- 0L if (!missing(events) && !is.null(events)) { .stillInfective <- with(events@data, time <= t0 & time + eps.t > t0) Nout <- sum(.stillInfective) events <- if (Nout > 0L) { events[.stillInfective,] } else { .eventstxt <- if (.skipChecks) "data$events" else "events" # for simulate.twinstim cat("(no events from '", .eventstxt, "' were considered as prehistory)\n", sep="") NULL } } ## separate coordinates and data if (Nout > 0L) { check_tiles_events(tiles, events) eventCoords <- coordinates(events) rownames(eventCoords) <- NULL # to avoid duplicates ("" for new events) # which disturb the final SpatialPointsDataFrame() eventData <- events@data ## check presence of unpredictable marks if (length(.idx <- which(!unpredMarks %in% 
names(eventData)))) { stop("missing unpredictable marks in 'events': ", paste0("\"", unpredMarks[.idx], "\"", collapse=", ")) } ## check type of unpredictable marks for (um in unpredMarks) { if (!identical(class(sampleMarks[[um]]), class(eventData[[um]]))) stop("the class of the unpredictable mark '", um, "' in the 'events' prehistory ", "is not identical to the class returned by 'rmarks'") } ## add marks which are not in the prehistory but simulated by 'rmarks' if (length(.add2events <- setdiff(markNames, names(eventData)))) { eventData <- cbind(eventData, sampleMarks[.add2events]) is.na(eventData[.add2events]) <- TRUE } eventData <- eventData[c("time", "tile", "type", markNames)] } else { ## empty prehistory eventCoords <- matrix(0, nrow=0L, ncol=2L) eventData <- data.frame( time = numeric(0L), tile = factor(character(0L), levels=tileLevels), type = factor(character(0L), levels=typeNames), check.rows = FALSE, check.names = FALSE ) eventData <- cbind(eventData, sampleMarks[0L,]) } ## helper function to attach covariates from 'stgrid' to events attachstgridvars <- function (eventData, stgridvars) { if (length(stgridvars) == 0L) return(eventData) gridcellsOfEvents <- integer(nrow(eventData)) for (i in seq_along(gridcellsOfEvents)) { gridcellsOfEvents[i] <- gridcellOfEvent(eventData[i,"time"], eventData[i,"tile"], stgrid) } cbind(eventData, stgrid[gridcellsOfEvents, stgridvars, drop=FALSE]) } ### Build epidemic model matrix epidemic <- terms(epidemic, data = eventData, keep.order = TRUE) if (!is.null(attr(epidemic, "offset"))) { warning("offsets are not implemented for the 'epidemic' component") } # helper function taking eventData and returning the epidemic model.matrix buildmme <- function (eventData) { # which variables do we have to copy from stgrid? stgridCopyCols <- match(all.vars(epidemic), names(stgrid), nomatch = 0L) eventData <- attachstgridvars(eventData, stgridCopyCols) mfe <- model.frame(epidemic, data = eventData, na.action = na.fail, drop.unused.levels = FALSE) model.matrix(epidemic, mfe) } mme <- buildmme(eventData) if (ncol(mme) != q) { cat(ncol(mme), "epidemic model terms:\t", paste(colnames(mme), collapse=" "), "\n") stop("length of 'gamma' (", q, ") does not match the 'epidemic' specification (", ncol(mme), ")") } ## (inverse) link function for the epidemic linear predictor of event marks epilink <- match.arg(epilink, choices = c("log", "identity")) epilinkinv <- switch(epilink, "log" = exp, "identity" = identity) ### Build endemic model matrix endemic <- terms(endemic, data = stgrid, keep.order = TRUE) # check if we have an endemic component at all hasOffset <- !is.null(attr(endemic, "offset")) hash <- (nbeta0 + p + hasOffset) > 0L if (!hash) { if (!hase) { stop("nothing to do: neither endemic nor epidemic parameters were specified") # actually, the process might be endemic offset-only, which I don't care about ATM } if (Nout == 0L) { stop("missing 'events' prehistory (no endemic component)") } } # remove (1|type) specification typeSpecificEndemicIntercept <- "1 | type" %in% attr(endemic, "term.labels") || nbeta0 > 1 if (typeSpecificEndemicIntercept) { endemic <- update.formula(endemic, ~ . 
- (1|type)) # this drops the terms attributes endemic <- terms(endemic, data = stgrid, keep.order = TRUE) if (nbeta0 <= 1L) { stop("for type-specific endemic intercepts, 'beta0' must be longer than 1") } } # ensure that we have correct contrasts in the endemic component attr(endemic, "intercept") <- as.integer(nbeta0 > 0L) # helper function taking eventData (with time and tile columns) # and returning the endemic model.matrix buildmmh <- function (eventData) { # if 'pi' appears in 'endemic' we don't care, and if a true covariate is # missing, model.frame will throw an error # which variables do we have to copy from stgrid? stgridCopyCols <- match(all.vars(endemic), names(stgrid), nomatch = 0L) # attaching covariates from 'stgrid' to events eventData <- attachstgridvars(eventData, stgridCopyCols) # construct model matrix mfhEvents <- model.frame(endemic, data = eventData, na.action = na.fail, drop.unused.levels = FALSE) mmhEvents <- model.matrix(endemic, mfhEvents) # exclude intercept from endemic model matrix below, will be treated separately if (nbeta0 > 0) mmhEvents <- mmhEvents[,-1,drop=FALSE] structure(mmhEvents, offset = model.offset(mfhEvents)) } # actually, we don't need the endemic model matrix for the prehistory events at all # this is just to test consistence with 'beta' and for the names of 'beta' mmh <- buildmmh(eventData[0L,]) if (ncol(mmh) != p) { stop("length of 'beta' (", p, ") does not match the 'endemic' specification (", ncol(mmh), ")") } ### Build endemic model matrix on stgrid mfhGrid <- model.frame(endemic, data = stgrid, na.action = na.fail, drop.unused.levels = FALSE, BLOCK = BLOCK, tile = tile, ds = area) # we don't actually need 'tile' in mfhGrid; this is only for easier identification when debugging mmhGrid <- model.matrix(endemic, mfhGrid) # exclude intercept from endemic model matrix below, will be treated separately if (nbeta0 > 0) mmhGrid <- mmhGrid[,-1,drop=FALSE] # Extract endemic model components offsetGrid <- model.offset(mfhGrid) gridBlocks <- mfhGrid[["(BLOCK)"]] ds <- mfhGrid[["(ds)"]] ### Parse interaction functions if (hase) { ## Check interaction functions siaf <- do.call(".parseiaf", args = alist(siaf, "siaf", verbose=trace>0)) constantsiaf <- attr(siaf, "constant") if (siaf$npars != nsiafpars) { stop("length of 'siafpars' (", nsiafpars, ") does not match the 'siaf' specification (", siaf$npars, ")") } tiaf <- do.call(".parseiaf", args = alist(tiaf, "tiaf", verbose=trace>0)) constanttiaf <- attr(tiaf, "constant") if (constanttiaf) gmax <- 1L if (tiaf$npars != ntiafpars) { stop("length of 'tiafpars' (", ntiafpars, ") does not match the 'tiaf' specification (", tiaf$npars, ")") } ## Check control.siaf if (constantsiaf) control.siaf <- NULL else { stopifnot(is.null(control.siaf) || is.list(control.siaf)) } ## Define function that integrates the two-dimensional 'siaf' function ## over the influence regions of the events if (!constantsiaf && !is.null(siaf$Fcircle) && !is.null(siaf$effRange)) { ## pre-compute effective range of the 'siaf' (USED BY .siafInt) effRangeTypes <- rep_len(siaf$effRange(siafpars), nTypes) } .siafInt <- .siafIntFUN(siaf = siaf, noCircularIR = FALSE) # not certain beforehand .siafInt.args <- c(list(siafpars), control.siaf$F) ## Check gmax if (is.null(gmax)) { gmax <- max(tiaf$g(rep.int(0,nTypes), tiafpars, 1:nTypes)) cat("assuming gmax =", gmax, "\n") } else if (!isScalar(gmax)) { stop("'gmax' must be scalar") } } else { if (!missing(siaf) && !is.null(siaf)) warning("'siaf' can only be modelled in conjunction with an 'epidemic' 
process") if (!missing(tiaf) && !is.null(tiaf)) warning("'tiaf' can only be modelled in conjunction with an 'epidemic' process") siaf <- tiaf <- NULL control.siaf <- NULL } ### print some information on the upcoming simulation txtPrehistory <- if (Nout == 0L) "no prehistory" else paste(Nout, ngettext(Nout, "event", "events"), "in the prehistory") cat("\nSimulating a", if (length(unpredMarks) > 2L) "marked", "spatio-temporal point pattern with", "\n\t-", nTypes, ngettext(nTypes, "event type", "event types"), "\n\t-", txtPrehistory) coefs <- c( if (nbeta0 > 1L) { setNames(beta0, paste0("h.type",typeNames)) } else if (nbeta0 == 1L) setNames(beta0, "h.(Intercept)"), if (p > 0L) setNames(beta, paste("h",colnames(mmh),sep=".")), if (hase) setNames(gamma, paste("e",colnames(mme),sep=".")), if (hassiafpars) setNames(siafpars, paste("e.siaf",1:nsiafpars,sep=".")), if (hastiafpars) setNames(tiafpars, paste("e.tiaf",1:ntiafpars,sep=".")) ) cat("\n\t-", length(coefs), "coefficients:\n\n") print(coefs) ########################################## ### CIF of the temporal ground process ### ########################################## ### calculate integral of endemic component over W (= union of tiles) ### and over types for all time blocks in stgrid hIntWK <- if (hash) { dsexpeta <- local({ eta <- drop(mmhGrid %*% beta) # =0 if p = 0 if (!is.null(offsetGrid)) eta <- offsetGrid + eta ds * exp(unname(eta)) }) fact <- if (nbeta0 > 1L) sum(exp(beta0)) else if (nbeta0 == 1L) nTypes*exp(unname(beta0)) else nTypes fact * c(tapply(dsexpeta, gridBlocks, sum)) } else setNames(numeric(nrow(blockstarts)), blockstarts[,1]) # zeroes #<- is a named vector with names referencing BLOCK in stgrid ### helper function evaluating the epidemic terms of the ground intensity ### for a specific set of events (the lambdag function uses eTerms) eTermsCalc <- function (eventData, eventCoords) { # extract some marks from the eventData (USED INSIDE .siafInt() BELOW!) 
eventTypes <- as.integer(eventData$type) eps.s <- eventData$eps.s # distance to the border (required for siafInt below, and for epidataCS) bdist <- bdist(eventCoords, Wowin) # spatial influence regions of the events influenceRegion <- if (nrow(eventCoords) > 0L) .influenceRegions( events = SpatialPointsDataFrame( coords = eventCoords, data = data.frame(eps.s = eps.s, .bdist = bdist), match.ID = FALSE ), W = Wowin, npoly = nCircle2Poly, maxExtent = maxExtentOfW, clipper = "polyclip" ) else list() # epidemic terms if (!hase) { return(list(matrix(NA_real_, length(influenceRegion), 3L), bdist, influenceRegion)) } # epidemic model matrix (will be multiplied with gamma) mme <- buildmme(eventData) # integrate the two-dimensional 'siaf' function over the influence region siafInts <- if (length(influenceRegion) == 0L) numeric(0L) else { environment(.siafInt) <- environment() do.call(".siafInt", .siafInt.args) } # Matrix of terms in the epidemic component eTerms <- cbind( qSum = qSumTypes[eventTypes], expeta = epilinkinv(drop(mme %*% gamma)), siafInt = siafInts ) # Return list(eTerms, bdist, influenceRegion) } ### function calculating the (upper bound) intensity of the ground process ### it relies on several objects for the epidemic component which are updated alongside simulation # t will be one of the break points in stgrid or an event time lambdagVec <- function (t, upper=FALSE) { ## endemic part hIntWKt <- hIntWK[[as.character(tBLOCK)]] ## epidemic part ejIntWt <- if (!hase || length(infectives) == 0L) numeric(0L) else { eTerms <- eTerms[infectives,,drop=FALSE] gTerm <- if (upper) { rep.int(gmax, length(infectives)) } else { times <- eventMatrix[infectives,"time"] types <- eventMatrix[infectives,"type"] tiaf$g(t-times, tiafpars, types) } # ejIntWt only for infectives, others have 0 setNames(apply(cbind(eTerms,gTerm), 1, prod), infectives) } c("0"=hIntWKt, ejIntWt) # endemic component has index "0" ! 
} ### helper function calculating the integral of lambdag from oldct to ct ### during simulation; it depends on the current values of the simulation add2Lambdag <- if (!hase || constanttiaf) { function () lambdagUpper * (ct-oldct) } else function () { # old endemic ground intensity * passed time hIntWKInt_oldct_ct <- lambdaghe[1L] * (ct-oldct) # integrated epidemic ground intensities of infectives (from oldct) ejIntWInt_oldct_ct <- if (length(infectives) == 0L) numeric(0L) else { eTermsProd <- apply(eTerms[infectives,,drop=FALSE], 1, prod) # integral of \id_{(0;eps.t]}(t-t_j) g(t-t_j \vert \kappa_j) from oldct to ct, for j in infectives # we can ignore the indicator because t-t_j is not >eps.t if t in [oldct;ct], because recoveries are change points times <- eventMatrix[infectives,"time"] types <- eventMatrix[infectives,"type"] gInt_0_ct <- tiaf$G(ct -times, tiafpars, types) gInt_0_oldct <- tiaf$G(oldct-times, tiafpars, types) gInt_oldct_ct <- gInt_0_ct - gInt_0_oldct eTermsProd * gInt_oldct_ct } sum(hIntWKInt_oldct_ct, ejIntWInt_oldct_ct) } ################## ### Simulation ### ################## ### Initialise values for simulation loop # all necessary components for an epidataCS object will be build along the simulation # let's start with the events of the prehistory tmp <- eTermsCalc(eventData, eventCoords) eTerms <- tmp[[1]]; rownames(eTerms) <- NULL bdists <- tmp[[2]] influenceRegions <- tmp[[3]] sources <- rep.int(list(integer(0L)), Nout) # Transform eventData into a matrix, which is faster with rbind # (factors will be recreated at the end of simulation) # simulated events will be subsequently appended to this matrix eventMatrix <- if (Nout == 0L) { matrix(numeric(0L), nrow=0L, ncol=ncol(eventData), dimnames=list(NULL, names(eventData))) } else { sapply(eventData, as.numeric, simplify = TRUE) # prehistory } if (Nout == 1L) eventMatrix <- t(eventMatrix) # we will also know about the source of infection and corresponding BLOCK in stgrid navec <- rep.int(NA_real_, Nout) eventMatrix <- cbind(eventMatrix, source = navec, lambda.h = navec, lambda.e = navec, Lambdag = navec, BLOCK = navec) # row indices of currently infective individuals infectives <- seq_len(Nout) # maximum total number of events (including prehistory) maxEvents <- Nout + nEvents # change points of lambdag stgridbreaks <- blockstarts[-1,2] Rtimes <- setNames(eventMatrix[,"time"]+eventMatrix[,"eps.t"], infectives) # name indexes row of eventMatrix # index of next event (row in eventMatrix) j <- Nout + 1L # allocation of large objects for faster filling-in of new events allocated <- Nout ncolEventMatrix <- ncol(eventMatrix) newAllocation <- expression({ eventMatrix <- rbind(eventMatrix, matrix(NA_real_, nrow = .allocate, ncol = ncolEventMatrix)) eventCoords <- rbind(eventCoords, matrix(NA_real_, nrow = .allocate, ncol = 2L)) eTerms <- rbind(eTerms, matrix(NA_real_, nrow = .allocate, ncol = 3L)) bdists <- c(bdists, rep.int(NA_real_,.allocate)) influenceRegions <- c(influenceRegions, vector(.allocate, mode="list")) sources <- c(sources, vector(.allocate, mode="list")) allocated <- allocated + .allocate }) # current time point ct <- t0 # current value of the cumulative intensity function of the ground process Lambdag <- 0 # last point rejected? pointRejected <- FALSE # did we have numerical problems simulating from Exp(lambdagUpper) in the current loop? 
hadNumericalProblems0 <- FALSE # index of the current loop loopCounter <- 0L ### Let's Rock 'n' Roll if (trace > 0L) { cat("\nSimulation path (starting from t=", t0, "):\n---\n", sep="") } else { cat("\nSimulating (starting from t=", t0, ") ...\n", sep="") } while(j <= maxEvents && ct < T && (hash || length(infectives) > 0L)) { loopCounter <- loopCounter + 1L if (trace > 0L && loopCounter %% trace == 0L) { cat(loopCounter, "@t =", ct, ":\t#simulated events =", j-1L-Nout, "\t#currently infective =", length(infectives), if (hase && !constanttiaf) paste("\tlast rejected?", pointRejected), "\n") flush.console() # affects Windows only } # check if we need to allocate larger matrices if (j > allocated) { eval(newAllocation) } if (!pointRejected) # what we have to do in the usual case { # we need the time block of stgrid corresponding to the new covariates, # i.e. search BLOCK such that t in [start; stop) tBLOCK <- blockstarts[findInterval(ct, blockstarts[,2]), 1] # Compute new infection intensity (upper bound) lambdaghe <- lambdagVec(ct, upper=TRUE) lambdagUpper <- sum(lambdaghe) # Determine time of next external change point changePoints <- c(nextblock = if (length(stgridbreaks) > 0L) stgridbreaks[1L], Rtimes) nextChangePoint <- if (length(changePoints) > 0L) { changePoints[which.min(changePoints)] # don't use min() because need names } else Inf } pointRejected <- FALSE ## Simulate waiting time for the subsequent infection if (is.na(lambdagUpper)) { warning("simulation stopped due to undefined intensity") break } if (lambdagUpper < 0) { warning("simulation stopped due to negative overall intensity") break } Delta <- if (lambdagUpper == 0) Inf else tryCatch( rexp(1, rate = lambdagUpper), warning = function (w) { # rate was too small (for R >= 2.7.0, # rexp(1, Inf) returns 0 without warning) assign("hadNumericalProblems0", TRUE, inherits = TRUE) Inf }) # Stop if lambdaStarMax too big meaning Delta == 0 (=> concurrent events) if (Delta == 0) { warning("simulation stopped due to infinite overall intensity") break } # Stop at all costs if end of simulation time [t0; T) has been reached if (isTRUE(min(ct+Delta, nextChangePoint) >= T)) { # ">=" because we don't want an event at "end" break } oldct <- ct if (ct + Delta > nextChangePoint) { ## Simulated time point is beyond the next time of intensity change (removal or endemic covariates) ct <- unname(nextChangePoint) # update cumulative intensity of the ground processes up to time ct, # i.e. add integral of lambdag from oldct to ct Lambdag <- Lambdag + add2Lambdag() # is this change point due to next time block in stgrid? if (names(nextChangePoint) == "nextblock") { stgridbreaks <- stgridbreaks[-1] } else { # i.e. change point due to recovery recoverer <- names(nextChangePoint) # update set of infectives infectives <- setdiff(infectives, recoverer) # remove recovery time from Rtimes .Rtimesidx <- match(recoverer, names(Rtimes)) Rtimes <- Rtimes[-.Rtimesidx] } } else { ## Simulated time point lies within the thinning period ct <- ct + Delta # rejection sampling if non-constant temporal interaction kernel g if (hase && !constanttiaf) { # Calculate actual ground intensity for rejection probability at new ct lambdaghe <- lambdagVec(ct, upper=FALSE) lambdag <- sum(lambdaghe) # rejection sampling step if (lambdag/lambdagUpper < runif(1)) { pointRejected <- TRUE next } } # At this point, we have an actual event! # update cumulative intensity of the ground processes up to time ct, # i.e. 
add integral of lambdag from oldct to ct Lambdag <- Lambdag + add2Lambdag() # note that lambdaghe[1L] did not change by the above update in case of !constanttiaf, # which is expected by add2Lambdag (which requires the value of lambdag.h(oldct)) # Where did the event come from: imported case or infection? .eventSource <- as.integer(sample(names(lambdaghe), 1L, prob=lambdaghe)) # We now sample type and location if (.eventSource == 0L) { # i.e. endemic source of infection .eventType <- sample(typeNames, 1L, prob=if (nbeta0 > 1L) exp(beta0)) stgrididx <- which(gridBlocks == tBLOCK) .eventTile <- sample(stgrid$tile[stgrididx], 1L, prob=dsexpeta[stgrididx]) # this is a factor ## spsample doesn't guarantee that the sample will consist of ## exactly n points. if no point is sampled (very unlikely ## though), there would be an error ntries <- 1L .nsample <- 1L while( inherits(eventLocationSP <- try( spsample(tiles[as.character(.eventTile),], n=.nsample, type="random"), silent = TRUE), "try-error")) { .nsample <- 10L # this also circumvents a bug in sp 1.0-0 # (missing drop=FALSE in sample.Spatial()) if (ntries >= 1000) { stop("'sp::spsample()' didn't succeed in sampling a ", "point from tile \"", as.character(.eventTile), "\"") } ntries <- ntries + 1L } .eventLocation <- coordinates(eventLocationSP)[1L,,drop=FALSE] } else { # i.e. source is one of the currently infective individuals sourceType <- eventMatrix[.eventSource,"type"] sourceCoords <- eventCoords[.eventSource,,drop=FALSE] sourceIR <- influenceRegions[[.eventSource]] sourceEpss <- eventMatrix[.eventSource,"eps.s"] .upperRange <- min(sourceEpss, maxExtentOfW) .eventType <- sample(typeNames[qmatrix[sourceType,]], 1L) .eventTypeCode <- match(.eventType, typeNames) eventInsideIR <- FALSE ntries <- 0L while(!eventInsideIR) { if (ntries >= 1000) { stop("event location sampled by siaf$simulate() was", " rejected 1000 times (not in influence region)") } ntries <- ntries + 1L eventLocationIR <- siaf$simulate(1L, siafpars, .eventTypeCode, .upperRange) eventInsideIR <- inside.owin(eventLocationIR[,1], eventLocationIR[,2], sourceIR) } .eventLocation <- sourceCoords + eventLocationIR whichTile <- over(SpatialPoints(.eventLocation), tiles) if (is.na(whichTile)) { warning("event generated at (", paste(.eventLocation, collapse=","), ") not in 'tiles'") stop("'tiles' must cover all of 'W'") } .eventTile <- factor(row.names(tiles)[whichTile], levels = tileLevels) } .eventType <- factor(.eventType, levels=typeNames) # sample marks at this time and location .eventMarks <- rmarks(ct, .eventLocation) # gather event information .eventData <- data.frame(time=ct, tile=.eventTile, type=.eventType, .eventMarks, check.rows = FALSE, check.names = FALSE) # determine potential sources of infection (for epidataCS and lambda) .sources <- infectives[eventMatrix[infectives,"type"] %in% which(qmatrix[,.eventType])] if (length(.sources) > 0L) { .sdiffs <- .eventLocation[rep.int(1L,length(.sources)),,drop=FALSE] - eventCoords[.sources,,drop=FALSE] .sources <- .sources[sqrt(.rowSums(.sdiffs^2, length(.sources), 2L)) <= eventMatrix[.sources,"eps.s"]] } # calculate actual intensity at this time, location and type .mmhEvent <- buildmmh(.eventData) .etaEvent <- .mmhEvent %*% beta if (!is.null(.offsetEvent <- attr(.mmhEvent, "offset"))) .etaEvent <- .etaEvent + .offsetEvent if (nbeta0 == 1L) { .etaEvent <- .etaEvent + beta0 } else if (nbeta0 > 1L) { .etaEvent <- .etaEvent + beta0[.eventType] } .lambdah <- exp(.etaEvent) .lambdae <- if (hase && length(.sources) > 0L) { .sdiffs <- 
.eventLocation[rep.int(1L,length(.sources)),,drop=FALSE] - eventCoords[.sources,,drop=FALSE] .fSources <- siaf$f(.sdiffs, siafpars, eventMatrix[.sources,"type"]) .gSources <- tiaf$g(ct - eventMatrix[.sources,"time"], tiafpars, eventMatrix[.sources,"type"]) sum(eTerms[.sources,"expeta"] * .fSources * .gSources) } else 0 # calculate terms of the epidemic component e_j(t,s) of the new infective tmp <- eTermsCalc(.eventData, .eventLocation) # Update objects eventMatrix[j,] <- c(ct, as.numeric(.eventTile), as.numeric(.eventType), sapply(.eventMarks, as.numeric), .eventSource, .lambdah, .lambdae, Lambdag, tBLOCK) eventCoords[j,] <- .eventLocation eTerms[j,] <- tmp[[1]] bdists[j] <- tmp[[2]] influenceRegions[[j]] <- tmp[[3]][[1]] sources[[j]] <- .sources # Update set of infectives and recovery times infectives <- c(infectives, j) Rtimes <- c(Rtimes, setNames(ct + .eventMarks[["eps.t"]], j)) # Increment next event iterator j <- j + 1L } } if (trace > 0L) cat("---\n") ### update T if simulation ended preterm if (j > maxEvents || (!hash && length(infectives) == 0L)) { T <- ct # clip stgrid to effective time range of simulation stgrid <- subset(stgrid, start <= T) if (j > maxEvents) { cat("Maximum number of events (nEvents=", nEvents, ") reached @t = ", T, "\n", sep="") } else { # epidemic-only model cat("Simulation has ended preterm (no more infectives)", "@t =", T, "with", j-1L-Nout, "simulated events.\n") } } else { # ct >= T or ct+Delta >= T cat("Simulation has ended @t =", T, "with", j-1L-Nout, "simulated events.\n") } ############## ### Return ### ############## ### Throw warning in case of numerical difficulties if (hadNumericalProblems0) { warning("occasionally, the overall infection rate was numerically equal to 0") } ### throw an error if no events have been simulated ## because SpatialPoints[DataFrame]() does not allow the empty set, try: ## SpatialPoints(coords = matrix(numeric(0), 0, 2), bbox=bbox(W)) if (j-1L == Nout) { stop("no events have been simulated") } ### transform eventMatrix back into a data.frame with original factor variables cat("\nPreparing simulated events for \"epidataCS\" ...\n") preEventData <- eventData # drop unused entries (due to large pre-allocation) from objects seqAlongEvents <- seq_len(j-1L) eventData <- as.data.frame(eventMatrix[seqAlongEvents,,drop=FALSE]) # rebuild factor variables for (idx in which(sapply(preEventData, is.factor))) { origlevels <- levels(preEventData[[idx]]) eventData[[idx]] <- factor(eventData[[idx]], levels=seq_along(origlevels), labels=origlevels) } # transform integer columns to integer eventData[c("source","BLOCK")] <- lapply(eventData[c("source","BLOCK")], as.integer) ### Append additional columns for an epidataCS object # add endemic covariates at events stgrididx <- apply(eventData[c("BLOCK","tile")], 1, function (x) { ret <- with(stgrid, which(BLOCK==as.integer(x[1L]) & tile==x[2L])) if (length(ret) == 0L) NA_integer_ else ret #<- events of the prehistory have missing BLOCKs, thus return NA }) stgridIgnoreCols <- match(c("BLOCK", setdiff(obligColsNames_stgrid, "start")), names(stgrid)) eventData <- cbind(eventData, stgrid[stgrididx, -stgridIgnoreCols, drop = FALSE]) rownames(eventData) <- seqAlongEvents # add hidden columns eventData$.obsInfLength <- with(eventData, pmin(T-time, eps.t)) eventData$.sources <- sources[seqAlongEvents] eventData$.bdist <- bdists[seqAlongEvents] eventData$.influenceRegion <- influenceRegions[seqAlongEvents] attr(eventData$.influenceRegion, "nCircle2Poly") <- nCircle2Poly attr(eventData$.influenceRegion, 
"clipper") <- "polyclip" ### Construct "epidataCS" object events <- SpatialPointsDataFrame( coords = eventCoords[seqAlongEvents,,drop=FALSE], data = eventData, proj4string = W@proj4string, match.ID = FALSE #, bbox = bbox(W)) # the bbox of SpatialPoints is defined as the actual # bbox of the points and is also updated every time # when subsetting the SpatialPoints object # -> useless to specify it as the bbox of W ) if (.onlyEvents) { cat("Done.\n") attr(events, "timeRange") <- c(t0, T) attr(events, "runtime") <- proc.time()[[3]] - ptm return(events) } epi <- list(events=events, stgrid=stgrid, W=W, qmatrix=qmatrix) ### Return object of class "simEpidataCS" cat("Done.\n") # append configuration of the model epi$bbox <- bbox(W) epi$timeRange <- c(t0, T) epi$formula <- list( endemic = if (typeSpecificEndemicIntercept) { update.formula(formula(endemic), ~ (1|type) + .) # re-add to the formula } else formula(endemic), epidemic = formula(epidemic), siaf = siaf, tiaf = tiaf ) if (epilink != "log") # set as attribute only if non-standard link function attr(epi$formula$epidemic, "link") <- epilink # coefficients as a numeric vector to be compatible with twinstim-methods epi$coefficients <- coefs #list(beta0=beta0, beta=beta, gamma=gamma, # siafpars=siafpars, tiafpars=tiafpars) epi$npars <- c(nbeta0=nbeta0, p=p, q=q, nsiafpars=nsiafpars, ntiafpars=ntiafpars) epi$control.siaf <- control.siaf # for R0.simEpidataCS epi$call <- cl epi$runtime <- proc.time()[[3]] - ptm class(epi) <- c("simEpidataCS", "epidataCS") return(epi) } ############################################################################# ### much more efficient simulation for endemic-only models ### where intensities are piecewise constant and independent from the history ############################################################################# ## auxiliary function to calculate the endemic intensity by spatio-temporal cell ## from the model environment of a "twinstim" fit .hGrid <- function (modelenv) { .beta0 <- rep_len(if (modelenv$nbeta0==0L) 0 else modelenv$beta0, modelenv$nTypes) hGrid <- sum(exp(.beta0)) * eval(modelenv$hGridExpr, envir = modelenv) blockstartstop <- modelenv$histIntervals[ match(modelenv$gridBlocks, modelenv$histIntervals$BLOCK), ] data.frame(blockstartstop, tile = modelenv$gridTiles, hGrid = hGrid, hInt = hGrid * modelenv$ds * modelenv$dt, row.names = NULL, check.rows = FALSE, check.names = FALSE) } ## simulate events from the endemic component of a "twinstim" fit ## this simulates pure (s,t,k) data with the only extra column being "tile" simEndemicEvents <- function (object, tiles) { ## check arguments stopifnot(inherits(object, "twinstim")) if (is.null(modelenv <- environment(object))) stop("no model environment -- re-fit or update() with 'model=TRUE'") tileLevels <- levels(modelenv$gridTiles) tiles <- check_tiles(tiles, levels = tileLevels, areas.stgrid = modelenv$ds[seq_along(tileLevels)], keep.data = FALSE) ## calculate endemic intensity by spatio-temporal cell lambdaGrid <- .hGrid(modelenv) ## simulate number of events by cell nGrid <- rpois(n = nrow(lambdaGrid), lambda = lambdaGrid[["hInt"]]) nTotal <- sum(nGrid) ## sample time points tps <- mapply( FUN = runif, n = nGrid, min = lambdaGrid[["start"]], max = lambdaGrid[["stop"]], SIMPLIFY = FALSE, USE.NAMES = FALSE ) ## sample types beta0 <- coeflist.default(coef(object), object$npars)[["nbeta0"]] nTypes <- nrow(object$qmatrix) types <- if (nTypes == 1L) { rep.int(1L, nTotal) } else { sample.int(n = nTypes, size = nTotal, replace = TRUE, prob = if 
(length(beta0) > 1L) exp(beta0)) } ## put event times, tiles, and types in a data frame events <- data.frame( ##lambdaGrid[rep.int(seq_len(nrow(lambdaGrid)), nGrid), c("tile", "BLOCK")], time = unlist(tps, recursive = FALSE, use.names = FALSE), tile = rep.int(lambdaGrid[["tile"]], nGrid), type = factor(types, levels = seq_len(nTypes), labels = rownames(object$qmatrix)), row.names = NULL, check.rows = FALSE, check.names = FALSE ) ## empty CRS to avoid spending 3/4 of this function's runtime in rebuild_CRS() proj4string <- tiles@proj4string tiles@proj4string <- new("CRS") ## sample coordinates from tiles nByTile <- tapply(X = nGrid, INDEX = lambdaGrid["tile"], FUN = sum) xyByTile <- sapply( X = names(nByTile), FUN = function (tile) { n <- nByTile[tile] if (n > 0L) coordinates(spsample(x = tiles[tile,], n = n, type = "random", iter = 10)) ## else NULL }, simplify = FALSE, USE.NAMES = TRUE ) ## set coordinates of events events <- SpatialPointsDataFrame( coords = do.call("rbind", xyByTile), data = events[order(events$tile),], proj4string = proj4string, match.ID = FALSE) ## order by time events <- events[order(events$time),] row.names(events) <- seq_along(events) events } #################################################### ### some twinstim-methods for "simEpidataCS" objects #################################################### ### wrapper for R0.twinstim R0.simEpidataCS <- function (object, trimmed = TRUE, ...) { R0.twinstim(object, newevents=object$events@data, trimmed = trimmed, ...) } ### wrapper for intensityplot.twinstim as.twinstim.simEpidataCS <- function (x) { m <- do.call("twinstim", c( formula(x), list(data = quote(x), control.siaf = x$control.siaf, optim.args = list(par=coef(x), fixed=TRUE), model = TRUE, cumCIF = FALSE, verbose = FALSE) )) components2copy <- setdiff(names(m), names(x)) for (comp in components2copy) x[[comp]] <- m[[comp]] environment(x) <- environment(m) class(x) <- c("simEpidataCS", "epidataCS", "twinstim") x } intensityplot.simEpidataCS <- function (x, ...) { if (is.null(environment(x))) { objname <- deparse(substitute(x)) message("Setting up the model environment ...") x <- as.twinstim.simEpidataCS(x) try({ assign(objname, x, envir=parent.frame()) message("Note: added model environment to '", objname, "' for future use.") }, silent=TRUE) } intensityplot.twinstim(x, ...) } ### the residual process Lambda_g(t) is stored with the simulated events residuals.simEpidataCS <- function (object, ...) { setNames(object$events$Lambdag, row.names(object$events))[!is.na(object$events$Lambdag)] } ################################################################################ # A 'simulate' method for objects of class "twinstim". 
################################################################################ ### FIXME: actually stgrid's of simulations might have different time ranges ### when nEvents is active -> atm, simplify ignores this .rmarks <- function (data, t0, T) { observedMarks <- subset(marks.epidataCS(data, coords = FALSE), subset = time > t0 & time <= T) if (nrow(observedMarks) == 0L) { message("Note: 'data' does not contain any events during ('t0';'T'],\n", " 'rmarks' thus samples marks from all of 'data$events'") observedMarks <- marks.epidataCS(data, coords = FALSE) } observedMarks <- observedMarks[match("eps.t", names(observedMarks)):ncol(observedMarks)] rm(list = "data", inherits = FALSE) # to save memory (environment is kept) function (t, s, n = 1L) { as.data.frame(lapply(observedMarks, function (x) sample(na.omit(x), size = n, replace = TRUE)), optional = TRUE) } } simulate.twinstim <- function (object, nsim = 1, seed = NULL, data, tiles, newcoef = NULL, rmarks = NULL, t0 = NULL, T = NULL, nEvents = 1e5, control.siaf = object$control.siaf, W = data$W, trace = FALSE, nCircle2Poly = NULL, gmax = NULL, .allocate = 500, simplify = TRUE, ...) { ptm <- proc.time()[[3]] cl <- match.call() ### Determine seed (this part is copied from stats:::simulate.lm with ### Copyright (C) 1995-2012 The R Core Team) if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } ### Few checks stopifnot(inherits(object, "twinstim"), inherits(data, "epidataCS")) stopifnot(isScalar(nsim), nsim > 0) nsim <- as.integer(nsim) if (is.null(t0)) t0 <- object$timeRange[1] if (is.null(T)) T <- object$timeRange[2] if (is.null(nCircle2Poly)) nCircle2Poly <- attr(data$events$.influenceRegion, "nCircle2Poly") ### Retrieve arguments for simulation endemic <- formula(object)$endemic epidemic <- formula(object)$epidemic # we don't need any reference to the original formula environment environment(endemic) <- environment(epidemic) <- .GlobalEnv if (is.null(rmarks)) rmarks <- .rmarks(data, t0 = t0, T = T) theta <- coef(object) if (!is.null(newcoef)) { newcoef <- check_twinstim_start(newcoef) newcoef <- newcoef[names(newcoef) %in% names(theta)] theta[names(newcoef)] <- newcoef } thetalist <- coeflist.default(theta, object$npars) ### Run the simulation(s) # establish call simcall <- call("simEpidataCS", endemic=endemic, epidemic=epidemic, siaf=quote(formula(object)$siaf), tiaf=quote(formula(object)$tiaf), qmatrix=quote(object$qmatrix), rmarks=quote(rmarks), events=quote(data$events), stgrid=quote(data$stgrid), tiles=quote(tiles), beta0=thetalist[[1L]], beta=thetalist[[2L]], gamma=thetalist[[3L]], siafpars=thetalist[[4L]], tiafpars=thetalist[[5L]], epilink = .epilink(object), t0=t0, T=T, nEvents=nEvents, control.siaf=control.siaf, W=quote(W), trace=trace, nCircle2Poly=nCircle2Poly, gmax=gmax, .allocate=.allocate, .skipChecks=TRUE, .onlyEvents=FALSE) # First simulation if (nsim > 1L) { cat("\nTime at beginning of simulation:", as.character(Sys.time()), "\n") cat("Simulation 1 /", nsim, "...\n") cat("-------------------------------------------------------------------------------\n") } res <- eval(simcall) if (nsim > 1L) { cat("\n-------------------------------------------------------------------------------\n") cat("Runtime of first simulation:", res$runtime, "seconds\n") 
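        ## (The estimate below extrapolates the first simulation's runtime
        ## linearly to the remaining nsim-1 replications.)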
cat("Estimated finishing time:", as.character(Sys.time() + (nsim-1) * res$runtime), "\n\n") # set up list of simulations res <- if (simplify) { with(res, list( eventsList=c(structure(events, timeRange = timeRange, runtime = runtime), vector(nsim-1L, mode="list")), stgrid=stgrid, W=W, qmatrix=qmatrix, bbox=bbox, formula=formula, coefficients=coefficients, npars=npars, control.siaf=control.siaf, call=call )) } else { c(list(res), vector(nsim-1L, mode="list")) } # force garbage collection gc() # run the remaining simulations simcall$.onlyEvents <- simplify for (i in 2:nsim) { cat("Simulation", sprintf(paste0("%",nchar(nsim),"i"), i), "/", nsim, "...") capture.output(resi <- eval(simcall)) .nEvents <- if (simplify) sum(!is.na(resi$source)) else { sum(!is.na(resi$events$source)) } .T <- if (simplify) attr(resi,"timeRange")[2] else resi$timeRange[2] cat("\tsimulated", .nEvents, "events", if (nEvents == .nEvents) "(reached maximum)", "up to time", .T, "\n") if (simplify) res$eventsList[[i]] <- resi else res[[i]] <- resi } cat("\nDone (", as.character(Sys.time()), ").\n", sep="") } attr(res, "call") <- cl attr(res, "seed") <- RNGstate attr(res, "runtime") <- proc.time()[[3]] - ptm class(res) <- if (nsim == 1L) { c("simEpidataCS", "epidataCS") } else { attr(res, "simplified") <- simplify c("simEpidataCSlist") } res } ### print method for lists of simulated epidemics print.simEpidataCSlist <- function (x, ...) { cat("\nCall:\n") print.default(attr(x, "call")) simplified <- attr(x, "simplified") nsim <- if (simplified) length(x$eventsList) else length(x) cat("\n") cat(if (simplified) "Simplified list" else "List", "of", nsim, "simulated epidemics of class \"simEpidataCS\" (not printed)\n\n") invisible(x) } "[[.simEpidataCSlist" <- function (x, i) { simplified <- attr(x, "simplified") if (simplified) { x <- unclass(x) x$eventsList <- x$eventsList[[i]] names(x)[names(x) == "eventsList"] <- "events" x <- append(x, list(timeRange = attr(x$events, "timeRange")), after=5L) x$runtime <- attr(x$events, "runtime") attr(x$events, "timeRange") <- attr(x$events, "runtime") <- NULL class(x) <- c("simEpidataCS", "epidataCS") x } else NextMethod("[[") } plot.simEpidataCSlist <- function (x, which = NULL, mfrow = n2mfrow(length(which)), main = paste("Simulated epidemic", which), aggregate = c("time", "space"), subset, ...) { simplified <- attr(x, "simplified") nsim <- if (simplified) length(x$eventsList) else length(x) if (is.null(which)) { which <- seq_len(nsim) if (nsim > 4) which <- sample(which, 4L) } opar <- par(mfrow = mfrow); on.exit(par(opar)) main <- rep_len(main, length(which)) for (i in seq_along(which)) { do.call("plot", args=list(x=quote(x[[which[i]]]), aggregate=aggregate, subset=substitute(subset), main = main[i], ...)) } } surveillance/R/wrap_univariate.R0000644000176200001440000001314313433500440016470 0ustar liggesusers############################################################################## # This function is a wrapper for univariate surveillance algorithms # using the old disProg and survRes object # # An sts object is given and a pre specified algorithms is ran # by successively creating a disProg object for each region, # running the algo and then assign the slots of the resulting survRes # object to an sts object. ################################################################################### ###Apply other algorithms by wrapping up a suitable package. 
#Wrapper function to call algo.farrington for each time series in an sts object
wrap.algo <- function(sts, algo, control, control.hook=function(k, control) return(control), verbose=TRUE,...) {
  stopifnot(is.vector(control[["range"]], mode = "numeric"))

  #Number of time series
  nAreas <- ncol(sts@observed)

  #Set old alarms and upperbounds to NA
  sts@alarm[] <- NA
  sts@upperbound[] <- NA_real_

  #Loop over all regions
  for (k in 1:nAreas) {
    if (verbose) {
      cat("Running ",algo," on area ",k," out of ",nAreas,"\n")
    }
    ##Create an old S3 disProg object
    disProg.k <- sts2disProg(sts[,k])

    #Use the univariate algorithm (possibly preprocess control object)
    kcontrol <- control.hook(k, control)
    survRes.k <- do.call(algo,args = list(disProg.k, control=kcontrol))

    #Transfer results to the S4 object
    if (!is.null(survRes.k)) {
      sts@alarm[control$range,k] <- survRes.k$alarm
      sts@upperbound[control$range,k] <- survRes.k$upperbound
    }
  }

  #Control object needs only to be set once
  sts@control <- survRes.k$control
  #Set correct theta0t matrix for all units
  sts@control$theta0t <- control$theta0t

  #Reduce sts object to only those observations in range
  sts <- sts[control$range, ]
  return(sts)
}

#Farrington wrapper
farrington <- function(sts, control=list(range=NULL, b=5, w=3, reweight=TRUE, verbose=FALSE, alpha=0.05),...) {
  wrap.algo(sts,algo="algo.farrington",control=control,...)
}

#Bayes wrapper (this can be implemented more efficiently)
bayes <- function(sts, control = list(range = range, b = 0, w = 6, actY = TRUE,alpha=0.05),...) {
  if (sts@epochAsDate) {
    warning("algo.bayes currently can't handle Date entries. Computing reference values based on freq")
  }
  wrap.algo(sts,algo="algo.bayes",control=control,...)
}

#RKI wrapper
rki <- function(sts, control = list(range = range, b = 2, w = 4, actY = FALSE),...) {
  if (sts@epochAsDate) {
    warning("algo.rki currently can't handle Date entries. Computing reference values based on freq")
  }
  wrap.algo(sts,algo="algo.rki",control=control,...)
}

#outbreakP wrapper
outbreakP <- function(sts, control=list(range = range, k=100, ret=c("cases","value"),maxUpperboundCases=1e5),...) {
  wrap.algo(sts,algo="algo.outbreakP",control=control,...)
}

#HMM wrapper
hmm <- function(sts, control=list(range=NULL, noStates=2, trend=TRUE, noHarmonics=1,covEffectEqual=FALSE),...) {
  if (sts@epochAsDate) {
    warning("algo.hmm currently can't handle Date entries. Computing reference values based on freq")
  }
  wrap.algo(sts,algo="algo.hmm",control=control,...)
}

#Cusum wrapper
cusum <- function(sts, control = list(range=range, k=1.04, h=2.26, m=NULL, trans="standard",alpha=NULL),...) {
  wrap.algo(sts,algo="algo.cusum",control=control,...)
}

#GLRpois wrapper
glrpois <- function(sts, control = list(range=range,c.ARL=5, S=1, beta=NULL, Mtilde=1, M=-1, change="intercept",theta=NULL),...) {
  wrap.algo(sts,algo="algo.glrpois",control=control,...)
}

#GLRnb wrapper
glrnb <- function(sts, control = list(range=range,c.ARL=5, mu0=NULL, alpha=0, Mtilde=1, M=-1, change="intercept",theta=NULL,dir=c("inc","dec"), ret=c("cases","value")), ...) {
  wrap.algo(sts,algo="algo.glrnb",control=control,...)
}

#### TODO: this code definitely needs some more documentation --
#### wrap.algo is currently completely undocumented.

#Rogerson wrapper
# theta0t now has to be a matrix
#library(surveillance)
#data("ha")
#rogerson(disProg2sts(ha),control=list(range=200:290,ARL0=100,s=1,theta0t=matrix(1,nrow=91,ncol=12)))
rogerson <- function(sts, control = list(range=range, theta0t=NULL, ARL0=NULL,
                                         s=NULL, hValues=NULL,
                                         distribution=c("poisson","binomial"),
                                         nt=NULL, FIR=FALSE, limit=NULL,
                                         digits=1),...) {
  if (sts@epochAsDate) {
    warning("algo.rogerson currently can't handle Date entries. Computing reference values based on freq")
  }
  #Hook function to find the right theta0t vector
  control.hook <- function(k, control) {
    #Extract values relevant for the k'th component
    control$theta0t <- control$theta0t[,k]
    if (is.null(control[["nt",exact=TRUE]])) {
      control$nt <- sts@populationFrac[control$range,k]
    } else {
      if (!isTRUE(all.equal(sts@populationFrac[control$range,k], control$nt[,k]))) {
        warning("'nt' is specified in the control object, but it differs from the population given in the sts object.")
      } else {
        control$nt <- control$nt[,k]
      }
    }
    #If no hValues given then compute them
    if (is.null(control[["hValues",exact=TRUE]])) {
      #This code does not appear to work once n is big.
      # control$hValues <- hValues(theta0 = unique(control$theta0t), ARL0=control$ARL0, s=control$s , distr = control$distribution, n=mean(control$nt))$hValues
      control$hValues <- hValues(theta0 = unique(control$theta0t),
                                 ARL0=control$ARL0, s=control$s,
                                 distr = control$distribution)$hValues
    }
    return(control)
  }
  #WrapIt
  wrap.algo(sts, algo="algo.rogerson", control=control,
            control.hook=control.hook, ...)
}
surveillance/R/checkDerivatives.R0000644000176200001440000000463612523122744016560 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
###
### Simple wrapper around functionality of the numDeriv and maxLik packages
### to check the score vector and the Fisher information matrix
### CAVE: the return values of both wrappers are not unified
###
### Copyright (C) 2012, 2015 Sebastian Meyer
### $Revision: 1327 $
### $Date: 2015-05-08 14:02:44 +0200 (Fri, 08. May 2015) $
################################################################################

checkDerivatives.numDeriv <- function(ll, score, fisher, par,
                                      method="Richardson", method.args=list(), ...)
{
    cat("Checking analytical score vector using numDeriv::grad() ...\n")
    nsc <- numDeriv::grad(ll, par, method = method, method.args = method.args, ...)
    asc <- score(par, ...)
    print(all.equal(asc, nsc, check.attributes=FALSE))

    cat("Checking analytical Fisher information matrix using numDeriv::hessian() ...\n")
    if (length(par) > 50)
        cat("NOTE: this might take several minutes considering length(par) =",
            length(par), "\n")
    ## note: numDeriv::hessian() only supports the "Richardson" (and "complex")
    ## method, hence 'method' is not passed on here
    nfi <- -numDeriv::hessian(ll, par, method = "Richardson",
                              method.args = method.args, ...)
    afi <- fisher(par, ...)
    print(all.equal(afi, nfi, check.attributes=FALSE))

    invisible(list(score = list(analytic=asc, numeric=nsc),
                   fisher = list(analytic=afi, numeric=nfi)))
}

checkDerivatives.maxLik <- function(ll, score, fisher, par,
                                    eps=1e-6, print=FALSE, ...)
{
    cat("Checking analytical score and Fisher using maxLik::compareDerivatives() ...\n")
    res <- maxLik::compareDerivatives(
        f=ll, grad=score, hess=function (par, ...) -fisher(par, ...),
        t0=par, eps=eps, print=print, ...)
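    ## compareDerivatives() returns a list whose elements $compareGrad and
    ## $compareHessian contain the analytic and numeric derivatives, which
    ## are compared against each other below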
cat("Comparison of score vectors:\n") print(all.equal(res$compareGrad$analytic, drop(res$compareGrad$numeric), check.attributes=FALSE)) cat("Comparison of Fisher information matrices:\n") print(all.equal(res$compareHessian$analytic, drop(res$compareHessian$numeric), check.attributes=FALSE)) invisible(res) } surveillance/R/twinstim_helper.R0000644000176200001440000003706614013521730016517 0ustar liggesusers################################################################################ ### Internal helper functions for "twinstim" ### ### Copyright (C) 2009-2016,2018,2021 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at https://www.R-project.org/Licenses/. ################################################################################ ### Determines indexes of potential sources of infection ## determine potential sources of the i'th event ## all arguments but i and qmatrix are nEvents-vectors ## -> determine potential sources for eventTimes[i], eventsTypes[i] with ## distances distvec_j = ||s_i - s_j|| determineSources1 <- function (i, eventTimes, removalTimes, distvec, eps.s, eventTypes = NULL, qmatrix) { tp <- eventTimes[i] infectivity <- (eventTimes < tp) & (removalTimes >= tp) #<- eventTimes= t)) ## lidx <- length(idx) ## if (lidx == 0L) NA_integer_ else if (lidx == 1L) idx else { ## stop("'stgrid' has overlapping spatio-temporal grid cells") ## } ## ~5x faster alternative assuming a full BLOCK x tile grid, which is ## sorted by BLOCK and tile (tile varying first), specifically there must be ## all levels(stgrid$tile) in every BLOCK in that order; ## this structure is guaranteed by check_stgrid() if (t <= stgrid$start[1L]) return(NA_integer_) # prehistory event blockstart <- match(TRUE, stgrid$stop >= t) # NA if t is beyond idx <- blockstart + match(tilename, levels(stgrid$tile)) - 1L return(idx) } ## Crude estimate for a start value of the endemic intercept ## assuming the model only had a single-cell endemic component ## (rate of homogeneous Poisson process scaled for the offset) crudebeta0 <- function (nEvents, offset.mean, W.area, period, nTypes) { ## nEvents = exp(offset + beta0) * W.area * period * nTypes log(nEvents/W.area/period/nTypes) - offset.mean } ### Really internal helper function, which constructs the function that ### integrates the two-dimensional 'siaf' function over the influence regions of ### the events. The only argument of the returned function is 'siafpars'. ### The returned function is defined in the callers environment, where the ### variables used in the function are available (inside twinstim() or ### simEpidataCS()). 
.siafIntFUN <- function (siaf, noCircularIR, #= all(eps.s>bdist) = all(sapply(influenceRegion, function(x) # is.null(attr(x,"radius")))) parallel = FALSE ){ ## the following variables are unused here, because the environment of ## FUN will be set to the parent.frame(), where the variables exist ## they are only included to avoid the notes in R CMD check iRareas <- influenceRegion <- eventTypes <- eps.s <- bdist <- effRanges <- NULL ## define the siaf integration function depending on the siaf specification FUN <- if (attr(siaf, "constant")) { if (exists("iRareas", where=parent.frame(), mode="numeric")) { ## in twinstim(), 'iRareas' are pre-defined to save ## computation time (data are fixed during fitting) function (siafpars) iRareas } else { function (siafpars) vapply(X = influenceRegion, FUN = attr, which = "area", FUN.VALUE = 0, USE.NAMES = FALSE) } } else if (is.null(siaf$Fcircle) || # if siaf$Fcircle not available (is.null(siaf$effRange) && noCircularIR)) { ## Numerically integrate 'siaf' over each influence region mapplyFUN( c(alist(siaf$F, influenceRegion, type=eventTypes), list(MoreArgs=quote(list(siaf$f, siafpars, ...)), SIMPLIFY=TRUE, USE.NAMES=FALSE)), ##<- we explicitly quote() the ...-part instead of simply including ## it in the above alist() - only to make checkUsage() happy parallel = parallel) } else if (is.null(siaf$effRange)) # use Fcircle but only delta-trick { mapplyFUN( c(alist(function (iR, type, eps, bdisti, siafpars, ...) if (eps <= bdisti) # influence region completely inside W siaf$Fcircle(eps, siafpars, type) else # numerically integrate over influence region siaf$F(iR, siaf$f, siafpars, type, ...) , influenceRegion, eventTypes, eps.s, bdist), list(MoreArgs=quote(list(siafpars, ...)), SIMPLIFY=TRUE, USE.NAMES=FALSE)), parallel = parallel) } else { # fast Fcircle integration considering the delta-trick AND effRange .ret <- mapplyFUN( c(alist(function (iR, type, eps, bdisti, effRange, siafpars, ...) if (eps <= bdisti) # influence region completely inside W siaf$Fcircle(eps, siafpars, type) else if (effRange <= bdisti) # effective region inside W siaf$Fcircle(bdisti, siafpars, type) else # numerically integrate over influence region siaf$F(iR, siaf$f, siafpars, type, ...) , influenceRegion, eventTypes, eps.s, bdist, effRanges), list(MoreArgs=quote(list(siafpars, ...)), SIMPLIFY=TRUE, USE.NAMES=FALSE)), ## before: compute computationally effective range of the 'siaf' ## for the current 'siafpars' for each event (type): before = expression( effRangeTypes <- rep_len(siaf$effRange(siafpars), nTypes), effRanges <- effRangeTypes[eventTypes] # N-vector ), parallel = parallel) if (exists("effRangeTypes", where=parent.frame(), mode="numeric")) { ## in simEpidataCS effRangeTypes is pre-calculated outside siafInt to ## save computation time ('siafpars' is constant during simulation) body(.ret)[[grep("^effRangeTypes <-", body(.ret))]] <- NULL } .ret } ## set the environment of the siafInt function to the callers environment ## (i.e. inside twinstim() or simEpidataCS()) ## where the variables used in the function are defined environment(FUN) <- parent.frame() FUN } ### Helper function, which constructs the function that integrates the 'tiaf'. ### The returned function is defined in the callers environment, where the ### variables used in the function are available (inside twinstim() or ### simEpidataCS()). 
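### Since the cumulative temporal interaction function G is available,
### integration reduces to a simple difference:
### int_{from}^{to} g(t) dt = G(to) - G(from).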
.tiafIntFUN <- function ()
{
    ## the following variables are unused here, because the environment of
    ## FUN will be set to the parent.frame(), where the variables exist
    ## they are only included to avoid the notes in R CMD check
    gIntLower <- gIntUpper <- eventTypes <- tiaf <- NULL

    ## from, to and type may be vectors of compatible lengths
    FUN <- function(tiafpars, from = gIntLower, to = gIntUpper,
                    type = eventTypes, G = tiaf$G) {
        tiafIntUpper <- G(to, tiafpars, type)
        tiafIntLower <- G(from, tiafpars, type)
        tiafIntUpper - tiafIntLower
    }

    ## set the environment of the tiafInt function to the caller's environment
    ## (i.e. inside twinstim() or simEpidataCS())
    ## where the default argument values are defined
    environment(FUN) <- parent.frame()
    FUN
}


### rename control arguments with optim names to have names compatible with nlminb

control2nlminb <- function (control, defaults)
{
    renamelist <- cbind(optim  = c("maxit", "REPORT", "abstol", "reltol"),
                        nlminb = c("iter.max", "trace", "abs.tol", "rel.tol"))
    for (i in which(renamelist[,"optim"] %in% names(control))) {
        fromname <- renamelist[i, "optim"]
        toname <- renamelist[i, "nlminb"]
        if (is.null(control[[toname]])) {
            control[[toname]] <- control[[fromname]]
        }
        control[[fromname]] <- NULL
    }
    defaults[names(control)] <- control
    defaults
}


### Helper for iaf-checks:
### Checks if FUN has three arguments (s/t, pars, type) and
### adds the missing ones if necessary

.checknargs3 <- function (FUN, name)
{
    FUN <- match.fun(FUN)
    NARGS <- length(formals(FUN))
    if (NARGS == 0L) {
        stop("the function '", name, "' must accept at least one argument")
    } else if (NARGS == 1L) {
        formals(FUN) <- c(formals(FUN), alist(pars=, types=))
    } else if (NARGS == 2L) {
        formals(FUN) <- c(formals(FUN), alist(types=))
    }
    FUN
}


### Internal wrapper used in twinstim() and simEpidataCS() to evaluate the siaf
### and tiaf arguments. If successful, returns the checked interaction function.
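### For example (illustrative): a numeric vector c(0.5, 1) is interpreted as
### the knots of a step function (-> siaf.step/tiaf.step), a list is passed on
### to siaf()/tiaf(), and a missing or NULL argument yields the constant
### interaction function.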
.parseiaf <- function (iaf, type, eps = NULL, verbose = TRUE) { type <- match.arg(type, choices=c("siaf", "tiaf"), several.ok=FALSE) res <- if (missing(iaf) || is.null(iaf)) { if (verbose) { message("assuming constant ", switch(type, siaf="spatial", tiaf="temporal"), " interaction '", type, ".constant()'") } do.call(paste(type, "constant", sep="."), args=alist()) } else if (is.list(iaf)) { ret <- do.call(type, args = iaf) ## keep special attributes attr(ret, "knots") <- attr(iaf, "knots") attr(ret, "maxRange") <- attr(iaf, "maxRange") attr(ret, "Boundary.knots") <- attr(iaf, "Boundary.knots") attr(ret, "constant") <- attr(iaf, "constant") ret } else if (is.vector(iaf, mode = "numeric")) { do.call(paste(type,"step",sep="."), args = list(knots = iaf)) } else { stop("'", as.character(substitute(iaf)), "' must be NULL (or missing), a list (-> continuous ", "function), or numeric (-> knots of step function)") } ## indicate if this is a constant iaf attr(res, "constant") <- isTRUE(attr(res, "constant")) ## attach unique interaction ranges if (!is.null(eps)) { # in simEpidataCS() eps is not known beforehand attr(res, "eps") <- sort(unique(eps)) } return(res) } ### Construct a call/function for mapply or parallel::mcmapply, respectively ## args: alist() of arguments for mapply() ## before,after: expressions to be prepended/appended to the function body, ## where "res" will be the result of mapply() mapplyCall <- function (args, cores = 1L) { parallel <- is.name(cores) || cores > 1L mapplyFUN <- if (parallel) quote(parallel::mcmapply) else quote(mapply) parallelArgs <- list(mc.preschedule=TRUE, mc.cores=cores) as.call(c(mapplyFUN, args, if (parallel) parallelArgs)) } mapplyFUN <- function (args, before = list(), after = list(), parallel = TRUE) { FUN <- as.function(alist(siafpars=, ...=, NULL), envir=parent.frame()) body(FUN) <- mapplyCall(args, if (parallel) quote(cores) else 1L) if (length(after) + length(before) > 0) { body(FUN) <- as.call(c( list(as.name("{")), before, if (length(after)) call("<-", as.name("res"), body(FUN)) else body(FUN), after)) } FUN } ### parse the list or vector of start values check_twinstim_start <- function (start) { if (is.null(start)) { return(start) } else if (is.list(start)) { # convert allowed list specification to vector stopifnot(names(start) %in% c("endemic", "epidemic", "h", "e", "siaf", "tiaf", "e.siaf", "e.tiaf")) names(start)[names(start) == "endemic"] <- "h" names(start)[names(start) == "epidemic"] <- "e" names(start)[names(start) == "siaf"] <- "e.siaf" names(start)[names(start) == "tiaf"] <- "e.tiaf" start <- unlist(start, recursive=FALSE, use.names=TRUE) } if (!is.vector(start, mode="numeric") || is.null(names(start))) stop("parameter values must be named and numeric") return(start) } surveillance/R/stsBP.R0000644000176200001440000000367712672237564014360 0ustar liggesusers###################################################################### # initialize-method for "stsBP" objects ###################################################################### fix.dimnamesBP <- function (x) { dimnames(x@ci) <- dimnames(x@lambda) <- c(dimnames(x@observed), list(NULL)) x } init.stsBP <- function(.Object, ..., ci, lambda) { .Object <- callNextMethod() # use initialize,sts-method ## NOTE: we cannot have a validity check for the dimensions of ci and lambda ## in the class definition of "stsBP" since we could not easily get ## new("stsBP") to be a valid object. Thus, we will directly check here. 
## check/set extra stsBP-slots dimObserved <- dim(.Object@observed) if (missing(ci)) { .Object@ci <- array(NA_real_, dim = c(dimObserved, 2L)) } else { dimCI <- dim(.Object@ci) if (length(dimCI) != 3 || any(dimCI != c(dimObserved, 2L))) stop("dim(ci) = (", paste0(dimCI, collapse=","), ")") } if (missing(lambda)) { .Object@lambda <- array(NA_real_, dim = c(dimObserved, 0L)) } else { dimLambda <- dim(.Object@lambda) if (length(dimLambda) != 3 || !identical(dimLambda[1:2], dimObserved)) stop("dim(lambda) = (", paste0(dimLambda, collapse=","), ")") } ## fix dimnames of extra stsBP-slots .Object <- fix.dimnamesBP(.Object) return(.Object) } setMethod("initialize", "stsBP", init.stsBP) ###################################################################### # Special coerce method to account for consistent dimensions ###################################################################### setAs(from = "sts", to = "stsBP", function (from) { res <- new("stsBP", from, ci = array(NA_real_, dim = c(dim(from@observed), 2L)), lambda = array(NA_real_, dim = c(dim(from@observed), 0L))) fix.dimnamesBP(res) }) surveillance/R/gpc.poly-methods.R0000644000176200001440000000401412237174420016467 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Methods for gpc.poly polygons ### These are no longer used by the surveillance package itself ### ### Copyright (C) 2009-2013 Sebastian Meyer ### $Revision: 666 $ ### $Date: 2013-11-08 15:45:36 +0100 (Fri, 08. Nov 2013) $ ################################################################################ ### Redefinition of gpclib's scale.poly method to also do centering scale.gpc.poly <- function (x, center = c(0,0), scale = c(1,1)) { x@pts <- lapply(x@pts, function (p) { p$x <- (p$x-center[1]) / scale[1] p$y <- (p$y-center[2]) / scale[2] p }) x } ### Same as inside.owin for gpc.poly (using point.in.polygon from package sp) inside.gpc.poly <- function(x, y = NULL, polyregion, mode.checked = FALSE) { xy <- xy.coords(x, y, recycle=FALSE) N <- length(xy$x) # check for each polygon of polyregion if points are in the polygon locations <- sapply(polyregion@pts, function (poly) { pip <- point.in.polygon(xy$x, xy$y, poly$x, poly$y, mode.checked = mode.checked) if (poly$hole) { # if point is inside a hole then attribute -Inf ifelse(pip == 1, -Inf, 0) } else pip }) if (N == 1) sum(locations) > 0 else .rowSums(locations, N, length(polyregion@pts)) > 0 } ### Maximum extent of a gpc.poly (i.e. maximum distance of two vertices) diameter.gpc.poly <- function (object) { pts <- object@pts x <- unlist(lapply(pts, "[[", "x"), use.names=FALSE) y <- unlist(lapply(pts, "[[", "y"), use.names=FALSE) ## The diagonal of the bounding box provides a fast upper bound ##ext <- sqrt(diff(range(x))^2 + diff(range(y))^2) xy <- cbind(x,y) dists <- dist(xy) max(dists) } surveillance/R/nowcast.R0000644000176200001440000014061213663537357014776 0ustar liggesusers###################################################################### # Function to perform nowcast at a specific day "now" using a procedure # which takes truncation of the available observations into # account. The full documentation is available in the nowcast.Rd file. 
#
# Author: Michael Hoehle
#
# Parameters:
#  now - a Date object representing today
#  when - a vector of Date objects representing the days to do the forecast
#         for. All elements of when have to be smaller than or equal to "now".
#  data - the database (data frame) containing columns dEventCol and
#         dReportCol, which contain the date of the event and the date when
#         the report arrived in the database.
#  dEventCol - name of the column in data containing the time of event
#              occurrence
#  dReportCol - name of the column in data containing the time of report
#               arrival
#  method - which method to use
#  D - maximum delay to consider
#  m - moving window for delay estimation
#  control - a list containing the following arguments
#            * gd.prior.kappa - prior for delay is symmetric Dirichlet
#              with concentration parameter gd.prior.kappa
#
# Note: As predictions are done simultaneously, the entire vector of
# observations is cast. Then the subset specified in "when" is returned.
#
# Returns:
#  stsNC object with reporting triangle, delay estimate and prediction
#  interval in the appropriate slots.
#
# Todo:
#  * yt.support to N.tInf support in nowcast??
#  * bayes.notrunc and bayes.notrunc.bnb could become one code segment
#  * Enable user to provide reporting triangle directly.
#  * Function should work for weekly and monthly data as well
######################################################################

nowcast <- function(now, when, data, dEventCol="dHospital", dReportCol="dReport",
                    method=c("bayes.notrunc","bayes.notrunc.bnb","lawless",
                             "bayes.trunc","unif","bayes.trunc.ddcp"),
                    aggregate.by="1 day",
                    D=15, m=NULL,
                    m.interpretation=c("hoehle_anderheiden2014", "lawless1994"),
                    control=list(
                        dRange=NULL, alpha=0.05, nSamples=1e3,
                        N.tInf.prior=c("poisgamma","pois","unif"),
                        N.tInf.max=300, gd.prior.kappa=0.1,
                        ddcp=list(ddChangepoint=NULL,
                                  cp_order=c("zero","one"),
                                  Wextra=NULL,
                                  logLambda=c("iidLogGa","tps","rw1","rw2"),
                                  responseDistr=c("poisson", "negbin"),
                                  mcmc=c(burnin=2500, sample=10000, thin=1,
                                         adapt=1000, store.samples=FALSE)),
                        score=FALSE, predPMF=FALSE)) {

  #Check if the runjags package is available (required for bayes.trunc.ddcp to work!)
  if ("bayes.trunc.ddcp" %in% method) {
    if (!requireNamespace("runjags", quietly=TRUE)) {
      stop("The \"bayes.trunc.ddcp\" method requires the runjags package to be installed, which is available from CRAN.")
    }
  }

  if ((!inherits(now,"Date")) | (length(now)>1)) {
    stop("The parameter 'now' has to be a single Date.")
  }

  #Check that all when_i <= now
  if (!all(when<=now)) {
    stop("Assertion when <= now failed.")
  }

  #Check that the specified methods are all valid
  method <- match.arg(method, c("bayes.notrunc","bayes.notrunc.bnb","lawless",
                                "bayes.trunc","unif","bayes.trunc.ddcp"),
                      several.ok=TRUE)

  ######################################################################
  # Time aggregation. Make sure it's a valid aggregation level and
  # move all dates to the "first" of this level.
  # @hoehle: Should work for day, week and month. Quarter and year are
  # not supported atm.
  ######################################################################
  aggregate.by <- match.arg(aggregate.by, c("1 day","1 week", "1 month"), several.ok=FALSE)
  epochInPeriodStr <- switch(aggregate.by, "1 day"="1", "1 week"="%u", "1 month"="%d")

  if (aggregate.by != "1 day") {
    warning("Moving dates to the first of each epoch.")
    #Move dates back to the first of each epoch unit
    for (colName in c(dEventCol, dReportCol)) {
      data[,colName] <- data[,colName] - as.numeric(format(data[,colName], epochInPeriodStr)) + 1
    }
    #Check now and when
    if (!all( format( c(now,when), epochInPeriodStr) == 1)) {
      stop("The variables 'now' and 'when' need to be at the first of each epoch.")
    }
  }

  #Choose the correct difference function
  if (aggregate.by == "1 day") {
    timeDelay <- function(d1,d2) { as.numeric(d2-d1) }
  }
  if (aggregate.by == "1 week") {
    #Count the number of full weeks
    timeDelay <- function(d1,d2) { floor(as.numeric(difftime(d2,d1,units="weeks"))) }
  }
  if (aggregate.by == "1 month") {
    timeDelay <- function(d1,d2) {
      #Helper function from http://stackoverflow.com/questions/1995933/number-of-months-between-two-dates
      monnb <- function(d) {
        lt <- as.POSIXlt(as.Date(d, origin="1900-01-01"))
        lt$year*12 + lt$mon
      }
      #Count the number of full months
      monnb(d2) - monnb(d1)
    }
  }

  ## Check the value of the m interpretation
  m.interpretation <- match.arg(m.interpretation, c("hoehle_anderheiden2014", "lawless1994"))
  if (!is.null(m) & (method == "lawless") & (m.interpretation != "lawless1994")) {
    warning("Selected method is Lawless (1994), but the interpretation of m is a horizontal cut in the reporting triangle (as in Hoehle and an der Heiden (2014)) and not as in Lawless (1994).")
  }
  if (!is.null(m) & (method != "lawless") & (m.interpretation == "lawless1994")) {
    stop("The selected nowcasting method only works with m.interpretation = 'hoehle_anderheiden2014'")
  }

  ######################################################################
  #If dRange is specified, set dMin and dMax accordingly.
  #Otherwise use the range of the data as limits.
  ######################################################################
  if (is.null(control[["dRange",exact=TRUE]])) {
    dMin <- min(data[,dEventCol], na.rm=TRUE)
    dMax <- max(data[,dEventCol], na.rm=TRUE)
  } else {
    dMin <- control$dRange[1]
    dMax <- control$dRange[length(control$dRange)]
  }
  #@hoehle - check that dRange is proper
  if (!all( format( c(dMin, dMax), epochInPeriodStr) == 1)) {
    stop("The variables in dRange need to be at the first of each epoch.")
  }
  dateRange <- seq(dMin, dMax, by=aggregate.by)

  ######################################################################
  # Additional manipulation of the control arguments
  ######################################################################
  #Check if alpha is specified
  if (is.null(control[["alpha",exact=TRUE]])) {
    control$alpha <- 0.05
  }
  if (is.null(control[["N.tInf.prior",exact=TRUE]])) {
    control$N.tInf.prior <- "unif"
  }
  if (is.null(control[["N.tInf.max",exact=TRUE]])) {
    control$N.tInf.max <- 300
  }
  if (is.null(control[["gd.prior.kappa",exact=TRUE]])) {
    control$gd.prior.kappa <- 0.1
  }
  if (is.null(control[["nSamples",exact=TRUE]])) {
    control$nSamples <- 1e3
  }
  if (is.null(control[["score",exact=TRUE]])) {
    control$score <- FALSE
  }

  #Checks for the bayes.trunc.ddcp procedure. If used, make sure the
  #parameters are set up.
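  #For illustration, a hedged sketch of what such a specification might look
  #like (the changepoint date below is purely hypothetical):
  # control$ddcp <- list(ddChangepoint = as.Date("2011-05-23"),
  #                      logLambda = "tps", cp_order = "zero",
  #                      mcmc = c(burnin = 2500, sample = 10000, thin = 1,
  #                               adapt = 1000, store.samples = FALSE))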
if (is.null(control[["ddcp",exact=TRUE]])) { control$ddcp <- list(ddChangepoint=NULL, cp_order="zero", Wextra=NULL, logLambda=c("iidLogGa","tps","rw1","rw2"), tau.gamma=1, response.distr=c("poisson"), mcmc=c(burnin=2500, sample=10000, thin=1, adapt=1000, store.samples=FALSE)) } #Check form of logLambda if (is.null(control[["ddcp",exact=TRUE]][["logLambda",exact=TRUE]])) { control[["ddcp"]] <- modifyList(control[["ddcp",exact=TRUE]], list(logLambda="iidLogGa")) } else { control[["ddcp"]]$logLambda <- match.arg(control[["ddcp"]][["logLambda"]],c("iidLogGa","tps","rw1","rw2")) } #Check breakpoint to use in case of bayes.trunc.ddcp (delay distribution with breakpoint) if (is.null(control[["ddcp",exact=TRUE]][["ddChangepoint",exact=TRUE]]) || (!class(control[["ddcp",exact=TRUE]][["ddChangepoint",exact=TRUE]]) == "Date")) { stop("Please specify a Date object as changepoint in control$ddChangepoint.") } else { if (any(control[["ddcp",exact=TRUE]][["ddChangepoint"]] > now)) { warning("Some of the elements in ddChangepoint are beyond 'now'. This might be problematic!") } } ##Check cp_order variable if (!is.null(control[["ddcp",exact=TRUE]][["cp_order",exact=TRUE]])) { control[["ddcp"]]$cp_order <- match.arg(control[["ddcp"]][["cp_order"]],c("zero","one")) } else { control[["ddcp"]]$cp_order <- "zero" } ##Check Wextra argument if (!is.null(control[["ddcp",exact=TRUE]][["Wextra",exact=TRUE]])) { if (!is.array(control[["ddcp",exact=TRUE]][["Wextra",exact=TRUE]])) { stop("Wextra is not an array.") } } #Make this an accessible variable ddChangepoint <- control$ddcp$ddChangepoint Wextra <- control$ddcp$Wextra ncol_Wextra <- if (is.null(Wextra)) 0 else ncol(Wextra) colnames_Wextra <- if (is.null(Wextra)) NULL else colnames(Wextra) cp_order <- control$ddcp$cp_order #Response distribution in the model if (is.null(control[["ddcp",exact=TRUE]][["response.distr",exact=TRUE]])) { control[["ddcp"]]$response.distr <- "poisson" } else { stopifnot(control[["ddcp",exact=TRUE]][["response.distr",exact=TRUE]] %in% c("poisson", "negbin")) } #Precision parameter for gamma coefficients for hazard delay distribution if (is.null(control[["ddcp",exact=TRUE]][["tau.gamma",exact=TRUE]])) { control[["ddcp"]]$tau.gamma <- 1 } #Prior for eta ~ [eta.mu, eta.prec] if (is.null(control[["ddcp",exact=TRUE]][["eta.mu",exact=TRUE]])) { control[["ddcp"]]$eta.mu <- rep(0,length(ddChangepoint) + ncol_Wextra) } else { if (length(control[["ddcp"]]$eta.mu) != length(ddChangepoint) + ncol_Wextra) { stop("length of eta.mu is different from the number of change points in 'ddChangepoint'.") } } if (is.null(control[["ddcp",exact=TRUE]][["eta.prec",exact=TRUE]])) { if (length(ddChangepoint) == 1) { control[["ddcp"]]$eta.prec <- 1 } else { control[["ddcp"]]$eta.prec <- diag(rep(1, length(ddChangepoint))) } } else { #Custom option if (length(ddChangepoint) == 1) { if (length(control[["ddcp"]]$eta.prec) != 1) { stop("length of eta.prec is different from the number of change points in 'ddChangepoint'.") } else { if (!( (nrow(control[["ddcp"]]$eta.prec) == (ncol(control[["ddcp"]]$eta.prec))) & (nrow(control[["ddcp"]]$eta.prec) == length(ddChangepoint)))) { stop(paste0("dimension ", dim(control[["ddcp"]]$eta.prec), " of eta.prec is different from the number of change points in 'ddChangepoint' (",length(ddChangepoint),".")) } } } } #Check MCMC options if (is.null(control[["ddcp",exact=TRUE]][["responseDistr",exact=TRUE]])) { control[["ddcp"]][["responseDistr"]] <- "poisson" } else { # Check that it's a valid response distribution 
stopifnot(control[["ddcp"]][["responseDistr"]] %in% c("negbin","poisson")) } #Check MCMC options if (is.null(control[["ddcp",exact=TRUE]][["mcmc",exact=TRUE]])) { control[["ddcp"]][["mcmc"]] <- c(burnin=2500,sample=10000,thin=1, adapt=1000, store.samples=FALSE) } else { if (!all(c("burnin","sample","thin", "adapt", "store.samples") %in% names(control[["ddcp",exact=TRUE]][["mcmc",exact=TRUE]]))) { stop("mcmc option list needs names 'burnin', 'sample', 'thin', 'adapt' and 'store.samples'.") } } #done with options for bayes.trunc.ddcp } ###################################################################### # Do preprocessing of the data ###################################################################### hasNADates <- is.na(data[,dEventCol]) | is.na(data[,dReportCol]) data <- data[!hasNADates,] message(paste0("Removed ",sum(hasNADates), " records due to NA dates.")) #Create a column containing the reporting delay using the timeDelay #function data$delay <- timeDelay(data[,dEventCol],data[,dReportCol]) #Handle delays longer than D. #@hoehle - handle that the unit might not just be days #notThereButDThere <- (data[,dReportCol] > now) & ((data[,dEventCol]) + D <= now) notThereButDThere <- (timeDelay(data[,dReportCol],now) < 0) & (timeDelay(data[,dEventCol],now) >= D) if (sum(notThereButDThere,na.rm=TRUE)) { warning(paste(sum(notThereButDThere,na.rm=TRUE), " observations > \"now\" due to a delay >D. If delay cut to D they would be there."),sep="") } #Which observations are available at time s #@hoehle: data.sub <- data[ na2FALSE(data[,dReportCol] <= now),] data.sub <- data[ na2FALSE(timeDelay(data[,dReportCol],now) >= 0),] if (nrow(data.sub)==0) { stop(paste("No data available at now=",now,"\n")) } #Create an sts object containing the observed number of counts until s sts <- linelist2sts(data.sub,dEventCol,aggregate.by=aggregate.by,dRange=dateRange) sts <- as(sts,"stsNC") #Create an extra object containing the "truth" based on data sts.truth <- linelist2sts(data, dEventCol, aggregate.by=aggregate.by, dRange=dateRange) #List of scores to calculate. Can become an argument later on scores <- c("logS","RPS","dist.median","outside.ci") #Initialize scoring rule results - to be saved in control slot -- dirty SR <- array(0,dim=c(nrow(sts),length(method),length(scores))) #List for storing the predictive PMFs. if (is.null(control[["predPMF",exact=TRUE]])) { control$predPMF <- FALSE } #Prepare a list of different estimated of the delay CDF delayCDF <- list() ###################################################################### # Done manipulating the control list with default arguments ###################################################################### sts@control <- control #Save truth sts@truth <- sts.truth #Reserve space for returning the predictive PMFs sts@predPMF <- list() ###################################################################### # Consistency checks ###################################################################### #Check if support of N.tInf is large enough if (2*control$N.tInf.max < max(observed(sts),na.rm=TRUE)) { warning("N.tInf.max appears too small. 
Largest observed value is more than 50% of N.tInf.max, which -- in case this number is extrapolated -- might cause problems.\n")
  }

  #Create a vector representing the support of N.tInf
  N.tInf.support <- 0:control$N.tInf.max

  #======================================================================
  #======================================================================
  # Build reporting triangle and derived parameters for delay
  #======================================================================
  #======================================================================
  cat("Building reporting triangle...\n")

  #Time origin t_0
  t0 <- min(dateRange)
  #Sequence from time origin until now (per day??)
  t02s <- seq(t0, now, by=aggregate.by)
  #Maximum time index
  T <- length(t02s)-1

  #Check if the maximum delay is longer than the available time series
  if (D>T) {
    stop("D>T. Cannot estimate the long delays.")
  }

  #How many observations to take for estimating the delay distribution
  if (is.null(m)) { m <- T }
  if (m<1) { stop("Assertion m>=1 not fulfilled.") }

  #Define the observation triangle
  n <- matrix(NA, nrow=T+1, ncol=T+1, dimnames=list(as.character(t02s), NULL))

  #Loop over time points. (more efficient than delay and then t)
  for (t in 0:T) {
    #Extract all reports happening at time (index) t.
    #@hoehle: data.att <- data.sub[na2FALSE(data.sub[,dEventCol] == t02s[t+1]), ]
    data.att <- data.sub[na2FALSE(timeDelay(data.sub[,dEventCol], t02s[t+1])) == 0, ]
    #Loop over all delays
    for (x in 0:(T-t)) {
      #Count number with specific delay
      n[t+1,x+1] <- sum(data.att[,"delay"] == x)
    }
  }
  cat("No. cases: ", sum(n, na.rm=TRUE), "\n")

  #Handle delays longer than D
  #@hoehle: Not done! Just fix them to have delay D.
  nLongDelay <- apply(n[,(D+1)+seq_len(T-D), drop=FALSE], 1, sum, na.rm=TRUE)
  if (any(nLongDelay>0)) {
    warning(paste(sum(nLongDelay), " cases with a delay longer than D=", D,
                  " days forced to have a delay of D days.\n", sep=""))
    n <- n[,1:(D+1)]
    n[,(D+1)] <- n[,(D+1)] + nLongDelay
  } else {
    #No problems. Just extract up to D+1
    n <- n[,1:(D+1)]
  }

  #Calculate n.x and N.x as in (2.7) and (2.8) and Fig.2 of Lawless (1994)
  #Note: the moving window definition differs from the one in the Lawless article.
  n.x <- rep(0, times=D+1)
  N.x <- rep(0, times=D+1)

  ##Compute n.x and N.x
  for (x in 0:D) {
    ##Define time of occurrence sliding window index set (see documentation)
    if (m.interpretation == "hoehle_anderheiden2014") {
      toc_index_set <- max(0,T-m):(T-x)
    } else { #hoehle: Lawless definition is max(0,T-m-x)
      toc_index_set <- max(0,T-m-x):(T-x)
    }
    ## Count
    for (t in toc_index_set) {
      #cat("x=",x,"\tt=",t,":\n")
      n.x[x+1] <- n.x[x+1] + n[t+1,x+1]
      for (y in 0:x) {
        #cat("x=",x,"\tt=",t,"\ty=",y,":\n")
        N.x[x+1] <- N.x[x+1] + n[t+1,y+1]
      }
    }
  }
  cat("No. cases within moving window: ", sum(n.x, na.rm=TRUE), "\n")

  #Available observations at time T, definition of N(t;T) on p.17.
  N.tT <- sapply(0:T, function(t) sum(n[t+1, 0:min(D+1,(T-t)+1)]))
  #Truth - already in another object. Delete??
  N.tInf <- table( factor(as.character(data[,dEventCol]), levels=as.character(t02s)))

  #Store the results of the reporting triangle in the control slot together
  #with additional attributes for fast access of, e.g., summaries or
  #defining variables.
reportingTriangle <- n attr(reportingTriangle, "n.x") <- n.x attr(reportingTriangle, "N.x") <- N.x attr(reportingTriangle, "N.tT") <- N.tT attr(reportingTriangle, "N.tInf") <- N.tInf attr(reportingTriangle, "T") <- T attr(reportingTriangle, "D") <- D attr(reportingTriangle, "t02s") <- t02s sts@reportingTriangle <- reportingTriangle #====================================================================== # Calculations are jointly for all t values. #====================================================================== #List of casts each containing a table 0..N.tInf.max with the PMF Ps <- list() #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # # Lawless (1994) method without adjustment for overdispersion # #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% if ("lawless" %in% method) { #Hazard function estimates, i.e. g-function estimate as in (2.9) #of Lawless (1994). NAs are set to zero (consequences??) g.hat <- ifelse( !is.na(n.x/N.x), n.x/N.x, 0) #Force g.hat(0)=1 as stated just below (2.1) g.hat[1] <- 1 #Check how the estimated CDF looks #F <- NULL ; for (d in 0:D) { i <- d+seq_len(D-d) ; F[d+1] <- prod(1-g.hat[i+1]) } #plot(0:D,F) #Compute weights Wt.hat as in eqn. (2.13) of Lawless (1994). Use T1=Inf. #Note: Wt.hat estimates F_t(T-t). T1 <- Inf What.t <- sapply(0:T, function(t) { if (t 0) { CDF <- c(0,ltruncpnorm(N.tInf.support, mean=Nhat.tT1[i], sd=sqrt(Vhat.Zt[i]),at=N.tT[i])) PMFs[,i] <- diff(CDF) } else { PMFs[,i] <- (N.tInf.support == Nhat.tT1[i])*1 } } Ps[["lawless"]] <- PMFs } #end lawless procedure #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # # Bayesian method (simple model, clever sampling -> no MCMC) # #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #Part jointly for both bayes and bayes.notrunc if (("bayes.trunc" %in% method) | ("bayes.notrunc" %in% method)) { cat("bayes prep...\n") ###################################################################### # Prior of N(t,\infty) ###################################################################### N.tInf.prior <- control$N.tInf.prior #Extract prior parameters from prior choice if (N.tInf.prior == "pois") { lambda <- attr(N.tInf.prior,"lambda",exact=TRUE) } else { if (N.tInf.prior == "poisgamma") { #Find size parameters such that mean variance is as target. var.prior <- function(size.prior) { mean.prior + mean.prior^2/size.prior } #If mean & var specified if (all(c("mean.lambda","var.lambda") %in% names(attributes(N.tInf.prior)))) { mean.prior <- attr(N.tInf.prior,"mean.lambda",exact=TRUE) var.prior.target <- attr(N.tInf.prior,"var.lambda",exact=TRUE) size.prior <- uniroot( function(size.prior) { var.prior(size.prior) - var.prior.target},interval=c(1e-12,50))$root #Check result cat("(E,V) of prior for lambda = (",paste(c(mean.prior,var.prior(size.prior)),collapse=","),")\n") } else { stop("mean.lambda and var.lambda not part of prior specification") } } else { if (N.tInf.prior == "unif") { N.tInf.prior.max <- attr(N.tInf.prior,"N.tInf.prior.max",exact=TRUE) } else { #No option applied stop("Not a valid prior!") } } } ###################################################################### # Define function to generate PMF for max(0,T-D),..,T by sampling. # # Parameters: # alpha.star, beta.star - vector containing the posterior GD params ###################################################################### pmfBySampling <- function(alpha.star, beta.star) { #Sample from posterior distribution, i.e. 
sample from the reverse distribution #and reverse result p.sample <- rgd(control$nSamples,alpha.star,beta.star)[,(length(alpha.star)+1):1] #All the time points where extrapolation is to be done tSet <- max(0,(T-D)):T ###################################################################### # Procedure to generate nowcasts of all time points up to T-D,...,T. # This is based on the posterior samples available in p.sample. # Current code adds up the PMF tables instead of a pure sample based # procedure and also prevents PMF=0 better than tabulating the samples. ###################################################################### N.tT1.pred <- array(NA, dim=c(dim(p.sample)[1],control$N.tInf.max+1,dim(p.sample)[2]),dimnames=list(NULL,seq_len(control$N.tInf.max+1)-1L,tSet)) for (j in 1:control$nSamples) { #Extract delay PMF from sample p <- p.sample[j,] #Proportion reported up to x, x=0,..,T F <- c(rep(1,T-D),rev(cumsum(p))) #Guard against numerical instability: ensure that not larger than 1. F <- ifelse(F>1,1,F) #Loop over all time points to nowcast for (i in 1:length(tSet)) { t <- tSet[i] N.tT1.pred[j,,i] <- switch(N.tInf.prior, "poisgamma"=dpost.bnb(N.tT[t+1],sumpd=F[t+1],mu=mean.prior,size=size.prior,N.tInf.max=control$N.tInf.max)) } } #Average the PMFs as in Step (2) of the algorithm PMF <- apply(N.tT1.pred,MARGIN=c(2,3),mean) #Add part, where no prediction needs to be done if (T-D>0) { #Empty PMFs determined <- matrix(0,nrow=control$N.tInf.max+1,ncol=T-D-1+1) #Add "1" entry at the observed for (t in 0:(T-D-1)) { determined[N.tT[t+1]+1,t+1] <- 1 } PMF <- cbind(determined,PMF) } return(PMF) } #done definition of pmfBySampling } if ("bayes.trunc" %in% method) { cat("bayes.trunc...\n") ###################################################################### #Prior of reporting delay as parameters of generalized Dirichlet prior ###################################################################### #Define symmetric dirichlet as prior, just as in the other case alpha.prior <- rep(control$gd.prior.kappa, D) beta.prior <- rep(0,D) beta.prior[D] <- control$gd.prior.kappa for (i in (D-1):1) { beta.prior[i] <- alpha.prior[i+1] + beta.prior[i+1] } ###################################################################### # Posterior section ###################################################################### #Deduce posterior distribution of delay distribution, i.e. it is again #a generalized Dirichlet alpha <- beta <- rep(NA,D) for (d in 0:(D-1)) { alpha[d+1] <- n.x[D-d+1] ##Note: +1 coz index 1 is delay 0. beta[d+1] <- N.x[D-d+1] - n.x[D-d+1] } #Check if there are any points without data and warn about it. if (any(alpha + beta == 0)) { warning("The delays ",paste(D-which(alpha+beta==0)-1,collapse=",")," have no observations. Results might be instable and depend all on prior.") } #Add up. Note: Delay zero (i.e. 
element D+1) is ignored as this is
    #not modelled explicitly by the GD distribution (sum to 1 constraints)
    alpha.star <- alpha.prior + alpha
    beta.star <- beta.prior + beta

    #Compute the expectation of the GD distribution and store this as the delay
    delayCDF[["bayes.trunc"]] <- cumsum(rev(Egd(alpha.star, beta.star)))

    #Save result
    Ps[["bayes.trunc"]] <- pmfBySampling(alpha.star, beta.star)
  } # end "bayes.trunc" %in% method

  #======================================================================
  # Bayesian version which ignores truncation
  #======================================================================
  if ("bayes.notrunc" %in% method) {
    cat("bayes.notrunc...\n")

    ######################################################################
    # Prior section
    ######################################################################
    alpha.prior <- rep(control$gd.prior.kappa, D) #symmetric dirichlet
    beta.prior <- rep(0,D)
    beta.prior[D] <- control$gd.prior.kappa
    for (i in (D-1):1) {
      beta.prior[i] <- alpha.prior[i+1] + beta.prior[i+1]
    }

    ######################################################################
    # Posterior section
    ######################################################################
    #Deduce the posterior of the delay distribution, i.e. it is again
    #a generalized Dirichlet
    alpha <- beta <- rep(NA,D)
    for (d in 0:(D-1)) {
      alpha[d+1] <- n.x[D-d+1]
      beta[d+1]  <- sum(n.x[D - (d+1):D + 1])
    }
    #Check if there are any points without data and warn about it.
    if (any(alpha + beta == 0)) {
      warning("The delays ", paste(D-which(alpha+beta==0)-1, collapse=","),
              " have no observations. Results might be unstable and depend entirely on the prior.")
    }

    #Posterior parameters.
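    #(Conjugacy: the posterior parameters of the generalized Dirichlet are
    # simply the prior concentration parameters plus the observed counts.)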
    alpha.star <- alpha.prior + alpha
    beta.star <- beta.prior + beta
    #Check that it's an ordinary Dirichlet
    for (i in (D-1):1) {
      if (!isTRUE(all.equal(beta.star[i], alpha.star[i+1] + beta.star[i+1]))) {
        warning("Posterior at i=",i," is not an ordinary Dirichlet as it's supposed to be.")
      }
    }

    #Save the resulting delay distribution (i.e. no truncation adjustment)
    delayCDF[["bayes.notrunc"]] <- cumsum(rev(Egd(alpha.star, beta.star)))

    #Allocate PMF to return
    PMF <- matrix(0, nrow=control$N.tInf.max+1, ncol=length(max(0,(T-D)):T))

    #Concentration parameter vector of the ordinary Dirichlet distribution
    #Note: the alpha.star vector is reversed (shortest delay last).
    alpha <- rev(c(alpha.star, beta.star[length(beta.star)]))
    #consistency check
    if (!isTRUE(all.equal(rev(Egd(alpha.star, beta.star)), alpha/sum(alpha)))) {
      stop("Problem. GD and Dirichlet do not correspond...")
    }

    tSet <- max(0,(T-D)):T
    for (i in 1:length(tSet)) {
      t <- tSet[i]
      alpha.i <- cumsum(alpha)[T-t+1]
      beta.i <- sum(alpha) - alpha.i
      if (T-t==D) {
        PMF[,i] <- ifelse( N.tInf.support == N.tT[t+1], 1, 0)
      } else {
        #Calculate PMF knowing the q ~ Beta( , ) by the aggregation
        #property.
        #Note: Vector N.tT starts at time zero, i.e. time T corresponds to T+1
        PMF[,i] <- dbnb( N.tInf.support - N.tT[t+1], n=N.tT[t+1]+1,
                         alpha=alpha.i, beta=beta.i)
      }
    } #done looping over all time points

    #Add part, where no prediction needs to be done
    if (T-D>0) {
      #Empty PMFs
      determined <- matrix(0, nrow=control$N.tInf.max+1, ncol=T-D-1+1)
      #Add "1" entry at the observed
      for (t in 0:(T-D-1)) {
        determined[N.tT[t+1]+1, t+1] <- 1
      }
      PMF <- cbind(determined, PMF)
    }
    Ps[["bayes.notrunc.bnb"]] <- PMF
  } # end bayes.notrunc.bnb

  ######################################################################
  # Fully Bayes version with MCMC
  ######################################################################
  if ("bayes.trunc.ddcp" %in% method) {
    #Allocate result
    PMF <- matrix( 0, ncol=(T+1), nrow=control$N.tInf.max+1)

    #Fix seed value of the JAGS RNG for each chain
    n.chains <- 3
    init <- lapply(1:n.chains, function(i) {
      list(.RNG.name="base::Mersenne-Twister", .RNG.seed=i*10)
    })

    #Make design matrix for a quadratic TPS spline in time
    makeTPSDesign <- function(T, degree=2) {
      nbeta <- degree + 1
      X <- matrix(NA, ncol=nbeta, nrow=T+1)
      for (t in 0:T) {
        #Form a centered time covariate
        t.centered <- t - T/2
        for (pow in 0:degree) {
          X[t+1, pow+1] <- t.centered^(pow)
        }
      }

      #Make the knot points evenly spaced between 0,T not including these points
      knots <- seq(0, T, length=min(round(T/6)+2, 22))
      knots <- knots[-c(1, length(knots))]
      #Remove knots which are beyond T-maxDelay/2
      knots <- knots[knots <= T-D/2]
      knots <- knots - T/2
      nknots <- length(knots)

      #Penalty as REs - setup design matrix
      Z <- matrix(NA, nrow=T+1, ncol=length(knots))
      for (t in 0:T) {
        t.center <- t - T/2
        for (k in 1:nknots) {
          Z[t+1, k] <- pmax((t.center-knots[k]), 0)^degree
        }
      }
      return(list(X=X, Z=Z, knots=knots, nknots=nknots, nbeta=nbeta))
    }
    tps <- makeTPSDesign(T=T, degree=2)

    #Design matrix for the logistic discrete time hazard model containing
    #changepoints. Could be extended such that the user provides W.
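    #For orientation: W has dimension (T+1) x (#changepoints + ncol(Wextra)) x (D+1),
    #i.e. for each time of occurrence t, covariate, and delay d it holds the
    #covariate value associated with calendar time t+d.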
W <- array(NA, dim=c(T+1, length(ddChangepoint) + ncol_Wextra, D+1), dimnames=list(as.character(t02s), c(as.character(ddChangepoint),colnames_Wextra),paste("delay",0:D,sep=""))) for (t in 0:T) { for (i in 1:length(ddChangepoint)) { # Shape design matrix for change-points if (cp_order == "zero") { W[t+1, i, ] <- as.numeric( (t02s[t+1] + 0:D) >= ddChangepoint[i]) } else if (cp_order == "one") { W[t+1, i, ] <- pmax(0, as.numeric( (t02s[t+1] + 0:D) - ddChangepoint[i])) } } # Add additional effects as part of the design matrix if (ncol_Wextra > 0) { W[t + 1, length(ddChangepoint) + 1:ncol(Wextra), ] <- Wextra[t+1,, ] } } #Priors. Uniform on the delays D.prime <- round( D/2-0.4)+1 p.prior <- rep(1/(D.prime+1), D.prime+1) mu.gamma <- qlogis( p.prior[1]) for (d in 1:(D.prime-1)) { mu.gamma <- c(mu.gamma, qlogis( p.prior[d+1] / (1-sum(p.prior[1:d])))) } tau.gamma <- rep(control$ddcp$tau.gamma,times=D.prime) #Prepare data for JAGS jagsData <- list(#Data rT=n,T=T+1,m=m+1,maxDelay=D, #Time dependent logistic discrete hazard model W=W, eta.mu=control$ddcp$eta.mu, eta.prec=control$ddcp$eta.prec, mu.gamma=mu.gamma, tau.gamma=tau.gamma, #Epidemic curve alpha.lambda=2500/3000,beta.lambda=50/3000, #Spline related stuff X=tps$X,Z=tps$Z,nknots=tps$nknots,beta.mu=rep(0,tps$nbeta),beta.prec=1e-6*diag(tps$nbeta) ) #Select appropriate model (one of: "tps","rw2","iid", "rw" as specified in the options) logLambda.method <- control$ddcp$logLambda responseDistr<- control$ddcp$responseDistr ### browser() #Load the BUGS specification of the Bayesian hierarchical Poisson model bugsModel <- readLines(file.path(path.package('surveillance'), 'jags',"bhpm.bugs")) # Load local file #bugsModel <- readLines(file.path("bhpm.bugs")) bugsModel <- gsub(paste("#<",logLambda.method,">",sep=""),"",bugsModel) bugsModel <- gsub(paste("#<",responseDistr,">",sep=""),"",bugsModel) ##browser() #Problem when eta is scalar (TODO: Improve the solution.) if (length(ddChangepoint) == 1) { #Make eta ~ dnorm( , ) instead of eta ~ dmnorm bugsModel <- gsub("(^[ ]*eta ~ )(dmnorm)","\\1dnorm",bugsModel) #Use eta[1] instead of eta for matrix multiplication bugsModel <- gsub("(eta)(.*%\\*%)","eta\\[1\\]\\2",bugsModel) } #cat(paste(bugsModel,collapse="\n")) bugsFile <- tempfile(pattern = "nowcast-") writeLines(bugsModel, bugsFile) ##browser() ## if (FALSE) { ## #Try to compile the model with ordinary rjags to see if there are any problems ## #before doing 3 chains parallelized using runjags. ## model <- jags.model(bugsFile, ## data = jagsData, ## init=init, #Fix seed value of JAGS as well ## n.chains = n.chains, n.adapt = 100) ## list.samplers(model) ## coda.samples(model,variable.names='logLambda',n.iter=100) ## } ###################################################################### # runjags way -- ToDo: parametrize using control options! 
###################################################################### runjagsMethod <- 'rjparallel' #'rjags' monitor <- c('gamma','eta','logLambda','NtInf', ifelse(control$ddcp$responseDistr == "negbin", "r", NA)) samples.rj <- runjags::run.jags(bugsFile,#bugsModel, monitor = monitor, data=jagsData, n.chains=3, inits = init, burnin = control$ddcp$mcmc["burnin"], sample = control$ddcp$mcmc["sample"], thin = control$ddcp$mcmc["thin"], adapt = control$ddcp$mcmc["adapt"], summarise = FALSE, method=runjagsMethod) #Extract posterior median of discrete survival time delay distribution model parameters dt.surv.samples <- coda::as.mcmc.list(samples.rj, vars = c('gamma','^eta')) post.median <- dt.surv.pm <- apply( as.matrix(dt.surv.samples), 2, median) #Posterior median of the lambda's lambda.post <- exp(apply( as.matrix(coda::as.mcmc.list(samples.rj, vars = c('^logLambda'))), 2, quantile, prob=c(0.025,0.5,0.975))) #Extract posterior median of model parameters gamma.red <- post.median[grep("gamma",names(post.median))] eta <- matrix(post.median[grep("^eta",names(post.median))]) rownames(eta) <- colnames(W) #Map from reduced set to full set gamma <- gamma.red[round( (0:(D-1))/2 - 0.4) + 1] #browser() #Compute the resulting PMF from the model. Possibly put this in a separate function. pmf <- matrix(NA, nrow=nrow(W), ncol=D+1, dimnames=list(as.character(t02s), paste("delay", 0:D, sep=""))) #Determine PMF for (t in 1:length(t02s)) { if (as.character(t02s[t]) %in% rownames(W)) { lin.pred <- ( gamma + t(eta) %*% W[t,,0:D]) pmf[t,] <- haz2pmf(c(plogis(lin.pred),1)) } } #Store result as CDF delayCDF[["bayes.trunc.ddcp"]] <- t(apply(pmf, 1, cumsum)) # Convert to coda compatible output. samples <- coda::as.mcmc.list(samples.rj) # Store model as attribute if(control$ddcp$logLambda != "tps") tps <- NULL # Configure list with model output and store is as an attribute. list_return <- list(post.median=dt.surv.pm,W=W,lambda.post=lambda.post,tps=tps, gamma=gamma, eta=eta) if (control[["ddcp",exact=TRUE]][["mcmc",exact=TRUE]][["store.samples", exact=TRUE]]) { list_return <- modifyList(list_return, list(mcmc_samples = samples)) } attr(delayCDF[["bayes.trunc.ddcp"]],"model") <- list_return # Extract PMFs for (t in 0:T) { #Extract samples related to this time point vals <- as.matrix(samples[,paste("NtInf[",t+1,"]",sep="")]) #PMF PMF[,t+1] <- prop.table(table(factor(vals,levels=0:control$N.tInf.max))) } Ps[["bayes.trunc.ddcp"]] <- PMF } #====================================================================== #A really bad forecast -- the uniform #====================================================================== if ("unif" %in% method) { #Allocate result PMF <- matrix( 0,ncol=(T+1),nrow=control$N.tInf.max+1) #Loop over all time points to nowcast and put U(N.tT[t],Nmax) for (t in 0:T) { #How many values are there in N.tT .. Nmax noVals <- max(0,control$N.tInf.max - N.tT[t+1]) + 1 #PMF at t is 0,...0 (N.tT-1 times), 1/noVals,...,1/noVals PMF[,t+1] <- c(rep(0,N.tT[t+1]),rep(1/noVals,times=noVals)) } Ps[["unif"]] <- PMF } ###################################################################### #Loop over all time points in the vector "when". Only these are #returned. 
  ######################################################################
  idxt <- which(dateRange %in% when)
  for (i in idxt) {
    #Save PMFs if that's requested
    if (control$predPMF) {
      res <- list()
      for (j in 1:length(method)) {
        res[[method[j]]] <- Ps[[method[j]]][,i]
      }
      sts@predPMF[[as.character(dateRange[i])]] <- res
    }

    #Evaluate scoring rules, if requested
    if (control$score) {
      #Infer the true value
      ytinf <- observed(sts.truth)[i,]
      #Evaluate all scores for all predictive distributions
      for (i.P in 1:length(method)) {
        for (i.score in 1:length(scores)) {
          #cat("i=",i," i.P=",i.P," (",method[i.P],") i.score=",i.score,"\n")
          SR[i,i.P,i.score] <- do.call(scores[i.score],
                                       args=list(P=Ps[[method[i.P]]][,i],
                                                 y=ytinf, alpha=control$alpha))
        }
      }
    } #end if control$score

    #Add first nowcast & ci to stsNC slots
    sts@upperbound[i,] <- median(N.tInf.support[which.max( cumsum(Ps[[method[1]]][,i])>0.5)])
    sts@pi[i,,] <- N.tInf.support[c(which.max(cumsum(Ps[[method[1]]][,i]) > control$alpha/2),
                                    which.max(cumsum(Ps[[method[1]]][,i]) > 1-control$alpha/2))]
    dimnames(sts@pi) <- list(as.character(dateRange), NULL,
                             paste( c(control$alpha/2*100,(1-control$alpha/2)*100),"%",sep=""))
  } #end of loop over time points

  #Add scoring rule to output
  if (control$score) {
    dimnames(SR) <- list(as.character(dateRange), method, scores)
    sts@SR <- SR
  }

  ######################################################################
  #Other arguments to save in the control object
  ######################################################################
  sts@control$N.tInf.support <- N.tInf.support
  sts@control$method <- sts@control$name <- method

  #Store variables relevant for the nowcast
  sts@control$D <- D
  sts@control$m <- m
  sts@control$now <- now
  sts@control$when <- when
  sts@control$timeDelay <- timeDelay

  #Store delayCDF object
  sts@delayCDF <- delayCDF

  #For backwards compatibility -- change this in the future TODO!
  sts@control$yt.support <- sts@control$N.tInf.support
  sts@control$y.prior.max <- sts@control$N.tInf.max

  ##Store the call options
  theCall <- list(now=now, when=when, data=data, dEventCol=dEventCol,
                  dReportCol=dReportCol, method=method,
                  aggregate.by=aggregate.by, D=D, m=m)
  sts@control$call <- theCall

  ##Done
  return(sts)
}

######################################################################
# Helper functions
######################################################################

#Helper function
na2FALSE <- function(x) { x[is.na(x)] <- FALSE ; return(x) }

######################################################################
# Logarithmic score
#
# Parameters:
#  P - predictive distribution, given as a vector containing the PMF
#      with support 0,...,N.prior.max
#  y - the actual observation. Can be a vector.
#
# Returns:
#  -log P(y). If y outside 0,..,N.prior.max then -Inf.
######################################################################

logS <- function(P, y, ...) {
  return(ifelse( y>=0 & y<=length(P)-1, -log(P[y+1]), -Inf))
}

######################################################################
# Ranked probability score
#
# Parameters:
#  P - predictive distribution, given as a vector containing the PMF
#      with support 0,...,N.prior.max
#  y - the actual observation. Can be a vector.
#
# Returns:
#  the ranked probability score, i.e. the sum over the support of the
#  squared differences between the predictive CDF and the degenerate
#  CDF 1(y <= k) of the observation.
######################################################################

RPS <- function(P, y, ...) {
  N.support <- 0:(length(P)-1)
  sum( (cumsum(P) - (y <= N.support))^2)
}

#Some other scoring rules which are not proper.
dist.median <- function(P, y, ...)
{
  point.estimate <- which.max(cumsum(P)>=0.5) - 1
  return(abs(point.estimate - y))
}

#0/1 indicator of observed value outside the equal-tailed (1-alpha) CI
outside.ci <- function(P,y,alpha) {
  N.support <- 0:(length(P)-1)
  ci <- N.support[c(which.max(cumsum(P) > alpha/2),which.max(cumsum(P) > 1-alpha/2))]
  ifelse( y>=ci[1] & y<=ci[2], 0, 1)
}

######################################################################
# Helper functions for sampling the predictive distribution
######################################################################

#Unnormalized posterior PMF in the binomial-negative-binomial hierarchy.
#Should work for vectors of N.tInf! Only the kernel parts for N.tInf
#need to be taken into account.
dpost.bnb.unorm <- function(N.tInf, N.tT, sumpd, mu, size) {
  dbinom(N.tT, size=N.tInf, prob=sumpd)*dnbinom(N.tInf, mu=mu,size=size)

  #Direct implementation - appears to be less stable...
  #ifelse(N.tInf >= N.tT,
  #       exp(lgamma(N.tInf+size)-lgamma(N.tInf-N.tT+1) + N.tInf*log( (1-sumpd)*(mu/(mu+size)))),0)

  #Compare the 2
  ## foo.a <- dbinom(N.tT, size=N.tInf, prob=sumpd)*dnbinom(N.tInf, mu=mu,size=size)
  ## foo.b <- ifelse(N.tInf >= N.tT, #& N.tInf <= size,
  ##                 exp(lgamma(N.tInf+size)-lgamma(N.tInf-N.tT+1) + N.tInf*log( (1-sumpd)*(mu/(mu+size)))),0)
  ## plot(foo.a/sum(foo.a))
  ## points(foo.b/sum(foo.b),col="red")
}

#Sample in the binomial-negative-binomial hierarchy
rpost.bnb <- function(n=1, N.tT, sumpd, mu,size, N.tInf.max=1e4) {
  p <- dpost.bnb.unorm(0:N.tInf.max,N.tT=N.tT,sumpd=sumpd, mu=mu,size=size)
  #Set NA values to zero (why would they be NA?)
  #if (is.na(sum(p))) { warning("rpost.bnb: sum is NA") ; browser(p)}
  #Normalizing the distribution is skipped to save time;
  #sample() accepts unnormalized weights anyway
  #p <- p/sum(p)
  #Sample
  sample(0:N.tInf.max, size=n, replace=TRUE, prob=p)
}

#PMF of the predictive distribution in the binomial-negative-binomial
#hierarchy. Returns the entire vector for 0:N.tInf.max.
dpost.bnb <- function(N.tT, sumpd, mu,size, N.tInf.max=1e4) {
  p <- dpost.bnb.unorm(0:N.tInf.max,N.tT=N.tT,sumpd=sumpd, mu=mu,size=size)
  #Set NA values to zero (why would they be NA?)
  #if (is.na(sum(p))) { warning("dpost.bnb: sum is NA") ; browser(p)}
  #Normalize the distribution
  return(p/sum(p))
}

######################################################################
# PMF of the beta-negative binomial distribution
# See Teerapabolarn (2008)
#
# Parameters:
#  k - where to evaluate. Can be a vector.
#
# Returns:
#  PMF.
######################################################################
dbnb <- function(k,n,alpha,beta) {
  #Check if k's outside the support are requested.
  neg <- k<0
  k[neg] <- 0
  #Calculate the density of the beta-negbin. See Teerapabolarn (2008)
  num <- lgamma(n+alpha)+lgamma(k+beta)+lgamma(n+k)+lgamma(alpha+beta)
  den <- lgamma(n+k+alpha+beta)+lgamma(n)+lgamma(k+1)+lgamma(alpha)+lgamma(beta)
  res <- exp(num-den)
  res[neg] <- 0
  return( res)
}

######################################################################
# Convert a discrete time hazard function on 0,...,Dmax to a
# probability mass function.
#
# Parameters:
#  haz - vector with entries for (0,...,Dmax)
# Returns:
#  vector with PMF on 0,...,Dmax.
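#
# A hedged example (commented, not run): a constant discrete hazard of
# 0.5 on 0,...,2 with the final entry forced to one gives
## haz2pmf(c(0.5, 0.5, 0.5, 1))  # c(0.5, 0.25, 0.125, 0.125)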
######################################################################
haz2pmf <- function(haz) {
  PMF <- 0*haz
  for (i in 0:(length(haz)-1)) {
    PMF[i+1] <- haz[i+1] * (1-sum(PMF[seq(i)]))
  }
  return(PMF)
}
surveillance/R/pairedbinCUSUM.R0000644000176200001440000002060313607336043016052 0ustar liggesusers######################################################################
# Compute the ARL for paired binary CUSUM charts as introduced in
# Steiner, Cook and Farewell (1999), Monitoring paired binary surgical
# outcomes, Stats in Med, 18, 69-86.
#
# This code is an R implementation of Matlab code provided by
# Stefan H. Steiner, University of Waterloo, Canada.
#
# Params:
#  p - vector giving the probability of the four different possibilities
#      c((death=0,near-miss=0),(death=1,near-miss=0),
#        (death=0,near-miss=1),(death=1,near-miss=1))
#  w1, w2 - the sample weight vectors for the two CUSUMs (see (2));
#           w1 is the weight vector of the death chart
#           (according to the paper, 2 would be more realistic)
#  h1, h2 - decision barriers for the individual CUSUMs (see (3))
#  h11,h22 - joint decision barriers (see (3))
#  sparse - use the Matrix package
######################################################################
pairedbinCUSUM.runlength <- function(p,w1,w2,h1,h2,h11,h22, sparse=FALSE) {
  #Size of the sparse matrix -- assumption h1>h11 and h2>h22
  mw <- h1*h22+(h2-h22)*h11
  cat("g =",mw+3,"\n")

  #build transition matrix; look at current state as an ordered pair (x1,x2)
  #the size of the matrix is determined by h1, h2, and h11 and h22
  #Look at all 3 possible absorbing conditions
  transm <- matrix(0, mw+3, mw+3)

  #the last rows/columns are the absorbing states, an I_{3x3} block
  #Is this ever used??
  transm[mw+1,mw+1] <- 1
  transm[mw+2,mw+2] <- 1
  transm[mw+3,mw+3] <- 1

  #go over each row and fill in the transition probabilities
  for (i in 1:mw) {
    # cat(i," out of ", mw,"\n")
    #find the corresponding state
    if (i>h1*h22) {
      temp <- floor((i-h1*h22-1)/h11)
      x1 <- i-h1*h22-1-temp*h11
      x2 <- temp+h22
    } else {
      x2 <- floor((i-1)/h1)
      x1 <- i-x2*h1-1
    }
    #go over the four different weight combinations
    for (j in 1:2) {
      for (k in 1:2) {
        x1n <- x1+w1[j+2*(k-1)] #death chart
        x2n <- x2+w2[k]
        #look at all possible combinations of weights
        #we can't go below zero
        if (x1n<0) { x1n <- 0 }
        if (x2n<0) { x2n <- 0 }
        newcond <- 0
        #try to figure out what condition index the new CUSUM values
        #correspond to
        if (x1n>=h1) {
          newcond <- mw+1 #absorbing state on x1
        } else {
          if (x2n>=h2) {
            newcond <- mw+2 #absorbing state on x2
          } else {
            if ((x1n>=h11)&(x2n>=h22)) {
              #only register this if the other two conditions are not satisfied
              newcond <- mw+3
            }
          }
        }
        if (newcond==0) { #transition is not to an absorbing state
          #translate legal ordered pair to state number
          if (x2n h1, S[t+1,2] > h2)
          if ((S[t+1,1] > h11) & (S[t+1,2] > h22)) { alarm <- c(TRUE,TRUE) }
          # alarm <- (S[t+1,1] > h1) | (S[t+1,2] > h2) |
          #          ((S[t+1,1] > h11) & (S[t+1,2] > h22))
          #If one or both of the CUSUMs produced an alarm then stop
          if ((sum(alarm)>0) | (t==nrow(x))) { stopped <- TRUE}
        }
        return(list(N=t,val=S[-1,],alarm=alarm))
      }

######################################################################
# STS wrapper for the Paired binary CUSUM method. This follows the
# style of the categoricalCUSUM method.
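#
# A hedged sketch of pairedbinCUSUM.runlength() above (weights and
# barriers are purely illustrative, not values derived from the LLRs
# in Steiner et al., 1999):
## p <- c(0.90, 0.04, 0.05, 0.01)  # c(P(0,0), P(1,0), P(0,1), P(1,1))
## pairedbinCUSUM.runlength(p, w1 = c(-1, 3, -1, 3), w2 = c(-1, 2),
##                          h1 = 10, h2 = 8, h11 = 6, h22 = 4)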
######################################################################
pairedbinCUSUM <- function(stsObj, control = list(range=NULL,theta0,theta1,
                                                  h1,h2,h11,h22)) {

  # Set the default values if not yet set
  if(is.null(control[["range"]])) {
    control$range <- 1:nrow(observed(stsObj))
  } else { # subset stsObj
    stsObj <- stsObj[control[["range"]], ]
  }
  if(is.null(control[["theta0"]])) {
    stop("no specification of in-control parameters theta0")
  }
  if(is.null(control[["theta1"]])) {
    stop("no specification of out-of-control parameters theta1")
  }
  if(is.null(control[["h1"]])) {
    stop("no specification of primary threshold h1 for 1st series")
  }
  if(is.null(control[["h2"]])) {
    stop("no specification of primary threshold h2 for 2nd series")
  }
  if(is.null(control[["h11"]])) {
    stop("no specification of secondary limit h11 for 1st series")
  }
  if(is.null(control[["h22"]])) {
    stop("no specification of secondary limit h22 for 2nd series")
  }

  #Extract the important parts from the arguments
  y <- stsObj@observed
  nTime <- nrow(y)
  theta0 <- control[["theta0"]]
  theta1 <- control[["theta1"]]
  h1 <- control[["h1"]]
  h2 <- control[["h2"]]
  h11 <- control[["h11"]]
  h22 <- control[["h22"]]

  #Semantic checks.
  if (ncol(y) != 2) {
    stop("the number of columns in the sts object needs to be two")
  }

  #Reserve space for the results. Contrary to the categorical CUSUM
  #method, each ROW represents a series.
  alarm <- matrix(data = FALSE, nrow = nTime, ncol = 2)
  upperbound <- matrix(data = 0, nrow = nTime, ncol = 2)

  #Setup counters for the progress
  doneidx <- 0
  N <- 1
  noofalarms <- 0

  #######################################################
  #Loop as long as we are not through the entire sequence
  #######################################################
  while (doneidx < nTime) {
    #Run the paired binary CUSUM until the next alarm
    res <- pairedbinCUSUM.LLRcompute(x=y, theta0=theta0, theta1=theta1,
                                     h1=h1, h2=h2, h11=h11, h22=h22)

    #In case an alarm is found, log it and reset the chart at res$N+1
    if (res$N < nrow(y)) {
      #Put appropriate value in upperbound
      upperbound[1:res$N + doneidx,] <- res$val[1:res$N,]
      alarm[res$N + doneidx,] <- res$alarm

      #Chop & get ready for the next round
      y <- y[-(1:res$N),,drop=FALSE]
      # theta0 <- pi0[,-(1:res$N),drop=FALSE]
      # theta1 <- pi1[,-(1:res$N),drop=FALSE]
      # n <- n[-(1:res$N)]

      #Add to the number of alarms
      noofalarms <- noofalarms + 1
    }
    doneidx <- doneidx + res$N
  }

  #Add upperbound-statistic of the last segment, where no alarm is reached
  upperbound[(doneidx-res$N+1):nrow(upperbound),] <- res$val

  # Add name and data name to control object
  control$name <- "pairedbinCUSUM"
  control$data <- NULL #not supported anymore

  #write results to stsObj
  stsObj@alarm <- alarm
  stsObj@upperbound <- upperbound
  stsObj@control <- control

  #Ensure dimnames in the new object
  stsObj <- fix.dimnames(stsObj)

  #Done
  return(stsObj)
}
surveillance/R/twinSIR_helper.R0000644000176200001440000002204712401160566016176 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
###
### Auxiliary functions for twinSIR()
### and to compute one-sided AIC by simulation (in twinSIR_methods.R)
###
### Copyright (C) 2009-2014 Sebastian Meyer, contributions by Michael Hoehle
### $Revision: 991 $
### $Date: 2014-09-01 23:13:26 +0200 (Mon, 01.
Sep 2014) $ ################################################################################ ################################################################################ # The cox function is used in model formulae to indicate/capture the variables # which go into the cox part/endemic component of the model. # Also, with this "cox variables" it is possible to build up interactions # as usual: cox(var1):cox(var2)... (as if cox(...) was a normal variable) ################################################################################ cox <- function (x) { x } ################################################################################ # read.design extracts the two parts X and Z of the design matrix. # Z contains the endemic part (consisting of the cox(.) terms), # X contains the epidemic part (the rest). # The automatic intercept variable is excluded from these matrices! # # ARGS: # m - a model.frame # Terms - terms for this model.frame (used to extract the model.matrix from m) # RETURNS: # list of matrices X and Z. # If there is no variable in one part of the model the corresponding matrix has # 0 columns, e.g. ncol(Z) = 0, if there is no endemic (Cox) part. # NOTE: # This function is inspired from the timereg package by T. Scheike (available # under GPL2). See http://staff.pubhealth.ku.dk/~ts/timereg.html for details. # The function has been improved/modified to fit our purposes. ################################################################################ read.design <- function (m, Terms) { attr(Terms, "intercept") <- 1 # we will remove the intercept later on # we need this to ensure that we have a reference category # in case of factors (correct contrasts) XZ <- model.matrix(Terms, m) Zterms <- grep("cox\\([^)]+\\)", colnames(XZ), ignore.case = FALSE, perl = FALSE, value = FALSE, fixed = FALSE, useBytes = FALSE, invert = FALSE) # timereg 1.0-9 way: pattern="^cox[(][A-z0-9._]*[)]" with perl=TRUE X <- XZ[, -c(1L, Zterms), drop = FALSE] Z <- XZ[, Zterms, drop = FALSE] ud <- list(X = X, Z = Z) return(ud) } ## Alternative way to do the same thing as read.design. ## This approach is similar to that of coxph, but most often some milliseconds ## slower. # read.design <- function (m, Terms) # { # attr(Terms, "intercept") <- 1 # we will remove the intercept later on # # we need this to ensure that we have a reference category # # in case of factors (right contrasts) # nCoxTerms <- length(attr(Terms, "specials")[["cox"]]) # if (nCoxTerms > 0) { # dropX <- untangle.specials(Terms, "cox", order=1:3)$terms # } # if (length(dropX) > 0) { # X <- model.matrix(Terms[-dropX], m) # by subscripting a Terms object, # Z <- model.matrix(Terms[dropX], m) # one always gets an intercept term # Z <- Z[, -1, drop = FALSE] # } else { # X <- model.matrix(Terms, m) # Z <- X[, NULL, drop = FALSE] # } # X <- X[, -1, drop = FALSE] # # ud <- list(X = X, Z = Z) # return(ud) # } ################################################################################ # Little helper function which returns either summary(object) or simply object, # if it is already a summary. The function also verifies the 'class'. 
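################################################################################

# A hedged example of read.design() above ('df' is a hypothetical data
# frame with columns 'age' and 'household'):
## Terms <- terms(~ cox(age) + household, specials = "cox")
## mf <- model.frame(Terms, data = df)
## XZ <- read.design(mf, Terms)  # XZ$Z: the cox() term; XZ$X: the rest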
################################################################################ getSummary <- function (object, class) { summaryClass <- paste("summary", class, sep=".") if (inherits(object, class)) { summary(object) } else if (inherits(object, summaryClass)) { object } else { stop("'object' must inherit from class \"", summaryClass, "\" or \"", class, "\"") } } ################################################################################ ############################## OSAIC function ################################## ################################################################################ # Two functions: # Ztilde.chibarsq <- function(Z,p,Winv,tR,s=1) # w.chibarsq.sim <- function(p, W, N=1e4) # # Both functions are only used internally, no need for documentation # they are used in function .OSAICpenalty (twinSIR_methods.R) ################################################################################ ########################################################################## # This function computes Ztilde # for one Z as specified in Simulation 3, Silvapulle & Sen (2005), p. 79. # See also p. 37 for the quadprog link. # # Params: # Z - px1 matrix or vector with specific Z value # p - dimension of the problem, where theta is restricted to R^{+p} # Winv - inverse of covariance matrix of Z # tR - transpose of constraint matrix R\theta \geq 0. In all cases equal to # diag(p), but to save time we deliver it to the function every time # s - rescale objective function (division by s) # # Returns: # Ztilde, the point at which (Z-\theta)' W^{-1} (Z-\theta) is the # minimum over \theta \geq 0. ########################################################################## Ztilde.chibarsq <- function(Z,p,Winv,tR,s=1) { #The solve.QP function minimizes #-d^T b + 1/2 b^T D b subject to the constraints A^T b >= b_0. #Thus using p. 37 we have d = t(Winv) %*% Z. d <- crossprod(Winv, Z) #Note: Winv and d can become quiet large (or small), but since the solution is #invariant to the scaling of the function being minimized, we can equivalently #call solve.QP using D/s and d/s (e.g., s=mean(D)) to avoid the error #"constraints are inconsistent, no solution!" theta <- quadprog::solve.QP(Dmat = Winv/s, dvec = d/s, Amat = tR, bvec = rep.int(0,p), meq = 0)$solution return(sum(theta > 0)) } ###################################################################### # Compute OSAIC by simulation weights as described in Silvapulle & Sen # (2005), Simulation 3, p.79. # # Params: # p - dimension of the problem, theta is constrained to R^{+p} # W - covariance matrix of the chibarsq distribution # N - number of simulations to use # # Returns: # vector of length p+1 containing the weights w_i, i=0, \ldots, p, # computed by Monte Carlo simulation ###################################################################### w.chibarsq.sim <- function(p, W, N=1e4) { #Draw Z's from multivariate normal distribution with covariance #matrix W Z <- mvrnorm(N, rep.int(0,p), W) if (is.vector(Z)) Z <- t(Z) # case N==1 #inverse of W Winv <- solve(W) #For each simulation calculate Ztilde sims <- apply(X=Z, MARGIN=1, FUN=Ztilde.chibarsq, p=p, Winv=Winv, tR=diag(p), s=mean(Winv)) w <- table(factor(sims, levels=0:p)) / N return(w) } ################################################################################ # The helper 'getModel.simEpidata' extracts the model of an object of class # "simEpidata" similar to the function 'twinSIR' with model = TRUE, # i.e. a list with components survs, X, Z and weights, where atRiskY == 1. 
# The log-baseline h0 is evaluated at start times of intervals only. # This function is used in function 'intensityPlot'. ################################################################################ getModel.simEpidata <- function (object, ...) { class(object) <- "data.frame" # avoid use of [.epidata (not necessary here) config <- attr(object, "config") alpha <- config$alpha beta <- config$beta atRiskY1 <- object$atRiskY == 1 simepi1 <- object[atRiskY1,] survs <- simepi1[c("id", "start", "stop", "event")] attr(survs, "eventTimes") <- attr(object, "eventTimes") attr(survs, "timeRange") <- attr(object, "timeRange") X <- as.matrix(simepi1[tail(1:ncol(simepi1), length(alpha))]) logbaseline <- sapply(survs$start, FUN = config$h0, simplify = TRUE) Terms <- attr(object, "terms") Z <- read.design(model.frame(Terms, simepi1), Terms)$Z Z <- cbind("cox(logbaseline)" = logbaseline, Z) model <- list(survs = survs, X = X, Z = Z, weights = rep.int(1,nrow(survs))) return(model) } ### Similar auxiliary method extracting the model component ### of a fitted 'twinSIR' getModel.twinSIR <- function (object, ...) { if (is.null(model <- object[["model"]])) { stop("'", deparse(substitute(object)), "' does not contain the 'model' ", "component (use 'model = TRUE' when calling 'twinSIR')") } return(model) } surveillance/R/backprojNP.R0000644000176200001440000003271514013521730015327 0ustar liggesusers###################################################################### # Implementation of the backprojection method as described in # Becker et al. (1991), Stats in Med, 10, 1527-1542. The method # was originally developed for the back-projection of AIDS incidence # but it is equally useful for analysing the epidemic curve in outbreak # situations of a disease with long incubation time, e.g. in order # to illustrate the effect of intervention measures. # # See backprojNP.Rd for the remaining details. ###################################################################### ###################################################################### # Helper function: Replace NaN or is.infinite values with zero. # Good against division by zero problems. # # Parameters: # x - a vector of type double ###################################################################### naninf2zero <- function(x) {x[is.nan(x) | is.infinite(x)] <- 0 ; return(x)} ###################################################################### # Single step of the EMS algorithm by Becker et al (1991). This function # is called by backprojNP. # # Parameters: # lambda.old - vector of length T containing the current rates # Y - vector of length T containing the observed values # dincu - probability mass function of the incubation time. I.e. # a function to be evaluated at integer numbers # pincu - cumulative mass function of the incubation time, i.e. an # object of type function. Needs to in sync with dincu. # k - smoothing parameter of the EMS algo, # needs to be an even number # # Returns: # ###################################################################### em.step.becker <- function(lambda.old, Y, dincu, pincu, k, incu.pmf, eq3a.method=c("R","C")) { #k needs to be divisible by two if (k %% 2 != 0) stop("k needs to be even.") #which method to use eq3a.method <- match.arg(eq3a.method,c("R","C")) #Initialize T <- length(Y) #Define new parameters phi.new <- lambda.new <- 0*lambda.old if (eq3a.method=="R") { #EM step. 
Problem that some of the sums can be zero if the incubation #distribution has zeroes at d=0,1,2 for (t in 1:T) { #Calculate sum as in equation (3a) of Becker (1991) sum3a <- 0 for (d in 0:(T-t)) { sum3a <- sum3a + Y[t+d] * naninf2zero(dincu(d) / sum(sapply(1:(t+d),function(i) lambda.old[i]*dincu(t+d-i)))) } phi.new[t] <- naninf2zero(lambda.old[t]/pincu(T-t)) * sum3a } } else { phi.new <- .Call(C_eq3a, lambda.old = as.numeric(lambda.old), Y = as.numeric(Y), incu.pmf = as.numeric(incu.pmf)) } #Smoothing step if (k>0) { w <- choose(k,0:k)/2^k for (t in 1:T) { i.sub <- t+(0:k)-k/2 goodIdx <- i.sub %in% 1:T w.sub <- w[goodIdx]/sum(w[goodIdx]) lambda.new[t] <- sum(w.sub * phi.new[i.sub[goodIdx]]) } } else { #no smoothing lambda.new <- phi.new } #Done. return(lambda=lambda.new) } ###################################################################### # STS compatible function to call the non-parametric back-projection # method of Becker et al (1991) for time aggregated data. # # Parameters: # sts - sts object with the observed incidence as "observed" slot # incu.pmf - incubation time pmf as a vector with index 0,..,d_max. Please # note that the support includes zero! # k - smoothing parameter for the EMS algorithm # eps - relative convergence criteration # iter.max - max number of iterations # verbose - boolean, if TRUE provide extra output when running the method # lambda0 - start value for lambda, default: uniform # hookFun - hook function to call after each EMS step, a function # of type hookFun=function(stsj,...) # # Returns: # sts object with upperbound set to the backprojected lambda. ###################################################################### backprojNP.fit <- function(sts, incu.pmf,k=2,eps=1e-5,iter.max=250,verbose=FALSE,lambda0=NULL,eq3a.method=c("R","C"),hookFun=function(stsbp) {}, ...) { #Determine method eq3a.method <- match.arg(eq3a.method, c("R","C")) #Define object to return lambda.hat <- matrix(NA,ncol=ncol(sts),nrow=nrow(sts)) #Loop over all series for (j in 1:ncol(sts)) { #Inform (if requested) what series we are looking at if ((ncol(sts)>1) & verbose) { cat("Backprojecting series no. ",j,"\n") } #Extract incidence time series Y <- observed(sts)[,j] #If default behaviour for lambda0 is desired if (is.null(lambda0)) { lambda0j <- rep(sum(Y)/length(Y),length(Y)) } else { lambda0j <- lambda0[,j] } #Create incubation time distribution vectors for the j'th series inc.pmf <- as.numeric(incu.pmf[,j]) inc.cdf <- cumsum(inc.pmf) #Create wrapper functions for the PMF and CDF based on the vector. #These function will be used in the R version of eq3a. #ToDo: The function uses the global variable inc.pmf which #definitely is dirty coding. How to define this function #in an environment where inc.pmf is present? dincu <- function(x) { notInSupport <- x<0 | x>=length(inc.pmf) #Give index -1 to invalid queries x[notInSupport] <- -1 return(c(0,inc.pmf)[x+2]) } #Cumulative distribution function. Uses global var "inc.cdf" pincu <- function(x) { x[x<0] <- -1 x[x>=length(inc.cdf)] <- length(inc.cdf)-1 return(c(0,inc.cdf)[x+2]) } #Iteration counter and convergence indicator i <- 0 stop <- FALSE lambda <- lambda0j #Loop until stop while (!stop) { #Add to counter i <- i+1 lambda.i <- lambda #Perform one step lambda <- em.step.becker(lambda.old=lambda.i,Y=Y,dincu=dincu,pincu=pincu,k=k, incu.pmf=inc.pmf, eq3a.method=eq3a.method) #check stop #In original paper the expression to do so appears funny since #- and + deviations cancel. 
More realistic: #criterion <- abs(sum(res$lambda) - sum(lambda.i))/sum(lambda.i) criterion <- sqrt(sum((lambda- lambda.i)^2))/sqrt(sum(lambda.i^2)) if (verbose) { cat("Convergence criterion @ iteration i=",i,": ", criterion,"\n") } #Check whether to stop stop <- criterion < eps | (i>iter.max) #Call hook function stsj <- sts[,j] upperbound(stsj) <- matrix(lambda,ncol=1) hookFun(stsj, ...) } #Done lambda.hat[,j] <- lambda } #Create new object with return put in the lambda slot bp.sts <- as(sts,"stsBP") bp.sts@upperbound <- lambda.hat bp.sts@control <- list(k=k,eps=eps,iter=i) return(bp.sts) } ###################################################################### # EMS back-projection method including bootstrap based confidence # intervals. The theory is indirectly given in Becker and Marschner (1993), # Biometrika, 80(1):165-178 and more specifically in Yip et al, 2011, # Communications in Statistics -- Simulation and Computation, # 37(2):425-433. # # Parameters: # # sts - sts object with the observed incidence as "observed" slot # incu.pmf - incubation time pmf as a vector with index 0,..,d_max. Please # note that the support includes zero! # k - smoothing parameter for the EMS algorithm # eps - relative convergence criteration. If a vector of length two # then the first argument is used for the k=0 initial fit and # the second element for all EMS fits # # iter.max - max number of iterations. Can be a vector of length two. # Similar use as in eps. # verbose - boolean, if TRUE provide extra output when running the method # lambda0 - start value for lambda, default: uniform # hookFun - hook function to call after each EMS step, a function # of type hookFun=function(Y,lambda,...) # B - number of bootstrap replicates. If B=-1 then no bootstrap CIs # are calculated. # # Returns: # sts object with upperbound set to the backprojected lambda. ###################################################################### backprojNP <- function(sts, incu.pmf,control=list(k=2,eps=rep(0.005,2),iter.max=rep(250,2),Tmark=nrow(sts),B=-1,alpha=0.05,verbose=FALSE,lambda0=NULL,eq3a.method=c("R","C"),hookFun=function(stsbp) {}),...) { #Check if backprojection is to be done multivariate time series case. if (ncol(sts)>1) { warning("Multivariate time series: Backprojection uses same eps for the individual time series.") } #Check if incu.pmf vector fits the dimension of the sts object. If not #either replicate it or throw an error. 
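#(A hedged sketch of a valid univariate call, with 'sts' a univariate
# sts object:
## inc.pmf <- c(0, 1/6, 2/6, 3/6)  # incubation time PMF on support 0,...,3
## bp <- backprojNP(sts, incu.pmf = inc.pmf, control = list(k = 2, B = -1))
# )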
if (is.matrix(incu.pmf)) { if (!ncol(incu.pmf) == ncol(sts)) { stop("Dimensions of sts object and incu.pmf don't match.") } } else { if (ncol(sts)>1) { warning("Backprojection uses same incubation time distribution for the individual time series.") } incu.pmf <- matrix(incu.pmf,ncol=ncol(sts),dimnames=list(NULL,colnames(sts))) } #Fill control object as appropriate and in sync with the default value if (is.null(control[["k",exact=TRUE]])) { control$k <- 2 } if (is.null(control[["eps",exact=TRUE]])) { control$eps <- rep(0.005,2) } if (is.null(control[["iter.max",exact=TRUE]])) { control$iter.max <- rep(250,2) } if (is.null(control[["Tmark",exact=TRUE]])) { control$Tmark <- nrow(sts) } if (is.null(control[["B",exact=TRUE]])) { control$B <- -1 } if (is.null(control[["alpha",exact=TRUE]])) { control$alpha <- 0.05 } if (is.null(control[["verbose",exact=TRUE]])) { control$verbose <- FALSE } if (is.null(control[["lambda0",exact=TRUE]])) { control$lambda0 <- NULL } #Which method to use for computing eq3a if (is.null(control[["eq3a.method",exact=TRUE]])) { control$eq3a.method <- "R" } else { control$eq3a.method <- match.arg(control$eq3a.method,c("R","C")) } #Hook function definition if (is.null(control[["hookFun",exact=TRUE]])) { control$hookFun <- function(Y,lambda,...) {} } #If the eps and iter.max arguments are too short, make them length 2. if (length(control$eps)==1) control$eps <- rep(control$eps,2) if (length(control$iter.max)==1) control$iter.max <- rep(control$iter.max,2) #Compute the estimate to report (i.e. use 2nd component of the args) if (control$verbose) { cat("Back-projecting with k=",control$k," to get lambda estimate.\n") } stsk <- backprojNP.fit(sts, incu.pmf=incu.pmf,k=control$k,eps=control$eps[2],iter.max=control$iter.max[2],verbose=control$verbose,lambda0=control$lambda0,hookFun=control$hookFun,eq3a.method=control$eq3a.method) #Fix control slot stsk@control <- control #If no bootstrap to do return object right away as stsBP object. if (control$B<=0) { if (control$verbose) { cat("No bootstrap CIs calculated as requested.\n") } stsk <- as(stsk,"stsBP") return(stsk) } #Call back-project function without smoothing, i.e. with k=0. if (control$verbose) { cat("Back-projecting with k=",0," to get lambda estimate for parametric bootstrap.\n") } sts0 <- backprojNP.fit(sts, incu.pmf=incu.pmf,k=0,eps=control$eps[1],iter.max=control$iter.max[1],verbose=control$verbose,lambda0=control$lambda0,hookFun=control$hookFun, eq3a.method=control$eq3a.method) ########################################################################### #Create bootstrap samples and loop for each sample while storing the result ########################################################################### sts.boot <- sts0 #Define object to return lambda <- array(NA,dim=c(nrow(sts),ncol(sts),control$B)) #Define PMF of incubation time which does safe handling of values #outside the support of the incubation time. dincu <- function(x,i) { notInSupport <- x<0 | x>=length(incu.pmf[,i]) #Give index -1 to invalid queries x[notInSupport] <- -1 return(c(0,incu.pmf[,i])[x+2]) } #Loop in order to create the sample for (b in 1:control$B) { if (control$verbose) { cat("Bootstrap sample ",b,"/",control$B,"\n") } #Compute convolution for the mean of the observations mu <- matrix(0, nrow=nrow(sts0), ncol=ncol(sts0)) #Perform the convolution for each series for (i in 1:ncol(sts)) { for (t in 1:nrow(mu)) { for (s in 0:(t-1)) { mu[t,i] <- mu[t,i] + upperbound(sts0)[t-s,i] * dincu(s,i) } } } #Create new observations in the observed slot. 
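#(parametric bootstrap: new counts are drawn as y*_t ~ Po(mu_t), where
# mu_t is the k=0 lambda estimate convolved with the incubation time PMF)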
observed(sts.boot) <- matrix(rpois(prod(dim(sts.boot)),lambda=mu),ncol=ncol(sts0)) #Run the backprojection on the bootstrap sample. Use original result #as starting value. sts.boot <- backprojNP.fit(sts.boot, incu.pmf=incu.pmf,k=control$k,eps=control$eps[2],iter.max=control$iter.max[2],verbose=control$verbose,lambda0=upperbound(stsk),hookFun=control$hookFun, eq3a.method=control$eq3a.method) #Extract the result of the b'th backprojection lambda[,,b] <- upperbound(sts.boot) } #Compute an equal tailed (1-alpha)*100% confidence intervals based on the #bootstrap samples. The dimension is (ci.low,ci.high) x time x series ci <- apply(lambda,MARGIN=c(1,2), quantile, p=c(control$alpha/2,1-control$alpha/2)) #Convert output to stsBP object and add information to the extra slots stsk <- as(stsk,"stsBP") #Add extra slots stsk@ci <- ci stsk@lambda <- lambda stsk@control <- control #Done return(stsk) } surveillance/R/epidata.R0000644000176200001440000010553613623242775014726 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Data structure "epidata" representing the SIR event history of a fixed ### geo-referenced population (e.g., farms, households) for twinSIR() analysis ### ### Copyright (C) 2008-2010, 2012, 2014-2018, 2020 Sebastian Meyer ### $Revision: 2520 $ ### $Date: 2020-02-19 15:28:13 +0100 (Wed, 19. Feb 2020) $ ################################################################################ ## CAVE: ## - we assume fixed coordinates (this is important since time-varying ## coordinates would result in more sophisticated and time consuming ## calculations of distance matrices) ! ## - in the first block (start = t0) all id's must be present (for coordinates) ## - those id's with atRiskY(t0) = 0 are taken as initially infectious ## - SIS epidemics are possible, but must be given as SIRS with pseudo R-events, ## i.e. individuals will be removed and become susceptible directly afterwards ################################################################################ ## Convert a simple data.frame with one row per individual and with columns for ## the times of becoming exposed/infectious/removed ## to the long "epidata" event history start/stop format. ## tE.col and tR.col can be missing corresponding to SIR, SEI, or SI data. ## NA's in time variables mean that the respective event has not yet occurred. ## Time-varying covariates are not supported by this converter. ################################################################################ as.epidata.data.frame <- function (data, t0, tE.col, tI.col, tR.col, id.col, coords.cols, f = list(), w = list(), D = dist, max.time = NULL, keep.cols = TRUE, ...) 
{ if (missing(t0)) { return(NextMethod("as.epidata")) # as.epidata.default } ## drop individuals that have already been removed prior to t0 ## since they would otherwise be considered as initially infective ## (atRiskY = 0 in first time block) and never be removed if (!missing(tR.col)) { alreadyRemoved <- !is.na(data[[tR.col]]) & data[[tR.col]] <= t0 if (any(alreadyRemoved)) { data <- data[!alreadyRemoved,] message("Note: dropped rows with tR <= t0 (", paste0(which(alreadyRemoved), collapse = ", "), ")") } } ## parse max.time if (is.null(max.time) || is.na(max.time)) { # max(stop) is at last event max.time <- NA_real_ } else { stopifnot(max.time > t0) } ## parse id column id <- factor(data[[id.col]]) # removes unused levels stopifnot(!anyDuplicated(id), !is.na(id)) N <- nlevels(id) # = nrow(data) ## make time relative to t0 subtract_t0 <- function (x) as.numeric(x - t0) max.time <- subtract_t0(max.time) tI <- subtract_t0(data[[tI.col]]) tE <- if (missing(tE.col)) tI else subtract_t0(data[[tE.col]]) tR <- if (missing(tR.col)) rep.int(NA_real_, N) else subtract_t0(data[[tR.col]]) ## check E-I-R order if (any((is.na(tE) & !(is.na(tI) & is.na(tR))) | (is.na(tI) & !is.na(tR)))) { stop("events cannot be skipped (NA in E/I => NA in I/R)") } if (any(.wrongsequence <- (tE > tI | tI >= tR) %in% TRUE)) { # TRUE | NA = TRUE stop("E-I-R events are in wrong order for the following id's: ", paste0(id[.wrongsequence], collapse = ", ")) } ## ignore events after max.time if (!is.na(max.time)) { is.na(tE) <- tE > max.time is.na(tI) <- tI > max.time is.na(tR) <- tR > max.time } ## vector of stop times stopTimes <- c(tE, tI, tR, max.time) stopTimes <- stopTimes[!is.na(stopTimes) & stopTimes > 0] stopTimes <- sort.int(unique.default(stopTimes), decreasing = FALSE) nBlocks <- length(stopTimes) if (nBlocks == 0L) { stop("nothing happens after 't0'") } ## initialize event history evHist <- data.frame( id = rep.int(id, nBlocks), start = rep.int(c(0,stopTimes[-nBlocks]), rep.int(N, nBlocks)), stop = rep.int(stopTimes, rep.int(N, nBlocks)), atRiskY = NA, event = 0, Revent = 0, # adjusted in the loop below row.names = NULL, check.rows = FALSE, check.names = FALSE) ## indexes of the last rows of the time blocks blockbase <- c(0, seq_len(nBlocks) * N) ## which individuals are at risk in the first (next) block Y <- is.na(tE) | tE > 0 ## Loop over the blocks/stop times to adjust atRiskY, event and Revent for (i in seq_len(nBlocks)) { ct <- stopTimes[i] ## set individual at-risk indicators for the current time block evHist$atRiskY[blockbase[i] + seq_len(N)] <- Y ## individuals who become exposed at the current stop time ## will no longer be at risk in the next block Y[which(tE == ct)] <- FALSE ## process events at this stop time evHist$event[blockbase[i] + which(tI == ct)] <- 1 evHist$Revent[blockbase[i] + which(tR == ct)] <- 1 } ## add additional time-constant covariates extraVarNames <- coords.cols # may be NULL if (isTRUE(keep.cols)) { extraVarNames <- c(extraVarNames, setdiff(names(data), id.col)) } else if (length(keep.cols) > 0L && !identical(FALSE, keep.cols)) { extraVarNames <- c(extraVarNames, names(data[keep.cols])) } extraVarNames <- unique.default(extraVarNames) if (length(extraVarNames) > 0L) { evHist <- data.frame( evHist, data[rep.int(seq_len(N), nBlocks), extraVarNames, drop=FALSE], row.names = NULL, check.names = TRUE, stringsAsFactors = TRUE) } ## Now we can pass the generated event history to the default method ## for the usual consistency checks and the pre-calculation of f covariates 
as.epidata.default( data = evHist, id.col = "id", start.col = "start", stop.col = "stop", atRiskY.col = "atRiskY", event.col = "event", Revent.col = "Revent", coords.cols = coords.cols, f = f, w = w, D = D, .latent = !missing(tE.col)) } ################################################################################ # DEFAULT CONVERTER, which requires a start/stop event history data.frame # It performs consistency checks, and pre-calculates the distance-based # epidemic covariates from f. ################################################################################ as.epidata.default <- function(data, id.col, start.col, stop.col, atRiskY.col, event.col, Revent.col, coords.cols, f = list(), w = list(), D = dist, .latent = FALSE, ...) { cl <- match.call() # If necessary, convert 'data' into a data.frame (also converting # column names to syntactically correct names for use in formulae) data <- as.data.frame(data, stringsAsFactors = FALSE) # Use column numbers as indices and check them colargs <- c("id.col", "start.col", "stop.col", "atRiskY.col", "event.col", "Revent.col", "coords.cols") colidxs <- structure(as.list(numeric(length(colargs))), names = colargs) for (colarg in colargs) { colidx <- get(colarg, inherits = FALSE) if (colarg != "coords.cols" && length(colidx) != 1L) { stop("the column specifier '", colarg, "' must be of length 1") } if (is.character(colidx)) { colidx <- match(colidx, colnames(data)) if (any(is.na(colidx))) { stop("'", colarg, " = ", deparse(cl[[colarg]]), "': ", "column does not exist in 'data'") } } else if (is.numeric(colidx) && any(colidx<1L | colidx>ncol(data))) { stop("'", colarg, " = ", deparse(cl[[colarg]]), "': ", "column index must be in [1; ", ncol(data), "=ncol(data)]") } colidxs[[colarg]] <- colidx } # Rename main columns to default column names colidxsVec <- unlist(colidxs) colnams <- c("id", "start", "stop", "atRiskY", "event", "Revent") colnames(data)[colidxsVec[1:6]] <- colnams usedReservedName <- any(colnams %in% colnames(data)[-colidxsVec[1:6]]) # REORDER COLUMNS, so that main columns come first (also for make.unique) data <- data[c(colidxsVec, setdiff(seq_len(NCOL(data)), colidxsVec))] # Make columns names unique (necessary if other column with name in colnams) if (usedReservedName) { colnames(data) <- make.unique(colnames(data)) message("Some other columns had reserved names and have been renamed") } # Convert id into a factor (also removing unused levels if it was a factor) data[["id"]] <- factor(data[["id"]]) # Check atRiskY, event and Revent for values other than 0 and 1 for (var in c("atRiskY", "event", "Revent")) { data[[var]] <- as.numeric(data[[var]]) if (any(! 
data[[var]] %in% c(0,1))) stop("'", var, "' column may only assume values 0 and 1") } # Check consistency of atRiskY and event (event only if at-risk) if (.latent) { warning("support for latent periods is experimental") } else { noRiskButEvent <- data[["atRiskY"]] == 0 & data[["event"]] == 1 if (noRiskButEventRow <- match(TRUE, noRiskButEvent, nomatch = 0)) { stop("inconsistent atRiskY/event indicators in row ", noRiskButEventRow, ": event only if at risk") } } # Check event (infection) times for ties eventTimes <- data[data[["event"]] == 1, "stop"] ReventTimes <- data[data[["Revent"]] == 1, "stop"] duplicatedEventTime <- duplicated(c(eventTimes, ReventTimes)) if (duplicatedEventTimeIdx <- match(TRUE, duplicatedEventTime, nomatch=0)) { stop("non-unique event times: concurrent event/Revent at time ", c(eventTimes, ReventTimes)[duplicatedEventTimeIdx]) } # Check start/stop consistency and add block id histIntervals <- unique(data[c("start", "stop")]) histIntervals <- histIntervals[order(histIntervals[,1L]),] nBlocks <- nrow(histIntervals) if (any(histIntervals[,2L] <= histIntervals[,1L])) { stop("stop times must be greater than start times") } startStopCheck <- histIntervals[-1L,1L] != histIntervals[-nBlocks,2L] if (startStopCheckIdx <- match(TRUE, startStopCheck, nomatch = 0)) { stop("inconsistent start/stop times: time intervals not consecutive ", "at stop time ", histIntervals[startStopCheckIdx,2L]) } if ("BLOCK" %in% colnames(data)) { warning("column name 'BLOCK' is reserved, ", "existing column has been replaced") } data[["BLOCK"]] <- match(data[["start"]], histIntervals[,1L]) # SORT by block/id and create indexes for block borders data <- data[order(data[["BLOCK"]], data[["id"]]),] beginBlock <- match(seq_len(nBlocks), data[["BLOCK"]]) endBlock <- c(beginBlock[-1L]-1L, nrow(data)) # make block column the first column BLOCK.col <- match("BLOCK", colnames(data)) data <- data[c(BLOCK.col, setdiff(seq_along(data), BLOCK.col))] coords.cols <- 1L + 6L + seq_along(colidxs[["coords.cols"]]) # Check consistency of atRiskY and event (not at-risk after event) .checkFunction <- function(eventblock, eventid) { if (eventblock == nBlocks) return(invisible()) rowsOfNextBlock <- beginBlock[eventblock+1L]:endBlock[eventblock+1L] nextBlockData <- data[rowsOfNextBlock, c("id", "atRiskY")] idIdx <- which(nextBlockData[["id"]] == eventid) if (length(idIdx) == 1L && nextBlockData[idIdx, "atRiskY"] == 1) { stop("inconsistent atRiskY/event indicators for id '", eventid, "': should not be at risk immediately after event") } } eventTable <- data[data[["event"]] == 1,] for(k in seq_len(nrow(eventTable))) { .checkFunction(eventTable[k,"BLOCK"], eventTable[k,"id"]) } # Set attributes attr(data, "eventTimes") <- sort(eventTimes) attr(data, "timeRange") <- c(histIntervals[1L,1L],histIntervals[nBlocks,2L]) attr(data, "coords.cols") <- coords.cols # <- must include this info because externally of this function # we don't know how many coords.cols (dimensions) we have attr(data, "f") <- list() # initialize attr(data, "w") <- list() # initialize class(data) <- c("epidata", "data.frame") # Compute epidemic variables update.epidata(data, f = f, w = w, D = D) } update.epidata <- function (object, f = list(), w = list(), D = dist, ...) 
{ oldclass <- class(object) class(object) <- "data.frame" # avoid use of [.epidata ## block indexes and first block beginBlock <- which(!duplicated(object[["BLOCK"]], nmax = object[["BLOCK"]][nrow(object)])) endBlock <- c(beginBlock[-1L]-1L, nrow(object)) firstDataBlock <- object[seq_len(endBlock[1L]), ] ## check f and calculate distance matrix if (length(f) > 0L) { if (!is.list(f) || is.null(names(f)) || any(!sapply(f, is.function))) { stop("'f' must be a named list of functions") } lapply(X = f, FUN = function (B) { if (!isTRUE(all.equal(c(5L,2L), dim(B(matrix(0, 5, 2)))))) stop("'f'unctions must retain the dimensions of their input") }) if (any(names(f) %in% names(object))) { warning("'f' components replace existing columns of the same name") } ## reset / initialize columns for distance-based epidemic weights object[names(f)] <- 0 ## keep functions as attribute attr(object, "f")[names(f)] <- f ## check / compute distance matrix distmat <- if (is.function(D)) { if (length(coords.cols <- attr(object, "coords.cols")) == 0L) { stop("need coordinates to calculate the distance matrix") } coords <- as.matrix(firstDataBlock[coords.cols], rownames.force = FALSE) rownames(coords) <- as.character(firstDataBlock[["id"]]) as.matrix(D(coords)) } else { # a numeric matrix (or "Matrix") if (length(dn <- dimnames(D)) != 2L) { stop("if not a function, 'D' must be a matrix-like object") } if (!all(firstDataBlock[["id"]] %in% dn[[1L]], firstDataBlock[["id"]] %in% dn[[2L]])) { stop("'dimnames(D)' must contain the individuals' IDs") } D } } ## check covariate-based epidemic weights if (length(w) > 0L) { if (!is.list(w) || is.null(names(w)) || any(!sapply(w, is.function))) { stop("'w' must be a named list of functions") } if (any(names(w) %in% names(object))) { warning("'w' components replace existing columns of the same name") } ## reset / initialize columns for covariate-based epidemic weights object[names(w)] <- 0 ## keep functions as attribute attr(object, "w")[names(w)] <- w ## compute wij matrix for each of w wijlist <- compute_wijlist(w = w, data = firstDataBlock) } ## Compute sum of epidemic covariates over infectious individuals if (length(f) + length(w) > 0L) { infectiousIDs <- firstDataBlock[firstDataBlock[["atRiskY"]] == 0, "id"] ##<- this is a factor variable for(i in seq_along(beginBlock)) { blockidx <- beginBlock[i]:endBlock[i] blockdata <- object[blockidx,] blockIDs <- blockdata[["id"]] if (length(infectiousIDs) > 0L) { if (length(f) > 0L) { u <- distmat[as.character(blockIDs), as.character(infectiousIDs), drop = FALSE] # index by factor levels object[blockidx,names(f)] <- vapply( X = f, FUN = function (B) Matrix::rowSums(B(u)), FUN.VALUE = numeric(length(blockIDs)), USE.NAMES = FALSE) } if (length(w) > 0L) { object[blockidx,names(w)] <- vapply( X = wijlist, FUN = function (wij) { ## actually don't have to care about the diagonal: ## i at risk => sum does not include it ## i infectious => atRiskY = 0 (ignored in twinSIR) rowSums(wij[as.character(blockIDs), as.character(infectiousIDs), drop = FALSE]) # index by factor levels }, FUN.VALUE = numeric(length(blockIDs)), USE.NAMES = FALSE) } } ## update the set of infectious individuals for the next block recoveredID <- blockIDs[blockdata[["Revent"]] == 1] infectedID <- blockIDs[blockdata[["event"]] == 1] if (length(recoveredID) > 0L) { infectiousIDs <- infectiousIDs[infectiousIDs != recoveredID] } else if (length(infectedID) > 0L) { infectiousIDs[length(infectiousIDs)+1L] <- infectedID } } } ## restore "epidata" class class(object) <- oldclass 
return(object) } compute_wijlist <- function (w, data) { ## for each function in 'w', determine the variable on which it acts; ## this is derived from the name of the first formal argument, which ## must be of the form "varname.i" wvars <- vapply(X = w, FUN = function (wFUN) { varname.i <- names(formals(wFUN))[[1L]] substr(varname.i, 1, nchar(varname.i)-2L) }, FUN.VALUE = "", USE.NAMES = TRUE) if (any(wvarNotFound <- !wvars %in% names(data))) { stop("'w' function refers to unknown variables: ", paste0(names(w)[wvarNotFound], collapse=", ")) } ## compute weight matrices w_ij for each of w mapply( FUN = function (wFUN, wVAR, ids) { wij <- outer(X = wVAR, Y = wVAR, FUN = wFUN) dimnames(wij) <- list(ids, ids) wij }, wFUN = w, wVAR = data[wvars], MoreArgs = list(ids = as.character(data[["id"]])), SIMPLIFY = FALSE, USE.NAMES = TRUE ) } ################################################################################ # EXTRACTION OPERATOR FOR 'EPIDATA' OBJECTS # Indexing with "[" would be possible (inheriting from data.frame). # But using any column index would remove attributes (row indexes would not). # Thus, we define an own method to retain and adjust the attributes when # selecting a subset of blocks of the 'epidata'. # Selecting a subset of columns will remove class "epidata" (resulting in a # simple data.frame) ################################################################################ "[.epidata" <- function(x, i, j, drop) { # use data.frame method first xx <- NextMethod("[") # then return its result as pure data.frame or assure valid 'epidata' # if a subset of columns has been selected and attributes have been removed if (NCOL(xx) != ncol(x) || any(names(xx) != names(x))) { if (inherits(xx, "data.frame")) { # xx could be a vector class(xx) <- "data.frame" # remove class 'epidata' } message("Note: converted class \"epidata\" to simple \"", class(xx), "\"") return(xx) } # else there is no effective column selection (e.g. 
j=TRUE) if (nrow(xx) == 0) { message("Note: no rows selected, dropped class \"epidata\"") class(xx) <- "data.frame" return(xx[TRUE]) # removes attributes } invalidEpidata <- FALSE blocksizesx <- table(x[["BLOCK"]]) blocksizesxx <- table(xx[["BLOCK"]]) blocksOK <- identical(c(blocksizesxx), c(blocksizesx[names(blocksizesxx)])) if (is.numeric(i) && any(diff(na.omit(i)) < 0)) { # epidata should remain ordered by time warning("dropped class \"epidata\": reordering rows is not permitted") invalidEpidata <- TRUE } else if (!blocksOK) { # blocks should not be cut, epidemic covariates might become invalid warning("dropped class \"epidata\": subsetting blocks not allowed") invalidEpidata <- TRUE } else if (any(diff(as.numeric(names(blocksizesxx))) != 1)) { # blocks can only be selected consecutively warning("dropped class \"epidata\": ", "only consecutive blocks may be selected") invalidEpidata <- TRUE } if (invalidEpidata) { class(xx) <- "data.frame" xx[TRUE] # removes attributes } else { # # adjust block index so that it starts at 1 # firstBlockNumber <- as.numeric(names(blocksizesxx)[1]) # if (firstBlockNumber > 1) { # xx[["BLOCK"]] <- xx[["BLOCK"]] - (firstBlockNumber-1) # } # Restore or adjust attributes tmin <- xx[["start"]][1] tmax <- xx[["stop"]][nrow(xx)] oldEventTimes <- attr(x, "eventTimes") attr(xx, "eventTimes") <- if (blocksOK) { oldEventTimes[oldEventTimes > tmin & oldEventTimes <= tmax] } else { xx[["stop"]][xx[["event"]] == 1] } attr(xx, "timeRange") <- c(tmin, tmax) attr(xx, "coords.cols") <- attr(x, "coords.cols") attr(xx, "f") <- attr(x, "f") xx } } ################################################################################ # INSERT BLOCKS FOR EXTRA STOP TIMES IN 'EPIDATA' OBJECTS ################################################################################ intersperse <- function (epidata, stoptimes, verbose = FALSE) { # Check arguments if (!inherits(epidata, "epidata")) { stop("'epidata' must inherit from class \"epidata\"") } if (!is.vector(stoptimes, mode = "numeric")) { stop("'stoptimes' must be a numeric vector") } # Identify new 'stoptimes' sortedEpiStop <- sort(unique(epidata$stop)) extraStoptimes <- stoptimes[! 
stoptimes %in% sortedEpiStop] # Return original 'epidata' if nothing to do if (length(extraStoptimes) == 0) { # message("nothing done: no new stop times") return(epidata) } # # Retain attributes of 'epidata' # .attributes <- attributes(epidata) # .attributes <- .attributes[match(c("eventTimes", "timeRange", # "coords.cols", "f", "config", "call", "terms"), names(.attributes), # nomatch = 0)] # Check new 'stoptimes' timeRange <- attr(epidata, "timeRange") inside <- extraStoptimes > timeRange[1] & extraStoptimes < timeRange[2] if (any(!inside)) { extraStoptimes <- extraStoptimes[inside] warning("ignored extra 'stoptimes' outside the observation period") } # Impute blocks for extraStoptimes oldclass <- class(epidata) class(epidata) <- "data.frame" # Avoid use of [.epidata (not necessary here) blocksize <- sum(epidata$BLOCK == 1) nInsert <- length(extraStoptimes) lastRow <- nrow(epidata) epidata <- rbind(epidata, epidata[rep.int(NA_integer_, nInsert * blocksize),], deparse.level = 0) # add NA rows, to be replaced below if (verbose) pb <- txtProgressBar(min=0, max=nInsert, initial=0, style=3) for(i in seq_len(nInsert)) { extraStop <- extraStoptimes[i] nextStoptime <- sortedEpiStop[match(TRUE, sortedEpiStop > extraStop)] # Find the block (row indexes) into which the extraStop falls rowsMatchedBlock <- which(epidata$stop == nextStoptime) # Split this block up into 2 parts # later part equals original block with start time = extraStop newBlock <- epidata[rowsMatchedBlock,] newBlock$start <- extraStop # earlier part has stop time = extraStop and no events at this time point epidata[rowsMatchedBlock, "stop"] <- extraStop epidata[rowsMatchedBlock, "event"] <- 0 epidata[rowsMatchedBlock, "Revent"] <- 0 # write the new block to epidata (reorder rows later) epidata[lastRow + seq_along(rowsMatchedBlock),] <- newBlock lastRow <- lastRow + length(rowsMatchedBlock) if (verbose) setTxtProgressBar(pb, i) } if (verbose) close(pb) # Adjust BLOCK column sortedEpiStop <- sort(c(sortedEpiStop, extraStoptimes)) epidata$BLOCK <- match(epidata$stop, sortedEpiStop) # Reorder rows by time and id epidata <- epidata[order(epidata$BLOCK, epidata$id), ] row.names(epidata) <- NULL class(epidata) <- oldclass return(epidata) } ################################################################################ # SUMMARY FUNCTION FOR EPIDATA OBJECTS # the epidemic is summarized by the following returned components: # - type: one of "SIR", "SI", "SIRS", "SIS" # - size: number of initially susceptible individuals, which became infected # - initiallyInfected: vector (factor) of initially infected individuals # - neverInfected: vector (factor) of never (during the observation period) # infected individuals # - coordinates: matrix with the coordinates of the individuals (rownames=id's) # - byID: data.frame with time points of events by id (columns time.I, time.R # and optionally time.S) # - counters: data.frame representing the evolution of the epidemic ################################################################################ summary.epidata <- function (object, ...) 
{ class(object) <- "data.frame" # avoid use of [.epidata (not necessary here) # extract coordinates and initially infected individuals idlevels <- levels(object[["id"]]) N <- length(idlevels) firstDataBlock <- object[object$BLOCK == min(object$BLOCK),] coordinates <- as.matrix(firstDataBlock[attr(object, "coords.cols")]) rownames(coordinates) <- as.character(firstDataBlock[["id"]]) initiallyInfected <- firstDataBlock$id[firstDataBlock$atRiskY == 0] m <- length(initiallyInfected) n <- N - m ### summary 1: event table with columns id, time and type (of event, S/I/R) # Extract time points of the S events for each id StimesID <- by(object[c("atRiskY", "stop")], object["id"], function(x) { SeventIdx <- which(diff(x[["atRiskY"]]) == 1) x[["stop"]][SeventIdx] }, simplify=TRUE) names(StimesID) <- paste0(names(StimesID), ":") StimesVec <- c(unlist(StimesID, use.names = TRUE)) # c() if by() returned an array .Sids <- sub("(.+):.*", "\\1", names(StimesVec)) Stimes <- data.frame(id = factor(.Sids, levels = idlevels), stop = StimesVec, type = rep("S", length(StimesVec)), row.names = NULL, check.names = FALSE, stringsAsFactors = FALSE) # Extract time points of the I and R events for each id Itimes <- object[object$event == 1, c("id", "stop")] Itimes[["type"]] <- rep("I", nrow(Itimes)) Rtimes <- object[object$Revent == 1, c("id", "stop")] Rtimes[["type"]] <- rep("R", nrow(Rtimes)) # Combine the three event types into one data.frame eventTable <- rbind(Rtimes, Stimes, Itimes) # need this order for the counters below in the case of SIS: # pseudo-R-event occures infinitesimally before S names(eventTable)[2L] <- "time" eventTable <- eventTable[order(eventTable[["id"]], eventTable[["time"]]), ] eventTable[["type"]] <- factor(eventTable[["type"]], levels=c("S","I","R")) rownames(eventTable) <- NULL ### summary 2: type and size of the epidemic resusceptibility <- length(StimesVec) > 0 epitype <- if (resusceptibility) { Rtimes_notLast <- Rtimes[-which.max(Rtimes[,2]),] onlyPseudoR <- length(setdiff(Rtimes_notLast[,2], Stimes[,2])) == 0 if (onlyPseudoR) "SIS" else "SIRS" } else { if (nrow(Rtimes) > 0) "SIR" else "SI" } isEverInfected <- idlevels %in% initiallyInfected | idlevels %in% unique(eventTable$id[eventTable$type == "I"]) isNeverInfected <- !isEverInfected size <- n - sum(isNeverInfected) # everInfected <- factor(idlevels[isEverInfected], levels = idlevels) neverInfected <- factor(idlevels[isNeverInfected], levels = idlevels) ### summary 3: eventTable by id in wide form byID_everInfected <- if (nrow(eventTable) == 0) { data.frame(id = factor(character(0), levels = idlevels), time.I = numeric(0), row.names = NULL, check.names = FALSE, stringsAsFactors = FALSE) } else if (!resusceptibility) { .res <- reshape(eventTable, direction = "wide", timevar = "type", idvar = "id") attr(.res, "reshapeWide") <- NULL if ("time.I" %in% names(.res)) { .res[c("id", "time.I", "time.R")] # ensure natural order } else { # degenerate case: only R (and S) events in data cbind(.res[1L], "time.I" = NA_real_, .res[-1L]) } } else { eventTable3 <- if (m > 0) { # workaround for initially infected rbind(data.frame(id = initiallyInfected, time = NA_real_, type = "I", row.names = NULL, check.names = FALSE, stringsAsFactors = FALSE), eventTable) } else eventTable rowsPerId <- table(eventTable3[["id"]]) modulo3 <- rowsPerId %% 3 ## if this is 1, we need to append NAs for R and S events ## if 2, only append NA for the final S (occurs for SIRS, not SIS) rest1 <- modulo3 == 1 rest12 <- modulo3 >= 1 missingR <- data.frame(id = 
names(rowsPerId)[rest1], time = rep(NA_real_, sum(rest1)), type = rep("R", sum(rest1)), row.names = NULL, check.names = FALSE, stringsAsFactors = FALSE) missingS <- data.frame(id = names(rowsPerId)[rest12], time = rep(NA_real_, sum(rest12)), type = rep("S", sum(rest12)), row.names = NULL, check.names = FALSE, stringsAsFactors = FALSE) eventTable3 <- rbind(eventTable3, missingR, missingS) eventTable3 <- eventTable3[order(eventTable3[["id"]]),] .res <- data.frame( eventTable3[eventTable3$type == "I", c("id", "time")], eventTable3[eventTable3$type == "R", "time", drop = FALSE], eventTable3[eventTable3$type == "S", "time", drop = FALSE], row.names = NULL, check.names = FALSE, stringsAsFactors = FALSE ) names(.res) <- c("id", paste("time", c("I", "R", "S"), sep=".")) .res } byID_neverInfected <- data.frame(id = neverInfected, time.I = rep(NA_real_, n-size), time.R = rep(NA_real_, n-size), time.S = rep(NA_real_, n-size), row.names = NULL, check.names = FALSE) byID_all <- rbind(byID_everInfected, byID_neverInfected[names(byID_everInfected)]) byID <- byID_all[order(byID_all[["id"]]),] rownames(byID) <- NULL ### summary 4: upgrade eventTable with ### evolution of numbers of susceptibles, infectious and removed counters <- eventTable[order(eventTable[["time"]]),c("time", "type", "id")] init <- data.frame(time = attr(object, "timeRange")[1L], type = factor(NA_character_, levels(counters$type)), id = factor(NA_character_, levels(counters$id)), nSusceptible = n, nInfectious = m, nRemoved = 0L) cumulatedReSusceptibility <- cumsum(counters[["type"]] == "S") cumulatedInfections <- cumsum(counters[["type"]] == "I") cumulatedRemovals <- cumsum(counters[["type"]] == "R") counters[["nSusceptible"]] <- init[["nSusceptible"]] - cumulatedInfections + cumulatedReSusceptibility counters[["nInfectious"]] <- init[["nInfectious"]] + cumulatedInfections - cumulatedRemovals counters[["nRemoved"]] <- init[["nRemoved"]] + cumulatedRemovals - cumulatedReSusceptibility counters <- rbind(init, counters) rownames(counters) <- NULL ### return the components in a list res <- list(type = epitype, size = n - sum(isNeverInfected), initiallyInfected = initiallyInfected, neverInfected = neverInfected, coordinates = coordinates, byID = byID, counters = counters) class(res) <- "summary.epidata" attr(res, "eventTimes") <- attr(object, "eventTimes") attr(res, "timeRange") <- attr(object, "timeRange") res } ################################################################################ # PRINT METHOD FOR 'EPIDATA' OBJECTS ################################################################################ print.epidata <- function (x, ...) { cat("\nHistory of an epidemic\n") cat("Number of individuals:", nlevels(x[["id"]]), "\n") cat("Time range:", paste(attr(x, "timeRange"), collapse = " -- "), "\n") cat("Number of infections:", length(attr(x, "eventTimes")), "\n\n") print.data.frame(x, ...) cat("\n") invisible(x) } ################################################################################ # PRINT METHOD FOR THE SUMMARY OF 'EPIDATA' OBJECTS ################################################################################ print.summary.epidata <- function(x, ...) 
{
    cat("\nAN", x$type, "EPIDEMIC\n")
    cat(" Time range:", paste(attr(x, "timeRange"), collapse = " -- "), "\n")
    cat(" Number of individuals:", nlevels(x$initiallyInfected), "\n")
    cat(" ", length(x$initiallyInfected), "initially infected individuals")
    if (length(x$initiallyInfected) > 0) {
        cat(":\n ")
        str(as.character(x$initiallyInfected), give.head = FALSE,
            vec.len = 100, strict.width = "wrap", indent.str = " ")
    } else cat("\n")
    cat(" ", length(x$neverInfected), "never infected individuals")
    if (length(x$neverInfected) > 0) {
        cat(":\n ")
        str(as.character(x$neverInfected), give.head = FALSE,
            vec.len = 100, strict.width = "wrap", indent.str = " ")
    } else cat("\n")
    cat(" Size of the epidemic:", x$size, "\n")
    if (x$type %in% c("SIRS", "SIS")) {
        cat(" Number of infections:", length(attr(x, "eventTimes")), "\n")
    }
    dimc <- dim(x$counters)
    cat("\n$ counters ('data.frame',", dimc[1L], "x", dimc[2L], "):",
        "evolution of the epidemic:\n")
    counters2print <- if (dimc[1] > 6L) {
        tmp <- format.data.frame(x$counters[c(1:3,1,dimc[1]-(1:0)),],
                                 na.encode = FALSE)
        tmp[4,] <- c("[....]", "", "", "", "", "")
        rownames(tmp)[4] <- ""
        as.matrix(tmp)
    } else {
        x$counters
    }
    print(counters2print, quote = FALSE, right = TRUE, na.print = "")
    cat("\n")
    invisible(x)
}
surveillance/R/algo_hmm.R0000644000176200001440000001145312375723257015076 0ustar liggesusers###################################################
### chunk number 1:
###################################################
algo.hmm <- function(disProgObj, control = list(range=range, Mtilde=-1,
                       noStates=2, trend=TRUE, noHarmonics=1,
                       covEffectEqual=FALSE, saveHMMs = FALSE,
                       extraMSMargs=list() )){

  # check if the msm package is available
  if (!requireNamespace("msm")) {
      stop("the HMM method requires package ", sQuote("msm"))
  }

  # Set the default values if not yet set
  if(is.null(control$Mtilde)){ control$Mtilde <- -1 }
  if(is.null(control$noStates)){ control$noStates <- 2 }
  if(is.null(control$trend)){ control$trend <- TRUE }
  if(is.null(control$noHarmonics)){ control$noHarmonics <- 1 }
  if(is.null(control$covEffectEqual)){ control$covEffectEqual <- FALSE }
  if(is.null(control$saveHMMs)){ control$saveHMMs <- FALSE }
  if(is.null(control$extraMSMargs)){ control$extraMSMargs <- list() }

  #Stop if not enough for estimation
  if(min(control$range) < 2) {
    stop("Error: Too few values as reference values")
  }

  # initialize the necessary vectors
  alarm <- matrix(data = 0, nrow = length(control$range), ncol = 1)
  upperbound <- matrix(data = 0, nrow = length(control$range), ncol = 1)
  control$hmms <- list()

  ##############################################
  #Repeat for each time point to monitor on-line
  ##############################################
  for (i in 1:length(control$range)) {
    #Function is so slow that some sort of performance indicator is usually necessary
    cat(paste("i=",i," (out of ",length(control$range),")\n",sep=""))

    #Initialize observations for each round -- can be done sequentially
    first <- ifelse(control$Mtilde== -1, 1, max(control$range[i]-control$Mtilde+1,1))
    t <- first:control$range[i]
    observed <- disProgObj$observed[t]

    #Init data
    counts <- data.frame(observed, t)
    names(counts) <- c("observed","t")

    #Initialize formula
    formulaStr <- ifelse(control$trend, "~ 1 + t ", "~ 1 ")
    #Create formula and add harmonics as covariates -- this could be done recursively?
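    ## For illustration: with trend = TRUE and noHarmonics = 2, the loop below
    ## builds formulaStr as "~ 1 + t + cos1t + sin1t + cos2t + sin2t", where
    ## the cosjt/sinjt columns are the harmonic covariates added to 'counts'.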
    for (j in seq_len(control$noHarmonics)) {
      counts[,paste("cos",j,"t",sep="")] <- cos(2*j*pi*(t-1)/disProgObj$freq)
      counts[,paste("sin",j,"t",sep="")] <- sin(2*j*pi*(t-1)/disProgObj$freq)
      formulaStr <- paste(formulaStr,"+ cos",j,"t + sin",j,"t ",sep="")
    }

    #Obtain crude inits
    q <- quantile(observed,seq(0,1,length=control$noStates+1))
    lvl <- cut(observed,breaks=q,include.lowest=TRUE)
    crudeMean <- as.numeric(tapply(observed, lvl, mean))

    hcovariates <- list()
    hmodel <- list()
    for (j in seq_len(control$noStates)) {
      hcovariates[[j]] <- as.formula(formulaStr)
      val <- crudeMean[j]
      #Substitution necessary, as hmmPois does lazy evaluation of rate argument
      hmodel[[j]] <- eval(substitute(msm::hmmPois(rate=val),list(val=crudeMean[j])))
    }

    #Any constraints on the parameters of the covariates for the different states
    hconstraint <- list()
    if (control$covEffectEqual) {
      hconstraint <- list(t=rep(1,control$noStates))
      for (j in seq_len(control$noHarmonics)) {
        hconstraint[[paste("sin",j,"t",sep="")]] <- rep(1,control$noStates)
        hconstraint[[paste("cos",j,"t",sep="")]] <- rep(1,control$noStates)
      }
    }

    #Prepare object for msm fitting
    msm.args <- list(formula = observed ~ t, data = counts,
                     #HMM with "noStates" states having equal initial values
                     qmatrix = matrix(1/control$noStates,control$noStates,control$noStates),
                     #y|x \sim Po( \mu[t] ) with some initial values
                     hmodel = hmodel,
                     #Models for \log \mu_t^1 and \log \mu_t^2
                     hcovariates = hcovariates,
                     #Force the effects of the trend and harmonics to be equal for all states
                     hconstraint=hconstraint)
    #Add additional msm arguments
    msm.args <- modifyList(msm.args, control$extraMSMargs)

    # fit the HMM
    hmm <- do.call(what=msm::msm, args=msm.args)

    #In case the model fits should be saved.
    if (control$saveHMMs) {
      control$hmms[[i]] <- hmm
    }

    #If most probable state of current time point (i.e. last obs) equals the
    #highest state then do alarm
    # print(observed)
    # print(matrix(viterbi.msm(hmm)$fitted,ncol=1))
    alarm[i] <- msm::viterbi.msm(hmm)$fitted[length(t)] == control$noStates

    #Upperbound does not have any meaning -- compute posterior probability!
    upperbound[i] <- 0
  }

  #Add name and data name to control object.
  control$name <- "hmm"
  control$data <- paste(deparse(substitute(disProgObj)))
  #no need for hmm object -- control$hmm <- hmm

  # return alarm and upperbound vectors
  result <- list(alarm = alarm, upperbound = upperbound,
                 disProgObj=disProgObj, control=control)

  class(result) = "survRes" # for surveillance system result
  return(result)
}
surveillance/R/epidataCS.R0000644000176200001440000005373714006000011015124 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
###
### Data structure for CONTINUOUS SPATIO-temporal infectious disease case data
### and a spatio-temporal grid of endemic covariates
###
### Copyright (C) 2009-2018,2021 Sebastian Meyer
### $Revision: 2636 $
### $Date: 2021-02-01 14:20:09 +0100 (Mon, 01.
Feb 2021) $ ################################################################################ ###################################################################### # MAIN GENERATOR FUNCTION FOR epidataCS OBJECTS # PARAMS: # events: SpatialPointsDataFrame of cases with obligatory columns # time: time point of event # tile: reference to spatial unit (tile) in stgrid, where the event is located # type: optional type of event (-> marked twinstim). will be converted to a factor variable. # eps.t: maximal temporal influence radius (e.g. length of infectious period, time to culling, etc.), may be Inf # eps.s: maximal spatial influence radius (e.g. 100 [km]), may be Inf # The remaining columns are further marks of the event, e.g. sex, age of infected person (-> epidemic covariates) # The column names ".obsInfLength", ".bdist", ".influenceRegion", and ".sources" are reserved. # ".obsInfLength": observed length of the infectious period (being part [0,T]) # ".bdist": minimal distance of the event locations to the boundary # ".influenceRegion": object of class "owin", the intersection of W with b(s,eps.s), with origin at s # ".sources": potential sources of infection # stgrid: data.frame with obligatory columns # tile: ID of spatial unit (e.g. id of municipality) # start, stop: temporal interval # area: area of the spatial unit (tile) # The remaining columns are endemic covariates. # The column name "BLOCK" is reserved (indexing the time intervals of stgrid). # W: SpatialPolygons. Observation region. Must have same proj4string as events. # qmatrix: square indicator matrix (0/1 or TRUE/FALSE) for possible transmission between the event types. will be internally converted to logical. Defaults to an independent spread of the event types. # nCircle2Poly: accuracy (number of edges) of the polygonal approximation of a circle # T: end of observation period (=last stop time). Must be specified if only the # start but not the stop times are supplied in stgrid (-> auto-generation of stop-times). # clipper: engine to use for computing polygon intersections. 
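# Example (illustrative sketch only; 'myevents', 'mystgrid' and 'myW' are
# hypothetical objects in the formats described above):
#   mydata <- as.epidataCS(events = myevents, stgrid = mystgrid, W = myW,
#                          qmatrix = diag(2), nCircle2Poly = 32)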
###################################################################### obligColsNames_events <- c("time", "tile", "type", "eps.t", "eps.s") obligColsNames_stgrid <- c("start", "stop", "tile", "area") reservedColsNames_events <- c(".obsInfLength", ".sources", ".bdist", ".influenceRegion", "BLOCK", "start") reservedColsNames_stgrid <- c("BLOCK") as.epidataCS <- function (events, stgrid, W, qmatrix = diag(nTypes), nCircle2Poly = 32, T = NULL, clipper = c("polyclip", "rgeos"), verbose = interactive()) { clipper <- match.arg(clipper) # Check and SORT events if (verbose) cat("\nChecking 'events':\n") events <- check_events(events, verbose = verbose) # Check and SORT stgrid if (verbose) cat("Checking 'stgrid':\n") tiles <- NULL # FIXME: add argument to as.epidataCS stgrid <- if (missing(stgrid) && inherits(tiles, "SpatialPolygons")) { if (verbose) cat("\t(missing, using time-constant 'tiles' grid)\n") check_stgrid(tiles2stgrid(tiles, start=0, T=T), verbose = FALSE) } else { check_stgrid(stgrid, T, verbose = verbose) } # Check class of W and consistency of area if (verbose) cat("Checking 'W' ...\n") W <- check_W(W, area.other = sum(stgrid[["area"]][seq_len(nlevels(stgrid$tile))]), other = "stgrid") stopifnot(identicalCRS(W, events)) # Check qmatrix if (verbose) cat("Checking 'qmatrix' ...\n") typeNames <- levels(events$type) nTypes <- length(typeNames) # default value of qmatrix depends on nTypes qmatrix <- checkQ(qmatrix, typeNames) # Check nCircle2Poly stopifnot(isScalar(nCircle2Poly)) nCircle2Poly <- as.integer(nCircle2Poly) # Small helper function converting event index to (time, tile, type) string eventidx2string <- function (eventIdx) { with(events@data, paste(c("time", "tile", "type"), "=", c(time[eventIdx], dQuote(tile[eventIdx]), dQuote(type[eventIdx])), collapse = ", ")) } # Check that all events are part of W if (verbose) cat("Checking if all events are part of 'W' ...\n") WIdxOfEvents <- over(events, W) if (eventNotInWidx <- match(NA, WIdxOfEvents, nomatch = 0L)) { stop("the event at (", eventidx2string(eventNotInWidx), ") is not ", "inside 'W'") } # Some basic quantities nEvents <- length(events) timeRange <- with(stgrid, c(start[1], stop[length(stop)])) # Are events covered by stgrid? if (verbose) { cat("Checking if all events are covered by 'stgrid' ...\n") ## surveillance > 1.16.0: prehistory events are allowed => BLOCK is NA if (events$time[1L] <= timeRange[1L]) { cat(" Note: ", sum(events$time <= timeRange[1L]), " prehistory events (time <= ", timeRange[1L], ")\n", sep = "") } } if (events$time[nEvents] > timeRange[2L]) { stop("found ", sum(events$time > timeRange[2L]), " events beyond 'stgrid' (time > ", timeRange[2L], ")") } # Are all events$tile references really part of the stgrid? 
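    # (re-levelling with levels(stgrid$tile) below maps any tile reference
    #  that is unknown to 'stgrid' to NA, which is then caught as an error)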
.events.tile <- factor(events$tile, levels = levels(stgrid$tile)) if (missingSCellIdx <- match(NA, .events.tile, nomatch = 0L)) { stop("the 'events$tile' entry \"", events$tile[missingSCellIdx], "\"", " is not a valid level of 'stgrid$tile'") } events$tile <- .events.tile # Map events to corresponding grid cells ## FIXME: could use plapply() but then also need a .parallel argument if (verbose) cat("Mapping events to 'stgrid' cells ...\n") withPB <- verbose && interactive() gridcellsOfEvents <- integer(nEvents) if (withPB) pb <- txtProgressBar(min=0, max=nEvents, initial=0, style=3) for (i in seq_len(nEvents)) { gridcellsOfEvents[i] <- gridcellOfEvent(events$time[i], events$tile[i], stgrid) if (withPB) setTxtProgressBar(pb, i) } if (withPB) close(pb) # Attach endemic covariates from stgrid to events if (verbose) cat("Attaching endemic covariates from 'stgrid' to 'events' ...\n") stgridIgnoreCols <- match(setdiff(obligColsNames_stgrid, "start"), names(stgrid)) copyCols <- setdiff(seq_along(stgrid), stgridIgnoreCols) reservedColsIdx <- na.omit(match(names(stgrid)[copyCols], names(events@data), nomatch=NA_integer_)) if (length(reservedColsIdx) > 0L) { warning("in 'events@data', the existing columns with names of endemic ", "covariates from 'stgrid' (", paste0("'", names(events@data)[reservedColsIdx], "'", collapse=", "), ") have been replaced") events@data <- events@data[-reservedColsIdx] } events@data <- cbind(events@data, stgrid[gridcellsOfEvents, copyCols]) # Calculate observed infection length = min(T-time, eps.t) for use in log-likelihood events$.obsInfLength <- with(events@data, pmin(timeRange[2]-time, eps.t)) # Determine potential source events (infective individuals) of each event if (verbose) cat("Determining potential event sources ...\n") events$.sources <- determineSources( eventTimes = events$time, eps.t = events$eps.t, eventCoords = coordinates(events), eps.s = events$eps.s, eventTypes = events$type, qmatrix = qmatrix) # Calculate minimal distance of event locations from the polygonal boundary if (verbose) cat("Calculating the events' distances to the boundary ...\n") Wowin <- SpP2owin(W) events$.bdist <- bdist(coordinates(events), Wowin) # Construct spatial influence regions around events if (verbose) cat("Constructing spatial influence regions around events ...\n") events$.influenceRegion <- if (clipper == "polyclip") { .influenceRegions(events, Wowin, nCircle2Poly, clipper=clipper) } else .influenceRegions(events, W, nCircle2Poly, clipper=clipper) # Return components in a list of class "epidataCS" res <- list(events = events, stgrid = stgrid, W = W, qmatrix = qmatrix) class(res) <- "epidataCS" if (verbose) cat("Done.\n\n") return(res) } ###################################################################### # HELPER FUNCTIONS FOR as.epidataCS ###################################################################### ### CHECK FUNCTION FOR events ARGUMENT IN as.epidataCS check_events <- function (events, dropTypes = TRUE, verbose = TRUE) { # Check class and spatial dimensions stopifnot(inherits(events, "SpatialPointsDataFrame")) if (ncol(events@coords) != 2L) { stop("only two spatial dimensions are supported") } # check suitability of Euclidean geometry if (identical(FALSE, is.projected(events))) { # is.projected may return NA warning("\"epidataCS\" expects planar coordinates; see 'spTransform'") } # Check existence of type column if (verbose) cat("\tChecking 'type' column ... 
") events$type <- if ("type" %in% names(events)) { if (dropTypes) factor(events$type) else as.factor(events$type) } else { if (verbose) cat("Setting 'type' to 1 for all events.") factor(rep.int(1L,nrow(events@coords))) } if (verbose) cat("\n") # Check obligatory columns obligColsIdx <- match(obligColsNames_events, names(events), nomatch = NA_integer_) if (any(obligColsMissing <- is.na(obligColsIdx))) { stop("missing obligatory columns in 'events@data': ", paste(obligColsNames_events[obligColsMissing], collapse = ", ")) } # Check other columns on reserved names reservedColsIdx <- na.omit(match(reservedColsNames_events, names(events), nomatch=NA_integer_)) if (length(reservedColsIdx) > 0L) { warning("in 'events@data', the existing columns with reserved names (", paste0("'", names(events)[reservedColsIdx], "'", collapse=", "), ") have been replaced") events@data <- events@data[-reservedColsIdx] } # Check that influence radii are numeric and positive (also not NA) if (verbose) cat("\tChecking 'eps.t' and 'eps.s' columns ...\n") with(events@data, stopifnot(is.numeric(eps.t), eps.t > 0, is.numeric(eps.s), eps.s > 0)) # Transform time into a numeric variable if (verbose) cat("\tConverting event time into a numeric variable ...\n") events$time <- as.numeric(events$time) stopifnot(!is.na(events$time)) # Check event times for ties if (verbose) cat("\tChecking event times for ties ...\n") timeIsDuplicated <- duplicated(events$time) if (any(timeIsDuplicated)) { duplicatedTimes <- sort.int(unique(events$time[timeIsDuplicated])) warning("detected concurrent events at ", length(duplicatedTimes), " time point", if (length(duplicatedTimes) > 1L) "s", ": ", paste(head(duplicatedTimes, 6L), collapse = ", "), if (length(duplicatedTimes) > 6L) ", ...") } # Sort events chronologically if (verbose) cat("\tSorting events ...\n") events <- events[order(events$time),] # First obligatory columns then remainders (epidemic covariates) obligColsIdx <- match(obligColsNames_events, names(events@data)) covarColsIdx <- setdiff(seq_along(events@data), obligColsIdx) events@data <- events@data[c(obligColsIdx, covarColsIdx)] events@coords.nrs <- numeric(0L) # forget index of coordinate columns # Done. 
    return(events)
}


### CHECK FUNCTION FOR stgrid ARGUMENT IN as.epidataCS

check_stgrid <- function (stgrid, T, verbose = TRUE)
{
    # Check class
    stopifnot(inherits(stgrid, "data.frame"))

    # Check obligatory columns
    autostop <- FALSE
    if (is.null(stgrid[["stop"]])) {
        if (is.null(T)) stop("'T' must be specified for auto-generation ",
                             "of 'stop' column in 'stgrid'")
        stopifnot(isScalar(T))
        autostop <- TRUE
        stgrid$stop <- NA_real_
    }
    obligColsIdx <- match(obligColsNames_stgrid, names(stgrid), nomatch = NA_integer_)
    if (any(obligColsMissing <- is.na(obligColsIdx))) {
        stop("missing obligatory columns in 'stgrid': ",
             paste(obligColsNames_stgrid[obligColsMissing], collapse = ", "))
    }

    # Check other columns on reserved names
    reservedColsIdx <- na.omit(match(reservedColsNames_stgrid, names(stgrid),
                                     nomatch=NA_integer_))
    if (length(reservedColsIdx) > 0L) {
        warning("in 'stgrid', the existing columns with reserved names (",
                paste0("'", names(stgrid)[reservedColsIdx], "'", collapse=", "),
                ") have been replaced")
        stgrid <- stgrid[-reservedColsIdx]
    }

    # Transform tile into a factor variable
    # (also removing unused levels if it was a factor)
    if (verbose) cat("\tConverting 'tile' into a factor variable ...\n")
    stgrid$tile <- factor(stgrid$tile)

    # Transform start times and area into numeric variables
    stgrid$start <- as.numeric(stgrid$start)
    stgrid$area <- as.numeric(stgrid$area)

    # Check stop times
    stgrid$stop <- if (autostop) {
        # auto-generate stop times from start times and T
        if (verbose) cat("\tAuto-generating 'stop' column ...\n")
        starts <- sort(unique(stgrid$start))
        if (T <= starts[length(starts)]) {
            stop("'T' must be larger than the last 'start' time in 'stgrid'")
        }
        stops <- c(starts[-1], T)
        stops[match(stgrid$start, starts)]
    } else {
        as.numeric(stgrid$stop)
    }

    # chronological data.frame of unique periods
    histIntervals <- unique(stgrid[c("start", "stop")])
    histIntervals <- histIntervals[order(histIntervals[,1L]),]
    nBlocks <- nrow(histIntervals)

    if (!autostop) {
        # Check start/stop consistency
        if (verbose) cat("\tChecking start/stop consistency ...\n")
        if (any(histIntervals[,2L] <= histIntervals[,1L])) {
            stop("stop times must be greater than start times")
        }
        startStopCheck <- histIntervals[-1L,1L] != histIntervals[-nBlocks,2L]
        if (startStopCheckIdx <- match(TRUE, startStopCheck, nomatch = 0)) {
            stop("inconsistent start/stop times: time intervals not consecutive ",
                 "at stop time ", histIntervals[startStopCheckIdx,2L])
        }
    }

    # Add BLOCK id
    stgrid$BLOCK <- match(stgrid$start, histIntervals[,1L])

    # Check that we have a full BLOCK x tile grid
    if (verbose) cat("\tChecking if the grid is complete ...\n")
    blocksizes <- table(stgrid$BLOCK)
    tiletable <- table(stgrid$tile)
    if (length(unique(blocksizes)) > 1L || length(unique(tiletable)) > 1L) {
        stop("'stgrid' is not a full grid")
    }

    # First column BLOCK, then obligCols, then remainders (endemic covariates)
    if (verbose) cat("\tSorting the grid by time and tile ...\n")
    BLOCKcolIdx <- match("BLOCK", names(stgrid))
    obligColsIdx <- match(obligColsNames_stgrid, names(stgrid))
    covarColsIdx <- setdiff(seq_along(stgrid), c(BLOCKcolIdx, obligColsIdx))
    stgrid <- stgrid[c(BLOCKcolIdx, obligColsIdx, covarColsIdx)]

    # Sort by BLOCK and tile
    stgrid <- stgrid[order(stgrid$BLOCK, stgrid$tile),]

    # # Get row indexes of the blocks' first/last rows
    # beginBlock <- match(seq_len(nBlocks), stgrid[["BLOCK"]])
    # endBlock <- c(beginBlock[-1L]-1L, nrow(stgrid))

    # Done.
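    ## at this point, 'stgrid' holds one row per BLOCK x tile combination,
    ## sorted by BLOCK and then tile, with BLOCK as its first column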
return(stgrid) } ### CHECK FUNCTION FOR W ARGUMENT IN as.epidataCS check_W <- function (W, area.other = NULL, other, tolerance = 0.001) { W <- as(W, "SpatialPolygons") # i.e. drop data if a SpatialPolygonsDataFrame if (!is.null(area.other) && area.other > 0) { check_W_area(W, area.other, other, tolerance) } return(W) } check_W_area <- function (W, area.other, other, tolerance = 0.001) { area.W <- areaSpatialPolygons(W) if (!isTRUE(all.equal.numeric(area.other, area.W, tolerance = tolerance, check.attributes = FALSE))) warning("area of 'W' (", area.W, ") differs from ", "total tile area in '", other, "' (", area.other, ")") } ### CHECK FUNCTION FOR tiles ARGUMENT IN simEpidataCS() check_tiles <- function (tiles, levels, events = NULL, areas.stgrid = NULL, W = NULL, keep.data = FALSE, tolerance = 0.05) { stopifnot(inherits(tiles, "SpatialPolygons"), is.vector(levels, mode="character")) tileIDs <- row.names(tiles) ## check completeness of tiles if (!identical(tileIDs, levels)) { if (any(missingtiles <- !levels %in% tileIDs)) stop(sum(missingtiles), " regions are missing in 'tiles', ", "check 'row.names(tiles)'") ## order tiles by levels and drop any extra tiles tiles <- tiles[levels, ] } ## drop data (also for suitable over-method in check_tiles_events) .tiles <- as(tiles, "SpatialPolygons") ## check tile specification of events and identical projection if (!is.null(events)) { check_tiles_events(.tiles, events) } ## check areas areas.tiles <- areaSpatialPolygons(tiles, byid = TRUE) if (!is.null(areas.stgrid)) { check_tiles_areas(areas.tiles, areas.stgrid, tolerance=tolerance) } if (!is.null(W)) { stopifnot(identicalCRS(tiles, W)) check_W_area(W, area.other=sum(areas.tiles), other="tiles", tolerance=tolerance) } ## done if (keep.data) tiles else .tiles } check_tiles_events <- function (tiles, events) { tiles <- as(tiles, "SpatialPolygons") # remove potential data for over() stopifnot(inherits(events, "SpatialPointsDataFrame"), identicalCRS(tiles, events)) tileIDs <- row.names(tiles) eventIDs <- row.names(events) ## get polygon ID's of events (via overlay) eventtiles <- tileIDs[over(events, tiles)] if (length(which_not_in_tiles <- which(is.na(eventtiles)))) warning("some of 'events' are not within 'tiles': ", paste0("\"", eventIDs[which_not_in_tiles], "\"", collapse=", ")) if (!is.null(events@data[["tile"]])) { which_disagree <- setdiff( which(eventtiles != as.character(events$tile)), which_not_in_tiles) if (length(which_disagree)) message("'over(events, tiles)' disagrees with 'events$tile' for events ", paste0("\"", eventIDs[which_disagree], "\"", collapse=", ")) } invisible() } check_tiles_areas <- function (areas.tiles, areas.stgrid, tolerance = 0.05) { areas_all_equal <- all.equal.numeric(areas.stgrid, areas.tiles, tolerance = tolerance, check.attributes = FALSE) if (!isTRUE(areas_all_equal)) warning("tile areas in 'stgrid' differ from areas of 'tiles': ", areas_all_equal) } ### CONSTRUCT SPATIAL INFLUENCE REGIONS AROUND EVENTS # An influenceRegion is an object of class "owin" with origin # at the event (over which we have to integrate by a cubature rule) # An attribute "area" gives the area of the influenceRegion. # If it is actually a circular influence region, then there is an attribute # "radius" denoting the radius of the influence region. 
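# For events with eps.s exceeding the diameter of W, no intersection needs to
# be computed: the influence region is then the whole of W, shifted so that
# the event location becomes the origin.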
# Argument 'W' can be of class "owin" (preferred) or "SpatialPolygons" # (especially for clipper="rgeos") .influenceRegions <- function (events, W, npoly, maxExtent = NULL, clipper = "polyclip") { Wowin <- if (inherits(W, "owin")) W else SpP2owin(W) if (is.null(maxExtent)) maxExtent <- diameter.owin(Wowin) doIntersection <- switch( clipper, # which package to use for polygon intersection "polyclip" = function (center, eps) intersectPolyCircle.owin(Wowin, center, eps, npoly), "rgeos" = function (center, eps) SpP2owin( intersectPolyCircle.SpatialPolygons( as(W, "SpatialPolygons"), center, eps, npoly)), stop("unsupported polygon clipping engine: '", clipper, "'") ) eventCoords <- coordinates(events) ## FIXME: could use plapply() but then also need a .parallel argument res <- mapply( function (x, y, eps, bdist) { center <- c(x,y) ## if eps is very large, the influence region is the whole region of W iR <- shift.owin( if (eps > maxExtent) Wowin else doIntersection(center, eps), -center) ## if iR is actually a circle of radius eps, attach eps as attribute attr(iR, "area") <- if (eps <= bdist) { attr(iR, "radius") <- eps pi * eps^2 } else area.owin(iR) iR }, eventCoords[,1], eventCoords[,2], events$eps.s, events$.bdist, SIMPLIFY = FALSE, USE.NAMES = FALSE) attr(res, "nCircle2Poly") <- npoly attr(res, "clipper") <- clipper res } ### CREATE stgrid TEMPLATE FROM tiles tiles2stgrid <- function (tiles, start, T) { start <- sort.int(unique.default(start)) stgrid <- expand.grid(tile = row.names(tiles), start = start, KEEP.OUT.ATTRS = FALSE, stringsAsFactors = TRUE) cbind(stgrid, stop = rep(c(start[-1L], T), each = length(tiles)), area = rep(areaSpatialPolygons(tiles, byid = TRUE), length(start))) } surveillance/R/sts_coerce.R0000644000176200001440000001160014026677433015435 0ustar liggesusers################################################################################ ### Conversion between "ts" and "sts", and from "sts" to "data.frame" ### ### Copyright (C) 2014 Michael Hoehle, 2015-2017,2019-2021 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at https://www.R-project.org/Licenses/. ################################################################################ ### Convert a simple "ts" object to an "sts" object setAs(from = "ts", to = "sts", def = function (from) { ## Extract frequency and start from the "ts" object freq <- frequency(from) start <- start(from) if (length(start) == 1) stop("could not convert time series start() to (year, index) form") ## Remove "tsp" attribute and "ts"/"mts" class tsp(from) <- NULL ## Create the sts object .sts(observed = from, start = start, freq = freq) }) ### Convert an "sts" object to a simple "ts" object as.ts.sts <- function (x, ...) { ts(data = x@observed, start = x@start, frequency = x@freq) } setAs(from = "sts", to = "ts", def = function (from) as.ts.sts(from)) ### Convert an "sts" object to an eXtensible Time Series "xts" as.xts.sts <- function (x, order.by = epoch(x, as.Date = TRUE), ...) { if (!missing(order.by) || x@freq %in% c(52, 365)) { xts::xts(x = x@observed, order.by = order.by, ...) } else { ## frequencies 4 and 12 are nicely handled by the as.xts.ts method xts::as.xts(as.ts.sts(x), ...) } } ### Convert an "sts" object to a data frame suitable for regression as.data.frame.sts <- function(x, row.names = NULL, optional = FALSE, # from the generic tidy = FALSE, as.Date = x@epochAsDate, ...) 
{ if (tidy) return(tidy.sts(x, ...)) #Convert object to data frame and give names res <- data.frame("observed" = x@observed, "epoch" = epoch(x, as.Date = as.Date), "state" = x@state, "alarm" = x@alarm, "upperbound" = x@upperbound, "population" = x@populationFrac, check.names = FALSE) names(res) <- if (ncol(x) > 1) { ## names from data.frame() above should already be as intended namesObs <- colnames(x@observed, do.NULL = FALSE, prefix = "observed") c(paste0("observed.", namesObs), "epoch", paste0("state.", namesObs), paste0("alarm.", namesObs), paste0("upperbound.", namesObs), paste0("population.", namesObs)) } else { c("observed", "epoch", "state", "alarm", "upperbound", "population") } #Find out how many epochs there are each year res$freq <- if (x@epochAsDate && x@freq %in% c(52, 365)) { year <- strftime(epoch(x), if (x@freq == 52) "%G" else "%Y") epochStr <- switch(as.character(x@freq), "52" = "%V", "365" = "%j") maxEpoch <- vapply(X = unique(year), FUN = function (Y) { dummyDates <- as.Date(paste0(Y, "-12-", 26:31)) max(as.numeric(strftime(dummyDates, epochStr))) }, FUN.VALUE = 0, USE.NAMES = TRUE) maxEpoch[year] } else { # just replicate the fixed frequency x@freq } #Add a column denoting the epoch fraction within the current year res$epochInPeriod <- epochInYear(x) / res$freq return(res) } setMethod("as.data.frame", signature(x = "sts"), as.data.frame.sts) ### convert an "sts" object to a "data.frame" in long (tidy) format tidy.sts <- function (x, ...) { unitNames <- colnames(x, do.NULL = FALSE, prefix = "observed") v.names <- c("observed", "state", "alarm", "upperbound", "population") stswide <- as.data.frame(x, tidy = FALSE, as.Date = FALSE) ## nrow(stswide) = nrow(x), i.e., one row per epoch stswide$year <- year(x) stswide$epochInYear <- epochInYear(x) stswide$date <- tryCatch( epoch(x, as.Date = TRUE), # only works for particular values of x@freq error = function (e) as.Date(NA) ) if ((nUnit <- ncol(x)) == 1L) { stslong <- data.frame(stswide, "unit" = factor(unitNames), check.names = FALSE) } else { ## we have observed/population/... columns for each unit varying <- sapply(X = v.names, FUN = paste, unitNames, sep = ".", simplify = FALSE, USE.NAMES = TRUE) stslong <- reshape( data = stswide, direction = "long", varying = varying, v.names = v.names, timevar = "unit", times = unitNames, idvar = "epoch") stslong$unit <- factor(stslong$unit, levels = unitNames) attr(stslong, "reshapeLong") <- NULL } row.names(stslong) <- NULL ## reorder variables (ordering from above differs depending on nUnit) stslong[c("epoch", "unit", "year", "freq", "epochInYear", "epochInPeriod", "date", v.names)] } surveillance/R/sim_background.R0000644000176200001440000000414010662666102016265 0ustar liggesusers################################################### ### chunk number 1: ################################################### # 'sim.seasonalNoise' generates a cyclic model of a poisson distribution # as background data for a simulated timevector. 
#
# Parameters:
#       A - amplitude (range of sinus), default = 1
#       alpha - parameter to move along the y-axis
#               (negative values not allowed), i.e. alpha >= A, default = 1
#       beta - regression coefficient, default = 0
#       phi - factor to create seasonal moves
#               (moves the curve along the x-axis), default = 0
#       length - number of weeks to model
#       frequency - factor to determine the oscillation-frequency, default = 1
#       state - if a state chain is given, it is weighted by the parameter K
#               and influences mu
#       K - weight for outbreaks

sim.seasonalNoise <- function(A = 1, alpha = 1, beta = 0, phi = 0,
                              length, frequency = 1, state = NULL, K = 0){

  t <- 1:length
  # constant factor to transform weeks to the appropriate pi-value.
  omega <- 2 * pi/ 52

  # phi moves the sin along the x-axis.
  if(is.null(state)){ # no state chain
    mu <- exp(A * sin( frequency * omega * (t + phi)) + alpha + beta * t)
  }
  else{ # encounter the state chain
    mu <- exp(A * sin( frequency * omega * (t + phi)) + alpha + beta * t + K * state)
  }

  # create the noise as random numbers of the Poisson distribution
  # with parameter mu
  seasonalBackground <- rpois(length, mu) # get random numbers

  result <- list(seasonalBackground = seasonalBackground, t = t, mu = mu,
                 A = A, alpha = alpha, beta = beta, phi = phi,
                 length = length, frequency = frequency, K = K)
  class(result) = "seasonNoise"
  return(result)
}
surveillance/R/hhh4_calibration.R0000644000176200001440000000345713041377177016513 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
###
### calibrationTest() for "hhh4" fits
###
### Copyright (C) 2015,2017 Sebastian Meyer
### $Revision: 1829 $
### $Date: 2017-01-23 14:00:47 +0100 (Mon, 23. Jan 2017) $
################################################################################

calibrationTest.hhh4 <- function (x,
                                  subset = x$control$subset,
                                  units = seq_len(x$nUnit), ...)
{
    ## perform the calibration test in the specified subset
    res <- calibrationTest.default(
        x = x$stsObj@observed[subset, units, drop = FALSE],
        mu = x$fitted.values[match(subset, x$control$subset), units, drop = FALSE],
        size = psi2size.hhh4(x, subset, units),
        ...)

    ## change "data.name" to be the name of the supplied model
    res$data.name <- deparse(substitute(x))
    res
}

calibrationTest.oneStepAhead <- function (x, units = NULL, ...)
{
    ## perform the calibration test
    res <- if (is.null(units)) {
        calibrationTest.default(
            x = x$observed, mu = x$pred,
            size = psi2size.oneStepAhead(x), ...)
    } else {
        calibrationTest.default(
            x = x$observed[, units, drop = FALSE],
            mu = x$pred[, units, drop = FALSE],
            size = psi2size.oneStepAhead(x)[, units, drop = FALSE],
            ...)
    }

    ## change "data.name" to be the name of the supplied "oneStepAhead" object
    res$data.name <- deparse(substitute(x))
    res
}
surveillance/R/epidataCS_aggregate.R0000644000176200001440000001753713346675125017156 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
###
### Convert "epidataCS" to the (aggregated) classes "epidata" or "sts"
###
### Copyright (C) 2009-2016,2018 Sebastian Meyer
### $Revision: 2217 $
### $Date: 2018-09-14 11:07:33 +0200 (Fri, 14. Sep 2018) $
################################################################################


######################################
### Transform "epidataCS" to "epidata"
######################################

## CAVE: this only generates a SIS epidemic, i.e. atRiskY is set to 1
## immediately after recovery
## length of infectious period is taken from epidataCS$events$eps.t
## fcols are not generated here. these must be generated by a second call to
## twinSIR's as.epidata with desired f. (for safety)
## tileCentroids is a coordinate matrix whose row names are the tile levels

as.epidata.epidataCS <- function (data, tileCentroids, eps = 0.001, ...)
{
    if (!requireNamespace("intervals"))
        stop("conversion from ", dQuote("epidataCS"), " to ", dQuote("epidata"),
             " requires the ", dQuote("intervals"), " package")

    ### generate twinSIR's epidata object from stgrid (no events)

    centroidIdx <- match(levels(data$stgrid$tile), rownames(tileCentroids),
                         nomatch = NA_integer_)
    if (any(is.na(centroidIdx))) {
        stop("some levels of 'data$stgrid$tile' are not available from 'tileCentroids'")
    }
    centroids <- tileCentroids[centroidIdx,]
    if (any(c("xCent", "yCent") %in% names(data$stgrid))) {
        stop("'data$stgrid' already has columns \"xCent\" and \"yCent\"")
    }
    stgrid <- cbind(data$stgrid,
                    atRiskY = 1L, event = 0L, Revent = 0L,
                    xCent = centroids[,1], yCent = centroids[,2]
                    # relies on ordering of stgrid by first BLOCK, then tile
                    )
    names(stgrid)[names(stgrid)=="tile"] <- "id"
    timeRange <- with(stgrid, c(start[1], stop[length(stop)]))

    ### now determine "events" with respect to the tiles

    # individual data
    indItimes <- data$events$time
    if (anyDuplicated(indItimes)) stop("'data$events' has concurrent event times")
    indRtimes <- indItimes + data$events$eps.t
    indInts <- intervals::Intervals(cbind(indItimes, indRtimes, deparse.level = 0L))
    indTiles <- data$events$tile

    # tile data
    tileRows <- tapply(seq_along(indTiles), indTiles, c, simplify = FALSE)
    tileInts <- lapply(tileRows, function (rows) {
        if (length(rows)==0L) { matrix(0,0,2) } else
        if (length(rows)==1L) { as.matrix(indInts[rows]) } else
        as.matrix(intervals::reduce(indInts[rows]))
    })
    tileNames <- rep.int(names(tileInts), sapply(tileInts, nrow))
    tileItimes <- unlist(lapply(tileInts, function(ints) ints[,1]), use.names=FALSE)
    tileRtimes <- unlist(lapply(tileInts, function(ints) ints[,2]), use.names=FALSE)

    # there are possibly Rtimes which equal Itimes of other individuals
    # => break ties by considering Rtime shortly before Itime (arbitrary choice)
    while(length(dup <- which(tileRtimes %in% tileItimes)) > 0L) {
        tileRtimes[dup] <- tileRtimes[dup] - eps
    }
    # now there could be duplicated Rtimes... grml (choose another 'eps' in this case)
    if (anyDuplicated(tileRtimes)) {
        stop("breaking ties introduced duplicated Rtimes")
    }

    ### add additional stop times to stgrid for tile infections and recoveries

    requiredStopTimes <- sort(c(tileItimes, tileRtimes[tileRtimes < timeRange[2L]]))
    requiredStopTimes <- requiredStopTimes[requiredStopTimes > timeRange[1]] # omit prehistory
    class(stgrid) <- c("epidata", "data.frame")
    attr(stgrid, "timeRange") <- timeRange
    cat("Inserting extra stop times in 'stgrid' (this might take a while) ... ")
    evHist <- intersperse(stgrid, requiredStopTimes, verbose=interactive())
    # CAVE: this resets the BLOCK index
    class(evHist) <- "data.frame"
    ### <- THIS IS THE MOST TIME-CONSUMING PART OF THIS FUNCTION !!!
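    ## (each required stop time has to be interspersed for every single tile,
    ##  so the cost grows roughly with the number of tiles times the number of
    ##  inserted stop times)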
cat("Done.\n") ### set event, Revent and atRiskY indicators tileNamesCodes <- match(tileNames, levels(evHist$id)) # event indicator (currently in evHist event==0 everywhere) idxItimes <- match(tileItimes, evHist$stop) - 1L + tileNamesCodes evHist$event[idxItimes] <- 1L # Revent indicator (currently in evHist Revent==0 everywhere) idxRtimes <- match(tileRtimes, evHist$stop) - 1L + tileNamesCodes # (may contain NA's if Revent after last stop) evHist$Revent[idxRtimes] <- 1L # atRiskY indicator .atRiskY <- rep.int(1L, nrow(evHist)) nTiles <- nlevels(evHist$id) nBlocks <- tail(evHist$BLOCK, 1) stopTimes <- unique(evHist$stop) # has length nBlocks for (i in seq_along(tileItimes)) { .Itime <- tileItimes[i] .Rtime <- tileRtimes[i] if (.Rtime <= timeRange[1L]) next # prehistory infection and removal .tileCode <- tileNamesCodes[i] idxsTileInEpi <- seq(.tileCode, by=nTiles, length.out=nBlocks) first0block <- if (.Itime < stopTimes[1L]) 1L else match(.Itime, stopTimes) + 1L last0block <- if (.Rtime > stopTimes[nBlocks]) nBlocks else match(.Rtime, stopTimes) .atRiskY[idxsTileInEpi[first0block:last0block]] <- 0L } evHist$atRiskY <- .atRiskY ### Return final epidata object of twinSIR-type cat("Generating final \"epidata\" object for use with twinSIR ... ") epi <- as.epidata(evHist[-grep("BLOCK", names(evHist))], id.col="id", start.col="start", stop.col="stop", atRiskY.col="atRiskY", event.col="event", Revent.col="Revent", coords.cols=c("xCent","yCent") ) cat("Done.\n") epi } #################################################################### ### Transform "epidataCS" to "sts" by aggregation of cases on stgrid #################################################################### epidataCS2sts <- function (object, freq, start, neighbourhood, tiles = NULL, popcol.stgrid = NULL, popdensity = TRUE) { stopifnot(inherits(object, "epidataCS")) tileLevels <- levels(object$stgrid$tile) if (!is.null(tiles)) { stopifnot(inherits(tiles, "SpatialPolygons"), tileLevels %in% row.names(tiles)) tiles <- tiles[tileLevels,] } ## prepare sts components blocks <- unique(object$stgrid$BLOCK) # epidataCS is sorted eventsByCell <- with(object$events@data, table(BLOCK=factor(BLOCK, levels=blocks), tile)) if (missing(neighbourhood)) { # auto-detect neighbourhood from tiles if (is.null(tiles)) stop("'tiles' is required for auto-generation of 'neighbourhood'") neighbourhood <- poly2adjmat(tiles, zero.policy=TRUE) if (nIslands <- sum(rowSums(neighbourhood) == 0)) message("Note: auto-generated neighbourhood matrix contains ", nIslands, ngettext(nIslands, " island", " islands")) } populationFrac <- if (is.null(popcol.stgrid)) NULL else { stopifnot(is.vector(popcol.stgrid), length(popcol.stgrid) == 1) popByCell <- object$stgrid[[popcol.stgrid]] if (popdensity) popByCell <- popByCell * object$stgrid[["area"]] totalpop <- sum(popByCell[seq_along(tileLevels)]) matrix(popByCell/totalpop, nrow=length(blocks), ncol=length(tileLevels), byrow=TRUE, dimnames=dimnames(eventsByCell)) } ## initialize sts object (sts() constructor discards NULL slots) sts(frequency=freq, start=start, # epoch=seq_along(blocks) [default] ##do not set epoch=blocks as blocks[1] could be >1 (from simulation) observed=unclass(eventsByCell), neighbourhood=neighbourhood, populationFrac=populationFrac, map=tiles) } surveillance/R/spatial_tools.R0000644000176200001440000002305613777627613016200 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### 
Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Auxiliary functions for operations on spatial data ### ### Copyright (C) 2009-2015,2018,2021 Sebastian Meyer ### $Revision: 2602 $ ### $Date: 2021-01-13 18:36:11 +0100 (Wed, 13. Jan 2021) $ ################################################################################ ### Polygonal Approximation of a Disc/Circle discpoly <- function (center, radius, npoly = 64, class = c("Polygon", "owin", "gpc.poly"), hole = FALSE) { class <- match.arg(class) if (class == "owin") { # use spatstat.geom::disc res <- disc(radius=radius, centre=center, mask=FALSE, npoly=npoly) if (hole) { res$bdry[[1]]$x <- rev(res$bdry[[1]]$x) res$bdry[[1]]$y <- rev(res$bdry[[1]]$y) res$bdry[[1]]$hole <- TRUE } return(res) } ## do it myself for the "Polygon" and "gpc.poly" classes stopifnot(radius > 0, isScalar(npoly), npoly > 2) theta <- seq(2*pi, 0, length = npoly+1)[-(npoly+1)] # for clockwise order if (hole) theta <- rev(theta) # for anticlockwise order x <- center[1] + radius * cos(theta) y <- center[2] + radius * sin(theta) switch(class, "Polygon" = Polygon(cbind(c(x,x[1]),c(y,y[1])), hole=hole), "gpc.poly" = { pts <- list(list(x=x, y=y, hole=hole)) if (isClass("gpc.poly") || requireNamespace("rgeos")) { new("gpc.poly", pts = pts) } else { warning("formal class \"gpc.poly\" not available") pts } } ) } ### Wrapper for polyclip or rgeos::gUnaryUnion or maptools::unionSpatialPolygons unionSpatialPolygons <- function (SpP, method = c("rgeos", "polyclip", "gpclib"), ...) { method <- match.arg(method) W <- switch( method, "polyclip" = { tiles_xylist <- xylist(SpP, reverse=FALSE) W_xylist <- polyclip::polyclip(tiles_xylist, tiles_xylist, "union", fillA = "nonzero", fillB = "nonzero", ...) ## FIXME: polyclip() seems to return owin-type vertex order? W_Polygons <- Polygons( lapply(W_xylist, function(p) Polygon(cbind(p$x,p$y)[c(1L,length(p$x):1L),])), ID="1") SpatialPolygons(list(W_Polygons)) }, "rgeos" = rgeos::gUnaryUnion(SpP, ...), "gpclib" = { ## rgeosStatus needed by maptools::unionSpatialPolygons is only ## set in maptools:::.onAttach. Since it is bad practice to do ## library("maptools") in package code (cf. R-exts 1.1.3.1), ## the user has to attach "maptools" manually beforehand if (!"maptools" %in% .packages()) { stop("need 'library(\"maptools\")'; ", "then call surveillance::unionSpatialPolygons") } gpclibCheck() && maptools::gpclibPermit() maptools::unionSpatialPolygons( SpP, IDs = rep.int(1,length(SpP@polygons)), avoidGEOS = TRUE, ...) }) ## ensure that W has exactly the same proj4string as SpP W@proj4string <- SpP@proj4string W } ### internal implementation of as(W, "owin") from polyCub ### to avoid upgrade problems with polyCub <= 0.7.1 referring to old spatstat ### and to avoid calling as(W, "owin") from maptools (depends on load order) SpP2owin <- function (W, ...) owin(poly = xylist(W), ...) 
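## Example (illustrative): a 16-edge polygonal approximation of the unit disc
## as an "owin" object:
## disc16 <- discpoly(center = c(0, 0), radius = 1, npoly = 16, class = "owin")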
### Compute distance from points to a polygonal boundary ## since spatstat 1.56-0, bdist.points() interfaces C-code via ## spatstat.utils:::distppllmin, which is faster than nncross.ppp() bdist <- function (xy, poly) # poly is a polygonal "owin" { bdist.points(ppp(x = xy[,1L], y = xy[,2L], window = poly, check = FALSE)) } ## Example: bdist(coordinates(imdepi$events), as(imdepi$W, "owin")) ### sample n points uniformly on a disc with radius r runifdisc <- function (n, r = 1, buffer = 0) { stopifnot(buffer <= r) rangle <- runif(n, 0, 2*pi) rdist <- r * sqrt(runif(n, (buffer/r)^2, 1)) rdist * cbind(cos(rangle), sin(rangle)) } ### Count number of instances at the same location of a SpatialPoints object ## NOTE: the default multiplicity-method has been integrated into the spatstat ## package which we import multiplicity.Spatial <- function (x) multiplicity(coordinates(x)) ### determines which polygons of a SpatialPolygons object are at the border, ### i.e. have coordinates in common with the spatial union of all polygons polyAtBorder <- function (SpP, snap = sqrt(.Machine$double.eps), method = "rgeos", ...) { SpP <- as(SpP, "SpatialPolygons") W <- unionSpatialPolygons(SpP, method = method, ...) if (length(W@polygons) > 1) warning("unionSpatialPolygons() produced >1 Polygons-components") Wcoords <- unique(do.call("rbind", lapply(W@polygons[[1]]@Polygons, coordinates))) atBorder <- sapply(SpP@polygons, function (x) { coords <- unique(do.call("rbind", lapply(x@Polygons, coordinates))) res <- FALSE for (i in seq_len(nrow(coords))) { if (any(spDistsN1(Wcoords, coords[i,], longlat=FALSE) < snap)) { res <- TRUE break } } res }) names(atBorder) <- row.names(SpP) atBorder } ### sp.layout items for spplot() ## draw labels for Spatial* objects layout.labels <- function (obj, labels = TRUE, plot = FALSE) { stopifnot(inherits(obj, "Spatial")) ## get region labels getLabels <- function (labels) { if (isTRUE(labels)) { row.names(obj) } else if (length(labels) == 1L && (is.numeric(labels) | is.character(labels))) { if (!"data" %in% slotNames(obj)) stop("no data slot to select labels from") obj@data[[labels]] } else labels } ## convert labels argument to a list labels.args <- if (is.list(labels)) { labels } else if (!is.null(labels) && !identical(labels, FALSE)) { list(labels = getLabels(labels)) } else { # labels = FALSE or labels = NULL return(NULL) } ## set default coordinates for panel.text() and parse labels labels.args <- modifyList(list(x = coordinates(obj), labels = TRUE), labels.args) labels.args$labels <- getLabels(labels.args$labels) if (plot) { ## plot labels in the traditional graphics system do.call("text", labels.args) } else { ## return layout item for use by spplot() c("panel.text", labels.args) } } ## draw a scalebar with labels layout.scalebar <- function (obj, corner = c(0.05, 0.95), scale = 1, labels = c(0, scale), height = 0.05, pos = 3, ..., plot = FALSE) { stopifnot(inherits(obj, "Spatial")) BB <- bbox(obj) force(labels) # the default should use the original 'scale' value in km if (identical(FALSE, is.projected(obj))) { ## 'obj' has longlat coordinates, 'scale' is interpreted in kilometres scale <- .scale2longlat(t(rowMeans(BB)), scale) } offset <- BB[, 1L] + corner * apply(BB, 1L, diff.default) textfun <- if (plot) "text" else "panel.text" lis <- list( list("SpatialPolygonsRescale", layout.scale.bar(height = height), offset = offset, scale = scale, fill = c(NA, 1), plot.grid = !plot), list(textfun, x = offset[1L], y = offset[2L], labels = labels[1L], pos = pos, ...), list(textfun, x = 
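## Example (sketch; 'districts' is a hypothetical SpatialPolygonsDataFrame
## with a region-name column "NAME" and a numeric column "POP"):
## spplot(districts, zcol = "POP",
##        sp.layout = layout.labels(districts, labels = "NAME"))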
offset[1L] + scale[1L], y = offset[2L], labels = labels[2L], pos = pos, ...) ) if (plot) { for (li in lis) eval(do.call("call", li)) } else { lis } } .scale2longlat <- function (focusLL, distKM) { ## .destPoint() is copied from the "raster" package by Robert J. Hijmans ## 'p' is a longlat coordinate matrix, 'd' is a vector of distances in metres .destPoint <- function (p, d, b=90, r=6378137) { toRad <- pi/180 lon1 <- p[, 1] * toRad lat1 <- p[, 2] * toRad b <- b * toRad lat2 <- asin(sin(lat1) * cos(d/r) + cos(lat1) * sin(d/r) * cos(b)) lon2 <- lon1 + atan2(sin(b) * sin(d/r) * cos(lat1), cos(d/r) - sin(lat1) * sin(lat2)) lon2 <- (lon2 + pi)%%(2 * pi) - pi cbind(lon2, lat2)/toRad } rightLL <- .destPoint(focusLL, distKM * 1000) rightLL[,1L] - focusLL[,1L] } ### determine the total area of a SpatialPolygons object ## CAVE: sum(sapply(obj@polygons, slot, "area")) ## is not correct if the object contains holes areaSpatialPolygons <- function (obj, byid = FALSE) { if (requireNamespace("rgeos", quietly = TRUE)) { rgeos::gArea(obj, byid = byid) } else { areas <- vapply( X = obj@polygons, FUN = function (p) sum( vapply(X = p@Polygons, FUN = function (x) (1-2*x@hole) * x@area, FUN.VALUE = 0, USE.NAMES = FALSE) ), FUN.VALUE = 0, USE.NAMES = FALSE ) if (byid) setNames(areas, row.names(obj)) else sum(areas) } } surveillance/R/AllGeneric.R0000644000176200001440000001650514027110155015302 0ustar liggesusers ### Define some functions to be S3 generic animate <- function (object, ...) UseMethod("animate") R0 <- function (object, ...) UseMethod("R0") as.epidata <- function (data, ...) UseMethod("as.epidata") intensityplot <- function (x, ...) UseMethod("intensityplot") untie <- function (x, amount, ...) UseMethod("untie") intersectPolyCircle <- function (object, center, radius, ...) UseMethod("intersectPolyCircle") calibrationTest <- function (x, ...) UseMethod("calibrationTest") scores <- function (x, ...) { if (identical(class(x), "list")) { ## backward compatibility with surveillance < 1.10-0 scores.oneStepAhead(x, ...) } else { UseMethod("scores") } } pit <- function (x, ...) UseMethod("pit") ## internal function with methods for "twinSIR" and "simEpidata" getModel <- function (object, ...) UseMethod("getModel") ## list coefficients by component coeflist <- function (x, ...) UseMethod("coeflist") coeflist.default <- function (x, npars, ...) { if (is.null(groupnames <- names(npars))) { stop("'npars' must be named") } f <- factor(rep.int(groupnames, npars), levels = groupnames) split.default(x = x, f = f, drop = FALSE) } ### Declare some existing R functions (which we import) to be S4-generic. ### This is not strictly necessary, but considered better programming style, and ### it avoids messages noting the creation of the generics during package build ### and installation, see the section "Basic Use" in help("setGeneric"). 
setGeneric("plot") setGeneric("aggregate") setGeneric("toLatex") ## data frame-like methods defined in sts.R setGeneric("dim") setGeneric("dimnames") ###################################################################### # Conversion to and from sts objects ###################################################################### #setGeneric("as.sts") setGeneric("as.data.frame") ###################################################################### # Accessing and replacing slots of the "sts" class ###################################################################### #epoch slot setGeneric("epoch", function(x, as.Date=x@epochAsDate) standardGeneric("epoch")) setMethod("epoch", "sts", function(x, as.Date=x@epochAsDate) { if (!as.Date) { # return numeric vector x@epoch } else { # convert to Date format if (x@epochAsDate) { as.Date(x@epoch, origin = "1970-01-01") } else if (x@freq == 12) { # use the first day of every month as.Date(strptime(paste(year(x), epochInYear(x), 1, sep = "-"), format = "%Y-%m-%d")) } else if (x@freq == 52) { # use Mondays ## be consistent with epochInYear(): 'start' means *ISO* year and week! ## Unfortunately, %G and %V are not supported for input via strptime(): ## firstMonday <- strptime(x = paste0(x@start[1L], "-W", x@start[2L], "-1"), ## format = "%G-W%V-%u") # WRONG, gives today ## so we run a naive search for the Monday of the 'start' week candidates <- seq.Date(as.Date(paste0(x@start[1L]-1L, "-12-29")), as.Date(paste0(x@start[1L], "-12-28")), by = 1L) firstMonday <- candidates[match(sprintf("%i-W%02i", x@start[1L], x@start[2L]), strftime(candidates, "%G-W%V"))] seq.Date(from = firstMonday, by = 7L, length.out = nrow(x)) } else if (x@freq == 365) { # use day of the year (incorrect in leap years) as.Date(strptime(paste0(year(x), "-D", epochInYear(x)), format = "%Y-D%j")) } else { stop("date conversion only implemented for daily, weekly and monthly data") } } }) setGeneric("epoch<-", function(x, value) standardGeneric("epoch<-")) setReplaceMethod("epoch", "sts", function(x, value) { if (length(value) != nrow(x@observed)) stop("'epoch' must be of length 'nrow(observed)'") if (inherits(value, "Date")) { value <- as.integer(value) x@epochAsDate <- TRUE } x@epoch <- value x }) # observed slot setGeneric("observed", function(x) standardGeneric("observed")) setMethod("observed", "sts", function(x) { return(x@observed) }) setGeneric("observed<-", function(x, value) standardGeneric("observed<-")) setReplaceMethod("observed", "sts", function(x, value) { x@observed <- value x }) # alarms slot setGeneric("alarms", function(x) standardGeneric("alarms")) setMethod("alarms", "sts", function(x) { return(x@alarm) }) setGeneric("alarms<-", function(x, value) standardGeneric("alarms<-")) setReplaceMethod("alarms", "sts", function(x, value) { x@alarm <- value x }) # upperbound slot setGeneric("upperbound", function(x) standardGeneric("upperbound")) setMethod("upperbound", "sts", function(x) { return(x@upperbound) }) setGeneric("upperbound<-", function(x, value) standardGeneric("upperbound<-")) setReplaceMethod("upperbound", "sts", function(x, value) { x@upperbound <- value x }) # population slot (actually its populationFrac) setGeneric("population", function(x) standardGeneric("population")) setMethod("population", "sts", function(x) { return(x@populationFrac) }) setGeneric("population<-", function(x, value) standardGeneric("population<-")) setReplaceMethod("population", "sts", function(x, value) { x@populationFrac <- value x }) ##control slot setGeneric("control", function(x) 
standardGeneric("control"))
setMethod("control", "sts", function(x) {
  return(x@control)
})
setGeneric("control<-", function(x, value) standardGeneric("control<-"))
setReplaceMethod("control", "sts", function(x, value) {
  x@control <- value
  x
})

### multinomialTS slot
setGeneric("multinomialTS", function(x) standardGeneric("multinomialTS"))
setMethod("multinomialTS", "sts", function(x) {
  return(x@multinomialTS)
})
setGeneric("multinomialTS<-", function(x, value) standardGeneric("multinomialTS<-"))
setReplaceMethod("multinomialTS", "sts", function(x, value) {
  x@multinomialTS <- value
  x
})

### neighbourhood matrix slot
setGeneric("neighbourhood", function(x) standardGeneric("neighbourhood"))
setMethod("neighbourhood", "sts", function(x) {
  return(x@neighbourhood)
})
setGeneric("neighbourhood<-", function(x, value) standardGeneric("neighbourhood<-"))
setReplaceMethod("neighbourhood", "sts", function(x, value) {
  x@neighbourhood <- value
  x
})

######################################################################
# Miscellaneous access methods
######################################################################

setGeneric("epochInYear", function(x, ...) standardGeneric("epochInYear"))
setGeneric("year", function(x, ...) standardGeneric("year"))

######################################################################
# For stsNC class
######################################################################

### access function for reportingTriangle slot
setGeneric("reportingTriangle", function(x) standardGeneric("reportingTriangle"))
setMethod("reportingTriangle", "stsNC", function(x) {
  return(x@reportingTriangle)
})

### access function for delayCDF slot
setGeneric("delayCDF", function(x) standardGeneric("delayCDF"))
setMethod("delayCDF", "stsNC", function(x) {
  return(x@delayCDF)
})

### access function for SR slot
setGeneric("score", function(x) standardGeneric("score"))
setMethod("score", "stsNC", function(x) {
  return(x@SR)
})

### access function for prediction interval slot
setGeneric("predint", function(x) standardGeneric("predint"))
setMethod("predint", "stsNC", function(x) {
  return(x@pi)
})
surveillance/R/glm_epidataCS.R0000644000176200001440000000507313265641174016001 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
###
### Formulation of an endemic-only twinstim as a Poisson-GLM with response the
### number of events per space-time cell of stgrid and offset log(dt*ds)
###
### Copyright (C) 2013-2014,2018 Sebastian Meyer
### $Revision: 2111 $
### $Date: 2018-04-18 15:05:00 +0200 (Wed, 18. Apr 2018) $
################################################################################

utils::globalVariables("area") # in glm(), the 'offset' is evaluated in 'data'

glm_epidataCS <- function (formula, data, ...)
{
    if (missing(formula)) {
        covariates <- c("start", setdiff(names(data$stgrid), c(
            reservedColsNames_stgrid, obligColsNames_stgrid)))
        formula <- as.formula(paste0("~", paste0(covariates, collapse=" + ")))
    }

    ## for a type-specific model, we really have to set up the full
    ## "stkappagrid", i.e.
with nBlocks*nTiles*nTypes rows typeSpecificModel <- "type" %in% all.vars(formula) typeNames <- levels(data$events@data$type) nTypes <- length(typeNames) ## aggregated number of events in each cell of the stgrid ## (prehistory events have a missing BLOCK and are thus ignored) if (typeSpecificModel) { .stgrid <- do.call("rbind", lapply(typeNames, function (type) { cbind(data$stgrid, type=type, deparse.level=0) })) eventsByCell <- c(table(with(data$events@data, { interaction(tile, BLOCK, type, drop=FALSE, sep=".", lex.order=FALSE) }))) .stgrid$nEvents <- eventsByCell[paste( .stgrid$tile, .stgrid$BLOCK, .stgrid$type, sep=".")] } else { .stgrid <- data$stgrid eventsByCell <- c(table(with(data$events@data, { interaction(tile, BLOCK, drop=FALSE, sep=".", lex.order=FALSE) }))) .stgrid$nEvents <- eventsByCell[paste( .stgrid$tile, .stgrid$BLOCK, sep=".")] } .stgrid$nEvents[is.na(.stgrid$nEvents)] <- 0L ##stopifnot(sum(.stgrid$nEvents) == sum(!is.na(data$events$BLOCK))) ## Fit corresponding Poisson-GLM environment(formula) <- environment() # to see typeSpecificModel and nTypes glm(update.formula(formula, nEvents ~ .), family = poisson(link="log"), data = .stgrid, offset = log((if(typeSpecificModel) 1 else nTypes)*(stop-start)*area), ...) } surveillance/R/addSeason2formula.R0000644000176200001440000000367512505254341016657 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Conveniently add sine-cosine terms to a model formula ### ### Copyright (C) 2010 Michaela Paul, 2013-2015 Sebastian Meyer ### $Revision: 1299 $ ### $Date: 2015-03-27 14:19:29 +0100 (Fri, 27. 
Mar 2015) $ ################################################################################ ## for S = 1, 'sin(2*pi * t/period) + cos(2*pi * t/period)' is added to 'f' addSeason2formula <- function ( f = ~1, # formula to enhance S = 1, # number of sine/cosine pairs period = 52, # periodicity of the sinusoidal wave timevar = "t" # name of the time variable ){ ## check arguments stopifnot(inherits(f, "formula"), is.vector(S, mode = "numeric"), S >= 0, isScalar(period)) ## return unchanged formula if S = 0 if (max(S) == 0) return(f) ## character representation of old formula ftext <- paste0(deparse(f), collapse = "") ## add sine-cosine terms if (length(S) == 1L) { for (i in seq_len(S)) { ftext <- paste0(ftext, " + sin(", 2*i, "*pi*", timevar, "/", period, ")", " + cos(", 2*i, "*pi*", timevar, "/", period, ")") } } else { ## unit-specific seasonality for hhh4() via the special fe() function for (i in seq_len(max(S))) { which <- paste0(i <= S, collapse = ",") ftext <- paste0(ftext, " + fe(sin(",2*i,"*pi*",timevar,"/",period,"), which=c(",which,"))", " + fe(cos(",2*i,"*pi*",timevar,"/",period,"), which=c(",which,"))") } } ## convert back to a formula as.formula(ftext, env = .GlobalEnv) } surveillance/R/makeControl.R0000644000176200001440000000210013346465003015545 0ustar liggesusers################################################################################ ### Convenient construction of a list of control arguments for "hhh4" models ### ### Copyright (C) 2014-2015 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ################################################################################ makeControl <- function (f = list(~1), S = list(0, 0, 1), period = 52, offset = 1, ...) { ## set model components control <- mapply(function (f, S, period, offset) { f <- addSeason2formula(f = f, S = S, period = period) list(f = f, offset = offset) }, f, S, period, offset, SIMPLIFY = FALSE, USE.NAMES = FALSE) names(control) <- c("ar", "ne", "end") ## default: negative-binomial distribution with common overdispersion control$family <- "NegBin1" ## customization via ... arguments modifyList(control, list(...)) } surveillance/R/twinSIR_methods.R0000644000176200001440000002312112422377747016372 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Methods for "twinSIR" fits, specifically: ### - vcov: enabling the use of function confint to calculate Wald ### confidence intervals for the parameter estimates. ### - logLik: enables the use of function AIC ### - AIC, extractAIC: compute AIC or OSAIC depending on argument 'one.sided' ### - print, summary, print.summary, plot (intensityPlot), ... ### ### Copyright (C) 2009-2014 Sebastian Meyer, contributions by Michael Hoehle ### $Revision: 1088 $ ### $Date: 2014-10-24 09:29:43 +0200 (Fri, 24. Oct 2014) $ ################################################################################ ### don't need a specific coef-method (identical to stats:::coef.default) ## coef.twinSIR <- function (object, ...) 
## { ## object$coefficients ## } # asymptotic variance-covariance matrix (inverse of fisher information matrix) vcov.twinSIR <- function (object, ...) { solve(object$fisherinfo) } logLik.twinSIR <- function (object, ...) { r <- object$loglik attr(r, "df") <- length(coef(object)) class(r) <- "logLik" r } # Note: pz is determined by scanning the names of coef(object), # thus the 'model' component is not necessary # See the Hughes and King (2003) paper for details .OSAICpenalty <- function (twinSIRobject, k = 2, nsim = 1e3) { theta <- coef(twinSIRobject) npar <- length(theta) pz <- length(grep("cox\\([^)]+\\)", names(theta), ignore.case = FALSE, perl = FALSE, fixed = FALSE, useBytes = FALSE, invert = FALSE)) px <- npar - pz # number of constrained (non-negative) parameters penalty <- if (px == 0L) { k * pz # default AIC penalty (with k = 2) } else if (px == 1L) { k * (pz + 0.5) } else if (px == 2L) { Sigma <- vcov(twinSIRobject) # parameter covariance matrix rho <- cov2cor(Sigma[1:2,1:2])[1,2] as <- acos(rho)/2/pi w <- c(as, 0.5, 0.5-as) k * sum(w * (pz + 0:2)) # = k * sum(w * (npar - px + 0:2)) } else { # px > 2 message("Computing OSAIC weights for ", px, " epidemic covariates based on ", nsim, " simulations ...") W <- vcov(twinSIRobject)[1:px,1:px] w.sim <- w.chibarsq.sim(p=px, W=W, N=nsim) #c.f. (12) in Hughes & King (2003), r_i=px, m=0:px, ki=npar #as npar=pz+px, we have that npar-px = pz, hence the sum is k * sum(w.sim * (pz + 0:px)) } attr(penalty, "exact") <- px <= 2 penalty } AIC.twinSIR <- function (object, ..., k = 2, one.sided = NULL, nsim = 1e3) { AIC.default <- match.call() AIC.default$one.sided <- NULL AIC.default$nsim <- NULL AIC.default[[1]] <- call(":::", as.name("stats"), as.name("AIC.default")) ## I don't see any easy way of using AIC.default while avoiding ":::". ## NextMethod() does not fit due to extra arguments one.sided & nsim. ## Could maybe unclass "object" and all objects in "..." and then use AIC() if (is.null(one.sided)) { one.sided <- object$method == "L-BFGS-B" } if (one.sided) { penalty <- .OSAICpenalty(object, k = k, nsim = nsim) edf <- length(coef(object)) AIC.default$k <- penalty/edf } res <- eval(AIC.default, parent.frame()) attr(res, "type") <- if (one.sided) "One-sided AIC" else "Standard AIC" attr(res, "exact") <- if (one.sided) attr(penalty, "exact") else TRUE res } extractAIC.twinSIR <- function (fit, scale = 0, k = 2, one.sided = NULL, nsim = 1e3, ...) { if (is.null(one.sided)) { one.sided <- fit$method == "L-BFGS-B" } loglik <- logLik(fit) edf <- attr(loglik, "df") penalty <- if (one.sided) { .OSAICpenalty(fit, k = k, nsim = nsim) # one-sided AIC } else { k * edf # default AIC } res <- c(edf = edf, AIC = -2 * c(loglik) + penalty) attr(res, "type") <- if (one.sided) "One-sided AIC" else "Standard AIC" attr(res, "exact") <- if (one.sided) attr(penalty, "exact") else TRUE res } print.twinSIR <- function (x, digits = max(3, getOption("digits") - 3), ...) { cat("\nCall:\n") print.default(x$call) cat("\nCoefficients:\n") print.default(format(coef(x), digits=digits), print.gap = 2, quote = FALSE) cat("\nLog-likelihood: ", format(logLik(x), digits=digits), "\n", sep = "") if (!x$converged) { cat("\nWARNING: OPTIMIZATION DID NOT CONVERGE!\n") } cat("\n") invisible(x) } summary.twinSIR <- function (object, correlation = FALSE, symbolic.cor = FALSE, ...) 
{ ans <- object[c("call", "converged", "counts", "intervals", "nEvents")] ans$cov <- vcov(object) est <- coef(object) se <- sqrt(diag(ans$cov)) zval <- est/se pval <- 2 * pnorm(abs(zval), lower.tail = FALSE) ans$coefficients <- cbind(est, se, zval, pval) dimnames(ans$coefficients) <- list(names(est), c("Estimate", "Std. Error", "z value", "Pr(>|z|)")) if (correlation) { ans$correlation <- cov2cor(ans$cov) ans$symbolic.cor <- symbolic.cor } ans$loglik <- logLik(object) aic <- extractAIC(object, ...) ans$aic <- as.vector(aic[2L]) # remove 'edf' element attributes(ans$aic) <- attributes(aic)[c("type", "exact")] class(ans) <- "summary.twinSIR" ans } print.summary.twinSIR <- function (x, digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor, signif.stars = getOption("show.signif.stars"), ...) { cat("\nCall:\n") print.default(x$call) cat("\nCoefficients:\n") coefs <- x$coefficients printCoefmat(coefs, digits = digits, signif.stars = signif.stars, na.print = "NA", ...) nEvents <- x$nEvents nh0 <- length(nEvents) if (nh0 < 2L) { cat("\nTotal number of infections: ", nEvents, "\n") } else { cat("\nBaseline intervals:\n") intervals <- character(nh0) for(i in seq_len(nh0)) { intervals[i] <- paste("(", paste(format(x$intervals[c(i,i+1L)],trim=TRUE), collapse=";"), "]", sep = "") } names(intervals) <- paste("logbaseline", seq_len(nh0), sep=".") print.default(rbind("Time interval" = intervals, "Number of events" = nEvents), quote = FALSE, print.gap = 2) } cat("\n", attr(x$aic, "type"), ": ", format(x$aic, digits=max(4, digits+1)), if (!attr(x$aic, "exact")) "\t(simulated penalty weights)" else "", sep = "") cat("\nLog-likelihood:", format(x$loglik, digits = digits)) cat("\nNumber of log-likelihood evaluations:", x$counts[1], "\n") correl <- x$correlation if (!is.null(correl)) { p <- NCOL(correl) if (p > 1L) { cat("\nCorrelation of Coefficients:\n") if (is.logical(symbolic.cor) && symbolic.cor) { correl <- symnum(correl, abbr.colnames = NULL) correlcodes <- attr(correl, "legend") attr(correl, "legend") <- NULL print(correl) cat("---\nCorr. codes: ", correlcodes, "\n", sep="") } else { correl <- format(round(correl, 2), nsmall = 2, digits = digits) correl[!lower.tri(correl)] <- "" print(correl[-1, -p, drop = FALSE], quote = FALSE) } } } if (!x$converged) { cat("\nWARNING: OPTIMIZATION DID NOT CONVERGE!\n") } cat("\n") invisible(x) } ### Plot method for twinSIR (wrapper for intensityplot) plot.twinSIR <- function (x, which, ...) # defaults for 'which' are set below { cl <- match.call() cl[[1]] <- as.name("intensityplot") eval(cl, envir = parent.frame()) } formals(plot.twinSIR)$which <- formals(intensityplot.twinSIR)$which ###################################################################### # Extract the "residual process" (cf. Ogata, 1988), i.e. the # fitted cumulative intensity at the event times. # -> "generalized residuals similar to those discussed in Cox and Snell (1968)" ###################################################################### residuals.twinSIR <- function(object, ...) 
{
    #Extract event and stop-times
    eventTimes <- attr(object$model$survs,"eventTimes")
    sortedStop <- sort(unique(object$model$survs[,"stop"]))
    eventTimesIdx <- match(eventTimes, sortedStop)

    #Dimensions and zero vector (in case we need it)
    nTimes <- nrow(object$model$X)
    zerovec <- numeric(nTimes)

    # Extract the fitted model params
    px <- ncol(object$model$X)
    pz <- ncol(object$model$Z)
    theta <- coef(object)
    alpha <- theta[seq_len(px)]
    beta <- theta[px+seq_len(pz)]

    # Initialize e, h and thus lambda
    if (px > 0) {
        e <- as.vector(object$model$X %*% as.matrix(alpha))
    } else {
        e <- zerovec
    }
    if (pz > 0) {
        h <- as.vector(exp(object$model$Z %*% as.matrix(beta)))
    } else {
        h <- zerovec
    }
    lambda <- (e + h)

    #Determine blocks
    BLOCK <- as.numeric(factor(object$model$survs$start))

    # lambda_i integrals, i.e. integral of \lambda_i until t for each individual
    dt <- object$model$survs[,"stop"] - object$model$survs[,"start"]

    #Easier: no individual summations are needed as they are all summed afterwards anyway
    intlambda <- tapply(object$model$weights * lambda * dt, BLOCK, sum)

    #Compute cumulative intensities (Ogata (1988): "residual process")
    tau <- cumsum(intlambda)[eventTimesIdx]
    tau
}
surveillance/R/twinSIR_intensity.R0000644000176200001440000002772011775403713016753 0ustar liggesusers################################################################################
# Authors: Sebastian Meyer, with contributions by Michael Hoehle
# Date: 02 June 2009, modified 25 Mar 2011, 27 Jun 2012
#
# This file contains functions related to calculating and plotting intensities.
################################################################################

################################################################################
# Calculate the two components of the intensity lambda(t|H_t) for each row
# of the event history.
# Be aware that the function assumes atRiskY == 1 in all rows!
#
# ARGS:
# theta - parameter vector c(alpha,beta), where
#         beta also contains the baseline coefficients in the first place
# X - covariate matrix related to alpha, i.e. the epidemic component
# Z - covariate matrix related to beta, i.e. the Cox-like endemic component
#
# RETURNS: a numeric matrix with two columns e and h and nrow(X)==nrow(Z) rows
################################################################################

.eh <- function(theta, X, Z)
{
    # Extracting params from theta
    dimX <- dim(X)
    nRows <- dimX[1] # = nrow(Z)
    px <- dimX[2]
    pz <- ncol(Z)
    alpha <- theta[seq_len(px)]
    beta <- theta[px + seq_len(pz)]

    # Calculate the epidemic component e(t|H_t) and the endemic component h(t)
    e <- if (px > 0L) drop(X %*% alpha) else numeric(nRows)
    h <- if (pz > 0L) drop(exp(Z %*% beta)) else numeric(nRows)

    # Return the two components of the infection intensity related to the
    # rows of the event history in a two column matrix
    eh <- cbind(e = e, h = h)
    return(eh)
}

################################################################################
# Cumulative hazard function
#
#   \Lambda(t) = \int_{timeRange[1]}^t \lambda(s) ds,
#
# where \lambda(s) = \sum_{i=1}^n \lambda_i(s)
#
# Be aware that the function assumes atRiskY == 1 for all rows of X/Z/survs !!!
#
# ARGS:
# t - scalar time point until we want to integrate, must be non-negative
# theta - parameter vector c(alpha,beta), where
#         beta also contains the baseline coefficients in the first place
# X - covariate matrix related to alpha, i.e. the epidemic component
# Z - covariate matrix related to beta, i.e.
the Cox-like endemic component # survs - data.frame with columns id, start, stop, event; "timeRange" attribute # weights - vector of length nrow(X) indicating the number of individuals # with the same covariates. weights are allowed to change over time. # Note: it is assumed that none of the individuals covered by # "weights" can have an actual event, if so they need to have their # own row # # RETURNS: value of the cumulative hazard function at time t ################################################################################ Lambda <- function(t, theta, X, Z, survs, weights) { timeRange <- attr(survs, "timeRange") eh <- if (!isScalar(t) || t < timeRange[1L]) { stop("invalid argument 't': must be a scalar >= ", timeRange[1L], " (beginning of observation period)") } else if (t == timeRange[1L]) { return(0) } else if (t < timeRange[2L]) { # We have to extract the relevant intervals sortedStop <- sort(unique(survs$stop)) # Find first stop time beyond t idx <- match(TRUE, sortedStop >= t) firstBeyondt <- sortedStop[idx] includeSurvsRow <- survs$stop <= firstBeyondt # If t between start and stop of an interval we need to chop... if (firstBeyondt != t) { survs$stop[survs$stop == firstBeyondt] <- t } # Extract relevant parts survs <- survs[includeSurvsRow,] weights <- weights[includeSurvsRow] .eh(theta, X[includeSurvsRow,], Z[includeSurvsRow,]) } else { # if t >= attr(survs, "timeRange")[2], we take all rows .eh(theta, X, Z) } lambda <- rowSums(eh) dt <- survs$stop - survs$start intlambda <- sum(weights * lambda * dt) # no individual sums as in loglik return(intlambda) } ################################################################################ # Function to plot the path of the infection intensity or the proportions of # the endemic or epidemic component, either on an individual basis or related # to the total intensity at each event (=infection) time. # The function works with objects of class "simEpidata" # as well as with objects of class "twinSIR". ################################################################################ # 'model' is the result of getModel(x) # if x is of class "twinSIR": theta = (alpha, beta) = (alpha, (h0coefs, betarest)) # if x is of class "simEpidata": theta = (alpha, 1, betarest) # per default, the function uses the fitted or true parameters, respectively intensityplot_twinSIR <- function(model, which = c("epidemic proportion", "endemic proportion", "total intensity"), aggregate = TRUE, theta = NULL, plot = TRUE, add = FALSE, rug.opts = list(), ...) 
{ which <- match.arg(which) ## model components survs <- model$survs start <- attr(survs, "timeRange")[1L] end <- attr(survs, "timeRange")[2L] timeIntervals <- unique(survs[c("start", "stop")]) timepoints <- unique(c(timeIntervals$stop, end)) # need 'end' here, because model does only contain rows with atRiskY == 1, # otherwise would terminate in advance if all individuals have been infected nTimes <- length(timepoints) idlevels <- levels(survs$id) ## helper function for use with by() intensity <- function(iddata, what) { # 'iddata' will be a subset of survs, 'what' will be "wlambda" or "we" y <- numeric(nTimes) # zeroes y[match(iddata$stop, timepoints)] <- iddata[[what]] y } ## Calculate epidemic (e) and endemic (h) component in each row of the model eh <- do.call(".eh", args = c(list(theta = theta), model[c("X", "Z")])) ## Calculate individual _total intensity_ paths lambda <- rowSums(eh) survs$wlambda <- as.vector(model$weights * lambda) ## put individual intensity paths into a matrix [nTimes x n] wlambdaID <- by(data = survs, INDICES = survs["id"], FUN = intensity, what = "wlambda", simplify = FALSE) # initially infectious individuals (without re-infection) don't appear in # survs, since they are never atRiskY => wlambdaID[[i]] is NULL for such an # individual i but should be a 0-vector of length nTimes initiallyInfected <- names(which(sapply(wlambdaID, is.null))) #if (length(initiallyInfected) > 0L) # not necessary wlambdaID[initiallyInfected] <- rep(list(numeric(nTimes)), length(initiallyInfected)) wlambdaIDmatrix <- as.matrix(as.data.frame(c(wlambdaID), optional = TRUE)) ## alternative way but slower: ## wlambdaIDmatrix <- matrix(0, nrow = nTimes, ncol = length(idlevels), ## dimnames = list(NULL, idlevels)) ## for (ID in idlevels) { ## iddata <- survs[survs$id == ID,] ## wlambdaIDmatrix[match(iddata$stop, timepoints), ID] <- iddata$wlambda ## } if (which != "total intensity") { ## Calculate individual _epidemic intensity_ paths survs$we <- { px <- ncol(model$X) if (px == 0L) { stop("nothing to do, model does not contain both components") } as.vector(model$weights * eh[,1]) } ## put individual epidemic intensity paths into a matrix [nTimes x n] weID <- by(data = survs, INDICES = list(id = survs$id), FUN = intensity, what = "we", simplify = FALSE) # we have to replace NULL entries by numeric(nTimes) (cf. wlambdaID) weID[initiallyInfected] <- rep(list(numeric(nTimes)), length(initiallyInfected)) weIDmatrix <- as.matrix(as.data.frame(c(weID), optional = TRUE)) ## alternative code which is slower: ## weIDmatrix <- matrix(0, nrow = nTimes, ncol = length(idlevels), ## dimnames = list(NULL, idlevels)) ## for (ID in idlevels) { ## iddata <- survs[survs$id == ID,] ## weIDmatrix[match(iddata$stop, timepoints), ID] <- iddata$we ## } } ## Generate matrix with data for 'matplot' ydata2plot <- if (which == "total intensity") { if (aggregate) { rowSums(wlambdaIDmatrix) } else { wlambdaIDmatrix } } else { # calculate epidemic proportion if (aggregate) { rowSums(weIDmatrix) / rowSums(wlambdaIDmatrix) } else { weIDmatrix / wlambdaIDmatrix } } if (which == "endemic proportion") { ydata2plot <- 1 - ydata2plot } ydata2plot <- as.matrix(ydata2plot) colnames(ydata2plot) <- if (aggregate) which else idlevels if (which != "total intensity") { # there may be NAs in data2plot where the total intensity equals 0 # => when calculating proportions we get 0 / 0 = NA # we redefine those values to 0. 
(0-intensity => 0-proportion) ydata2plot[is.na(ydata2plot)] <- 0 } # prepend time (x) column data2plot <- cbind(stop = timepoints, ydata2plot) # if the epidemic is SIRS or SIS (re-susceptibility), there may be time # blocks during the observation period, where no individual is susceptible: # Problem: those time blocks are not included in the model component, # which only contains rows with atRiskY == 1 # Solution: fill the missing time periods with 0 intensity (or proportion) innerStart <- timeIntervals[-1L, "start"] innerStop <- timeIntervals[-nrow(timeIntervals), "stop"] noSusceptiblesStopTimes <- innerStart[innerStop != innerStart] if (length(noSusceptiblesStopTimes) > 0L) { data2plot <- rbind(data2plot, cbind(noSusceptiblesStopTimes, matrix(0, nrow = length(noSusceptiblesStopTimes), ncol = ncol(ydata2plot)) ) ) data2plot <- data2plot[order(data2plot[,1L]),] } ## Plot and return data if (plot) { dotargs <- list(...) nms <- names(dotargs) if(! "xlab" %in% nms) dotargs$xlab <- "time" if(! "ylab" %in% nms) dotargs$ylab <- which if(! "lty" %in% nms) dotargs$lty <- 1 do.call("matplot", args = c(list(x = c(start, data2plot[,1L]), y = rbind(data2plot[1L, -1L, drop = FALSE], data2plot[ , -1L, drop = FALSE]), type = "S", add = add), dotargs)) if (is.list(rug.opts)) { if (is.null(rug.opts$ticksize)) rug.opts$ticksize <- 0.02 if (is.null(rug.opts$quiet)) rug.opts$quiet <- TRUE do.call("rug", args = c(list(x = attr(survs, "eventTimes")), rug.opts)) } invisible(data2plot) } else { data2plot } } ### intensityplot-methods for objects of classes "twinSIR" and "simEpidata" intensityplot.twinSIR <- function () { cl <- match.call() cl[[1]] <- as.name("intensityplot_twinSIR") names(cl)[names(cl) == "x"] <- "model" cl$model <- quote(getModel(x)) if (is.null(theta)) { cl$theta <- quote(coef(x)) } eval(cl) } intensityplot.simEpidata <- function () { cl <- match.call() cl[[1]] <- as.name("intensityplot_twinSIR") names(cl)[names(cl) == "x"] <- "model" cl$model <- quote(getModel(x)) if (is.null(theta)) { config <- attr(x, "config") cl$theta <- quote(c(config$alpha, 1, config$beta)) # 1 is for true h0 } message("Note: the (true) baseline hazard is only evaluated", " at the beginning of the time intervals") eval(cl) } formals(intensityplot.twinSIR) <- formals(intensityplot.simEpidata) <- c(alist(x=), formals(intensityplot_twinSIR)[-1]) surveillance/R/clapply.R0000644000176200001440000000126513117527513014747 0ustar liggesusers################################################################################ ### Conditional lapply ### ### Copyright (C) 2012,2017 Sebastian Meyer ### ### This file is part of the R package "surveillance", ### free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ################################################################################ ### clapply uses lapply if X is a list and otherwise applies FUN directly to X. ### The result is always a list (of length 1 in the latter case). clapply <- function (X, FUN, ...) { if (is.list(X)) lapply(X, FUN, ...) 
else list(FUN(X, ...))
}
surveillance/R/formatPval.R0000644000176200001440000000141613117532200015401 0ustar liggesusers################################################################################
### Yet another P-value formatter, using R's format.pval()
###
### Copyright (C) 2013,2015,2017 Sebastian Meyer
###
### This file is part of the R package "surveillance",
### free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
################################################################################

formatPval <- function (pv, eps = 1e-4, scientific = FALSE, ...)
{
    format1 <- function (p)
        format.pval(p, digits = if (p < 10*eps) 1 else 2, eps = eps,
                    nsmall = 2, scientific = scientific, ...)
    vapply(X = pv, FUN = format1, FUN.VALUE = "", USE.NAMES = TRUE)
}
surveillance/R/stcd.R0000644000176200001440000000411114013521730014220 0ustar liggesusers######################################################################
# Shiryaev-Roberts based spatio-temporal cluster detection as described
# in Assuncao & Correa (2009). The implementation is based on C++ code
# originally written by Marcos Oliveira Prates, UFMG, Brazil, and
# provided by Thais Correa, UFMG, Brazil, during her research stay in
# Munich. This stay was financially supported by the Munich Center of
# Health Sciences.
#
# Parameters:
# x - vector containing the spatial x coordinates of the events
# y - vector containing the spatial y coordinates of the events
# t - vector containing the time points of the events
# radius - the radius of the cluster
# epsilon - the relative change of event-intensity within the cluster
#           to detect
# areaA - area of the observation region A (single number)
# areaAcapBk - vector of areas A \ B(s_k,\rho) for k=1,\ldots,n
# threshold - threshold limit for the alarm; should be equal
#             to the desired ARL
# cusum - boolean: if TRUE use CUSUM, otherwise Shiryaev-Roberts
######################################################################

stcd <- function(x, y, t, radius, epsilon, areaA, areaAcapBk, threshold, cusum=FALSE) {
  #check that the vectors x,y,t are of the same length.
  n <- length(x)
  if ((length(y) != n) | (length(t) != n)) {
    stop("Vectors x,y,t not of same size.")
  }
  if (!all(diff(order(t)) == 1)) {
    stop("The vector of time points needs to be ascending in time. No ties allowed.")
  }

  res <- .C(C_SRspacetime, x=as.double(x), y=as.double(y), t=as.double(t),
            n=as.integer(n), radius=as.double(radius), epsilon=as.double(epsilon),
            areaA=as.double(areaA), areaAcapBk=as.double(areaAcapBk),
            cusum=as.integer(cusum), threshold=as.double(threshold),
            R=as.double(numeric(n)), idxFA=as.integer(-1), idxCC=as.integer(-1))

  #Indexing differences between C and R
  res$idxFA <- res$idxFA+1
  res$idxCC <- res$idxCC+1

  #Missing: compute which indices are part of the cluster.
  #--> Thais R-code
  return(list(R=res$R, idxFA=res$idxFA, idxCC=res$idxCC))
}
surveillance/R/hhh4_simulate_scores.R0000644000176200001440000000507412575642536017423 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
### ### Compute scores based on simulations from fitted hhh4() models ### ### Copyright (C) 2013-2015 Sebastian Meyer ### $Revision: 1476 $ ### $Date: 2015-09-15 00:08:30 +0200 (Tue, 15. Sep 2015) $ ################################################################################ ## logarithmic score ## CAVE: will be infinite if none of "sims" yields "x" logs_sims <- function (sims, x) .logs(px = mean(sims == x)) ## Dawid-Sebastiani score ## CAVE: undefined if all simulations have the same value (i.e., no variance) dss_sims <- function (sims, x) { if ((varsims <- var(sims)) == 0) { # FIXME: What to do in that case? warning("DSS undefined for zero variance of prediction: all(sims==", sims[1L], "), x=", x) NA_real_ # if (x==sims[1L]) -Inf else Inf } else { .dss(meanP = mean(sims), varP = varsims, x = x) } } ## ranked probability score rps_sims <- function (sims, x) { .rps(P = ecdf(sims), x = x, kmax = ceiling(mean(sims) + 40*sd(sims))) ## Two alternatives via the expectation-based definition of the RPS: ## method = "means": equivalent to ecdf approach but slower ## method = "means.MC": faster than ecdf but with approximation error ## simdiffs <- switch(method, ## "means.MC" = diff(sims), ## "means" = outer(sims, sims, "-")) ## mean(abs(sims - x)) - mean(abs(simdiffs)) / 2 } ## scores-method for simulations from a hhh4 fit scores.hhh4sims <- function (x, which = "rps", units = NULL, ..., drop = TRUE) { observed <- observed(attr(x, "stsObserved")) scoreFUNs <- mget(paste0(which, "_sims"), envir = getNamespace("surveillance"), inherits = FALSE) names(scoreFUNs) <- which if (!is.null(units)) { observed <- observed[, units, drop = FALSE] x <- x[, units, , drop = FALSE] } counts <- array(c(observed, x), dim = dim(x) + c(0L, 0L, 1L)) res <- lapply(X = scoreFUNs, FUN = function (scoreFUN) apply(counts, 1:2, function (y) scoreFUN(y[-1L], y[1L]))) res <- simplify2array(res, higher = TRUE) if (drop) drop(res) else res } ## scores-method for simulations from a bunch of hhh4 fits scores.hhh4simslist <- function (x, ...) lapply(X = x, FUN = scores.hhh4sims, ...) surveillance/R/untie.R0000644000176200001440000002065513266060655014437 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Spatial and temporal tie-breaking of events ### ### Copyright (C) 2012-2014,2018 Sebastian Meyer ### $Revision: 2120 $ ### $Date: 2018-04-19 11:30:21 +0200 (Thu, 19. Apr 2018) $ ################################################################################ ## epidataCS-method ## makes use of untie.default (in time) and untie.matrix (in space) untie.epidataCS <- function (x, amount = list(t=NULL, s=NULL), minsep = list(t=0, s=0), direction = "left", keep.sources = FALSE, ..., verbose = FALSE) { stopifnot(is.list(amount), !is.null(names(amount)), is.list(minsep), !is.null(names(minsep))) minsep <- modifyList(list(t=0, s=0), minsep) do.spatial <- pmatch("s", names(amount), nomatch=0L) > 0L do.temporal <- pmatch("t", names(amount), nomatch=0L) > 0L if (!do.spatial && !do.temporal) { stop("no amounts specified, nothing to do") } ## Generate new events data frame events <- marks.epidataCS(x, coords=FALSE) newcoords <- if (do.spatial) { # untie spatial coordinates untie.matrix(coordinates(x$events), amount$s, minsep$s, constraint=x$W, ...) 
} else coordinates(x$events) if (do.temporal) { # untie event times ## by default, we shift event times (non-symmetrically) to the left such ## that the shifted versions potentially stay in the same BLOCK of ## endemic covariates (the CIF is left-continuous). events$time <- untie.default(events$time, amount$t, minsep$t, direction=direction, sort=TRUE, ...) ## FIXME: Does sort=TRUE always make sense? ## maybe only sort in untie.default if amount < minsep? } ## Generate epidataCS object with new events coordinates(events) <- newcoords # -> SpatialPointsDataFrame #proj4string(events) <- proj4string(x$W) # "proj4string<-" might change the # string e.g. add +towgs84=0,0,0,0,0,0,0 events@proj4string <- x$W@proj4string npoly <- attr(x$events$.influenceRegion, "nCircle2Poly") clipper <- attr(x$events$.influenceRegion, "clipper") if (is.null(clipper)) # epidataCS < 1.8-1 clipper <- "polyclip" res <- as.epidataCS(events=events, stgrid=x$stgrid[,-1L], W=x$W, qmatrix=x$qmatrix, nCircle2Poly=npoly, clipper=clipper, verbose=verbose) if (keep.sources) { res$events$.sources <- x$events$.sources } if (do.temporal) { prehistevents <- function (x) row.names(x$events@data)[x$events$time <= x$stgrid$start[1L]] if (!setequal(prehistevents(x), prehistevents(res))) warning("temporal jittering has changed the set of prehistory events") } ## Done res } ## untie event times by uniform jittering untie.default <- function (x, amount = NULL, minsep = 0, direction = c("symmetric", "left", "right"), sort = NULL, giveup = 1000, ...) { stopifnot(is.numeric(x), is.vector(x)) distx <- dist(x) isPosDist <- distx > 0 if (all(isPosDist)) return(x) # no ties direction <- match.arg(direction) if (is.null(sort)) # sort if x was sorted sort <- identical(order(x, decreasing=FALSE), seq_along(x)) if (any(isPosDist)) { minsepx <- min(distx[isPosDist]) # smallest positive distance amount.bound <- if (direction=="symmetric") minsepx/2 else minsepx if (is.null(amount)) { amount <- amount.bound } else if (sort && abs(amount) > amount.bound) { warning("'amount' should not be greater than ", if (direction=="symmetric") "half of ", "the minimum separation (", format(amount.bound), ")") } } else if (is.null(amount)) { stop("default 'amount' does not work with completely tied 'x'") } shiftFUN <- switch(direction, symmetric = function (x) x + runif(length(x), -amount, amount), right = function (x) x + runif(length(x), 0, amount), left = function (x) x - runif(length(x), 0, amount)) res <- .untie(x, shiftFUN, minsep) if (sort) base::sort(res) else res } ## untie spatial coordinates by moving them by vectors drawn uniformly from a ## disc of radius 'amount', optionally respecting a region (constraint) ## inside which the jittered points should be located (of course, the initial ## points must also obey this constraint), and a minimum separation 'minsep' untie.matrix <- function (x, amount = NULL, minsep = 0, constraint = NULL, giveup = 1000, ...) 
{
    stopifnot(is.numeric(x), is.matrix(x))
    dimx <- dim(x)
    if (dimx[2L] <= 1L) {
        return(untie.default(c(x), amount, minsep, giveup=giveup))
    } else if (dimx[2L] > 2L) {
        stop("spatial tie-breaking is only implemented for 2D coordinates")
    }
    if (is.null(amount)) {
        distx <- dist(x)
        isPosDist <- distx > 0
        ## take half of smallest distance, which guarantees that new points
        ## will be closer to previously tied points than to others
        if (any(isPosDist))
            amount <- min(distx[isPosDist]) / 2
        else stop("default 'amount' does not work with a single location only")
    }
    if (!is.null(constraint)) {
        stopifnot(inherits(constraint, "SpatialPolygons"))
        proj4string(constraint) <- CRS(NA_character_)
        outOfConstraint <- function (x) {
            is.na(over(SpatialPoints(x), constraint))
        }
        if (any(outOfConstraint(x)))
            stop("some points of the matrix 'x' don't respect the 'constraint'")
    } else outOfConstraint <- NULL
    shiftFUN <- function (x) x + runifdisc(nrow(x), amount)
    .untie(x, shiftFUN, minsep, outOfConstraint, giveup=giveup)
}

## workhorse for both vector and matrix 'x'
.untie <- function (x, shiftFUN, minsep = 0, outOfConstraintFUN = NULL,
                    giveup = 1000)
{
    x <- res <- as.matrix(x)
    move <- rep.int(TRUE, nrow(x)) # initially move _all_ points
    ntry <- 0L
    updateMoveExpr <- .updateMoveExpr(minsep>0, is.function(outOfConstraintFUN))
    while((nleft <- sum(move)) > 0L && ntry < giveup) {
        res[move,] <- shiftFUN(x[move,,drop=FALSE])
        ## determine for the moved points if they are too close to another point
        ## or fall out of constraint -> try again
        eval(updateMoveExpr)
        ntry <- ntry + 1L
    }
    if (ntry >= giveup)
        warning("could not obey 'constraint' and/or 'minsep' for some points")
    if (ncol(res) == 1) res[,1] else res
}

## check if points with index 'idx' are too close (< minsep) to any other points
## (this function could probably be made more efficient, especially for
## length(idx) << nrow(pts), since we actually don't need all pairwise distances
## calculated by dist() but only those related to the idx-points)
.tooClose <- function (pts, idx, minsep)
{
    distpts <- as.matrix(dist(pts))
    diag(distpts) <- Inf
    rowSums(distpts[idx,,drop=FALSE] < minsep) > 0
}

## generate expression which updates logical vector 'move' (points left to move)
.updateMoveExpr <- function(doClose = FALSE, doConstraint = FALSE)
{
    if (!doClose && !doConstraint)
        return(expression(move[move] <- FALSE))
    exprClose <- expression(movedTooClose <- .tooClose(res, move, minsep))
    exprConstraint <- if (doClose) { # only need to check those not too close
        expression(
            movedOutOfConstraint <- rep.int(FALSE, nleft),
            if (any(!movedTooClose)) movedOutOfConstraint[!movedTooClose] <-
                outOfConstraintFUN(res[move,,drop=FALSE][!movedTooClose,,drop=FALSE])
        )
    } else {
        expression(
            movedOutOfConstraint <- outOfConstraintFUN(res[move,,drop=FALSE])
        )
    }
    c(if (doClose) exprClose, if (doConstraint) exprConstraint,
      switch(doClose + 2*doConstraint,
             expression(move[move] <- movedTooClose),
             expression(move[move] <- movedOutOfConstraint),
             expression(move[move] <- movedTooClose | movedOutOfConstraint)
             )
      )
}
surveillance/R/twinstim_iafplot.R0000644000176200001440000003105012520414147016665 0ustar liggesusers################################################################################
### Part of the surveillance package, http://surveillance.r-forge.r-project.org
### Free software under the terms of the GNU General Public License, version 2,
### a copy of which is available at http://www.r-project.org/Licenses/.
### ### Plot estimated interaction kernel (siaf/tiaf) as a function of distance ### ### Copyright (C) 2012-2015 Sebastian Meyer ### $Revision: 1325 $ ### $Date: 2015-04-30 13:56:23 +0200 (Thu, 30. Apr 2015) $ ################################################################################ iafplot <- function (object, which = c("siaf", "tiaf"), types = NULL, scaled = c("intercept", "standardized", "no"), truncated = FALSE, log = "", conf.type = if (length(pars) > 1) "MC" else "parbounds", conf.level = 0.95, conf.B = 999, xgrid = 101, col.estimate = rainbow(length(types)), col.conf = col.estimate, alpha.B = 0.15, lwd = c(3,1), lty = c(1,2), verticals = FALSE, do.points = FALSE, add = FALSE, xlim = NULL, ylim = NULL, xlab = NULL, ylab = NULL, legend = !add && (length(types) > 1), ...) { if (isTRUE(verticals)) verticals <- list() if (isTRUE(do.points)) do.points <- list() if (add) log <- paste0("", if (par("xlog")) "x", if (par("ylog")) "y") scaled <- if (is.logical(scaled)) { # surveillance < 1.9-0 if (scaled) "intercept" else "no" } else { match.arg(scaled) } coefs <- coef(object) epiloglink <- .epilink(object) == "log" typeNames <- rownames(object$qmatrix) nTypes <- length(typeNames) ## interaction function which <- match.arg(which) IAFobj <- object$formula[[which]] if (is.null(IAFobj)) stop("the model has no epidemic component") IAF <- IAFobj[[if (which=="siaf") "f" else "g"]] if (which == "siaf") { # needs to be a function of distance IAF <- as.function( c(alist(x=, ...=), quote(f(cbind(x, 0), ...))), envir = list2env(list(f = IAF), parent = environment(IAF)) ) } isStepFun <- !is.null(knots <- attr(IAFobj, "knots")) && !is.null(maxRange <- attr(IAFobj, "maxRange")) ## interaction range if (isScalar(truncated)) { eps <- truncated truncated <- TRUE } else { eps <- attr(IAFobj, "eps") } if (is.null(eps)) { # cannot take eps into account (pre 1.8-0 behaviour) eps <- NA_real_ } else if (length(eps) > 1L && truncated) { message("no truncation due to heterogeneous interaction ranges, see \"rug\"") } epsIsFixed <- length(eps) == 1L && is.finite(eps) ## scaled interaction function if (scaled == "intercept") { idxgamma0 <- match("e.(Intercept)", names(coefs), nomatch = 0L) if (idxgamma0 == 0L) { message("no scaling due to missing epidemic intercept") scaled <- "no" } } else { # we do not use gamma0 -> 0-length selection idxgamma0 <- 0L } SCALE <- switch(scaled, "intercept" = if (epiloglink) quote(exp(gamma0)) else quote(gamma0), "standardized" = quote(1/IAF(0, iafpars, types)), "no" = 1 ) FUN <- function (x, iafpars, types, gamma0) { scale <- eval(SCALE) vals <- scale * IAF(x, iafpars, types) } ## truncate at eps if (truncated && epsIsFixed) { body(FUN) <- as.call(c(as.list(body(FUN)), expression( vals[x > eps] <- 0, vals ))) } ## if (loglog) { ## body(FUN)[[length(body(FUN))]] <- ## call("log", body(FUN)[[length(body(FUN))]]) ## } ## extract parameters gamma0 <- coefs[idxgamma0] idxiafpars <- grep(paste0("^e\\.",which), names(coefs)) iafpars <- coefs[idxiafpars] ## concatenate parameters idxpars <- c(idxgamma0, idxiafpars) pars <- c(gamma0, iafpars) ## type of confidence band force(conf.type) # per default depends on 'pars' if (length(pars) == 0 || is.null(conf.type) || is.na(conf.type)) { conf.type <- "none" } conf.type <- match.arg(conf.type, choices = c("parbounds", "bootstrap", "MC", "none")) if (conf.type == "bootstrap") conf.type <- "MC" # "bootstrap" was used <1.8 if (conf.type == "parbounds" && length(pars) > 1) { warning("'conf.type=\"parbounds\"' is only valid for a single parameter") } 
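    ## Usage sketch (not run; 'fit' denotes a hypothetical "twinstim" fit):
    ## iafplot(fit, which = "siaf") plots the estimated spatial interaction
    ## kernel as a function of distance, by default with a Monte-Carlo
    ## confidence band (conf.type = "MC") if the kernel has more than one
    ## parameter, or with conf.type = "parbounds" (kernel evaluated at the
    ## confidence bounds of the single parameter) otherwise;
    ## see the 'parSample' switch below.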
    ## grid of x-values (t or ||s||) on which FUN will be evaluated
    if (is.null(xlim)) {
        xmax <- if (add) {
            xmax <- par("usr")[2] / (if (par("xaxs")=="r") 1.04 else 1)
            if (par("xlog")) 10^xmax else xmax
        } else {
            if (epsIsFixed) {
                eps
            } else if (isStepFun && maxRange < Inf) {
                maxRange
            } else if (which == "siaf") {
                sqrt(sum((object$bbox[,"max"] - object$bbox[,"min"])^2))
            } else {
                diff(object$timeRange)
            }
        }
        xlim <- c(0.5*grepl("x", log), xmax)
    }
    xgrid <- if (isStepFun) {
        c(if (grepl("x", log)) {
              if (xlim[1L] < knots[1L]) xlim[1L] else NULL
          } else 0,
          knots[knots < xlim[2L]])
    } else if (isScalar(xgrid)) {
        ## default: a regular grid of 'xgrid' points covering 'xlim'
        if (grepl("x", log)) {
            exp(seq(log(xlim[1L]), log(xlim[2L]), length.out = xgrid))
        } else {
            seq(xlim[1L], xlim[2L], length.out = xgrid)
        }
    } else {
        stopifnot(is.vector(xgrid, mode = "numeric"))
        sort(xgrid)
    }

    ## set up the plot frame (unless add=TRUE) with a rug of the
    ## event-specific interaction ranges
    if (!add) {
        if (is.null(ylim)) {
            ## default ylim: kernel value at the left boundary (the maximum
            ## for the usual monotonically decreasing interaction functions)
            ymax <- FUN(xgrid[1L], iafpars, types[1L], gamma0)
            ylim <- c(if (grepl("y", log)) ymax/100 else 0, ymax)
        }
        if (is.null(xlab))
            xlab <- if (which == "siaf") "distance" else "time"
        if (is.null(ylab))
            ylab <- which
        plot(xlim, ylim, type = "n", xlab = xlab, ylab = ylab, log = log, ...)
        if (length(eps) > 1L && truncated) rug(eps)
    }

    ## store evaluated interaction function in a matrix (will be returned)
    typeNamesSel <- typeNames[types]
    res <- matrix(NA_real_, length(xgrid), 1L+length(types),
                  dimnames = list(NULL, c("x", typeNamesSel)))
    res[,1L] <- xgrid
    for (i in seq_along(types)) {
        ## select parameters on which to evaluate iaf
        parSample <- switch(conf.type,
            parbounds = {
                cis <- confint(object, idxpars, level=conf.level)
                ## all combinations of parameter bounds
                do.call("expand.grid", as.data.frame(t(cis)))
            },
            MC = { # Monte-Carlo confidence interval
                ## sample parameters from their asymptotic multivariate normal dist.
                rbind(pars,
                      mvrnorm(conf.B, mu=pars,
                              Sigma=vcov(object)[idxpars,idxpars,drop=FALSE]),
                      deparse.level=0)
            })

        ## add confidence limits
        if (!is.null(parSample)) {
            fvalsSample <- apply(parSample, 1, if (scaled == "intercept") {
                function (pars) FUN(xgrid, pars[-1L], types[i], pars[1L])
            } else {
                function (pars) FUN(xgrid, pars, types[i])
            })
            if (length(xgrid) == 1L)          # e.g., single-step function
                fvalsSample <- t(fvalsSample) # convert to matrix form
            lowerupper <- if (conf.type == "parbounds") {
                t(apply(fvalsSample, 1, range))
            } else { # Monte-Carlo sample of parameter values
                if (is.na(conf.level)) {
                    stopifnot(alpha.B >= 0, alpha.B <= 1)
                    .col <- col2rgb(col.conf[i], alpha=TRUE)[,1]
                    .col["alpha"] <- round(alpha.B*.col["alpha"])
                    .col <- do.call("rgb", args=c(as.list(.col), maxColorValue = 255))
                    matlines(x=xgrid, y=fvalsSample, type="l", lty=lty[2],
                             col=.col, lwd=lwd[2]) # returns NULL
                } else {
                    t(apply(fvalsSample, 1, quantile,
                            probs=c(0,conf.level) + (1-conf.level)/2))
                }
            }
            if (!is.null(lowerupper)) {
                attr(res, if(length(types)==1) "CI"
                          else paste0("CI.",typeNamesSel[i])) <- lowerupper
                if (isStepFun) {
                    segments(rep.int(xgrid,2L), lowerupper,
                             rep.int(c(xgrid[-1L], min(maxRange, xlim[2L])), 2L),
                             lowerupper,
                             lty=lty[2], col=col.conf[i], lwd=lwd[2])
                    ##points(rep.int(xgrid,2L), lowerupper, pch=16, col=col.conf[i])
                } else {
                    matlines(x=xgrid, y=lowerupper, type="l", lty=lty[2],
                             col=col.conf[i], lwd=lwd[2])
                }
            }
        }

        ## add point estimate
        res[,1L+i] <- FUN(xgrid, iafpars, types[i], gamma0)
        if (isStepFun) {
            segments(xgrid, res[,1L+i],
                     c(xgrid[-1L], min(maxRange, xlim[2L])), res[,1L+i],
                     lty = lty[1], col = col.estimate[i], lwd = lwd[1])
            ## add points
            if (is.list(do.points)) {
                pointStyle <- modifyList(list(pch=16, col=col.estimate[i]),
                                         do.points)
                do.call("points", c(list(xgrid, res[,1L+i]), pointStyle))
            }
            ## add vertical connections:
            if (is.list(verticals)) {
                verticalStyle <- modifyList(
                    list(lty = 3, col = col.estimate[i], lwd = lwd[1L]),
                    verticals)
                do.call("segments", c(
                    list(xgrid[-1L], res[-length(xgrid),1L+i],
                         xgrid[-1L], res[-1L,1L+i]),
                    verticalStyle))
            }
            if (maxRange <= xlim[2L]) {
                ## add horizontal=0 afterwards
                segments(maxRange, 0, xlim[2L], 0,
                         lty = lty[1], col = col.estimate[i], lwd = lwd[1])
                if (is.list(verticals))
                    do.call("segments", c(
                        list(maxRange, res[length(xgrid),1L+i], maxRange, 0),
                        verticalStyle))
                if (is.list(do.points))
                    do.call("points",
c(list(maxRange, 0), pointStyle)) } } else { lines(x = xgrid, y = res[,1L+i], lty = lty[1], col = col.estimate[i], lwd = lwd[1]) } } ## add legend if (isTRUE(legend) || is.list(legend)) { default.legend <- list(x = "topright", legend = typeNamesSel, col = col.estimate, lty = lty[1], lwd = lwd[1], bty = "n", cex = 0.9, title="type") legend.args <- if (is.list(legend)) { modifyList(default.legend, legend) } else default.legend do.call("legend", legend.args) } ## Invisibly return interaction function evaluated on xgrid (by type) invisible(res) } surveillance/R/pit.R0000644000176200001440000001173213446150274014100 0ustar liggesusers################################################################################ ### Part of the surveillance package, http://surveillance.r-forge.r-project.org ### Free software under the terms of the GNU General Public License, version 2, ### a copy of which is available at http://www.r-project.org/Licenses/. ### ### Non-randomized version of the PIT histogram as discussed in: ### Predictive model assessment for count data ### Czado, C., Gneiting, T. & Held, L. (2009) ### Biometrics 65:1254-1261 ### ### Copyright (C) 2010-2012 Michaela Paul, 2013-2015,2017,2019 Sebastian Meyer ### $Revision: 2418 $ ### $Date: 2019-03-25 13:59:40 +0100 (Mon, 25. Mar 2019) $ ################################################################################ ## x - observed count data ## pdistr - predictive CDF or a list of such predictive CDF's, ## one for each data point x. If evaluated at x=-1 it must return 0 ## J - number of bins ## ... - additional arguments for pdistr(), recycled to the length of x. ## Ignored if pdistr is a list. ## plot - a list of arguments for plot.histogram (otherwise no plot is produced) pit.default <- function (x, pdistr, J=10, relative=TRUE, ..., plot = list()) { PxPxm1 <- pitPxPxm1(x, pdistr, ...) Px <- PxPxm1[1L,] Pxm1 <- PxPxm1[2L,] if (any(Px == Pxm1)) { ## This means the predictive probability of an observed x is zero. ## Our predictive model is really bad if that happens. warning("predictive distribution has 0 probability for observed 'x'") } breaks <- (0:J)/J ## calculate \bar{F}(u) for scalar u Fbar1 <- function (u, Px, Pxm1) { F_u <- punif(u, Pxm1, Px) # also works for Pxm1 == Px => F_u = u >= Pxm1 mean(F_u) } Fbar_seq <- vapply(X = breaks, FUN = Fbar1, FUN.VALUE = 0, Px = Px, Pxm1 = Pxm1, USE.NAMES = FALSE) scale <- if (relative) J else 1 f_j <- scale * diff.default(Fbar_seq) res <- list(breaks = breaks, counts = f_j, density = f_j, mids = breaks[-(J+1)] + 1/J/2, xname = "PIT", equidist = TRUE) class(res) <- c("pit", "histogram") if (is.list(plot)) do.call("plot", c(list(x = res), plot)) else res } pitPxPxm1 <- function (x, pdistr, ...) { if (is.list(pdistr)) { # list of functions, not necessarily vectorized stopifnot(length(pdistr) == length(x)) vapply(X = seq_along(x), FUN = function (i) { stopifnot(isTRUE( all.equal.numeric(0, pdistr[[i]](-1), check.attributes = FALSE) )) c(pdistr[[i]](x[i]), pdistr[[i]](x[i]-1)) }, FUN.VALUE = c(0,0), USE.NAMES = FALSE) # 2 x length(x) } else { # pdistr is (the name of) a function pdistr <- match.fun(pdistr) if (nargs() == 2L) { # no dots, same pdistr for every data point # and assumed to be vectorized stopifnot(isTRUE(all.equal.numeric(0, pdistr(-1)))) rbind(pdistr(x), pdistr(x-1), deparse.level = 0) } else { # ... 
arguments for pdistr, recycled to the length of x
            # pdistr is called by mapply, so no need to be vectorized
            stopifnot(isTRUE(all.equal.numeric(
                0, do.call("pdistr", c(list(-1), lapply(list(...), "[", 1L))),
                check.attributes = FALSE)))
            rbind(mapply(pdistr, x, ..., SIMPLIFY = TRUE, USE.NAMES = FALSE),
                  mapply(pdistr, x-1, ..., SIMPLIFY = TRUE, USE.NAMES = FALSE),
                  deparse.level = 0)
        }
    }
}

## plot the PIT histogram
plot.pit <- function (x, main = "", ylab = NULL, ...)
{
    relative <- isTRUE(all.equal(1, sum(x$density)))
    if (is.null(ylab))
        ylab <- if (relative) "Relative Frequency" else "Density"
    ## call plot.histogram
    NextMethod("plot", main = main, ylab = ylab, ...)
    ## add reference line
    abline(h = if (relative) 1/length(x$mids) else 1, lty = 2, col = "grey")
    invisible(x)
}

## a convenient wrapper for Poisson and NegBin predictions
.pit <- function (x, mu, size = NULL, ...)
{
    if (is.null(size)) {
        pit.default(x = x, pdistr = "ppois", lambda = mu, ...)
    } else {
        pit.default(x = x, pdistr = "pnbinom", mu = mu, size = size, ...)
    }
}

## pit-methods for oneStepAhead() predictions and "hhh4" fits
## (similar to the scores-methods)

pit.oneStepAhead <- function (x, units = NULL, ...)
{
    if (is.null(units)) {
        .pit(x = x$observed, mu = x$pred, size = psi2size.oneStepAhead(x), ...)
    } else {
        .pit(x = x$observed[, units, drop = FALSE],
             mu = x$pred[, units, drop = FALSE],
             size = psi2size.oneStepAhead(x)[, units, drop = FALSE], ...)
    }
}

pit.hhh4 <- function (x, subset = x$control$subset, units = seq_len(x$nUnit), ...)
{
    .pit(x = x$stsObj@observed[subset, units, drop = FALSE],
         mu = x$fitted.values[match(subset, x$control$subset), units,
                              drop = FALSE],
         size = psi2size.hhh4(x, subset, units), ...)
}
surveillance/NEWS.md0000644000176200001440000030107514030545326014054 0ustar liggesusers# surveillance 1.19.1 (2021-03-30)

## DOCUMENTATION

- The project website at <http://surveillance.r-forge.r-project.org/> has been overhauled using [**pkgdown**](https://pkgdown.r-lib.org/).

## BUG FIXES

- The `CRS` of `data(imdepi)` and `data(measlesWeserEms)` have been updated via `sp`'s `rebuild_CRS()` to avoid warnings when **rgdal** is loaded with new PROJ and GDAL libraries.

- `simEpidataCS()` now internally resets the CRS (temporarily), which avoids spurious warnings and also reduces its runtime by about 25%.

- Fix encoding error in `vignette("twinstim")` for CRAN's non-UTF8 Linux test machine.

- This version of **surveillance** (formally) requires the new [**spatstat**](https://CRAN.R-project.org/package=spatstat) umbrella package to avoid collisions of old **spatstat** and its new sub-packages (we only use [**spatstat.geom**](https://CRAN.R-project.org/package=spatstat.geom)). The **spatstat** dependence will be dropped in the future.

- The `epoch<-` replacement method for `"sts"` objects now accepts a `"Date"` vector. The standard plots may give nicer x-axis annotation if indexed by dates. See the `xaxis.*` arguments of `stsplot_time()`.

- `tidy.sts()` (and thus `autoplot.sts()`) failed for date-indexed `"sts"` objects with non-standard frequencies. [spotted by Junyi Lu]

# surveillance 1.19.0 (2021-01-29)

## NEW FEATURES

- The `nowcast()` function with `method="bayes.trunc.ddcp"` now supports a negative binomial response distribution instead of the Poisson. Furthermore, additional components of the design matrix for the discrete time survival model can be provided, which allows the inclusion of, e.g., day of the week effects. Finally, the order of the polynomial created by the change-points in the discrete time survival model can now be specified.
For further details see the work of Guenther et al. (2020) about nowcasting the Covid-19 outbreak in Bavaria, Germany.

- `animate.sts()` can position the `timeplot` on other sides of the map.

## MINOR CHANGES

- The weighted sum in the `ne`ighbourhood component of `hhh4()` models is computed more efficiently.

- `simEpidataCS()` (and thus `simulate.twinstim()`) uses a slightly more efficient location sampler for models with `siaf = siaf.constant()`. Simulation results will differ from previous package versions even if the same random `seed` is used.

- The default `main` title for `stsplot_space()` now uses the ISO year-week format for weekly `"sts"` data.

## BUG FIXES

- Bug fix in the `farringtonFlexible()`-function, which for the arguments `thresholdMethod=="nbPlugin"` and `thresholdMethod=="muan"` unfortunately computed the limit as a `(1-alpha/2)` prediction interval instead of the documented `(1-alpha)` prediction interval. This affects four threshold values in Table 2 of `vignette("monitoringCounts")`. The default method `"delta"` worked as expected.

- In `hhh4()` models without AR component, the matrix of fitted values could lack column names.

- Experimental time-varying neighbourhood weights in `hhh4()` were indexed differently in model fitting and in the `simulate()` method (undocumented behaviour). Both now use the latter variant, where the mean at time *t* uses products of weights at time *t* and observed counts at time *t-1*. [reported by Johannes Bracher]

- For weekly `sts` indexed via `start` and `freq=52`, `epoch(sts, as.Date=TRUE)` now interprets the `start` week according to ISO 8601. For example, `start = c(2020, 5)` corresponds to 2020-01-27, not 2020-02-03. This affects `as.xts.sts()` and the time plot in `animate.sts()`.

- `stsplot_space()` automatically extends manual color breaks (`at`), if the intervals do not cover the data range.

- `simEndemicEvents()` and thus `epitest(..., method="simulate")` are no longer slowed down by intermediate `CRS()` computations.

## PACKAGE INFRASTRUCTURE

- Removed unused **rmapshaper** from "Suggests" and moved **xts** to "Enhances" (used only for `as.xts.sts`).

- Switched testing framework from (nowadays heavy) **testthat** to [**tinytest**](https://CRAN.R-project.org/package=tinytest). Together with moving **ggplot2** to "Enhances" (used only for `autoplot.sts`) --- and only then --- this switch further reduces the total number of required packages for a complete check (i.e., installing with `dependencies = TRUE`) in a *factory-fresh* R environment from 119 to 94.

- [**spatstat**](https://CRAN.R-project.org/package=spatstat) was split into several sub-packages, of which we only need to import [**spatstat.geom**](https://CRAN.R-project.org/package=spatstat.geom). This new package requires `R >= 3.5.0`, though.

- **surveillance** now requires `R >= 3.6.0`.

# surveillance 1.18.0 (2020-03-18)

## NEW FEATURES

- New spatial interaction function for `twinstim()`: `siaf.exponential()` implements the exponential kernel *f(x) = exp(-x/σ)*, which is a useful alternative if the two-parameter power-law kernel is not identifiable.

- The `plot`-type `"maps"` for `"hhh4"` fits, `plotHHH4_maps()`, now allows for map-specific color keys via `zmax = NA` (useful for `prop = TRUE`).

## BUG FIXES

- The `nowcast()`-function now also works for `method="bayes.trunc.ddcp"` when the number of breakpoints is greater than 1.
- The `amplitudeShift` transformation for sine-cosine coefficient pairs in the `summary` of multivariate `"hhh4"` models was incorrect in the rare case that the model used unit-specific seasonal terms (`addSeason2formula` with `length(S) > 1`).

## DEPRECATED AND DEFUNCT

- The original `algo.hhh()` implementation of the HHH model has been removed from the package. The function `hhh4()` provides an improved and much extended implementation since 2012.

# surveillance 1.17.3 (2019-12-16)

## BUG FIXES

- The `head()`-method for `"epidataCS"` objects did not work with a negative `n` argument.

- Fix for `"matrix"` changes in R-devel.

# surveillance 1.17.2 (2019-11-11)

## MINOR CHANGES

- For multivariate time series, `sts()` now checks for mismatches in column names of supplied matrices (`observed`, `population`, `neighbourhood`, ...). This is to catch input where the units (columns) are ordered differently in different slots, which would flaw subsequent analyses.

## BUG FIXES

- `simulate.twinSIR()` ignored the `atRiskY` indicator of the underlying `"epidata"`, so always assumed a completely susceptible population. Initially infectious individuals are now inherited. For the previous behaviour, adjust the supplied `data` via `data$atRiskY <- 1`.

# surveillance 1.17.1 (2019-09-13)

## NEW FEATURES

- New one-parameter power-law kernel `siaf.powerlaw1()` with fixed `sigma = 1`. Useful if `sigma` is difficult to estimate with `siaf.powerlaw()`.

## BUG FIXES

- `pit()`'s default `ylab` was wrong (the default is densities, not relative frequencies).

- `R0()` for `"twinstim"` fits with specified `newevents` now handles levels of epidemic factor variables automatically via the new `xlevels` attribute stored in the fitted model.

- Some S3 methods for the `"sts"` class are now formally registered and identical to the established S4 methods.

- Minor additions and fixes in the package documentation.

## DEPRECATED AND DEFUNCT

- `hcl.colors()`, exported since 1.14.0, has been renamed `.hcl.colors()` and is now internal again, to avoid a name clash with R's own such function introduced in R 3.6.0.

# surveillance 1.17.0 (2019-02-22)

## NEW FEATURES

- `W_powerlaw(..., from0 = TRUE)` enables more parsimonious `hhh4` models in that the power-law weights are modified to include the autoregressive (0-distance) case (see `vignette("hhh4_spacetime")`). The unstructured distance weights `W_np()` gained `from0` support as well.

- `sts()` creation can now handle `epoch` arguments of class `Date` directly.

- The `ranef()`-method for `"hhh4"` fits gained a logical argument `intercept` to extract the unit-specific intercepts of the log-linear predictors instead of the default zero-mean deviations around the fixed intercepts. The corresponding `plot` method (`type="ri"`) gained an argument `exp`: if set to `TRUE`, random effects are `exp`-transformed and thus show multiplicative effects. [based on feedback by Tim Pollington] See the usage sketch below.

## MINOR CHANGES

- `W_np()`'s argument `to0` has been renamed to `truncate`. The old name still works but is deprecated.

- `plotHHH4_ri()` now uses `cm.colors(100)` as `col.regions`, and 0-centered color breaks by default.

- The help pages of `twinSIR()` and related functions now give examples based on `data("hagelloch")` instead of using the toy dataset `data("fooepidata")`. The latter is now obsolete and will be removed in future versions of the package.

- The elements of the `control` list stored in the result of `algo.farrington()` are now consistently ordered as in the default `control` argument.
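The new `ranef()` and `plot()` arguments can be used as follows (a minimal sketch, where `fit` denotes a hypothetical `"hhh4"` model fitted with unit-specific random intercepts):

```r
ranef(fit)                          # zero-mean deviations around the fixed intercepts
ranef(fit, intercept = TRUE)        # the unit-specific intercepts themselves
plot(fit, type = "ri", exp = TRUE)  # exp-transformed, i.e., multiplicative effects
```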
## BUG FIXES

- Using negative indices to exclude time points from an `"sts"` object (e.g., `x[-1,]`) is now supported and equivalent to the corresponding subset expression of retained indices (`x[2:nrow(x),]`) in resetting the `start` and `epoch` slots. [reported by Johannes Bracher]

- For weekly `"sts"` data with `epochAsDate=TRUE`, the `as.data.frame()` method computed `freq` by `"%Y"`-year instead of by `"%G"`-year, which was inconsistent with the `epochInPeriod` variable.

- For *non*-weekly `"sts"` data with `epochAsDate=TRUE`, `year()` as well as the `year` column of the `tidy.sts()` output corresponded to the ISO week-based year. It now gives the calendar year.

- `sts_creation()` hard-coded `start = c(2006, 1)`.

- `aggregate()`ing an `"sts"` object over time now recomputes fractions from the cumulated population values if and only if the object is not a `multinomialTS` and already contains population fractions. The same rule holds when subsetting units of an `"sts"` object. The `aggregate`-method previously failed to recompute fractions in some cases.

- For `farringtonFlexible()` with multivariate time series, only the last unit had stored the additional control items (exceedance scores, p-values, ...); all others were 0. [reported by Johannes Bracher]

- The supplementary p-values returned by `farringtonFlexible()` in `control$pvalue` were wrong for the default approach, where `thresholdMethod="delta"` (the original Farrington method) and a power transformation was applied to the data (`powertrans != "none"`). Similarly, `algo.farrington()` returned wrong predictive probabilities in `control$pd[,1]` if a power transformation was used. [reported by Lore Merdrignac]

- The `control` argument list of `algo.farrington()` as stated in the formal function definition was incomplete (`plot` was missing) and partially out of sync with the default values that were actually set inside the function (`b=5` and `alpha=0.05`). This has been fixed. Results of `algo.farrington()` would only be affected if the function was called without any `control` options (which is unlikely in practice). So this can be regarded as a documentation error. The formal `control` list of the `farrington()` wrapper function has been adjusted accordingly.

- The `control` argument lists of `farringtonFlexible()` and `bodaDelay()` as stated in the formal function definitions were partially out of sync with respect to the following default values that were actually set inside these functions: `b=5` (not 3), `alpha=0.05` (not 0.01), `pastWeeksNotIncluded=w` (not 26), and, for `bodaDelay()` only, `delay=FALSE` (not `TRUE`). This has been fixed. Results would only be affected if the functions were called without any `control` options (which is unlikely in practice). So this can be regarded as a documentation error (see the sketch at the end of this list).

- `pairedbinCUSUM()` did not properly subset the `sts` object if a `range` was specified, and forgot to store the `control` arguments in the result.

- `wrap.algo()` now aborts if the monitored range is not supplied as a numeric vector.

- In `vignette("monitoringCounts")`: several inconsistencies between code and output have been fixed.

- `epidataCS2sts()` no longer transfers the `stgrid$BLOCK` indices to the `epoch` slot of the resulting `"sts"` object (to avoid `epoch[1] != 1` scenarios).

- The `ranef()` matrix extracted from fitted `"hhh4"` models could have wrong column names.
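
To be independent of such defaults, the monitoring settings can always be spelled out explicitly, e.g. (a sketch with a hypothetical `"sts"` object `stsObj`; the series must span enough reference years for the chosen `b`):

```r
surv <- farringtonFlexible(stsObj, control = list(
  range = 300:312,   # time points to monitor
  b = 5, w = 3,      # 5 reference years, window of +/- 3 weeks
  alpha = 0.05,      # documented default: (1-alpha) prediction interval
  thresholdMethod = "delta"
))
plot(surv)
```
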
## DEPRECATED AND DEFUNCT

- Several ancient functions deprecated in 1.16.1 are now defunct: `compMatrix.writeTable()`, `makePlot()`, `test()`, `testSim()`, `readData()` (the raw txt files have been removed as well), `correct53to52()`, `enlargeData()`, `toFileDisProg()`.

# surveillance 1.16.2 (2018-07-24)

## MINOR CHANGES

- `autoplot.sts()` gained a `width` argument to adjust the bar width, which now defaults to 7 for weekly time series (previously it was 90% of that, which left gaps between the bars).

- `"epidataCS"` generation now (again) employs [**spatstat**](https://CRAN.R-project.org/package=spatstat)'s `bdist.points()`, which has been accelerated in version 1.56-0. If you use the `twinstim()`-related modelling part of **surveillance**, you are thus advised to update your **spatstat** installation.

- The `boda()` examples in `vignette("monitoringCounts")` have been updated to also work with recent versions of **INLA**.

## BUG FIXES

- Offsets in the epidemic components of `hhh4` models were ignored by `simulate.hhh4()` as well as in the computation of dominant eigenvalues ("maxEV"). [spotted by Johannes Bracher]

- The color key in `fanplot()` is no longer distorted by `log="y"`.

# surveillance 1.16.1 (2018-05-28)

## BUG FIXES

- `autoplot.sts()` now sets the calling environment as the `plot_env` of the result.

- Several `twinstim`-related functions finally allow for prehistory events (long supported by `twinstim()` itself): `as.epidataCS()`, `glm_epidataCS()`, `as.epidata.epidataCS()`.

- The `summary()` for SI[R]S-type `"epidata"` failed if there were initially infectious individuals.

## DEPRECATED AND DEFUNCT

- Several ancient functions have been deprecated and may be removed in future versions of **surveillance**: `qlomax()`, `readData()`, `toFileDisProg()`, `correct53to52()`, `enlargeData()`, `compMatrix.writeTable()`, `test()`, `testSim()`, `makePlot()`.

# surveillance 1.16.0 (2018-01-24)

## NEW FEATURES

- The `as.data.frame()` method for `"sts"` objects gained a `tidy` argument, which enables conversion to the long data format and is also available as function `tidy.sts()`.

- A [**ggplot2**](https://CRAN.R-project.org/package=ggplot2) variant of `stsplot_time()` is now available via `autoplot.sts()`.

- `as.epidata.data.frame()` gained an argument `max.time` to specify the end of the observation period (which by default coincides with the last observed event).

- The now exported function `fanplot()` wraps [**fanplot**](https://CRAN.R-project.org/package=fanplot)`::fan()`. It is used by `plot.oneStepAhead()` and `plot.hhh4sims()`, which now have an option to add the point forecasts to the fan as well.

- `plotHHH4_fitted()` (and `plotHHH4_fitted1()`) gained an option `total` to sum the fitted components over all units.

## SIGNIFICANT CHANGES

- Package [**polyCub**](https://CRAN.R-project.org/package=polyCub) is no longer automatically attached (only imported).

- `scores.oneStepAhead()` no longer reverses the ordering of the time points by default, as announced in 1.15.0.

## MINOR CHANGES

- Some code in `vignette("monitoringCounts")` has been adjusted to work with the new version of [**MGLM**](https://CRAN.R-project.org/package=MGLM) (0.0.9).

- Added a `[`-method for the `"hhh4sims"` class to retain the attributes when subsetting simulations.

## BUG FIXES

- `aggregate(stsObj, by = "unit")` no longer results in empty colnames (set to `"overall"`). The obsolete map is dropped.

- The `subset` argument of `twinSIR()` was partially ignored:

  - If `nIntervals = 1`, the model `summary()` reported the total number of events.
  - Automatic `knots`, model `residuals()`, as well as the rug in `intensityplot()` were computed from the whole set of event times.

- The `as.epidata.data.frame()` converter did not actually allow for latent periods (via `tE.col`). This is now possible but considered experimental (methods for `"epidata"` currently ignore latent periods).

- The `all.equal()` methods for `"hhh4"` and `"twinstim"` objects now first check for the correct classes.

# surveillance 1.15.0 (2017-10-06)

## NEW FEATURES

- `siaf.gaussian()` now also employs a `polyCub.iso()` integration routine by default (similar to the powerlaw-type kernels), instead of adaptive midpoint cubature. This increases precision and considerably accelerates estimation of `twinstim()` models with a Gaussian spatial interaction function. Models fitted with the new default (`F.adaptive=FALSE, F.method="iso"`) will likely differ from previous fits (`F.adaptive=TRUE`), and the numerical difference depends on the adaptive bandwidth used before (the default `adapt=0.1` yielded a rather rough approximation of the integral).

- Added `quantile()`, `confint()`, and `plot()` methods for `"oneStepAhead"` predictions.

- Exported the function `simEndemicEvents()` to simulate a spatio-temporal point pattern from an endemic-only `"twinstim"`; faster than via the general `simulate.twinstim()` method.

## MINOR CHANGES

- `twinstim(..., siaf = siaf.gaussian())` uses a larger default initial value for the kernel's standard deviation (based on the size of the observation region).

- Non-default parametrizations of `siaf.gaussian()` are deprecated, i.e., always use `logsd=TRUE` and `density=FALSE`.

- `twinstim()` uses a smaller default initial value for the epidemic intercept, which usually allows for faster convergence.

- `update.hhh4()` now allows `subset.upper` values beyond the originally fitted time range (but still within the time range of the underlying `"sts"` object).

- `scores.oneStepAhead()` by default reverses the ordering of the time points. This awkward behaviour will change in the next version, so the method now warns if the default `reverse=TRUE` is used without explicit specification.

- Minor improvements in the documentation and some vignettes: corrected typos, simplified example code, documented some methods.

## BUG FIXES

- The C-routines introduced in version 1.14.0 used `==` comparisons on parameter values to choose among case-specific formulae (e.g., for *d==2* in `siaf.powerlaw()`). We now employ an absolute tolerance of 1e-7 (which should fix the failing tests on Solaris).

- Interaction functions for `twinstim()`, such as `siaf.powerlaw()` or `tiaf.exponential()`, no longer live in the global environment as this risks using masked base functions.

# surveillance 1.14.0 (2017-06-29)

## DOCUMENTATION

- The replication code from Meyer et al. (2017, JSS) is now included as `demo("v77i11")`. It exemplifies the spatio-temporal endemic-epidemic modelling frameworks `twinstim`, `twinSIR`, and `hhh4` (see also the corresponding vignettes).

## NEW FEATURES

- Pure C-implementations of integration routines for spatial interaction functions considerably accelerate the estimation of `twinstim()` models containing `siaf.powerlaw()`, `siaf.powerlawL()`, or `siaf.student()`.

- The color palette generating function used by `sts` plots, `hcl.colors`, is now exported.

- The utility function `clapply` (*c*onditional `lapply`) is now exported.
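
  A sketch of its use, assuming the documented behaviour that a non-list `X` is passed to `FUN` directly and the result is wrapped in a list:

  ```r
  clapply(list(1:2, 3:4), sum)  # behaves like lapply() for list input
  clapply(1:4, sum)             # non-list input: returns list(sum(1:4))
  ```
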
- Some utility functions for `hhh4` fits are now exported (`update.hhh4`, `getNEweights`, `coefW`), as well as several internal functions for use by `hhh4` add-on packages (`meanHHH`, `sizeHHH`, `decompose.hhh4`).

- The `"fan"`-type plot function for `"hhh4sims"` gained a `key.args` argument for an automatic color key.

- New auxiliary function `makeControl()`, which may be used to specify a `hhh4()` model.

## MINOR CHANGES

- `twinstim()` now throws an informative error message when trying to fit a purely epidemic model to data containing endemic events (i.e., events without ancestors). An example of such a model is given in `help("twinstim")`.

## BUG FIXES

- `siaf.powerlaw()$deriv` returned `NaN` for the partial derivative with respect to the decay parameter *d*, if *d* was large enough for *f* to be numerically equal to 0. It will now return 0 in this case.

- `twinstim()` could fail (with an error from `duplicated.default`) if the fitted time range was substantially reduced via the `T` argument.

- The `"simEpidataCSlist"` generated by `simulate.twinstim(..., simplify = TRUE)` was missing the elements `bbox` and `control.siaf`.

# surveillance 1.13.1 (2017-04-28)

## DOCUMENTATION

- The paper on "Spatio-Temporal Analysis of Epidemic Phenomena Using the R Package **surveillance**" (by Sebastian Meyer, Leonhard Held, and Michael Höhle) will appear in the upcoming volume of the *Journal of Statistical Software*. The main sections 3 to 5 of the paper are contained in the package as `vignette("twinstim")`, `vignette("twinSIR")`, and `vignette("hhh4_spacetime")`, respectively.

## NEW FEATURES

- The `calibrationTest()` and `pit()` methods for `"oneStepAhead"` forecasts gained an argument `units` to allow for unit-specific assessments.

- A default `scores`-method is now available to compute a set of proper scoring rules for Poisson or NegBin predictions.

- New plot `type = "fan"` for simulations from `"hhh4"` models to produce a fan chart using the [**fanplot**](https://CRAN.R-project.org/package=fanplot) package.

## MINOR CHANGES

- `scores.hhh4()` sets rownames for consistency with `scores.oneStepAhead()`.

## BUG FIXES

- The `"Lambda.const"` matrix returned by `getMaxEV_season()` was wrong for models with asymmetric neighbourhood weights. [spotted by Johannes Bracher]\
  Dominant eigenvalues (`"maxEV"`) were not affected by this bug.

# surveillance 1.13.0 (2016-12-20)

## NEW FEATURES

- Thanks to Howard Burkom, `earsC()` gained two new arguments: the number of past time units used in the calculation is no longer fixed at 7 but can be chosen via the `baseline` parameter, and the new `minSigma` parameter makes it possible to obtain a meaningful threshold in the case of sparse data. If neither parameter is specified, the algorithm behaves as before.

- `animate.sts()` gained support for date labels in the bottom `timeplot`.

- `stsplot_space()` and `animate.sts()` can now generate incidence maps based on the population information stored in the supplied `"sts"` object. Furthermore, `animate.sts()` now supports time-varying population numbers.

## MINOR CHANGES

- `hhh4()` guards against the misuse of `family = factor("Poisson")` for univariate time series. Previously, this resulted in a negative binomial model by definition, but is now interpreted as `family = "Poisson"` (with a warning).

## BUG FIXES

- `animate.sts()` now supports objects with missing values (with a warning). Furthermore, the automatic color breaks have been improved for incidence maps, also in `stsplot_space()`.
- The `as.data.frame`-method for the `"sts"` class, applied to classical time-index-based `"sts"` objects (`epochAsDate=FALSE`), ignored a `start` epoch different from 1 when computing the `epochInPeriod` indexes. Furthermore, the returned `epochInPeriod` is now a fraction of `freq`, for consistency with the result for objects with `epochAsDate=TRUE`.

- `simulate.hhh4()` did not handle shared overdispersion parameters correctly. The different parameters were simply recycled to the number of units, ignoring the factor specification from the model's `family`. [spotted by Johannes Bracher]

- Simulations from *endemic-only* `"hhh4"` models with unit-specific overdispersion parameters used wrong variances. [spotted by Johannes Bracher]

- `oneStepAhead()` predictions of `type` `"rolling"` (or `"first"`) were incorrect for time points `tp` (`tp[1]`) beyond the originally fitted time range (in that they were based on the original time range only). This usage of `oneStepAhead()` was never really supported and is now caught when checking the `tp` argument.

- `plot.hhh4simslist()` ignored its `par.settings` argument if `groups=NULL` (default).

# surveillance 1.12.2 (2016-11-14)

## NEW FEATURES

- The internal auxiliary function, which determines the sets of potential source events in `"epidataCS"`, has been implemented in C++, which accelerates `as.epidataCS()`, `permute.epidataCS()`, and therefore `epitest()`. This is only really relevant for `"epidataCS"` with a large number of events (>1000, say).

- Negative-binomial `hhh4()` models may not converge for non-overdispersed data (try, e.g., `set.seed(1); hhh4(sts(rpois(104, 10)), list(family="NegBin1"))`). The resulting non-convergence warning message now mentions low overdispersion if this is detected. [suggested by Johannes Bracher]

- An additional `type="delay"` option was added to the `plot` method of `stsNC` objects. Furthermore, an `animate_nowcasts` function allows one to animate a sequence of nowcasts.

## MINOR CHANGES

- In the `animate`-method for `"sts"` objects, the default top padding of **lattice** plots is now disabled for the bottom `timeplot` to reduce the space between the panels. Furthermore, the new option `fill` can be used to make the panel of the `timeplot` as large as possible.

## BUG FIXES

- `bodaDelay()`: fixed spurious warnings from `rnbinom()`.

- `vignette("monitoringCounts")`: fixed `boda`-related code and cache to obtain the same results as in the corresponding JSS paper.

# surveillance 1.12.1 (2016-05-18)

## DOCUMENTATION

- The new `vignette("monitoringCounts")` illustrates the monitoring of count time series in R with a particular focus on aberration detection in public health surveillance. This vignette corresponds to a recently accepted manuscript for the *Journal of Statistical Software* (Salmon, Schumacher, and Höhle, 2016).

## MINOR CHANGES

- Non-convergent `hhh4()` fits now obey the structure of standard `"hhh4"` objects. In particular, such fits now also contain the `control` and `stsObj` elements, allowing for model `update()`s of non-convergent fits.

- `knox()` warns about symmetric input matrices.

## BUG FIXES

- The code of `boda()` (with `samplingMethod="joint"`) and `bodaDelay()` (with `inferenceMethod="INLA"`) has been adjusted to a change of arguments of **INLA**'s `inla.posterior.sample` function. Accordingly, the minimum **INLA** version required to run `boda()` and `bodaDelay()` is 0.0-1458166556.
- The functions returned by `W_powerlaw()` now have the package namespace as their environment to support situations where the package is not attached.

- Attaching package [**nlme**](https://CRAN.R-project.org/package=nlme) after **surveillance** no longer masks `"hhh4"`'s `ranef`-method. (We now import the `fixef` and `ranef` generics from **nlme**.)

# surveillance 1.12.0 (2016-04-02)

## DOCUMENTATION

- Several new vignettes illustrate *endemic-epidemic* modeling frameworks for spatio-temporal surveillance data:

  `vignette("twinstim")`
  : describes a spatio-temporal point process regression model.

  `vignette("twinSIR")`
  : describes a multivariate temporal point process regression model.

  `vignette("hhh4_spacetime")`
  : describes an areal time-series model for infectious disease counts.

  These vignettes are based on a recently accepted manuscript for the *Journal of Statistical Software* (Meyer, Held, and Höhle, 2016).

- Improved the documentation on various help pages.

- The `hhh4()`-based analysis of `data("fluBYBW")` has been moved to a separate demo script 'fluBYBW.R'. Due to the abundance of models and the relatively long runtime, we recommend opening the script in an editor rather than running all the code at once using `demo("fluBYBW")`.

## NEW FEATURES

- Overhaul of the `"sts"` implementation. This mostly affects package-internal code, which is simpler, cleaner and better tested now, but requires R >= 3.2.0 (due to `callNextMethod()` bugs in older versions of R). Beyond that, the user-level constructor function `sts()` now has explicit arguments for clarity and convenience. For instance, its first argument sets the `observed` slot and no longer needs to be named, i.e., `sts(mycounts, start=c(2016,3), frequency=12)` works just like for the classical `ts()` function.

- `stsplot_time(..., as.one=TRUE)` is now implemented (yielding a simple `matplot` of multiple time series).

## MINOR CHANGES

- `plotHHH4_season()` now by default draws a horizontal reference line at unity if the multiplicative effect of component seasonality is shown (i.e., if `intercept=FALSE`).

- Since **surveillance** 1.8-0, `hhh4()` results are of class `"hhh4"` instead of `"ah4"` (renamed). Legacy methods for the old class name `"ah4"` have been removed.

- The internal model preparation in `twinstim()` is more efficient (the distance matrix of the events is only computed if event sources actually need to be updated).

## BUG FIXES

- `stsplot_spacetime()` now recognizes its `opts.col` argument.

- Conversion from `"ts"` to `"sts"` using `as(ts, "sts")` could set a wrong start time. For instance, `as(ts(1:10, start=c(1959,2), frequency=4), "sts")@start` was `c(1959,1)`.

- `algo.twins()` now also accepts `"sts"` input and the automatic legend in the first plot of `plot.atwins()` works again.

- The experimental `profile`-method for `"twinstim"` objects did not work if embedded `twinstim()` fits issued warnings.

# surveillance 1.11.0 (2016-02-08)

## NEW FEATURES

- `update.epidata()` can now handle a distance matrix `D` in the form of a classed `"Matrix"`. [suggested by George Wood]

- `glrnb()` can now handle `ret="cases"` for the generalized likelihood ratio detector based on the negative binomial distribution. It is based on a brute-force search and hence might be slow in some situations.

- `boda()` and `bodaDelay()` now support an alternative method (`quantileMethod="MM"`) to compute quantiles based on the posterior distribution.
  The new method samples parameters from the posterior distribution and then computes the quantile of the mixture distribution using bisection, which is faster and yields results similar to the original method (`quantileMethod="MC"`, still the default).

## MINOR CHANGES

- Revised `vignette("hhh4")`, updated the package description as well as some references in the documentation. Also updated (the cache of) the slightly outdated `vignette("surveillance")` to account for the corrected version of `algo.bayes()` implemented since **surveillance** 1.10-0.

## BUG FIXES

- Fixed a bug in `categoricalCUSUM()`, which ignored alarms generated for the last time point in `range`. Furthermore, for the binomial case, the exact computation of returns of type `"value"` is now indicated through an attribute.

- Fixed a bug in the `estimateGLRNbHook` function of `algo.glrnb`, which ignored potential fixed `alpha` values. If `alpha` is fixed, this is now taken into consideration while fitting the negative binomial function. See the revised help files for details.

- A hot-fix makes the `algo.quality` function work for `sts` objects as well as for `state` or `alarm` slots consisting of `TRUE`/`FALSE` instead of 0/1.

- `intensity.twinstim()` did not work for non-endemic models.

- A parallelized `epitest()` could fail with a strange error message if some replications were left unassigned. This seems to happen if forking is used (`mclapply`) with insufficient memory. Incomplete replications are now ignored with a warning.

# surveillance 1.10-0 (2015-11-04)

## NEW FEATURES

- Calibration tests for count data (Wei and Held, 2014, Test) are now implemented and available as `calibrationTest()`. In addition to a default method taking pure counts and predictive means and dispersion parameters, there are convenient methods for `"hhh4"` and `"oneStepAhead"` objects.

- Shared overdispersion across units in negative binomial `hhh4()` time series models (by specifying a factor variable as the `family` argument).

- `scores()` and `pit()` are now generic and have convenient methods for `"oneStepAhead"` predictions and `"hhh4"` fits.

- The initial values used for model updates during the `oneStepAhead()` procedure can now be specified directly through the `which.start` argument (as an alternative to the previous options `"current"` and `"final"`).

- `plotHHH4_fitted()` (and `plotHHH4_fitted1()`) gained an option `decompose` to plot the contributions from each single unit (and the endemic part) instead of the default endemic + AR + neighbours decomposition. Furthermore, a formatted time axis similar to `stsplot_time1()` can now be enabled via the new argument `xaxis`.

- The new `plot` `type` `"maps"` for `"hhh4"` fits shows maps of the fitted mean components averaged over time.

- New `plot`-method for simulations from `"hhh4"` models (using `simulate.hhh4(..., simplify = TRUE)`, which now has a dedicated class: `"hhh4sims"`) to show the final size distribution or the simulated time series (possibly stratified by groups of units). There is also a new `scores`-method to compute proper scoring rules based on such simulations.

- The argument `idx2Exp` of `coef.hhh4()` may now be conveniently set to `TRUE` to exp-transform all coefficients.

- Added a `coeflist()`-method for `"hhh4"` fits.

- The generator function `sts()` can now be used to initialize objects of class `"sts"` (instead of writing `new("sts", ...)`).

- Additional arguments of `layout.scalebar()` now allow changing the style of the labels.
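
  A rough sketch (the map object and its `population` column are hypothetical; argument names as documented in `help("layout.scalebar")`):

  ```r
  sb <- layout.scalebar(stsObj@map, corner = c(0.05, 0.05), scale = 25,
                        labels = c("0", "25 km"), cex = 0.7)
  spplot(stsObj@map, zcol = "population", sp.layout = sb)
  ```
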
- A pre-computed distance matrix `D` can now be used as input for the `as.epidata()` converter -- offering an alternative to the default Euclidean distance based on the individuals' coordinates. (Request of George Wood to support `twinSIR` models on networks.)

## MINOR CHANGES

- The first argument of `scores()` is now called `x` instead of `object` (for consistency with `calibrationTest()`).

- The result of `oneStepAhead()` now has the dedicated class attribute `"oneStepAhead"` (previously was just a list).

- Changed interpretation of the `col` argument of `plotHHH4_fitted()` and `plotHHH4_fitted1()` (moved color of "observed" to separate argument `pt.col` and reversed remaining colors). The old `col` specification as a vector of length 4 still works (caught internally) but is undocumented.

- The `epoch` slot of class `"sts"` is now initialized to `1:nrow(observed)` by default and thus no longer needs to be explicitly set when creating a `new("sts", ...)` for this standard case.

- Initialization of `new("sts", ...)` now supports the argument `frequency` (for consistency with `ts()`). Note that `freq` still works (via partial argument matching) and that the corresponding `"sts"` slot is still called `freq`.

- If `missing(legend.opts)` in `stsplot_time1()`, the default legend will only be produced if the `"sts"` object contains information on outbreaks, alarms, or upperbounds.

- The default `summary()` of a `"twinstim"` fit is more concise since it no longer includes the number of log-likelihood and score function evaluations and the elapsed time during model fitting. Set the new `runtime` argument of `summary.twinstim()` to `TRUE` to add this information to the summary as before.

- The `animate`-method for `"sts"` objects gained an argument `draw` (to disable the default instantaneous plotting) and now invisibly returns the sequential plot objects (of class `"gtable"` or `"trellis"`) in a list for post-processing.

- The flexible time axis configurations for `"sts"` plots introduced in version 1.8-0 now also work for classical `"sts"` objects with integer epochs and standard frequencies (try `plot(..., epochsAsDate = TRUE)`).

- `stsplot_time()` initiates `par` settings only if the `par.list` argument is a list.

- The new `all.equal()` method for class `"hhh4"` compares two fits ignoring their `"runtime"` and `"call"` elements (at least).

## BUG FIXES

- Fixed a bug in `algo.bayes`, where an alarm was already sounded if the current observation was equal to the quantile of the predictive posterior. This was changed in order to get *alarm_t = I(obs_t > quantile_t)*, which is consistent with the use in `boda` and `bodaDelay`.

- Fixed a bug in `algo.outbreakP` causing a halt in the computation of `value="cases"` when `calc.outbreakP.statistic` returned `NaN`. Now, a `NaN` is returned.

- The `wrap.algo` argument `control.hook` used a `control` argument defined outside its scope (and not the one provided to the function). The `control` list is now added as an additional second argument to the `control.hook` function.

- `stsplot_time()` did not account for the optional `units` argument for multivariate `"sts"` objects when choosing a suitable value for `par("mfrow")`.

- `hhh4()` could have used a function `dpois()` or `dnbinom()` from the global environment instead of the respective function from package **stats**.

- The default time variable `t` created as part of the `data` argument in `hhh4()` was incompatible with `"sts"` objects having `epochAsDate=TRUE`.
- A consistency check in `as.epidata.default()` failed for SI-type data (and, more generally, for all data which ended with an I-event in the last time block). [spotted by George Wood]

# surveillance 1.9-1 (2015-06-12)

- This is a quick patch release to make the test suite run smoothly on CRAN's Windows and Solaris Sparc systems.

- The new `hhh4()` option to scale neighbourhood weights did not work for parametric weights with more than one parameter if `normalize=FALSE`.

# surveillance 1.9-0 (2015-06-09)

## NEW FEATURES

- New functions and data for Bayesian outbreak detection in the presence of reporting delays (Salmon et al., 2015): `bodaDelay()`, `sts_observation()`, and `sts_creation()`.

- New functions implementing tests for space-time interaction:

  - `knox()` supports both the Poisson approximation and a Monte Carlo permutation approach to determine the p-value,

  - `stKtest()` wraps space-time K-function methods from package [**splancs**](https://CRAN.R-project.org/package=splancs) for use with `"epidataCS"`,

  - and `epitest()` for `twinstim` models (makes use of the new auxiliary function `simpleR0()`).

- New function `plapply()`: a parallel and verbose version of `lapply()` wrapping around both `mclapply()` and `parLapply()` of package **parallel**.

- New converter `as.xts.sts()` to transform `"sts"` objects to the quasi-standard `"xts"` class, e.g., to make use of package [**dygraphs**](https://CRAN.R-project.org/package=dygraphs) for interactive time series plots.

- New options for scaling and normalization of neighbourhood weights in `hhh4()` models.

- New auxiliary function `layout.scalebar()` for use as part of `sp.layout` in `spplot()` or in the traditional graphics system.

### New features for `"epidataCS"`

- New argument `by` for `plot.epidataCS()`, which defines a stratifying variable for the events (default is the event type as before). It can also be set to `NULL` so that the plot does not distinguish between event types.

- The spatial plot of `"epidataCS"` gained the arguments `tiles`, `pop` and `sp.layout`, and can now produce an `spplot()` with the tile-specific population levels behind the point pattern.

- New function `permute.epidataCS()` to randomly permute time points or locations of the events (holding other marks fixed).

### New features for `twinstim()`

- New S3-generic `coeflist()` to list model coefficients by component. It currently has a default method and one for `"twinstim"` and `"simEpidataCS"`.

- New argument `newcoef` for `simulate.twinstim()` to customize the model parameters used for the simulation.

- New argument `epilink` for `twinstim()`, offering experimental support for an identity link for the epidemic predictor. The default remains `epilink = "log"`.

- Simulation from `"twinstim"` models and generation of `"epidataCS"` is slightly faster now (faster **spatstat** functions are used to determine the distance of events to the border).

- New option `scaled = "standardized"` in `iafplot()` to plot *f(x) / f(0)* or *g(t) / g(0)*, respectively.

## MINOR CHANGES

- Initial data processing in `twinstim()` is faster since event sources are only re-determined if there is effective need for an update (due to subsetting or a change of `qmatrix`).

- `formatPval()` disables `scientific` notation by default.

- The `"time"` plot for `"epidataCS"` uses the temporal grid points as the default histogram `breaks`.
- The special `fe()` function which sets up fixed effects in `hhh4()` models gained an argument `unitSpecific` as a convenient shortcut for `which = rep(TRUE, nUnits)`.

- The convenient `plot` option of `permutationTest()` uses [**MASS**](https://CRAN.R-project.org/package=MASS)::`truehist()` instead of `hist()` and accepts graphical parameters to customize the histogram.

## BUG FIXES

- The `bodaFit` function did not draw samples from the joint posterior. Instead, draws were from the respective posterior marginals. A new argument `samplingMethod` has been introduced, defaulting to the proper 'joint'. For backwards compatibility, use the value 'marginal'.

- The functions `as.epidataCS()` and `simEpidataCS()` could throw inappropriate warnings when checking polygon areas (only if `W` or `tiles`, respectively, contained holes).

- Non-convergent endemic-only `twinstim` models produced an error. [spotted by Bing Zhang]

- The `"owin"`-method of `intersectPolyCircle` could have returned a rectangle-type `"owin"` instead of a polygon.

- An error occurred in `twinstim()` if `finetune=TRUE` or when choosing `optim()` instead of the default `nlminb()` optimizer without supplying a `control` list in `optim.args`.

- The `"time"` plot for `"epidataCS"` did not necessarily use the same histogram `breaks` for all strata.

- Specifying a step function of interaction via a numeric vector of knots did not work in `twinstim()`.

- `plot.hhh4()` did not support an unnamed `type` argument such as `plot(x, "season")`.

- `simEpidataCS()` did not work if `t0` was in the last block of `stgrid` (thus it did not work for single-cell grids), and mislabeled the `start` column copied to `events` if there were no covariates in `stgrid`.

- Evaluating `intensity.twinstim()$hFUN()` at time points before `t0` was an error. The function now returns `NA_real_` as for time points beyond `T`.

- Truncated, normalized power-law weights for `hhh4()` models, i.e., `W_powerlaw(maxlag = M, normalize = TRUE)` with `M < max(neighbourhood(stsObj))`, had wrong derivatives and thus failed to converge.

- `update.hhh4(..., use.estimates = TRUE)` did not use the estimated weight function parameters as initial values for the new fit. It does so now iff the weight function `ne$weights` is left unchanged.

# surveillance 1.8-3 (2015-01-05)

- Accommodate a new note given by R-devel checks, and set the new INLA additional repository in the 'DESCRIPTION' file.

- Made `linelist2sts()` work for quarters by adding extra `"%q"` formatting in `formatDate()`.

# surveillance 1.8-2 (2014-12-16)

## MINOR CHANGES related to `hhh4`

- In the coefficient vector resulting from a `hhh4` fit, random intercepts are now named.

- Parameter `start` values in `hhh4()` are now matched by name but need not be complete in that case (default initial values are used for unspecified parameters).

- The `update.hhh4()`-method now by default sets `use.estimates = TRUE`, i.e., it uses the estimates from the previous fit. This reduces the number of iterations during model fitting but may lead to slightly different parameter estimates (within a tolerance of `1e-5`). Setting `use.estimates = FALSE` means to re-use the previous start specification.

## MINOR CHANGES related to the `"sts"`-class

- For univariate `"sts"` objects, the (meaningless) "head of neighbourhood" is no longer `show`n.

- The `"sts"` class now has a `dimnames`-method instead of a `colnames`-method. Furthermore, the redundant `nrow` and `ncol` methods have been removed (the `dim`-method is sufficient).
- If a `map` is provided when `initialize()`ing an `"sts"` object, it is now verified that all `observed` regions are part of the `map` (matched by `row.names`).

- In `stsplot_space()`, extra (unobserved) regions of the `map` are no longer dropped but shown with a dashed border by default.

# surveillance 1.8-1 (2014-10-29)

## NEW FEATURES

- The `R0`-method for `"twinstim"` gained an argument `newcoef` to simplify computation of reproduction numbers with a different parameter vector (also used for Monte Carlo CI's).

- New plot `type="neweights"` for `"hhh4"` fits.

- The `scores()` function allows the selection of multiple `units` (by index or name) for which to compute (averaged) proper scores. Furthermore, one can now select `which` scores to compute.

- Added a `formula`-method for `"hhh4"` fits to extract the `f` specifications of the three components from the control list.

- The `update()`-method for fitted `"hhh4"` models gained an argument `S` for convenient modification of component seasonality using `addSeason2formula()`.

- The new auxiliary function `layout.labels()` generates an `sp.layout` item for `spplot()` in order to draw labels.

- When generating the `pit()` histogram with a single predictive CDF `pdistr`, the `...` arguments can now be `x`-specific and are recycled if necessary using `mapply()`. If `pdistr` is a list of CDFs, `pit()` no longer requires the functions to be vectorized.

- New method `as.epidata.data.frame()`, which constructs the start/stop SIR event history format from a simple individual-based data frame (e.g., `hagelloch.df`).

- New argument `w` in `as.epidata.default()` to generate covariate-based weights for the force of infection in `twinSIR`. The `f` argument is for distance-based weights.

- The result of `profile.twinSIR()` gained a class and an associated `plot`-method.

## MAJOR CHANGES

- For multivariate `oneStepAhead()` predictions, `scores(..., individual=TRUE)` now returns a 3d array instead of a collapsed matrix. Furthermore, the scores computed by default are `c("logs","rps","dss","ses")`, excluding the normalized squared error score `"nses"`, which is improper.

- The plot-`type="season"` for `"hhh4"` fits now by default plots the multiplicative effect of seasonality on the respective component (new argument `intercept=FALSE`). The default set of components to plot has also changed.

- When `as.epidata()` and `simEpidata()` calculate distance-based epidemic weights from the `f` functions, they no longer set the distance of an infectious individual to itself artificially to `Inf`. This changes the corresponding columns in the `"epidata"` in rows of currently infectious individuals, but the `twinSIR` model itself is invariant, since only rows with `atRiskY=1` contribute to the likelihood.

- Several modifications and corrections in `data("hagelloch")`.

## MINOR CHANGES

- Better plotting of `stsNC` objects by providing a dedicated plot method for them. Prediction intervals are now shown jointly with the point estimate.

- Reduced package size by applying `tools::resaveRdaFiles` to some large datasets and by building the package with `--compact-vignettes=both`, i.e., using additional GhostScript compression with ebook quality, see `?tools::compactPDF`.

- Added `units` argument to `stsplot_time` to select only a subset of the multivariate time series for plotting.

- The `untie`-method for class `"epidataCS"` gained an argument `verbose` which is now `FALSE` by default.
- `"epidataCS"` objects store the `clipper` used during generation as attribute of `$events$.influenceRegion`. - In `plotHHH4_fitted()`, the argument `legend.observed` now defaults to `FALSE`. - The default weights for the spatio-temporal component in `hhh4` models now are `neighbourhood(stsObj) == 1`. The previous default `neighbourhood(stsObj)` does not make sense for the newly supported `nbOrder` neighbourhood matrices (shortest-path distances). The new default makes no difference for (old) models with binary adjacency matrices in the neighbourhood slot of the `stsObj`. - The default for nonparametric weights `W_np()` in `hhh4()` is now to assume zero weight for neighbourhood orders above `maxlag`, i.e., `W_np()`'s argument `to0` now defaults to `TRUE`. - Added a `verbose` argument to `permutationTest()`, which defaults to `FALSE`. The previous behaviour corresponds to `verbose=TRUE`. - `simulate.twinstim()` now by default uses the original `data$W` as observation region. - The `data("measlesWeserEms")` contain two additional variables in the `@map@data` slot: `"vaccdoc.2004"` and `"vacc1.2004"`. - The plot-method for `"epidata"` objects now uses colored lines by default. - The **surveillance** package now depends on R >= 3.0.2, which, effectively, is the minimum version required since **surveillance** 1.7-0 (see the corresponding NEWS below). - The two diagnostic plots of `checkResidualProcess()` are now by default plotted side by side (`mfrow=c(1,2)`) instead of one below the other. ## BUG FIXES - In `farringtonFlexible` alarms are now for `observed>upperbound` and not for `observed>=upperbound` which was not correct. - Fixed duplicate `"functions"` element resulting from `update.twinstim(*,model=TRUE)` and ensured that `"twinstim"` objects always have the same components (some may be `NULL`). - `animate.epidata` works again with the [**animation**](https://CRAN.R-project.org/package=animation) package (`ani.options("outdir")` was removed in version 2.3) - For `hhh4` models with random effects, `confint()` only worked if argument `parm` was specified. - Computing one-sided AIC weights by simulation for `twinSIR` models with more than 2 epidemic covariates now is more robust (by rescaling the objective function for the quadratic programming solver) and twice as fast (due to code optimization). - `simulate.twinstim(..., rmarks=NULL)` can now handle the case where `data` has no events within the simulation period (by sampling marks from all of `data$events`). - The `lambda.h` values of simulated events in `"simEpidataCS"` objects were wrong if the model contained an endemic intercept (which is usually the case). - Automatic choice of color breaks in the `animate`-method for class `"sts"` now also works for incidence maps (i.e., with a `population` argument). - `hhh4()` did not allow the use of nonparametric neighbourhood weights `W_np()` with `maxlag=2`. - `scores()` did not work for multivariate `oneStepAhead()` predictions if both `individual=TRUE` and `sign=TRUE`, and it could not handle a `oneStepAhead()` prediction of only one time point. Furthermore, the `"sign"` column of `scores(..., sign=TRUE)` was wrong (reversed). - For `"epidataCS"` with only one event, `epidataCSplot_space()` did not draw the point. - The trivial (identity) call `aggregate(stsObj, nfreq=stsObj@freq)` did not work. 
# surveillance 1.8-0 (2014-06-16)

## PACKAGE INFRASTRUCTURE

- Package **surveillance** now depends on newer versions of packages [**sp**](https://CRAN.R-project.org/package=sp) (>= 1.0-15), [**polyCub**](https://CRAN.R-project.org/package=polyCub) (>= 0.4-2), and [**spatstat**](https://CRAN.R-project.org/package=spatstat) (>= 1.36-0). The R packages **INLA** and [**runjags**](https://CRAN.R-project.org/package=runjags) are now suggested to support a new outbreak detection algorithm (`boda()`) and the new `nowcast()`ing procedure, respectively. The R packages [**lattice**](https://CRAN.R-project.org/package=lattice), [**grid**](https://CRAN.R-project.org/package=grid), [**gridExtra**](https://CRAN.R-project.org/package=gridExtra), and [**scales**](https://CRAN.R-project.org/package=scales) are suggested for added visualization facilities.

- More tests have been implemented to ensure package integrity. We now use [**testthat**](https://CRAN.R-project.org/package=testthat) instead of the outdated package [**RUnit**](https://CRAN.R-project.org/package=RUnit).

- `hhh4()` fits now have class `"hhh4"` instead of `"ah4"`, for consistency with `twinstim()`, `twinSIR()`, and to follow the common convention (cf. `lm()`). Standard S3-methods for the old `"ah4"` name are still available for backwards compatibility but may be removed in the future.

- Plot variants for `"sts"` objects have been cleaned up: the functions implementing the various plot types (`stsplot_*`, previously named `plot.sts.*`) are now exported and documented separately.

## NEW FEATURES

- The `nowcast` procedure has been completely re-written to handle the inherent right-truncation of reporting data (best visualized as a reporting triangle). The new code implements the generalized-Dirichlet and the hierarchical Bayesian approach described in Höhle and an der Heiden (2014). No backwards compatibility with the old nowcasting procedure is given.

- The package contains a new monitoring function `boda`. This is a first experimental surveillance implementation of the Bayesian Outbreak Detection Algorithm (BODA) proposed in Manitz and Höhle (2012). The function relies on the non-CRAN package **INLA**, which has to be installed in order to use this function. Expect initial problems.

- New `toLatex`-method for `"sts"` objects.

- The new function `stsplot_space()` provides an improved map plot of disease incidence for `"sts"` objects aggregated over time. It corresponds to the new `type = observed ~ unit` of the `stsplot`-method, and supersedes `type = observed ~ 1|unit` (except for alarm shading).

- An `animate()`-method for the `"sts"` class provides a new implementation for animated maps (superseding the `plot` `type=observed ~ 1 | unit * time`) with an optional evolving time series plot below the map.

- The `plot()` method for `"sts"` objects with epochs as dates is now made more flexible by introducing the arguments `xaxis.tickFreq`, `xaxis.labelFreq` and `xaxis.labelFormat`. These allow the specification of tick-marks and labelling based on `strftime` compatible conversion codes -- irrespective of whether the data are daily, weekly, monthly, etc. As a consequence, the old argument `xaxis.years` is removed. See `stsplot_time()` for more information.

- Inference for neighbourhood weights in `hhh4()` models: `W_powerlaw()` and `W_np()` both implement weights depending on the order of neighbourhood between regions, as a power-law decay (`W_powerlaw()`) and as nonparametric weights, i.e., unconstrained estimation of individual weights for each neighbourhood order (`W_np()`).
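
  A sketch of their use in a model specification (the `"sts"` object `stsObj` is hypothetical; its `neighbourhood` slot is assumed to contain neighbourhood orders, e.g., as computed by `nbOrder()`):

  ```r
  fit <- hhh4(stsObj, control = list(
    ar  = list(f = ~1),
    ne  = list(f = ~1, weights = W_powerlaw(maxlag = 5)),  # or W_np(maxlag = 3)
    end = list(f = addSeason2formula(~1, period = 52)),
    family = "NegBin1"
  ))
  ```
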
- `hhh4()` now allows the inclusion of multiplicative offsets also in the epidemic components `"ar"` and `"ne"`.

- `hhh4()` now has support for `lag != 1` in the autoregressive and neighbour-driven components. The applied lags are stored as component `"lags"` of the return value (the previously unused component `"lag"`, which was always 1, has been removed).

- `oneStepAhead()`:

  - Added support for parallel computation of predictions using `mclapply()` from package **parallel**.

  - New argument `type` with a new `type` `"first"` to base all subsequent one-step-ahead predictions on a single initial fit.

  - Nicer interpretation of `verbose` levels, and `txtProgressBar()`.

- The `plot()`-method for fitted `hhh4()` objects now offers three additional types of plots: component seasonality, seasonal or time course of the dominant eigenvalue, and maps of estimated random intercepts. It is documented and more customizable. Note that argument order and some names have changed: `i` -> `units`, `title` -> `names`.

- (Deviance) `residuals()`-method for fitted `hhh4()` models.

- Added methods of `vcov()` and `nobs()` for the `"hhh4"` class. For `AIC()` and `BIC()`, the default methods work smoothly now (due to changes to `logLik.hhh4()` documented below).

- New predefined interaction functions for `twinstim()`: `siaf.student()` implements a *t*-kernel for the distance decay, and `siaf.step()` and `tiaf.step()` provide step function kernels (which may also be invoked by specifying the vector of knots as the `siaf` or `tiaf` argument in `twinstim`).

- Numerical integration over polygonal domains in the `F` and `Deriv` components of `siaf.powerlaw()` and `siaf.powerlawL()` is much faster and more accurate now since we use the new `polyCub.iso()` instead of `polyCub.SV()` from package [**polyCub**](https://CRAN.R-project.org/package=polyCub).

- New `as.stepfun()`-method for `"epidataCS"` objects.

- `plot.epidataCS()`:

  - The spatial plot has new arguments to automatically add legends to the plot: `legend.types` and `legend.counts`. It also gained an `add` argument.

  - The temporal plot now supports type-specific sub-histograms, additional lines for the cumulative number of events, and an automatic legend.

- The new function `glm_epidataCS()` can be used to fit an endemic-only `twinstim()` via `glm()`. This is mainly provided for testing purposes since wrapping into `glm` usually takes longer.

## MAJOR CHANGES

- Fitted `hhh4()` objects no longer contain the associated `"sts"` data twice: it is now only stored as the `$stsObj` component; the hidden duplicate in `$control$data$.sts` was dropped, which makes fitted objects substantially smaller.

- `logLik.hhh4()` always returns an object of class `"logLik"` now; for random effects models, its `"df"` attribute is `NA_real_`. Furthermore, for non-convergent fits, `logLik.hhh4()` gives a warning and returns `NA_real_`; previously, an error was thrown in this case.

- `oneStepAhead()`:

  - Default of `tp[2]` is now the penultimate time point of the fitted subset (not of the whole `stsObj`).

  - `+1` on rownames of `$pred` (now the same as for `$observed`).

- The optional `"twinstim"` result components `fisherinfo`, `tau`, and `functions` are always included. They are set to `NULL` if they are not applicable instead of missing completely (as before), such that all `"twinstim"` objects have the same list structure.

- `iafplot()` ...
  - invisibly returns a matrix containing the plotted values of the (scaled) interaction function (and the confidence interval as an attribute). Previously, nothing (`NULL`) was returned.

  - detects a type-specific interaction function and by default uses `types=1` if it is not type-specific.

  - has better default axis ranges.

  - adapts to the new step function kernels (with new arguments `verticals` and `do.points`).

  - supports logarithmic axes (via a new `log` argument passed on to `plot.default`).

  - optionally respects `eps.s` and `eps.t`, respectively (by the new argument `truncated`).

  - now uses `scaled=TRUE` by default.

- The argument `colTypes` of `plot.epidataCS(,aggregate="space")` is deprecated (use `points.args$col` instead).

- The events in an `"epidataCS"` object no longer have a reserved `"ID"` column.

## MINOR CHANGES

- `hhh4()` now stores the runtime just like `twinstim()`.

- `hhh4()` now takes `verbose=FALSE` more seriously.

- `hhh4()` issues a `warning()` if non-convergent.

- The following components of a `hhh4()` fit now have names: `"se"`, `"cov"`, `"Sigma"`.

- The new default for `pit()` is to produce the plot.

- The `twinstim()` argument `cumCIF` now defaults to `FALSE`.

- `update.twinstim()` no longer uses recursive `modifyList()` for the `control.siaf` argument. Instead, the supplied new list elements (`"F"`, `"Deriv"`) completely replace the respective elements from the original `control.siaf` specification.

- `siaf.lomax()` is now defunct (it has been deprecated since version 1.5-2); use `siaf.powerlaw()` instead.

- Allow the default `adapt`ive bandwidth to be specified via the `F.adaptive` argument in `siaf.gaussian()`.

- Unsupported options (`logpars=FALSE`, `effRangeProb`) have been dropped from `siaf.powerlaw()` and `siaf.powerlawL()`.

- More rigorous checking of `tiles` in `simulate.twinstim()` and `intensityplot.twinstim`.

- `as.epidataCS()` gained a `verbose` argument.

- `animate.epidataCS()` now by default does not draw influence regions (`col.influence=NULL`), is `verbose` if `interactive()`, and ignores `sleep` on non-interactive devices.

- The `multiplicity`-generic and its default method have been integrated into [**spatstat**](https://CRAN.R-project.org/package=spatstat) and are imported from there.

## DATA

- The polygon representation of Germany's districts ( `system.file("shapes", "districtsD.RData", package="surveillance")` ) has been simplified further. The union of `districtsD` is used as observation window `W` in `data("imdepi")`. The exemplary `twinstim()` fit `data("imdepifit")` has been updated as well. Furthermore, `row.names(imdepi$events)` have been reset (chronological index), and numerical differences in `imdepi$events$.influenceRegion` are due to changes in [**polyclip**](https://CRAN.R-project.org/package=polyclip) 1.3-0.

- The campylobacteriosis data set `campyDE`, where absolute humidity is used as a concurrent covariate to adjust the outbreak detection, has been added to the package to exemplify `boda()`.

- New `data("measlesWeserEms")` (of class `"sts"`), a corrected version of `data("measles.weser")` (of the old `"disProg"` class).

## BUG FIXES

- Fixed a bug in `LRCUSUM.runlength` where computations were erroneously always done under the in-control parameter `mu0` instead of `mu`.

- Fixed a bug in alarm plots (`stsplot_alarm()`), where `alarm.symbol` was ignored.
- Fixed a bug in `algo.glrnb` where the overdispersion parameter `alpha` from the automatically fitted `glm.nb` model (fitted by `estimateGLRNbHook`) was incorrectly taken as `mod[[1]]$theta` instead of `1/mod[[1]]$theta`. The error is due to a different parametrization of the negative binomial distribution compared to the parametrization in Höhle and Paul (2008).

- The score function of `hhh4()` was wrong when fitting endemic-only models to a `subset` including the first time point. This led to "false convergence".

- `twinstim()` did not work without an endemic offset if `is.null(optim.args$par)`.

# surveillance 1.7-0 (2013-11-19)

## SYNOPSIS

- Package [**gpclib**](https://CRAN.R-project.org/package=gpclib) is no longer necessary for the construction of `"epidataCS"`-objects. Instead, we make use of the new dedicated package [**polyclip**](https://CRAN.R-project.org/package=polyclip) (licensed under the BSL) for polygon clipping operations (via `spatstat::intersect.owin()`). This results in a slightly different `$events$.influenceRegion` component of `"epidataCS"` objects, one reason being that **polyclip** uses integer arithmetic. Changes in `twinstim()` estimates for a newly created `"epidataCS"` compared with the same data prepared in earlier versions should be very small (e.g., for `data("imdepifit")` the mean relative difference of coefficients is 3.7e-08, while the `logLik()` is `all.equal()`). As an alternative, **rgeos** can still be chosen to do the polygon operations.

- The **surveillance**-internal code now depends on R >= 2.15.2 (for the `nlminb()` `NA` fix of PR#15052, consistent `rownames(model.matrix)` of PR#14992, `paste0()`, and `parallel::mcmapply()`). However, the required recent version of **spatstat** (1.34-0, for **polyclip**) actually needs R >= 3.0.2, which therefore also applies to **surveillance**.

- Some minor new features and changes are documented below.

## NEW FEATURES

- Functions `unionSpatialPolygons()` and `intersectPolyCircle()` are now exported. Both are wrappers around functionality from different packages supporting polygon operations: for determining the union of all subpolygons of a `"SpatialPolygons"` object, and the intersection of a polygonal and a circular domain, respectively.

- `discpoly()` moved back from [**polyCub**](https://CRAN.R-project.org/package=polyCub) to **surveillance**.

## MINOR CHANGES

- **surveillance** now Depends on [**polyCub**](https://CRAN.R-project.org/package=polyCub) (>= 0.4-0) and not only Imports it (which avoids `::`-references in .GlobalEnv-made functions).

- Nicer default axis labels for `iafplot()`.

- For `twinstim()`, the default is now to `trace` every iteration instead of every fifth only.

- Slightly changed default arguments for `plot.epidata()`: `lwd` (1 -> 2), `rug.opts` (`col` is set according to `which.rug`).

- `twinstim()` saves the vector of `fixed` coefficients as part of the returned `optim.args` component, such that these will again be held fixed upon `update()`.

- The `plot`-method for `hhh4()`-fits allows for region selection by name.

# surveillance 1.6-0 (2013-09-03)

## SYNOPSIS

- The `polyCub`-methods for cubature over polygonal domains have been moved to the new dedicated package [**polyCub**](https://CRAN.R-project.org/package=polyCub), since they are of rather general use. The `discpoly()` function has also been moved to that package.
- As a replacement for the license-restricted **gpclib** package, the **rgeos** package is now used by default (`surveillance.options(gpclib=FALSE)`) in generating `"epidataCS"` (polygon intersections, slightly slower). Therefore, when installing **surveillance** version 1.6-0, the system requirements for [**rgeos**](https://CRAN.R-project.org/package=rgeos) have to be met, i.e., GEOS must be available on the system. On Linux variants this means installing 'libgeos' ('libgeos-dev').

- The improved Farrington method described in Noufaily et al. (2012) is now available as function `farringtonFlexible()`.

- New handling of reference dates in `algo.farrington()` for `"sts"` objects with `epochAsDate=TRUE`. Instead of always going back in time to the next Date in the `"epoch"` slot, the function now determines the *closest* Date. Note that this might lead to slightly different results for the upperbound compared to previous versions. Furthermore, the functionality is only tested for weekly data (monthly data are experimental). The same functionality applies to `farringtonFlexible()`.

- To make the different retrospective modelling frameworks of the **surveillance** package jointly applicable, it is now possible to convert (aggregate) `"epidataCS"` (continuous-time continuous-space data) into an `"sts"` object (multivariate time series of counts) by the new function `epidataCS2sts`.

- Simulation from `hhh4` models has been re-implemented, which fixes a bug and makes it more flexible and compatible with a wider class of models.

- The `map`-slot of the `"sts"` class now requires `"SpatialPolygons"` (only) instead of `"SpatialPolygonsDataFrame"`.

- Re-implementation of `oneStepAhead()` for `hhh4`-models with a bug fix, some speed-up and more options.

- Slight speed-up for `hhh4()` fits, e.g., by more use of `.rowSums()` and `.colSums()`.

- Crucial speed-up for `twinstim()` fits through more efficient code: use of `mapply()`, a clumsy `for`-loop in `fisherinfo` was dropped, and a new argument `cores` enables parallel computing via forking (not available on Windows).

- Some further new features, minor changes, and bug fixes are described in the following subsections.

## NEW FEATURES

- Using `tiaf.exponential()` in a `twinstim()` now works with `nTypes=1` for multi-type data.

- A legend can be added automatically in `iafplot()`.

- The `untie` methods are now able to produce jittered points with a required minimum separation (`minsep`).

- `simulate.ah4` gained a `simplify` argument.

- New `update`-method for fitted `hhh4`-models (class `"ah4"`).

- `oneStepAhead()` has more options: specify a time range (not only the start), choose the type of start values, `verbose` argument.

- `pit()` allows for a list of predictive distributions (`pdistr`), one for each observation `x`.

- New spatial auxiliary function `polyAtBorder()` indicating polygons at the border (for a `"SpatialPolygons"` object).

- `animate.epidataCS()` allows for a `main` title and can show a progress bar.

## MINOR CHANGES

- Changed parametrization of `zetaweights()` and completed its documentation (now no longer marked as experimental).

- `twinstim(...)$converged` is `TRUE` if the optimization routine converged (as before) but contains the failure message otherwise.

- Increased the default `maxit` for the Nelder-Mead optimizer in `hhh4` from 50 to 300, and removed the default artificial lower bound (-20) on intercepts of epidemic components.
- Renamed the list returned by `oneStepAhead` (`mean` -> `pred`, `x` -> `observed`, `params` -> `coefficients`, `variances` -> `Sigma.orig`) for consistency, and `oneStepAhead()$psi` is only non-`NULL` if we have a NegBin model.

- The argument order of `pit()` has changed; it is also faster now and gained the additional arguments `relative` and `plot`.

- `twinstim(...)$runtime` now contains the complete information from `proc.time()`.

## BUG FIXES

- Fixed a bug in function `refvalIdxByDate()` which produced empty reference values (i.e. `NA`s) in case the Date entries of `epoch` were not Mondays. Note: The function works by subtracting `1:b` years from the date of the range value and then takes the span `-w:w` around this value. For each value in this set it is determined whether the closest date in the epoch slot is obtained by going forward or backward. Note that this behaviour is now slightly changed compared to previously, where we *always* went back in time.

- `algo.farrington()`: Reference values too far back in time and hence not being in the `"epoch"` slot of the `"sts"` object are now ignored (previously the resulting `NA`s caused the function to halt). A warning is displayed in this case.

- `hhh4`: The entry *(5,6)* of the marginal Fisher information matrix in models with random intercepts in all three components was incorrect. If `nlminb` was used as the optimizer for the variance parameters (using the negative marginal Fisher information as Hessian), this could have caused false convergence (with warning) or minimally biased convergence (without warning). As a consequence, the `"Sigma.cov"` component of the `hhh4()` result, which is the inverse of the marginal Fisher information matrix at the MLE, was also wrong.

- `untie.matrix()` could have produced jittering greater than the specified `amount`.

- `hhh4`: if there are no random intercepts, the redundant `updateVariance` steps are no longer evaluated.

- `update.twinstim()` did not work with `optim.args=..1` (e.g., if updating a list of models with `lapply`). Furthermore, if adding the `model` component only, the `control.siaf` and `optim.args` components were lost.

- `earsC` should now also work with multivariate `sts` time-series objects.

- The last week in `data(fluBYBW)` (row index 417) has been removed. It corresponded to week 1 in year 2009 and was wrong (an artifact, filled with zero counts only). Furthermore, the regions in `@map` are now ordered the same as in `@observed`.

- Fixed the start value of the overdispersion parameter in `oneStepAhead` (it must be on the internal log-scale, not reparametrized as returned by `coef()` by default).

- When subsetting `"sts"` objects in time, `@start` was updated but not `@epoch`.

- `pit` gave `NA` results if any `x[-1]==0`.

- The returned `optim.args$par` vector in `twinstim()` was missing any fixed parameters.

- `hhh4()` did not work with time-varying neighbourhood weights due to an error in the internal `checkWeightsArray()` function.

# surveillance 1.5-4 (2013-04-21)

## SYNOPSIS

- Fixed obsolete `.path.package()` calls.

- Small corrections in the documentation.

- `update.twinstim()` performs better in preserving the original initial values of the parameters.

- New pre-defined spatial interaction function `siaf.powerlawL()`, which implements a _L_agged power-law kernel, i.e. it accounts for uniform short-range dispersal.

# surveillance 1.5-2 (2013-03-15)

## SYNOPSIS

- New method for outbreak detection: `earsC` (the CUSUM method described in the CDC Early Aberration Reporting System, see Hutwagner et al., 2003). A minimal usage sketch is given below.
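  (The data set and control settings here are arbitrary placeholders; see `?earsC` for the available options.)

  ```r
  library("surveillance")
  data("salmNewport")  # example "sts" object shipped with the package
  surv <- earsC(salmNewport[, 1], control = list(method = "C1"))
  plot(surv)
  ```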
- New features and minor bug fixes for the "`twinstim`" part of the package (see below).

- Yet another p-value formatting function, `formatPval()`, is now also part of the **surveillance** package.

- `polyCub.SV()` now also accepts objects of classes `"Polygon"` and `"Polygons"` for convenience.

- `siaf.lomax` is deprecated and replaced by `siaf.powerlaw` (a re-parametrization).

## NEW FEATURES (`twinstim()`-related)

- The temporal `plot`-method for class `"epidataCS"` now understands the `add` parameter to add the histogram to an existing plot window, and auto-transforms the `t0.Date` argument using `as.Date()` if necessary.

- `nobs()` methods for classes `"epidataCS"` and `"twinstim"`.

- New argument `verbose` for `twinstim()` which, if set to `FALSE`, disables the printing of information messages during execution.

- New argument `start` for `twinstim()`, where (some) initial parameter values may be provided; these overwrite those in `optim.args$par`, which is no longer required (as a naive default, a crude estimate for the endemic intercept and zeroes for the other parameters are used).

- Implemented a wrapper `stepComponent()` for `step()` to perform algorithmic component-specific model selection in `"twinstim"` models. This also required the implementation of suitable `terms()` and `extractAIC()` methods. The single-step methods `add1()` and `drop1()` are also available.

- The `update.twinstim()` method now by default uses the parameter estimates from the previous model as initial values for the new fit (new argument `use.estimates = TRUE`).

- `as.epidataCS()` checks for consistency of the area of `W` and the (now really obligatory) area column in `stgrid`.

- `simulate.twinstim()` now by default uses the previous `nCircle2Poly` from the `data` argument.

- New `direction` argument for `untie.epidataCS()`.

- The `toLatex`-method for `"summary.twinstim"` has different defaults and a new argument `eps.Pvalue`.

- New `xtable`-method for `"summary.twinstim"` for printing the covariate effects as risk ratios (with CIs and p-values).

## NEW FEATURES (`hhh4()`-related)

- New argument `hide0s` in the `plot`-method for class `"ah4"`.

- New argument `timevar` for `addSeason2formula()`, which now also works for long formulae.

# surveillance 1.5-1 (2012-12-14)

## SYNOPSIS

- The **surveillance** package is again backward-compatible with R version 2.14.0, which is now declared as the minimum required version.

# surveillance 1.5-0 (2012-12-12)

## SYNOPSIS

- This new version mainly improves upon the `twinstim()` and `hhh4()` implementations (see below).

- As requested by the CRAN team, examples now run faster. Some are conditioned on the value of the new package option `"allExamples"`, which usually defaults to `TRUE` (but is set to `FALSE` for CRAN checking, if timings are active).

- Moved some rarely used package dependencies to "Suggests:", and also removed some unused packages from there.

- Dropped the strict dependence on [**gpclib**](https://CRAN.R-project.org/package=gpclib), which has a restricted license, for the **surveillance** package to be clearly GPL-2. Generation of `"epidataCS"` objects, which makes use of **gpclib**'s polygon intersection capabilities, now requires prior explicit acceptance of the **gpclib** license via setting `surveillance.options(gpclib = TRUE)`. Otherwise, `as.epidataCS()` and `simEpidataCS()` may not be used.

## NEW FEATURES (`twinstim()`-related)

- Speed-up by memoisation of the `siaf` cubature (using the [**memoise**](https://CRAN.R-project.org/package=memoise) package).
- Allow for the `nlm` optimizer (really not recommended).

- Allow for `nlminb`-specific control arguments.

- Use of the expected Fisher information matrix can be disabled for `nlminb` optimization.

- Use of the `effRange` trick can be disabled in `siaf.gaussian()` and `siaf.lomax()`. The default `effRangeProb` argument for the latter has been changed from 0.99 to 0.999.

- The `twinstim()` argument `nCub` has been replaced by the new `control.siaf` argument list. The old `nCub.adaptive` indicator became a feature of the `siaf.gaussian()` generator (named `F.adaptive` there) and no longer depends on the `effRange` specification, but uses the bandwidth `adapt*sd`, where the `adapt` parameter may be specified in the `control.siaf` list in the `twinstim()` call. Accordingly, the components `"nCub"` and `"nCub.adaptive"` have been removed from the result of `twinstim()`, and are replaced by `"control.siaf"`.

- The `"method"` component of the `twinstim()` result has been replaced by the whole `"optim.args"`.

- The new `"Deriv"` component of `siaf` specifications integrates the `siaf$deriv` function over a polygonal domain. `siaf.gaussian()` and `siaf.lomax()` use `polyCub.SV()` (with intelligent `alpha` parameters) for this task (previously: a midpoint rule with a naive bandwidth).

- `iafplot()` gained a `scaled` argument (default `FALSE`). The `ngrid` parameter has been renamed to `xgrid` and is more general.

- The `"simulate"` component of `siaf` specifications takes an argument `ub` (an upper bound for the distance from the source).

- Numerical integration of spatial interaction functions with an `Fcircle` trick is more precise now; this slightly changes previous results.

- New S3 generic `untie()` with a method for the `"epidataCS"` class (to randomly break tied event times and/or locations).

- Renamed the `N` argument of `polyCub.SV()` to `nGQ`, and `a` to `alpha`; both have new default values. The optional polygon rotation proposed by Sommariva & Vianello is now also implemented (based on the corresponding MATLAB code) and available as the new `rotation` argument.

- The `scale.poly()` method for `"gpc.poly"` is now available as `scale.gpc.poly()`. The default return class of `discpoly()` was changed from `"gpc.poly"` to `"Polygon"`.

- An `intensityplot()`-method is now also implemented for `"simEpidataCS"`.

## NEW FEATURES (`hhh4()`-related)

- Significant speed-up (runs about 6 times faster now, among other things by many code optimizations and by using sparse [**Matrix**](https://CRAN.R-project.org/package=Matrix) operations).

- `hhh4()` optimization routines can now be customized separately for the updates of the regression and the variance parameters. This, for instance, enables the use of Nelder-Mead for the variance updates, which seems to be more stable/robust as it does not depend on the inverse Fisher information and is usually faster.

- The `ranef()` extraction function for `"ah4"` objects gained a useful `tomatrix` argument, which re-arranges random effects in a units x effects matrix (also transforming CAR effects appropriately).

- Generalized `hhh4()` to also capture parametric neighbourhood weights (like a power-law decay). The new function `nbOrder()` determines the neighbourhood order matrix from a binary adjacency matrix (depends on package [**spdep**](https://CRAN.R-project.org/package=spdep)).

- New argument `check.analyticals` (default `FALSE`), mainly for development purposes.

## BUG FIXES

- Fixed the sign of the observed Fisher information matrix in `twinstim`.

- Simulation from the Lomax kernel is now correct (via polar coordinates).
- Fixed a wrong Fisher information entry for the overdispersion parameter in `hhh4` models.

- Fixed wrong entries in the penalized Fisher information w.r.t. the combination of fixed effects and the CAR intercept.

- Fixed an indexing bug in the penalized Fisher calculation in the case of multiple overdispersion parameters and random intercepts.

- Fixed a bug in the Fisher matrix calculation concerning the relation of unit-specific and random effects (this did not work previously).

- Improved handling of non-convergent / degenerate solutions during `hhh4` optimization. This involves using `ginv()` from package [**MASS**](https://CRAN.R-project.org/package=MASS) if the penalized Fisher information is singular.

- Correct labeling of the overdispersion parameter in `"ah4"` objects.

- Some control arguments of `hhh4()` have clearer defaults.

- The result of `algo.farrington.fitGLM.fast()` now additionally inherits from the `"lm"` class to avoid warnings from `predict.lm()` about a fake object.

- Improved 'NAMESPACE' imports.

- Some additional tiny bug fixes; see the subversion log on R-Forge for details.

# surveillance 1.4-2 (2012-08-17)

## SYNOPSIS

- This is mainly a patch release for the `twinstim`-related functionality of the package.

- Apart from that, the package is now again compatible with older releases of R (< 2.15.0) as intended (by defining `paste0()` in the package namespace if it is not found in R **base** at installation of the **surveillance** package).

## NEW FEATURES

- Important new `twinstim()` feature: fix parameters during optimization.

- Useful `update`-method for `"twinstim"` objects.

- New `[[`- and `plot`-methods for `"simEpidataCSlist"` objects.

- `simEpidataCS()` received tiny bug fixes and is now able to simulate from epidemic-only models.

- `R0`-method for `"simEpidataCS"` objects (actually a wrapper for `R0.twinstim()`).

- Removed the `dimyx` and `eps` arguments from `R0.twinstim()`; it now uses `nCub` and `nCub.adaptive` from the fitted model and applies the same (numerical) integration method.

- `animate.epidata` is now compatible with the [**animation**](https://CRAN.R-project.org/package=animation) package.

- More thorough documentation of `"twinstim"`-related functions *including many examples*.

## BUG FIXES (`"twinstim"`-related)

- `nlminb` (instead of `optim`'s `"BFGS"`) is now the default optimizer (as already documented).

- The `twinstim` argument `nCub` can now be omitted when using `siaf.constant()` (as documented) and is internally set to `NA_real_` in this case. Furthermore, `nCub` and `nCub.adaptive` are set to `NULL` if there is no epidemic component in the model.

- `toLatex.summary.twinstim` now again works for `summary(*, test.iaf=FALSE)`.

- The `print`- and `summary`-methods for `"epidataCS"` no longer assume that the `BLOCK` index starts at 1, which may not be the case when using a subset in `simulate.twinstim()`.

- The `"counter"` step function returned by `summary.epidataCS()` no longer produces false numbers of infectives (they were lagged by one time point).

- `plot.epidataCS()` now resolves `...` correctly, and the argument `colTypes` takes care of a possible `subset`.

- `simEpidataCS()` now also works for endemic-only models and is synchronised with `twinstim()` regarding how `siaf` is numerically integrated (including the argument `nCub.adaptive`).
- Fixed a problem with `simEpidataCS()` related to missing 'NAMESPACE' imports (and re-exports) of `marks.ppp` and `markformat.default` from [**spatstat**](https://CRAN.R-project.org/package=spatstat), which are required for `spatstat::runifpoint()` to work, probably because **spatstat** currently does not register its S3 methods.

- Improved error handling in `simEpidataCS()`. Removed a `browser()` call and avoided a potentially infinite loop.

## BUG FIXES (`"twinSIR"`-related)

- The `.allocate` argument of `simEpidata()` now has a fail-safe default.

- Simulation without endemic `cox()`-terms now works.

## MINOR CHANGES

- Simplified the `imdepi` data to monthly instead of weekly intervals in `stgrid` for faster examples and a reduced package size.

- The environment of all predefined interaction functions for `twinstim()` is now set to `.GlobalEnv`. The previous behaviour of defining them in the `parent.frame()` could have led to huge `save()` files of `"twinstim"` objects even with `model=FALSE`.

- `simulate.twinSIR` only returns a list of epidemics if `nsim > 1`.

- `simulate.twinstim` uses `nCub` and `nCub.adaptive` from the fitted object as defaults.

- Removed the `...`-argument from `simEpidataCS()`.

- The coefficients returned by `simEpidataCS()` are now stored in a vector rather than a list for compatibility with `"twinstim"`-methods.

- The argument `cex.fun` of `intensityplot.twinstim()` now defaults to the `sqrt` function (as in `plot.epidataCS()`).

# surveillance 1.4 (2012-07-26)

## SYNOPSIS

- Besides minor bug fixes, additional functionality has entered the package, and a new attempt is made to finally release a new version on CRAN (version 1.3 has not appeared on CRAN), including a proper 'NAMESPACE'.

## NEW FEATURES

- Support for non-parametric back-projection using the function `backprojNP()`, which returns an object of the new `"stsBP"` class, which inherits from `"sts"`.

- Bayesian nowcasting for discrete-time count data is implemented in the function `nowcast()`.

- Methods for cubature over polygonal domains can now also visualize what they do. There is also a new quasi-exact method for cubature of the bivariate normal density over polygonal domains. The function `polyCub()` is a wrapper for the different methods.

- `residuals.twinstim()` and `residuals.twinSIR()`: extract the "residual process", see Ogata (1988). The residuals of `"twinSIR"` and `"twinstim"` models may be checked graphically by the new function `checkResidualProcess()`.

- Many new features for the `"twinstim"` class of self-exciting spatio-temporal point process models (see below).

## NEW FEATURES AND SIGNIFICANT CHANGES FOR `"twinstim"`

- Modified arguments of `twinstim()`: new ordering, new argument `nCub.adaptive`, removed argument `typeSpecificEndemicIntercept` (which is now specified as part of the `endemic` formula as `(1|type)`).

- Completely rewrote the `R0`-method (calculating "trimmed" and "untrimmed" *R_0* values).

- The "trimmed" `R0` values are now part of the result of the model fit, as well as `bbox(W)`. The model evaluation environment is now set as an attribute of the result if `model=TRUE`. A minimal sketch of extracting such values is given below.
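  (The pre-fitted model `imdepifit` shipped with the package merely serves as an example here; see `?R0` for the method's arguments.)

  ```r
  library("surveillance")
  data("imdepifit")       # a pre-fitted "twinstim" model
  summary(R0(imdepifit))  # "trimmed" R0 value for each event
  ```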
- New predefined spatial kernel: the Lomax power-law kernel `siaf.lomax()`.

- `plot`-methods for `"twinstim"` (`intensityplot()` and `iafplot()`).

- `as.epidataCS()` now auto-generates the stop-column if it is missing.

- `print`-method for class `"summary.epidataCS"`.

- `[`- and subset-method for `"epidataCS"` (subsetting `...$events`).

- `plot`-method for `"epidataCS"`.

## MINOR CHANGES

- Improved documentation for the new functionalities.

- Updated references.

- `twinSIR`'s `intensityPlot` is now a method of the new S3 generic function `intensityplot`.

# surveillance 1.3 (2011-04-25)

## SYNOPSIS

- This is a major release integrating plenty of new code (unfortunately not all of it documented as well as it could be). This includes code for the `"twinstim"` and the `"hhh4"` model. The `"twinSIR"` class of models has been migrated from package **RLadyBug** to **surveillance**. It may take a while before this version will become available from CRAN. For further details see below.

## SIGNIFICANT CHANGES

- Renamed the `"week"` slot of the `"sts"` S4 class to `"epoch"`. All saved data objects have accordingly been renamed, but some hassle is to be expected if you have old `"sts"` objects stored in binary form. The function `convertSTS()` can be used to convert such "old school" `"sts"` objects.

- Removed the functions `algo.cdc()` and `algo.rki()`.

## NEW FEATURES

- Support for `"twinSIR"` models (with associated `"epidata"` objects) as described in Höhle (2009) has been moved from package **RLadyBug** to **surveillance**. That means continuous-time discrete-space SIR models.

- Support for `"twinstim"` models as described in Meyer et al (2012). That means continuous-time continuous-space infectious disease models.

- Added functionality for non-parametric back-projection (`backprojNP()`) and nowcasting (`nowcast()`) based on `"sts"` objects.

# surveillance 1.2-2

- Replaced the deprecated `getSpPPolygonsLabptSlots` method with calls to the `coordinates` method when plotting the `map` slot.

- Minor proof-reading of the documentation.

- Added an argument `"extraMSMargs"` to the `algo.hmm` function.

- Fixed a bug in `outbreakP()` when having observations equal to zero in the beginning. Here, $\hat{\mu}^{C1}$ in (5) of Frisen et al (2008) is zero and hence the log-based summation in the code failed. Changed to a product as in the original code, which however might be less numerically stable.

- Fixed a bug in `stcd` which added one to the calculated index of `idxFA` and `idxCC`. Thanks to Thais Rotsen Correa for pointing this out.

# surveillance 1.2-1 (2010-06-10)

- Added `algo.outbreakP()` (Frisen & Andersson, 2009) providing a semiparametric approach for outbreak detection for Poisson distributed variables.

- Added a pure R function for extracting the ISO week and year from `Date` objects. This function (`isoWeekYear`) is only called if the `"%G"` and `"%V"` format strings are used on Windows (`sessionInfo()[[1]]$os == "mingw32"`), as this is not implemented for `"format.Date"` on Windows. Thanks to Ashley Ford, University of Warwick, UK, for identifying this Windows-specific bug.

- For `algo.farrington()` a faster fit routine `"algo.farrington.fitGLM.fast"` has been provided by Mikko Virtanen, National Institute for Health and Welfare, Finland. The new function calls `glm.fit()` directly, which gives a doubling of speed for long series. However, if one wants to process the fitted model output, some of the GLM routines might not work on this output.
  For backwards compatibility the argument `control$fitFun = "algo.farrington.fitGLM"` provides the old (and slow) behaviour.

# surveillance 1.1-6 (2010-05-25)

- A few minor bug fixes.

- Small improvements in the C implementation of the `twins()` function by Daniel Sabanés Bové, fixing the segmentation fault issue on 64-bit architectures.

# surveillance 1.1-2 (2009-10-15)

- Added the functions `categoricalCUSUM` and `LRCUSUM.runlength` for the CUSUM monitoring of general categorical time series (binomial, beta-binomial, multinomial, ordered response, Bradley-Terry models).

- Added the functions `pairedbinCUSUM` and `pairedbinCUSUM.runlength` implementing the CUSUM monitoring and run-length computations for a paired binary outcome as described in Steiner et al. (1999).

- Experimental implementation of the prospective space-time cluster detection described in Assuncao and Correa (2009).

- Added a `demo("biosurvbook")` containing the code of an upcoming book chapter on how to use the **surveillance** package. This contains the description of ISO date use, the negative binomial CUSUM, run-length computation, etc. From an applied point of view, the methods are illustrated by Danish mortality monitoring.

- Fixed a small bug in `algo.cdc`, found by Marian Talbert Allen, which resulted in the `control$m` argument being ignored.

- The constructor of the `sts` class now uses the argument `"epoch"` instead of weeks to make clearer that also daily, monthly or other data can be handled.

- Added an additional `epochAsDate` slot to the `sts` class. Modified the plot functions so they can handle ISO weeks.

- `algo.farrington` now also computes the quantile and median of the predictive distribution. Furthermore, the computation of reference values has been modified so that it is a) a little bit faster and b) also able to handle ISO weeks now. The reference values for date t0 are calculated as follows: for i = 1, ..., b, look at date t0 - i*year. From this date on, move w months/weeks/days to the left and right. In case of weeks: for each of these determined time points, go back in time to the closest Monday.

- Renamed the function `obsinyear` to `epochInYear`, which now also handles objects of class `Date`.

# surveillance 1.0-2 (2009-03-06)

- The negative binomial CUSUM, or more generally the NegBin likelihood-ratio detector, is now implemented as part of `algo.glrnb`. This includes the back-calculation of the required number of cases before an alarm.

- Time-varying proportion binomial CUSUM.

# surveillance 0.9-10

- Current status: Development version available from

- Rewriting of the `plot.sts.time.one` function to use polygons instead of lines for the number of observed cases. In due course, a number of problems in the plotting of the legend were fixed. The plotting routine now also handles binomial data, where the numbers of observed cases y are stored in `"observed"` and the denominator data n are stored in `"populationFrac"`.

- Problems with the `aggregate` function not operating correctly on the `populationFrac` were fixed.

- The `"rogerson"` wrapper function for `algo.rogerson` was modified so it now works better for distribution `"binomial"`. Thus a time-varying binomial CUSUM can be run by calling `rogerson(x, control = list(..., distribution = "binomial"))`.

- An experimental implementation of the twins model documented in Held, L., Hofmann, M., Höhle, M. and Schmid, V. (2006), "A two-component model for counts of infectious diseases", Biostatistics, 7, pp. 422--437, is now available as `algo.twins`; a usage sketch follows below.
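  (This sketch is illustrative only: the data set is an arbitrary example and the MCMC control settings are placeholders, see `?algo.twins` for details. The sampler may take a while.)

  ```r
  library("surveillance")
  data("hepatitisA")  # weekly counts as a "disProg" object
  fit <- algo.twins(hepatitisA, control = list(burnin = 1000, sampleSize = 2500))
  plot(fit)  # diagnostic plots of the fitted two-component model
  ```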
# surveillance 0.9-9 (2008-01-21)

- Fixed a few small problems which gave warnings in the CRAN distribution.

# surveillance 0.9-8 (2008-01-19)

- The `algo.glrpois` function now has an additional `"ret"` argument, where one specifies the return type. The arguments of the underlying C functions have been changed to include additional direction and return-type arguments.

- Added a `restart` argument to the `algo.glrpois` control object, which allows the user to control what happens after the first alarm has been generated.

- An experimental `algo.glrnb` function has been added to the package. All calls to `algo.glrpois` are now just `alpha=0` calls to this function; the underlying C functions, however, differentiate between the Poisson and the negative binomial case. A minimal usage sketch is given below.
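  (The data set and `range` are placeholders for illustration; see `?algo.glrnb` for the full list of control options.)

  ```r
  library("surveillance")
  data("shadar")  # example "disProg" object shipped with the package
  n <- length(shadar$observed)
  ## monitor the second half of the series; alpha = 0 gives the
  ## Poisson special case corresponding to algo.glrpois()
  surv <- algo.glrnb(shadar, control = list(range = (n %/% 2):n, alpha = 0))
  plot(surv)
  ```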
surveillance/inst/0000755000176200001440000000000014030612521013715 5ustar liggesuserssurveillance/inst/THANKS0000644000176200001440000000067214026677433014647 0ustar liggesusers## The authors would like to thank the following people
## for ideas, discussions, testing and feedback:

Doris Altmann
Johannes Bracher
Caterina De Bacco
Johannes Dreesman
Johannes Elias
Marc Geilhufe
Jim Hester
Kurt Hornik
Mayeul Kauffmann
Junyi Lu
Lore Merdrignac
Tim Pollington
Marcos Prates
Brian D. Ripley
Barry Rowlingson
Christopher W. Ryan
Klaus Stark
Yann Le Strat
André Michael Toschke
Wei Wei
George Wood
Achim Zeileis
Bing Zhang
surveillance/inst/doc/0000755000176200001440000000000014030612521014462 5ustar liggesuserssurveillance/inst/doc/surveillance.Rnw0000644000176200001440000005527414004512307017663 0ustar liggesusers%\VignetteIndexEntry{Getting started with outbreak detection}
\documentclass[a4paper,11pt]{article}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{natbib}
\bibliographystyle{apalike}
\usepackage{lmodern}
\usepackage{amsmath}
\usepackage{amsfonts,amssymb}
\newcommand{\pkg}[1]{{\bfseries #1}}
\newcommand{\surveillance}{\pkg{surveillance}}
\usepackage{hyperref}
\hypersetup{
  pdfauthor = {Michael H\"ohle and Andrea Riebler and Michaela Paul},
  pdftitle = {Getting started with outbreak detection},
  pdfsubject = {R package 'surveillance'}
}

\title{Getting started with outbreak detection}
\author{
Michael H{\"o}hle\thanks{Author of correspondence: Department of Statistics, University of Munich, Ludwigstr.\ 33, 80539 M{\"u}nchen, Germany, Email: \texttt{hoehle@stat.uni-muenchen.de}}, Andrea Riebler and Michaela Paul\\
Department of Statistics\\
University of Munich\\
Germany
}
\date{17 November 2007}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Sweave
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{Sweave}
%Put all in another directory
\SweaveOpts{prefix.string=plots/surveillance, width=9, height=4.5}
\setkeys{Gin}{width=1\textwidth}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Initial R code
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
<<echo=FALSE,results=hide>>=
library("surveillance")
options(SweaveHooks=list(fig=function() par(mar=c(4,4,2,0)+.5)))
options(width=70)

## create directory for plots
dir.create("plots", showWarnings=FALSE)

######################################################################
# Do we need to compute or can we just fetch results
######################################################################
CACHEFILE <- "surveillance-cache.RData"
compute <- !file.exists(CACHEFILE)
message("Doing computations: ", compute)
if(!compute) load(CACHEFILE)
@

\begin{document}

\fbox{\vbox{\small
\noindent\textbf{Disclaimer}: This vignette reflects the package state at version 0.9-7 and is hence somewhat outdated. New functionality has been added to the package: this includes various endemic-epidemic modelling frameworks for surveillance data (\texttt{hhh4}, \texttt{twinSIR}, and \texttt{twinstim}), as well as more outbreak detection methods (\texttt{glrnb}, \texttt{boda}, and \texttt{farringtonFlexible}). These new features are described in detail in \citet{meyer.etal2014} and \citet{salmon.etal2014}, respectively.
%and corresponding vignettes are included in the package;
%see \texttt{vignette(package = "surveillance")} for an overview.
Note in particular that use of the new \texttt{S4} class \texttt{sts} instead of \texttt{disProg} is encouraged to encapsulate time series data.
}}

{\let\newpage\relax\maketitle}

\begin{abstract}
\noindent This document gives an introduction to the \textsf{R} package \surveillance\ containing tools for outbreak detection in routinely collected surveillance data. The package contains an implementation of the procedures described by~\citet{stroup89}, \citet{farrington96} and the system used at the Robert Koch Institute, Germany. For evaluation purposes, the package contains example data sets and functionality to generate surveillance data by simulation. To compare the algorithms, benchmark numbers like sensitivity, specificity, and detection delay can be computed for a set of time series. As the package is open-source, it should be easy to integrate new algorithms; as an example of this process, a simple Bayesian surveillance algorithm is described, implemented and evaluated.\\

\noindent{\bf Keywords:} infectious disease, monitoring, aberrations, outbreak, time series of counts.
\end{abstract}

\newpage

\section{Introduction}\label{sec:intro}

In an attempt to meet the threats of infectious diseases to society, public health authorities have created comprehensive mechanisms for the collection of disease data. As a consequence, the abundance of data has demanded the development of automated algorithms for the detection of abnormalities. Typically, such an algorithm monitors a univariate time series of counts using a combination of heuristic methods and statistical modelling. Prominent examples of surveillance algorithms are the work by~\citet{stroup89} and~\citet{farrington96}. A comprehensive survey of outbreak detection methods can be found in~\citet{farrington2003}.

The R package \texttt{surveillance} was written with the aim of providing a test-bench for surveillance algorithms. From the Comprehensive R Archive Network (CRAN) the package can be downloaded together with its source code. It allows users to test new algorithms and compare their results with those of standard surveillance methods. A few real-world outbreak datasets are included together with mechanisms for simulating surveillance data. With the package at hand, comparisons like the one described by~\citet{hutwagner2005} should be easy to conduct.

The purpose of this document is to illustrate the basic functionality of the package with R code examples. Section~\ref{sec:data} contains a description of the data format used to store surveillance data, mentions the built-in datasets and illustrates how to create new datasets by simulation. Section~\ref{sec:algo} contains a short description of how to use the surveillance algorithms and illustrates the results. Further information on the individual functions can be found on the corresponding help pages of the package.

\section{Surveillance Data}\label{sec:data}

Denote by $\{y_t\>;t=1,\ldots,n\}$ the time series of counts representing the surveillance data. Because such data typically are collected on a weekly basis, we shall also use the alternative notation $\{y_{i:j}\}$ with $j\in\{1,\ldots,52\}$ being the week number in year $i\in\{-b,\ldots,-1,0\}$. That way the years are indexed such that the most current year has index zero. For evaluation of the outbreak detection algorithms it is also possible for each week to store -- if known -- whether there was an outbreak that week.
The resulting multivariate series $\{(y_t,x_t)\>; t=1,\ldots,n\}$ is in \texttt{surveillance} given by an object of class \texttt{disProg} (disease progress), which is basically a \texttt{list} containing two vectors: the observed numbers of counts and a Boolean vector \texttt{state} indicating whether there was an outbreak that week. A number of time series are contained in the package (see \texttt{data(package="surveillance")}), mainly originating from the SurvStat@RKI database at \url{https://survstat.rki.de/} maintained by the Robert Koch Institute, Germany~\citep{survstat}. For example, the object \texttt{k1} describes cryptosporidiosis surveillance data for the German federal state Baden-W\"{u}rttemberg 2001-2005. The peak in 2001 is due to an outbreak of cryptosporidiosis among a group of army soldiers in a boot camp~\citep{bulletin3901}.

<<fig=TRUE>>=
data(k1)
plot(k1, main = "Cryptosporidiosis in BW 2001-2005")
@

For evaluation purposes it is also of interest to generate surveillance data using simulation. The package contains functionality to generate surveillance data containing point-source like outbreaks, for example with a Salmonella serovar. The model is a Hidden Markov Model (HMM) where a binary state $X_t, t=1,\ldots,n$, denotes whether there was an outbreak and $Y_t$ is the number of observed counts, see Figure~\ref{fig:hmm}.

\begin{figure}[htb]
  \centering
  \includegraphics[width=.75\textwidth]{surveillance-hmm}
  \caption{The Hidden Markov Model}
  \label{fig:hmm}
\end{figure}

The state process $\{X_t\}$ is a homogeneous Markov chain with transition matrix
\begin{center}
\begin{tabular}{c|cc}
$X_t\backslash X_{t+1}$ & 0 & 1\\
\hline
$0$ & $p$ & $1 - p$ \\
$1$ & $1 - r$ & $r$
\end{tabular}
\end{center}
Hence $1-p$ is the probability to switch to an outbreak state and $r$ is the probability that $X_t=1$ is followed by $X_{t+1}=1$. Furthermore, the observation $Y_t$ is Poisson-distributed with log-link mean depending on a seasonal effect and a time trend, i.e.\
\[
\log \mu_t = A \cdot \sin \, (\omega \cdot (t + \varphi)) + \alpha + \beta t.
\]
In case of an outbreak $(X_t=1)$ the mean increases by a value of $K$, altogether
\begin{equation}\label{eq:hmm}
Y_t \sim \operatorname{Po}(\mu_t + K \cdot X_t).
\end{equation}
The model in (\ref{eq:hmm}) corresponds to a single-source, common-vehicle outbreak, where the length of an outbreak is controlled by the transition probability $r$. The weekly numbers of outbreak cases are simply independently Poisson distributed with mean $K$. A physiologically better motivated alternative could be to operate with a stochastic incubation time (e.g.\ log-normal or gamma distributed) for each individual exposed to the source, which results in a temporal diffusion of the peak. The advantage of (\ref{eq:hmm}) is that estimation can be done by a generalized linear model (GLM) using $X_t$ as a covariate, and that it allows for an easy definition of a correctly identified outbreak: each $X_t=1$ has to be identified. More advanced setups would require more involved definitions of an outbreak, e.g.\ as a connected series of time instances where the number of outbreak cases is greater than zero. Care is then required in defining what a correctly identified outbreak means for time-wise overlapping outbreaks. In \surveillance\ the function \verb+sim.pointSource+ is used to simulate such a point-source epidemic; the result is an object of class \verb+disProg+.
In \surveillance\ the function \verb+sim.pointSource+ is used to simulate such
a point-source epidemic; the result is an object of class \verb+disProg+.
\label{ex:sts}
<<>>=
set.seed(1234)
sts <- sim.pointSource(p = 0.99, r = 0.5, length = 400, A = 1,
                       alpha = 1, beta = 0, phi = 0, frequency = 1,
                       state = NULL, K = 1.7)
@
<<fig=TRUE>>=
plot(sts)
@

\section{Surveillance Algorithms}\label{sec:algo}

Surveillance data often exhibit strong seasonality; therefore, most
surveillance algorithms use only a set of so-called \emph{reference values} as
a basis for drawing conclusions. Let $y_{0:t}$ be the number of cases of the
current week (denoted week $t$ in year $0$), $b$ the number of years to go
back in time and $w$ the number of weeks around $t$ to include from those
previous years. For the year zero we use $w_0$ as the number of previous weeks
to include -- typically $w_0=w$. Altogether the set of reference values is
thus defined to be
\[
R(w,w_0,b) = \left(\bigcup\limits_{i=1}^b\bigcup\limits_{j=\,-w}^w
y_{-i:t+j}\right) \cup \left(\bigcup_{k=-w_0}^{-1} y_{0:t+k}\right).
\]
Note that the number of cases of the current week is not part of
$R(w,w_0,b)$. A surveillance algorithm is a procedure using the reference
values to create a prediction $\hat{y}_{0:t}$ for the current week. This
prediction is then compared with the observed $y_{0:t}$: if the observed
number of cases is much higher than the predicted number, the current week is
flagged for further investigations. In order to do surveillance for time
$0:t$, an important concern is the choice of $b$ and $w$. Values as far back
as time $-b:t-w$ contribute to $R(w,w_0,b)$ and thus have to exist in the
observed time series.

Currently, we have implemented four different types of algorithms in
\surveillance: the Centers for Disease Control and Prevention (CDC)
method~\citep{stroup89}, the Communicable Disease Surveillance Centre (CDSC)
method~\citep{farrington96}, the method used at the Robert Koch Institute
(RKI), Germany~\citep{altmann2003}, and a Bayesian approach documented
in~\citet{riebler2004}. A detailed description of each method is beyond the
scope of this note, but to give an idea of the framework the Bayesian approach
developed in~\citet{riebler2004} is presented: within a Bayesian framework,
quantiles of the posterior predictive distribution are used to define alarm
thresholds.

The model assumes that the reference values are independently and identically
Poisson distributed with parameter $\lambda$, and a Gamma distribution is used
as prior for $\lambda$. The reference values are defined to be
$R_{\text{Bayes}}= R(w,w_0,b) = \{y_1, \ldots, y_{n}\}$ and $y_{0:t}$ is the
value we are trying to predict. Thus, $\lambda \sim \text{Ga}(\alpha, \beta)$
and $y_i|\lambda \sim \text{Po}(\lambda)$, $i = 1,\ldots,{n}$. Standard
derivations show that the posterior distribution is
\begin{equation*}
\lambda|y_1, \ldots, y_{n} \sim
\text{Ga}(\alpha + \textstyle\sum_{i=1}^{n} y_i, \beta + n).
\end{equation*}
Computing the predictive distribution
\begin{equation*}
f(y_{0:t}|y_1,\ldots,y_{n}) =
\int\limits^\infty_0 f(y_{0:t}|\lambda)\,
f(\lambda|y_1,\ldots,y_{n})\, d\lambda
\end{equation*}
we get the Poisson-Gamma distribution
\begin{equation*}
y_{0:t}|y_1,\ldots,y_{n} \sim
\text{PoGa}(\alpha + \textstyle\sum_{i=1}^{n} y_i, \beta + n),
\end{equation*}
which is a generalization of the negative binomial distribution, i.e.\
\[
y_{0:t}|y_1,\ldots,y_{n} \sim
\text{NegBin}(\alpha + \textstyle\sum_{i=1}^{n} y_i,
\tfrac{\beta + n}{\beta + n + 1}).
\]
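In R, quantiles of this predictive distribution are directly available through
\texttt{qnbinom}, whose \texttt{size}/\texttt{prob} parameterization matches
the two arguments of the $\text{NegBin}$ above. The following sketch computes
the upper quantile that serves as the alarm threshold defined below; the
helper name and its \texttt{level} argument (used instead of $\alpha$ to avoid
a clash with the Gamma parameter) are ours, for illustration only, and the
defaults anticipate the Jeffreys prior choice discussed next.

<<eval=FALSE>>=
## Sketch: predictive alarm threshold from reference values `refvals`
## for a Ga(a, b) prior. qnbinom(q, size, prob) returns the smallest y
## with P(Y <= y) >= q under NegBin(size, prob).
bayes_threshold <- function(refvals, a = 0.5, b = 0, level = 0.01) {
  n <- length(refvals)
  qnbinom(1 - level, size = a + sum(refvals), prob = (b + n) / (b + n + 1))
}
@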
Using the Jeffreys prior $\text{Ga}(\tfrac{1}{2}, 0)$ as non-informative prior
distribution for $\lambda$, the parameters of the negative binomial
distribution are
\begin{align*}
\alpha + \sum_{i=1}^{n} y_i
  &= \frac{1}{2} + \sum_{y_{i:j} \in R_{\text{Bayes}}}\!\! y_{i:j}
\quad\text{and}\quad
\frac{\beta + n}{\beta + n + 1}
  = \frac{|R_{\text{Bayes}}|}{|R_{\text{Bayes}}| + 1}.
\end{align*}
Given a quantile parameter $\alpha$, the smallest value $y_\alpha$ is computed
such that
\begin{equation*}
P(y \leq y_\alpha) \geq 1-\alpha.
\end{equation*}
The alarm indicator is then
\begin{equation*}
A_{0:t} = I(y_{0:t} \geq y_\alpha),
\end{equation*}
i.e.\ if $y_{0:t}\geq y_\alpha$, the current week is flagged as an alarm. As
an example, the \verb+Bayes1+ method uses the last six weeks as reference
values, i.e.\ $(w,w_0,b)=(6,6,0)$, and is applied to the \texttt{k1} dataset
with $\alpha=0.01$ as follows.

<<fig=TRUE>>=
k1.b660 <- algo.bayes(k1, control = list(range = 27:192,
                                         b = 0, w = 6, alpha = 0.01))
plot(k1.b660, disease = "k1", firstweek = 1, startyear = 2001)
@

Several extensions of this simple Bayesian approach are imaginable: for
example, the inherent over-dispersion of the data could be modelled by a
negative binomial distribution, and time trends and mechanisms to correct for
past outbreaks could be integrated, but all at the cost of non-standard
inference for the predictive distribution. Here, simulation-based methods like
Markov chain Monte Carlo or heuristic approximations have to be used to obtain
the required alarm thresholds.

In general, the \verb+surveillance+ package makes it easy to add additional
algorithms -- also those not based on reference values -- by using the
existing implementations as a starting point.

The following call uses the CDC and Farrington procedures on the simulated
time series \verb+sts+ from page~\pageref{ex:sts}. Note that the CDC procedure
operates with four-week aggregated data; to better compare the upper bound
value, the aggregated counts for each week are shown as circles in the plot.

<<cdc-farrington, eval=FALSE>>=
cntrl <- list(range = 300:400, m = 1, w = 3, b = 5, alpha = 0.01)
sts.cdc <- algo.cdc(sts, control = cntrl)
sts.farrington <- algo.farrington(sts, control = cntrl)
@
<<echo=FALSE>>=
if (compute) {
<<cdc-farrington>>
}
@
<<fig=TRUE>>=
par(mfcol = c(1, 2))
plot(sts.cdc, legend.opts = NULL)
plot(sts.farrington, legend.opts = NULL)
@

Typically, one is interested in evaluating the performance of the various
surveillance algorithms. An easy way is to look at the sensitivity and
specificity of the procedure. A correct identification of an outbreak is
defined as follows: if the algorithm raises an alarm for time $t$ (i.e.\
$A_t=1$) and $X_t=1$, we have a correct classification; if $A_t=1$ and
$X_t=0$, we have a false positive, etc. In case of more involved outbreak
models, where an outbreak lasts for more than one week, a correct
identification could be that at least one of the outbreak weeks is correctly
identified; see e.g.\ \citet{hutwagner2005}.

To compute various performance scores, the function \verb+algo.quality+ can be
used on a \verb+survRes+ object.

<<>>=
print(algo.quality(k1.b660))
@

This computes the number of false positives, true negatives, false negatives,
the sensitivity and the specificity. Furthermore, \texttt{dist} is defined as
\[
\sqrt{(\mathit{Spec}-1)^2 + (\mathit{Sens} - 1)^2},
\]
that is the distance to the optimal point $(1,1)$, which serves as a heuristic
way of combining sensitivity and specificity into a single score. Of course,
weighted versions are also imaginable. Finally, \texttt{lag} is the average
number of weeks between the first week of a consecutive run of $X_t=1$'s
(i.e.\ an outbreak) and the first alarm raised by the algorithm.
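The scores have straightforward definitions in terms of the alarm and state
vectors. The following stand-alone sketch (our own helper, not the package's
\verb+algo.quality+) shows the computation for generic 0/1 vectors of equal
length:

<<eval=FALSE>>=
## Sketch: performance scores for 0/1 alarm and outbreak-state vectors.
quality_sketch <- function(alarms, states) {
  TP <- sum(alarms == 1 & states == 1)   # correct classifications
  FP <- sum(alarms == 1 & states == 0)   # false alarms
  TN <- sum(alarms == 0 & states == 0)
  FN <- sum(alarms == 0 & states == 1)   # missed outbreak weeks
  sens <- TP / (TP + FN)
  spec <- TN / (TN + FP)
  c(sens = sens, spec = spec,
    dist = sqrt((spec - 1)^2 + (sens - 1)^2))
}
@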
To compare the results of several algorithms on a single time series, we
declare a list of control objects, each containing the name and settings of
the algorithm we want to apply to the data.

<<>>=
control <- list(list(funcName = "rki1"), list(funcName = "rki2"),
                list(funcName = "rki3"), list(funcName = "bayes1"),
                list(funcName = "bayes2"), list(funcName = "bayes3"),
                list(funcName = "cdc", alpha = 0.05),
                list(funcName = "farrington", alpha = 0.05))
control <- lapply(control, function(ctrl) {
  ctrl$range <- 300:400; return(ctrl)
})
@
%
In the above, \texttt{rki1}, \texttt{rki2} and \texttt{rki3} are three methods
with reference values $R_\text{rki1}(6,6,0)$, $R_\text{rki2}(6,6,1)$ and
$R_\text{rki3}(4,0,2)$, all called with $\alpha=0.05$. The \texttt{bayes*}
methods use the Bayesian algorithm with the same setup of reference values.
The CDC method is special since it operates on aggregated four-week blocks. To
make everything comparable, a common $\alpha=0.05$ level is used for all
algorithms. All algorithms in \texttt{control} are applied to \texttt{sts}
using:

<<eval=FALSE>>=
algo.compare(algo.call(sts, control = control))
@
<<echo=FALSE>>=
if (compute) {
  acall <- algo.call(sts, control = control)
}
print(algo.compare(acall), digits = 3)
@

A test on a set of time series can be done as follows. First, a list
containing 10 simulated time series is created. Second, all the algorithms
specified in the \texttt{control} object are applied to each series. Finally,
the results for the 10 series are combined into one result matrix.

<<>>=
# Create 10 series
ten <- lapply(1:10, function(x) {
  sim.pointSource(p = 0.975, r = 0.5, length = 400, A = 1,
                  alpha = 1, beta = 0, phi = 0, frequency = 1,
                  state = NULL, K = 1.7)
})
@
<<ten-surv, eval=FALSE>>=
# Do surveillance on all 10, get results as list
ten.surv <- lapply(ten, function(ts) {
  algo.compare(algo.call(ts, control = control))
})
@
<<echo=FALSE>>=
if (compute) {
<<ten-surv>>
}
@
<<eval=FALSE>>=
# Average results
algo.summary(ten.surv)
@
<<echo=FALSE>>=
print(algo.summary(ten.surv), digits = 3)
@

A similar procedure can be applied when evaluating the 14 surveillance series
drawn from SurvStat@RKI~\citep{survstat}. A problem, however, is that after
conversion to 52 weeks/year the series are only 209 weeks long, which is
insufficient to apply e.g.\ the CDC algorithm. To conduct the comparison on as
large a dataset as possible, the following trick is used: the function
\texttt{enlargeData} replicates the requested \texttt{range} and inserts it
before the original data, after which the evaluation can be done on all 209
values.

<<>>=
# Update range in each - cyclic continuation
range <- (2 * 4 * 52) + 1:length(k1$observed)
control <- lapply(control, function(cntrl) {
  cntrl$range <- range; return(cntrl)
})

# Auxiliary function to enlarge data
enlargeData <- function(disProgObj, range = 1:156, times = 1) {
  disProgObj$observed <- c(rep(disProgObj$observed[range], times),
                           disProgObj$observed)
  disProgObj$state <- c(rep(disProgObj$state[range], times),
                        disProgObj$state)
  return(disProgObj)
}

# Outbreaks
outbrks <- c("m1", "m2", "m3", "m4", "m5", "q1_nrwh", "q2",
             "s1", "s2", "s3", "k1", "n1", "n2", "h1_nrwrp")

# Load and enlarge data.
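## The lapply below loads each dataset into the workspace via data(list = name),
## retrieves it with get(), and prepends two copies of its first four years
## (2 * 4 * 52 weeks of cyclic history), matching the shifted `range` above so
## that all 209 original weeks can be evaluated.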
outbrks <- lapply(outbrks, function(name) {
  data(list = name)
  enlargeData(get(name), range = 1:(4 * 52), times = 2)
})

# Apply function to one
one.survstat.surv <- function(outbrk) {
  algo.compare(algo.call(outbrk, control = control))
}
@
<<eval=FALSE>>=
algo.summary(lapply(outbrks, one.survstat.surv))
@
<<echo=FALSE>>=
if (compute) {
  res.survstat <- algo.summary(lapply(outbrks, one.survstat.surv))
}
print(res.survstat, digits = 3)
@

In both this study and the earlier simulation study the Bayesian approach
seems to do quite well. However, the limited extent of the comparisons does
not allow for stronger conclusions. Consult the work of~\citet{riebler2004}
for a more thorough comparison using simulation studies.

<<echo=FALSE>>=
if (compute) { # save computed results
  save(list = c("sts.cdc", "sts.farrington", "acall", "res.survstat",
                "ten.surv"), file = CACHEFILE)
  tools::resaveRdaFiles(CACHEFILE)
}
@

\section{Discussion and Future Work}

Many extensions and additions are imaginable to improve the package. For now,
the package is intended as an academic tool providing a test-bench for
integrating new surveillance algorithms. Because all algorithms are
implemented in R, performance has not been a priority. In particular, the
current implementation of the Farrington procedure is rather slow and would
benefit from an optimization based on code fragments written in C.

One important improvement would be to provide more involved mechanisms for
the simulation of epidemics. In particular, it would be interesting to include
multi-day outbreaks originating from single-source exposure, but with delay
due to varying incubation times~\citep{hutwagner2005}, or SEIR-like
epidemics~\citep{andersson2000}. However, defining what is meant by a correct
outbreak identification, especially in the case of overlapping outbreaks,
creates new challenges which have to be met.

\section{Acknowledgements}

We are grateful to K.\ Stark and D.\ Altmann, RKI, Germany, for discussions
and information on the surveillance methods used by the RKI. Our thanks to
C.\ Lang, University of Munich, for his work on the R implementation, and to
M.\ Kobl, T.\ Schuster and M.\ Rossman, University of Munich, for their
initial work on gathering the outbreak data from SurvStat@RKI. The research
was conducted with financial support from the Collaborative Research Centre
SFB 386 funded by the German Research Foundation (DFG).

\bibliography{references}

\end{document}
|iTFf0{-F5$l$ջtlwMn7.7uW`xM?G[ ITͦl~z7lZl$ (X"~׼M}"u 8:78B5Aabh-wYF"eizW).Kl*gaXJc&7 m&% 8w>L:1l!'mL~I&VF nu$7P)Ŵ6 M_m{/ϦBЙΆ,bָ%(}~Xg,, X,a [n**pQ" _엢„‹wK YCu-V8,JUcFj1B b'Jbd鵫sH9҈ )uMY8i}%NQ$WF[*i 2{+qK{qP~J==@ uH 4t+_WAL{&, 1EV"-ggb}5>v&4as/i5MF?iPspRDKuEa| HK-(aEKS]&N04DL{z W:Ed3 @Q`$ͪi,z gF>.2451&Jr V7e0"afCFOPl/ˆ碕+UѪljJXw"Pl;~ {70aF7ou1ܔS J4A}ri\7hkP/Ekg.p1%\Um02v*iC]Nf+lXf)I{^[rEr&m@GIJY 1!L&a}0P\DRMKtA>7fba8/' PtGAGqd̆$shDl@`[My=:aQr>aC{ډQ>ݼrA%1'94~M$x]Lm]%LPo  xR@@}l^]Ly?0kTZ&3|l@s8CknK~Fv,0vtt~8"Ԫc{! ZBQ DCsRր 6 e1$@՚0 ΖEp]g9ZrȁL\&5HpsfbB g:ݾnNLus&2 Kp<|"H5ԯg|b\AW~Z1j&v[E"b.9qzaz:C}qә O _pNLK_om%U~5~W Mjq[xS?kVc; -h.?FbSt0 nd*O~ ]Uj gW^_oًq_篳Y~u+G&Oϖ(}(c+cuOqC>RixreQp1eJQ#/ŪWVB@B:{.t x-OB\nTݗxK;T]o\i?t)>ҟ\;P`)HH;jx֝G_DX  WYuTG/E9G %:Hۋ H endstream endobj 382 0 obj << /Filter /FlateDecode /Length 3852 >> stream xZKo$Y!p  ܓh:7<'X?;2|lw4zF#wϮ9g眪"Mvhe{=h,Ūⷋd ׻?;'ߞ0u[87[]l.+0ʔNxJ *Wl+8[ sI Y4%hVƭö~Zsٿ@wRBN.O ƖgajJpJJX85;/>r,nSwM~se^X0e}\c]>]+ROx^i⬪P2ȃ-ȳ6ᢸ վݽE/(H^+mX?a:yf4=J7lKMo.ƪ=mωг>|<+?er%W !u+05.b&?9\qWHb%ga4WEUrkrW>sFmi ˟yC;]!X^Xoqȇ~7ADt^0f (̂XG'+nSR)UY8E2aZʦd蜠R% \3dL ΋ 烝?Eƭ,g:Æ )v*L?hDb5%A-I)EH^&_7oИ5spav߅-,lڅեPEF^,vC7 Ӝ3EIWbZE^@)s7}aط=uV o3HwmU.ތEN=tpHyuv*|JlRr=0tYeGۧKNU&@7Wj#c١.fp XtoڢSS5jk"K;7Dr0ָ #8s8ܑb= UOɌ%EtP$G Y)eB!)r`*P䑥QHe*q e):-2& x6/!i3-xܣxg#q$Ǚzsff٨1pWh+:[ڨ*5(1Ϣ䮊@#%zSݓaW-LI^ +J''5Pjw'u `pE<'YԆΕ՞ 1Yܝ-}}̳2Ǫ1 %ۍoC `iSlnp V20ū:z1K~+0*/ $ j -z*Auad,/XA]X)⢈Cq>͹,SZ̰"ȴfX/5bLlp ;U 8'@K vL3tǰx~fْ@ʕao愤#΀|ԍ|o u0D9Hv8B{9䃳snxxl! D26a3>REWQuJYms sV`]:7NQIF b6rϻ7!>+4Ԟ*/*qO#~ps ]g "%c=AcedM\$)EAH4P#… |p*ge=ȧ%$H/㨑I(E`H 3\^p/o1f@>OJm3#HWGP lz(6#5e-*Ǵ,YI,UBv\;M6h8 V8Fez0rZ !8蚐b(JM6K//MA8XpMtd14qx; ~O/꺭 MxaPO\Mii;ų>Ҏ4 "Wˬn~ݺ"fUߴChda}$9~َN2*UUW8rANq//0:Gэ!T6 pު+l0qKx"2qVe^H+~(}zO:c p6,dX,'с %] TU#v1tw2W W}HZ 3I)OS9 5u/yJHO $q2r `3>,Mނg~0苫ܒ!%U{y Zi獻 q_Y? "؆[_ XÀaaڞ ߿oMi:ߝϥUiOe樸1ѳ+Ͷn,RU@aL? rNdL|z[@V>BSʮWB o}#yu$/).AN/4QMdwJ'^=#Xh^JK"h \*d&,fc;PaNn/H!=]K(%\kJRNh`Zzx% 񫾐&w?N4#ϣ˹PlO 8B[LUVPC @nDBDP=]2>gXú4v(< {%R.c& E~fm$Crڑ)W^zPSg t)-?[2ֶ?r* _ typ$Xlg> W a*Vo6-˻,Pg‡?\GiYԂI?S,S%L87vcA2SeKk CMQ \`֕HPPsN! $"fqCJ? U7 )[ tF*SfrZr&b4bAh?GsOZM>@ޱЪ!->t 7,!mXg%ub\ FcEУD0yA idcL;n/Խ͔T} iy#)sYAP1厚wI'vN]+b͊"{x;>Bz:'>tO &};}O$)g C D̵@z wރ_Ѧam}tJ~zYy=endstream endobj 383 0 obj << /Filter /FlateDecode /Length 3509 >> stream xZKOA80` lg["'cN8ޝ%?;TV+ =Y,ğgeg% _m/>{,ywQΞ_|,wFX O W:>S[SfVpR.OٓyYZVtl=_Y,yQw}S )̴l0p,Ҳo7]"&]]E :NY6IV YVDU$鰍f1vJRN.WO0 =sY8d-`5=e8_h 򛯖lͺ{ܭÃ^vSomvwWAt)qFpB1 +E̓j Np4ĴN< 6GmuQL(IPfh9:l>V O^ԛ̬ m9w[hJ>ʖ#/oL*{?(~ԳfGh1&u:rҢ"W&vzla4?9Đ1e4UqfS "o:qLa+u-gQgdH7F+ $>BA!R=P 2nEgT"U͏2//~t^9 !n֞9򙖮od* F [V@>4.a U#$q-@B#[2d`p8Lmo-ʂWIe{#5}n`Ѧy= F۳AMQ@8cCBom SUQq쪰PQO!EP -z#jNX, /he\7/BbGm|/ ^0ny?zbM}N;ڼtL7#+? &Z֦Ds|>=z #O XG}Oӝ7B5bynnuQ \YԴ/ĤHGy E J9ElkAތ[5GHFClIx$4>yҫ ܾV"=ȎBLp[Obl:JԽSg:tlS9~c|4H]Ve~= 3_?v™ukJ%PxyAd{bXrlѺCIX>0#1"VOU}ѤȗҀ"h隆$"űɞTw H^Pb uGHvu b6R7N^|6sa '~]Ty|A8Q*߅z/MeptZwGY[{Dq/SM>x߅v. ~~1-f.*M #ʃQ:% \)*HXlT@ ~XKr'0tś$t/eaQY>[aoAH|(̄&Xt[h3qٻ--_ER"Y)|)t3N%s^ *),FkkResi0(L[LZCeĊo_NLTAd @ cҾ ƪ xcJZ2LZBRو;o':i*2B< u÷) ;@! xZb0hAC+~PfU7*m~TBՆG1=& ;rNǗDJS̴5YaOMpE4  pYMk'qCɹg֬_vTlF- t" &b<@''daeyז^KnM|̾X #n+0r*pkEpz @? 
6M/3黸5vSO Ws\mggO/n'n%meau(}/GewT6\뛥`we:æoW'7uOχpv[?N2%P "P,P9~Z_^wv~3l$%ց7 uՋxlkoۈ2HҲ?X5]b_AU-  Uf%v4+{EU:>t 5֫axf_+a7[ =WpeaUM`j0/ ֑yAoHM^;mϗ?YxFijqdJ s DZ˩pp ]O@sw]c[dmϕ(u&,z ǁ`I]zǵ:?\3Bendstream endobj 384 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4570 >> stream xX XS׶>!sT =jõVZ*z"2I$̃SH2@!(VVEm>mKvЎc};>~|9ϿED rD"шeW*wGϞ24u)S^[Sx{D)"yy/2܌Ry ]:`/ߕ+VL>|OQԤʘE S.K Y-4,<"2`UbigΚso5ZASQԳ*j55ZCRk:ʇB^"ʗZL-S3er+ܨ;R(5J PèGj-r;Nķ}I"]Ɨ<C t0aw]pv>Æ1mD&%ݑ# hb'm ɢ rPY8>-Si yyi8-Y:FA3ڠ F+;yPЅ2T*ƓLYz =w@,B'0XJX%AC5gW<$->]Bal1u6%362j !oi}4L]qS5ޅ{y`aeGsЊ <c2@,2@S&G%Y$tV LLf[}.u&D=m`O JmEA&PMiUs At?܂=旒XZ)S܎UQi@j0wq]5s$0JeE+jAGĂd[b}p؂epio( qN yeZ~L2386;;OSF:}ڨDyb_#Y4u;s)cc?4źL`!'UG;!PL%e[;5V=6Y-Kv~B_ғ|ExbCh,ǢeJZ7+♉&f7I 8ܞ Ge~Gߏ's$wBf( Ml"WE=ݗϢ?_w9;Ar ' @ Tǯ!jE* la9B4-yA͡s4x" ~є| w?M B=NI' "JxF;4@ @Iu,6_4.a jj ٸ9ޢMmpt04#YaZ(2w'uY={5;~h_C(Cc]y $C6O6! `l $&V 4ʱjt%UYh8q2*˫ mwL,l'# (4IOLcVTT=-6eTYbS1+L*hMރ­hK U օáJDZPY *jw6ڪk; @nkw^α ;F[V\Gu7nn}ݓr9$=7,Z 9B`$ڱ#˵ϕ;EE|kki=hM "RѮЖӲU꼥/^&ע5CiIic}6VD6mA&V&4&h]C|㌚]PLrV^Q76[U'@}xV͊E. +2}?bwwcDL<_淔HRؘ&6$1jb AhC|[z-2&ԗTv?1q%8XtcD5.c_`.٨5Tzk%B*}+]h/l= *޽擜O<&aW4 :ex#  5XIr@>4-O\qLc`'S*RbÎ Z"XXuI"h&mB,JKUQvg( v-0KgEk]ʺqmtfbkkwgGN#b]-hsyc QQT$ r!7'<TNGNM7^v0l,ɅZG_ Sv)x@dA.2Gzxy!\\kWuVhGV9G'8z*pfG;I LZ,#JfSc-I kؔ2z 4Ķvh!%Uxq]$[hi юLv$%sZY1 4:.rtZ֝v@_2i<{,rE}R/ v8x#Ԝ!38b1/ 龫t_U{1*O%x(rّ1P-&T&V'Nڪ~[mhk6kso:9zr v$% J UxS<>/m|]*#HEVRjmZH'^f%~~ǎ`v%VQhdO=hzA hN&߼xP_ŌZً5=&sc$=:i\9gLѫK,ek(3W2R˶ |k 0l 5u] 򃛫hzE;=MΑHgd9«d)5f*+c3+f&h]%pVL/$':%hvMH:HawU>p}$>ۦ5"'S:%n=*ZjzK`* D(1/ILPKv#VI獞<AR`'4n]i.A)i9oxLYAb#kH}{T&eBg&SXBI0Du&GjC>,B!?; v;X'^J ̰qց)6߿䨄U§`ưn I!F`wB΂a}`&FZuxնT-˥5Z~FTlb -5 "ńfAq="D!JvK⭤n&Z[E{Q91<;R8`4+͓+gRW:py3TJLȬ,$,GܟOy rzz =WKj}p6ڻd ,_ߓ,Bɗ«!Cp qMI5FR4\7n:>5yё(s-$J㟑(2IƜ>{"z5闧N]ס5WmNS5.tɋ}aC/Uě@J#ד>BVQK]~')MShoh4N:9Wi2KIM:[1eSN0lMYbIL)]4+RAENJh*4j8 = X l#xq[%54b!S?|9kh$k-x?ˤ )T3JvJT_t\xə$}3,X驂.x;Lz1."/ _<zAa#n#$6qA@̾{qo-s׃;/ܥژĤDơE׽$,}P1晱 ن\3GCOL#0f)7s6LP`q<t&l2GI DNHܼ&KQ4\ہ ? xc<j !E$n@2$$Y( }/m[DJLLLukE%gAM.5\pu.]RX' jf2Ao P7b7ZRTP2 endstream endobj 385 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3716 >> stream xW Tg(jI[nU{E+T {’â_}7+i[5._wsۛ9ͻ<>;MF ,X`;o]&;z˔bo|2%3PaBӼQF|ȤĔgJ H y_HePGێ~k<Иs+sXXǥZY[ZZfq\5`uf~C$a7W"j *z7Tvv/ W|Ud Ճ vdqh1<솫Iao)tQd ^56c, = !"h92Ef!Kdd64&OBKzuhG.\ eaPZ562§,]A6?}m?_i@8#\G*v/u龋n&ȄT *eGw,nĎe11P}H"@T5 t +g05Z@OHsdy% @UO -.;@M wJ@}|_N (F@hdaհ׸`" ٷOI'W%r%@a-%9 s#_Gxc MSD&NFUj520 "lFB X* Af!ᵀo_F?1W]KȈRESyJHaϒ~iX-Lrޥ[f5uBJP>,, s hdF-:r:bCִnf7pzP(U`r8 (u#Y'eqZ NiRuRLJUFHǒ0t-h`Ρ9~՘ɝ #掶R[Li YS9C} jsoP Z70eZCك+m594vĞ3:9$ݐQ݉ Z=GP›XS^$D@|5K2ŧhϋ6hbyƾJ %_IMadYΕu $c d F$KRT%؉f[i+1ɍW%%PBab Oa 1pzeMTPIRMDjNuꡈ@b|N_]82'jZ~!DU<:(dUw88 d24mbF`}B)++ohf4,h?l~V>Vm2n-N֚K**|kbϙ3ddh6$;1! 
8=ID$lur:>P4Dz3aM>g(x!H@\6 v0A"4n 4Y')ݏjUpw9r\wVH7s`p?̈́:8zx@'MtFXU\*ꊍNVFM+wJhYy`A歶G2q xWS{p*7g(V?D^K O'5Hʈ)@c"P2AaV^9P?ؒ[Tkë(e#ܽo:;{{;'uΎct?Rf7a{B7,K+rF<c;1b|"7#\frUyL&d5򆆚)p)(pNtA'oxP9 'V#%#S$a  &aCfFp"$ wS fjРjpA:Ab0p@s؉EN4$J2O1N(]Pȇ}#KZ:[7 @FHuB^" ?ʕs\,9LDD&F'Oi yBU^W^E-șհ`yxb9<NB&t&rK4 ox/H.NhʕN N3.rhq$ )y0rΆ+$ kMlRz{@H`o#U5mqC0ì&58^ @uYJ]Fi8ͺiN=ƅp yԙƚCɯDpP~ZVww1Ea'TJ -7nup[pOd'i ȂvhI?Ј#5Xwb>%6|*޻qp{EEx pt}e)Y)tCZ(N$||sK3]J7s@gO/\?BAB#`C1g;[%Ō:7*/P6">2_:qh-Cp,t\ #EwxݸӥÃŎ)Z5ӏLG1K󵸑3e5Y<dendstream endobj 386 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2761 >> stream xiXgLA TEvB P"u9h-*P (P "Jm@m^[=}Is?yr̻ MP4M[%PDIM5l 0DfS~ r $HbV9Td 2Qf4 eтQGJMZNHP;Cx7.]p&Uqɩi&:3]0*%]a )rLIM[L\:*:&6nnB:Y9v ');ÝfL_=C`51m h⏝d{2`+HvӼlə@?}8R#ny|Lr`lZԴxsvہhuq# Up՟FـOC[$'Qx)Yrk;:Q롺vV-e_"\IU!0 X-@{Pj|zcC3 &wfx6t8ϺVXάUŮ\bЇVXC;ΰp5^BQ*o KT=0nW̠`ߗ9!Oup@_"0uԋzѽ/:;A0uA)5s`H<N%X:w/@jN5)##HRalH%tĪ8,Gc-LXO0 #GDf, FD w у3}@$p+v1[X{BO?%B>-K䦣UC׌CXuEuQ}B㗥 A`˨n֋YdVVL4 M2-;e]&#ţԒn~A?}_U\}FDҐcp#l-ugzF }  ܀}`v`-aaCGNP.NzRwjz ѶKヷb;VX̭ (:UFm<9!nlL.x<8َ ڧe?^i:r,&F|,ߨޖ摋m]yauMIbר͊N?f0`ijL!{3@a q%nqf s_50ޒE`Ք4#Z^%!꯶ z r[o/vE|9]ML( CLOnKkNyVE hV7Jv*Ң fߌkxz>k#ܢ]RrjA>`]p(y|Ago9~o 6dLF> sW2n 컨7!e,b` &=6ܡUQ+ b[l7z26?^yG{cW0 \^=[lܥE!sT ۛ:x٫5W\mI0vB QRvcGHp-A0]}R{mIdxF::? }{W> stream xZKOF.F %3E6@IJr Ze=U )j09 %vWWWUU~X9Y5Wr﯊+b߮fFp0drSJ*7LU&۷0VxQ9glwWukjLvw)RZoaYS߹hM:YpzA{cE0BѬi :k!ur1zI޻GIVvɔhnHqMu^0òsJn*gYJ*%UYW'=&yDtv g{ W~4{$I00^ʲw 08Ng~af&5X,QB5"`y I5ivӗPVש@/dYxl]%e4ԵC&*񬷑.P!,'u?tp9}sġc8 %pgA@T0CxxӔ ])чkz"5DpxZ?_M(=lpͳv >9 8]0Z-xе%Cq+nCG3wF pKB`K (P N?7r,`aUU){ q1AOWYwa6>54+w@H6 G8ד:],'$U_u7Mv}mU*"٩=.Coo3qae7g'rAֱ'뿫Ї/ ݛ{h$}&:ߗMصۋoK T,gYca'ȡ 8uPg9ώQ2K2(IyH.& )]$I~@Ah ;BL+mVK فz UWDxP-G .RW`ZW݅D1-Ѝsrb#{G1ɯ"+K,$3*g9rf3crpg}/ߤ"DOWm0)d9x\HB80{[nie Ϋ="Slf: )HY9r,LBpyւ6J&EFӟr >hOڤĆTB2-oB̹rF]̈UbF6]4#KTQoKDH`W}fO=}s2{,Kɸ-Ls1Ng%i!eȔl= j΋q4ˆJd_XT{c^3%eH"Lk[ [.Fd(SQaLW؛5y8c-x2L t"ZϦ{*\t3F\ZsC%r$9 ],KVgɔmI+ Ÿ>/Sއ9}pqB\axW5 J'OՆK_餀T#?W>4;/NA_]ޏ3̥ xg0np9Ap@H.hxNdtHN9AԂ] $=0x#0d(%dV(ܬ-4$|쥲'O32bMyQ*MwQ)c_ rpZlr>b?lAee1)d\& XPVqӽuy8k"{{*|~z:.^gոuH;{ _,T|Ed4`}fYȉz'~ZDt{\i"_0PNHIRm36Uάȱ>S4 &9LO`|v)E+p۪ճQ6 ƔI4 _:N9f;VVl@uMPl\9]wnbwTө=v9C<X/)90یNdrᩞb8zTP\d.7f)!,E dp%L5Q>ng0!ß:1Kl( 7ޜ]Ռ?ڀX雪Wcd*: ,HDnC}u/'%6F<(}/_b %Fqg_^o7 GI5ȓXT) 7mԜڞ`VV^endstream endobj 388 0 obj << /Filter /FlateDecode /Length 5322 >> stream x\o$qc-ȃ a#$lLNȀb$)oN\jwx9ꏙ^) w{kWU=aѵlٿ|p-n~8cEr#[xue fuQuB-^l.])aXXدnBpkś )Vkl^nZ ]>!;wTƥ/!2۸X]r N7T }*|v5'ιƢhn&ċw q\O)w;x4po__;^S3p:㾰ë<^$A+A3@9p\yql kMI{`gQ6Yz̲ ^}tIokB1 E/  rK*E)bMm7LE_ EhоRęq&/ I(/jy0Y)WQ h+Paw#Ys(\Ǫd."wr8l l@vbMLT|_8ݝroư8uwr|=J|bS+.j]+{@$-+bc5,?QME g6,5LgCjZٱGz+#'tM uIU+#0iG9a^,4BDz$V_ /@*%tnp RZ#G`RVO ((d5ؓ7HAXiRu=(|V9Hl7[o6'9XyN>W HQ_=P "V3z}%ۏD8 ͦiadWCY$墹$ WN/8B ]k$=kO[.[xl2oمd"!7Η9̛TrQNn>|͑qp4q* xxX:lև/_tㇻuDy|ҲÚT4v}Ku-@7;lWHԪ {88q ༄Z]Sد,n4Bbq##.,nň#.6g\B)?" #PKab$QtQ8Nx=`L)FKѝnnJ#9& MJ?]kh@ F)r qu5q+s[4DY!a4skuB "FzFj_fZ W9sJ^A_gqt88Cdp4HL*?:ڙ+Dzxq- :9[je:z 9ڝZZI#wjBq3\F=k򸚑 _H!U:# Dq 20~D‘(D=mN (Sf`7p 1H*B!{p r@΄\(y1 &;r:ayo?<2Oё"^%.6="\p [|HFe{3&|"m6VA\#>IEyVT3cJYN+IfirR~D2K=iLPYlZZ.iX4+nNOCzzk5"r$&/J . ׾ߧUQ|AQT0TE=N]5D0RYu:bKԟ7S$9G5IQP'7}_W݋jqĔ[?q^uMyLBў耚'Ə#?,Lp=^G٩㩷Py]Oϫd3>fǴhEǏ't(?qn*(*LEjh~aJ:Hf34@?N{|Lk|{ 0O>]U86f|ZUclɫ6yxn;O ~! }q*&ĐJÃqWS\jr&4ٓ0rQ@11c_$P*ՋTnЧ&5Í2{* |RPS|[]Zb˜L;"jfROL`I{/Ix}'l9* kXa/4V> stream x]&Grx߿a/6qgU^ B643H'"Cgz/OYdeeFg~MO7&ɯOϗ/OeR1߿Kӕv>Ҟ޿y{}'u|{ogxrchۗ敮~rU~3"QJy>>Sr?w?GMo~oS~,Q~磌G)4뗯LE-oP=Ϸ5쿾74Cu իޟj(O^sO}ӞϚsy_IW}帞g_ԯiSyg| 媵>rr=ώpri$ezmbޜԟSnG]sBM婧g۷84Uڳsv~{sK[JgyRGivVN Ww;ejW.l\g}ek^ej?ЌީcɁ\s4ϑ. 
!ot(md啖Ъ^{Q>|.)g_M)B)U(WB*g4^1drT>S(ɆIQ{J.m(E`ȧK̑E-9:qOg/sj=UUCO|V.uv+ e}g60Qf̊RRG},jSxBBin GkZu `FxN<ݳd9_j^Q1W 7.dzO;A;V<4JbOeir[VOxGw֞z{NU/:+UTw9/,<>G|QDǝ?yR evנ Wוgvۚdv]}[Ӈ +xfLHsH7 1ev'E9AGM=ޓH:,v8#pF}#@@d% /˿dC,"v>``}I^kz_O6p|s{sZ0d:7}:_끯s^aUðૡע+ |_*? _Nq| /S_J_B| +)~ _6;q|7sc` 8LBً ag;VG֟Flv<6[_vm?X`1+M|us|ATk"v^9rD\U~ixuʿ;op}\׿pw׉4 DW'".ׅ2bT)WLp c \Y%ņN[a%ƽ2l14l]V֠|/[Bl 5(6~ tl[tl}б՟NbdVxMօbkP? .Hk >[@>dp[{ }fJl:Z(%I0Z.H ,EqM ۿUC`Piju1f|P2:zG.6Q _MpD)MC:NW<]8F f8 kmS5kC7ᒡ*R x0{Рm\@V ==CNe!)aO@چ m8B `[UtHP]3ڶw ^hbv)i+39`NzVuP;xsVr)8yLж 9hIШ7 9>󐙹{y4KKiRo?9$m: R末<>}z谶?XnJx!nY?q&M#Z+5xa dрm8gZ8kT rQ+L|*mѡmsbqvjN|Wi6f8em[FX{fDΑ%: R,$+eжnIX7-|&vʅWPĉgܺ&ԖCE0AmQ!We>bJ\ҳ !h#^hb;gjretu fwcn9H ؜/+W]6(EC|*7(N>ȼܠ^lk Jn9h"%Nhn"V9Py9]u4o!IaOv4ꈮhF6R잞!fM%k7,{g[OUTd|Zs'%: R9C/* ~`v얎!bNuE#Mc?Sv~H ѹvBJNHΡ| ս!#|lj_]_x}'4pisWNhnDl<MʗeySX>?bmhu9F=mQ!W(K(C|,j ؜/K;)p~r9C6o] T;mhw'4`t xQ# ykZgP|gXgP8] T#LHẟW_>E+I_ +۷Sϼ\mNA |wA)awBuOT NC_9G烂\2,sT؎a)6lP Æ`vF얎!b؃ZNPy {`~ ;W_|wUu7n ؽА3ĬU]}PlF哞*wXm*mIO #F|ZAZI oN1㴝ndQ gR46fߛ ==C3g E >$qzMJ.+%#oڠa.@ ,l m 쮞{3Qy?*Fi8_(\Iӏo9Wv|߂9pTj3kNNEkY _w?~};r(=S۷_ӏ}+rVp}kT >gW5JQ,NYKǩ+) WIe2Vu\e&"eZ ݟV0 B a7d9Ï(o!@>0)5ǻe=ObjYN)-\Fٸpn\Fٸ D&%&R'2,I )WNMBqPY3^i:()P&kݡ9,UK2A*Z (c7"_N*@rDwŐu6$5pJ`הCu!*+_[gzS,rҟ=Cإ{$.Hd\%9lM =P0@J="24ezq/{Yְ`0ŜeF&D{1Í_G*)fDkW]񧙾={WE^#&W:{V7?h@>v^[դOƯ1:&Vb(Q7X}MUݾ lyd!qϤ`z8X;NWOh%vS!X VOWJX`؍X6^jCjZ?|p>Vf]`<NqXUZƄU|V}rX 7Xۜ͠wX=uXmqcUцXR6TU"C悎Z\#a~EԳ Ta`0@5gjoU;6,)zyU=|kl#laAuޓ6PmITV#PmE/º* 4*v{TFG_^Q@>DTSۅT鐊 AgY!Y_T`yUc_s  @*4]mT<ÐDGKT -BHm]]Tt /c*QC_T_uLx_SA[HL"$/e Q~TyW\,X|^׊xIdB Xit`c x`+ Rϋ:|_2՟ՠ+V>@P}T+|QPLA /pK6i~jro(MS 7ҢA۸&&R+Թw"^BrjYdr\bYaC]E %)z\жݻz.f] nbujZ-/MV۷뒩Lp ~bvQ/3=7#yc6/4^ 1Cs)K3nókNL89%D9E sfh=잞UUgA1M vWp W׳svH1(MpDޘ-Bjz5zʊa.5y) 7펑bQF8GZGW^hbdy/ A*$f 9˛ ڂn/l"^.0 򽎧pdhSn j`%Ґ1 [frǧיe淪˽iSllR|&&Rꎎ!b] }Q+:.Y⢜5u^ySdSVB|@Zvx?>^9Oӥ''ICɺ2>㐥g20oeufuW~Y~#V5<7qQS&*.)(*[\2tG!V0(Z1$HPaEM_91cHCq;Wd(, xO٫l7e-$x;$yRO!uE։䠑y!c8,Α4'A+?8|4^p_mBS 8]kpuA)awB#vK\_Mbjx'KmmwE41m -C7[xq.3gh~}sRH &lo|Ѐ/īe+M~9fYdmǺ~? իxͥpALnV!B8\)ZCOxf~} :p *hs;cucвFy^w4E i4[49w4^7t $t fͦ&%T>Ͷag6AQB T@o#Rs Q:ాPd\9>qrf(L!6EwFF ==C.9Jø+jح-vYRB5;cO.cȡv`s.aCvW!o:q Hj]'Hm1TS$;Gʔc_z |Zڶ'Fiv/4htd_|ęm٠gx|!MB>_mDOhG04v'$bt ~_](:fC%74F\;8.CQ):h]n9h ؽА3Ĭ}ZA2>ʺ7FeZmNb"`&*i iVknTr2d8Şy6C!xiq/>d8 PC 49DJ؝X19 a91;@g*1`MT&LDe&&R~n!Zmnep=GQ]}>j§8(PC 44v'4^t 쪊w؛8M΋ĻJq%*F%M:]V8Yv?4hw q;:r̓޺ΓT2T u+d.>"&ByULFF ==C̮)X?-,ó~QoSg=~w/4f 1ZlC#@9$<cH-;$]u B GJA1 CJCv?4jw [~#Kj(9Uo{$E; *G䠑&`BvS3mz+1"gơ/.MfўpݢY[Vͯp:(fU][m83+NG# M 4{zfH^Vڅ_3v䯅jRѦcNQxY /74Ѯ}үw8`y&¯YY`_mj6X$x9E sfprH{!g>^MU^)Ss%ZxNYznRޙHpt!/zwQO}T=';̙l,~/z6svJ(ׅJqc2E+kU*pݜ\'@m<ҧ%mZ/5/)B}U䎔(i]9'/r'yCïR䮶<_IOխk?"w5xY)UtZ$:4}H=9Or$e0I)DR,<dj$źVlRpEt`:h8Ƶv<;bxުOCrFxJ:ulv6y.v]u[Jpt"!j-D %OrY`ϙ4]zFߓ5cu+hlT!СԡPtDY4@z6 U@.޳i7HmQKg@:@*j|^uCziIO| 6 |I+y>ftR> Np( /Cr("@R;J}"Xm)h:ꆦ]AS)98_M .4{ lSOr0숚4Qw@MZ󂨖}AîZ!UGOfiT GA*T>@}'Z9+ j,KC*u!k< -FgSzBjԏ uT/)%Ri$1{AH-=@j|!z-Tw ]׶tRa@B*Fo w^1Օ6LL-cjԲ{ Q@>%@]5za+`C_7#9A (vkfr!_۸8M6 bwGp5}Nly˃AeZ9Xǐ~V)s|~ @ݲ6.0F=2z2[<嬤ύ-f^ґQMA _ J!Ҕ[g64NhnN.sZջVN9ksp[2URۢ8! 
iQm\A#V =Cc,Uf9;>q2Hmqg_94%xc}tv6 UF얎d+::lS\:ox&M8Ʒ&;b%[4$l.Hnbd|KgQx-kC-cF-: d)d{C։aVRF쮾I܆T|~Y)rTmmG߉ 䝣Hlh~%~`/?^C 䠉;aAcXDjcN]z;5ű*jlmwZF :7ZޛAo[Djf:ʩ7k = KֆZ,r.e+HIX ԑ:-7C [mv/4j 1CNqiRmo,0tM?)MGujq66R잞!f3Hv)VUhd}Wg-:YpeHv EcB*ҖEp+MF!n]S$1NAthJ[[,9|.OB։3/P1ڢ@ۢL mv'4ht ;'=ӈSRUHQH̶xC/M$K]۪W8EΨ<m+v ]}C.yEXq㇨X;)S[r\| 낛ϴWk4֊e/+eQgd>ȶ)Kid%Dm|#w @)WMm~WNhnrUEzM"geY0opŎ\V9\m5C VRMi ]x-DKNG~.*:1<ˡVڷukCkfA_\E\` @v BŪb/{awʹr\؆_P۝ O{¿2z~!^Ew`Ɛ7̠Ⱦٶ8t>kC+pTKlN rQ0O!l:PVaVR쮾!nUWc .(ب65:G.\s[asV8YI !`, XS_Ͻ]*pu]v.B*}w :|Upc|1_cu'$>Rw?'g;(Ds~v'R΅<6A?ܹ*ӟA$l^Xd0gN)+j!uK{ `Ճ_]}5w]6m6]^]^e?)S^N媺{r/y_p9ᮛFk[WH~-lEt|ixIf:4_0yTV&l\5I(WC)s2ƅ\Fٸ̟kcC:h9;`,ʞ299]ɠ9i-/4?O!Eɘ9ƣQ~JYj|}j} \XbsKڿ?%rsY!iz""JU p+*&ŗДKm_"R˩/9>)G3YO$賔P_:B*A(]*ٴ=p%}H8?8e6H.Or$ 5G[ȀLG*Xؒ,^OjC"- qKWH/kqnLvا;/ULKUoz\" 7Ct笧[f'7;;2)sSY0kUTC>M7&az'ް"\sʭ.shR*;IN E{JΟ%I%@]lY91C]9oNSn/hЎɈd"5lbl%)LJ~8Ȧak}H([g:tQΆO3!](zx)aUSvSWsSաghւ2pc*$BU$et'9o0|b PHA eXf5F9I:r TMrNȧPiSi4p,Ŏˤ_TlX]}ޔe+e3x\rFgUȪ#L+|7[I˜d,X(bߤ \fZ]2P>kynIH-SrIԕ2ѳo<GZ֌yjw|&M2T6vvy(6 ߃?r}@jP~ n㰚V1`UêAj7XXZ ɣZs\E“jє(pz5Z,EfMkdê%_`ld[qUGddU|YUeCckh犭ei9+C+;r@%]Y5ZY&D֣n"k‚kّzSVև:YW tbN Ύn:68N@WGAGWK-k]/誏5/7Nql>N^c=Y@>duZ-!oZl 02q(Z{,p(BUl߁ XBD$ Uz\> X|X}Ӏ5(օkh'ŀ5".z7qqS  k%:F F!RYZFWvh#5#B+(:th=* ZAI+3zꏫԾ"ٞzUʈv\o03NWXf' deGz ǿZZ>p\}W 3b|l/$O(f|s0wh7BM@E\a쁨O!!KБ-œAh^ywf gÈ́c U1!ֆU6G}!ۢW8@G ]vkm ZA5._GU;Y֗Ae4dZyS:m EHj HU m}#V ==CPDvyY(mI?:7716r Q۪C %a?$LNuѡmf9nQoi |Plk. )@tP cQ`~Yn~! v&(M Di䠕Qo[C.s V< 7,yxum90% uح ]}C΂̈́oNeph9?ʅo-;S_WQ-VVRGCoAQP&6-gP ;7Ӳ2RPS2`ͭpv?$n qu9تZܧIŪuV90E۾.]Us .V6 y)l˹ |Cؼp\M`3llr&awBvKǐ3 ߻S.?bӅ2G[98ݳa. tF6zga]wlp ײ}w!p)f%r{ wvl~hVN1,N^зMߪ][RqQ'諤_m._F&awACvC-丐_EgGm#U$x+%uʭpv?4dwM6;cY?C||wJr=\< r[pv@ @)`wBv?[.reH8Q4kÚ&wbZjO:ǩ+/.ۢC0RJdqж;v?4dw qRG{!P`zvn͈햵6^9 nڻp`AGUV & BSf#u\O_Ұ˷VgR2:I2F) H,DG ֶ['ɤV ==lryKjTʔ+8|ݺbu%2 MPi 5:m7\h#^hb6I##ŵSTo ~bX~Ń{[)/vy.]vDqz+4^,jϭ~e)2HNe'5-f^2udV) '$$9c:wN mv?$b qK]/ A'.&ːyD9 v2$ Yڶ'VÌ Bi8%G.y RL?!5Lݶ k`_ R;46/4`t C ۅԪ/+Aj%ToO:beOѶLcx#UpV,Ȅ m˭S@)`AuC9?9ϵDS{U+NVfxR:M.U\@lArMmۭ+[mv/4` 1kY#%-vPvǡF>䝜g9 ])kp NZ.( 8y-:mwO ]}CPqVsus&fYYw-S*؄H.lc>Ea՛{yR!~N9 =pj6^2 rQvRBlaCz 0+)aCuW$nS,Ux632_esֆ֑eWs[K`[UdN:N+VV B.[t&:l]wΨlsEW@gZAyCcvW~-ӧ$Hɴk9jf"d].`o(erPdSЦ h]WpjdiZX auΑu?K`;SR2Э ͞|dv[$剗/AvЋ'yOm*%x_b\}sϭ ]}Crs8]xoN eܱ %QCIm-,U]E0Jѯ?.OamѡV8Yv?4nw q+M3!fbRPEک3Rd=R˖mB?g]Mp|s 钯yHv$E9nqo[9FnҶPq u{Z8̛u,(0W{rfWUzh4"̚JG^]Wy[&)fjg~Vl^hb֥SKe\K2ō(+Ɲ6S۪B E8Br2&`BcvOS׋Hq,& gpjz9=%9,ZNpi7oF!nWFqEf)\`zNy甧0mb k-b n ؽШ3lX\|am9C$}8bq6A;v1:|[fIxFcͅPpv9 ?G*:%G&MG(޺cWF8DZk{DI,ǠXoJ/k9Y?lyG#:KGR)zVVz7- MWfv3ɃQ|Ar Ey)vv% rJJи7-_:.G8ac IGAql@(Äiq&m6;xͯqPrB`+ZZ&SO͖左BoCpv0W#{wZkn&mxӝA|U;)Cg Xʇfg #)L~ 4 4v4L7t jJ爝bG?ݞ>|*x[N{EO;%6V6  BƩȌO&z:$DzX%F+m~h;h,-(nYf9 oIwǰӿ&)%7/`=@ۻ7?T*'g*)K??}ͷ<7/2""OxyS:&X􋟰ez۩\*s…X .,\nw*&4?_0-\Fٸ̆kogy>=z*#?ݛ s=,̗e~; ~'Np/SE@yO)KS|OVp`kOGt4ܕq{2ʟGi|qL^6积g+t OsoshƅNrKmot`CF{Ls(SpvOơeuߴ9"Kü]k3B){3L\62)|sbF(Rgs|Tso'k]<}9"< Aq;ΥU&zIJVRЪFs%-A3 e-zXxP(<ҌQrA`țXk/=4/2clQ=LHѕ2uJ98tj㎺ɩyHY#dS0F%)4q+չNukh>j*\U,Ǵ\&T90d+V43]T˩5}Ji7K(չ2YXW2rsYU"mYFA+9TYlzPMg@e=dNZ-lR4s]JתaA=dKLqu,(r ~K+[ʼcأ*N!Ƿ,[ܔW: )ٰyH{堩n1PN$f'K`;dɖe)FnX{1jm}dQϡHV^v5.OgW}@b> ςX2*}N)IYG0y!8 _,pzڐQp NV8-B8he,xj₧V{Ӭ iDX&^]t>cGPYm@$!jZ:ʎ͐gETaD=QQҪxx=}Ox>PT<^P/ϏTT)P?PS! 
r(0Uhr_ nl:c*Rʾ:`jySgU`bj4a+ڱa*x*TP|7L˵ajN@5@nj_"~>8}'~A`)@f$xr dv(mQ p39 h3vWs0}tY/LơG1R* Tnr&aB1D첌^ nslC_ɠgƕaK'=XP{z"f3]to7N=ilCOUÝ9cJKDJkv`~Q*17nIؼ01D,[269Vj cqKh[ =&G5Lo\AF s8 iڽww'4bt c1٫^Dj\{b;tEl{Ot# `?3 if[VR&"oWWXG}uZJnI،0ɟwXf T)jX?2K=8Wt lօ f5;Tб0nIؼ@n>rcN0)Kl_qdnq,niLt,j 0];F8DD잎scN0rI@khcަcAP\*179hIؼ11#Yfj_ &m fjnw6ӭBP]S݅"U*.t"k|j8-%xfAѹjHln`D^;cX٥#~kGRyDGo HFi9j -C.@Ʊ}ćpl }3r˝Mǂ`fQͦpFíYЈ1DlXN<~9| ,QaŸ"s"_rr@J]А-Df l-RfoI^F0)mڕ+HIx 9~w -Cp^l}eq~q=A1Q{ 6ZSGsgMn!Z=7J9&B_nيi8qjJmJ¹%T*|*Ew'4^tYvD'X?j~˼'be#1I`=1ڑ\u4W=эpZm6/$bt /6jUx3]/BխeU3h4U[4.hn',8%$~$p)WP"Ip-׫AYzn MS뵥ퟔxojH]#u2U-ȖW=k1}3Lx0cLFٸʩ}ߙ`@9(W(yI՝(p-w73Nyҳ޻ENZ^iJ.?Wm4O/ʳϪ'Wyg4e$ȳ,ڱY0˹g_}7l_;dZS`j: ($plM q3*!X(9O+hSCO1kqx8-!䉨Y]a2*k69AU6*˥Oxs2&7[b9V]2D7ʻWKt  ;yu8KY#Tѫm fx1W[SoA4پ_QZw=m4O]ǁ1f;3!u{j^U(Ogbg9[`*ˉs1$A@꺟}ߎ'ZjKUߛQ%γ2ͮ/;dypTfكzX&@ lUYpTEgڊؒ Q8v'jj^QzHS?T2򫃪̀?6'^@jV}7^^=R> HT˼v1S5>0Bo*1*.8buISc*ZR!AURUc@Tvie!b*T97T\vTeb@UtU5a努Hrj}+2]˨ʴzCzp9QuRUT`|S1TSAhI,b'W%(MLc/#1éa/SX1#yĹ-JցsT?]J'*"SScBLHqG0t㉩R` E}jL VLԞmTA5{T7IR0i5SZ?R1o:R_ﵿ6M3~RBH3Hţ!j^~Ox>􁧟%0sGe_yPYv<qgQA)ۢC E:qHEq [iv?5]SJC "+&nXq7E<. |m}3|B0oIV \f?jΔ-f&8ekUp Z#thN6 A/:E{vE{t9,cc*Ia7I#A)aBvO3ɕ2g2N]V+hX*XS5!/CϺk۽ F -C.Kxe;aFmC7r+BA rQNI)Np:m7O  ]}CF~=r͋*M@+$IZ'V̶N͉UPtZl >2#8rm^{Qg8x|$ɹd.oQat_C6 Grh gRejHжݾZI %M'Rx%jO(If٨BAl"wU4މ & he}CW$AЏVa EcGE `M FHXqH|/M fv~^I4fEҁ^"Kw $t)?l$m}J1m!W(\,5 eT́}bҶ;+ìt My,%Sɫfð6bPY8lBk;KƸZo {v'65L7qQQ+Fh3tsQuȹ+QЈc躰*d /Pam_p$l^hn"֤"38UT(c뫘 !SԺ..CWi:ramb3F ؽА3Ĭ[L\(]i65;` ]-fP4 ܜ;rF ؽИ333IvuL9x=#Q 55}7l^h除)(󩄖ĭa5`m+9L@4;5iV=H*-7ό m ==C&ԡOm).z1gUdn9Xjɑ,%D[t XG MJwm+#T ШշwKW;S+J;0WUrK`ۿ#EJ:;[A$~Hn6=IB˒py}˶9XY{h9uo½ݼ;嬥4J]~2 ijK̈́a{L(ۉ)H.W,\m+ m ==C̊ 9Vgyq))je"mU:UU\@d;qeРmyb9j{1gٌ_{S3Uƍ[:h,%mݑ&J`o莴9 C'@'d vH-e 6E?Q}ߋO68ho.hjhMٌXFoRϼm]Ħor7Sl/ *w䠍{agY6?'YYnmqYvMZE_4=W)`sAu?|=r#y<<S*ӯShsw.8u8O+A+9~h6B}@o__,-Xk)%(u[_v?4nwZK1E(I08 aLȑ &0lLGrFJؽ3 +S̰kCj?εG#&)hr<䠍{1gymfMZ_u}CkT$Y:)hry]zwAu?j{-X':[mG%ïf ia m(4hrwfBuOcS4k@tv0hJ D3#4m [mv/4f 1ռ)nXUAj@9|?p.شmj]ns WU:$U$J9yQ B6f]v+.ۢC0J.``eQmsbs&`BuO$fCqR<(zj1Ap&gQx `Ic޼+ω砉;a!cXU6{%9N)8N/j1lY{HP[s l! 
I&en9j{!g|Sh' =y~ &ގ 50uK.ڞA#Oy f[FmĞZ'^˧g?1t=YSlK6u > vA)`BvOz)F }r czV9NUK5ln9ھ]}Cz Yeg!_",   B&3H䅑M-N"Lǣj-vJ%-*pB:tAPmsb9jycإ‘:)j{Eꑞ9z!Yx=9 3/2Ddd[֨2+^ZbS-NpeS:ta9-\h)A |+ŬZ ]}{Nל,SM =A.A3 f lW.Hl}^_J Sf}q ?_Ͷ~R"BpMFn!Z:W߁oD,eߏ Kg~!ߏ 4v5Nr*Z0e\ʂ䉶JVOrT=%Vr)UGrJ8Fж 9hI`7ĭ 6eRcJRj1Z&W;I9 NY]Mi]Oz8I햎!bX3kSC61aNqR jN=ז˽- ɒtO @hж214v',Fwt R%(yeTV_?YP{ cp\hXTd^t]:bIm˽w4`w : qosURJi |+ZuȲ3 =-*@cImۭA)`B#vOJFI*9(vNIkr`H[NRyOREA;':$[A$~h6;Iä@NЌ>ucf񢷞hCou" ,@nqH;1_pEXC!(Jk{t9zo2S, s(䠍{!gYS԰rj9m5ñr]<;F ͢]˸&RF~!^%@=)jQ۳''+COb={BP4KQ r&aCcvWx=8EYEӣC^9&!Ia7=F8DZ {zOn-֎|ha; ӳIxzLԨZ RvK 5䔼]_a\k?lGK?\)hw}a9ʆѯЀ3ĬjU3aS$uJY;-}qt+k鞑B(vSExF#w잞!ff%D(i٘qć = ]_p t_ ͡ n ؍Pʡwoӛ|'g|޿Sˤ<ׄo~X?sx?}^s'0|_^atխ[ve?@_Fz&O-fU%z_7w/2wcgɘho~zڑ_u]=~M|a]ax96Ma>֛f%b2lGLZTO WZ^N-xKU<1.:N_ZpmĶB}/'o0/ >!r)j5sNz\% =22Dmˈ9 w*KܪPKDy6ވj*煫sE%8 D!-c>FyL4rUu6`@U Eqc ,:oC0䑸p#$$i ?.jppׁ"M[QQШ^p?d]CMd-"q&Z%K>in6k/Loh8N&jB8NuNY837"4I&)~[FmA`,\:N~nXf-j**4,,7z d'~25|A~׊ 3n)ksJ"Bͬ0ng͋N*T1 C2$/$>„߹&k-AtҨBD8aꋇSv"A=lpƷLrz=eDK-J@پfJrl v^0սU nIy#0,<1DOgVHX0F>$IG.5΂VkH1*d]UQ 5/4zes?0(&@81$fhhqape%:릧SB桲>E{1έolUfxQKͶM"y<[d>Q/:re!սϵ'SԄ䜒IUoM^0nwiT1TVYKYC uA͛"evV _u͒endstream endobj 390 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 453 >> stream xcd`ab`dd M3 JM/I, f!CǺ N7aa9CW{h ̌E% Fƺ@R!RIO+19;8;S!1/EKWO/(>ЈAAd?Kk3%K1to1rK2۔=c]+ۻsO/{Vw\B ۡ݇fvaO?d ߧ~`h[gmgMCKUwm7o".{rzJɕ+]7RE t#N[bu;B7ǗöV.1N66vuuHpg+[ym-ýwRO<<z7O;stsendstream endobj 391 0 obj << /Filter /FlateDecode /Length 3785 >> stream x[IsNL.9X:`,޻UJ[WYf*qiT4g'=tc!EKҁn0+r2+ڞUsR̮N~8!Ymg</OT2d IFw0XdpAs3/Ne̋\Sc]f&[:Vq|Ypjd . Em=_PY 9El՗$+ɔrU#hݜ`eGaQoYָe%P%O>9?i]p5;Q Tݳe'MV_]*HVf' xcJ'"/(͖[&&ӂh/H^h w9`BL#8Qj%PD8Nnuċ6)&^fwF%ñwnzCu ^xꭅľ7eMRyu.7\CU+"b ֿP5׳a:.X9UώuW9LT7ֵi{v"6:901p|gd) ~9ȜKW1OoGmBl&d`7[π{ o2I4LhO4ތV5գr S<00>)F%&153:5oƝ쭺d>W@w២d&_ kyjOм )20f,װط^) ^ %MVMHA9c2hJ 2R޶(lg#XwѺ!U%:i܊e yaaIAnPb2pű;զll[6M%-]Jج Cek}ڰ[9!l&%WP_֓qXy9V>Q({M\锋3ɂp<8K?-[6q\0su;Z?>bf0`u`6Gc٧աD^ड1y6P cm.!p^ f 5KG^>b'BvD;m'Lֳح>l贳_>*J2Ȧx|!(*cn vEEuNܭN//=snO^73k2ᶾXo>vT}QlanhpK>16ߕuŽvX fD/eYq Vw3 yi cex~=!;kEAgbn;S.. (o{ki r84I i~.ͺ:~l Pj#rXj2eċ; ncU`a[0mL8.Pf1^AE4`9k!Md#AnHʐKd^X>F \:7?e'ۤ,l 'O?˰B2un߿ sW@⪐ ˬ֑=@}@[q溲qu,~m:ݮnSzs[Wׁ7*$YZۣ-] U7nD'hmQ{POnbH ;_jщ։ѥN#e" bZ|,RX~lC4RpGzw<ԛG0yy5mDw-Q6 :ˠ6ԏl]?(~\e)z}1CW6L컩c3ӋP)Iǰd`L5.(>f>633JPQ0vοO0#&8T(īkO0NrP~;YH{򍦽N-cLaڿWw{K@,=\pbӱTY@Yσk^܀:!=5u~o2,zz 3-qE4 ,>۽1SesMNiJ )jWZHbE7CZ=Үb6&CWA_ԵIhg|dIĬ{퓁`xPz}`sћT؞Q@mY1lWa .W:7]pyE[dYRB y:xh ~(njئtjmBv %z]c D- E)y.u%$w^` k]¸m[ W2ݱM Q cyt煽|5-~xj `b&ý YT}љQS픖X sNݤݬVڣf c>TTm;<11I`H Bxcy$6lkrե_ sٜ mvz?Xӑ-xt4n^y{}8*;y?tt坈`1ac:^3; XI? ewJ͡m0u=W3myմ mS]bp.M$S =¸u@M'ͪOc[ǘKOVE=g]k V4 ߢ  5ОGYVxZ͕Qwxj7ddbe5r:K(ce+mͧy8w4-#*&']dk $.\GV q9ulR\%F'EI93kc˘"8i *Oˡ#4hW=;/Nw@ =^W>\ DNޟibTD1JN޶n?o2e7%==b}N܍p(!|M!vrX߽cLٳѹƿ( iAx^ޮ7endstream endobj 392 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 729 >> stream xm[HSqiuiVn;!iQɐDl8/Vj\:uLӦh-M9=) "ڠ ,]KX+ci30 7{F cV7ء~[/C3:pOy3^0@>#GKE1,MQ2ŸVz6:_>ryV0w|K5\pF|B񆱚iQ,q" Ȣ:plq5 {s#T1eIZ&-wmP Mj[ՠ`u9{ &HO4)q]L>5yA\ Y mclL{qB80N;: )4E-endstream endobj 393 0 obj << /Filter /FlateDecode /Length 21800 >> stream xݽmeGn݉pu}]9bd) { } g5M.G<v5~tD'H?K9M?{x3oLon'sx/:R;rLG9}5-]N_<ޜ:|9o<jLm8KֿVխ{pU;}??~Q˴ǧWsyo;}. Z籔ԇzA'V^eG=3ߠu^y;?? 
Woo>Wu˥3=};.u;}|m9Mȟߜ'kiө N_.lBn-K".ٺvQkѹ幼]ʶ^<ϭk`& ]>6`u~I׉%߮I\+OK⥵- ~abO'K9K_yLt9͵wt2 A7X˜ |i%1pXUl rx e e$qlU^n:8fm?]^{^{m!Ů28>c rX[RK JA qZVwM:[U,O+>.{L>@ Lߊ MmK`iA)HA)am /!csLςo]V>ӫ?]{ܷL%ā-RCtRK JA q؜<ێ;.e9ILo_6;@gviZ/{j 1ED.}I KL Fmتy*0ͪ-֟JodXz2}'Ok҂ yY> RR8{W碌c3_ U'> 3 -"-82ԇ҂RqfC66?Y/msX%SgWaCZ/`iA)HA)mGnjم$uկ0sjd+0}Ja XZt` ~|l>+~k+-@r-+a듬Cn z.K Y`@P;YO|-?u`4`0as3REynQd!ok[et)AA聐F~gsJԁ:k!)("99L;W!2wy$Ycl?n]և!Ρ!`i)R:q؟՘e !ulͳNZ֕,ǒ88]h `~aO20XWHA I nT1gUx)`=wlBD8>T_} rkA[R)HA)a>Ny165cS{bU=S7fe9Qlօ C(ȡ[I},-() $ X.QވRnUɆz17aZNFU]օ]5HYWuB R@H2a} u~'Ze}4j7Md5qp82`,Z]#e2Hv0{g3>^[@g_ m}xuMֵ8(Z~h=-뒍Bԁ )("9ֹ4(#0ʈq&WjRN I)6"n ̦M0J_Ev,29IUzC \CaQ (7-a̧laZ]}™oY_v/T!ir{2йmFm_q9t'w5!T tevm]?#+wڭ|ޜWz }#~1ZE쟿S<6[:-FL􃥥ef[gr  KQlN3!FT4AtšcV=SУM֗5Q2EOvL[`ATp̽ԇ882$q6cP`9dKqkjr#9baQ RPJrT8vQ49R%ϒt.m]T%5'l1Yރ8j"0[85-xe}a嬫} ?Mʮ:+{mL_0V0}"/ Ɗu_)pcUM.qgk(]Rdok7P띿S86Ň4/|ͫ9`bFB[`~"W# 0J&$qx] pUfc$Qn}WpmvR9>D`vN |r#5a0ƚ)(%9Џ_ŎϜv9_ B_wրU&AײeM>pcYߓއl@ ÄzX6y,X e s]+5q l]:OLb̚0XMi`0 "Ja8*xDǵ%`h[K@fo9braW827@A)aWYFp L}Y!Gš0 =St^Ӟ8>o#:)8ȁΚ0;)(%90Kl]-1bhHM5GrmXbߒ)6l1bhJ;?ކ1F)HA)a<1~_Z{aژ)֊s L)r>~OaMvd31?K7V=m-cU^N 2sbp z,bɠ(\`CL`V 7=3Iu?Oe^r5n@8 3p]#,3JA 0h+,kw9.ytKϓ"punÕl~5.2\dAq_ a9f+[f֮dU[2;>2;|e[fH $qXWs:f'(|L4L! e 9fJA `| yLi(]alVQBJāq0=nrmK}uR0}zWF =]#E2e āpʀqO8'}i+{mJk3k ^kۆf Y6G ; OAvz\9D8 !YObȁ^0~2J4HI8-=eK֚`fS֕ے)882b"Gx( چq . HLfnCl&7yy݈hyIlv0fnCh&wqA`ƉJ9fw{Y6lffO*=l}GQ RPJrǡc[}c Iu[k [>d cs $s0÷3FXm3gluf hgagm|| FDc33$a,p>Vs|||\pG`|>G#)}> ug: Ec&L͗91\g3W;Bs_ _4XmٌpCǗDkԠ=uMw|c#\gq گ kzcƨ'G".f%`^tɁ. gD)2x8cUh=NףuEc&'PWaw1a㮰QRPDr-I(a&8uBQ&$=qfk%U=qXDA)a ʵ}F -,9\ъr$;hr̛ F ,r>L0C'|4 10X&MVY_Sb~p%#1W?#K W_-)ˣ-W5VR2"e0+-881T5G(#H08/Պ[yB34!C34?`뒚sQS4L(|‘(ugo5뚗vH =6l:`l0Aї* OxG(aBJp=O5B5qŚC3K̄ӑ(fKtmLZ-y""8i3V%uF=fZtxʶ@3c|^ rV pT`iA)HA)a֍lL1k tې0kaVJ%5$bR )"L8]׮G0s3F_CH\\3EE 98XI^(rlmu}Z|*$qxoJ9 b Q)L^Mw%F _@ t0,ܮ. qęY*E)0٦(f U4RAk`iA)@R8p:_<3-17`B{ψc=hzȠ(㾳#uK7ƞ)ȉWwxȁ1|g½k{rwi#?f ,+7K\1A9dva7< fbaQÄzP(:xu$F?iᚴf&,hz9D%'3D!y% V ""8 l9f=fwsEhҢ^ !hkS5W)8PPώ ~l>1 4+y;$ K)[iJRX QIҴBܛh^5P%:0XPRPB0F3FdGd#1 LOHR\\Y]Ԝp5:a`EtQWm8ϦP<aPXa}3Ŷّ9FbH ZdH)<R8LC W} !dܒ"ϧ)*\r l] ͎ɡ;g߄d0 "z4fflc;Br2n׳d`,ؓC-}ܞ9SPJrcOacƥ1DL!yK@O38"" 8N}1a`mf60oLaٴEpLI}Yd JI8<0>4x`їcOh#IJkVԇC q ĉENLN^8vlgYKjvԚ#/w xDŽǫcYrL&l ٶ@[>]`x̀c0=[mL91nOzVWC؏D!6~b01 h91vE E$q{˼Xf0vS BտY :q[9 1ní$tq#$ oLisc/V,ɴuN8[t AԵ zcsfH9abȬYg$+w#Zψ)Ò8xF`<cH Ɂ#")odC 8Cuvd Yk8pDbeCIF,Qq aӖ(XQ"1H"e{E8puݵcƨZصe)]SЖ~va;fp`H!B"@ H MA)HA)40չt@xYA},kr<L!Q+K@869rHNp&#U6d0 ]5|~ &Y rR NGKJ r? ;' ~+D2@D˹d.(}K\p2:WbduAFJA qI1U -*ɭdAJT)kl(YCpeĸMGXe+Ly4EccaC#SH%qp3̈BGdQ RPJQeM2bhS-%unbc11ٜpda!baQR07mXTӁ'\8y &زf+Vl:0nR& d0C3k3`bkY7f~wcA8 (1}epӐn e0#Eyg(6HH+#yaep2JA JI8\l9Ԃւ 4챆-+`r 1ȁ{RR8 Y.<5[,[ C]#;mc0:ʲ #%C%p$`ACڣ$C %S 7{8ź)R~C -^p J  9R#H&[&86V#Vo UȠл#}G]āq0ET!gh#?Y.78p焝ٸAF0,>(dp9L0ɈEEGdҭaDc0o#f #!SŏiA^}D0# $qp~[7V2x,8-Qezx:efĸ azo:vO'pc%Ӊt=ho4#MÃP6[=A jo.k@83:[<̳ՃPijn' 8phT : .03L w~!.[]F;1}RB0wa8=&gꮰْ&8T2pgNf8b)ӌNbfAJO5Gl,s ]SniV7e aK1pxb#mI8] @ ruW6BLrjanbnYt ,:|)5IȐG&1HH" 'ATyRNHWgȺin\wI: 4 :#zH!?R|i~1lf#n7\9Ю-: pkk̺HwAtّ͵q8B?a}@0*n0"PwN[s10ȁƘ_& Mob78|>1,zȾǟ<̝w?ҳ; mgGmLRH)旓nzW70y|DiC9t#  L70!:>"Ro`qxΔ{F 3f 3p_\=ÝrN!W4l=#Ƌ3GQ RPJrG?I0:ZO7Z]hi3э@xp#{p#:F@ H ȥy1an4}eGi~! ӆq཭QĊgRbpCȊ-C GP`b(8(+BJ:ޤDdMnUAP=UAqxRlqer UAê NUAGT^~gEnhHUĀ-ekanhv]y07g[`=-/:v[?M p_t. 4vA8? 
(_)a^aLfjvIk9yWO_ڎӚtrZ#֔k^ 5g;Dw;O[nwf۝vw3dȖZq +}F)Hr3-o2DpäΪw<$'"8N`I>R' $q_<1<o7ŦI+Jy9c<Ë$eOx)/%qQ1 67Jm-`X5Sl; "p jj#5zœ `{ݸv,qu/<) 4NDj|'"}*:ho*JxEQL6P7GBʁˁ1 ]^Jʁ9ˁT"x9jGlF0n->QVQv@Qp2(k-oFY}eߦeFtl}z85#{Pk?{Zu յaO 泏O)HA)awJp78gOPT<PF}gq^U#ȑ(zQ'0AX82E[ce$r$CCG9R Rx(p^T1![ }|^ `Q""8"6לgCRXPN8 Ȓm^|T9'cD#:G8(Qnpfcа]Kn N]E$V4s l+r>4papGFbXnrKOY ztRsT^e YN#;'#Q||c9{̽ޚ2Eׇ$984`fC6 S@]5 f9K(bZ.@M "hA9ILE9.o!Le],\RPHrFeJzt q'Cq|oeZݢ0Bx ^s#!b"MD0ǧ]V1nSU;t%k2sOKʈMEGXe6Hy8) %^C:X\-4p;QV$VdQRBz!<1Ns@J#cBSN`@sa)qfc#)DFR^H}&CWaV1 ^JZ,Pp m􊱏Q R0.6sM=*춙+6Rc@m3"`9m3vEp Ҹ#YvA/43cED,DM7%`wGBt́pD7cGD7R 4!:<f^PAGFFf1yYVgJbe@c ͈`+1 |3ffy%N:$`<<3>Z8'/0trCrMMVG/YdZHֽy705v5H@2c.v9vrla+-왂R8H􁓌|Yvu` X7bau!ٖ >IF)HA)mh%U{A{YT_-e Ya/O ߓ㳏(O)HA)a@*V~^χZx3WhQP2Dfx&8;ْ 5S@HAxUR10~U[V~ e]Ql2 +pp }$ R&ew8jR(F(v3=S4+k䑑J({1)B`(D9\ @ef7d99rRj&b, iz=OCs4"Ŏ""za8۠ʣc")]`1Q~=ތ'8]@ ݣwм`ǭmЇ߷1C=2źуfC Lw "mW @&<2#UCIg] ٺJ!{Sv`XUlK,cx`0iJBJ XDlu'oL8U@  @6/2HC"1=l,Ä2M(7Qf(G"1=$>"zRR8ˇ~pn.bfӞ($Hȁn0JkPtW0/jQQ 5ziX ʼR 뎨^/ 80 mb+MD3Na(S~\1 KHg՗5`]Ko3U8/ -*rp }!/mBy싻"ކx縷7!#\oێ2 S%=/x<4lͻ8У]EDŽ9fK2/La հ lbŽaLH7PjoRR8 #(^!AjEփrP^jǑaa$E!ƒ/qD yL`$Y GAǠGA48\E^E.gcbۮ#t4Sle\$>2F.Bu";u\2:5=+<:V2/iEA+rI4ϗͣ "dڒd l.d8k [$aӘ?4]4Q1Rz0h՝dDIVŝdE$[-)6\a IqAvUϝgFl +ȱ;x!r!gk;Gۖ`Jp۳۵)F)4ņqx9wݞ+2-.bΩ9TT$o\T6Icrmh`"MEc2DeF2#0nCe`hhq y -,*zEw8d}hbrpؗb .ˇ]̅0S ј4&χ>a)0[=-q)WѰe=U+& /D'@jt{]wخNi4Gf A+0xw4&V$E9v F";ds%\H8 S24"ReEzVSF+V%82AEp"#iQȑr8eujchULH;Vlυvf]F82`.Z]#emL-l40es. d'kfdS#N~JiZlӅK0c5Qؒ`ϸ_KԽaz1bܜw-Q16%Ej89FfF5V*;\hcxCdA 0!0/# N1bS̎₶Aj]j`^U iG`WLms>1@@ ~C Ar 5 Ԕv{.1gppu)b 5% tml Eŀ_\.2{^2ǯR>0!1Abؑ؇?lү.*am314:݆0#E4j6P@}F)@rq?] FbvMT3ϩSh.+uJTǵ&\ܳ=z%& gs˰~$j V>*LVsyI$0^aX?}DIJ&MH/05$7aa3nGD8hb1#l C@}8'gNvۣ&xalҎp0V޹Aѵ5E M%r)>آGƁڅH g+| վ}Sf0`p!{!:D@ H |UoW0fGt-8 Ȯ#Sm'W0fgQte J04 0TUөYc?IcmIc#i04 "4Q;ȑPE0ӛi4CS;p ĕSm8a}0+u0-0JagW#U7/~fUWnU *%x5ʊU8U]$`nTuz挛xU]v;lvXhw*rְ^9V [)(lvR`K3#D!$qg)>)q}^煉zcNa{82Lj/p`^1JA JI8l_/.DH3T]TYu%o(췾jZc$& D %aPZDbܞZ,"Cq8,2LSWn2Ĕlܓǔk%YdФ*q{k6NȪb>]VHp Hq*{=IǠ$u-$g_'/GI؋I W$x&Df1^N&ɽS(88bRf[ÆdeG*)HA)aG!a.-S_q+1IU;O n%eXp/>Rqգx{^q8 ^✅=a!< ѫ$Danjx$ {+0WEJ=Xi=q2^^.?,\~3<S X3źQxs`pk#].?+vA Gn Vb\pr@p9L898K[V'oB /q aWW'+v.Or"pjĸ4Gj ˒6H1WLj̒g94Úy{P[eb9,qF ,r>PɔS)Ɓȣjøƌ:Do[ 1𐱹_Rq$K*eKFD!M.PLU=n|9a/AKr=+v@yM,?R\s7xFB8H leZ(ɽax`90(]۹qF)R1FAR B9ot븓l"dƢ'p>2֣ $/D1Vc0,afZUH m'#ːvd1v8]>.nnA *!?-Y?_[mԋ̿?,x٧"+V0ľYT1jХ߹XeT_ PV 98Ua~r`e))^a}{HD RC(31yk$1,I,$ L%(p3!Z-2ǰ>?]C0dqI `MF *rх.( $d0⣃F-%94j!H?h\+J4"4xЈR頑xЈRq^ǃe?]{Y;] ר;`~)eK%:H 9]~ |@B:tP}[k}k}6hꙭ"nFFKuN5Q#Ps혵¢ o{gnYNnͦgmEߜ/eXn&mnK,D6MltbЗ7L'<77}Jp\_|y*ũH~jI7_:}խ\`u{z~{_݊hKo ־_t)_Jӫy?JB:5}{JB;: zZ#]3?|˫~7#>Wdʗit58*?>lMW9T[EՈDW?E&R$j^whIN%u]t*5u˔f/}AEX_$q뎘*Gj(U.MˀTŮGs_3i3UIN `|jU+HSB\⽍LWגov|bptDOHE`BB0{}γF.**D{(,_-Mv-,oh8i^6X9)(9됚d`jd_`d2 ^vW~SBڐSc&iA̤4},-()(%9б}QT:#0/UCX 8 m9c„pH(./ʹʓX*0<:_VP9&|FIT|ADu%k>7 KNS§-R.5r*WOG{dyuyՕv^bj.8k#>ja*,5S3P5)m 1h93PUm Lv.P/V^BhBɥSkпWEf3( ~t`ͿeWX3,o`1sL8YD]T);wwoxzz~ߥ#ZGwӻwO]!ٔNeo^~x%ղo!X}yG.뗖AߢⳄkeN}~j1yKv g _3ohvGf]4\X.iK&_7 Ozqb,=OG=eR'?<*G9'C~8 Vb$hB39 8iƤK[Y55 Tc%jL M8Ɏ--*]$ToopI)phx{oXb9%p‘(?! 
F)ܔ7e80eL2r(S)8uɎm0|s3$s33C 3 niZi&Xi&xii p3 Q[6nxȤI 53ri 8Yh^زz9fW.zS.5L QW )b /s=f ܽН1?<[1tl筸j_q77q``8?u+%}>4kǻi;- &N;`?}]g+k>|:N˺|w:Gjj`o%?[ؓ[mE[u뺃 < /稽..#MJua>Ώw9~r~Dއ$fʋ{>5ӈ'^=gi(G@kTgW~ [/陲l׻wozd:yw/ (;_{lYv=1ACfGíDW_}'V>AaW=LVZ0^~ٿW|yJM #{@?K_d_br1Wwr?>޿}޼^e;kq41i;7}0Mg /* NJ NVK{$T:>c0|m~iGWiV˰^cmቛŦ\A]^xgenioM&oCr=W_᧽v7_ 7t:{󶜿EHYە-98Ew`{>2 ~꣑=tY~2H̲7JépJwӅEo۷io{a,g|B[.y!S -ku{Lu_듖0&[3ҹąf۫kˇ\_̢;g>R&$8hyz8 i,K'tg/_[['9YG7?0ܻ{~O2;nUג ߓ!-j='m%y(LM]DžV_IÃ/,`د̛~l^/hS۔j?w߿JV}_0^C_\nkx\_A#7/Üj b ٕJ:ڗ^+iOV5}ʼn c}7w奥^k/NqJKK?ܵ;]$@;*b-W/tc%[H3}|2rx]V^ԍJg?f&]m#ʹۆvNC{?/"Ǒ] A$~Nj+LT(翑A}mKr,rQ8O< W.ȑr^|=Rԭ͋MQO7yO}&lۊmպ+R[ױr6ȭt}t鲷Rymدy|UȚ"mTlà@Qڷ0^#Ƃdg~:ҳ\?_:zɘޞ&椧$W1qw?Uⶈ)NI6»g]m_v%޽3B޽ ɬy&> AQ^1YlOB7 gszDgvm造9"幎̽m_n[g>~Wɺ+WBVtk=t7U*a1jrEVOW}?l;`^ޱDW[۸D=%BD3 -)$%[<,\ŃfrJ`ӟxD 5c>?7&ϡF'H?|::єFn%kzINUX.N4{_AZ(9/~a+W&lwp+oO(l%z:0M5Sh9?m+ 1>ǭA[(vS~,:ć]j99)n f ?ɼ=$l?K2 mS LtL7.;WV,'l2Vnj2|ep}S]w5mr!܆nD@м뙿wOI4 zUsK3f(yrӖ~.QG(c7I𙻤4m?qe V7>O+}_Ç7t6NxNiV_T7)bi%,T9a3U5v+woޒe߿Lro$?{r?y ~/ ݃߻4o?Л]O_~[9SI٧zٷwOTSm܋#֑]b{c#oO^w;pxz96s.[T>|>M>5ڊIy:ogzGK|i]n8 ;|w틆׵`t.y j& \L/dnd6.:yP?>v<#ާ?1` tc^s˼AMR N^^v:N2;}$bU|9(utt%Βєxc8pPVs2gz*~N4qK7 O~!KOrYܤDPK^!pMXjsVy{[uiPesjicxȨ^<{ȳ? }Qуv۳hHz> ['( xwwyaKgv w_y> ˮs6^y*[ZuP D9hr^X`J~ڟm;ǂ.h2Rо6 69yqirZt/=VEQW]mdz(u?oq{x`Uekb^FT/kdAf o֝m_bYEF<CQendstream endobj 394 0 obj << /Filter /FlateDecode /Length 12783 >> stream x}o%7^y0A *;). $'" 3;d]W.|,Vv[cw=5`5/b/߯ίlte׋5Zy+j.fsB:f.^_|wyW'{/_N)|~d|?f3p>}j{i0S8S啥5!~whsx?7Ր;Dao/|\vgt4=<7dhcxZkGL᫲W.eoOo7WMq2!yZЛ){?~q2}7z:'ˣ1ۛǷWiN~)Y78oiiC>z{lM B˹МS]4 m_ߝ-t{oN6hG߯U-Г ڔ:cYFLC=|sz~Iə#u/AhGVNo%qu!quy8^yGju)fdƵ- 5G bX '?<<=(Z4Ԕ//Fx_[ w]m7^6UC"Ur뻻HɔL 2 =X:f~IRL2wlC;0X"|Zj^t]~. E, :Pσܛn^q/Hq|"Ezs"}߭><|,c^hh{EkFD߶]) sc\t=ק1Dz/!o˸@LDkM{+ʗc z||Cխp3OW׏Oi"WGC(h%6s_~뺹g$.z2/$7uxX99''n?`0MycrCZ 29Z{yPgK1p?^# etu?tgE OPg"LF ק^Yk2ȷ9oԏ'~2Ϗ|$H>EСWQq?>Վ[3dG?z({OV Lx{ZGyו^1){.Y}u6nh~B{ǧC,?: r w3]uoVj@! zU$X?ߛH=&AwP J3TTm7dd>~;> ;>#C*H$#ߛ{\Uto3[uƯ]g0y̹߿n/ F_83U_!$yۋ{xQS$ k:'fu~EBrš .yq(PK?cIR-$!!fnb\&4h"<4$k=5XjgNɑ3SCl]2ih +@ C ;Mi,~33IځdLz^~|aLˏBVA nqnx ҇(mC g@6$bӎk씤y 7tN杲,=< m hGã,-nDg,}=}fį`I5Wr(H%+؉]U<%4h IΆXtL _*ڈ$=fN"<'y C*Odgj1S ޽}Kt& ;"mGSp~X'V-:FD@ D$ߥɕӍYD^N N(1b/Ni:z4Y^td9LDsigN˳:LC4'DhB$*͍Јm6)YZw 71Ų ) _.ٌLD.lVm=)-@_WN܇HSv9~NDʖ7|u46ʥ&( Z p$yA8 NjΜV7״eH=Z"AS>z9>0dK >(9xYq$'#-QX!ېOt,OQ3Zp#o$vƧB|Ey,ky-6T^r=%g z*3_>|^mq$ofj!ꉕ?n +N΋/&NDYc'/|a ;_XQfbfA#i-)sPKJygyN=B4k͢6Xb #jRaDЛ:A ТC9  &Nkd-w;HY'([PL#Q_!J B}?*xp zVX#aeah#D)ږj|/K@V9=r2!Уrut( nK6N$9#j/$ f9B!}A4Br8VhnFu@Z@>]վaʗ}0Mթ ڗº, KtS+ 1 -2!z8Uup[z,W:`#+P( J `PZto(}5/[;JŒWiGϝ#;,DB|33\΀Khm&r3-ي1gfd.X@XZl&#>^}{lEǘXՅxD}قt_j+Uu4˹M8 >%e7SJjqY "Ӧ"VXx[LH2Ot욳PbKc"ULR+O(`I(h?8<#-)?#.dޘ=TSLe=m"Q0[Ż1q7WuaQ ,tEm /.z TL[@-RoM34t/ sY!h-VʣD9̑D57EN498q~3T4EgK@bmW231)ѹ҂k9GͲME@C|U.t۝0v <! ih15TPXEd6M2SrnA PD XAǥ5͜:8L-libmQ"byD9 p'?F=ȱ2T 2Ay&!`!,AteːUݐ]+\sby3{3Q9<:G鈥jȧ"::nmm>hC!T2ynwJ)EjֆWG\H2C˕I:PDl GAնu OCrhϧ*r" GC{Ⲿ$PP'A%1o ݋Ʊ^S+z#㦕};p WmV$l$))D?x' X} D(%r?1PX]UaA{,a6Z%F,h٥sIΔ@ /C*SY$9n?] 
E(p¸Af(] D&ɷ[P%}#2Z[@iiy:*:z+*Yfl鎔vΚSѵ&H{ێGX׎V{uBam֊5q{" jqu3n~> yJKu&lh?8MqEtgIVdo}@fKAľvNJ _yA9\N=6Ȫ,s^vSeI XAldYz*~f*:Բ &m~tD";@ 3Lj\ՕeBƏ8" y1N#iAضX hm[Wʖd8jcKUhDV2Lxa|tZuA 0S8a ̀Q]'Ԁfܸ:8Xѫ[uP}i 2w9O,~+#oFXc9 #MnN4)7ߏj]PG[sޜyL߿SLBWm{#p6Uϧ{-S9Z{aѓ3k ivaէPcשz zUBu]ҲzBiKڲUUZV IuJ˪W!Wi^:FR]H3`q udG\+ʣF$؁1($h=Si$q.si.*aŒ?x6MR#G _5X6%gg4Cƥ*٠6/,|+fkie6OVJmO+[Jmneۤ&DVke7MêRqHԇsZv"s#YYءZ+IgC)cpX A"iZ9۹q m'Y& mrx-x*{+mglkvV q`N ^FuȂ7lZq-pwlNr= 4ϖ@|a ;_š/0Crqi9\>iofjn(+M$iiձRWAWRd6Z33_%TMrVKלoZᒕG6v896IbQl\bQf5}j96M߱P9r|88wMJAFBiD|fS_2CvCPgBO01bVb+HmoΟ,9xm(&w"@Y WՏ80,]|*|]*ĖE cCL#3ω} [x6uAn]A˷_R[+dCH)z$ֈ"v̪e]3iS+³Jִ1PO**?GAX֨5:+j̠6ZYBYۇ%՜p+Dez{.>vXk.԰'lnXk58`YT؁mgfbZ^H- 84?R6jj*StA3KFCf+pôli" v8$\ @0SpN F_@m:v.t3La F RHQ$BBkSsCT ۮB750Rlm)Q)keA9$$'QXK$p.^,-Lw9^ZAdjy0_,xjtjxjMTTL]C)Z,4@5RtXl;9>4@-Qn+ aPT2PͱvѢv"܉OJcr& !,XmAuXmIDoI 16:|1 pAje¨lQ3ȺQ9:wG %3F*b::jBƊ2hѣ ۝RvJylyyߊ-3\$.ꢙ N_m oh1y m]\B1w0'NgSBoQd-TTe~6سdxz[u߂pXPphG!,k5"pïQ C-artF1б A^VbN!gup{Q}hA~AҀcsfH9$b &pVI+?M l;3? !P: b"3@gY$n?UV(Kns,tzB- `|(rɆF\ ]1F@iAVblk>;;+92[*ɼs!j* t9s̥K]:[ہYL&ukf2vvIg$A:k<̬PYn*#N%ͩ:{7&z~18R'W@9 D#U TmӠIH:*Uj)ҵZzu55{u$@gRF [ǁv.(ecY&bAհXz$([W5[=v=%u9otK:IT ދ<ȭf :-~Ok:X֫^_`Z֯ezX֫^_`Z֯j~W79>?_\?LAt7~_^/A =cvϧgA#.;L?~tL?ocͮg!(j%މi{Hk)wA_=I>k=G k8ۡ|Nʥ{4p8|^P90dT tY! #PLwF}ydCE/pg _f-ճ,UlLV{klWj%f<ֲf=ezdT kn]vK%3I|=ɚꑅA63`usNC;0

LrGGPZG%* "ܥ ‚h^r讃bROT/jZr8IQ 5?ƶ>@Gqzצyq[LU1>Xig…՗QR>Y#neOo^6 /< ʲ /o!Eax#xko\ϊC/b@l9ag;sA=b5_Izib8?i^ICj9XZS#"rdz[,֠JIP cW~௡dk횐M}| &m`$vS) ⹒R[]ĉ-9iI#&1ҧИX"Q&j壬#zKZ^ņD @Z:-KsH5gj9pR+f f%kh|Koc]NG-oZ-#}I9q}M(.ݚDJ!LDS0:ʕ\Tntڞ}-S^,ez|f?[gIEfͰg}¥D+g~O87|I; o\ĩy7_D4 yN;~^Xr|<E% zI"kB PM&}摀_`¨ ;rFE5W} tl_#5/GN_[d*EIn`Lw`jه Gi|tyDrAEWl_)d< NE]emm ~ _OBo4tљ"EM.di"ԲY/ȗ[Ga?eTS@)(.ڏ| /2T|6cͣgPbAiQQMˬ;)N-vtKMwD6 dgs(il# YjTrgQPHcɁ$VĒp|2Zp<ƣ WMZWͳwjR* jEȻm> V\N-3@)H݇(y}h#ohejqek> dgkm)>[0-fʚ(<ԦK&7@ h9hm@,e 粥.fV;Hq`yy!p%ׁtlA^ 0H"p.mŒ hhcs ro7ui[+m8Bģ91jJis%(c,F A*H#n%IT FIXPĎF+۝2vR*6q1HNIV4jHOi~:44bX,B4;4䯥dBs54oOca˻sK8:BX,X, V |\a2 ڴu+yeZQQ44^ *tfNcQlW(K-T-44vi,{iiQXii[`V<]pulF# 8m3(N;@V6Z.h^8׆F-Fo g R[H `@M6JnQ 0:IfHrmt:Ni=:Ger@::*3A&2[i̟۝Xvby)hԆ_G,}BAK -YZ3w%w`+Yh 8tQ:j"MNf0SE<(񰊊=rBOųg,qh?g,7Eaiʊ#.elw4,=/a?]`psrFA a\`2aeyiƸu|*߆u}J ~5;\3ry;ſQϝU\3z<y5LrPh7ҌU"8Ҭp4 9r;UTiQ- :V5m bꠋ܈$D-s 1hV[:g Rm/D]"u6ppQeV~4p-呖h ? > ?-Ƅil!@ni}Vc.O͓JO gO,t~&.!d P1 o\`톉k*mt nqIH u4)8#5Q!.RE#U4(^NJImt.lQ[<밿 ݯ|| [ Tɵ$K ,* e@S+ O(~h oUFI*ItH fW. C7T_1P}@<TI?T_)P} 1qP}?ݤb".2c/k^Fx1^_r6⥈}m!ڈ۞yay*-[\|v:\|W?_gQ}ϧ%k:-}Ok:ܾ֫^n_pZezܾ֫^n_pZj}W7qrΟ+nOŒX}|aK p4>vϧ~.`Jm)Y\Y.#HYF_~{4o9<ޔE|"- "<|\O?Ӻa>/#}<=lϧo/INd:x:?տV㟟]:-تLC}Ox!> stream x]7&xޫY,fvKg_]=LɃ(7*YvGrIꩮ?O|d˲{9/ CFɇd?lwfGwyG|8z'FGhI}w/nkH۸} }ٷ~??G[5(woI?z|eJۿgڨ߾qXr%?|8ß̔r7J/ꮖJiwR}գOԚn;ȿǝlRW;߾=U[v?Ow,~cQt8-CdPq|fz}'A+cE;5/տ7<=/G[=n|h-0z[9rJh3T TTw^gq O\{[iVn.bZXX*\I/F+䵍.<֗7s˼`T_ga6^/ȫڲULCJ'9W'ܵ:4v^3J^[3fFovn)Ok{jo߽y ުlt^SΗsewLʞ5GY|q{l鮾чA5,5js6cǸ>hKO@ho}=oˎIݨBſY⼆r n`kj؏WAWؼ_^)Ӗ&2FMO쿿P]ͶmdQ>WvkQQ2іn7n9T{j-dPT^2fsVsw1"\)#}5i^C[{dU/POL{C_Kڶo,bm//$rPf :꫏Ciu%-Mv+u+_>֚ ˷BWJH[RaBe^ 5 ^JD8Vo_*ڲ6JSG)KR^׮_^.I=DXFgm:W $wؘc#Atz0CRf Z2ʗ5ƱԫmvLxR!Lc&ЃrGDž,1C!O]>J/7 zHI~SҦKO=E@Q]]A[υDmz`7*{ȱ8V} ߾{޽TxOزt{!*[mBo^ޝPh@/b6DOZYËK{}W/wϛVNgw|_q+~ٛ7/}/(^Ca)OrE",챿y# sd/շ l8o>*\8Z{GRCRy%#oTu4COcBmh%~%~L$yBlo΋w_KxVG:WwTN8F͂]^7<>yZƗ1|d'x4Nm4/ {#LEUX[3Ou6H|k md?DR{QK ̃͵opm6/fLuw3 ::㹼gt ^hh*K/t~1_a7^9j]Wq+D]*ܼ}6SӔr<ھ2|;w")8r:rxWv,\UzƉK) M/"K) WNՉK)3\Z~wlh0fv= njI M>R%O$=9 ъvczRif_M_R7^+큘p:V YyPnel0.ifP.Qx8F%,[}'6@S VMJ]]ݖ62{ε@cѻ}}+RRP+I(\/Dxչ9)T!CAQOdHn?2%%P>4xgJ0nz ah 6TB{*afzZ> 4)|5 ~>w"iH{E+^ȋF_BV~&T~PŖOUT_JWt R_3 p JXP5_L8jk% ͨ*/̌i@U%̨*xUyfPwh2TsP=4V3 ^A ׉Rz+^! ?O4}Ꝿc*TK5L6c*T lcT8PPy̠@l&P$nT>5H+^:G O4; T:b}zC*}JmJVH%/] RsVTj { H%L "ÂC*:R]!H5h g IPz+^ - 04T{eT8n EC .>$XaZHN%@) 38\}|9dL=A-a(fSEd[6%h,sXyr(j 0MCd&fU"uR1OeB (4AѴ fCxUFN2i20mTsz<(bX@`{ė)C-!]9GhqVQx=$I`V 9=C̺` Oڨ{ʞGЉޘ9w ^fR6Lƃ!e 9J!Q;ovT~ Ťqrf*瘏f4E@>(~B); -ڤCZlq*I}C0|i3ZDVѸw;F^eؘx$4*}eVO(x H萲[b>>/:?8K"Ggkd&̈Ml424b8k\N!{  f}[afIXX7-o Ka3aJz/t,_Jp3CY?&+G"ׅ` f X3 ˕8tyV}?QdD-B|je6t`98*+<^V X TpQ+mh}|uBBvJ1bMWjb1 ceoFdhfqޜ?$hgSW O FE)[ pi܍-΁ S6|pͺ -s6 9=CZ4tN+g5)}I'a-90&^IgPdtN:|VۭV?$jg q뙑N7OQ+C:z &EM9.;+ ~(<=.jz!Q;gِլ(q FvŖ㨖(,/~(4G  IYsz:hĹFhc>ko-5z[cP1mlZ f X3 Q̰\sn 5$ow\5e&,M YanUD!nb$r,=5a& W` n X3$EEm)jN=_l|qRecs2h/kA\Z:78nQ;o-X|3[Ve[ۓ·Z枅>&Z όf*0j'$ht GcFqlmruX8֊lU3fU5e&,7YfqNܸf&7/\cD,ݵa& a n XӳH̬em#ݱC6GY^:u jwl4 0UG~!^)N;%߷j}RPd{ G7( F )Cd3- ;vپhu{*| VmI&]F[V/$d 1xrO9vLRh%9OSGZ]2C>MU?am35THjFS:[6E%XC…so)s;<[9ڡSs#MMsb\!F6}(GUvkn'#:nq[9xhmś α-s/P1y7qpE:q;g VQ8aHNd)kI0U~!^bޕփEѽsC:Gx7KrtA$Xmg{Ww7Z )CF[t߲n܎}y';T9G睂 iĽWpv*A?i݆`D씎!bHUNѯPZR|=#ſwj!MM+q_]_w#sz*) wuX>Ǒb]8uU늴KtDoVۭV?$jg qyQ̲XqZ~k4if\Q0`lpj 9=fC= FC?,qz\#~WfAblj' |z6[V/zbm̟.hAm2 wJygNgvn3wzʏnh4!كZ6";aws%L}h>ސd"lS~CA9͆K3I=݈ &А4Ԓj8 Շ{|u:f4-f{2Uc1yr2;*! 
ǫL AM29y= BJ!M#"Oj)C2SҺ'-\Hjأ% s+G&V3͛l^I9!8̇B N%yA%߭y N5ɘIGPۏScIt lȄzM`R&LcT L5uLu펩fC*|H ZBT+ݕa*^L:!b*p7LES SݑFꘊ7S Ry!^}5X!#aT ZZ 5U!5 SJ0YM;SUi+wP=3pNkhnUa*ua8GZ@〾j1HwTU-~*yi'T fTtP$VOsaR>b}OӝWD"QzE+^fz%/Re.j"Z8:8֦yj!y tm ""#ϐ ]{5H\Ri?-Jq)߃ڏ*.bZԮχSK ,]K/.¶iVNJiHeՑ2C&];RHiL i@*'>H_."<銣Ѭ_fWE(zE+ozn&ddٞ3 jU B!a(݊?^d[K2P檄|tn=3;EXgH 3G#7v>dzKNF @%F4R֚ aF |^!Vdj2%j N~a\/]*&rl 9=CpxfK64:='Ӎ# d/gAaLLpllBbvNϞª3]dz e f8FC}v=ˍy.S12s3Z+f#vJ.zY4y2c4vm+]1a*pxFG{ /$b 1k?[/:? ckBg:5vd!7ZnPdj@ַ2Pה;V5Dr Q%uV|fJGL@]Y?`!7.iO/H:fҐ,n,wOC6;@&{Z 捖NmU՟Nn I\AT'["wC[t[2+ۥ5 |a[=/1d)PyɊubvږ~2[7!G2`eVPD&THY0Z%,^pghF1I:2݁s+ȣȝ[t"iFE]SnJX͗X%D (navFִ <5HwU*240_t2r9w?At4]m{'AṜ }> e_ :_Wze7KRyrl/a1{.?k!nX@!DH?: ~ȥg~$|t|jy$>]J: UfJX͗%DoN[/%Hƾ⭖'nʞRp/1:Cܶs?j%K lm d,?*1=e5QdZ?Oojg0SrDD 2GӬeE\VcI?XMc%.ֳNt.@{اc}nX03ĬH4"jums"[y6=ͷEG0' R֪cÍV S:@^YqdldYWyo2)#Ibޙ݋b'\jc0W͗@ɟ0?APyb7/VFq+P|Y4Є& Bemy{gL*#֯V6stNzP[6kPpE뜞!f%C&%J}dYdȄ[p;4dr R^yFD*u 9\/:KLع pӷ  &1pL\N\O0pUꔎ!biLdtN#cGc Ir"C#;c"59lz!!;gY?.DZFqsL<8qݳgcaLߏez!1;gِ¦(mqL 쨙w Hk1>ǭHPnRKhMčduBBvJpSoIci-"l'wU+G[t 7%`-z[4#z%Y<3ٕxحInó|'UI beJv8hfn!Z5_ S̪ hK.n$x B{9sn;f 9=U`j̞9EEZNgiBXP.}ެn<;_OqXY_NQyx3_3h$X fn?/힙f I={ywT/.kӕX/ ĵ,d-)uilg]W7Oqr5\\'W}q5%=ek2|>xw9WE6s)eig&!, $yS"(i\% !Z`7\k }R By8ž޷bQ78OJ0WSv|)>,L_t؄眣(qhpqh$ꮞcWD9=5DPӡRivJo$'ŵFm+jٖ. l۝ݨrݔ$%|R&uj]g>\E©l˿eu RUr-a W Hnjt^8ֈ!~E5!\^rj U)B/7u{fAQ4!q%J,.?7ڐIU2ooЬ"o敍h"7,5|n$ӆ(Z~A.uoS%x9N:DP4`W!eeUe*YAZs9jz!;gY<{/(j{|ϳ}ů>?qtغ/?0(A?@~z_u6",ϔ T<{Ƕ5~eUO(G&ˬP ȉ'RjP+C4 UY &X{x9d7w ^7ʙKЕ5kons&aCvV Z<4CmW]֨4I8E.AW ئ'%Vg34~ |Bp\پ׏$ *ሔ;v.@? Fi/4Pgtvzsx?qZ{z̠Nz ~O| wUtD :[9JӦA`iiQ!WP6^L\Un2q唉kc)uil}pȉpи.Q Yr D@-@j-~Ww(3WNe]oe K) Wm'&&.,\{*&.,\5Ь*62*{F39}2hؙ'ͳ Gl x{;u ۈNPw .֥$Kִ?#rAM!rXR._:CR.'*}nN雜vOHAFI](7.zgI\p^c">F7&'њ8Hyvi /h7.dI{$] eȞ4dE;QAmydHW:w90w![FH؀[LNtN/IWH`WA j$L;r5$8S3*O{#nr|uor"a/#8Aؒc&g >`lA"Y6rV#/4@;viЎ6y~iH"Ypt0U' k)N`3cY;:~`hNr($a1(3n<2a)ܝ*b,%J]r2 Gb+Y,-#_).Qr4cʋK #k +81=+FOJEqї{Tn Ur8`.y6es:6TB]fJJ-5A9ǰe풄ډ2ȫ/Kizɝ7Iu]SB@Q]}39gΔQESҦn"3Pʮ2A103xSD9ZCJ<6%ƇߛrWbkX{+~.)zYrb|T$@պժ؂iBUkw V'wXMVpFբ1PDACՍ`@qTEj9qTZP5珣EQ@jF>u?U"QU)Jerq!P,b UMU8jVPUkI Lu0tLe SNj5BդITV!2$uHWX^fT 2$= R7bGսݕT남YQ%fj^;fHbTczH}TqT~Iu߇ l?._W<Oxzϙj_2&>P~Le䓶x)_1)%SYN8vyaSBYP\:UvT\-6TR= D[ TRPz/Tb/| RwO1HXӲ^ '캫+Dz2qUIY0J=RBu20+M'sPwdHP8,nhHAlcY/~(#KHi:5V[V?$Vg qCƒ#ϼO0Q%26$0"eOZu|1 I7^\Ҍì4 SOi(ާ~ܶFO/{0*e7tK&$H|P2$xY[]M(!te76B9B}|2^ugpbtd*rin9p82W$יf݄/2Ep%(Gf8J:o[kgߜb%C`wL0п:L5a& Wྛ*`BvN#8Qd jX²XjKX7čTkP|Y48gMv&ٲ LI:E[Ig:MVZ^7Ά0 mX 0p׍Ǟ<lpmk`pm*0#M픎!bOMc`v+wāy<rc`#\c`8n!A;o-;tWٷ:󊉢tofq*aCvVߞ[D7eM6Z!t G Ӵ.I%}s+V n}^ ei]UvIm])|-ۺZfIX7mq{)j>#,o#vp$,y<̶ᘘfIX1J gOEB6^L~ܵ/gNQT;51"Fz!;gYmgN18W.{cf28m\5eY&\+&JY}C 'A^gљW梻`: D H\'ns*auBvJʻL1StL~ 1~1.7Go*> \\ 0Uq C~|`jp;筨V֩?K.c0Ld>!Nm`s$ڤ +X+VұrrPzX9Yvq VQjM R֚c+V 47K[tNN:8x2v4 rhC6㺀URL4h%+cC1SG` V'I*lZl'oLvc2eQD|n%Ьf)k[V/$f 1 tSVncŷrJ̃a2:tǣ ͔ǫM\Xr}V|^=E rF8:4BRJO/Kbt5T_q?PBԒP>HI\eh8J& VQs uHYMp[%~Hm`$MƼF2#m#I׭Դ Cgz.ˬPJI2鐲[aj XИ/īJpQ \%j»@g\>5iM" }MGYan쬾!nL‡NP1}H=V|\),Z/~B)_oBB B\j2D|^=)֊feVf)#fQYv+G;tC,nNvVm-x0?o[⋈}NouЋѧ}Ğw^xOH | r -u>8n!q;o>DW|ߌB8ز7!2'X2-;Gk`eс'_|B}'Rfjq*aCvV$ڏ%N9%nĖj!)np4=h:Qږ@sRrRc+ì4 U^"bN 7i||ı[͢l[ł";\sW̭03R,NHժ ǜv֑g5mWx ~-e)jI0MGxfVf쬾!n- ߏFVؗCNn첃ՒW43w`eQOE6jv8|ǹȵ6[V/$j 1;dx(n.p&iC.S2`ͬpnY}CzNQcBcRp=l{u΃_ekؚcohm6hV/$j 1IҎg>rѯ?c16%xw?E.:|[fHCvVߞB6gA);ߏK*PđdIavPq]"pv+c[r]q;o!WytU"&srdYv V6m&A[6VmzCvR4$:Zŏcs$5$xY8A=X*afIX1D,wYF/J{7D'R\0мOS({'Lu"Fz!!;gY4H048i2CPەI\Ҧgq+|7B||DҦ@Zslq*aqAu>zhf8}d8j݂fFPʰ-}ފkh^y{ں}7,Hv OE}[w-c~xCǰʁtf?v)zX%9H oZ"8J!:o۰?7eoYooX30 SYk,r1&Vof!q;oO%.ia&Y R\.1}K?8dF6 ^vߌb/JP̊{kgq;oOG|ӆ|zִ.y|51zn'N 0beܴc|mŗF>}KP.WQ5@urJp\yǬ)rU@唉k6q%\ L#<_%5_\!3I"rt}ϸpU\ p)eR+&ɮOrwlh ƇB*m 
)̿t!Me(/2-($׺(s{ܚbV1 s[Jyɸ?cg{{>nn^~Rͷ/{!H7h":t]^&4&]j^Wυi {y|?$~-$7̸lts4q6Lgk!3ݸ)Yojp;NoaGӛ|q SfV.FTjEC" y&. ۊd2v=Z}rV!Yh(`P]tK UPCM %B蚑nIXya׮4)q:_)\$]˛1eݕ+*~5`4Kxt (4&z8s15]~1W^[xʶ5]]*P'e+ n̔ӊjyvEV15ϲ!!c6]9$ԫ&'TGйaM_&OU^"JXB#i ( y෨]\ROfHV; ːEץ w=M4ko|8o{E+^W"AX_|ȫ2j{ Q}T&H3,\TP |-F͞6MnԞ UΟRɯ H-/*~ϐ`KKHUvHu UQmTy>DyԽ_Bj>H5!&L]!եΐ. +HVS SqLrLw0U< G ޟkg RVHa@j{ RCҽM _tDݶ NW- Ny(;؁v) OfX**}MvKgx (& b醭c17@ev, X(+jO7! PlTOTi3J3AjP&H]TU?=Pz+^O@9*8:EET9/ڋʟIzFGTS|@TGHDZu}UH3-@_VSQT&>A*S@jPRS,m+1cjO6'0E+ίOb׊9QzE+^%t|'ΖOeYAQxb+l;2컜2^~@ I -)3B{~v9g|n=5Ԧyf^akE8Fs&*`Fpˌ03$,^s:uM9Ҽ`n6P_H ȅalS#ւz$%5Ɵ3cUpxcó7xǘ4y'- ,$O6/f w̍03$,^p"- 1#X'* g-Rf;y~.ʬ8!N' \Ԛcw7Y_\p+ĪHX: &Ɠ Ҁ`] Z  `& Bϙ&`VKϮٯi#㘝{ά56$,KS0NlP(7Gk]EkĬ f s,03$,^ P'u ǟ&ͱ Y͙@l]KY6,7}/cNP\*1796O./8bt),:1+:+':eu j P];F8i/8bt#6<;f3'DkWk v&*`VpLpDwkAu>-9%s TV dGZl[u/ǡy:Lu:xxt&n> :W%ז7A'h s\&l%xڠ kb"af )Cĺ\9A<_2SHvseu f 0t;fFU3{!;cؐ20e?=Gن {Ll=Gw399d9$\ iMܫ!Y7WG,]+'W&@2 ᕚ`^V) zX{)γZA2~GŠ]hYuZe[`fIX\X+*W^h"ɲQY?je笯΁I؉B\6::ڰ8n!;o[sj^Bm3pm]nsRֿE63+!cC)^Zi1v`V󤂋Zoln&S:Q4<= vmGT::uF^X/n*o0,>HN} z?lC]2ֵ1~: 8J`e?9A@&@GOfn& yxcl4 H joƦ6pôݒ&6 ^6mB.@6F8i/8\t gxYO}ƛB٭],M-#Q&6aj ]hΩLK\S9ܰayK7sև\A.My$gwQܤ;]}fR…ep;_>q)eR'>9ԟqlȞi>3KDǾy~izK(99rKqϥֱR KozdSdSIF!eΟPG=5mO!-Ub4 ÈgNO? z"pq0bCU% nn L!75>,wm] 1+"@EW>R GwIR3Cʝu^] 109^-* fOqUa܈O1fY 2,kywxR=& qR,',*y߁VW9!ʐvom|H<mk+ }r־Ex97فi2+hvTDt5dMV+|A!%z5Pp/4;PL91So_eYڳ/u78XnXld[q5!>\D2Fl z`}]Ðv((C!(MG0"X|e˦ 6Vx9UN..]78Ȫ-glw7ז돏yLP3 #u:aKNOoz --WMۨMJIvXbfH޳*r<<ϡ6KLzjUOͱ U.;c>'Td {+^A ?dʿL@ieTix䀊|uATD nzQqwnMR31C*>}Ԇ*3bI,/ˮڂ538M@`*"]`j6펩E- L{&Lmr\ØMG9nz`jQjosjp)F Tv莪YA59:&98&I*ᘺY=EK5PIXrU*W@2pPPFU8``AuGꄾj1jT,iFUQnŲۤߴjA"*2̠$8nTpMA5Q 1*cA5Qm+TUJ*yAU}ժeLڴ1 rOOU5aդ)jnXT CU*;OSZ+^1WLbS3UͬUdl+YsPMI]"3PۯTakY9noMNZO ҉bO[~q}X[~M|m]:T amJ֭Sz!Nu:[2[{cpn{`E~%p#?xX u&hV'$dt l =蔝szޡ):3_) ;5'!ME?aeHfm)kv Y}CFY9EEHM&_e`eX9EE.AM+5UfV/$j Gup" |eȃ4$-T%wsE.ʢ0 OB| Rkvi}Cܒ$aAMڌ)slD2ԒO:0^AKBMۤZwln he,n9gY _2Ԗw8+fɸƩIq3"ׂ`}|^q$Yf>xD̀yOۿ%e)8*_,: 4[#a:tHYk0[%~hNV$k*RE*;VW :Xq$Ț"gv-7ӸlE#~vkz ֩Bj1cѦgExX!q[k .C#kcyH;/ǡxI>5@ws| ЙA|W5f{ar'ʉP܁OPIx'CqVQdH jtHA0Z^Hb[~Lɱj%&o(nf#!ʖ@sȄtppU윞!f,N=a,Nu 48tj9E 3 f\#wڪfBbvN85$ˌNNN{hǠeXU9wEDi8BnQ )sV 9=Cs1bpG.[M̖mHr$Du ШvG69@c'Rc+íT ~HS%k)(z0~+ '$ArL%xپ E> ZnJXฝ7-Ʌ9E-KvY·{+&}rf, |f!n9pDᵷ_R:~G# '«m!AҜL̸W&Z^HbVdM;h8w ZV8'eE2A6VQx u e<8jz!1;ghkL5.f'(9"WGY9:E[If:9avY}Cܨٴ]m|?)WWwc I۟Sӝ /{? !& yűiKPx tT|cGtDZq@E݅tJl!},nq&aBuJYgnjfev@I)Go#Px2Yej~h$^ (K4S?d !.zqYan!/JSd4>iĹO21L(%(2P >;6o/n_V?$ng)ᛕn$( @zHhHf0đ, #Nnh/q#l4 z M] 2f|L/H6 9=CJ] by}:_uָdB(_F1LY=Sh :WU³[NFt`du-/_V bfz:~5x!:gYrj1:(M5Hg@&crɖ CSw=gmņ`pU윞!f{W k26kG]UG_Vͣ9E-;:d[X19qՅI ʡaYNEGwUmq)8tu[jIX7 ۾7eu9DI·Q#z$x}3YfrO\羙ΡVz,~HM/NЋ3As1!"(% , pE…)yEPdih-{%`Hz mpnp AT AFpD휞ísFJf%#`$綑vJM-Cm΁`$hYTFh|? 
:ܞB*pU윎!bl$|[Ұqr<[S-B+扡.ˬP y`IHUHQm0^H_1ys^rǨQyIIM)\fCeE9 9:t Z!t|*!nJX1Dk35N_}kд,o=]GE?` e:58h:!;cRWlot (3@/ ddTX@P;Aֺ]7[.0 0 f Rcl4sz#8=t8q`,f:69r$xُmEN]=:ؖ[aHCvVߞBKl:mSn)P?pL'RNc֣͂0UG^!V!ӁtV{NX-VPe9n+~($>*5F8-V/$^cOƝP;d=g_w#m{ ~hoB`zgf,>I0|maU_uIUPl;Mdؖo6-4= v -:&Zr.2ZmrМ_{34-TB6ട6@ G y%=gA>ʩ1rb$SgWHL0 @[LZ 6ԏ [-FFb3nRU k]Vb7̺9mXKU(,-pK F(8m3t\;s쩴5&Z#K}~fݝ%~ɕ5smr4456PPbkaQ۪oylXRq0=L'rK 8@}N)/`hܺ YѪ@]C킅lnI¤|.:YYgR@.U/mYb4M@ZLH0`7~IFSHhxm1X'9MrXc9Q#gImDlOYl9iY  %ZFO[ppg!o"bޕYNPЬ[>OJoeș]3+3YӮY.//|p>Y B qLma՟W ǽ .@x a/ƻtK9O:~Z鐎0HD9 c ]3;.U/#{h>{V v}_>t]+'`^]L>nuUCۍ$~=@nj&;K,ɖ$xE9u>H [Z$tu<].A§k߯.\{uC\}Q]!]s9Ih)w2?u@a0$%F\pCi8'B'"|șEdf_F^I]SE'q]2Iǡp| @SG۹փe=^.k.so/ 1R҉j~0]ާs=c|zl5Pʡ%;/ygmZƘ_)VS(Uq2脵4b|VL}JhS݋@g=wmXendstream endobj 396 0 obj << /Filter /FlateDecode /Length 14151 >> stream x}ߏ%u^)yɋ1/moK ؎P[;;ӽ{QmYiBj.Wbeŗŗ/~_+*=/b.^ݾ"9ŅW/v&^7*Ҧd jz^N)de[ʹn.G2)M\+,>aЄM8lwWB<һû/sm{.7U uMnzwi~r=Fp_(Fw͟yj=_O1L^"M3ν^==ܿy4Rztxx6~ 2 g4'@ۦ4͢MxZD{}|ϵyR$܇VboK}k~T}+~~՟{=,5@+s3t~|9&wݚw8|u,^ ~I/?Uee!\Sj#Ak?9;5mk/&Zp*r-b-J^YlQILݽ~Mx}9,:sd~k `ʒ &(_|U*ݽy{ye}_gn_~׏G 7޼{{C:Kym]W?ډ/nݛߕvB봏/__^V%=l*. L?\Żg.L+0>K.{g|viׇwkW_E[+d=tLM"7FQ6}aM/LptReO"x'vO^ԱhHCi٠'G}'PVh~! ?1izB*/Z&ѓi{< 'I}"@;Q)ţ!)(-3u"}̹̍hdEpD=ki"P'ҲbfHqED3KuA !1pXSnwGDՍ.qy&q'H]&lSΰ*Vd^ܾ4kD.ߝ,Ջ_&Ӄb-w hh$[kC]Dۏ䫄\dY3N%&::0i9'j%LcIc<͐ -ƒ'h.d6gsf$Nd9n幞gR;K~FFy`xbm(-Q"ݖHtd4ML. ̘H< yUe%OY %7ew2`I\&~Y:0 lCfdYs9vPDxD22.Y ~f]r{)c=BcZd#E#JNv,t^0q{ B '>@CI`[»h I8^1CE tV~2$mg2x LvHW'mkisl7K)*žc@cz^MKvh[m6ےc eXK;[ͭ q@hF2OFDrk磼#e0^*$>}uH!Ų52$T'3dЌD '  *cb^ "x{,l#-s-Cȅ|2_$l- =h_He8jxB9)(c-@@?O(B8TS"tldZ.I^n7 D, ]2/Kg q>~G'd ѤD6xd'0 # M1"$4=dʆ GC qjqKNB$c ;^v= "f;`M]dX.jnZvP<*ms{}?͑Mr87oYNG§SpHv "ڲp+$NI؜\ϵH"Iɱ$Ij9_FFp u̘#hf6GEɒNpWyIJI4V]*KM/ K.y]d8LK  1*N0#K1Wf0%Ō2 TZŲqv8KYZ>RZN'S0qpЃDyAJC)A,"e q"S8C(Nd!H5TN|3|ؔ OxQDevl . H-yx_@xN M2D, zҙ 4̃8AحgxSn̕{}@'ɵ<g_d;1#8gZP'H+xw|qA"AGnlŋY(qGp0IuxWy0 (  `FM48O'?s*X?-'!o [9,3'㦃c"3|y|x9fqRK ?NzXb `Z>H&!d<5FCI#pA<`ZV`B$0,g腂uEB i;P!pr!+Z9" @"3Cl^eP ԤNT_4i{5n +IGg:,D1a}9yl8Hu7r'$ ,zdϲ5l90ΎBNS UXDAlՐ2"H$!_:gL0Np4G8(Ɍ5g/TU#ew& APg/ czΌKhpbtB:KY'ZokBȊ?\dL9T;ѓ"12f{2E[4Fd4fQfGz4Gt2S' c!LOHޞWG:X7Ϧ=]lPllL wHbEUm'Z1 -ǒQ ?Tx$p5fU(kUadЉ\ՙ`-eu*mg(;'u~%I rsPwm-_[4s9P< i2 g-^u~ )$0fI,p];;KY; @yNA) 16s 39;vAtMΗήvՉOɥO7 [BmOoQN(0R  DJ@#J4_$csԸ[r2 ,r+%`6Y2BUb$/Va@|t_ör|Pa@ ap?D(܄/SNhiikC4nxdN8uNQIGH`tٗQ "uHDVR7ԥ(}wJ߯w\ z~$ru}׹}#X.!>pؓ||uibM?bv#@a;t(>:gUH&L7!q# `)D#QJRIAKJ3`@;ʳ=Ӆ%@z%!c37$&h ("$֐Hz+ZJ iUq]pYWSdFEf1p]G=ž$ "[v& vŁE}zYnHʯ-9^#&Tl3w \ i?J<ӼצV?v(h̋]_ZB?gF ~&/ƛ. H<:q'&eNu&c*d B3VJO :A[V[qƅx"gycTL". Ftz),oNW&fɵ W[_+?״4k)3l"] Fm2m[i+z 1WܦP~)%2Y@BɦT^զT~2a.9tR ] ,,.02> sVa{;xg>?I=3$9-; Aғӓ7`Y )Sm O1BXmD 60W,jpQ_6&a ns [eЅi¨% ׫,Lʇeiò@uLòzU:, ߰aY@W`+,aI*aXaÒxҰa$2,^V* o@+VW*, cJøbÅW>i^GSK0}n.\%TT0shPdLyջ\dȕzKd`t>ҫ cT2ihqlleb Y̧26hU ahMu!0]CUf啪v\ʸR2 WY0g)9KwH T\'S9 &x0\Q IVl/hsTb<7B%FOJ%ƵJ0J%,؅JYM~sHٳZ.Q; !! 
2Õ& ^vQV.u;:\zĽ׬kzk\{^f8bɯ7} t!>Wt "Bp9L $c"#ptyH|e3q0tt; I>i}?Ll A}]eu^) +yY[|kl~J^f *yf)eɢY,.}k {=[C2 (S(c0 `e/>2,EԐ'YLa/'Q {I$iY0΂1á[1RKr 91:;TYbmW1t )%+__N 4RTuCEM Օtb%4qirбUV`=X\b@S{/V11g|_Xd^r!AuF+3}c8W1gn;֋$DZ:}$ wtw{,~g#I몷!Iތdz3z3XЛ9qlXpnUzEa4bfH ҙMREՙA6vU.v;kh7HJgq9Nj F^ߏԽ:>XJf6\Eڕm!=4DRuUdUV!a([ h1'gRCXhtiqIx@eJBOf* 'Bkd0vYČ|1-bN0'+ =*MaK׌r*}3*٫:c*:%>K>:gZlbC*FM;IdR1졕TŠTbSo_MIb+%ݵR])źfbwd$36 +gn2QUF|!Jp."KlK[z;I6#lZDk^ɶ+fcn8ر`qƎr.7^g( pqfD8Z,bgE8fq?Bb(L}8|ii i@B4q:]7?N삂hx0h b ]0BE+nc8Q!g5v| R8zَsSlq#;`+ۤOQ9HegGID?(~m~=wEԋ.8E=MW+t{>'KooPOԚ-ޠĪ5ƽAEMW+{jn8j͖soTS՚-h=^زW+6{Zû׉Zsevs@A۔:~;҉sKo7h/ҶxRlЩgpKa*^-pRsZ9jʹW 5{Tܫνǹ$` Osσ~towFߺ$_.=Ev~n<>Bs+ڃzsڃ M]'dr#zI 9Qq!$wer,=c¸z$BSШp n`J-/ ي"'Q ]Td[?RyLx@ TlGYy Գ`Ua ]=RH*(,])iBxܺ &rIt2X<|Da&2Pe[VMH|+֨z=Ͳwlvh{8ʦlpvx Ԡ<- A*≞xv+*Ӽ#*j؋|G!'WM=zYzy90YXw _y&kz,p=Lfi*W2+X1ʥ5RCZk՟yOc[ة;ΊΊa`HPYqE]l%I2I1NMZ VCgW~Zf,-`f䚪 $_Tb+0dHӃ֊i+kM2 ֺYZ6`;=ЉM` O5z;!?E2GDJ=N)(sxP9=!X"+cLiZh\L9T⛻Hǁŗ2P㦌|-,c)=X+ܒE |/ЦcV'?a$VA3ӊWSȺAwBnғw0Cφ* ǥjgr--gΔ[XGr|sk&NV P0[x\.P\r c+\-Ӥ̯ǜ]Sh ;Ǖ\23*VVgA; /hYi\Ⱦ&Nʺ(H0ʮ'IaWc>4j_(j؆[C`ґ­WʭqVn5 >5jt$LV̲Tn5z$V\7L% ʭRãr?n5phpi$Y(B aY ~@6#Ed%Pͨ**g{ېѱ376(RԊS+IrOh)* : Ѕ,΂rZ@܊luJYzeZn88[ԙیȵmG 3TkMT8/mFg++Ƭf>ʍǕެvOrž91# (fKl툩t5ֲR2Y!F!F#IUcH6hTX*3{C6h`3 Y3@(h B*s:MS:ݪ2c}}h_swnrrUze}?TJcAt_ohW>ʕcjGX91p( *"::"17!c+(,BF9˥BRHWD:0^X scU_8m_C-40$ES(dfL,ߞ憮fbiP [$~urX'uY'L ݧUF|4:}6-h`HNE1b!hcey4LC1mYýj)ceWAm%,?] ?Z`a8H6l~A g UF>/0+z?=޻zk%ZHko9>fI {Lg_ػ`0Ha8We7#kΨ T g42uw"vPCdE⚕~nvS5u#FH۟8NA='EBG$s[!#w}$}gipI}*}J_R;_yv~ ڜ.p? h(TA$xOs~h  PyG71Td¼RSJO+?-TTRӸr }ұyk)nifM2MS z\nJ>:NYUj**TyJWTZʫ*U^-PBU^JO';M*ܴlxd-/E_#i=\^9G'ZXvgWddw}S yp+^髦Hmg?I1ޢu7WdxСv]Z/n`3vvD5t4GhG&'ozR&k..ɺl]?N=ty_^;x4ưLOv{m \wO'=5;\хZv_%ZҠ&OߢALg ѼD_:[v6~/]_x::aw ܦM廵x'ebU~*5Nn+U7Ç.aqx<>}FI.K<15~"Бfp_ V98@~՟^Z^3n+Gj709 ]7mM%foJ_OewmPZv{CwӂpJʎ%Kڼ4k,o^;.+/PKj0ǫtJ5*:XCH/e+p`~bws}9><]oxwAn$: ^&ۖGi>hݾD?'(xﷇ/.ڛs-/V=?}4fnk\H=Ҏ Tediu;:G VNC\):Zf%ڿͫH%Q=`?ߎ%1ENnM^pcDYT\DP7QbbRT1omq kQ:716Zp6;ȞR66ܡ;߿isʨዦv^a#&9r8G`29Sv=EVlu̎A(l]+?:i/pYHoćBp #uk.OWmVK T<ͻҋmMvޕdR|!)Ϸyk)F|r*xzӁsJ[:3WAFo~t1ծۅLFS#dﴊa8ܯϛn,Ɔ `mqQj/axĿ_?y7<Ɗ˨7wGDu}az6,wJ}v/ I"w//__nz4]~RGSotwݷw_> stream x\I#Gv>Hpt%2ؖTlFjbT>KFD&Ah2b{eյlߧϿz+F?O kI:VϮPbV^eZ'z~ݵu]o:Џ[o06*(\UNp l]6|M\D7߄1IkzB3KY49Grb]܊hl4ך?_x'E;!T$an}WWwZ0JwҶZЦܲVv+2BLI2\&95~ݎ_̰߭Tf' ΥjØl=k$Ɖ7Z.Jn]ghC:$ Z!;V?ҿ]1`Dj9*HOvW/PMOkT:P*=ɨkFdTVV*=x'ykx$Nq,B|I0f0 )(zìpp 臄/l$S PImlԢ a`|*jN$'Rk;'94.' Or"9QxY+Zr$#2]gZk3$' Έ2oyګ[aIp$7o~FrI_zeN%J;dBQ'zu T&8*Y)+:ޒE;zQ2ߌۊHYxg3;BD D˙ *pߞh|"#}\-yj5@T*7_ vY&OQqVJq3"Z{'+p  ZvsՁ3H}D[j9ʜ :)nt ԓ^ L[wP6kuکAU/ _\]9$ \Uz2QsIFU7ӓJa0=häfꡑ=;ArOOrDՁuUDVJ2JT U\*!Wʐ+h@u}ݱmc$ڷ6+-*xe>·KD~Kи);ȡ.L&^CoD M3<*MH0jJXJe& O$akB P 7.}h쒐3rXa1bJܪ"b^\&}t>Uz2{9&GIF%;8}AdT B)$2:(5=ɨg|/]a;$X,Z` }z9p<,*Q%JTZ%V*CD*QehZ% ҉&3TΨUIzjmVQh5iRe0;{@?=yg Y mӓ߂V"ΜQA[4؀ +O!I*;0ZcrYBK.\0u:D-cEgsC4I-+yYq:nJK2ry]o@Hmú/&Y-H˝k< cEqtOߏ7gԩ)P:ai Weu4Em\G6annwc`!BXAN-r/hebiNx}O.!U8|LY+~ `:^ zi^鸨} &ZV'Lt8  N m m( goD |zk_^!MD}rB+ޜ_Y l6f"% 4b[>?FB)ZC,w䚂}Τ( ah]uT~1qخ7(ۮ®.uoHjX̰mÚCpyHs^`0ᗕɵexZZza4[B0yMVѱI#-n"Rn\ 5Le I ~hs 1xZKJMS<:n=[KTOPg{[5k#C&8eͪM<9}.!W7܌vi3kq7ADT|]Lw̄0П a>;|j&IcP>!dsHX\On SEs=rp׮Z8V 痐t $o9$aߣ!hI^LFm6EDdkGS@Kt8,pYrrc x394C+X B$c6mPNrf(Xџ.*W8 #30wR0hru۳1/z5SwV1nTlJw{p9Ig!@em?|D`@ J:nxE]94Ht,Oann8Ygxca.N*VY%G:ƭTW<ٮ8F6aHEmE>pHb`˯ip0Yߌ1a;YzfI)f٦ؙa$Q!y|?5=6nzv0{Ъ҃;@ss?|3 UA)|OF!DG 疼(y( a xx$M.x1{8cPKsZQkKY"կ]9b,Hܡ%5rGrsG!'XB_ŗ0qp*£LF"F.T,bEe:'`91P/(,} iۡj. 
surveillance/inst/doc/twinstim.R0000644000176200001440000002577714030612502016476 0ustar liggesusers## ----include = FALSE---------------------------------------------------------------
## load the "cool" package
library("surveillance")

## Compute everything or fetch cached results?
message("Doing computations: ",
        COMPUTE <- !file.exists("twinstim-cache.RData"))
if (!COMPUTE) load("twinstim-cache.RData", verbose = TRUE)
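## Added sketch (not part of the original script): the code below depends on
## a reasonably recent surveillance release; a guard along these lines would
## make that requirement explicit -- the minimum version is an assumption.
stopifnot(packageVersion("surveillance") >= "1.16.0")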
message("Doing computations: ", COMPUTE <- !file.exists("twinstim-cache.RData")) if (!COMPUTE) load("twinstim-cache.RData", verbose = TRUE) ## ----imdepi_components, echo=FALSE------------------------------------------------- ## extract components from imdepi to reconstruct data("imdepi") events <- SpatialPointsDataFrame( coords = coordinates(imdepi$events), data = marks(imdepi, coords=FALSE), proj4string = imdepi$events@proj4string # ETRS89 projection (+units=km) ) stgrid <- imdepi$stgrid[,-1] ## ----load_districtsD, echo=FALSE--------------------------------------------------- load(system.file("shapes", "districtsD.RData", package = "surveillance")) ## ----imdepi_construct, results="hide", eval=FALSE---------------------------------- # imdepi <- as.epidataCS(events = events, W = stateD, stgrid = stgrid, # qmatrix = diag(2), nCircle2Poly = 16) ## ----imdepi_events_echo, results="hide"-------------------------------------------- summary(events) ## ----imdepi_stgrid, echo=FALSE----------------------------------------------------- .stgrid.excerpt <- format(rbind(head(stgrid, 3), tail(stgrid, 3)), digits=3) rbind(.stgrid.excerpt[1:3,], "..."="...", .stgrid.excerpt[4:6,]) ## ----imdepi_print------------------------------------------------------------------ imdepi ## ----imdepi_summary, include = FALSE----------------------------------------------- (simdepi <- summary(imdepi)) ## ----imdepi_stepfun, echo=2, fig.cap="Time course of the number of infectives assuming infectious periods of 30 days."---- par(mar = c(5, 5, 1, 1), las = 1) plot(as.stepfun(imdepi), xlim = summary(imdepi)$timeRange, xaxs = "i", xlab = "Time [days]", ylab = "Current number of infectives", main = "") #axis(1, at = 2557, labels = "T", font = 2, tcl = -0.3, mgp = c(3, 0.3, 0)) ## ----imdepi_plot, fig.cap="Occurrence of the two finetypes viewed in the temporal and spatial dimensions.", fig.subcap=c("Temporal pattern.","Spatial pattern."), fig.width=5, fig.height=6, echo=c(2,4,5), out.width="0.5\\linewidth", fig.pos="!htb"---- par(las = 1) plot(imdepi, "time", col = c("indianred", "darkblue"), ylim = c(0, 20)) par(mar = c(0, 0, 0, 0)) plot(imdepi, "space", lwd = 2, points.args = list(pch = c(1, 19), col = c("indianred", "darkblue"))) layout.scalebar(imdepi$W, scale = 100, labels = c("0", "100 km"), plot = TRUE) ## ----imdepi_animate_saveHTML, eval=FALSE------------------------------------------- # animation::saveHTML( # animate(subset(imdepi, type == "B"), interval = c(0, 365), time.spacing = 7), # nmax = Inf, interval = 0.2, loop = FALSE, title = "First year of type B") ## ----imdepi_untied----------------------------------------------------------------- eventDists <- dist(coordinates(imdepi$events)) minsep <- min(eventDists[eventDists > 0]) set.seed(321) imdepi_untied <- untie(imdepi, amount = list(s = minsep / 2)) ## ----imdepi_untied_infeps---------------------------------------------------------- imdepi_untied_infeps <- update(imdepi_untied, eps.s = Inf) ## ----imdsts_plot, fig.cap="IMD cases (joint types) aggregated as an \\class{sts} object by month and district.", fig.subcap=c("Time series of monthly counts.", "Disease incidence (per 100\\,000 inhabitants)."), fig.width=5, fig.height=5, out.width="0.5\\linewidth", fig.pos="ht", echo=-2---- imdsts <- epidataCS2sts(imdepi, freq = 12, start = c(2002, 1), tiles = districtsD) par(las = 1, lab = c(7,7,7), mar = c(5,5,1,1)) plot(imdsts, type = observed ~ time) plot(imdsts, type = observed ~ unit, population = districtsD$POPULATION / 100000) ## 
## ----endemic_formula-------------------------------------------------------------------
(endemic <- addSeason2formula(~offset(log(popdensity)) + I(start / 365 - 3.5),
                              period = 365, timevar = "start"))

## ----imdfit_endemic, results="hide"----------------------------------------------------
imdfit_endemic <- twinstim(endemic = endemic, epidemic = ~0,
                           data = imdepi_untied, subset = !is.na(agegrp))

## ----strip.white.output=TRUE-----------------------------------------------------------
summary(imdfit_endemic)

## ----imdfit_Gaussian, results="hide", eval=COMPUTE-------------------------------------
# imdfit_Gaussian <- update(imdfit_endemic, epidemic = ~type + agegrp,
#                           siaf = siaf.gaussian(),
#                           cores = 2 * (.Platform$OS.type == "unix"))

## ----tab_imdfit_Gaussian, echo=FALSE, results="asis"-----------------------------------
print(xtable(imdfit_Gaussian,
             caption="Estimated rate ratios (RR) and associated Wald confidence intervals (CI) for endemic (\\code{h.}) and epidemic (\\code{e.}) terms. This table was generated by \\code{xtable(imdfit\\_Gaussian)}.",
             label="tab:imdfit_Gaussian"),
      sanitize.text.function=NULL, sanitize.colnames.function=NULL,
      sanitize.rownames.function=function(x) paste0("\\code{", x, "}"))

## --------------------------------------------------------------------------------------
R0_events <- R0(imdfit_Gaussian)
tapply(R0_events, marks(imdepi_untied)[names(R0_events), "type"], mean)

## ----imdfit_exponential, results="hide", eval=COMPUTE, include=FALSE-------------------
# imdfit_exponential <- update(imdfit_Gaussian, siaf = siaf.exponential())

## ----imdfit_powerlaw, results="hide", eval=COMPUTE, include=FALSE----------------------
# imdfit_powerlaw <- update(imdfit_Gaussian, siaf = siaf.powerlaw(),
#                           data = imdepi_untied_infeps,
#                           start = c("e.(Intercept)" = -6.2, "e.siaf.1" = 1.5, "e.siaf.2" = 0.9))

## ----imdfit_step4, results="hide", eval=COMPUTE, include=FALSE-------------------------
# imdfit_step4 <- update(imdfit_Gaussian,
#                        siaf = siaf.step(exp(1:4 * log(100) / 5), maxRange = 100))

## ----imdfit_siafs, fig.cap="Various estimates of spatial interaction (scaled by the epidemic intercept $\\gamma_0$).", fig.pos="!ht", echo=FALSE----
par(mar = c(5,5,1,1))
set.seed(2)  # Monte-Carlo confidence intervals
plot(imdfit_Gaussian, "siaf", xlim=c(0,42), ylim=c(0,5e-5), lty=c(1,3),
     xlab = expression("Distance " * x * " from host [km]"))
plot(imdfit_exponential, "siaf", add=TRUE, col.estimate=5, lty = c(5,3))
plot(imdfit_powerlaw, "siaf", add=TRUE, col.estimate=4, lty=c(2,3))
plot(imdfit_step4, "siaf", add=TRUE, col.estimate=3, lty=c(4,3))
legend("topright", legend=c("Power law", "Exponential", "Gaussian", "Step (df=4)"),
       col=c(4,5,2,3), lty=c(2,5,1,4), lwd=3, bty="n")

## --------------------------------------------------------------------------------------
exp(cbind("Estimate" = coef(imdfit_Gaussian)["e.siaf.1"],
          confint(imdfit_Gaussian, parm = "e.siaf.1")))

## --------------------------------------------------------------------------------------
exp(cbind("Estimate" = coef(imdfit_powerlaw)[c("e.siaf.1", "e.siaf.2")],
          confint(imdfit_powerlaw, parm = c("e.siaf.1", "e.siaf.2"))))

## --------------------------------------------------------------------------------------
quantile(getSourceDists(imdepi_untied_infeps, "space"), c(1,2,4,8)/100)

## ----imdfits_AIC-----------------------------------------------------------------------
AIC(imdfit_endemic, imdfit_Gaussian, imdfit_exponential, imdfit_powerlaw,
    imdfit_step4)
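## Added sketch (not part of the original script): the log-likelihoods behind
## this AIC comparison can be extracted directly with the logLik method, e.g.
## for the endemic-only and the power-law model:
sapply(list(endemic = imdfit_endemic, powerlaw = imdfit_powerlaw), logLik)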
## ----imdfit_endemic_sel, results="hide", include=FALSE---------------------------------
## Example of AIC-based stepwise selection of the endemic model
imdfit_endemic_sel <- stepComponent(imdfit_endemic, component = "endemic")
## -> none of the endemic predictors is removed from the model

## ----imdfit_powerlaw_model--------------------------------------------------------------
imdfit_powerlaw <- update(imdfit_powerlaw, model = TRUE)

## ----imdfit_powerlaw_intensityplot_time, fig.cap="Fitted ``ground'' intensity process aggregated over space and both types.", fig.pos="ht", echo=FALSE----
par(mar = c(5,5,1,1), las = 1)
intensity_endprop <- intensityplot(imdfit_powerlaw, aggregate="time",
                                   which="endemic proportion", plot=FALSE)
intensity_total <- intensityplot(imdfit_powerlaw, aggregate="time",
                                 which="total", tgrid=501, lwd=2,
                                 xlab="Time [days]", ylab="Intensity")
curve(intensity_endprop(x) * intensity_total(x), add=TRUE, col=2, lwd=2, n=501)
#curve(intensity_endprop(x), add=TRUE, col=2, lty=2, n=501)
text(2500, 0.36, labels="total", col=1, pos=2, font=2)
text(2500, 0.08, labels="endemic", col=2, pos=2, font=2)

## ----echo=FALSE, eval=FALSE--------------------------------------------------------------
# meanepiprop <- integrate(intensityplot(imdfit_powerlaw, which="epidemic proportion"),
#                          50, 2450, subdivisions=2000, rel.tol=1e-3)$value / 2400

## ----imdfit_powerlaw_intensityplot_space, fig.cap="Epidemic proportion of the fitted intensity process accumulated over time by type.", fig.subcap=c("Type B.", "Type C."), fig.width=5, fig.height=5, out.width="0.47\\linewidth", fig.pos="p", echo=FALSE----
for (.type in 1:2) {
  print(intensityplot(imdfit_powerlaw, aggregate="space",
                      which="epidemic proportion", types=.type,
                      tiles=districtsD, sgrid=1000,
                      col.regions = grey(seq(1,0,length.out=10)),
                      at = seq(0,1,by=0.1)))
  grid::grid.text("Epidemic proportion", x=1, rot=90, vjust=-1)
}

## ----imdfit_checkResidualProcess, fig.cap="\\code{checkResidualProcess(imdfit\\_powerlaw)}. The left-hand plot shows the \\code{ecdf} of the transformed residuals with a 95\\% confidence band obtained by inverting the corresponding Kolmogorov-Smirnov test (no evidence for deviation from uniformity). The right-hand plot suggests absence of serial correlation.", results="hide", fig.pos="p", echo=FALSE----
par(mar = c(5, 5, 1, 1))
checkResidualProcess(imdfit_powerlaw)

## ----imdsim, results="hide"--------------------------------------------------------------
imdsim <- simulate(imdfit_powerlaw, nsim = 1, seed = 1, t0 = 2191, T = 2555,
                   data = imdepi_untied_infeps, tiles = districtsD)
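## Added sketch (not part of the original script): a quick tabulation of the
## simulated events by finetype; events carried over from the prehistory
## (before t0) are included in this count:
table(imdsim$events$type)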
## ----imdsim_plot, fig.cap = "Simulation-based forecast of the cumulative number of cases by finetype in the last two years. The black lines correspond to the observed numbers.", fig.pos="bht", echo=FALSE----
.t0 <- imdsim$timeRange[1]
.cumoffset <- c(table(subset(imdepi, time < .t0)$events$type))
par(mar = c(5,5,1,1), las = 1)
plot(imdepi, ylim = c(0, 20), col = c("indianred", "darkblue"),
     subset = time < .t0, cumulative = list(maxat = 336),
     xlab = "Time [days]")
plot(imdsim, add = TRUE, legend.types = FALSE,
     col = adjustcolor(c("indianred", "darkblue"), alpha.f = 0.5),
     subset = !is.na(source),  # exclude events of the prehistory
     cumulative = list(offset = .cumoffset, maxat = 336, axis = FALSE),
     border = NA, density = 0)  # no histogram for simulations
plot(imdepi, add = TRUE, legend.types = FALSE, col = 1,
     subset = time >= .t0,
     cumulative = list(offset = .cumoffset, maxat = 336, axis = FALSE),
     border = NA, density = 0)  # no histogram for the last year's data
abline(v = .t0, lty = 2, lwd = 2)

## ----strip.white.output=TRUE-------------------------------------------------------------
table(imdsim$events$source > 0, exclude = NULL)
surveillance/inst/doc/surveillance.pdf0000644000176200001440000047743514030612526017675 0ustar liggesusers%PDF-1.5
[binary PDF stream omitted -- vignette "Getting started with outbreak detection" by Michael Höhle, Andrea Riebler and Michaela Paul, R package 'surveillance']
\KF T@L`H~76p(.Tp;^13kԗKDudДw`}GN. ~ v,7koU'KunD})WٽKv?PmɔAwyG"rkDǩLT3<"g:D,! IBɁl Z@r%R ;!j q{Yc0lJr힐M{>IYT'=eźJWKU`^Ӌbq3nhaNժ]9 H3_nG-Wvd]~D:CL=e3?fEjK((wYG t/Bߨ6d"4`^>fEyX7^Nq#kX eax/-r2m>t 47h$?}h7y]Gk2|I~H;M" )tAک5oQ \VH޵\[WުZoGRŒu= (ʸ͞<4gyVdR14#tb1JsuL&vMf}fbjg|к/U6rok SZʇ ]vn)\)%^)DcUPGV"n{V$-M xv=:M ynA\XQpͽȤ_/e2Cί#Ĥ]v Vdr\c VZȼS&8''փ9Ѳ+ 2!ɴ-cE8i')J`^c+4W;ޣVWU81fP;N٣_O_6M^:'գY["CThVxh bܶ앫+h uq(=yySFnA`?/\-KaM:O|'jںIQa6*dP 3nowuiڤSwK'[[N߫j͘tmԘ[9*Xw\cV^q %Yq[.Zbjw=@'<~4i3>rCy}C' GDN9A"|he{ͯqLްgs{sʗ=akj'n{\] ];;rSWR6%"/D~P:JNg gt:Ƒ31~HǍlkO 7TA 9kנZfcg9)KR] k7H+/$Tyϳi@m3Fچes5:sݳcljSn,endstream endobj 169 0 obj << /Filter /FlateDecode /Length 6458 >> stream x\[q3"Oڣ_$mG"P$ypK.e{(R?TL.El|u/^?;ϮnG?=׳gx#;CrgO?3krM8XΞ~ݟ[!ޟv{0n.P]1GҞOG3k 9C K)6djhayw|4nnߛ`y8MoǴ9`Qqҡ֒w?]>8L}6Jb"؞cM#-dWːsשCj'|A;X]I@MԤ'`s>؋CrJpMl]E7`xb;D#sD9S zF[$wb!pfsO[:r7!7!'Z SIމV +ѹHdQu-> r(VEځPj]R}FZ?M{L|0lۋEpH"^U;-y\C>KN*y9>]ĥ&1&GzքgLH0h%GR\-hn&)li)<`%YzUٺKR#僔֬bNofw`L1ۼ-#G|pj k+.f2T!CUe!!t\E;uͿd N3yCLV]U5qn3 iݻA7 $ /,Vf&--a! 8*A)Xۡ 7ѧ<^-#>싇%XRlK< r93CO $>^(f}yT p*>]/(UXq199\ JőB׳~p@#udZ`(PWv ҧ*[ NKJ쉐bq UXIoN`ڳl$4^f_! ?MTʧᛞWFFKڶ'`[M !,/_h>L ҂C 7YFt0w?fS>aՀmcXҾbMS^K=ٰ+0l0v]H+:+u %f@r!eAaPTL),iuQr L-FjE16^Rdw d -BȂa҉d{ E65HS(P&JH@Ӽ78g_R-2ZD.&jf'3BReaeAx6L61cыQa4@bTsa]&I1`]~1_0$1*P+"!|Av3E<xuL8CG!- w?@߶X-uTQ!,w9:iWRBx#rRj-? *"_ݑ%$ zm%eJlayy_fC&eNxN 7mևsBOE9EK&+-A' yZN V ɻuIH٨1R5|-5Cqp-Ŕ(M )xy~ӅV^\JnHh3*3btH +[F3)| VNev/,J1rћXFZI: a8*B>tY[zbCfo939O7ۓDxo9訪)|Ԙy_2.C1' `KLc'1?,sͯag"ׁ]ҟ'r#ux2kqsCQsT&&a0Uhkh`2+bY޴o!UoeM GrS`.3hy,G9dp妭 lvSK6uڬa]VƧFvQu^t x{wܬNܘ! dD<~iU-gFɣXHFI(~8hKXƛYj=-%arqVnC@|$QQZ\v澣ԧwˣ@oZ ̎S|̵浨sT1Wc5qe!WpUUfs+V&(g/RQm'ũ#0ChP}V1[[Q=X7T[kX k[|:ׯUAOBulVIsti $mМTƽ]Je(Jֵ7x[|MCmb]0x;6ǏΦ8l,ʉ<&A)DžЋ;Y7΅&a`&2#D \B&7!^K N8߁5} f,M bX)0^“;]h:s MI/qnn𯪺&̇0R>c/ !n)D;-h"+hX2\t/}I5u8Ϩ`]"V8 SI5@6Y)LBac9k:A`7SĸPm"%/^yaqiS&K\4~CT}mnºQUTdU̝nvEե$wߴt]vyV–;sSgcl9j??uΐڤŀk2 t\H6`M5N[{(`ap)AW >u ԿL60c!lZ#泷ͺ $?0!*ySB-Ԭ-Z2OHg.# 뉹S-Fb_b ,msr&qlFs cI2EӪusˆ4ސ RV8נO[ڵi)N`;H!zO)AU4߿Y*uEB̭=|)im[m&eIYjzo8 bwaEEbBPe{kWਵ1ųtNȎae6SKW#~"*f]:AD8bcRm rkfwԐCqCY>l<0z;X/ B5`M <98)Q tlk$12"QhŞW-?~ .j.zl\vF.-l֎3^ŢƮ/[ lsZúð[]JmZ[tg(:XmŇ; c.adN=$YRM'.HGԖϪ i=uv0}+4ksP͗ /OC̐M]+_%q*5 .Cl>&3fVi֌mr試z-ɱKItYwj8-$z/hm8lh:X(0@X@"B V>\\.xwJ6D4zjkl$/okp~YU!/d3w¢|D|-߻% 16֕p UJ .'Ufܸvoq>WcR] E$$'Й9aNTj.ZT>ͦR5i!Zz(~r6 N!}[:N^BWn`UjU:?%kL&GZILY:%K iMd'hp=1 cFz{~V/[+B hE5cLl\ ~a0"M| 9l 5bm+,x]$xï~er;_X]߬- d|aH}xG1/cԣ5o{iXlHrZ)մ!\ (pO@Lk f}mD=B9o`v4vYE@^c-Z2R/!A[IOn&Ef;r;lji W4,9i r5_:}zB$07 6tqeTAoQ}󝷫k5s .~nhz`)v2~qQ?eF:f}_g$۾yo܎[CxZ? v"Yd|5H,g{8QfpƟohF1޲_ODQ2oĂۃ:IimWW]`6uHh'\R8Q$Sԯ2}s&(PtyXqHX^wg`n 8DGGa>~oRI'?Š^LԮ@Տ-(elaGTf׼]ļ"ۖsT8Y}_[24 n]ùBu7aGx+Q_ɚRe}a ` ЕGXuT6?+ ,zbl(O8rҧl >×KB3N3 @֯.}lb5a5byɫwai}A>8_|/U ujDdkB0c[[=!W@l6Z-gHI_i9S=!i="C:kٷsx; 1MŅ{M"a(/o*b̺ ۼeoH|ê}U:㴿g VlgbSkģ!_,o>~?lM|[%ruV 1UնH9[pREU q%IJN"L:` :S~v6NL!5Ag}-Id^2,Bu>> stream x\KwqG`9~? 
$Kla(^PY\ ąB2Ez3]wp_UWj >}R㋻#u|q#M=.}8F|3f#TkdqTn6~8z5ިhc7[5c48fm_Q8\hm4FQS4m79~gsvWfmLs nx9bJ3s&tRQao/ Vg4 Z)uE; ԙuV,eh)$f>kf13o04]mVb ƚ=n(c90Ӿѥo'}YL控n EHu`1܆GeqHEP3rJaG2Jr񵇽s2Π#JIa\rNA &id0'URUBf]/u^=dYIv!0'>܌N<42'-L-9sϮϫs xFn H\P]}TJ!1Fed DFPE#Fa'r 4y?7Bgһ aBo(' CT >Gq3k }?x+IΕu'Jb_sDبSjbQIHiє]XC1Y{XF y@="k2QcHW#C G_^:^ ŭln8hvf]X`^Jd)u!Gfԙ撌&C]Z+y(!gk`7[%nMpdP4 YVy1[Of,;=aY p$&U 0N)Ld+'gL\ *Eh</ry5n/KW?I&sFƓUC=5x~MGsDS]Nm':D>יJ.dubݳSSZB"3JVOE930IO-/`tC_ Jrᬐh5f롰eHy2|J hs[n `dFxi=Kk)e\vGh]%;$F!YqY\[y[-; -T `S(T`4f* 2Ri}I_d<bYel=she2Ä*5'<@b]SiwsmX?oq6\Ll לuBPF&8zA7H))}r5Rj:"T켰'+H) e>)pl4)Q=,s&Y'$kSq>$ǡ8M'=l{(kd,Q1HQÕ.C?,7d,Zl /ШUˋX/$ءS_IN<;c76}Myh'j7:`/f) 8#y=}ؓúh!|R'+c F\[NN58>9H9*iBT9 4A-Wlʮc1ʘɦ'*%a^9o|XJ3NM;"fw ӝ#{)MW%CA-@2gظ5pGY[/TtvXU0lI{,DJ%7ٸʁU&E74j&q`jX;Qo49w6J.uFSVT}u)u\U>WM APSm_3վMP[[R\QS\{شgf8mhf[۸VkZ&J&)"vG.i2K`P-etQN% s =ӔiaeXwNfm}>xL[m4L5Kpə:aESƏ6ղ6P˩,ޚg;4Д4mOZzv6,cS)vt'(S\D )%(ZÊ*1i]z񇯪՟@aQs3AZ|WCj󺺘K ;4\q㌃|fri?}gB%5Ckk.e j[vC8Dž)i]SxbdOֲU_@hbF ô]0ע~II R5X&֤(`\ʙcZ[K~ 26k}x7B]No/EZzXpJY|䢢3a7gو`k-Bj&V_U==[NKB-VU DW9j%ۉnt{=9 7.脚Vq`l,O/iokW JYc\:zfZcAendstream endobj 171 0 obj << /Filter /FlateDecode /Length 4904 >> stream x;r$w~DbVzqg蓵c"i6ќ7( Q]5ք $D7jů븹;]1 B[7Q4yJ1*mNzVM3*9*C7q6`F4jo6Q1fzۭUJƨ0o/ ! 'wZ=NKzxkvȧmqenpr8J?W%0fѻT^F^n2JeOp;3q:٘-wD\pw>f.W(w!, hdᘂnNN[ޖayM26gg3do=!)<[^~z| qZh[M夌 :&i9P kO[^s Lr&4S/aQi3 N2ADMcoXdtnbr(&E0.5,G6LI"?) TVp|m(0av31魽QʀX12?}c-]E7}&+#$(h#9xMy : _;M$~)v2Vle2fAGTGpp+bs[b!8!O fR׆]['I^B$x^B0Tyx_L1]hwٰM`5A`EQHa$ xc^$fO0|ӅMFM}"sE@-Qh_PVPh[+`?,ZCV6bjð{\gԒ(Md\QYrA{0>ؙ`$hݷ=49A+T5&^7mahP=ZFv~V`[킷6 I 7,6pܥyOMʼ酎S}C/!el8+p2Α(J:doUc E}TG/ u`a b `nlA#nRRڍ> GRmdy_*e+G: w&]|A 6: ?72EN b҂UU>?.H@:ڷ1M?V:ݪF*X,td2U1vu0Z:ju\SG?/( B+YR>B gko;[Vd rouE+21xB8d>cq^"<hXH~$m "AYiڬԼ Ӳov) w6?A8Sb|p`'Le;av4{DI0Q80px)Lp" 9(:c|Au} :6DH&d<@0[*Oe:,=& 6#NاxGwz"D#$epx'<#!R縜1*ՍPSXdS7#`v!t.VBŨ $h>-^Tv':ؕV-9E4¶rzOJq_9ҙCԦɿ9H|АWbM :1Q*}yB~tmS#{N{j),N-nDX^%?xiEZ)הj_ Zq^9wnl3_.\k^:zT{4hDLzq nG{T=aD}TP}~0jy=Ӥ))g0ߘZfK#(BŴ*ZR .wcѧCHܲpׂ!0bR~n7;d!p]cz~dFj Ԡ{%T!ILL;/<n w00 mT{4̵,5@t+JUYX@`lϘ|G5=qHWASUűeW(g8.͛B;84* W۫=#I? 9\wHJ4DŢ_$#bsy`g.ӃSdfu՝+`h !a#" đ"䇆8ZFҮݾ8=|ekB9O4PL h3FFAD#$m.-,907 kf# {)@k'Gذ Y)"kjsɳpCbb2[C)`)@@5GF> v}jLBKL37DA凖mIO$Xb.HP;!N2i@ؗ$!.GQ87QR'TYQ^M`E`rZĥ(؂x4/:lPSeS K@0)R$VETL(4wy%sT#-Z2NLܠ{d*pv/a9g1}Z<o Euw `qnZ+Bbc*PQ" XEӭލ}s='\*?3K31aaK*FLc $Ҧ U swg:$ /HB7.gh>#O>ϓj#/*FL"zW v2Ωʇ>Jgd^rUAydR"J @ P†%:_]R`J8I2i9 c؉edy@ ,-FRR;?ov=gP pXS;3x):d 2%&e ;0l[؝6p2y}W;a_+4|6wL<$ 2;;bqg$#5TX[8}RaS2l]֘>Oϣbyg#D:Tν[*~l]+ho_b`)$OMfEHUd(Kk#Q֯}^X.;l;_̬׆LOӀ/Cr_ۗ B=/f|sGn2-wigSDD13+-(΅,Oembc[P31F!=oGqܹ0y☿mgH6R%i$:PRd%"D[qһ|誗v氩QzꯕE7劍Stqվ%C4{7V 8SijۊNKQ+`ʩTb6[]R$,ZA ߧ E_rZwl?gQ1O.Y'V€ӕ"OP4pea. -7I;rM.1 6).Lom"r9"6)6)2aFpc[^:a C,[ 1~t46/n˸MS]6>W7lq!u}y!kx|!=b 5㸌Lؘ|& !#USMA:Wj0 \)mvAX;0XU&ъ& oP,xX\mxj×6-Ύmچ6o6|\vaxkX(*s#vP#ZwXȓ1$endstream endobj 172 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2246 >> stream x PSgo 仲xH-[Wk}oyh|^b ID*4) Wt}v;::YuuŏݛZIrswsΕ0>DB"MNFJQRW ?r𗂿O(l1y#H2rEddd Ig͚:K ]W/O0 6T@=a`Ȝ=mZ^^ԄY)s'R :u6[MV/+ҵNH1h3YzaYنܼImf,YŌc0Z&ep&`1 HfYά`F20>LLI2uKi&@EW8v@قI <%RA>Q ; JPkÍEyjd fhk<1A^e. _t>}CD tB;|.p4eٗbWS(( Tt'Go,Ґmb/8s)iT׶OۮTC%d\mlnI C mpĴt&]8D|q8dt#gT>pvCt霩 0 U(|zx (!ܫz("$[1NBSkdhOwG/@%1i/~pr!]4ߍ(x rKlOn̼] 75< #W (X A&558/mU*"!)ްLot #[Z;na(<ű Gpn>Bi-:=o18PPfd]eTL 06v]jUww1~XYzv FJH>=a uiZ`q*Kx >pEe'QA?._9!pI*P&<2-YmViȱ&Sb.۫,S˶[,bm%ze7 FWg@ *{Q*j_D1-ِ [Nݖ&bR O^eiXs:$@ЫN` yshx#L4'Zal$)}~{ ʫG^bk׌tW)8b\ 6SyAV[ w. 
]=ig(*/{+q*ɦS,H{]9l(z'7dx5M1^2:'b넧?[L+` J@?.7~SՓ0zLw ݸS2a"*<O*Kw_mB fk@y!pf}vF\/V5P) Bs.e;VaY8u { ,]KUT^_=A*< tr.}]ozLc->rH:~t  'p/|*H#\9Ѧj૚ShOӋֲ8< 詫f-ޣCg8呣P\ZhT宏J["*X0괏<} p hUw`hJ T> stream xtT0dx%RD TQ;! ɤLIe2ɤMBJ(*RiOQs{$[?Ok?dܳ?ك+9M4ˉ"z`/d7漱## o=C* D{#P  O7qG3s4q ǹ^a<܂WEzE=vyED;ބ |0fc_u^^a<E8r rs!0Ǖ^aAE-4/x~ȂЅaG,.ݽ,myh1V{]o;~DIL6?j9}jjIޤQoQ(j=5@6RcMfj5BͧޡR 6j!5ZDMSNj)5ZFMSS4j%5ZEKQT0՟@ (1RS({ʁL R ՋM R@IiEJ|l*2;fsנ^3?G^mWb~`XFv +}b?+lZ3|fCn6o9,qdH85kXõ#G啶`B[LRcޱM+PeB,(iأqL]9m0[ŒU:MF+9 hZ9(! ta/#yI4d 6,7mTd˷iPK a=cq9CKF6m5Twﱾ{ K +'/۰M:9pLM%~0\vd7^x&Ub۱x3!]tټ< {HM7hZ94@q[cq28QyHk~8U;vZ up m ޿18H$hvB1 yK#g`0=M"M{!É-=e!0Š+$d $p[z7m>68x*wFgZŒv.⻃/{#ͱ%wowvu][Цjf9tFlˆ/A}~H<{S"~6 \#(m;Zs p\/I|9h@OܑIc6PIwǰ;=Dh&hC-ӑkspIڶ0 0jȐuk"n+,koαxm)JJ{M?Ip? _pࠡѡ?:X̼U\{˖)gu5=4!jDouF!zلyx0ءKGKw:3[MF4(oud;ݡ5n9B?z4J>mnӓ榢.X0:cls2?̮!6F|8HyAq'3}=fM9U,P'lO@j^FSVi--;vX]NLuzllRXu>492$?X.M RPzոCgjD>Fy3^VKT+5F=&/D-מ_Ww{e^'da5B8ORL/MwKʣլR3Lf,)mլ&!,UZDc?+Zpz fU8BPUPMȐfEMi>-,{ ب8|dψץ(5e @gRf1!C#'G v-7=A/]%=8Q Lےa0Oq8C.4>f}[3c@}LUM*?aFRx4<'Ii㛕 + {s%V&ˎߵc{㱏Z$K!?΢%hnv'uz:3!6))%_!!ӈ!lg&$@C'DBT ;=e[Ikh?Jd=ZYej:PjE*PJQ`E*;slRB|,Y?$g:@QQ޲BT Eb eШ[WYwpmkרvC]5iZTVR  >b} /99.LuZ}nM0 n؊#ՏEhRP:i&(jrmH7RHe;]b Rȝ2;Qv%J*w:po6 o&RslH-en( ތ'X| L"D'!iTd*7C=^J=!?m6M1VE+Ǥ.!AZVc!2(rU$ (Cbk/)OK'H/Zi@Z)=LUTqD4!pGϱNl55?f_ٝx(@; h1Io9y@4dxQcY3AG$e4,tWTVMƽwb[1,8~GRgqԴjyc0ߍFfe_{ *h",&pNP"pج=[{ܴ)-qy1ь2Q#Dq|&胀ZQE F QLg_DHӨg9u0U&(WU~Uk p tΦ ,G vd~Lf]Of1{yxJpnՋ :(~3}̄xhn˱d`=e)ڴd S7,2FbʱʲB -tp]ŠsێPfoAi|.W[ z` hYspQODФbWnD4-g1]{} 28u#y07KdB^ S0 )mOa}:˴jAh!Ŏf?ɑBfvS愶n7-Ud쓈HaҐCX^O:R1dUم!Wя=_3,<.8wo4ruhe~KUS9u&M8}wϻ"S%^aOSRkCsIbl?=hvэ &un (_eqv]yH@/C=$|zӤ͇[岋`z29fqw$O]\Qh}pptd覓|1{ֆWUVV<&^Q6 (!6 ;w`;( KI*DSPJd*W "]?Gp z81wG<"2 ⩖K+\W%J]PU' G78CCT" R:hZI+m-r3{) PJmqse񉄲a[{>3)(f:Lf`eshK(Gad D?zD%N9AW8lg>m] ۴>gDJ"(D ;7gHhvlv;lasugf'<,ȾVj}[Q,x;}𽫑G|Q~K7}U'p.e#ڋesa6 5alyܸu;̷Jf!\֎JFp+O l^t$:qfz@Pn? FV?|lkA=2 1AS}2Н'~k~ZP{8ыMHꔨZ* ܶKNQR!E$F)eA wHty۹d$ZVec10Aw}̈́7,39&Z/oH'ʌT i䭑KgTLJQ:R t.mhbo'laĿ:-;t;)ZǜёAXYcM>XS"1?~ˁw'qz<,{4H܉m&6Ey8Cq]ξ_CȄ*Y Y'-r9r <+/n/YQXiJa3m%'pvB%4~Dȇ `9"l E&7t9 2oBqH; ~Nxo;ù\ >>^QxVOObCP17l:(a}rMyn[ϛEɤSG7w|vG f$LbmTу%M^-\Tyl(7U4cU4=W4Ȭ!8a1{BCٵ%"il`_! !C?>2`IFɹp9 ԓ|TfX磛TkysvO_ۙwwYJ* .!J ?jӨN!K0k)DJ&5CU]ZҔiKwadwWj02w>{6LBZFW$&_]mBtS";;EP4,?ɓcn3f~aed$ob-HM٬F~\nS-n)<xG+2s}g6# >Af@ o" /!("ҭPk"Q,7z3rxlć-uMQ{(Fx*^x^8riUtAR6B1 \2Tii!2:rB3D7l':-"ɢ#,D"~H`毾Hxc$QF\BVFVKiMC^#Cu5C ׍./+ǝc.#Y \ #]r]JCpoB$q]SRsWɃ!J((;exg3ps9am9m/h]5ݚ-ZF> stream xX XS׶>!prT!C:Xj DZA@d&!@lf(Ș C8q85*jj[[>]n}|>9;{s3B Vzxys0|Mp͸bw_+Y0k "Kɶ=65T[P fD)!A2osrz{|'{@iH_,80O/7IBe Ke%s9~1s$ҠoͲȂ=cq]%2u~R+ {HvJ# x;E*Z#u+v ܽ!(3dWXxĎs:̛`o;dflE7 Fb Il"^fb 1&> \JbXM%\yX@ EZƒXG,'VXš!8&[ŽIC Fh‘0'RfY[e` 2E08b/Fn5{Tv1 Y[mJ;vظYZc&ӎ~Ч"t6Oय़ pVTRр׸dyjnDVOo:s\]}}M%>8SwGg9Xu`M~[6}j񳨇]w G*QDx=dEq*,.oue4C0.>vmXg B.ͱf```?vkn޺0c,\\®7,*sWn_tY8seN1xQci@ Vym"<?i. 
ir1NC^xzhg[ۯ%WO„``"X,9"v#6b+,4NpDx@8{Ն@&ɿQ|,,cfOvа8l(IʇCVn#]hmNN7>ΉnIl=~?A|0ɉ#V]٨NkXE-0،Yb\A9Ya(%X8ނz?»a2hнDOPBƣ؃Hfz/6֯ 5vUoU2A.\7A @1-˗I "+C֖.crrZeՋ*Q}D^eF6E<ȜHxdŏҿJ2qP7ޕ2Y{K Q"6Tt1]A粵|ro+~Ӛjв뺿8H9G>9DqZ8X>rU#Mp*.Qyx!j9:[?XC^p"x ӠjD9)YO;6 9cޒ?(!gczqQt-5;r@IIFѯĄ>gNPE 9f@FKKbpB1C(5 BHjTfs *T!pG#)*$1P Eng,Tކ)GW=|^9"EY]_.C_נf2˗%AvGP^IANy^-Cxwe ը uZUlw8ά ?:(m-o%)Kg!\ts7͝SK&B[ґàuny~T:w֙]?gqJm]()+&4r!B8RHnq,o|8ԢFy,x=_0Oc"a`P8Xa+ IPgw ֮@$7@[ g]~GyJd8i;QSxym:Npx+qHLEF̪Id́ ?^/lCRdiWPv TZ;ؖ3W1}Bn7hm$Lܓ-`2?tF7~8LFilNpQu7^O^-6j+4;M!Ɣ?)Կr$,9kj7QDA*&ƵW6y1ô⺅ݢSڒ~ԸG)'!amd\w E;^3]/Q 0N'8:].Mu/x<5sY< 4v>7H~9MVr.彗@I* 9nB(4$~%*A,߱2pT3ԡVt>߳y*]TǏbeƯ7+#LZm<,#>eNי@|W6DY-hxB]8Ie`ݟ߈d6 :$gl׃O*x|t|m>ؕ$xџuQw ^m| `I@()>}ͨURbrG5aUăR١ʷ;AN0)M2t8u=}H*M&tʘOg\6V"HjcZ-3W9{ *^36k9ZHyܰT-1kZUIv@%0G"tWyDzw5蓔eՔْjviwMY1;ɋ-T 5TjY 2^ W,]1RW=3j=Py9 MOe]DO΁]DR;Q<=uf1 V h77sA b<^ ^n修~6l"g#͗\ \i<>WPx^~&&64u(08 )tv)6/1{DAqC`Ў8{"qWΣſ*;dɻ$LAIu(P2&MG7x__6䫛 vev#ۊJZ lu7ݜs:$SQpp;*`-=tGDCX4<|wstC;j^Ԃ,!Ҋ.ANf$%g2f$JST$k*WD iX7&`3l> Oxs<q͟D 2w a۽7tw;Q$:hLm 0A|9{/iYHIy 04KF^:cGtr<\x6JsJ[?8rQر`n^{} 'Чmϱ[ZT+h_r rH eâAٞ{59Izfu:,/f WKeIal t·3'懗ܚq#Ԃ }tZ2/ThIF 05FG'؇{W X1by'S`hW\]*.R#Si7TEU|[sYC'8h3^6() .K\TNˍ̬T wP\sMe0? ] ۽==|]/Ot w(l%'~͈" +&xу'w/9kvt.^х\69=HLt'?$t^gHq2?JFǾ9IyqZT s Aw :|}|P6oB]6}&JCA9qS򄈰UsL,©|,ݓlՉ~3^uL+W e,[PV~"J6{:24 nFȕ'x߸Cpr.kyx7?_Inxx]? 50odJ(oN1<9⶙F2<`==,L(UPYH<9;@]lr/Uشݘœg$*P^;yK =q++`u:gMBZ[}ï11#J*$;V]%۲EGM 2!3dWBCQhZd[n4+ʒlɊ,6`bCJFr&EG{+;Z"Ph[j+iR}ۧoѴh di|6*l>hϣ<>LLٻg"ˤ矂naYDX`('Zʭ,wKI(FG1#:KKh_6endstream endobj 175 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2065 >> stream xU{PSg1]qj+> RWDBG@A^Aܓ@oBxA)آEaűԎ[kѭߥEm;Ιޙs~;CnIaAOk$?Ǎ+3:c.1#ws7t%/9SBU3fBH2% LOKWh$~%. VdIT+%YC%Kk &uE@V*Ui! H $R.O˔I֩R4-R\025C#ODi)ALQdh8y"! ۈDG$vP"XC%pb#%0@GdyMעk2K<{PT%[KA7(◡ /ԕ[d OPSG61j.!NC]&cI/}v,| ;כ?hpdeqYćD1_gY#A|Lj]ӛv4 tCWi-gޚOu F'="'vdg P^_[do?Ԑ*q"mEX.f68wj֓(]W娑s dL>h9 4ɶo|qdlW6e@<-M<=chiY<[RRfcJk[J+ROnRrXe.-Z[ҮOJ$YK+X,v ]?kIO~";!TEn0Qųvï$!V|qk!fk.ˁEsLtދ[O/^bh ~ fDžN.]eZH{"qA8Eh1'Fy"7f:미vGh ~.4EwFЭ@ܞE/z ::s E6hEqsZ6t]/k-hZVPzE?RF5GDMt_0g.J : ΨNsҠ\`.| Κ2(uuC`YcBC}PT V(~.A$@dn?T* ԧ$X(k1ٲKv-jzb(p~!PU ꊩHzedDcV]acǙK'd(:v(7+{@NXe@7dԖTCN,gvGDG9szp8sm丕lꙘBea 7a>xxA~3":1NZw 5}qގ |j$ͫs YdpN2^GXPm(>1|ȼ~zQ%\qa!YHm ЛBFZrchwDB]ǀr(- [}|{ƆRhK9?RAOh E:S^E;nDWL09Z/ <KJÞ3hs!g52dӒڟ<m?pfOHԡw"Wތ! :ͫtlSP`3#d9յe I2NbmIG_* Ewg+=t]}79 D "&ٖ'诲 q`Be S`Pg]Gmз'G۵}&z, Oe8<*CVz[#ASq^k2zg3f~O>VhS> stream xWyTTGmK߫A41%bybPDDcfTN/_7Ҡ".Ԙʼnc/e$3)Ifx;ӧ~[ODED"&C߼"a(1=G x/v&G{ϡEyQJ,|TetdmV"y*Ben(i\u,Rf-RK<=%̕+#ppKVEm&HIpr- 4N6|Ϲß8EJtK2f~ʄUՉISoEm7ktz@EmfR-,*rRoS>/RoRJjEPkW?@=K=GM\ljMiED <}GƩ_'2ؘ7Ԍ5V>=sq^d.s\8^:w ނš; ~f;zV]-J4ja״u:FF3 4 h:ϰӡz.Ca14JNctЂfr6Pr2jE45lZ `P" `E.H=hREl.NftK '<`8Eé'7[=< U8k%!-No5 Űu3c:~?+H>=F@p줢Gt8tڡ+1e' )R Gtviԉ]B5bm}h3"0!b9`тj PWrv1Q2k{Nsyx4~fիo*Mɼʺ M#'pK˱k`e{ zMA? S'^I{ݥ-> ]ACq7|ֹ^0جiEL݋0.xKL+} l\P02kF\جh Y(LtH,S7NC34DJ}yaRYXhA=bt[x5BafvJbV[[<.r͖ :9u:RSl_r&DcGMF5hɀTOS!Pr`9t jS1JEњ"*i 8}v1SFQdGڅYvѩ{mg /rv:^` MBɯ.;~)p.78Ch*|32顊su\nR0 ah^m)jtbKhZ-^ft;'Jts"_a?-4_W_8x~W r#qɎF}}ۖ_v3% ZoҼ!a,Ù)~f>ՔȈiEv*zar/Obhx6 cL8d[k0^4˜<h2"LJ,xfKj^3vZMXHs6K[K]hԚW(p J۠#[@"?* ŵÖeF{M}hu_E/ ԉIKu j0LiV[m3֮`} hSl,|7*p=(Xˎ0ă肒ނbkQ?xK247`Cyp$0?-3C"ذw Z6k*B]d!lz<{xGkӖHՙ3q+*Z `Vvl4,U"QV7N! 
&6<R*S&eʤmg:O^h [Y쟝;b?M {mVS'1sg %ɟܑrmFΥ9i5|_`\ 0KZygíOcmBu1`sL:R"=efɈT<38oěo䅣zmlAF~vL.%]TW X ӟ-vX?BbOI"t)X6믟m FCQXYt=5\w<5U+i4g^giv&Seh뵊CkpپRaeMQËͪisdP9`;-mT`KA[.Lڵ,M+Mv8`[ ;Gdz&;3d ~qX%<e6gze)+&VB } MĖ /M#hcGE)]!ovc|uXgI6SY cT5[[}ǛKgrxeOKO\]6bC:}Od*~/Ac~_ ېd'&)l)cC9@x4& /pqC.lфIvWYt *{"s̅O:03qW;bv_St%gƷf]Mm>V{\[mS_ IIws8㵁D2<^1écs_ >I +8z#_B7 OXuGIfsh Oݎ}Ƿn;l?YbL.9׾>zg0|g{`12>B zk9J*ktVjXGO&xnc1t\❈LVfHGjp2I!WŸqXzbhbHKE';ba=Z&J4{dD`IIc3}gҶ̻IAfլ׶uC@SI3ԱK|pMqS pWVkŹ4JI :I|eyI "tLboM΀.hzi@6ɢLHLRmK=6NjJ4&(JKR@ ZN|,FoIᲔx)׃孋߰k'贠f K#s!V#yYECiKpcG/2;1:;_qGQɦ?endstream endobj 177 0 obj << /Filter /FlateDecode /Length 193 >> stream x3632W0P0Q0R06C.=Cɹ\ Ff`A#K  RN\ %E\@u\@E\N \.@3c݀}o]s~Nin^1PC[f:>ݫ#}AP`á6 R?|N푘J}02pz*rg;}endstream endobj 178 0 obj << /Filter /FlateDecode /Length 160 >> stream x]O1 y@TuKVUc"D7@ҡY:ߝ|x%I cikDfǢU`o:>`3$=U[C -A#E3i^?iLvw*WUJM5FT&c=|) ZSendstream endobj 179 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 337 >> stream xcd`ab`dddw 641H3a!O/VY~'YyyX~#=C{*$fFʢdMCKKs#KԢ<Ē 'G!8?93RA&J_\/1X/(NSG<$C!(8,5E-?D/17UL=土[PZZZ`d{N|q00~Ksp跲ֿ^_n=E'l~];\| pBIOpb1y4^(,}endstream endobj 180 0 obj << /Filter /FlateDecode /Length 179 >> stream x3631Q0P0U0R02S02VH1230!U`d`T022L(rU()*Mr{q;8+)h\nn@n.P9?47ΎEAmpm]/}MSLJ:l:|[> stream x]O1 y@QtЪjbLPH:t8K结r/#=>)ul"-~H0X ô2񭃐U'lC5jk%hy&5MY b'NC\ HJ$pLg9[Sendstream endobj 182 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4104 >> stream xW XS׶>1sx BE(*"I! a IV<# "*-U[;XZ^[Z۫}c}}}Ǘ>k_Ç<}Vׁ7ؿ׆8 n&ۀ5&6}4eڢW >'R"a! IgΟ8q4H(p VI A% R$8NZPD.93..nX=C&Y:yc\"qkPt<6hjYqX8持2id"HA/HAYUhELlx}B}A!a^ 9s.LFMfb D8^6bMl'v> b:K$fՄXKxo F–M%x/D:1pÉl0ްa7++_dU- ~!7(7+\1uy1j fvW_E]F6jK1ZuT]IL_hNl"b5Awl< g-'tvrMBHȖ8z'#Q^\ah[a Vp]l$3VrvŲ~qMR+ X{]J&s3ܹe@q@!D2WdB2dfJ GL#eذοQ'Ͽ j@+Fo!%5ڤ" fiS&x`3/ҫ/՝P<`B]|SeȺZϠ3|]HR&--S%: /J:2ΤI=I,J7C5:E}u.QiR: "n,i%y:]U%W\۹ڈouꉸZQXKHpj|r=KR ;ƌfŧ}Ͳh0 :-q 7o vn7 k=&ѡ"Bȣ9W3Vvs!4"i&f֓ 7 ]=0jFɍWu*S0v hA5~4G77i3H7)]R^m sQM.+D9Rfwjh={:s 4:g)$xdhcܱpzj(anM*2i="^$ {Di`Ǜyլ D4yOշCFaF !(X˃3 ԅPC5tpMGI:4 N<ը9t URNrm,/Z-]c9j 8CeLuL.5`!ۡެ9bAÇN4C`4ԠQ)AOCXfr k ı" lMIsSSˡrse98CvQN5Ƹ6bl8CND]sō!]@/! 
F*agH`;5Xp_NvjE :s1hh?)'5n51; r}y1nC2=M{\Mcl,^ Vf>*eAn CÚ9%f T `Z򟅲NlU}Gg: rAOշ髉ӊ1[a'JSf cGxxKZ M!D,VXEHCOR`0V~E?^?"/"zEφ5 CurPaBSfNrKfd]ui˻W}pG4d4i|Im/*8!OJ׀o R1мNd17!Zn7RRǟtǡ3gpHϬ#"l]'(rsS]]1[\LWЭ+|ΣUG]sWD٩eLvNU2 "MVCAJV$'ߑXtW8-UNT[񅚖056Sn.K9,{wqdt%LiejrKn`bsǣD{{cXӵdLg>Jd_J @^#Hz1/OlXg5~ :!Y| ~" ɽ6y[[86>Zg 8 pH_?VB^vž "n;_o՟q7~"ñu%bw"O oi /7"x_#|эF4Z!nzIew*7!);vYˢwO _ZS?kqI'}7u~t53$ǴUpf{W*[.U(K`=^S7r;RQ PF< ⡝?~>8qek ƶ0jLkwI3A"I@&B#h (itr|rbw;VMzOLF{[xՈBwWO5 xi4g1ʲFQsx~I!"lJ3ڽ.C&.+,*5Ww,sQT9;u*S|뗼W`IOO-gMpuTRz'ޛ#޼u?͠W0ʂʔ|i,=p:6:,^n_;q LȾyI C:VONK#()T}BL ]ۻ>=1_Pg,tqo"+FS8[EqJ: {0ഡ̑v̴9e9abQgG 4:ZQ'7620?S>6)je2B&UL&opp,VO.sRaTyŷEYf,/1615!QW##W[b̳EůP,endstream endobj 183 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1492 >> stream xSyPg%aUc=v- lCЊِl@@E^( *ᨉ*^S2##zo;tctf}# A$0{v-BHJϋ*`"(6zGoZ P R}6*,A`BA&kaoαufzLdQLŠI363M^c 9`=neZyI#FAj-%x$22û8>luDk1l⭂-MD73£ "H&f)"BL%it"H$ b[VO( h!gu~A~E~7+v+*#۩RuT ":#zQW!Ţ̣yG#kYT>.ZghP;?^nMf5CoܾdQ2_Ή)/ ȠG`|o$R"(&]} c$r/ .hb!L2ײy 4sDsYf^sF>4QSuήv)ց`"T07Tov' 4{Zil H4 ;wl% Jng58IE# d4v<:U 5:!;}U3|m԰ 7[`E8c[<_c3/LaWm1n0iagQsYWXnt.;޴fxW$"3 g&]w!TS~W{.cX/:ࠇ5xhw-46!ϑ q1 /]kN^xo{QpK;/xC$Sb+[!ڑ6N,_8gqɺ@N$p?x&"?E%]_Z7Ujjc ŽqPHDm޹?g#k}AQ?޺-ziC]E[;R8 5hZY#g5VL~.ic*:{;8S7Gԧ_wx맶2G; aӌ[yAq8 c4;8=8ZUGe UddI|S|oSJ|p8YߟWFlEQH)r0w1[aٱyWCpհl:mՍ9'8WUhzJCUo4U1esB :ml%C];4]:1CWonhr7t1Us+,d%SPtqX{Gvߌ)x0q> stream xXMo7 :j7}jrhl4ENl7q"!z"ЫYp͛T=-ooO^QX^/baAeo{ I%Z[WiI2(ۓ˳oݫZ*%SZk߻h\lp KIjmc J)7bymWXLw.t>tLx {S 2X\aI7]ؒEP-$1)TR7™5]Z;}~-.d2(ecDtn^%EN{2=PLJG3[&U >'TI=A#v>?Ҋj;Ru?Ξ/PW%Z3gu"{0-O0}\3R:Y_mIYZ>ؽiS&v'YD1rk,dzgc Qϯhsxm gQ \Fsw9l%Pҿ x|lxߋpns#yBb\KEx7mR[1pJwFϩI5ƫdkუٔ2&c!lGrxGB)A}KN~t[hcOn=Q֚_R݉yFq۲`'M~1iGuY"rͽUՂb6GZ Ry o[&H_j0K"CB #%iuدPȅg~PT hMJz#KRGU1pe۬icov]ԩW4J]hY;Re:k> stream xW TS>1$瀈(zNOPjGkE'"K$L! oB|ԋV[hZ[Noֹ5]s;k5OV˿oߎqd2vIYηdҘ!X91.@Xf;cW8%x]N>?| cK\&K.ۗ sfW- KܥUh"b4ICjcܮȰ4IIޚ9355u&&qF\BoLSF&E6%%V&jbT'1$.f_rRX* nwXB,0ظ I+SR5v 1*:&w90uL 3Lb61w)Lc2f9YfV1s5Lnj`F20<3Q|1.AlJӐ!OB5.7J{ur^pg:왤p<;إUu2ic>JhHQϞ t \ZyG@ |?qť`4\QXN|6c/t@i,/eȑN9p(.Ye(G@\:=ɐ%D $ jv35hk|dN'.HAX'&j;V\j8_sƓQds?1<z&69Wch\]8A07Gx^.q4;~E_Z&q/i8$ySAm@x A#BVC9ϟ!'ݘ[8]ƅZӛG;/+ɼ;4 1pz[==qvX=g~VZSp/\a'v /oTaZpgC_%u)=uVIuBq3nȥa"̙z u=pb KɭbK5I lYS'ҁK]HXv?hF՞xj \^[SkH=PrӲPu*FCZI)qQGʷa]r &5JɓuޯI+}7g.kؕ1FZk#a񅴖5[E;.|Y[-rIt Eo)qIagpZ\(7dxqh]p$5  jt%.jf| ]t=@y;<,xLʓoN #Nũ^ʄ .+__2%"&𵫢YtVv?C)q/yc*J1z8[Si'C)Rm-7&!3'7R]milUg;ûy շ//yR iYaV.&0ݕ(\ſCʗuڲDKuSQ]Rر6:4%-Em]^GЮH1}Zmu$~[޿zKBܴfSO Ejsv%ᢪ#5sp} 9UYVPDzI<ïv|t_Y1̉ۖ8M/Jo0ʶmNo=2S:Ef瞎G>ŀQx6~K6xy*@[vCH:B+;R]Cp?V<PKxr-o8**ysw3j=zN7igkst8=!n .~TkueCNHR8XWf,*;,TT~C ,d5Diŷٛ}Wsq(}/Ǽd(]!κ % 1v7 \ֹ}X(endstream endobj 186 0 obj << /Filter /FlateDecode /Length 216 >> stream x]=n0 FwB7v \%CC" 3(:<"cɫ>i)Z 霋c.׽J܅')m(,jI}J_/ۀO:;3ݡ0(]Q0u(c&}ա$#Qܗí5*\H.υ~Xڧ4vmendstream endobj 187 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1078 >> stream xmlSUݝeC -:2 1h "t7e"]]nnmeOv];CKW"A aF1ń5~~fn,5'99y'=9AAHkju&δV)vpܦޟQeʋfB pS:|aXB"[=Xf1j5β]%% dҕS-EK+ҚR,=ݤjVԴAMU59BݓbaQPUJNe2-LVPک|=~]$zQ#z@=(U 5ۈYb!.gH 97oZ:k& $w?#3/,K]es8l@/+gl-26fO~y~G_2eJ&^2MMRS'Y}Ar{~ D}6~q͂ަ6 ӞH,ο_$` ΍d^ C֝ 9_%i7Xmm.j``nBdHq]lWD"hg7Z?0 $ŷJvb7*[dDUP7 0?}RXP._99UnnۊoWN5~xVh٣n_Z +dX:L\! )["wɱ1>MLdP5"|&Md*;nʳ`6!␸_-"8& y}AwtP%yov9\la ʹ<f"eO RgtRw$02 f+ư/ 3A<;ˏڝJq] ?*e}<_Kesp'Ɍ 9dvw}H|a{oW}ZGk6o7C?W0;pi>tşsF/4wGiN&pk<oyosry"U<#@fd^:?] 
'qA }o 'aac>_fA85endstream endobj 188 0 obj << /Filter /FlateDecode /Length 349 >> stream x]n@D{?֖mE(qXqH1'=]f-Η4ny3m0Nc)u1ndgKV_{I9 Ұ[{KGU:{TMq}icZ隲sz{URQw],5;bPs]J5:&W|[c*\sG5QC PVjUS Xj䆓 ^M@ODV Y `MnlIM fChC`ĂІ _BD8€ b6p򎟗<$uMfeå˼+,jendstream endobj 189 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4695 >> stream xWtSعlQb$%Bx 4&0'-[,k+}^eK6ì`ČpдiSq_/^{99}:xiS`80{r+3_=̋-[p+۬EĜ_\na 3`3 yQ%HowIJgf]kʄq3˗\?%MyyYʆe)9)mZz_,Ly4%+ 8/,/eG_ٶ=e;lr^^,dfO%Y9‚e%A/IWK6VgIsrl+^܃f? !6[b'xH%{kJbCb#1xf,<+%R0T"K??N剟%oM#h،53%=ms)`nؼ/ =KxnUb8;DSl2ԀQWs_ertFiTze^ H^EG.u> \E$lI֪dnFR*p \b"|I(}8){Vpli6Zw9!v=hQ#7PE)y#wFSNК{7eͬ~9jiMa0jb ;wC' yEUJ0r i@Skt;;fg_9+ڴ{&ֈJPio\oVK -// }jR,&V6cA3;ZZFY $"vX}q7 z Z l)b>xo>AxЯ>c7غ,~=US(V[ZhkB&[.gG [@*+@Dxq} GGn'?+u.!~'l/]N&;Z.܌QefI%Tny808q.9oIO;ii Ep3k15`tB;\Tv.A4k8…lDn -5fFe2UQdbuDg)2=blPzdL@Cp+&IPt$7@J=չT5-N-TJ `PUV ಺N ֙c=A 4p6Q۠&ӧcԫA /}!-`]wx{!{=ev?+*[KyU'E{D p ;ϵ;}=4bRDG14nN+1IA *0Y ]%9@ ]ht D}:[N}>]o>X*tnr?)8 Q;8zuFZ'~/YqRʲ(^d3ktBkɷ :YW<%IV3PACN`fQr $sLո/aw+'^6!lj̡2ht:l=V'I. F̯, Qa8Ä'!*#f)ʌzVBLw5jLF*JY[C6-Q-r6ZIQfCń!]C ++;=@>rVJ`fkl]Bl*L=*]r$G9Xʗח3 ?Ϡ&"ҕ8J?um::o◵&5AVZ)4TQrPk@Ȧh:5Ec]B|{Ⱦ/\fehBLo d\\C bT*(xe=ks?d~?~{cjj^,i3Jiho{w sKc`eq mjqQԣy33Css(;t=!_So0zj@&<9z(􇞋t#ga ~Hmm$YѨ v L0'8_ϙBл 1ýle(, Lkh2"bLP'ZcaP7u#̩m@Mrs-=FQڕ[tz RЋTnA &L# ڳМct! <כfCб860Z{5t>?g]G:TX-Gs%p[cbr?,*+i=u\HJ OK(x3&%6y#UlǓs7g&}W1 njRj":ҧdwPxuA,UJ+s8tv譁FR3eqO5!If)65 Z!(M7 @N֎5{AVt G!pu!^5 sX}$Z#QN~tL⤫(G@GƳ m *ø0:_;ҚWsrD]Wۧ&dnK|%44&2db t [m XE/h|I~r$fBF B`~AcTOoьȵz1+䁄42][=)# < "au@ITӦ#}=w7G=_X=mWQ?U*֊pdMZ(*2^V^]ATFǗ1$KBJ>\+r3!oB7Q z @Xu%&/Jj6ݑ }ܬF!Z֯9+g+]F.![,bMzt᧌\5Bvn|sG^=͍<*nK `mCD\" 4Q|<<ٛd]KEE.p@1srN_zgZ m+Atb߇zUPX^Z=Tyb?EG 7 h#^ B /v4;ylu{gOVd TYlط9Vtp;'&K(;?g`ěܫlh5V@˺pK@q/VpJI˄!Qos{(j_\R",&̘n{?M\9twOFN=1{s^ 7Z}m4 M٣ºN-Њe'_q6l.|Ww0#ѳ4vU@n.|]o?S柀{y~5KUhcWߌbo>4j,^lː_U6ɼ~:;ycVoDUҰ+1 [T-ᒵ6'Zf]Em [e_sp!xzD_r_S[VqMw"Cw@{m-gexw1 2M4T4Kl|lF"6w= g 9*|Jt&TL-TB1Xl~ f~ڝjD#1MBldeljN2bWiPJ6ݍkdH2)z=((G9- +=d8 C<)KBN| o9%]G A{32zƍHɡ.{[W^k9[mTATa3݆qf2> stream x{PWƿo~_uձ]uSoPAIHB=o@M"KL Zt^괺uk 3uo3;;;gq9{h;wbʷ_JT7oGN]/NMgY%o`G{$Cbp2rQBlU!Q4nWIܲr5k]ײ$YŇՇ,ve k]YiZZdq,~)k'%=5}w枌8dWT8"6a+EEt.D ۍeb&,{ێvL4)8z/~G; lGE4*%)hpQ2k%ejg "'Opտi]VLφ2dy,I6ۺXۥAN#JcoB3E~r5VGs=uQ:(|ni%mQjFB)/+o~`UHc نwJriNP?тECJ~J $`b -`8IKA鏂Er _'&Օb࿽ƻp6m0i*r#O'0:d>pO>"35V"EEŀ+Φ6w k py/-vp~U&0Ya60VmauJ9'=1 J3I"aoS[v$4"]/m5[MDhj| .9Onjf^}Z4`ʠf4hy6*!sBx ;g4,EѶV'y(ڎefp.ĹY\Dkw:z*HO' d<#?hNBĖ\׃#6؈>^TÀΨ2(rg.8Է¼wz'~zeK Ac dlQr%=!C3ޢ5*Lz/p0vH#o"#cKc+6K ;~sQȸe06JV~}\'?>s& \NnUwUo  F;{Ӡ3L.J4r0pJnvj9CLJZE?Zk D@e$%Û%\JݪʡDwjv\G}gʦ2\m85wP,r%ߐ7yHF B/8`BH8mRAJC|FSjvC'qw&*I/zUl.*}oD ^nTP,@Z4I&gk+jݜOKO$X$&΀b[=f{ ;bKendstream endobj 191 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2273 >> stream x}U PWaFtbd+`h4jbh '(?L󋈂3 #IT]5knmhbe+U[޽N{9P(4Cif'b\W!=&MSGGwQށy<@M07L IKƬĤY̜)?h@MzCvj66}v5A&kڸX]V _{94L"4$but?\aՉɺY;oMAQ!j* T05P,QS(ʗzF58ʝ2QWunƔʝF&Qz2=NżX"NI446>l5IYP0S[؜ҲaC>GqJGZ_N F:U"UN:o f;ӟ4Dwo$MeSt\R*p82Lz($8=p9H܍ѭG `qdwZk EQIlU} aND])]c$>pjtjG0PcPg*m|rs|GC’r̎  [?.}tmҦE%~XYmPbT:JjVb^ط+[A_>nt.\b]Qw ƓC}bJtbE>[Ay3̦_hp./o OL"sx6hWw[Ux| 1&;Ȯ9e>,m\Ե]˳+/_b--OV`L$5; !1Yz+p܏/_ޱ^%UyNj7|7<|ok_]=[PbEL~cQNk'AFejAVqQ~0m=m{ڴ{ml<,rˏ8zȟR_Aw+3^w#ZxևmCJy!ARGDR7KejM쁞Så;ܾd>+Þ@KGBנR)ZB:O9 nqogЉ>%/...wǩy{p݇Y@.K]Vȕ ۏ:"%=ڍIPigNs  <Bx0 ')SM&lF+GR\‰\e|JZ/_v3dh,YC.,t}8,0 ;'^rUz#f5%=@3.c }.|Sߑg n};r'Eq8~mBٚ4˴ttk5%Alر$܍QuZramy6B:X[74 ~ C[T)"edfoNȌGrS xq(V6v79m)I%Q:u̿oc~-ҟ엯i_%|1) c8dg  kx8pi/u[7 EO&_‘L҂̲T=N8Xi!Qe9 ҆J߁KuZQv{`-c~GWF.yqَtGvgɓ=8Ha?,8dljׁX֎))i2.zy̠qG! 
Q@ B~Qz QH?+>\(n `=zZ򺻜+ǾW<ݮ\G71\`P:įH "X$Ԥ1qR.ԕVv5%<Ȅjx_Yjq E.pB8B{TZ^^j Q$K H4ۊSZ]^_=G],~<+w >taz7X "I7][(ԅy.6ii37qqў9VeX,cO϶R+bMm/endstream endobj 192 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 787 >> stream xemHSq;^Wۘݥ"MP)iElRe^ӽh9fXjl~Эآ&XVhaAQhvG΁C I"BL76nkm 昹O#q|: Qm|:ZRT37(Z2 \W-E$ATs\S[desuZ85mlQ6xh9ffGe^!f6kdkjMƆ:c+jz[WYnX}5Bm[) !H*$4zL 72nT&y'y'QÏG| (ugBrl o::>㞇G!5C`_YⴀJ  & <;E''GtCP?!_ =t )!4kh\OJ=ML͑-Y Hpd?u],d&spC0[RN-}^) \n]g; Ȅ azhس^2^ FU.CxF/{jTɃb.>x d#VBFP )~Wt7JHdFI\I&I\'ǚ|ę/dY|̫C89hǼ̯j ;~`LA;B,Kj 7O `nNF#|V+D"RM`apߔz|1}/bn(J|$J*8:`Oz{{Νpjendstream endobj 193 0 obj << /Filter /FlateDecode /Length 8556 >> stream x][qA^2x_߻c82bY2ِ")KbOkU]Cj0`KutMa>~yU8<{s]J=<~y04"%uxU:(NQCIxS_]o'q>jOolǷn)\J!FJq㷘Z {d`w|rn!~k,%LHPGOA;d+ԟVyR+@R@Y9{H1#FCJ9\-zG٢qNg)FNFAjO!fa֣p W%Z7mXrbnb-%g}iRPmh65Wtq'jY`ߨ͝eNXZ\+o]dVZE+mա 7b_N}STܫr5mt0SZ'Ԇs63th7Eo 4rXos#W6}{b;|9mSRW}:5iҪwچZY(X쨷M`7K Ɲ描*%$9:c*qS*]qŒG1Hy3M'f wFz? S=A>zs0PWI;Sk}"-=:aGvnFeaZdEmָiƜ\=/s}W&pB|[1zyt~9RB9%p =X ek1l0G}{cu" :,DS.E?) )k2F؀a4#v'U mNI^蕠ǷK4۵~n|Tq}ǀ]vxd$rY}ˡ_x?>Yr>,u)kAл~2/-?f|\ըcut;\i;O1' -X̪맇/wWvJg:| Oaa>tч; (HaEc5$@7BXAӔXCn5][jMˤ5(#`>96i96$S 5B˕I{jX$Դeuɱ;BJM!`njVؐ 6@(KkW>˲ ՐaSmBY0xcChB~Ց8*+ᢄBRK9 R,* H,B-zEa2$ƪK 7m<NJ`ѭ!p,p/NsC8g4)RH\b;QVE-YXN6FLYai>{VCbO;m+g2=|!V,\e09WGH0^M&K0G +1RH,4g R*9sд\kB ɵ:B96\kB 6d2S9!wuY+eq\ c5̉9W('0RC7ŗ!iyoqpt] #5X[3VGh~."{GhZAԖVGHZW&`h5!:N5!e񨇱Br/ BYVG>x `X !R{v]!9dYi97WGh`xl:ud,uωپ#Ii<gIòyaGH&ceMG( 6DGhZGtDcCFfÃ@Ff3X 3`æf͗ᤆPܻf*@ێ!~ @( 2G ^l1.y),z#G +nܷBs%&[Haƃ֌`;BZ#pp`f'kԁR O.)kB 6ܻ:BljP{`BUY~B 6`ܻ&`y),<džKBXRXp#&`;2F.cay G96#4-؆)!r c]c1l1R*g]!O{[%GEHa.lcv][EtlaKwcC vaܷaZՐ4٪#q8!5kC3,+n` +p&` * XDojXW:rb,U#o^ݿ~o|qzY +C8}I Oq(%QK<ӗDy/p0X R"͞5X&gX,"u@K$#e`qrqˋaobe:BX v8 VG(Xn52²0i@Y!,LdVAA{:2XxHpDGK :BX0kfil:BXQ/?g/v -h7=}=際Z)̃fznVmWJ͝[َEE`YXMW17(0RRrߌҥ!~bm)?~] wt#*j?,>k?DU^aw74P2\*I^?:E!,IP>894$+xWca:PFoL%<}Ĩ< =^Lmغ 7WzOo'b Alyۏp-y~~isAom = 4ލ,U5ao=:ڔVw!Xϰ)evi{t}cs4˄X mI\u~ [UIz g>z7~SS'T67^a xX^-*#3E+nb{[!e`9ޥH;b?ʁ=&zm|ūQW=Wl EC̃?XjkѲŷȓu#8x5UNo`~/Ѻ>$⃂KhBsH=HL:YZ0ɋWbʰjO[fb|5`=FӖ&EIʽ¶(ކŲyU=s~['}TMo 9Y)=cFYK$ށuB񶍏40p^gq82N 2.249)5c a'_m&k8>!?v0 vxNЖ%~Xkx0oY K".F!8\k 1*hޟ+FC`k0Kb~ўDW~K(X.)DoäGYI_#G?;xq/yd2HCez̯Ӳx z3|8Zy뒜K<=|{BS,3Ylӈ"3/K{\ ,`VK ?pI]T9h}3r4@}=7]I0yY~8L&)l;cscHpODŽ ֛'-=.=YW-w6doD+3YyZ(|},$+gem߇RodzªGk3]KL)0%֣aoqFaJSCR=*ǻ0v1E%~l}Ovⰺ֜׻*3 || 9j=5i.~+n^<͕ΣC>H\]?*0PQ]j7L!<{sr"7BPux*6áYirVtc-;:W9RT/"h?RfԎ]ߢ 8(*N>2W mi! ] 0 JųnV58.F[;ط>ß2E – YS/KS?yE)GFHxzh3 < m:`PQr} =ƒ8T˃z\  -Ўw^^huٳNXjcdWZ9 Qace,e)F16ȷ5A @,UAaY?X/ǩSV* QUPZI6 ~7 Hm<^x6s63 SG!p x.Йqi%lcWcd͊OîZ?QWY n`2TsDP@%uYBQ!a y/'qm;]|[gs4Ql&/c7M` Mp 0YgPË*Ӎ>gLB'js;+޵Y;A&~_2A5jWR&M!1?vRt8IqDYSaF[p;/#rF6N߰?s7=[ϖ mra7cS/lewkw}9 sx^xmOyMYʫHGCZeeD!!endstream endobj 194 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5927 >> stream xX X֞3jGA _jU! @I !!KB@aIAEĵjVQ.Z~>%QC}.̜GtFx<k[7q량07*?T*wW,wlnrGyd&T$NM9̙Ӄ'M0~BtJ\dD脈4OZad\tشYgffD$SbόK_X2"!!&$EFE$`~ahԴ̈eőQ+[wDž윐=qtӦ ?Ϟ3w1[DN!V#5Zb@l$6 `b3J# Exb11xHKbF V3D/1Ol# &C_O %1x`!zDo"/y^nc r9 r?{R|JE9Gza%|O>G=oB?Q) 0 {@ŀs}*~>(|z}s׆LRwiCc6m؜a#x9pɽR&r_z%P 2 hzTR A6M@]E R%6+\^4x zCc ~ rX/+Cr_GW<.wNؗXMq ly(:/U~s  ED`NnB[XR\fi= 'u9:%% w`w¯T/:YO}]*I?qF[Zz8K}(+i؟y[h;܊m0C_%fH[:u o9y:aU>gfv{cz~Eaþ$C鰝Λ|Zp.#r'W}w苀*ϵffJ)RU P(VHl:(m HSI?6+P6} gcaKVO?`/-/`( {PہӴ4HiI( }klO%uWl&?md WH |>.y؃N> 7PI]A1p\Ҩ? "4I&h Lz#ɻY 6T$٧gdc8PAzgW(FuO$vhlŖ3I]*{9@66lT5ufSENsa4:ڱHp(#ly/lx\҂Qͥڮ-O @ؠ͑-TEVYi,^!t:`Y ~al~ՠZ{|2^M AzҤgO"G*STЙcAP/݌}(@v@>8̚ۏ:`g i4r&XNz9Zn6+SRrVJE\Yz}n;(`9HyC3"IKg (? 
VIL]A\P]lSt@.jݾI"`Jg|A+419b1eا7z~4N`dOd ̢bTuV D=xD"tޝp*+M޽Us~a:73*Z]WV}}~tϵsn/W1сqH¨bo[2-A NڧE0=AcO,t/Z ,ByCހ.Sχ,K [?]Aʛ-ޟf}Zyx* n%kr3 50NPK~$rvӍn,yY.bj4RۜaYAlpu<#נ&8<, <UM,9_~VPTcޔbϳnh\J&5bʤ1xIbYiStjep(Ec 3-Γn_:|%~DŽ,o?V6,: a/$n ʕzψDJMlϨokbи b;'?> EÁWL2: w3Ga#xʘ 1^lS2G VNDO(b\Y!wpdžEwm5'šK?.E {>h@$~w $~1^//rEv)]7vP%V]B$,_zf1Pl+ّ}) &]ϳm4.|<=ώˏ}S^^#vr* t@J\ƩЕq|DH}q{ 5wB5{S6pz\NJ{hm.Zvk;tovq{()mbdc {&}!pVl=ӹ5>n@?i}ބpX: "4 a]F4Ms񰖶X^słV`q.σ{>{nNkKNp$:xւ;yo[zx皸A4A<5(^yW* *GK . {mD4&ba÷]?<(4 jNB@[p HPlF?E.^X+ݦX: B`o%FAW2 =8,kjHRLIq>q.$ݪ(ʗ+Ռ$ePD 0,~:Mܥa\ރN> !{_o޸%Rl}eJu^SWML3/꣏|efD$rU"OVJPXk()lH`A2/Y̝ER}S Hب+duu`&;MuJ"AKUHj4RD#߭܃Fk)y<{aapf?l)l5「$e jMzINP g{Wv<دsk/?wddùh><}+ؔ;ɞO~=+|7|?\7*Ac/ے_ZS[dQ=s`PG3vE'{qUjΙ']pyhk tbt)ᛛlH1"KL7&֒\'W_(3`N;lD5]$L22v*^MIh63N_45eLl;!eKJ:OUJkA LکNxNng:K!8`ו˔*E>H /D#Co{~?€O>DTԄh:I A9I␷z xUk : kCVkFNN@TTz\Pk32pp1+ cA1(ys_61"h5b,?Gu (m;,Ēl||xsz LTENm.u\d-Z@ Z6[|'3/B} ]t>z^ & (jaz|«WH8 N0pxãǣTaPP1=%F+GP_Z+өYHue6YK0u€jU.1p:.컊<x߈'[0@_"3Y.S3{ A]2 {Ok /2^k.,ҏ&H8KHp=}lB^:cf؝É s.990ZJ\tpsdF8Nd%\#i.Gzdf#K !BR)%``4X m[jɤ|`TBԔ"/6뙂jCv+}-|t+nךɋ 5 jRvjw TyPߧa,*:S&l DFst0.n!Y֕NY\6]Rh-C\0)x2q¤gPoyn)Te2(" \ n) ҃]j2k}i]W)W+^_ra ]UZT 2DeDg`D--yE*`N*@쮙bT jTv"OW(3%Bx n ppaX)K-=7Б ፀ+.f`S48vwIN!RybE>[u!ڌs3eַ᎝ܺgV~f>"hKajiT* 굴qM5`+˵sST*FcVa)峿BV˔Yr0jJp2tDiU~Ye5E&Fo*:7 LQQ]iS*LW+4eg#SAN.dc &VXNLs:j]ne!4zހ ~ʌUbIAchp2T~YVTdWT~•[YJN4cgrfFF"qQgk>NYendstream endobj 195 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2676 >> stream xgXw-A8YsJh"`4QlEDM`pE *D%DR41H&!,܏>;oYѢh6ILIMNEsZ-V6r h#L[c8jc)vYg:n- qӝ\_1) qIJkޢIJ]LKQD$(k4EQ%yoINIM̈K؜}r:\{vNN c¼ŜaA,bOHn7U&mzzPi84 cqЊmpdC*aq*m@W?C=c(҈1%TvK'{|ӊMK17~71Qrt6*c``T6p|~55ם:1WZyS>.%_ [[x XWV:(0N,6V6p~2 S5ī 3o?"fhĿЅ@$0k$d+Ofj^øg`n]T^3A!I +xS [|&ۆxy{Kmbތm(Ff*O8ʇ ["IܮhZ#=xAPlO--l$sE-a}M,ow$$]Q8r& _MoŪ?*Ȑo;Ȟd5n9{jQCUC݊?y {] *+4m7ϤϑA|J?-[ob%~۱:SUف}g!>6HZlRhĽiuX?N#pK"WnW6 _ﴕ_o.q҈."&V%xXH)UN΁+[/r0g}܇q۶e+D;+`xeFp o>XYv0'jgh҉skCuy4 FwFNPqјtIhJSO8Yۚ^sIwY۾z A A A!`KKF?ֶ_xtb.^ eܗu?g`96tM`XWJЁ[vTD`~YX%qwޗc 6OiXu=}{@q~|Gh%~؜%yTV4"JUőnwG)Ki>*ragaoAv"%qM ؟bpeiu°lX [XSU҄Mkg\Pyu$\<y/.^`w#e`9|+BD!̴ e屗*#g8:3;s† jW7. =L ~l*:sC"0'J.Wxy[([7o@a?hC: F&Aa#Z1+MM50NH>-6B0Cq̒P8h &cކ}#`mMf&bɜVϵTl|^ 9Zo?P5i0kӼI`Mw?nc.G"^\}ajXw|70V10E endstream endobj 196 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2355 >> stream xUkTGf:-*dza <"O" cqEGQ71*  j95Ȟ={w+h1]b#g sEr%$;.:m>'R2 |̙!yEY:z2HehzaVFҕlLצꢳ KSr (jڢܼ݆Դ51kr>s*)JEPTJ)w* B) E-TI-RQMM(GޠjDdj "lT1UGЉ~>=`ѦY6EAV!xj[my{MA Kwc<>z;&=?-wl%ɝ;ɖc]>Whgv)5{U >aᬮ}=28@[аXL…ܦSx=J?ZEz`'~38 UZ\H{b-no_|hTs +5!$¬3; pG;>%}*q9%]v0 Ad( k?[2^}F/q?^[!n`dm ہm)ڕ5z}j)hF t7cҡ8 a@JL?[B8PvtanTe(JAD#%$GY&vgP@4Cˏ]#\q08&XCZAṰƶd,9,qXƶ8xTc> %T⼖uDpJjp{ PG$ &~it4~LMWE~,.F);Q|=}޼vLHa>A#Z#22uQjTNoNVAYu3򮳰 /‹L*p`*%⤌p)QJ>wezOE80}5F_Gt :2(CSEN*7"gAƀtmu%XYxb-x(v$B1pA^h"ع6?D͗.'Rn޿t}ޚ6KRJ2-o^96r; T0sOK {'*FO 2ݠU ^?C$Dȕ .rRo#Dc9%d"Jšd` fիW$oDKXm:\&+lЈ !io~;̷^N7G,[w[}# 5!zJIRӐr"LxFJZnI^`Kamz L24gRvfJx*wwc<kkcτK\k=z Ϥg V]~"< eF2Rf4gı9Xi v)޵zVI?j 9$C 5sՆg ޺@6\L^~,\--Hߖ9}5);q_OK\u+d^ ma;Ìټ&'Cpb~^LT'?4hv5s8|Y;kk>}}*L߁xn)䡯/]9qKג+YS,n%^]4t"#RHwXV=Yp|)eQGy`a-K59 qO#UoF Jؽ.+ͫXm9g/?ci[b%"@'PM 0nA@IGN?t{9y9L)0Y>1],ϥ{/4q>f:&bǦ; ֍> stream xX TS׺>1CQSMPV[lZ""9I'!3$ bZvҧZ;<{w<޶kb%$kuo1v ѷ5 ԔdMQb \-~-Q/y_ ǐ9zO@LQP|E,ŸMē\"Jl#ۉ`bI%~ij:b @ǦOCnx IvUGh+^ Sj:]H>AcI=gҡQc {V!2nG0y.dnc;VM{* XLؐ@ Z]^l.tP4GYh erM/?PR B6&"& 1&9V d~ iҒZ{Dcչs[91 b =EZ2bi+.WwYbT2^JWܚ]UTn^CA˛e|;LZ- $/i_'9M| y9\%? 8h@*YUYЪe 7c"Jn6(cg4 y"?[t|k7YFhכ˨1[+gsc/z1Qe>kF.ꛀKK934QxhUQQ{\!~)mX! 
7Kψ $ޕB Fvf/Rx&JGc(MSEaPQvn1h%6$M@#3}EQHL em9{] [%Lnr nYzx 5sG>" /{FQT4{8.Nq=j;hkܴ1)f疵+@9V`vq6Ы(6Г\`ťӁ{H끵PmE*QbZh\;FCE!.aQ.s@3fr`)D&hs4].߽ Kz?3֩ 1<̱@U(uTg3cͣ0 'Ry\^.]+,9drM9 Wɡ{_ Iިʽ-ڋ] ww-qz_<4{Pu$;qsRM' YS/t;:zc9XM(VH j늺Fx&\5H/t kKBk0Q1;%(w`烔Y)hC K&au)/9/ R2AO`^:tjdȓ=qrA3ߊ\H ԖYYjqCRMV9 mPPL4Dh "Qnr›9W/8TKmM2F::5܋,nWBz Xm`(,a.ZT/Uzbk`%|Π f CQ+k\wA&hs@ihvVJ [SPRH[]phIdQL_j&O46ۺTյ>̠t*Z$/_5>;E0f%\HD@#ܧE[6GaÝ LˇLEka:ఱ:K1H ,=G ⁒hq>N)))9YI$pP& 9m澦Ҧ2sC,vϋKGa :See.l =K d)MeT0X.nûaErp@֩ nnqm*: :Xlw֥iu"Ovb%,%hǞ^4ŧ>vݠ-=f7@"n_Sb{:$pw6~8x-^0ȸYYqo"r<ɂsCB\oXFh1"a.Rs-mP Y,a1,f;n>_G?ڻ=y೧P]MN)rurdv(9fPi49}UNKROZT`^`&wWn7R~]E]jChtj- ="w>ReOIcܪ63Eq 2ƻ^#lI5GVUpcߘ2gܔe9 h5yxmݓMGqwMzm q;7UާĉVb .v>R )*5Gzc7{.زo~jw[B/~gٱ_"z[{/υ;+.߫,quCPnOܗ%+J/[jm4@Ԋڪ]#'st7ctLRq"TiB('8 (/m4@̴KcCھD{Zݳ[oC|t]гf k ڟb?\>FzLlTH?8a*zz9qDޔ N}KJK #wÉx8EPNW&eˢ"2op+4 KqB$DnB}9 4r.~2۪)tC.~Vj~7`9Au0*u<2LjEo[iGQ]m@bUc3Iv4dmQiVt7~ynúI::<܀8B aQywIOQ{WKa]hher_Fx"I _n6;=]}x8 ;%J6ª-]#͢^[7vޝ@+.W?+z_piG!pVd07uVUiB^?Q9)Nۛ7":ؾj8%Ѡog=2Q%CS;<]'ǾP5q6q"A؛xendstream endobj 198 0 obj << /Filter /FlateDecode /Length 6520 >> stream x\[u3Wyܷ&q/2D(`%״KJVs.TWRz਷.;ڿO_=6_\>2׏}dퟧ.>} #Cr?W텵P\&O_=z{k]8ݗ\!}wU Zjt^//g Ä4Ԓݝi)N^\?ߊ|ZOIۨ >׋xomфk1K]>83r8KutgOE<ވdL'q`jYH)l8;=1ƐpdSG{XI?3uZےk)HVvz+fõ hMJ1g,VCP: Ey|Tl-w?\u!,!Z逡|'baCfJ+N|F[Cϻnm`߂|3b/$IemEw(O& q i{ <4w~ݎ H:J7Q8UB7  nힷIp94$|In}0I@Հ$@8d} 'OTIUQ;o) Jn95U[b74^:nX'[~Zuy<k6a@|%-ij k^ jB,r[֨orEqm}GINfP@XmtBBSMc,S-.ŁK嘒gNk|D Q$9{5-wTh |rѼ=ǵѤͪDVFޕo}Ç }d&sE'6J".rwVRcRM`1\kAi@ a]ͼYIMIhx۴WH,y x>sOz,짹7 2 ]=Yq~'`Xd^sh~q &hgbzZ*F?SG%*u|sz 4Z]1 || ":\1*gg[J7;+%KN"@<%,eRJ6ߝesr2)w B>'`#hXOk.$X%]RN6۔vv:Xy ۧ^w?MJq"ic{м 2M]@[$Xp/U<-і3V vc6&Ta.g>k-3Qjv+xH)-{ YnY:d1LWBHd(7x`-OX!Ugao42` s> @#>X{ U4lZ{?ZMJt4jLYf뼝c[;1+k<jD<Xa=8 r9_vsJ'wЇL{ qŨkǎ80נKSf(1ba4XKu;1ck܁Hqm4K(oy":BᏌrKl>oّȗ)zin;W"{"}1fV2hefB> u6ߓݯ/%ViPW U MF6r__t=Do]0T08qoK@SX/`_5ož&WWT {&nc9X[@h.*fԠl=X+;VŬd &,%zIVu`}vȆ q f a$ܻof,x8Ζ| }o>6^D10P g AqV+YZqlߌn',+w7%i|wS0cbs<~h޳#tqHGVEnzT}qZFt#{C]&h~LpC>‚Lu?tmdp ޼ޒґHa,]J[ߎb&bM0^N9Sk Uffg:pEæ D)TnnZ7xp?"x `֩m>tq&3)"/Fw}N V1 fl$A(hD!Rl?p_u.5ׁٹ9sp *o:,p ,ɘtTk; |q_BқNݺI`khђ&X()mi UKbBjr`|Pn+)bhRb) I [ Ss"!X(tz d3cKP-E'H|b0P8HgZ4KWPUKyXSGɫVaQ8_TupqA]Q;+%L,lۍ¹n׽ *ef+}E9 dO>w0:Bwu1 &E0*hkH # `f6O}jSMN+ږ ҕcW,G6v9׉u䬔[_jC,|lX`,.eGMqyc(4ZrF)}KvS<Î7{b{ $U=3"(1es?Ҿ!>FDɕfO,$ro+:pR*YnN3"+lQjv+ \d"LWQR9[M G=5T~b:j쵚 }9vRb4OlT*ڱhd*u# /dքS ^j6q>P\M $2 C 1PM75, >c K ~0qF7sjZ%k>Jfki?+n (M gqGײLIj婎2TzClݫOxXd`#}rǻ3)Ql:/e-ևяZ޷Ϊd.z9퀹dwX`[6av;qeS5Ӂx~F{? a:j"Ʈ7:Je0k݆e:[ćM޽23'gL9jQVJ4L*FU2M{>!{Pq+Ƌպ.i;ިg @y\֤G-mkg32ŧ-s}b\Zl诇%}z3 DECYsѥ@| V 1Z4n\oWQjyRRkZe7tQN ZrAfm}Kָ\)t.:5MK+Lt#mz?[Pf8 Xq:C3;-Ũ#3_/2kڪZ.R[uF,H@G@_G5v@I7C ?6Ic s!D) `~t ;r#֬xb9o+{lDHQuCYPK~fjP"Gq>ȓvYoSϬ<;$]!Al_)JoD+hB_gm Wu{/ڰ*:_M3l[Ƙ23p k͠ S.>ʠB1#0ݜI,F J[خ*-˻nv-PnFo`iBOsݩG_iI> stream x[[o~#4Gφ%Y$&A[MCǒ,;tl]E{g=s)&7\\|7{8OǴ"ZJA٪]40(i_|Z;WG'Yux]::{RgY5/ŧu_ux^_:x MLï+fEB&G.pZ}v]t|J๬n#֣w1I Cd??8*:;ѫp@fGE=TN]N|cŠM>Wۇȟ}TguD{UauԼd` nmle6ND9ake9bu_-ߝ94ي;\HIL~2PDsQ'lYϥG%dފ¶-?"A.oX;bGvKq(c5N\זf#M oASW4T^uMZ<$Q.0|k!QW:i qI]b5t.pS祫7ifp7֋\O@4FxuhiV .h5rO묩rXpvZQ4Tu-Ҷ}l]Z-sf@-,evɀ Ћd(qva[ g@ Ss/%Y#XM ->_2BK=,a#]I6^֋{{ Z2!@Z?ھtD,Z6=\SDe T,,$-;Ѳn͔hY/'. [ Kdh٦+> q,|A¬ޭ0~'[3f~!Sp bh)ٷ36YDӜg9 cK!X{ٷFv:=R.pҥE_LhK8r=CH==AD73]qԳeϓ#]`[ާYM)@̀4V)J>̪U;6QkצQЫ-[_`2eOW4Ƨ,ēM`1iſɋm25/0dc,p])GkY&tF+.5cwAj=Lfv=h^意Up(:|FAD k} K'MP==8iȳ. ̆a CFnU^ @|V_IuQ(,XR(rH&Wٲ"uIo?v`pZeq<틭F=Dc}fVOL[Gmx} zIF@|NzVSN|^DVԓJ؋. 
R7X,"A$#| "V" MDrlV`Ye@Qx`D2H%p h\d,Re &wŐ,"Y6enKٮ f3y`[,%m endstream endobj startxref 163130 %%EOF surveillance/inst/doc/twinstim.Rnw0000644000176200001440000016236714024100031017036 0ustar liggesusers%\VignetteIndexEntry{twinstim: An endemic-epidemic modeling framework for spatio-temporal point patterns} %\VignetteEngine{knitr::knitr} %\VignetteDepends{surveillance, grid, animation, polyclip, memoise, maptools, spdep, colorspace} <>= ## purl=FALSE => not included in the tangle'd R script knitr::opts_chunk$set(echo = TRUE, tidy = FALSE, results = 'markup', fig.path='plots/twinstim-', fig.width = 8, fig.height = 4, fig.align = "center", fig.scap = NA, out.width = NULL, cache = FALSE, error = FALSE, warning = FALSE, message = FALSE) knitr::render_sweave() # use Sweave environments knitr::set_header(highlight = '') # no \usepackage{Sweave} (part of jss class) ## add a chunk option "strip.white.output" to remove leading and trailing white ## space (empty lines) from output chunks ('strip.white' has no effect) local({ default_output_hook <- knitr::knit_hooks$get("output") knitr::knit_hooks$set(output = function (x, options) { if (isTRUE(options[["strip.white.output"]])) { x <- sub("[[:space:]]+$", "\n", # set a single trailing \n sub("^[[:space:]]+", "", x)) # remove leading space } default_output_hook(x, options) }) }) ## R settings options(prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE) # JSS options(width = 85, digits = 4) options(scipen = 1) # so that 1e-4 gets printed as 0.0001 ## xtable settings options(xtable.booktabs = TRUE, xtable.size = "small", xtable.sanitize.text.function = identity, xtable.comment = FALSE) @ <>= ## load the "cool" package library("surveillance") ## Compute everything or fetch cached results? message("Doing computations: ", COMPUTE <- !file.exists("twinstim-cache.RData")) if (!COMPUTE) load("twinstim-cache.RData", verbose = TRUE) @ \documentclass[nojss,nofooter,article]{jss} \usepackage[latin1]{inputenc} % Rnw is ASCII, but automatic package bib isn't \title{% \vspace{-1.5cm} \fbox{\vbox{\normalfont\footnotesize This introduction to the \code{twinstim} modeling framework of the \proglang{R}~package \pkg{surveillance} is based on a publication in the \textit{Journal of Statistical Software} -- \citet[Section~3]{meyer.etal2014} -- which is the suggested reference if you use the \code{twinstim} implementation in your own work.}}\\[1cm] \code{twinstim}: An endemic-epidemic modeling framework for spatio-temporal point patterns} \Plaintitle{twinstim: An endemic-epidemic modeling framework for spatio-temporal point patterns} \Shorttitle{Endemic-epidemic modeling of spatio-temporal point patterns} \author{Sebastian Meyer\thanks{Author of correspondence: \email{seb.meyer@fau.de}}\\Friedrich-Alexander-Universit{\"a}t\\Erlangen-N{\"u}rnberg \And Leonhard Held\\University of Zurich \And Michael H\"ohle\\Stockholm University} \Plainauthor{Sebastian Meyer, Leonhard Held, Michael H\"ohle} %% Basic packages \usepackage{lmodern} % successor of CM -> searchable Umlauts (1 char) \usepackage[english]{babel} % language of the manuscript is American English %% Math packages \usepackage{amsmath,amsfonts} % amsfonts defines \mathbb \usepackage{bm} % \bm: alternative to \boldsymbol from amsfonts %% Packages for figures and tables \usepackage{booktabs} % make tables look nicer \usepackage{subcaption} % successor of subfig, which supersedes subfigure %% knitr uses \subfloat, which subcaption only provides since v1.3 (2019/08/31) \providecommand{\subfloat}[2][need a 
sub-caption]{\subcaptionbox{#1}{#2}} %% Handy math commands \newcommand{\abs}[1]{\lvert#1\rvert} \newcommand{\norm}[1]{\lVert#1\rVert} \newcommand{\given}{\,\vert\,} \newcommand{\dif}{\,\mathrm{d}} \newcommand{\IR}{\mathbb{R}} \newcommand{\IN}{\mathbb{N}} \newcommand{\ind}{\mathbb{I}} \DeclareMathOperator{\Po}{Po} \DeclareMathOperator{\NegBin}{NegBin} \DeclareMathOperator{\N}{N} %% Additional commands \newcommand{\class}[1]{\code{#1}} % could use quotes (JSS does not like them) \newcommand{\CRANpkg}[1]{\href{https://CRAN.R-project.org/package=#1}{\pkg{#1}}} %% Reduce the font size of code input and output \DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl, fontsize=\small} \DefineVerbatimEnvironment{Soutput}{Verbatim}{fontsize=\small} %% Abstract \Abstract{ The availability of geocoded health data and the inherent temporal structure of communicable diseases have led to an increased interest in statistical models and software for spatio-temporal data with epidemic features. The \proglang{R}~package \pkg{surveillance} can handle various levels of aggregation at which infective events have been recorded. This vignette illustrates the analysis of \emph{point-referenced} surveillance data using the endemic-epidemic point process model ``\code{twinstim}'' proposed by \citet{meyer.etal2011} and extended in \citet{meyer.held2013}. %% (For other types of surveillance data, see %% \code{vignette("twinSIR")} and \code{vignette("hhh4\_spacetime")}.) We first describe the general modeling approach and then exemplify data handling, model fitting, visualization, and simulation methods for time-stamped geo-referenced case reports of invasive meningococcal disease (IMD) caused by the two most common bacterial finetypes of meningococci in Germany, 2002--2008. } \Keywords{% spatio-temporal point pattern, endemic-epidemic modeling, infectious disease epidemiology, self-exciting point process, spatial interaction function, branching process with immigration} \begin{document} %% \vfill %% { %% \renewcommand{\abstractname}{Outline} % local change %% \begin{abstract} %% We start by describing the general model class in %% Section~\ref{sec:twinstim:methods}. %% Section~\ref{sec:twinstim:data} introduces the example data and the %% associated class \class{epidataCS}, %% Section~\ref{sec:twinstim:fit} presents the core functionality of %% fitting and analyzing such data using \code{twinstim}, and %% Section~\ref{sec:twinstim:simulation} shows how to simulate realizations %% from a fitted model. %% \end{abstract} %% } %% \vfill %% \newpage \section[Model class]{Model class: \code{twinstim}} \label{sec:twinstim:methods} Infective events occur at specific points in continuous space and time, which gives rise to a spatio-temporal point pattern $\{(\bm{s}_i,t_i): i = 1,\dotsc,n\}$ from a region~$\bm{W}$ observed during a period~$(0,T]$. The locations~$\bm{s}_i$ and time points~$t_i$ of the $n$~events can be regarded as a realization of a self-exciting spatio-temporal point process, which can be characterized by its conditional intensity function (CIF, also termed intensity process) $\lambda(\bm{s},t)$. It represents the instantaneous event rate at location~$\bm{s}$ at time point~$t$ given all past events, and is often more verbosely denoted by~$\lambda^*$ or by explicit conditioning on the ``history''~$\mathcal{H}_t$ of the process. 
\citet[Chapter~7]{Daley.Vere-Jones2003} provide a rigorous mathematical definition of this concept, which is key to likelihood analysis and simulation of ``evolutionary'' point processes. \citet{meyer.etal2011} formulated the model class ``\code{twinstim}'' -- a \emph{two}-component \emph{s}patio-\emph{t}emporal \emph{i}ntensity \emph{m}odel -- by a superposition of an endemic and an epidemic component: \begin{equation} \label{eqn:twinstim} \lambda(\bm{s},t) = \nu_{[\bm{s}][t]} + \sum_{j \in I(\bm{s},t)} \eta_j \, f(\norm{\bm{s}-\bm{s}_j}) \, g(t-t_j) \:. \end{equation} This model constitutes a branching process with immigration. Part of the event rate is due to the first, endemic component, which reflects sporadic events caused by unobserved sources of infection. This background rate of new events is modeled by a log-linear predictor $\nu_{[\bm{s}][t]}$ incorporating regional and/or time-varying characteristics. Here, the space-time index $[\bm{s}][t]$ refers to the region covering $\bm{s}$ during the period containing $t$ and thus spans a whole spatio-temporal grid on which the involved covariates are measured, e.g., district $\times$ month. We will later see that the endemic component therefore simply equals an inhomogeneous Poisson process for the event counts by cell of that grid. The second, observation-driven epidemic component adds ``infection pressure'' from the set \begin{equation*} I(\bm{s},t) = \big\{ j : t_j < t \:\wedge\: t-t_j \le \tau_j \:\wedge\: \norm{\bm{s}-\bm{s}_j} \le \delta_j \big\} \end{equation*} of past events and hence makes the process ``self-exciting''. During its infectious period of length~$\tau_j$ and within its spatial interaction radius~$\delta_j$, the model assumes each event~$j$ to trigger further events, which are called offspring, secondary cases, or aftershocks, depending on the application. The triggering rate (or force of infection) is proportional to a log-linear predictor~$\eta_j$ associated with event-specific characteristics (``marks'') $\bm{m}_j$, which are usually attached to the point pattern of events. The decay of infection pressure with increasing spatial and temporal distance from the infective event is modeled by parametric interaction functions~$f$ and~$g$, respectively. A simple assumption for the time course of infectivity is $g(t) = 1$. Alternatives include exponential decay, a step function, or empirically derived functions such as Omori's law for aftershock intervals. With regard to spatial interaction, a Gaussian kernel $f(x) = \exp\left\{-x^2/(2 \sigma^2)\right\}$ could be chosen. However, in modeling the spread of human infectious diseases on larger scales, a heavy-tailed power-law kernel $f(x) = (x+\sigma)^{-d}$ was found to perform better \citep{meyer.held2013}. The (possibly infinite) upper bounds~$\tau_j$ and~$\delta_j$ provide a way of modeling event-specific interaction ranges. However, since these need to be pre-specified, a common assumption is $\tau_j \equiv \tau$ and $\delta_j \equiv \delta$, where the infectious period~$\tau$ and the spatial interaction radius~$\delta$ are determined by subject-matter considerations. \subsection{Model-based effective reproduction numbers} Similar to the simple SIR model \citep[see, e.g.,][Section 2.1]{Keeling.Rohani2008}, the above point process model~\eqref{eqn:twinstim} features a reproduction number derived from its branching process interpretation. 
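Before deriving this quantity, it may help to visualize the two spatial
kernel families introduced above. The following minimal sketch uses base
\proglang{R} only; the values for $\sigma$ and $d$ are arbitrary
illustrations (not estimates from any fit), chosen such that both kernels
satisfy $f(0) = 1$:

<<siaf_shapes_sketch, eval=FALSE, purl=FALSE>>=
## Gaussian vs. power-law distance decay (illustration values only):
curve(exp(-x^2 / (2 * 10^2)), from = 0, to = 100,  # Gaussian, sigma = 10
      xlab = "Distance x [km]", ylab = "f(x)")
curve((x + 1)^-1.5, add = TRUE, lty = 2)           # power law, sigma = 1, d = 1.5
legend("topright", legend = c("Gaussian", "Power law"), lty = 1:2)
@

Such a plot makes the heavier tail of the power law immediately visible.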
As soon as an event occurs (individual becomes infected), it triggers offspring (secondary cases) around its origin $(\bm{s}_j, t_j)$ according to an inhomogeneous Poisson process with rate $\eta_j \, f(\norm{\bm{s}-\bm{s}_j}) \, g(t-t_j)$. Since this triggering process is independent of the event's parentage and of other events, the expected number $\mu_j$ of events triggered by event $j$ can be obtained by integrating the triggering rate over the observed interaction domain: \begin{equation} \label{eqn:R0:twinstim} \mu_j = \eta_j \cdot \left[ \int_0^{\min(T-t_j,\tau_j)} g(t) \,dt \right] \cdot \left[ \int_{\bm{R}_j} f(\norm{\bm{s}}) \,d\bm{s} \right] \:, \end{equation} where \begin{equation} \label{eqn:twinstim:IR} \bm{R}_j = (b(\bm{s}_j,\delta_j) \cap \bm{W}) - \bm{s}_j \end{equation} is event $j$'s influence region centered at $\bm{s}_j$, and $b(\bm{s}_j, \delta_j)$ denotes the disc centered at $\bm{s}_j$ with radius $\delta_j$. Note that the above model-based reproduction number $\mu_j$ is event-specific since it depends on event marks through $\eta_j$, on the interaction ranges $\delta_j$ and $\tau_j$, as well as on the event location $\bm{s}_j$ and time point $t_j$. If the model assumes unique interaction ranges $\delta$ and $\tau$, a single reference number of secondary cases can be extrapolated from Equation~\ref{eqn:R0:twinstim} by imputing an unbounded domain $\bm{W} = \IR^2$ and $T = \infty$ \citep{meyer.etal2015}. Equation~\ref{eqn:R0:twinstim} can also be motivated by looking at a spatio-temporal version of the simple SIR model wrapped into the \class{twinstim} class~\eqref{eqn:twinstim}. This means: no endemic component, homogeneous force of infection ($\eta_j \equiv \beta$), homogeneous mixing in space ($f(x) = 1$, $\delta_j \equiv \infty$), and exponential decay of infectivity over time ($g(t) = e^{-\alpha t}$, $\tau_j \equiv \infty$). Then, for $T \rightarrow \infty$, \begin{equation*} \mu = \beta \cdot \left[ \int_0^\infty e^{-\alpha t} \,dt \right] \cdot \left[ \int_{\bm{W}-\bm{s}_j} 1 \,d\bm{s} \right] = \beta \cdot \abs{\bm{W}} / \alpha \:, \end{equation*} which corresponds to the basic reproduction number known from the simple SIR model by interpreting $\abs{\bm{W}}$ as the population size, $\beta$ as the transmission rate and $\alpha$ as the removal rate. If $\mu < 1$, the process is sub-critical, i.e., its eventual extinction is almost sure. However, it is crucial to understand that in a full model with an endemic component, new infections may always occur via ``immigration''. Hence, reproduction numbers in \class{twinstim} are adjusted for infections occurring independently of previous infections. This also means that a misspecified endemic component may distort model-based reproduction numbers \citep{meyer.etal2015}. Furthermore, under-reporting and implemented control measures imply that the estimates are to be thought of as \emph{effective} reproduction numbers. \subsection{Likelihood inference} The log-likelihood of the point process model~\eqref{eqn:twinstim} is a function of all parameters in the log-linear predictors $\nu_{[\bm{s}][t]}$ and $\eta_j$ and in the interaction functions $f$ and $g$. 
It has the form %% \begin{equation} \label{eqn:twinstim:marked:loglik} %% l(\bm{\theta}) = \left[ \sum_{i=1}^{n} \log\lambda(\bm{s}_i,t_i,k_i) \right] - %% \sum_{k\in\mathcal{K}} \int_0^T \int_{\bm{W}} \lambda(\bm{s},t,k) \dif\bm{s} %% \dif t \:, %% \end{equation} \begin{equation} \label{eqn:twinstim:loglik} \left[ \sum_{i=1}^{n} \log\lambda(\bm{s}_i,t_i) \right] - \int_0^T \int_{\bm{W}} \lambda(\bm{s},t) \dif\bm{s} \dif t \:. \end{equation} %\citep[Proposition~7.3.III]{Daley.Vere-Jones2003} To estimate the model parameters, we maximize the above log-likelihood numerically using the quasi-Newton algorithm available through the \proglang{R}~function \code{nlminb}. We thereby employ the analytical score function and an approximation of the expected Fisher information worked out by \citet[Web Appendices A and B]{meyer.etal2011}. The space-time integral in the log-likelihood \eqref{eqn:twinstim:loglik} poses no difficulties for the endemic component of $\lambda(\bm{s},t)$, since $\nu_{[\bm{s}][t]}$ is defined on a spatio-temporal grid. However, integration of the epidemic component involves two-dimensional integrals $\int_{\bm{R}_i} f(\norm{\bm{s}}) \dif\bm{s}$ over the influence regions~$\bm{R}_i$, which are represented by polygons (as is~$\bm{W}$). Similar integrals appear in the score function, where $f(\norm{\bm{s}})$ is replaced by partial derivatives with respect to kernel parameters. Calculation of these integrals is trivial for (piecewise) constant~$f$, but otherwise requires numerical integration. The \proglang{R}~package \CRANpkg{polyCub} \citep{meyer2019} offers various cubature methods for polygonal domains. % For Gaussian~$f$, we apply a midpoint rule with $\sigma$-adaptive bandwidth % %% combined with an analytical formula via the $\chi^2$ distribution % %% if the $6\sigma$-circle around $\bm{s}_i$ is contained in $\bm{R}_i$. % and use product Gauss cubature \citep{sommariva.vianello2007} % to approximate the integrals in the score function. % For the recently implemented power-law kernels, Of particular relevance for \code{twinstim} is the \code{polyCub.iso} method, which takes advantage of the assumed isotropy of spatial interaction such that numerical integration remains in only one dimension \citep[Supplement~B, Section~2]{meyer.held2013}. We \CRANpkg{memoise} \citep{R:memoise} the cubature function during log-likelihood maximization to avoid integration for unchanged parameters of~$f$. \subsection{Special cases: Single-component models} If the \emph{epidemic} component is omitted in Equation~\ref{eqn:twinstim}, the point process model becomes equivalent to a Poisson regression model for aggregated counts. This provides a link to ecological regression approaches in general and to the count data model \code{hhh4} illustrated in \code{vignette("hhh4")} and \code{vignette("hhh4\_spacetime")}. To see this, recall that the endemic component $\nu_{[\bm{s}][t]}$ is piecewise constant on the spatio-temporal grid with cells $([\bm{s}],[t])$. Hence the log-likelihood~\eqref{eqn:twinstim:loglik} of an endemic-only \code{twinstim} simplifies to a sum over all these cells, \begin{equation*} \sum_{[\bm{s}],[t]} \left\{ Y_{[\bm{s}][t]} \log\nu_{[\bm{s}][t]} - \abs{[\bm{s}]} \, \abs{[t]} \, \nu_{[\bm{s}][t]} \right\} \:, \end{equation*} where $Y_{[\bm{s}][t]}$ is the aggregated number of events observed in cell $([\bm{s}],[t])$, and $\abs{[\bm{s}]}$ and $\abs{[t]}$ denote cell area and length, respectively. 
Except for an additive constant, the above log-likelihood is equivalently obtained from the Poisson model $Y_{[\bm{s}][t]} \sim \Po( \abs{[\bm{s}]} \, \abs{[t]} \, \nu_{[\bm{s}][t]})$. This relation offers a means of code validation using the established \code{glm} function to fit an endemic-only \code{twinstim} model -- see the examples in \code{help("glm_epidataCS")}. %% The \code{help("glm_epidataCS")} also shows how to fit %% an equivalent endemic-only \code{hhh4} model. If, in contrast, the \emph{endemic} component is omitted, all events are necessarily triggered by other observed events. For such a model to be identifiable, a prehistory of events must exist to trigger the first event, and interaction typically needs to be unbounded such that each event can actually be linked to potential source events. \subsection[Extension: Event types]{Extension: \code{twinstim} with event types} To model the example data on invasive meningococcal disease in the remainder of this section, we actually need to use an extended version $\lambda(\bm{s},t,k)$ of Equation~\ref{eqn:twinstim}, which accounts for different event types~$k$ with own transmission dynamics. This introduces a further dimension in the point process, and the second log-likelihood component in Equation~\ref{eqn:twinstim:loglik} accordingly splits into a sum over all event types. We refer to \citet[Sections~2.4 and~3]{meyer.etal2011} for the technical details of this type-specific \code{twinstim} class. The basic idea is that the meningococcal finetypes share the same endemic pattern (e.g., seasonality), while infections of different finetypes are not associated via transmission. This means that the force of infection is restricted to previously infected individuals with the same bacterial finetype~$k$, i.e., the epidemic sum in Equation~\ref{eqn:twinstim} is over the set $I(\bm{s},t,k) = I(\bm{s},t) \cap \{j: k_j = k\}$. The implementation has limited support for type-dependent interaction functions $f_{k_j}$ and $g_{k_j}$ (not further considered here). \section[Data structure]{Data structure: \class{epidataCS}} \label{sec:twinstim:data} <>= ## extract components from imdepi to reconstruct data("imdepi") events <- SpatialPointsDataFrame( coords = coordinates(imdepi$events), data = marks(imdepi, coords=FALSE), proj4string = imdepi$events@proj4string # ETRS89 projection (+units=km) ) stgrid <- imdepi$stgrid[,-1] @ <>= load(system.file("shapes", "districtsD.RData", package = "surveillance")) @ The first step toward fitting a \code{twinstim} is to turn the relevant data into an object of the dedicated class \class{epidataCS}.\footnote{ The suffix ``CS'' indicates that the data-generating point process is indexed in continuous space. } The primary ingredients of this class are a spatio-temporal point pattern (\code{events}) and its underlying observation region (\code{W}). An additional spatio-temporal grid (\code{stgrid}) holds (time-varying) area-level covariates for the endemic regression part. We exemplify this data class by the \class{epidataCS} object for the \Sexpr{nobs(imdepi)} cases of invasive meningococcal disease in Germany originally analyzed by \citet{meyer.etal2011}. It is already contained in the \pkg{surveillance} package as \code{data("imdepi")} and has been constructed as follows: <>= imdepi <- as.epidataCS(events = events, W = stateD, stgrid = stgrid, qmatrix = diag(2), nCircle2Poly = 16) @ The function \code{as.epidataCS} checks the consistency of the three data ingredients described in detail below. 
It also pre-computes auxiliary variables for model fitting, e.g., the
individual influence regions~\eqref{eqn:twinstim:IR}, which are intersections
of the observation region with discs %of radius \code{eps.s}
centered at the event location approximated by polygons with
\code{nCircle2Poly = 16} edges. The intersections are computed using
functionality of the package \CRANpkg{polyclip} \citep{R:polyclip}.
For multitype epidemics as in our example, the additional indicator matrix
\code{qmatrix} specifies transmissibility across event types.
An identity matrix corresponds to an independent spread of the event types,
i.e., cases of one type cannot produce cases of another type.

\subsection{Data ingredients}

The core \code{events} data must be provided in the form of a
\class{SpatialPointsDataFrame} as defined by the package
\CRANpkg{sp} \citep{R:sp}:
<<eval = FALSE>>=
summary(events)
@
<<echo = FALSE>>=
oopt <- options(width=100)
## hack to reduce the 'print.gap' in the data summary but not for the bbox
local({
    print.summary.Spatial <- sp:::print.summary.Spatial
    environment(print.summary.Spatial) <- environment()
    print.table <- function (x, ..., print.gap = 0) {
        base::print.table(x, ..., print.gap = print.gap)
    }
    print.summary.Spatial(summary(events))
})
options(oopt)
@
The associated event coordinates are residence postcode centroids, projected
in the \emph{European Terrestrial Reference System 1989} (in kilometer units)
to enable Euclidean geometry. See the \code{spTransform}-methods for how to
project latitude and longitude coordinates into a planar coordinate reference
system (CRS). The data frame associated with these spatial coordinates
($\bm{s}_i$) contains a number of required variables and additional event
marks (in the notation of Section~\ref{sec:twinstim:methods}:
$\{(t_i,[\bm{s}_i],k_i,\tau_i,\delta_i,\bm{m}_i): i = 1,\dotsc,n\}$).
For the IMD data, the event \code{time} is measured in days since the
beginning of the observation period 2002--2008 and is subject to a
tie-breaking procedure (described later). The \code{tile} column refers to
the region of the spatio-temporal grid where the event occurred and here
contains the official key of the administrative district of the patient's
residence. There are two \code{type}s of events labeled as \code{"B"} and
\code{"C"}, which refer to the serogroups of the two meningococcal finetypes
\emph{B:P1.7-2,4:F1-5} and \emph{C:P1.5,2:F3-3} contained in the data.
The \code{eps.t} and \code{eps.s} columns specify upper limits for temporal
and spatial interaction, respectively. Here, the infectious period is assumed
to last a maximum of 30 days and spatial interaction is limited to a 200 km
radius for all cases. The latter has numerical advantages for a Gaussian
interaction function $f$ with a relatively small standard deviation. For a
power-law kernel, however, this restriction will be dropped to enable
occasional long-range transmission. The last two data attributes displayed in
the above \code{event} summary are covariates from the case reports: the
gender and age group of the patient.

For the observation region \code{W}, we use a polygon representation of
Germany's boundary. Since the observation region defines the integration
domain in the point process log-likelihood~\eqref{eqn:twinstim:loglik},
the more detailed the polygons of \code{W} are, the longer it will take to
fit a \code{twinstim}.
It is thus advisable to sacrifice some shape details for speed by reducing the polygon complexity, e.g., by applying \code{ms_simplify} from the \CRANpkg{rmapshaper} package \citep{R:rmapshaper}. Alternative tools in \proglang{R} are \CRANpkg{spatstat}'s \code{simplify.owin} procedure \citep{R:spatstat} and the function \code{thinnedSpatialPoly} in package \CRANpkg{maptools} \citep{R:maptools}, which implements the Douglas-Peucker reduction method. The \pkg{surveillance} package already contains a simplified representation of Germany's boundaries: <>= <> @ This file contains both the \class{SpatialPolygonsDataFrame} \code{districtsD} of Germany's \Sexpr{length(districtsD)} administrative districts as at January 1, 2009, as well as their union \code{stateD}. %obtained by the call \code{rgeos::gUnaryUnion(districtsD)} \citep{R:rgeos}. These boundaries are projected in the same CRS as the \code{events} data. The \code{stgrid} input for the endemic model component is a data frame with (time-varying) area-level covariates, e.g., socio-economic or ecological characteristics. In our example: <>= .stgrid.excerpt <- format(rbind(head(stgrid, 3), tail(stgrid, 3)), digits=3) rbind(.stgrid.excerpt[1:3,], "..."="...", .stgrid.excerpt[4:6,]) @ Numeric (\code{start},\code{stop}] columns index the time periods and the factor variable \code{tile} identifies the regions of the grid. Note that the given time intervals (here: months) also define the resolution of possible time trends and seasonality of the piecewise constant endemic intensity. We choose monthly intervals to reduce package size and computational cost compared to the weekly resolution originally used by \citet{meyer.etal2011} and \citet{meyer.held2013}. The above \code{stgrid} data frame thus consists of 7 (years) times 12 (months) blocks of \Sexpr{nlevels(stgrid[["tile"]])} (districts) rows each. The \code{area} column gives the area of the respective \code{tile} in square kilometers (compatible with the CRS used for \code{events} and \code{W}). A geographic representation of the regions in \code{stgrid} is not required for model estimation, and is thus not part of the \class{epidataCS} class. %It is, however, necessary for plots of the fitted intensity and for %simulation from the estimated model. In our example, the area-level data only consists of the population density \code{popdensity}, whereas \citet{meyer.etal2011} additionally incorporated (lagged) weekly influenza counts by district as a time-dependent covariate. %% In another application, \citet{meyer.etal2015} used a large number of socio-economic %% characteristics to model psychiatric hospital admissions. \subsection{Data handling and visualization} The generated \class{epidataCS} object \code{imdepi} is a simple list of the checked ingredients <>= cat(paste0('\\code{', names(imdepi), '}', collapse = ", "), ".", sep = "") @ Several methods for data handling and visualization are available for such objects as listed in Table~\ref{tab:methods:epidataCS} and briefly presented in the remainder of this section. 
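Since the object is a plain list, its checked ingredients can also be
inspected directly by standard list extraction; a small sketch (using the
component names printed above):

<<imdepi_components_sketch, eval=FALSE, purl=FALSE>>=
## direct access to the checked ingredients, e.g.:
summary(imdepi$events)  # the SpatialPointsDataFrame of cases
head(imdepi$stgrid)     # the endemic covariate grid
imdepi$qmatrix          # transmissibility between the two finetypes
@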
<>= print(xtable( surveillance:::functionTable( class = "epidataCS", functions = list( Convert = c("epidataCS2sts"), Extract = c("getSourceDists"))), caption="Generic and \\textit{non-generic} functions applicable to \\class{epidataCS} objects.", label="tab:methods:epidataCS" ), include.rownames = FALSE) @ Printing an \class{epidataCS} object presents some metadata and the first \Sexpr{formals(surveillance:::print.epidataCS)[["n"]]} events by default: <>= imdepi @ During conversion to \class{epidataCS}, the last three columns \code{BLOCK} (time interval index), \code{start} and \code{popdensity} have been merged from the checked \code{stgrid} to the \code{events} data frame. The event marks including time and location can be extracted in a standard data frame by \code{marks(imdepi)} -- inspired by package \CRANpkg{spatstat} -- and this is summarized by \code{summary(imdepi)}. <>= (simdepi <- summary(imdepi)) @ The number of potential sources of infection per event (denoted \texttt{|.sources|} in the above output) is additionally summarized. It is determined by the events' maximum ranges of interaction \code{eps.t} and \code{eps.s}. The event-specific set of potential sources is stored in the (hidden) list \code{imdepi$events$.sources} (events are referenced by row index), and the event-specific numbers of potential sources are stored in the summarized object as \code{simdepi$nSources}. A simple plot of the number of infectives as a function of time (Figure~\ref{fig:imdepi_stepfun}) %determined by the event times and infectious periods can be obtained by the step function converter: <>= par(mar = c(5, 5, 1, 1), las = 1) plot(as.stepfun(imdepi), xlim = summary(imdepi)$timeRange, xaxs = "i", xlab = "Time [days]", ylab = "Current number of infectives", main = "") #axis(1, at = 2557, labels = "T", font = 2, tcl = -0.3, mgp = c(3, 0.3, 0)) @ \pagebreak[1] The \code{plot}-method for \class{epidataCS} offers aggregation of the events over time or space: <>= par(las = 1) plot(imdepi, "time", col = c("indianred", "darkblue"), ylim = c(0, 20)) par(mar = c(0, 0, 0, 0)) plot(imdepi, "space", lwd = 2, points.args = list(pch = c(1, 19), col = c("indianred", "darkblue"))) layout.scalebar(imdepi$W, scale = 100, labels = c("0", "100 km"), plot = TRUE) @ \pagebreak[1] The time-series plot (Figure~\ref{fig:imdepi_plot-1}) shows the monthly aggregated number of cases by finetype in a stacked histogram as well as each type's cumulative number over time. The spatial plot (Figure~\ref{fig:imdepi_plot-2}) shows the observation window \code{W} with the locations of all cases (by type), where the areas of the points are proportional to the number of cases at the respective location. Additional shading by the population is possible and exemplified in \code{help("plot.epidataCS")}. The above static plots do not capture the space-time dynamics of epidemic spread. An animation may provide additional insight and can be produced by the corresponding \code{animate}-method. 
For instance, to look at the first year of the B-type in a weekly sequence of snapshots in a web browser (using facilities of the \CRANpkg{animation} package of \citealp{R:animation}): <>= animation::saveHTML( animate(subset(imdepi, type == "B"), interval = c(0, 365), time.spacing = 7), nmax = Inf, interval = 0.2, loop = FALSE, title = "First year of type B") @ Selecting events from \class{epidataCS} as for the animation above is enabled by the \code{[}- and \code{subset}-methods, which return a new \class{epidataCS} object containing only the selected \code{events}. A limited data sampling resolution may lead to tied event times or locations, which are in conflict with a continuous spatio-temporal point process model. For instance, a temporal residual analysis would suggest model deficiencies \citep[Figure 4]{meyer.etal2011}, and a power-law kernel for spatial interaction may diverge if there are events with zero distance to potential source events \citep{meyer.held2013}. The function \code{untie} breaks ties by random shifts. This has already been applied to the event \emph{times} in the provided \code{imdepi} data by subtracting a U$(0,1)$-distributed random number from the original dates. The event \emph{coordinates} in the IMD data are subject to interval censoring at the level of Germany's postcode regions. A possible replacement for the given centroids would thus be a random location within the corresponding postcode area. Lacking a suitable shapefile, \citet{meyer.held2013} shifted all locations by a random vector with length up to half the observed minimum spatial separation: <>= eventDists <- dist(coordinates(imdepi$events)) minsep <- min(eventDists[eventDists > 0]) set.seed(321) imdepi_untied <- untie(imdepi, amount = list(s = minsep / 2)) @ Note that random tie-breaking requires sensitivity analyses as discussed by \citet{meyer.held2013}, but these are skipped here for the sake of brevity. The \code{update}-method is useful to change the values of the maximum interaction ranges \code{eps.t} and \code{eps.s}, since it takes care of the necessary updates of the hidden auxiliary variables in an \class{epidataCS} object. For unbounded spatial interaction: <>= imdepi_untied_infeps <- update(imdepi_untied, eps.s = Inf) @ Last but not least, \class{epidataCS} can be aggregated to \class{epidata} (from \code{vignette("twinSIR")}) or \class{sts} (from \code{vignette("hhh4_spacetime")}). The method \code{as.epidata.epidataCS} aggregates events by region (\code{tile}), and the function \code{epidataCS2sts} yields counts by region and time interval. The latter could be analyzed by an areal time-series model such as \code{hhh4} (see \code{vignette("hhh4\_spacetime")}). We can also use \class{sts} visualizations, e.g.\ (Figure~\ref{fig:imdsts_plot}): <>= imdsts <- epidataCS2sts(imdepi, freq = 12, start = c(2002, 1), tiles = districtsD) par(las = 1, lab = c(7,7,7), mar = c(5,5,1,1)) plot(imdsts, type = observed ~ time) plot(imdsts, type = observed ~ unit, population = districtsD$POPULATION / 100000) @ \section{Modeling and inference} \label{sec:twinstim:fit} Having prepared the data as an object of class \class{epidataCS}, the function \code{twinstim} can be used to perform likelihood inference for conditional intensity models of the form~\eqref{eqn:twinstim}. 
The main arguments for \code{twinstim} are the formulae of the \code{endemic} and \code{epidemic} linear predictors ($\nu_{[\bm{s}][t]} = \exp$(\code{endemic}) and $\eta_j = \exp$(\code{epidemic})), and the spatial and temporal interaction functions \code{siaf} ($f$) and \code{tiaf} ($g$), respectively. Both formulae are parsed internally using the standard \code{model.frame} toolbox from package \pkg{stats} and thus can handle factor variables and interaction terms. While the \code{endemic} linear predictor incorporates %time-dependent and/or area-level covariates from \code{stgrid}, %% and in the disease mapping context usually contains at least the population density as a multiplicative offset, i.e., %% \code{endemic = ~offset(log(popdensity))}. There can be additional effects of time, %% which are functions of the variable \code{start} from \code{stgrid}, %% or effects of, e.g., socio-demographic and ecological variables. the \code{epidemic} formula may use both \code{stgrid} variables and event marks to be associated with the force of infection. %% For instance, \code{epidemic = ~log(popdensity) + type} corresponds to %% $\eta_j = \rho_{[\bm{s}_j]}^{\gamma_{\rho}} \exp(\gamma_0 + \gamma_C \ind(k_j=C))$, %% which models different infectivity of the event types, and scales %% with population density (a grid-based covariate) to reflect higher %% contact rates and thus infectivity in more densly populated regions. For the interaction functions, several alternatives are predefined as listed in Table~\ref{tab:iafs}. They are applicable out-of-the-box and illustrated as part of the following modeling exercise for the IMD data. Own interaction functions can also be implemented following the structure described in \code{help("siaf")} and \code{help("tiaf")}, respectively. <>= twinstim_iafs <- suppressWarnings( cbind("Spatial (\\code{siaf.*})" = ls(pattern="^siaf\\.", pos="package:surveillance"), "Temporal (\\code{tiaf.*})" = ls(pattern="^tiaf\\.", pos="package:surveillance")) ) twinstim_iafs <- apply(twinstim_iafs, 2, function (x) { is.na(x) <- duplicated(x) x }) print(xtable(substring(twinstim_iafs, 6), label="tab:iafs", caption="Predefined spatial and temporal interaction functions."), include.rownames=FALSE, sanitize.text.function=function(x) paste0("\\code{", x, "}"), sanitize.colnames.function=identity, sanitize.rownames.function=identity) @ \subsection{Basic example} To illustrate statistical inference with \code{twinstim}, we will estimate several models for the simplified and ``untied'' IMD data presented in Section~\ref{sec:twinstim:data}. In the endemic component, we include the district-specific population density as a multiplicative offset, a (centered) time trend, and a sinusoidal wave of frequency $2\pi/365$ to capture seasonality, where the \code{start} variable from \code{stgrid} measures time: <>= (endemic <- addSeason2formula(~offset(log(popdensity)) + I(start / 365 - 3.5), period = 365, timevar = "start")) @ See \citet[Section~2.2]{held.paul2012} for how such sine/cosine terms reflect seasonality. Because of the aforementioned integrations in the log-likelihood~\eqref{eqn:twinstim:loglik}, it is advisable to first fit an endemic-only model to obtain reasonable start values for more complex epidemic models: <>= imdfit_endemic <- twinstim(endemic = endemic, epidemic = ~0, data = imdepi_untied, subset = !is.na(agegrp)) @ We exclude the single case with unknown age group from this analysis since we will later estimate an effect of the age group on the force of infection. 
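As a quick numerical interpretation of the fitted seasonal wave before
looking at the full summary: its peak-to-trough rate ratio is
$\exp(2\sqrt{\gamma_{\sin}^2 + \gamma_{\cos}^2})$ in terms of the estimated
sine/cosine coefficients. A sketch, selecting the two coefficients by name
pattern:

<<seasonality_amplitude_sketch, eval=FALSE, purl=FALSE>>=
## multiplicative peak-to-trough effect of the endemic seasonal wave:
gamma <- coef(imdfit_endemic)[grep("sin|cos", names(coef(imdfit_endemic)))]
exp(2 * sqrt(sum(gamma^2)))
@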
Many of the standard functions to access model fits in \proglang{R} are also implemented for \class{twinstim} fits (see Table~\ref{tab:methods:twinstim}). For example, we can produce the usual model summary: <>= summary(imdfit_endemic) @ Because of the aforementioned equivalence of the endemic component with a Poisson regression model, the coefficients can be interpreted as log rate ratios in the usual way. For instance, the endemic rate is estimated to decrease by \code{1 - exp(coef(imdfit_endemic)[2])} $=$ \Sexpr{round(100*(1-exp(coef(imdfit_endemic)[2])),1)}\% per year. Coefficient correlations can be retrieved via the argument \code{correlation = TRUE} in the \code{summary} call just like for \code{summary.glm}, or via \code{cov2cor(vcov(imdfit_endemic))}. <>= print(xtable( surveillance:::functionTable( class = "twinstim", functions = list( Display = c("iafplot", "checkResidualProcess"), Extract = c("intensity.twinstim", "simpleR0"), Modify = c("stepComponent"), Other = c("epitest"))), caption="Generic and \\textit{non-generic} functions applicable to \\class{twinstim} objects. Note that there is no need for specific \\code{coef}, \\code{confint}, \\code{AIC} or \\code{BIC} methods, since the respective default methods from package \\pkg{stats} apply outright.", label="tab:methods:twinstim" ), include.rownames = FALSE) @ We now update the endemic model to take additional spatio-temporal dependence between events into account. Infectivity shall depend on the meningococcal finetype and the age group of the patient, and is assumed to be constant over time (default), $g(t)=\ind_{(0,30]}(t)$, with a Gaussian distance-decay $f(x) = \exp\left\{-x^2/(2 \sigma^2)\right\}$. This model was originally selected by \citet{meyer.etal2011} and can be fitted as follows: <>= imdfit_Gaussian <- update(imdfit_endemic, epidemic = ~type + agegrp, siaf = siaf.gaussian(), cores = 2 * (.Platform$OS.type == "unix")) @ On Unix-alikes, the numerical integrations of $f(\norm{\bm{s}})$ in the log-likelihood and $\frac{\partial f(\norm{\bm{s}})}{\partial \log\sigma}$ in the score function (note that $\sigma$ is estimated on the log-scale) can be performed in parallel via %the ``multicore'' functions \code{mclapply} \textit{et al.}\ from the base package \pkg{parallel}, here with \code{cores = 2} processes. Table~\ref{tab:imdfit_Gaussian} shows the output of \code{twinstim}'s \code{xtable} method \citep{R:xtable} applied to the above model fit, providing a table of estimated rate ratios for the endemic and epidemic effects. The alternative \code{toLatex} method simply translates the \code{summary} table of coefficients to \LaTeX\ without \code{exp}-transformation. On the subject-matter level, we can conclude from Table~\ref{tab:imdfit_Gaussian} that the meningococcal finetype of serogroup~C is less than half as infectious as the B-type, and that patients in the age group 3 to 18 years are estimated to cause twice as many secondary infections as infants aged 0 to 2 years. <>= print(xtable(imdfit_Gaussian, caption="Estimated rate ratios (RR) and associated Wald confidence intervals (CI) for endemic (\\code{h.}) and epidemic (\\code{e.}) terms. 
This table was generated by \\code{xtable(imdfit\\_Gaussian)}.", label="tab:imdfit_Gaussian"), sanitize.text.function=NULL, sanitize.colnames.function=NULL, sanitize.rownames.function=function(x) paste0("\\code{", x, "}")) @ \subsection{Model-based effective reproduction numbers} The event-specific reproduction numbers~\eqref{eqn:R0:twinstim} can be extracted from fitted \class{twinstim} objects via the \code{R0} method. For the above IMD model, we obtain the following mean numbers of secondary infections by finetype: <<>>= R0_events <- R0(imdfit_Gaussian) tapply(R0_events, marks(imdepi_untied)[names(R0_events), "type"], mean) @ Confidence intervals %for the estimated reproduction numbers $\hat\mu_j$ can be obtained via Monte Carlo simulation, where Equation~\ref{eqn:R0:twinstim} is repeatedly evaluated with parameters sampled from the asymptotic multivariate normal distribution of the maximum likelihood estimate. For this purpose, the \code{R0}-method takes an argument \code{newcoef}, which is exemplified in \code{help("R0")}. %% Note that except for (piecewise) constant $f$, computing confidence intervals for %% $\hat\mu_j$ takes a considerable amount of time since the integrals over the %% polygons $\bm{R}_j$ have to be solved numerically for each new set of parameters. \subsection{Interaction functions} <>= imdfit_exponential <- update(imdfit_Gaussian, siaf = siaf.exponential()) @ <>= imdfit_powerlaw <- update(imdfit_Gaussian, siaf = siaf.powerlaw(), data = imdepi_untied_infeps, start = c("e.(Intercept)" = -6.2, "e.siaf.1" = 1.5, "e.siaf.2" = 0.9)) @ <>= imdfit_step4 <- update(imdfit_Gaussian, siaf = siaf.step(exp(1:4 * log(100) / 5), maxRange = 100)) @ <>= save(imdfit_Gaussian, imdfit_exponential, imdfit_powerlaw, imdfit_step4, file = "twinstim-cache.RData", compress = "xz") @ Figure~\ref{fig:imdfit_siafs} shows several estimated spatial interaction functions, which can be plotted by, e.g., \code{plot(imdfit_Gaussian, "siaf")}. <>= par(mar = c(5,5,1,1)) set.seed(2) # Monte-Carlo confidence intervals plot(imdfit_Gaussian, "siaf", xlim=c(0,42), ylim=c(0,5e-5), lty=c(1,3), xlab = expression("Distance " * x * " from host [km]")) plot(imdfit_exponential, "siaf", add=TRUE, col.estimate=5, lty = c(5,3)) plot(imdfit_powerlaw, "siaf", add=TRUE, col.estimate=4, lty=c(2,3)) plot(imdfit_step4, "siaf", add=TRUE, col.estimate=3, lty=c(4,3)) legend("topright", legend=c("Power law", "Exponential", "Gaussian", "Step (df=4)"), col=c(4,5,2,3), lty=c(2,5,1,4), lwd=3, bty="n") @ The estimated standard deviation $\hat\sigma$ of the Gaussian kernel is: <<>>= exp(cbind("Estimate" = coef(imdfit_Gaussian)["e.siaf.1"], confint(imdfit_Gaussian, parm = "e.siaf.1"))) @ \citet{meyer.held2013} found that a power-law decay of spatial interaction more appropriately describes the spread of human infectious diseases. A power-law kernel concentrates on short-range interaction, but also exhibits a heavier tail reflecting occasional transmission over large distances. %This result is supported by the power-law distribution of short-time human %travel \citep{brockmann.etal2006}, which is an important driver of epidemic spread. To estimate the power law $f(x) = (x+\sigma)^{-d}$, we use the prepared \code{eps.s = Inf} version of the \class{epidataCS} object, and update the model as follows: <>= <> @ To reduce the runtime of this example, we specified convenient \code{start} values for some parameters. 
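Such start values need not be guessed; they can, for instance, be carried
over from a previously fitted model. A sketch (the object name
\code{imdfit_powerlaw2} is purely illustrative):

<<start_from_previous_fit, eval=FALSE, purl=FALSE>>=
## reuse an estimate from the simpler Gaussian fit as a start value:
imdfit_powerlaw2 <- update(imdfit_Gaussian, siaf = siaf.powerlaw(),
    data = imdepi_untied_infeps,
    start = coef(imdfit_Gaussian)["e.(Intercept)"])
@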
The estimated parameters $(\hat\sigma, \hat d)$ are: <<>>= exp(cbind("Estimate" = coef(imdfit_powerlaw)[c("e.siaf.1", "e.siaf.2")], confint(imdfit_powerlaw, parm = c("e.siaf.1", "e.siaf.2")))) @ Sometimes $\sigma$ is difficult to estimate, and also in this example, its confidence interval is relatively large. The one-parameter version \code{siaf.powerlaw1} can be used to estimate a power-law decay with fixed $\sigma = 1$. A more common option is the exponential kernel $f(x) = \exp(-x/\sigma)$: <>= <> @ Table~\ref{tab:iafs} also lists the step function kernel as an alternative, which is particularly useful for two reasons. First, it is a more flexible approach since it estimates interaction between the given knots without assuming an overall functional form. Second, the spatial integrals in the log-likelihood can be computed analytically for the step function kernel, which therefore offers a quick estimate of spatial interaction. We update the Gaussian model to use four steps at log-equidistant knots up to an interaction range of 100 km: <>= <> @ Figure~\ref{fig:imdfit_siafs} suggests that the estimated step function is in line with the power law. Note that suitable knots for the step function could also be derived from quantiles of the observed distances between events and their potential source events, e.g.: <<>>= quantile(getSourceDists(imdepi_untied_infeps, "space"), c(1,2,4,8)/100) @ For the temporal interaction function $g(t)$, model updates and plots are similarly possible, e.g., using \code{update(imdfit_Gaussian, tiaf = tiaf.exponential())}. However, the events in the IMD data are too rare to infer the time-course of infectivity with confidence. <>= local({ nSources <- sapply(levels(imdepi$events$type), function (.type) { mean(summary(subset(imdepi_untied_infeps, type==.type))$nSources) }) structure( paste("Specifically, there are only", paste0(round(nSources,1), " (", names(nSources), ")", collapse=" and "), "cases on average within the preceding 30 days", "(potential sources of infection)."), class="Latex") }) @ \subsection{Model selection} <>= AIC(imdfit_endemic, imdfit_Gaussian, imdfit_exponential, imdfit_powerlaw, imdfit_step4) @ Akaike's Information Criterion (AIC) suggests superiority of the power-law vs.\ the exponential, Gaussian, and endemic-only models. The more flexible step function yields the best AIC value, but its shape strongly depends on the chosen knots and is not guaranteed to be monotonically decreasing. The function \code{stepComponent} -- a wrapper around the \code{step} function from \pkg{stats} -- can be used to perform AIC-based stepwise selection within a given model component. <>= ## Example of AIC-based stepwise selection of the endemic model imdfit_endemic_sel <- stepComponent(imdfit_endemic, component = "endemic") ## -> none of the endemic predictors is removed from the model @ \subsection{Model diagnostics} The element \code{"fittedComponents"} of a \class{twinstim} object contains the endemic and epidemic values of the estimated intensity at each event occurrence. However, plots of the conditional intensity (and its components) as a function of location or time provide more insight into the fitted process. Evaluation of \code{intensity.twinstim} requires the model environment to be stored with the fit. 
By default, \code{model = FALSE} in \code{twinstim}, but if the data are still available, the model environment can also be added afterwards using the convenient \code{update} method: <>= imdfit_powerlaw <- update(imdfit_powerlaw, model = TRUE) @ Figure~\ref{fig:imdfit_powerlaw_intensityplot_time} shows an \code{intensityplot} of the fitted ``ground'' intensity $\sum_{k=1}^2 \int_{\bm{W}} \hat\lambda(\bm{s},t,k) \dif \bm{s}$: %aggregated over both event types: <>= intensityplot(imdfit_powerlaw, which = "total", aggregate = "time", types = 1:2) @ <>= par(mar = c(5,5,1,1), las = 1) intensity_endprop <- intensityplot(imdfit_powerlaw, aggregate="time", which="endemic proportion", plot=FALSE) intensity_total <- intensityplot(imdfit_powerlaw, aggregate="time", which="total", tgrid=501, lwd=2, xlab="Time [days]", ylab="Intensity") curve(intensity_endprop(x) * intensity_total(x), add=TRUE, col=2, lwd=2, n=501) #curve(intensity_endprop(x), add=TRUE, col=2, lty=2, n=501) text(2500, 0.36, labels="total", col=1, pos=2, font=2) text(2500, 0.08, labels="endemic", col=2, pos=2, font=2) @ %% Note that this represents a realization of a stochastic process, since it %% depends on the occurred events. The estimated endemic intensity component has also been added to the plot. It exhibits strong seasonality and a slow negative trend. The proportion of the endemic intensity is rather constant along time since no major outbreaks occurred. This proportion can be visualized separately by specifying \code{which = "endemic proportion"} in the above call. <>= meanepiprop <- integrate(intensityplot(imdfit_powerlaw, which="epidemic proportion"), 50, 2450, subdivisions=2000, rel.tol=1e-3)$value / 2400 @ Spatial \code{intensityplot}s as in Figure~\ref{fig:imdfit_powerlaw_intensityplot_space} can be produced via \code{aggregate = "space"} and require a geographic representation of \code{stgrid}. The epidemic proportion is naturally high around clusters of cases and even more so if the population density is low. %% The function \code{epitest} offers a model-based global test for epidemicity, %% while \code{knox} and \code{stKtest} implement related classical approaches %% \citep{meyer.etal2015}. <>= for (.type in 1:2) { print(intensityplot(imdfit_powerlaw, aggregate="space", which="epidemic proportion", types=.type, tiles=districtsD, sgrid=1000, col.regions = grey(seq(1,0,length.out=10)), at = seq(0,1,by=0.1))) grid::grid.text("Epidemic proportion", x=1, rot=90, vjust=-1) } @ Another diagnostic tool is the function \code{checkResidualProcess} (Figure~\ref{fig:imdfit_checkResidualProcess}), which transforms the temporal ``residual process'' in such a way that it exhibits a uniform distribution and lacks serial correlation if the fitted model describes the true CIF well \citep[see][Section~3.3]{ogata1988}. % more recent work: \citet{clements.etal2011} <>= par(mar = c(5, 5, 1, 1)) checkResidualProcess(imdfit_powerlaw) @ \section{Simulation} \label{sec:twinstim:simulation} %% Simulations from the fitted model are also useful to investigate the %% goodness of fit. To identify regions with unexpected IMD dynamics, \citet{meyer.etal2011} compared the observed numbers of cases by district to the respective 2.5\% and 97.5\% quantiles of 100 simulations from the selected model. Furthermore, simulations allow us to investigate the stochastic volatility of the endemic-epidemic process, to obtain probabilistic forecasts, and to perform parametric bootstrap of the spatio-temporal point pattern. 
The simulation algorithm we apply is described in \citet[Section 4]{meyer.etal2011}. It requires a geographic representation of the \code{stgrid}, as well as functionality for sampling locations from the spatial kernel $f_2(\bm{s}) := f(\norm{\bm{s}})$. This is implemented for all predefined spatial interaction functions listed in Table~\ref{tab:iafs}. %For instance for the %power-law kernel, we pass via polar coordinates (with density then proportional %to $rf(r)$) %, a function also involved in the efficient cubature of % %$f_2(\bm{s})$ via Green's theorem) %and the inverse transformation method with numerical root finding for the %quantiles. Event marks are by default sampled from their respective empirical distribution in the original data. %but a customized generator can be supplied as argument \code{rmarks}. The following code runs \emph{a single} simulation over the last year based on the estimated power-law model: <>= imdsim <- simulate(imdfit_powerlaw, nsim = 1, seed = 1, t0 = 2191, T = 2555, data = imdepi_untied_infeps, tiles = districtsD) @ This yields an object of the class \class{simEpidataCS}, which extends \class{epidataCS}. It carries additional components from the generating model to enable an \code{R0}-method and \code{intensityplot}s for simulated data. %All methods for \class{epidataCS} are applicable. %% The result is simplified in that only the \code{events} instead of a full %% \class{epidataCS} object are retained from every run to save memory and %% computation time. All other components, which do not vary between simulations, %% e.g., the \code{stgrid}, are only stored from the first run. %% There is a \code{[[}-method for such \class{simEpidataCSlist}s in order to %% extract single simulations as full \class{simEpidataCS} objects from the %% simplified structure. %Extracting a single simulation (e.g., \code{imdsims[[1]]}) Figure~\ref{fig:imdsim_plot} shows the cumulative number of cases from the simulation appended to the first six years of data. <>= .t0 <- imdsim$timeRange[1] .cumoffset <- c(table(subset(imdepi, time < .t0)$events$type)) par(mar = c(5,5,1,1), las = 1) plot(imdepi, ylim = c(0, 20), col = c("indianred", "darkblue"), subset = time < .t0, cumulative = list(maxat = 336), xlab = "Time [days]") plot(imdsim, add = TRUE, legend.types = FALSE, col = adjustcolor(c("indianred", "darkblue"), alpha.f = 0.5), subset = !is.na(source), # exclude events of the prehistory cumulative = list(offset = .cumoffset, maxat = 336, axis = FALSE), border = NA, density = 0) # no histogram for simulations plot(imdepi, add = TRUE, legend.types = FALSE, col = 1, subset = time >= .t0, cumulative = list(offset = .cumoffset, maxat = 336, axis = FALSE), border = NA, density = 0) # no histogram for the last year's data abline(v = .t0, lty = 2, lwd = 2) @ %% Because we have started simulation at time \code{t0 = 0}, %% no events from \code{data} have been used as the prehistory, i.e., %% the first simulated event is necessarily driven by the endemic model component. A special feature of such simulated epidemics is that the source of each event is known: <>= table(imdsim$events$source > 0, exclude = NULL) @ The stored \code{source} value is 0 for endemic events, \code{NA} for events of the prehistory but still infective at \code{t0}, and otherwise corresponds to the row index of the infective source. %% Averaged over all 30 simulations, the proportion of events triggered by %% previous events is %% Sexpr{mean(sapply(imdsims$eventsList, function(x) mean(x$source > 0, na.rm = TRUE)))}. 
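Since the \code{source} column uses this coding, simple summaries of the realized transmission paths are straightforward. For instance, the proportion of simulated events that were triggered by a previous event could be computed as follows (a minimal sketch mirroring the table above):
<<imdsim_epiprop, eval=FALSE>>=
## proportion of epidemic (triggered) events among the simulated events,
## excluding the NA-coded events of the prehistory
mean(imdsim$events$source > 0, na.rm = TRUE)
@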
%-------------- % BIBLIOGRAPHY %-------------- <>= ## create automatic references for R packages .Rbibfile <- file("twinstim-R.bib", "w", encoding = "latin1") Rbib <- knitr::write_bib( c("memoise", "sp", "polyclip", "maptools", "animation", "xtable"), file = NULL, tweak = FALSE, prefix = "R:") ## write_bib() to file does enc2utf8() -> fails for ISO8859-15 session charset writeLines(unlist(Rbib, use.names = FALSE), .Rbibfile) close(.Rbibfile) @ \bibliography{references,twinstim-R} \end{document} surveillance/inst/doc/twinSIR.R0000644000176200001440000000741314030612467016162 0ustar liggesusers## ----include = FALSE--------------------------------------------------------------- ## load the "cool" package library("surveillance") ## Compute everything or fetch cached results? message("Doing computations: ", COMPUTE <- !file.exists("twinSIR-cache.RData")) if (!COMPUTE) load("twinSIR-cache.RData", verbose = TRUE) ## ----hagelloch.df------------------------------------------------------------------ data("hagelloch") head(hagelloch.df, n = 5) ## ----hagelloch--------------------------------------------------------------------- hagelloch <- as.epidata(hagelloch.df, t0 = 0, tI.col = "tI", tR.col = "tR", id.col = "PN", coords.cols = c("x.loc", "y.loc"), f = list(household = function(u) u == 0, nothousehold = function(u) u > 0), w = list(c1 = function (CL.i, CL.j) CL.i == "1st class" & CL.j == CL.i, c2 = function (CL.i, CL.j) CL.i == "2nd class" & CL.j == CL.i), keep.cols = c("SEX", "AGE", "CL")) ## ----hagelloch_show, warning=FALSE------------------------------------------------- head(hagelloch, n = 5) ## ----hagelloch_plot, echo=2, fig.cap="Evolution of the 1861 Hagelloch measles epidemic in terms of the numbers of susceptible, infectious, and recovered children. The bottom \\code{rug} marks the infection times \\code{tI}.", fig.pos="!h"---- par(mar = c(5, 5, 1, 1)) plot(hagelloch, xlab = "Time [days]") ## ----hagelloch_households, fig.cap="Spatial locations of the Hagelloch households. 
The size of each dot is proportional to the number of children in the household.", fig.pos="ht", echo=-1---- par(mar = c(5, 5, 1, 1)) hagelloch_coords <- summary(hagelloch)$coordinates plot(hagelloch_coords, xlab = "x [m]", ylab = "y [m]", pch = 15, asp = 1, cex = sqrt(multiplicity(hagelloch_coords))) legend(x = "topleft", pch = 15, legend = c(1, 4, 8), pt.cex = sqrt(c(1, 4, 8)), title = "Household size") ## ----hagellochFit, results='hide'-------------------------------------------------- hagellochFit <- twinSIR(~household + c1 + c2 + nothousehold, data = hagelloch) ## ----hagellochFit_summary_echo, eval=FALSE----------------------------------------- # set.seed(1) # summary(hagellochFit) ## ----hagellochFit_confint---------------------------------------------------------- exp(confint(hagellochFit, parm = "cox(logbaseline)")) ## ----hagellochFit_profile, results='hide', eval=COMPUTE---------------------------- # prof <- profile(hagellochFit, # list(c(match("c1", names(coef(hagellochFit))), NA, NA, 25), # c(match("c2", names(coef(hagellochFit))), NA, NA, 25))) ## ---------------------------------------------------------------------------------- prof$ci.hl ## ----hagellochFit_profile_plot, fig.cap="Normalized log-likelihood for $\\alpha_{c1}$ and $\\alpha_{c2}$ when fitting the \\code{twinSIR} model formulated in Equation~\\eqref{eqn:twinSIR:hagelloch} to the Hagelloch data.", fig.pos="ht", fig.height=4.4---- plot(prof) ## ----hagellochFit_plot, echo=2, fig.width=4.5, fig.height=4.5, out.width="0.49\\linewidth", fig.subcap=c("Epidemic proportion.","Transformed residuals."), fig.cap="Diagnostic plots for the \\code{twinSIR} model formulated in Equation~\\ref{eqn:twinSIR:hagelloch}.", fig.pos="htb"---- par(mar = c(5, 5, 1, 1)) plot(hagellochFit, which = "epidemic proportion", xlab = "time [days]") checkResidualProcess(hagellochFit, plot = 1) ## ----hagellochFit_fstep, results='hide'-------------------------------------------- knots <- c(100, 200) fstep <- list( B1 = function(D) D > 0 & D < knots[1], B2 = function(D) D >= knots[1] & D < knots[2], B3 = function(D) D >= knots[2]) hagellochFit_fstep <- twinSIR( ~household + c1 + c2 + B1 + B2 + B3, data = update(hagelloch, f = fstep)) ## ----hagellochFit_AIC-------------------------------------------------------------- set.seed(1) AIC(hagellochFit, hagellochFit_fstep) surveillance/inst/doc/hhh4.Rnw0000644000176200001440000010241013627516414016022 0ustar liggesusers%\VignetteIndexEntry{hhh4: An endemic-epidemic modelling framework for infectious disease counts} %\VignetteDepends{surveillance, Matrix} \documentclass[a4paper,11pt]{article} \usepackage[T1]{fontenc} \usepackage[english]{babel} \usepackage{graphicx} \usepackage{color} \usepackage{natbib} \usepackage{lmodern} \usepackage{bm} \usepackage{amsmath} \usepackage{amsfonts,amssymb} \setlength{\parindent}{0pt} \setcounter{secnumdepth}{1} \newcommand{\Po}{\operatorname{Po}} \newcommand{\NegBin}{\operatorname{NegBin}} \newcommand{\N}{\mathcal{N}} \newcommand{\pkg}[1]{{\fontseries{b}\selectfont #1}} \newcommand{\surveillance}{\pkg{surveillance}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\hhh}{\texttt{hhh4}} \newcommand{\R}{\textsf{R}} \newcommand{\sts}{\texttt{sts}} \newcommand{\example}[1]{\subsubsection*{Example: #1}} %%% Meta data \usepackage{hyperref} \hypersetup{ pdfauthor = {Michaela Paul and Sebastian Meyer}, pdftitle = {'hhh4': An endemic-epidemic modelling framework for infectious disease counts}, pdfsubject = {R package 'surveillance'} } 
\newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}} \title{\code{hhh4}: An endemic-epidemic modelling framework for infectious disease counts} \author{ Michaela Paul and Sebastian Meyer\thanks{Author of correspondence: \email{seb.meyer@fau.de} (new affiliation)}\\ Epidemiology, Biostatistics and Prevention Institute\\ University of Zurich, Zurich, Switzerland } \date{8 February 2016} %%% Sweave \usepackage{Sweave} \SweaveOpts{prefix.string=plots/hhh4, keep.source=T, strip.white=true} \definecolor{Sinput}{rgb}{0,0,0.56} \DefineVerbatimEnvironment{Sinput}{Verbatim}{formatcom={\color{Sinput}},fontshape=sl,fontsize=\footnotesize} \DefineVerbatimEnvironment{Soutput}{Verbatim}{fontshape=sl,fontsize=\footnotesize} %%% Initial R code <>= library("surveillance") options(width=75) ## create directory for plots dir.create("plots", showWarnings=FALSE) ###################################################### ## Do we need to compute or can we just fetch results? ###################################################### compute <- !file.exists("hhh4-cache.RData") message("Doing computations: ", compute) if(!compute) load("hhh4-cache.RData") @ \begin{document} \maketitle \begin{abstract} \noindent The \R\ package \surveillance\ provides tools for the visualization, modelling and monitoring of epidemic phenomena. This vignette is concerned with the \hhh\ modelling framework for univariate and multivariate time series of infectious disease counts proposed by \citet{held-etal-2005}, and further extended by \citet{paul-etal-2008}, \citet{paul-held-2011}, \citet{held.paul2012}, and \citet{meyer.held2013}. The implementation is illustrated using several built-in surveillance data sets. The special case of \emph{spatio-temporal} \hhh\ models is also covered in \citet[Section~5]{meyer.etal2014}, which is available as the extra \verb+vignette("hhh4_spacetime")+. \end{abstract} \section{Introduction}\label{sec:intro} To meet the threats of infectious diseases, many countries have established surveillance systems for the reporting of various infectious diseases. The systematic and standardized reporting at a national and regional level aims to recognize all outbreaks quickly, even when aberrant cases are dispersed in space. Traditionally, notification data, i.e.\ counts of cases confirmed according to a specific definition and reported daily, weekly or monthly on a regional or national level, are used for surveillance purposes. The \R-package \surveillance\ provides functionality for the retrospective modelling and prospective aberration detection in the resulting surveillance time series. Overviews of the outbreak detection functionality of \surveillance\ are given by \citet{hoehle-mazick-2010} and \citet{salmon.etal2014}. This document illustrates the functionality of the function \hhh\ for the modelling of univariate and multivariate time series of infectious disease counts. It is part of the \surveillance\ package as of version 1.3. The remainder of this vignette unfolds as follows: Section~\ref{sec:data} introduces the S4 class data structure used to store surveillance time series data within the package. Access and visualization methods are outlined by means of built-in data sets. In Section~\ref{sec:model}, the statistical modelling approach by \citet{held-etal-2005} and further model extensions are described. After the general function call and arguments are shown, the detailed usage of \hhh\ is demonstrated in Section~\ref{sec:hhh} using data introduced in Section~\ref{sec:data}. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Surveillance data}\label{sec:data} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Denote by $\{y_{it}; i=1,\ldots,I,t=1,\ldots,T\}$ the multivariate time series of disease counts for a specific partition of gender, age and location. Here, $T$ denotes the length of the time series and $I$ denotes the number of units (e.g\ geographical regions or age groups) being monitored. Such data are represented using objects of the S4 class \sts\ (surveillance time series). \subsection[The sts data class]{The \sts\ data class} The \sts\ class contains the $T\times I$ matrix of counts $y_{it}$ in a slot \code{observed}. An integer slot \code{epoch} denotes the time index $1\leq t \leq T$ of each row in \code{observed}. The number of observations per year, e.g.\ 52 for weekly or 12 for monthly data, is denoted by \code{freq}. Furthermore, \code{start} denotes a vector of length two containing the start of the time series as \code{c(year, epoch)}. For spatially stratified time series, the slot \code{neighbourhood} denotes an $I \times I$ adjacency matrix with elements 1 if two regions are neighbors and 0 otherwise. For map visualizations, the slot \code{map} links the multivariate time series to geographical regions stored in a \code{"SpatialPolygons"} object (package \pkg{sp}). Additionally, the slot \code{populationFrac} contains a $T\times I$ matrix representing population fractions in unit $i$ at time $t$. The \sts\ data class is also described in \citet[Section~2.1]{hoehle-mazick-2010}, \citet[Section~1.1]{salmon.etal2014}, \citet[Section~5.2]{meyer.etal2014}, and on the associated help page \code{help("sts")}. \subsection{Some example data sets} The package \surveillance\ contains a number of time series in the \code{data} directory. Most data sets originate from the SurvStat@RKI database\footnote{\url{https://survstat.rki.de}}, maintained by the Robert Koch Institute (RKI) in Germany. Selected data sets will be analyzed in Section~\ref{sec:hhh} and are introduced in the following. Note that many of the built-in datasets are stored in the S3 class data structure \mbox{\code{disProg}} used in ancient versions of the \surveillance\ package (until 2006). They can be easily converted into the new S4 \sts\ data structure using the function \code{disProg2sts}. The resulting \sts\ object can be accessed similar as standard \code{matrix} objects and allows easy temporal and spatial aggregation as will be shown in the remainder of this section. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \example{Influenza and meningococcal disease, Germany, 2001--2006} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% As a first example, the weekly number of influenza and meningococcal disease cases in Germany is considered. <>= # load data data("influMen") # convert to sts class and print basic information about the time series print(fluMen <- disProg2sts(influMen)) @ The univariate time series of meningococcal disease counts can be obtained with <>= meningo <- fluMen[, "meningococcus"] dim(meningo) @ The \code{plot} function provides ways to visualize the multivariate time series in time, space and space-time, as controlled by the \code{type} argument: \setkeys{Gin}{width=1\textwidth} <>= plot(fluMen, type = observed ~ time | unit, # type of plot (default) same.scale = FALSE, # unit-specific ylim? 
col = "grey") # color of bars @ See \code{help("stsplot")} for a detailed description of the plot routines. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \example{Influenza, Southern Germany, 2001--2008} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% The spatio-temporal spread of influenza in the 140 Kreise (districts) of Bavaria and Baden-W\"urttemberg is analyzed using the weekly number of cases reported to the RKI~\citep{survstat-fluByBw} in the years 2001--2008. An \sts\ object containing the data is created as follows: <>= # read in observed number of cases flu.counts <- as.matrix(read.table(system.file("extdata/counts_flu_BYBW.txt", package = "surveillance"), check.names = FALSE)) @ \begin{center} \setkeys{Gin}{width=.5\textwidth} <>= # read in 0/1 adjacency matrix (1 if regions share a common border) nhood <- as.matrix(read.table(system.file("extdata/neighbourhood_BYBW.txt", package = "surveillance"), check.names = FALSE)) library("Matrix") print(image(Matrix(nhood))) @ \end{center} <>= # read in population fractions popfracs <- read.table(system.file("extdata/population_2001-12-31_BYBW.txt", package = "surveillance"), header = TRUE)$popFrac # create sts object flu <- sts(flu.counts, start = c(2001, 1), frequency = 52, population = popfracs, neighbourhood = nhood) @ These data are already included as \code{data("fluBYBW")} in \surveillance. In addition to the \sts\ object created above, \code{fluBYBW} contains a map of the administrative districts of Bavaria and Baden-W\"urttemberg. This works by specifying a \code{"SpatialPolygons"} representation of the districts as an extra argument \code{map} in the above \sts\ call. Such a \code{"SpatialPolygons"} object can be obtained from, e.g, an external shapefile using the function \mbox{\code{readOGR}} from package \pkg{rgdal}. A map enables plots and animations of the cumulative number of cases by region. For instance, a disease incidence map of the year 2001 can be obtained as follows: \setkeys{Gin}{width=.5\textwidth} \begin{center} <>= data("fluBYBW") plot(fluBYBW[year(fluBYBW) == 2001, ], # select year 2001 type = observed ~ unit, # total counts by region population = fluBYBW@map$X31_12_01 / 100000) # per 100000 inhabitants grid::grid.text("Incidence [per 100'000 inhabitants]", x = 0.5, y = 0.02) @ \end{center} <>= # consistency check local({ fluBYBW@map <- flu@map stopifnot(all.equal(fluBYBW, flu)) }) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \example{Measles, Germany, 2005--2007} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% The following data set contains the weekly number of measles cases in the 16 German federal states, in the years 2005--2007. These data have been analyzed by \citet{herzog-etal-2010} after aggregation into bi-weekly periods. <>= data("measlesDE") measles2w <- aggregate(measlesDE, nfreq = 26) @ \setkeys{Gin}{width=.75\textwidth} \begin{center} <>= plot(measles2w, type = observed ~ time, # aggregate counts over all units main = "Bi-weekly number of measles cases in Germany") @ \end{center} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Model formulation}\label{sec:model} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Retrospective surveillance aims to identify outbreaks and (spatio-)temporal patterns through statistical modelling. 
Motivated by a branching process with immigration, \citet{held-etal-2005} suggest the following model for the analysis of univariate time series of infectious disease counts $\{y_{t}; t=1,\ldots,T\}$. The counts are assumed to be Poisson distributed with conditional mean \begin{align*} \mu_{t} = \lambda y_{t-1}+ \nu_{t}, \quad(\lambda,\nu_{t}>0) \end{align*} where $\lambda$ and $\nu_t$ are unknown quantities. The mean incidence is decomposed additively into two components: an epidemic or \emph{autoregressive} component $\lambda y_{t-1}$, and an \emph{endemic} component $\nu_t$. The former should be able to capture occasional outbreaks whereas the latter explains a baseline rate of cases with stable temporal pattern. \citet{held-etal-2005} suggest the following parametric model for the endemic component: \begin{align}\label{eq:nu_t} \log(\nu_t) =\alpha + \beta t + \left\{\sum_{s=1}^S \gamma_s \sin(\omega_s t) + \delta_s \cos(\omega_s t)\right\}, \end{align} where $\alpha$ is an intercept, $\beta$ is a trend parameter, and the terms in curly brackets are used to model seasonal variation. Here, $\gamma_s$ and $\delta_s$ are unknown parameters, $S$ denotes the number of harmonics to include, and $\omega_s=2\pi s/$\code{freq} are Fourier frequencies (e.g.\ \code{freq = 52} for weekly data). For ease of interpretation, the seasonal terms in \eqref{eq:nu_t} can be written equivalently as \begin{align*} \gamma_s \sin(\omega_s t) + \delta_s \cos(\omega_s t)= A_s \sin(\omega_s t +\varphi_s) \end{align*} with amplitude $A_s=\sqrt{\gamma_s^2+\delta_s^2}$ describing the magnitude, and phase difference $\tan(\varphi_s)=\delta_s/\gamma_s$ describing the onset of the sine wave. To account for overdispersion, the Poisson model may be replaced by a negative binomial model. Then, the conditional mean $\mu_t$ remains the same but the conditional variance increases to $\mu_t (1+\mu_t \psi)$ with additional unknown overdispersion parameter $\psi>0$. The model is extended to multivariate time series $\{y_{it}\}$ in \citet{held-etal-2005} and \citet{paul-etal-2008} by including an additional \emph{neighbor-driven} component, where past cases in other (neighboring) units also enter as explanatory covariates. The conditional mean $\mu_{it}$ is then given by \begin{align} \label{eq:mu_it} \mu_{it} = \lambda y_{i,t-1} + \phi \sum_{j\neq i} w_{ji} y_{j,t-1} +e_{it} \nu_{t}, \end{align} where the unknown parameter $\phi$ quantifies the influence of other units $j$ on unit $i$, $w_{ji}$ are weights reflecting between-unit transmission and $e_{it}$ corresponds to an offset (such as population fractions at time $t$ in region $i$). A simple choice for the weights is $w_{ji}=1$ if units $j$ and $i$ are adjacent and 0 otherwise. See \citet{paul-etal-2008} for a discussion of alternative weights, and \citet{meyer.held2013} for how to estimate these weights in the spatial setting using a parametric power-law formulation based on the order of adjacency. When analyzing a specific disease observed in, say, multiple regions or several pathogens (such as influenza and meningococcal disease), the assumption of equal incidence levels or disease transmission across units is questionable. To address such heterogeneity, the unknown quantities $\lambda$, $\phi$, and $\nu_t$ in \eqref{eq:mu_it} may also depend on unit $i$. 
This can be done via \begin{itemize} \item unit-specific fixed parameters, e.g.\ $\log(\lambda_i)=\alpha_i$ \citep{paul-etal-2008}; \item unit-specific random effects, e.g\ $\log(\lambda_i)=\alpha_0 +a_i$, $a_i \stackrel{\text{iid}}{\sim} \N(0,\sigma^2_\lambda)$ \citep{paul-held-2011}; \item linking parameters with known (possibly time-varying) explanatory variables, e.g.\ $\log(\lambda_i)=\alpha_0 +x_i\alpha_1$ with region-specific vaccination coverage $x_i$ \citep{herzog-etal-2010}. \end{itemize} In general, the parameters of all three model components may depend on both time and unit. A call to \hhh\ fits a Poisson or negative binomial model with conditional mean \begin{align*} \mu_{it} = \lambda_{it} y_{i,t-1} + \phi_{it} \sum_{j\neq i} w_{ji} y_{j,t-1} +e_{it} \nu_{it} \end{align*} to a (multivariate) time series of counts. Here, the three unknown quantities are modelled as log-linear predictors \begin{align} \log(\lambda_{it}) &= \alpha_0 + a_i +\bm{u}_{it}^\top \bm{\alpha} \tag{\code{ar}}\\ \log(\phi_{it}) &= \beta_0 + b_i +\bm{x}_{it}^\top \bm{\beta} \tag{\code{ne}}\\ \log(\nu_{it}) &= \gamma_0 + c_i +\bm{z}_{it}^\top \bm{\gamma}\tag{\code{end}} \end{align} where $\alpha_0,\beta_0,\gamma_0$ are intercepts, $\bm{\alpha},\bm{\beta},\bm{\gamma}$ are vectors of unknown parameters corresponding to covariate vectors $\bm{u}_{it},\bm{x}_{it},\bm{z}_{it}$, and $a_i,b_i,c_i$ are random effects. For instance, model~\eqref{eq:nu_t} with $S=1$ seasonal terms may be represented as $\bm{z}_{it}=(t,\sin(2\pi/\code{freq}\;t),\cos(2\pi/\code{freq}\;t))^\top$. The stacked vector of all random effects is assumed to follow a normal distribution with mean $\bm{0}$ and covariance matrix $\bm{\Sigma}$. In applications, each of the components \code{ar}, \code{ne}, and \code{end} may be omitted in parts or as a whole. If the model does not contain random effects, standard likelihood inference can be performed. Otherwise, inference is based on penalized quasi-likelihood as described in detail in \citet{paul-held-2011}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Function call and control settings}\label{sec:hhh} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% The estimation procedure is called with <>= hhh4(sts, control) @ where \code{sts} denotes a (multivariate) surveillance time series and the model is specified in the argument \code{control} in consistency with other algorithms in \surveillance. 
The \code{control} setting is a list of the following arguments (here with default values):
<<>>=
control = list(
    ar = list(f = ~ -1,        # formula for log(lambda_it)
              offset = 1),     # optional multiplicative offset
    ne = list(f = ~ -1,        # formula for log(phi_it)
              offset = 1,      # optional multiplicative offset
              weights = neighbourhood(stsObj) == 1),  # (w_ji) matrix
    end = list(f = ~ 1,        # formula for log(nu_it)
               offset = 1),    # optional multiplicative offset e_it
    family = "Poisson",        # Poisson or NegBin model
    subset = 2:nrow(stsObj),   # subset of observations to be used
    optimizer = list(stop = list(tol = 1e-5, niter = 100),  # stop rules
                     regression = list(method = "nlminb"),  # for penLogLik
                     variance = list(method = "nlminb")),   # for marLogLik
    verbose = FALSE,           # level of progress reporting
    start = list(fixed = NULL,    # list with initial values for fixed,
                 random = NULL,   # random, and
                 sd.corr = NULL), # variance parameters
    data = list(t = epoch(stsObj)-1), # named list of covariates
    keep.terms = FALSE         # whether to keep the model terms
)
@
The first three arguments \code{ar}, \code{ne}, and \code{end} specify the model components using \code{formula} objects. By default, the counts $y_{it}$ are assumed to be Poisson distributed, but a negative binomial model can be chosen by setting \mbox{\code{family = "NegBin1"}}. Both the penalized and marginal log-likelihoods are maximized by default using the quasi-Newton algorithm available via the \R\ function \code{nlminb}. The methods from \code{optim} may also be used; e.g., \mbox{\code{optimizer = list(variance = list(method = "Nelder-Mead"))}} is a useful alternative for maximization of the marginal log-likelihood with respect to the variance parameters. Initial values for the fixed, random, and variance parameters can be specified in the \code{start} argument. If the model contains covariates, these have to be provided in the \code{data} argument. If a covariate does not vary across units, it may be given as a vector of length $T$. Otherwise, covariate values must be given in a matrix of size $T \times I$.

In the following, the functionality of \hhh\ is demonstrated using the data sets introduced in Section~\ref{sec:data} and previously analyzed in \citet{paul-etal-2008}, \citet{paul-held-2011} and \citet{herzog-etal-2010}. Selected results are reproduced. For a thorough discussion we refer to these papers.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Univariate modelling}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

As a first example, consider the univariate time series of meningococcal infections in Germany, 01/2001--52/2006 \citep[cf.][Table~1]{paul-etal-2008}. A Poisson model without autoregression and $S=1$ seasonal term is specified as follows:
<<>>=
# specify a formula object for the endemic component
( f_S1 <- addSeason2formula(f = ~ 1, S = 1, period = 52) )
# fit the Poisson model
result0 <- hhh4(meningo, control = list(end = list(f = f_S1),
                                        family = "Poisson"))
summary(result0)
@
To fit the corresponding negative binomial model, we can use the convenient \code{update} method:
<<>>=
result1 <- update(result0, family = "NegBin1")
@
Note that the \code{update} method by default uses the parameter estimates from the original model as start values when fitting the updated model; see \code{help("update.hhh4")} for details.
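If a refit from scratch is preferred instead, this behavior can be switched off. The following sketch (not evaluated here) assumes the \code{use.estimates} argument as documented in \code{help("update.hhh4")}:
<<updateFromScratch, eval=FALSE>>=
## refit using the original start values rather than the previous estimates
result1b <- update(result0, family = "NegBin1", use.estimates = FALSE)
@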
We can calculate Akaike's Information Criterion for the two models to check whether accounting for overdispersion is useful for these data: <<>>= AIC(result0, result1) @ Due to the default control settings with \verb|ar = list(f = ~ -1)|, the autoregressive component has been omitted in the above models. It can be included by the following model update: <>= # fit an autoregressive model result2 <- update(result1, ar = list(f = ~ 1)) @ To extract only the ML estimates and standard errors instead of a full model \code{summary}, the \code{coef} method can be used: <<>>= coef(result2, se = TRUE, # also return standard errors amplitudeShift = TRUE, # transform sine/cosine coefficients # to amplitude/shift parameters idx2Exp = TRUE) # exponentiate remaining parameters @ Here, \code{exp(ar.1)} is the autoregressive coefficient $\lambda$ and can be interpreted as the epidemic proportion of disease incidence \citep{held.paul2012}. Note that the above transformation arguments \code{amplitudeShift} and \code{idx2Exp} can also be used in the \code{summary} method. Many other standard methods are implemented for \code{"hhh4"} fits, see, e.g., \code{help("confint.hhh4")}. A plot of the fitted model components can be easily obtained: \begin{center} <>= plot(result2) @ \end{center} See the comprehensive \code{help("plot.hhh4")} for further options. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Bivariate modelling} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Now, the weekly numbers of both meningococcal disease (\textsc{MEN}) and influenza (\textsc{FLU}) cases are analyzed to investigate whether influenza infections predispose meningococcal disease \citep[cf.][Table~2]{paul-etal-2008}. This requires disease-specific parameters which are specified in the formula object with \code{fe(\ldots)}. In the following, a negative binomial model with mean \begin{align*} \binom{\mu_{\text{men},t}} {\mu_{\text{flu},t}}= \begin{pmatrix} \lambda_\text{men} & \phi \\ 0 & \lambda_\text{flu} \\ \end{pmatrix} \binom{\text{\sc men}_{t-1}}{\text{\sc flu}_{t-1}} + \binom{\nu_{\text{men},t}}{\nu_{\text{flu},t}}\,, \end{align*} where the endemic component includes $S=3$ seasonal terms for the \textsc{FLU} data and $S=1$ seasonal terms for the \textsc{MEN} data is considered. Here, $\phi$ quantifies the influence of past influenza cases on the meningococcal disease incidence. 
This model corresponds to the second model of Table~2 in \citet{paul-etal-2008} and is fitted as follows: <>= # no "transmission" from meningococcus to influenza neighbourhood(fluMen)["meningococcus","influenza"] <- 0 neighbourhood(fluMen) @ <>= # create formula for endemic component f.end <- addSeason2formula(f = ~ -1 + fe(1, unitSpecific = TRUE), # disease-specific intercepts S = c(3, 1), # S = 3 for flu, S = 1 for men period = 52) # specify model m <- list(ar = list(f = ~ -1 + fe(1, unitSpecific = TRUE)), ne = list(f = ~ 1, # phi, only relevant for meningococcus due to weights = neighbourhood(fluMen)), # the weight matrix end = list(f = f.end), family = "NegBinM") # disease-specific overdispersion # fit model result <- hhh4(fluMen, control = m) summary(result, idx2Exp=1:3) @ A plot of the estimated mean components can be obtained as follows: \setkeys{Gin}{width=1\textwidth} \begin{center} <>= plot(result, units = 1:2, legend = 2, legend.args = list( legend = c("influenza-driven", "autoregressive", "endemic"))) @ \end{center} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Multivariate modelling} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% For disease counts observed in a large number of regions, say, (i.e.\ highly multivariate time series of counts) the use of region-specific parameters to account for regional heterogeneity is no longer feasible as estimation and identifiability problems may occur. Here we illustrate two approaches: region-specific random effects and region-specific covariates. For a more detailed illustration of areal \code{hhh4} models, see \verb+vignette("hhh4_spacetime")+, which uses \verb+data("measlesWeserEms")+ as an example. \subsubsection*{Influenza, Southern Germany, 2001--2008} \citet{paul-held-2011} propose a random effects formulation to analyze the weekly number of influenza cases in \Sexpr{ncol(fluBYBW)} districts of Southern Germany. For example, consider a model with random intercepts in the endemic component: $c_i \stackrel{iid}{\sim} \N(0,\sigma^2_\nu), i=1,\ldots,I$. Such effects are specified as: <>= f.end <- ~ -1 + ri(type = "iid", corr = "all") @ The alternative \code{type = "car"} would assume spatially correlated random effects; see \citet{paul-held-2011} for details. The argument \code{corr = "all"} allows for correlation between region-specific random effects in different components, e.g., random incidence levels $c_i$ in the endemic component and random effects $b_i$ in the neighbor-driven component. The following call to \hhh\ fits such a random effects model with linear trend and $S=3$ seasonal terms in the endemic component, a fixed autoregressive parameter $\lambda$, and first-order transmission weights $w_{ji}=\mathbb{I}(j\sim i)$ -- normalized such that $\sum_i w_{ji} = 1$ for all rows $j$ -- to the influenza data \citep[cf.][Table~3, model~B2]{paul-held-2011}. 
<>= # endemic component: iid random effects, linear trend, S=3 seasonal terms f.end <- addSeason2formula(f = ~ -1 + ri(type="iid", corr="all") + I((t-208)/100), S = 3, period = 52) # model specification model.B2 <- list(ar = list(f = ~ 1), ne = list(f = ~ -1 + ri(type="iid", corr="all"), weights = neighbourhood(fluBYBW), normalize = TRUE), # all(rowSums(weights) == 1) end = list(f = f.end, offset = population(fluBYBW)), family = "NegBin1", verbose = TRUE, optimizer = list(variance = list(method = "Nelder-Mead"))) # default start values for random effects are sampled from a normal set.seed(42) @ <>= if(compute){ result.B2 <- hhh4(fluBYBW, model.B2) s.B2 <- summary(result.B2, maxEV = TRUE, idx2Exp = 1:3) #pred.B2 <- oneStepAhead(result.B2, tp = nrow(fluBYBW) - 2*52) predfinal.B2 <- oneStepAhead(result.B2, tp = nrow(fluBYBW) - 2*52, type = "final") meanSc.B2 <- colMeans(scores(predfinal.B2)) save(s.B2, meanSc.B2, file="hhh4-cache.RData") } @ <>= # fit the model (takes about 35 seconds) result.B2 <- hhh4(fluBYBW, model.B2) summary(result.B2, maxEV = TRUE, idx2Exp = 1:3) @ <>= s.B2 @ Model choice based on information criteria such as AIC or BIC is well explored and understood for models that correspond to fixed-effects likelihoods. However, in the presence of random effects their use can be problematic. For model selection in time series models, the comparison of successive one-step-ahead forecasts with the actually observed data provides a natural alternative. In this context, \citet{gneiting-raftery-2007} recommend the use of strictly proper scoring rules, such as the logarithmic score (logs) or the ranked probability score (rps). See \citet{czado-etal-2009} and \citet{paul-held-2011} for further details. One-step-ahead predictions for the last 2 years for model B2 could be obtained as follows: <>= pred.B2 <- oneStepAhead(result.B2, tp = nrow(fluBYBW) - 2*52) @ However, computing ``rolling'' one-step-ahead predictions from a random effects model is computationally expensive, since the model needs to be refitted at every time point. The above call would take approximately 45 minutes! So for the purpose of this vignette, we use the fitted model based on the whole time series to compute all (fake) predictions during the last two years: <>= predfinal.B2 <- oneStepAhead(result.B2, tp = nrow(fluBYBW) - 2*52, type = "final") @ The mean scores (logs and rps) corresponding to this set of predictions can then be computed as follows: <>= colMeans(scores(predfinal.B2, which = c("logs", "rps"))) @ <>= meanSc.B2[c("logs", "rps")] @ Using predictive model assessments, \citet{meyer.held2013} found that power-law transmission weights more appropriately reflect the spread of influenza than the previously used first-order weights (which actually allow the epidemic to spread only to directly adjacent districts within one week). These power-law weights can be constructed by the function \code{W\_powerlaw} and require the \code{neighbourhood} of the \sts\ object to contain adjacency orders. The latter can be easily obtained from the binary adjacency matrix using the function \code{nbOrder}. See the corresponding help pages or \citet[Section~5]{meyer.etal2014} for illustrations. 
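For the influenza data at hand, such a power-law model could be set up as sketched below (not evaluated here; this reuses the binary adjacency matrix \code{nhood} from Section~\ref{sec:data} and keeps the row-wise normalization in the \code{ne} component):
<<powerlawSketch, eval=FALSE>>=
## replace the binary adjacency matrix by adjacency orders
neighbourhood(flu) <- nbOrder(nhood, maxlag = Inf)
## estimate a power-law decay of the transmission weights
model.B2.pl <- modifyList(model.B2,
    list(ne = list(weights = W_powerlaw(maxlag = 5, normalize = FALSE))))
result.B2.pl <- hhh4(flu, model.B2.pl)
@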
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsubsection*{Measles, German federal states, 2005--2007} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% <>= data(MMRcoverageDE) cardVac1 <- MMRcoverageDE[1:16,3:4] adjustVac <- function(cardVac, p=0.5,nrow=1){ card <- cardVac[,1] vac <- cardVac[,2] vacAdj <- vac*card + p*vac*(1-card) return(matrix(vacAdj,nrow=nrow, ncol=length(vacAdj), byrow=TRUE)) } vac0 <- 1-adjustVac(cardVac1,p=0.5,nrow=measles2w@freq*3) colnames(vac0) <- colnames(measles2w) @ As a last example, consider the number of measles cases in the 16 federal states of Germany, in the years 2005--2007. There is considerable regional variation in the incidence pattern which is most likely due to differences in vaccination coverage. In the following, information about vaccination coverage in each state, namely the log proportion of unvaccinated school starters, is included as explanatory variable in a model for the bi-weekly aggregated measles data. See \citet{herzog-etal-2010} for further details. Vaccination coverage levels for the year 2006 are available in the dataset \code{data(MMRcoverageDE)}. This dataset can be used to compute the $\Sexpr{nrow(vac0)}\times \Sexpr{ncol(vac0)}$ matrix \code{vac0} with adjusted proportions of unvaccinated school starters in each state $i$ used by \citet{herzog-etal-2010}. The first few entries of this matrix are shown below: <<>>= vac0[1:2, 1:6] @ We fit a Poisson model, which links the autoregressive parameter with this covariate and contains $S=1$ seasonal term in the endemic component \citep[cf.][Table~3, model~A0]{herzog-etal-2010}: <>= # endemic component: Intercept + sine/cosine terms f.end <- addSeason2formula(f = ~ 1, S = 1, period = 26) # autoregressive component: Intercept + vaccination coverage information model.A0 <- list(ar = list(f = ~ 1 + logVac0), end = list(f = f.end, offset = population(measles2w)), data = list(t = epoch(measles2w), logVac0 = log(vac0))) # fit the model result.A0 <- hhh4(measles2w, model.A0) summary(result.A0, amplitudeShift = TRUE) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Conclusion} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% As part of the \R~package \surveillance, the function \hhh\ provides a flexible tool for the modelling of multivariate time series of infectious disease counts. The presented count data model is able to account for serial and spatio-temporal correlation, as well as heterogeneity in incidence levels and disease transmission. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \bibliographystyle{apalike} \renewcommand{\bibfont}{\small} \bibliography{references} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \end{document} surveillance/inst/doc/hhh4_spacetime.R0000644000176200001440000003570114030612462017504 0ustar liggesusers## ----include = FALSE--------------------------------------------------------------- ## load the "cool" package library("surveillance") ## Compute everything or fetch cached results? 
message("Doing computations: ", COMPUTE <- !file.exists("hhh4_spacetime-cache.RData")) if (!COMPUTE) load("hhh4_spacetime-cache.RData", verbose = TRUE) ## ----measlesWeserEms_components, echo=FALSE---------------------------------------- ## extract components from measlesWeserEms to reconstruct data("measlesWeserEms") counts <- observed(measlesWeserEms) map <- measlesWeserEms@map populationFrac <- measlesWeserEms@populationFrac ## ----measlesWeserEms_neighbourhood------------------------------------------------- weserems_adjmat <- poly2adjmat(map) weserems_nbOrder <- nbOrder(weserems_adjmat, maxlag = Inf) ## ----measlesWeserEms_construct----------------------------------------------------- measlesWeserEms <- sts(counts, start = c(2001, 1), frequency = 52, population = populationFrac, neighbourhood = weserems_nbOrder, map = map) ## ----measlesWeserEms, fig.cap="Measles infections in the Weser-Ems region, 2001--2002.", fig.subcap=c("Time series of weekly counts.","Disease incidence (per 100\\,000 inhabitants)."), fig.width=5, fig.height=5, out.width="0.5\\linewidth", fig.pos="htb", echo=-1---- par(mar = c(5,5,1,1)) plot(measlesWeserEms, type = observed ~ time) plot(measlesWeserEms, type = observed ~ unit, population = measlesWeserEms@map$POPULATION / 100000, labels = list(font = 2), colorkey = list(space = "right"), sp.layout = layout.scalebar(measlesWeserEms@map, corner = c(0.05, 0.05), scale = 50, labels = c("0", "50 km"), height = 0.03)) ## ----measlesWeserEms15, fig.cap=paste("Count time series of the", sum(colSums(observed(measlesWeserEms))>0), "affected districts."), out.width="\\linewidth", fig.width=10, fig.height=6, fig.pos="htb"---- if (require("ggplot2")) { autoplot(measlesWeserEms, units = which(colSums(observed(measlesWeserEms)) > 0)) } else plot(measlesWeserEms, units = which(colSums(observed(measlesWeserEms)) > 0)) ## ----measlesWeserEms_animation, eval=FALSE----------------------------------------- # animation::saveHTML( # animate(measlesWeserEms, tps = 1:52, total.args = list()), # title = "Evolution of the measles epidemic in the Weser-Ems region, 2001", # ani.width = 500, ani.height = 600) ## ----echo=FALSE, eval=FALSE-------------------------------------------------------- # ## to perform the following analysis using biweekly aggregated measles counts: # measlesWeserEms <- aggregate(measlesWeserEms, by = "time", nfreq = 26) ## ----measlesModel_basic------------------------------------------------------------ measlesModel_basic <- list( end = list(f = addSeason2formula(~1 + t, period = measlesWeserEms@freq), offset = population(measlesWeserEms)), ar = list(f = ~1), ne = list(f = ~1, weights = neighbourhood(measlesWeserEms) == 1), family = "NegBin1") ## ----measlesFit_basic-------------------------------------------------------------- measlesFit_basic <- hhh4(stsObj = measlesWeserEms, control = measlesModel_basic) ## ----measlesFit_basic_summary------------------------------------------------------ summary(measlesFit_basic, idx2Exp = TRUE, amplitudeShift = TRUE, maxEV = TRUE) ## ----measlesFit_basic_endseason, fig.width=6, fig.height=2.5, out.width=".5\\linewidth", fig.cap="Estimated multiplicative effect of seasonality on the endemic mean.", fig.pos="ht"---- plot(measlesFit_basic, type = "season", components = "end", main = "") ## ----measlesFitted_basic, fig.cap="Fitted components in the initial model \\code{measlesFit\\_basic} for the five districts with more than 50 cases as well as summed over all districts (bottom right). 
Dots are only drawn for positive weekly counts.", out.width="\\linewidth", fig.pos="htb"---- districts2plot <- which(colSums(observed(measlesWeserEms)) > 50) par(mfrow = c(2,3), mar = c(3, 5, 2, 1), las = 1) plot(measlesFit_basic, type = "fitted", units = districts2plot, hide0s = TRUE, par.settings = NULL, legend = 1) plot(measlesFit_basic, type = "fitted", total = TRUE, hide0s = TRUE, par.settings = NULL, legend = FALSE) -> fitted_components ## ---------------------------------------------------------------------------------- fitted_components$Overall[20:22,] ## ---------------------------------------------------------------------------------- colSums(fitted_components$Overall)[3:5] / sum(fitted_components$Overall[,1]) ## ---------------------------------------------------------------------------------- confint(measlesFit_basic, parm = "overdisp") ## ----measlesFit_basic_Poisson------------------------------------------------------ AIC(measlesFit_basic, update(measlesFit_basic, family = "Poisson")) ## ----Sprop------------------------------------------------------------------------- Sprop <- matrix(1 - measlesWeserEms@map@data$vacc1.2004, nrow = nrow(measlesWeserEms), ncol = ncol(measlesWeserEms), byrow = TRUE) summary(Sprop[1, ]) ## ----SmodelGrid-------------------------------------------------------------------- Soptions <- c("unchanged", "Soffset", "Scovar") SmodelGrid <- expand.grid(end = Soptions, ar = Soptions) row.names(SmodelGrid) <- do.call("paste", c(SmodelGrid, list(sep = "|"))) ## ----measlesFits_vacc, eval=COMPUTE------------------------------------------------ # measlesFits_vacc <- apply(X = SmodelGrid, MARGIN = 1, FUN = function (options) { # updatecomp <- function (comp, option) switch(option, "unchanged" = list(), # "Soffset" = list(offset = comp$offset * Sprop), # "Scovar" = list(f = update(comp$f, ~. + log(Sprop)))) # update(measlesFit_basic, # end = updatecomp(measlesFit_basic$control$end, options[1]), # ar = updatecomp(measlesFit_basic$control$ar, options[2]), # data = list(Sprop = Sprop)) # }) ## ----aics_vacc, eval=COMPUTE------------------------------------------------------- # aics_vacc <- do.call(AIC, lapply(names(measlesFits_vacc), as.name), # envir = as.environment(measlesFits_vacc)) ## ---------------------------------------------------------------------------------- aics_vacc[order(aics_vacc[, "AIC"]), ] ## ----measlesFit_vacc--------------------------------------------------------------- measlesFit_vacc <- update(measlesFit_basic, end = list(f = update(formula(measlesFit_basic)$end, ~. 
+ log(Sprop))), data = list(Sprop = Sprop)) coef(measlesFit_vacc, se = TRUE)["end.log(Sprop)", ] ## ---------------------------------------------------------------------------------- 2^cbind("Estimate" = coef(measlesFit_vacc), confint(measlesFit_vacc))["end.log(Sprop)",] ## ----measlesFit_nepop-------------------------------------------------------------- measlesFit_nepop <- update(measlesFit_vacc, ne = list(f = ~log(pop)), data = list(pop = population(measlesWeserEms))) ## ---------------------------------------------------------------------------------- cbind("Estimate" = coef(measlesFit_nepop), confint(measlesFit_nepop))["ne.log(pop)",] ## ----measlesFit_powerlaw----------------------------------------------------------- measlesFit_powerlaw <- update(measlesFit_nepop, ne = list(weights = W_powerlaw(maxlag = 5))) ## ---------------------------------------------------------------------------------- cbind("Estimate" = coef(measlesFit_powerlaw), confint(measlesFit_powerlaw))["neweights.d",] ## ----measlesFit_np----------------------------------------------------------------- measlesFit_np2 <- update(measlesFit_nepop, ne = list(weights = W_np(maxlag = 2))) ## ----measlesFit_neweights, fig.width=5, fig.height=3.5, fig.cap="Estimated weights as a function of adjacency order.", out.width="0.47\\linewidth", fig.subcap=c("Normalized power-law weights.", "Non-normalized weights with 95\\% CIs."), echo=c(1,5)---- library("lattice") trellis.par.set("reference.line", list(lwd=3, col="gray")) trellis.par.set("fontsize", list(text=14)) set.seed(20200303) plot(measlesFit_powerlaw, type = "neweights", plotter = stripplot, panel = function (...) {panel.stripplot(...); panel.average(...)}, jitter.data = TRUE, xlab = expression(o[ji]), ylab = expression(w[ji])) ## non-normalized weights (power law and unconstrained second-order weight) local({ colPL <- "#0080ff" ogrid <- 1:5 par(mar=c(3.6,4,2.2,2), mgp=c(2.1,0.8,0)) plot(ogrid, ogrid^-coef(measlesFit_powerlaw)["neweights.d"], col=colPL, xlab="Adjacency order", ylab="Non-normalized weight", type="b", lwd=2) matlines(t(sapply(ogrid, function (x) x^-confint(measlesFit_powerlaw, parm="neweights.d"))), type="l", lty=2, col=colPL) w2 <- exp(c(coef(measlesFit_np2)["neweights.d"], confint(measlesFit_np2, parm="neweights.d"))) lines(ogrid, c(1,w2[1],0,0,0), type="b", pch=19, lwd=2) arrows(x0=2, y0=w2[2], y1=w2[3], length=0.1, angle=90, code=3, lty=2) legend("topright", col=c(colPL, 1), pch=c(1,19), lwd=2, bty="n", inset=0.1, y.intersp=1.5, legend=c("Power-law model", "Second-order model")) }) ## ---------------------------------------------------------------------------------- AIC(measlesFit_nepop, measlesFit_powerlaw, measlesFit_np2) ## ----measlesFit_ri, results="hide"------------------------------------------------- measlesFit_ri <- update(measlesFit_powerlaw, end = list(f = update(formula(measlesFit_powerlaw)$end, ~. + ri() - 1)), ar = list(f = update(formula(measlesFit_powerlaw)$ar, ~. + ri() - 1)), ne = list(f = update(formula(measlesFit_powerlaw)$ne, ~. 
+ ri() - 1))) ## ----measlesFit_ri_summary_echo, eval=FALSE---------------------------------------- # summary(measlesFit_ri, amplitudeShift = TRUE, maxEV = TRUE) ## ---------------------------------------------------------------------------------- head(ranef(measlesFit_ri, tomatrix = TRUE), n = 3) ## ----measlesFit_ri_map, out.width="0.31\\linewidth", fig.width=3.5, fig.height=3.7, fig.pos="htb", fig.cap="Estimated multiplicative effects on the three components.", fig.subcap=c("Autoregressive", "Spatio-temporal", "Endemic")---- for (comp in c("ar", "ne", "end")) { print(plot(measlesFit_ri, type = "ri", component = comp, exp = TRUE, labels = list(cex = 0.6))) } ## ---------------------------------------------------------------------------------- exp(ranef(measlesFit_ri, intercept = TRUE)["03403", "ar.ri(iid)"]) ## ----measlesFitted_ri, out.width="\\linewidth", fig.pos="htb", fig.cap="Fitted components in the random effects model \\code{measlesFit\\_ri} for the five districts with more than 50 cases as well as summed over all districts. Compare to Figure~\\ref{fig:measlesFitted_basic}."---- par(mfrow = c(2,3), mar = c(3, 5, 2, 1), las = 1) plot(measlesFit_ri, type = "fitted", units = districts2plot, hide0s = TRUE, par.settings = NULL, legend = 1) plot(measlesFit_ri, type = "fitted", total = TRUE, hide0s = TRUE, par.settings = NULL, legend = FALSE) ## ----measlesFitted_maps, fig.cap="Maps of the fitted component proportions averaged over all weeks.", fig.pos="hbt", fig.width=10, fig.height=3.7, out.width="0.93\\linewidth"---- plot(measlesFit_ri, type = "maps", which = c("epi.own", "epi.neighbours", "endemic"), prop = TRUE, labels = list(cex = 0.6)) ## ----measlesPreds1----------------------------------------------------------------- tp <- c(65, 77) models2compare <- paste0("measlesFit_", c("basic", "powerlaw", "ri")) measlesPreds1 <- lapply(mget(models2compare), oneStepAhead, tp = tp, type = "final") ## ----echo=FALSE-------------------------------------------------------------------- stopifnot(all.equal(measlesPreds1$measlesFit_powerlaw$pred, fitted(measlesFit_powerlaw)[tp[1]:tp[2],], check.attributes = FALSE)) ## ----echo=FALSE-------------------------------------------------------------------- stopifnot(all.equal( measlesFit_powerlaw$loglikelihood, -sum(scores(oneStepAhead(measlesFit_powerlaw, tp = 1, type = "final"), which = "logs", individual = TRUE)))) ## ----measlesScores1---------------------------------------------------------------- SCORES <- c("logs", "rps", "dss", "ses") measlesScores1 <- lapply(measlesPreds1, scores, which = SCORES, individual = TRUE) t(sapply(measlesScores1, colMeans, dims = 2)) ## ----measlesPreds2, eval=COMPUTE--------------------------------------------------- # measlesPreds2 <- lapply(mget(models2compare), oneStepAhead, # tp = tp, type = "rolling", which.start = "final") ## ----measlesPreds2_plot, fig.cap = "Fan charts of rolling one-week-ahead forecasts during the second quarter of 2002, as produced by the random effects model \\code{measlesFit\\_ri}, for the five most affected districts.", out.width="\\linewidth", echo=-1---- par(mfrow = sort(n2mfrow(length(districts2plot))), mar = c(4.5,4.5,2,1)) for (unit in names(districts2plot)) plot(measlesPreds2[["measlesFit_ri"]], unit = unit, main = unit, key.args = if (unit == tail(names(districts2plot),1)) list()) ## ----measlesScores2---------------------------------------------------------------- measlesScores2 <- lapply(measlesPreds2, scores, which = SCORES, individual = TRUE) t(sapply(measlesScores2, 
colMeans, dims = 2))

## ----measlesScores_test------------------------------------------------------------
set.seed(321)
sapply(SCORES, function (score) permutationTest(
    measlesScores2$measlesFit_ri[, , score],
    measlesScores2$measlesFit_basic[, , score],
    nPermutation = 999))

## ----measlesPreds2_calibrationTest_echo, eval=FALSE--------------------------------
# calibrationTest(measlesPreds2[["measlesFit_ri"]], which = "rps")

## ----measlesPreds2_pit, fig.width=8, fig.height=2.5, out.width="0.93\\linewidth", fig.cap="PIT histograms of competing models to check calibration of the one-week-ahead predictions during the second quarter of 2002.", echo=-1, fig.pos="hbt"----
par(mfrow = sort(n2mfrow(length(measlesPreds2))), mar = c(4.5,4.5,2,1), las = 1)
for (m in models2compare)
    pit(measlesPreds2[[m]], plot = list(ylim = c(0, 1.25), main = m))

## ----measlesFit_powerlaw2, include = FALSE------------------------------------------
## a simplified model which includes the autoregression in the power law
measlesFit_powerlaw2 <- update(measlesFit_powerlaw,
    ar = list(f = ~ -1),
    ne = list(weights = W_powerlaw(maxlag = 5, from0 = TRUE)))
AIC(measlesFit_powerlaw, measlesFit_powerlaw2)
## simpler is really worse; probably needs random effects

## ----measlesFit_ri_simulate---------------------------------------------------------
(y.start <- observed(measlesWeserEms)[52, ])
measlesSim <- simulate(measlesFit_ri,
    nsim = 100, seed = 1, subset = 53:104, y.start = y.start)

## ----------------------------------------------------------------------------------
summary(colSums(measlesSim, dims = 2))

## ----measlesSim_plot_time, fig.cap="Simulation-based long-term forecast starting from the last week in 2001 (left-hand dot). The plot shows the weekly counts aggregated over all districts. The fan chart represents the 1\\% to 99\\% quantiles of the simulations in each week; their mean is displayed as a white line. The circles correspond to the observed counts.", fig.pos="htb"----
plot(measlesSim, "fan", means.args = list(), key.args = list())
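
## The fan chart above displays pointwise 1% to 99% quantiles of the
## simulated weekly counts aggregated over all districts. As a sketch of
## what the plot summarizes (assuming the simulation array is laid out as
## time x unit x simulation), these quantiles can also be computed directly:
weeklySim <- apply(measlesSim, c(1, 3), sum)   # 52 weeks x 100 simulations
t(apply(weeklySim, 1, quantile, probs = c(0.01, 0.5, 0.99)))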
surveillance/inst/doc/twinSIR.pdf0000644000176200001440000044645114030612527016530 0ustar liggesusers
[binary PDF: vignette "twinSIR: Individual-level epidemic modeling for a fixed population with known distances" by Sebastian Meyer, Leonhard Held, Michael Höhle; keywords: individual-level surveillance data, endemic-epidemic modeling, infectious disease epidemiology, self-exciting point process, branching process with immigration]
surveillance/inst/doc/hhh4.pdf0000644000176200001440000101775414030612522016030 0ustar liggesusers
[binary PDF: vignette "'hhh4': An endemic-epidemic modelling framework for infectious disease counts" by Michaela Paul and Sebastian Meyer (R package 'surveillance')]
rồ5"4#2;ko n }砐Uryo,7#roT,OH_S, $ /ߑv'iˤ{Cvl*-2G#ƒI7UdpA[>BǬ>,RHRLLf' lm !\F[xNƶ>; M`:̾֫ LU]gefJH"ܴܟ .0M5:>"=OMD'ɐ 4 K1Kza-]8P'H號u~dojs>b@*SCe:~ka 1tE]U5/Y]!z{r2tw31ݜ48:~h p$Jre"| w;b/D/ !=rF5py=0$RN zr&V5svZz}ҙ>1_hpή%+P{s9ǐ1STe.,dy[ܴ*TPNEog 9ƍ,fB3|ZPk2 ^U':`ld vd7VYO7߸ֱɛ3;AIbQ&N̉B[VLi?F0AMÆzTs$;dZ]L;-\q&E3Ԓ@.Mg<91 YN;Ǿ9Ugaeݗ/@Dʋ^hvBIib5.pBiĞSO( 8\.@r}s[tuOϔ^n.APkjwrDS6lVG'o7|+|K+3ZcAM= Pko\mnkp40Z~6(_6IDJeQt߻=,bӌnZEЂO'nٲkL KQ_\;87^t/ws)f :u;l fs*:Cަx#/-ΏI?ʚƚ)>ڐ|BqpN^|8^؈I(L^REO`{u \Rabi( LD(,LCQC WO*jiuE1`-5}G/›pVo[*Q5a|7vfXg7sL?7N=7~ a۪M8͛=؞kFplxa'M$Rj(#QVaL8>R,iY{e̅0KnT5н%݇.a; _F;܅xz>l,6V&3Pq=x4ƌ/Uh!u׻d> 9'UJmUؠe!;aJFd,skA'KcQW1GU%\G}|.'8C7$K'H4.us/}fݗ1DJj(X)9<Ï% 8,"w862ҭ 0WKHRwkh9^u+=^[Ϋe|^^a1 ebD%_[p9?3iA߅vѿ+ԏ=?E%\csC @W&f'(UeWZ\w Y#2h[oł'> stream xX{tSe?!4#d T" (+Sʻ'mHiy4N;u?t$'η~8p86Fڼli2f6yl8W- yxdúWt‡\:0g(:?AI.#mH dԜ —ZbQK _,%Ƌ#sR3sDQrNARsryF./^"`Q\#9;YD5>39A(ɔ$#%I21AzFi,;'wsy|BAbdRTΌū bEsAg9(eEsQ!(v)ԴV5 |tcؼCDXd,<@K/;[ b0RZtv ުFwCa˿oQŔ-R@O,&R{SWNlZa/U3&FU䫨q9)NwH Y_f}1Ay:DSxܛ2,fgPz?lr(OƉsA] `*UސEZvˡF4 MW7l8{]Ne鰹- Ax_:3k' 4q+ qmgv;oȏ4$?E'H:d⶟Dˁmqsӣh񮽱 .pOF ,eVKc2۫c@&l.bWs.P4⠴n :Zvk+\94F@|5 z,C:`Y<6KM#bТ񨘨egZc, N÷ڡG o{v8ʪ.4Yؘp<h?NDKiPlWB #pu}M ;Ch,.b/5]61N NnKH%Xm`!=*0bPY*u ,^F-!F \`2HجeԛH X*(-c9?w5ypZ:pyh|Yj*6ff$ʪ,eV5!c)B ;73Jhѱ@2<X,N/gV^\ȃ|vh2UmLY\T:rz'핔\>T/rbD,R@KFNC֝Cqf](FpL3dtS +ˑnJy-AO&SaK)GGn4wkSEa׺X} ,r ۵鑫wpB\{n<هGctl0[QtIs ب1|ʬ6CP톪Vh,3565 ,\qtD4-DOP(/]DYZG"lK37\a﹙aߣ&A2j _W-X-t^ dSJCYV,!%䍐A4TVC=y\f F!n"šω~*/l {Zlh--rl`LwMwN0yHdݻ/pNg,;.E,;㎴fgr˱gdL~dc"%S6cB3YkjIA-}n΢wXMMj$dZfYxcX Aa%؝3y/O]|zFwl{=.lAvp[9.l7.s7o;[\6HAױ@okk]@,.8dn1\j/ԏ.#CiBَ]owI<ylIJ˿oQ=l~A?Z d! v|t`h-6%m/ո-C[s T4(ANj}%,OGmjo|䩱y]xX`*3|?V3#3þCP$"aDŐRUQ5deI]VA2)wW߽ZG!̋l\T9Zq)UR7NbO,cNA_EIJ[[bam9ކ&׿A7=6=[+?sÿMcMV*9[Mq]kmjjjf}?x30BO G,}UZ*;*K%w+Nfr"vG +hr\ 9j.\Y/2LjH8}-,>309FK'Kd>mp#wtO$ٌ؝Ilc>r{Rϐvzo E.cMGɅrڧ]CfŊ)0M6FlRbu(OES{x E\( meӲʶ%ߞQnG޼;-guYԅ02FG9w>oCpO=Jp%R1`tˆx Џ=?}p'ERTBoa+6lѿ[}#bȤb! oxFQ9'EQO Ǹ ͓ y&!?8Q6HIE!r . zIzZfZVmaSg 3'egT[:g6zPc=SFL\:_>u*A/m&endstream endobj 262 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3455 >> stream xW T綞 hRu QNQo(, @ NTފ[ F^YrڪV=>깕j{޵κk'+Z{oߎbd20m嫛4R&Fˁ* Z8`/{Q!x !Lޑ4q$'9s<:͜1cӂ؈-('U\xh*%"4.i;qq;溺&$$LWEN qNkCwƪCCV"CtzWtX'(aF{Lڵ'$tkXuG͜'Y͸0Xf L`1Ɍ?,b3of Yb3̟f3ʼ0f mcD3e2dr 5-6m>QLStl)sQv p~с#RC %; (M(K2Cyb>?BСxf(+y"Ͻl 8FK~hb9O :?4bYp Gɛ{Ng{0&X c,.qzU}Yaq$-b ٴ2b.aErIz(k@{,F{TtxTP'"<}G>߼Plzba):C΂@M 4JN\( /9S Bv^MVN᭹aE@ Ybm1s?Ă{wV2u$Ԑ'<.%| W.sܧ幋c >%c쫢=]F ʚq^Łmr 8LƢJ8qb=:ߏE?,2jj^EhCkZVvcoXGR}\.ȵ1>h%kAzIGXp͊0Pјa {;5^@`r“ 7ˤ Ɂ+ٟ83B0-;@&[RHxldViӤ(X1}iWY\;`! 
Cs̙F EwVl,%^.B]xbm}]ESHR`,2nd&g pmŭIhWTCeE:W0TqDOH~4x@Qƾy#E G3G O*4ZҊ&@6KuKhuu-gT&hf tJX_G]yz/dJ6\TO?k1m{e&*yKN.-_vtYf׎g/q8ϻtZ 1]OScɨ\q5 '8J]Phqjztp+ ^N&S* _7} a羨:=Tr%=5؛V [x{F-5tA#.Y6V 鐮'-#o>2 /,u޴1g'mtt,%zU)T`QGl:.VA+J4n,4_0ORUPStVT\CO828SpҒdܭIc/SҌq:!YZ'* { wRkzlQHJ(dKEgE6WHNKO5xJSɩְE3|AA.v@SPt*./`H~_f.ΝoB--k~HSh6Ev ~DyJ~bepk7^| AWЗ&G.(ߏ<<7"[jxhxXfW܋bm`J]4pJ;zq|;~Z~ 55Ol83>O'DVC PE 3 fӦ(@Yw8{%ՑOp߯H4!ߣXY6S͒dG1gyRzG #t'D>bk9uzsoebGiv:q'껦׍%#:qY+goQjo|}s[Ӑ z%v [R:yٽ WOV{oXD>>rA9>a:-l r{nLq.z+d ?Y֏ScYցo%nU79-C T\MǿkIʿ8sEfLgڶWl/{yI$IHK$Ͷ2rf}Vr<N.0eEsAIsQu(͜oq>Z+< |JDb=wGNJkܘ'ހ\wddvж`1&"FM%貵9l.)ʅvR~~ew'K 5ZHNHgj%$ "SuT{kLMN-U7p')2g0ڲM eNӝ괐&~C!THK):{ |w0/p!<JSL7pzc[3)Ebɷ7+HPkCڣ޶eU OPmendstream endobj 263 0 obj << /Filter /FlateDecode /Length 3157 >> stream xZKsW)5[e~IUC*qkcK\4fge%K͂Џn5ꍢ\?'o8 6yIySoc2i5fݟ g::ͩ)o1oj٨-^HJimK 2?۞Zǘ MV984Ƥ{}:ᢌ4)Bn1[:{L8+|^F4El`Vn[%kuv]y)-qJb0$gBX6 ,5&1̎9t3AH8W^6n5CuoϜl̶JPìĠnu^͎R,T#FgįhQ*Zc #hc4a&NI68ml0i kl ѓaZ ~X1vl یW!EduiI,*„8v0Ĉph䎝!vb72* a/ۺʼn!XR2X Ooy1.Z)[{iU 6.qZ@Klpb>B ^tۖb}#Y\.Quu!*3`y܌-2 d<3l fcZ{t'zBfЦ &Z"sAe`l0%'XtS{Œ 뀴(`+*I;U]Z G7F9:E%fD0ajZl}IO}*ש &9p/z~  HHDTE2@3qg~T܋L.WYpGJtܘ 9͇E:9WլuU;eߨw&TګR[ "n3f$\W_J2NdI(8Aׁ9g{1aѡCbꈴ99ʹņ5Y[F ;O%ZsYMBw=!Ms** #Mm=3s3", .4+9^8ګ]4J Z~נRjmMel,VMH 6:Vu9G9gN2F:}xXSJnu1ܐ}PLxu ڞ2LE]c@X5lE3G "ۯvƾcb]N!iQg>FÙO-"Qh_N%QQ]+A޶D.kd*tsUv3Xq1P|D 'Uڿ9MOIS"M# *Pbͮxs+Ej`ߡT>٠^NVv_;4cLMwQ8٧^7(>}n0k[B V?>r1y[ a-5446ф K{elt!,Ư4}C49-$nDu NeJW_v%sڭ1y?i\ H,zXg-Bɏ4? մayCA\]yzZ%r~zjqE+݂6bb.%dgnW1Bh͚$Ⓠ#ut{`tmv٥MɁo?w _;=`u/K[X}!<9FȖ.U$)YMYZh-ZTwQ,{nT6sخCOZÂ[v7"eUJ>|E=kPPY񖺂hˏHZl!g_anQǝ.w4yjvL}srOK뤡/i涀lDRp,iJqFK2Ng-;#xE>{wo\pδoGYtͧλNefAf8:tNSg1i,-~nZ+/yx#ŭf ?ngB-gs$VZgVO?X`T,LL!O"~iFAG?ygendstream endobj 264 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1052 >> stream xumL[u良ܱ sI9&FmD@*8 Bޖ[J{/oVFqfL(nًu1[|ט,31v(~11'9ΓsߓC1IMk]ڠ7>YTSۦ״iCp&k}mt8.Hbsʓ@.yG/oF+uo"Hds M]m]+3;L?OWYtzX5Gܬ,`:1C#]]Ski~.)_TL\tPYChin荺Z/lQē$""AO䝘K2M')e%oGSnR^oq'[U/ϔfEM]oP}O-'cexn'JW ڡNvz}.,m&J_ E8{j6s\y4LMAgQv)zGCl|W`J.vM؞ mg0K 0=SG8_$^I8922>CН,ǣy ])Ex&Wuc;dPxt ?j] Kuܳ'.K0S:w8aᯝsZ e,3pz'`(,a@ln5w9.8(+_.`H¾ç&O|Jz+D41AkpR+e!*BVe)Z7*xX;2jB6 \0n8R (zgCºN[ RO!#0ru 1N B7 E<,B=zXU.[L`2 J|/~pֽP2JZ&'{h 9S1.ƫtTB>TݳIըm[0nS"w {~7yr>Q(+ݲ[\ ı/'0~ae{_; endstream endobj 265 0 obj << /Filter /FlateDecode /Length 352 >> stream x]An0E`c)MɢUؘEtų?cΗ4ne3o0No}M{ [隮RTnYr[wՇF=>ߖ.嵛sq2FN E#Qv\MbMeEGͲb(H[jo{Q㒡վ-Z<@@TTrFe1ub:PQ85(Cix;"PVn(ƴUۈy=r^Y_]yf=CܤG<Nh fz7QA+c/[}slt_> stream xuWixS畾BعdQcreBICB3l}f3y&[־jlcY,y_!-054%i&mI>'g}E6 3$h6w]2zy4`PJ=spnZhjuɄ{A U}Y{tSm,1站l4aaL Njo*:u GsxcAo4PQZ 6:ѬXG|t |J0pmA MG#PI-O?SU+/Q} S,eLpI0IW]H@\s*z;&FΕW+:*b@vF(Ziy?LWTidZƨM;bm @ &^VT e 77x{n}5%@=7x h9.8?2Τ3㘯0#?C!N]]nLI&)?tvMWO"'A8Z\WA9]㭌mO(멞bnǼ8. zWzm.Nlf$ f2 L|hiٱ}e(9=/|-=917Ǵ+g1od$f4f 'ZV-KeCC lh>1gH " t?um Hцq0tQҽU7V{g z:x+jtr5ck UZ뀿[yx;#6o-xo`jCe*R5j;/_)82u{ĢP Cq{)ҝ6fc(!Csg(9r[5ϡs&FǬ|s@ScG,=9聁@i R[[#7U.?ljJ ?x.|o) !񛨱bg鬿$Il[9_]\wއX\Sd2т/E9)˱[}w3GcqQD/X )-V[[:Z:w3 FXy7ZOKh$_;X=+hYCj@#>隃*q]H<7aRsbq֌#87v)xpC#HZ䶗[N.uaGx!CcamYTj\`m^|NiG+|]ү>'e%߀!4jq0%LZzhd DJ 1F~d hfΆ}}h]T|C mm,IdH-5QwϼX\LK6%FZm|ȡ$e ʦҲbi^P?| ѤL,V‹U$|"ũ3`6k+ි&} ɓbop^j;z01 XzI$I<C/8"Q+D|lnSՁU,(f5,4Vh3jsX۰U!  
&EUVZ]Vfz̈8<ڭj:#&}Jgkv?E^ wI0[M~ew+" 8fI[}Oyvg}(N;, 0rw26ZU:l8 FKM;q q'֓Ʈ?x&0Tj.B|jz31rQ )Jօ(»^6] ++) }~ðV-%QyR`~k4Aӯh=_:N:V5D搢ӅpOpMmFS!%azV#hɢSogpr+7[.}Ӻ < kyK_{M᫧/6^ͅm@L7hC} {1O=19Dy ejA^#a~7 :U.tI?#³8Yc38{l>$7n;>¥3' < WgE1BF<4 =ml5TBZ_Ω),hZ^!-5Op3~YC?S&)%"m1 z*,6M|&t甘lbq7K|qzыWܬ0 bZi׺Ѐi3j~닫hЇN/֤R.?Fbjnld5T^]6Ʈ'K/Ӂ&TԸM &^N8PtPǯ(^A:ٓxaPСD A-O<ؓ8S؋V)T2Nb@Fz~x?,69 d CxZ*&\ҕ +c?}QBb~^})u<$ ͇p 0<=CDG)Q ;Y&_wqcC,_k% U4V 2V;4/maW_>E)hFK1<B/(at?,}{(鶘/ʍ}Fy4k p-h ldl&5<ŅW}]Ƥᕆ8+tB-D>\zmvF",(" ,Fa :ѓoDR瘂PlQN9QPa6`^'c&2Hז2Iy A1;@q3JF3"h6K(cFi6}H94:JVdU4Nm_0dV(PG=E뽵 =Cv"sqA@od[!AXf%"]Fq2m٨gC>}W> stream xmkLSgϡ@P-SNS7nn,: Z.Br rĻhK0:iH&Cvdym*i,Dcj# {qCI9jh YhNЋ!Zeo$pŖH|si|N­Xz޵˓=ݒ7 Hϱ!Jj k&.zwJ2 'L믽RhJCPv _A i 5\V3 J 4ӎVt"U+LZGd 'OQҹ އHMM#S \+xۻ\Bag\b97zendstream endobj 268 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2538 >> stream xe TW'2#" qnRւ`*Vix@HGK +A "**jUTmWו>ZjW mzvϞ9gι. 8C@`D(D#0;s-̯sk&E^'|}^_v*l`c92ߺigL;՞r83$Ҥ,bwwEط06W&\"e$ cҶ ׻ ?$$M+JIJaax!µ!AC]WAi̬TwŞ^lJׅDl$ˆp—XM@b6@9#D# 1&ll%!'r9,,nr/[Z,rN ^ʚ fψAݶZ,`4[:d1DNiRPuQ(;`L-hJn_- q*n!#ϤJ,VF_G(w>rźWVf`vq:&Pלi4gs< ٘~@̲ <;*;2*m3DCTKt.@/HPіmm "W*ZZc(X1G24pJUXl^SZTOۡ;Ȣ#U͔*'dTKˠpTV(E&6JڳNqkkq+&(TE@\v"󠸮\ejPQ\ cD2RzFYiJ,M 27ߓT^b&۸^rYgDLQ5VoYO4 ȓG*AO޿q P^k7uhn/V.h9^gdCe'I (|ҋcG 7=s˅}'i~;~I8=8i hͤk@LQRƤ/ch ZzkދAD 榮/?s~tAz̈́lj=X= =o#>|Rn5M11lg5k>҉ ;s!RDFYP^J4Q~?![d䍞KH4T0 *@/9p|z>^{P|G9@5hTrE Zm1AC*%_0K{X^no/kwaAYLͤWov}{Sß is;LKSퟗ9 X)*f.?^[{ŢzO6qL"Mgtl00OrXqeP nt+xU+jz%}ynI@5, 7Ko&~Wc oF[ʚҕ|*<y^~MvF-Cߙ+oߨe4ZwBđ6r8`fDf~0u o?y/8(+y\c 顬GVPg1_?(܄ {hdC|2-;MfCɝtmAdkei^l laK;'cq^DÑZ$ 5kF* }QzPC 񤝁3xRN}zGD9-;k7 \;Qu_|[:rp+SW'5- ꍟ!&~fVhپ%(t֥8Mԅ˃W]ǴL*M^K>5]}Av+;ry/)>;Ps$yι̶tgfkZZffZZkfggkk'aDgWቹM齕r`5 VaO*TT](p78g&q'E1ݜ#^AdZ/0ۯfؒ]g\'0?^{y8?r7 l䐂-)EvhvtZ))XlQVYylwg}-PUUG;YA CVAne-Q(JPr{g=vU{I9,^ 1l50e؄y@+A;d;@Ns3h1ȗYd~M(J3Zd-¢lfL]\.V5ll 4U*FSYUa3 ~Iendstream endobj 269 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 7078 >> stream xy\SW1k^ǽ(-]gݻ=DD@F$<HaVV%ZEڥm_v_{n~>I|_䞜} ܉x]\bÆ =LA $c`0sӾA5>܎lOĚDqrLTtj!O::dCG&D' OOſąFDCόNMM1n\FF#FdĤFLLNX*<>2䩩cg0>1-529dpodrAk%l/ܺ qaҢ)KR.ڳB2sDEYCƸMwk _ۏY  h_PYУYHpsgT9`: -dR$eȼ!-U*{Mྠ5{$d&HѾR @,S=X'tRbjA1~R@p]C>@!y &«n7wyNM[1^ ܙLoewXyWp |`<+}%K7vS5d9lz)7Nt:%뵅b[3tϿDnFa`&l(~HJC Q c এ&ARr=!%"GR/n۰N U9M* JI^;e7Go\ v {w3n_ƾBnC0]$+6,Wwܼ*3B+ ;Ͼ1FQ/"곥C$C;襻7wNoX8fv脀A!i7.DȕIṢ`u鬉DCAP]UJibK%bZ.H  vAɳ4l=q7KO;ޤʑSʕ1*vnxڸ DU̒XtW#sƜX#ꁺb`R6E 6/$[;Ov=*RM\fR@憻<a?R҈JvEeFW cjby G6]!5iy{J)S9 E&~ nījoXЛg EV`l^!:HqSqCg1@l+m"PCap^{z{.֐[O5K q8͔>9 Z&}QD:w 5^k$#>蒺 2&PH ]AFRϭ{14j}^*s3ZɊHޓ QU.Ͱ].G| 6DH"h|Np4 Mg R$ y D?M~WO=ϲ(2 N sZZ20 FN+1[<ַy(NR*llKèUkI"؇sJoW$rWjAYg8$`e V( :sX)_ZKnkS8htB^J6 kԨ2sؤ=DZ9OlQ ]z&~Rd@c;{7GQNC? &24)R*2?mfpBUm衊p=Q]A6o~8g7FQf`EƃW=¢OJ\z\w60n&@9ޟ2gYYP-G n  #ji؋? "Xw@#EvyĊճء^yk;Zx]|sA65P4pUTf0Ny~U E)[0ɮ\;\YӺpUv6c+N3YQt?֤JHC6Q>_`>ݿžhiO]~+׮^&鑫*u=Q9:>V1}zYm c | ċƭsDՒ}%ɉ=HϺ3]={m.Vu|%Plj#Ua˓H@GE+vJgMլ\ P< MB7/o\tDGu satB*LNrWW3hLF}qa/i  Ja+P}S2: `݋._JX%(?}tFV"UL@3X!\FHF;GʘQWoOo>n.bkEj9._iT@Մ,To1\/,>繁fOK؏ ,F=6>H!8ǟ|:7'_#5S\X9L媌/ roсu<48(y_a7"h_b j4<.08=+bl%n(8oѸJMxxd ;˶IoA:ue x랓|<@:\?2hWR̛\ ko˾]j@ +k^z0 @mޗB]Pyӱt[q_ xiсͲ}>T䔎k @Ț1T^> ([.J~/Yt~3';{" Fv,0$TumγԱg?<<L&.r8Bxˤ6`uDVѴ 0@8W>ނu3x+۩Vfz N+Q:t F"u{^N[D@l-"lJ Sz1 Q}F}!,f''i}anPݸ?BhEog@Zys\͠93^'e :ؽ {>؇_Rkcb#+555yt+gqri?Ry*܇|`ʲ"Q,'iG Z9P)I\j0@QuhH{.n ,QDW%gVԺ*mJT?{@}QaLLhD`BP ,U0(JvlGt@2|~tpݎf˜dx'5pm[rF= y緅V`S%.nQnn+*9Ԫwxބݑ xmvZAZd;·m|MesT<#M@k*,dYnlZ)ifMLqU=,*,TRFgUICy v7'{$E|'VA&i H6"('[%Y>#! ň+XOqE\~6d77 i"Lލ|SMIJh2g޺dnTk yJmN̎ʄm`sΤ[~/o MܥN~=z cXO^z!Sy Juk+jlz`!zш;K]`[E)WMZ QP Ɏm