bridgesampling/
bridgesampling/NAMESPACE
# Generated by roxygen2: do not edit by hand

S3method(bayes_factor,default)
S3method(bf,bridge)
S3method(bf,bridge_list)
S3method(bf,default)
S3method(bridge_sampler,MCMC_refClass)
S3method(bridge_sampler,matrix)
S3method(bridge_sampler,mcmc)
S3method(bridge_sampler,mcmc.list)
S3method(bridge_sampler,rjags)
S3method(bridge_sampler,runjags)
S3method(bridge_sampler,stanfit)
S3method(bridge_sampler,stanreg)
S3method(error_measures,bridge)
S3method(error_measures,bridge_list)
S3method(logml,bridge)
S3method(logml,bridge_list)
S3method(post_prob,bridge)
S3method(post_prob,bridge_list)
S3method(post_prob,default)
S3method(print,bf_bridge)
S3method(print,bf_bridge_list)
S3method(print,bf_default)
S3method(print,bridge)
S3method(print,bridge_list)
S3method(print,summary.bridge)
S3method(print,summary.bridge_list)
S3method(summary,bridge)
S3method(summary,bridge_list)
export(bayes_factor)
export(bf)
export(bridge_sampler)
export(error_measures)
export(logml)
export(post_prob)
import(Brobdingnag)
importFrom(Matrix,nearPD)
importFrom(coda,spectrum0.ar)
importFrom(methods,is)
importFrom(mvtnorm,dmvnorm)
importFrom(mvtnorm,rmvnorm)
importFrom(stats,cov)
importFrom(stats,dnorm)
importFrom(stats,median)
importFrom(stats,pnorm)
importFrom(stats,qnorm)
importFrom(stats,var)
importFrom(stringr,str_sub)
importFrom(utils,read.csv)
bridgesampling/data/
bridgesampling/data/turtles.rda
bridgesampling/data/ier.rda
bridgesampling/man/
bridgesampling/man/post_prob.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/post_prob.R
\name{post_prob}
\alias{post_prob}
\alias{post_prob.bridge}
\alias{post_prob.bridge_list}
\alias{post_prob.default}
\title{Posterior Model Probabilities from Marginal Likelihoods}
\usage{
post_prob(x, ..., prior_prob = NULL, model_names = NULL)

\method{post_prob}{bridge}(x, ..., prior_prob = NULL, model_names = NULL)

\method{post_prob}{bridge_list}(x, ..., prior_prob = NULL, model_names = NULL)

\method{post_prob}{default}(x, ..., prior_prob = NULL, model_names = NULL)
}
\arguments{
\item{x}{Object of class \code{"bridge"} or \code{"bridge_list"} as returned from
\code{\link{bridge_sampler}}. Additionally, the default method assumes that all
passed objects are numeric log marginal likelihoods (e.g., from
\code{\link{logml}}) and will throw an error otherwise.}

\item{...}{further objects of class \code{"bridge"} or \code{"bridge_list"} as
returned from \code{\link{bridge_sampler}}. Or numeric values for the default
method.}

\item{prior_prob}{numeric vector with prior model probabilities. If omitted, a
uniform prior is used (i.e., all models are equally likely a priori). The
default \code{NULL} corresponds to equal prior model weights.}

\item{model_names}{If \code{NULL} (the default) will use model names derived
from deparsing the call. Otherwise will use the passed values as model names.}
}
\value{
For the default method and the method for \code{"bridge"} objects, a named
numeric vector with posterior model probabilities (i.e., which sum to one).

For the method for \code{"bridge_list"} objects, a matrix consisting of
posterior model probabilities where each row sums to one and gives the model
probabilities for one set of logmls. The (named) columns correspond to the
models and the number of rows is given by the \code{"bridge_list"} element
with the most \code{repetitions}. Elements with fewer repetitions will be
recycled (with warning).
}
\description{
Generic function that computes posterior model probabilities from marginal
likelihoods.
} \note{ For realistic examples, see \code{\link{bridge_sampler}} and the accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} \cr \code{vignette("bridgesampling_example_stan")} } \examples{ H0 <- structure(list(logml = -20.8084543022433, niter = 4, method = "normal"), .Names = c("logml", "niter", "method"), class = "bridge") H1 <- structure(list(logml = -17.9623077558729, niter = 4, method = "normal"), .Names = c("logml", "niter", "method"), class = "bridge") H2 <- structure(list(logml = -19, niter = 4, method = "normal"), .Names = c("logml", "niter", "method"), class = "bridge") post_prob(H0, H1, H2) post_prob(H1, H0) ## all produce the same (only names differ): post_prob(H0, H1, H2) post_prob(H0$logml, H1$logml, H2$logml) post_prob(c(H0$logml, H1$logml, H2$logml)) post_prob(H0$logml, c(H1$logml, H2$logml)) post_prob(H0$logml, c(H1$logml, H2$logml), model_names = c("H0", "H1", "H2")) ### with bridge list elements: H0L <- structure(list(logml = c(-20.8088381186739, -20.8072772698116, -20.808454454621, -20.8083419072281, -20.8087870541247, -20.8084887398113, -20.8086023582344, -20.8079083169745, -20.8083048489095, -20.8090050811436 ), niter = c(4, 4, 4, 4, 4, 4, 4, 4, 4, 4), method = "normal", repetitions = 10), .Names = c("logml", "niter", "method", "repetitions"), class = "bridge_list") H1L <- structure(list(logml = c(-17.961665507006, -17.9611290723151, -17.9607509604499, -17.9608629535992, -17.9602093576442, -17.9600223300432, -17.9610157118017, -17.9615557696561, -17.9608437034849, -17.9606743200309 ), niter = c(4, 4, 4, 4, 4, 4, 4, 4, 3, 4), method = "normal", repetitions = 10), .Names = c("logml", "niter", "method", "repetitions"), class = "bridge_list") post_prob(H1L, H0L) post_prob(H1L, H0L, H0) # last element recycled with warning. } \author{ Quentin F. Gronau and Henrik Singmann } bridgesampling/man/bf.Rd0000644000176200001440000000705014026404747014704 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bf.R \name{bf} \alias{bf} \alias{bayes_factor} \alias{bayes_factor.default} \alias{bf.bridge} \alias{bf.bridge_list} \alias{bf.default} \title{Bayes Factor(s) from Marginal Likelihoods} \usage{ bf(x1, x2, log = FALSE, ...) bayes_factor(x1, x2, log = FALSE, ...) \method{bayes_factor}{default}(x1, x2, log = FALSE, ...) \method{bf}{bridge}(x1, x2, log = FALSE, ...) \method{bf}{bridge_list}(x1, x2, log = FALSE, ...) \method{bf}{default}(x1, x2, log = FALSE, ...) } \arguments{ \item{x1}{Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. Additionally, the default method assumes that \code{x1} is a single numeric log marginal likelihood (e.g., from \code{\link{logml}}) and will throw an error otherwise.} \item{x2}{Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. Additionally, the default method assumes that \code{x2} is a single numeric log marginal likelihood (e.g., from \code{\link{logml}}) and will throw an error otherwise.} \item{log}{Boolean. If \code{TRUE}, the function returns the log of the Bayes factor. Default is \code{FALSE}.} \item{...}{currently not used here, but can be used by other methods.} } \value{ For the default method returns a list of class \code{"bf_default"} with components: \itemize{ \item \code{bf}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2}. 
\item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. } For the method for \code{"bridge"} objects returns a list of class \code{"bf_bridge"} with components: \itemize{ \item \code{bf}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2}. \item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. } For the method for \code{"bridge_list"} objects returns a list of class \code{"bf_bridge_list"} with components: \itemize{ \item \code{bf}: a numeric vector consisting of Bayes factors where each element gives the Bayes factor for one set of logmls in favor of the model associated with \code{x1} over the model associated with \code{x2}. The length of this vector is given by the \code{"bridge_list"} element with the most \code{repetitions}. Elements with fewer repetitions will be recycled (with warning). \item \code{bf_median_based}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2} that is based on the median values of the logml estimates. \item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. } } \description{ Generic function that computes Bayes factor(s) from marginal likelihoods. \code{bayes_factor()} is simply an (S3 generic) alias for \code{bf()}. } \details{ Computes the Bayes factor (Kass & Raftery, 1995) in favor of the model associated with \code{x1} over the model associated with \code{x2}. } \note{ For examples, see \code{\link{bridge_sampler}} and the accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} \cr \code{vignette("bridgesampling_example_stan")} } \references{ Kass, R. E., & Raftery, A. E. (1995). Bayes factors. \emph{Journal of the American Statistical Association, 90(430)}, 773-795. \doi{10.1080/01621459.1995.10476572} } \author{ Quentin F. Gronau } bridgesampling/man/error_measures.Rd0000644000176200001440000000547214026404747017360 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/error_measures.R \name{error_measures} \alias{error_measures} \alias{error_measures.bridge} \alias{error_measures.bridge_list} \title{Error Measures for Estimated Marginal Likelihood} \usage{ error_measures(bridge_object, ...) \method{error_measures}{bridge}(bridge_object, ...) \method{error_measures}{bridge_list}(bridge_object, na.rm = TRUE, ...) } \arguments{ \item{bridge_object}{an object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}.} \item{...}{additional arguments (currently ignored).} \item{na.rm}{a logical indicating whether missing values in logml estimates should be removed. Ignored for the \code{bridge} method.} } \value{ If \code{bridge_object} is of class \code{"bridge"} and has been obtained with \code{method = "normal"} and \code{repetitions = 1}, returns a list with components: \itemize{ \item \code{re2}: approximate relative mean-squared error for marginal likelihood estimate. \item \code{cv}: approximate coefficient of variation for marginal likelihood estimate (assumes that bridge estimate is unbiased). \item \code{percentage}: approximate percentage error of marginal likelihood estimate. } If \code{bridge_object} is of class \code{"bridge_list"}, returns a list with components: \itemize{ \item \code{min}: minimum of the log marginal likelihood estimates. 
\item \code{max}: maximum of the log marginal likelihood estimates. \item \code{IQR}: interquartile range of the log marginal likelihood estimates. } } \description{ Computes error measures for estimated marginal likelihood. } \details{ Computes error measures for marginal likelihood bridge sampling estimates. The approximate errors for a \code{bridge_object} of class \code{"bridge"} that has been obtained with \code{method = "normal"} and \code{repetitions = 1} are based on Fruehwirth-Schnatter (2004). Not applicable in case the object of class \code{"bridge"} has been obtained with \code{method = "warp3"} and \code{repetitions = 1}. To assess the uncertainty of the estimate in this case, it is recommended to run the \code{"warp3"} procedure multiple times. } \note{ For examples, see \code{\link{bridge_sampler}} and the accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} \cr \code{vignette("bridgesampling_example_stan")} } \references{ Fruehwirth-Schnatter, S. (2004). Estimating marginal likelihoods for mixture and Markov switching models using bridge sampling techniques. \emph{The Econometrics Journal, 7}, 143-167. \doi{10.1111/j.1368-423X.2004.00125.x} } \seealso{ The \code{summary} methods for \code{bridge} and \code{bridge_list} objects automatically invoke this function, see \code{\link{bridge-methods}}. } \author{ Quentin F. Gronau } bridgesampling/man/ier.Rd0000644000176200001440000001337714024663305015100 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ier-data.R \docType{data} \encoding{UTF-8} \name{ier} \alias{ier} \title{Standardized International Exchange Rate Changes from 1975 to 1986} \format{ A matrix with 143 rows and 6 columns. } \source{ West, M., Harrison, J. (1997). \emph{Bayesian forecasting and dynamic models} (2nd ed.). Springer-Verlag, New York. Lopes, H. F., West, M. (2004). Bayesian model assessment in factor analysis. \emph{Statistica Sinica, 14}, 41-67. \url{https://www.jstor.org/stable/24307179} } \usage{ ier } \description{ This data set contains the changes in monthly international exchange rates for pounds sterling from January 1975 to December 1986 obtained from West and Harrison (1997, pp. 612-615). Currencies tracked are US Dollar (column \code{us_dollar}), Canadian Dollar (column \code{canadian_dollar}), Japanese Yen (column \code{yen}), French Franc (column \code{franc}), Italian Lira (column \code{lira}), and the (West) German Mark (column \code{mark}). Each series has been standardized with respect to its sample mean and standard deviation. 
} \examples{ \dontrun{ ################################################################################ # BAYESIAN FACTOR ANALYSIS (AS PROPOSED BY LOPES & WEST, 2004) ################################################################################ library(bridgesampling) library(rstan) cores <- 4 options(mc.cores = cores) data("ier") #------------------------------------------------------------------------------- # plot data #------------------------------------------------------------------------------- currency <- colnames(ier) label <- c("US Dollar", "Canadian Dollar", "Yen", "Franc", "Lira", "Mark") op <- par(mfrow = c(3, 2), mar = c(6, 6, 3, 3)) for (i in seq_along(currency)) { plot(ier[,currency[i]], type = "l", col = "darkblue", axes = FALSE, ylim = c(-4, 4), ylab = "", xlab = "", lwd = 2) axis(1, at = 0:12*12, labels = 1975:1987, cex.axis = 1.7) axis(2, at = pretty(c(-4, 4)), las = 1, cex.axis = 1.7) mtext("Year", 1, cex = 1.5, line = 3.2) mtext("Exchange Rate Changes", 2, cex = 1.4, line = 3.2) mtext(label[i], 3, cex = 1.6, line = .1) } par(op) #------------------------------------------------------------------------------- # stan model #------------------------------------------------------------------------------- model_code <- "data { int T; // number of observations int m; // number of variables int k; // number of factors matrix[T,m] Y; // data matrix } transformed data { int r; vector[m] zeros; r = m * k - k * (k - 1) / 2; // number of non-zero factor loadings zeros = rep_vector(0.0, m); } parameters { real beta_lower[r - k]; // lower-diagonal elements of beta real beta_diag [k]; // diagonal elements of beta vector[m] sigma2; // residual variances } transformed parameters { matrix[m,k] beta; cov_matrix[m] Omega; // construct lower-triangular factor loadings matrix { int index_lower = 1; for (j in 1:k) { for (i in 1:m) { if (i == j) { beta[j,j] = beta_diag[j]; } else if (i >= j) { beta[i,j] = beta_lower[index_lower]; index_lower = index_lower + 1; } else { beta[i,j] = 0.0; } } } } Omega = beta * beta' + diag_matrix(sigma2); } model { // priors target += normal_lpdf(beta_diag | 0, 1) - k * normal_lccdf(0 | 0, 1); target += normal_lpdf(beta_lower | 0, 1); target += inv_gamma_lpdf(sigma2 | 2.2 / 2.0, 0.1 / 2.0); // likelihood for(t in 1:T) { target += multi_normal_lpdf(Y[t] | zeros, Omega); } }" # compile model model <- stan_model(model_code = model_code) #------------------------------------------------------------------------------- # fit models and compute log marginal likelihoods #------------------------------------------------------------------------------- # function for generating starting values init_fun <- function(nchains, k, m) { r <- m * k - k * (k - 1) / 2 out <- vector("list", nchains) for (i in seq_len(nchains)) { beta_lower <- array(runif(r - k, 0.05, 1), dim = r - k) beta_diag <- array(runif(k, .05, 1), dim = k) sigma2 <- array(runif(m, .05, 1.5), dim = m) out[[i]] <- list(beta_lower = beta_lower, beta_diag = beta_diag, sigma2 = sigma2) } return(out) } set.seed(1) stanfit <- bridge <- vector("list", 3) for (k in 1:3) { stanfit[[k]] <- sampling(model, data = list(Y = ier, T = nrow(ier), m = ncol(ier), k = k), iter = 11000, warmup = 1000, chains = 4, init = init_fun(nchains = 4, k = k, m = ncol(ier)), cores = cores, seed = 1) bridge[[k]] <- bridge_sampler(stanfit[[k]], method = "warp3", repetitions = 10, cores = cores) } # example output summary(bridge[[2]]) #------------------------------------------------------------------------------- # compute posterior model 
probabilities #------------------------------------------------------------------------------- pp <- post_prob(bridge[[1]], bridge[[2]], bridge[[3]], model_names = c("k = 1", "k = 2", "k = 3")) pp op <- par(mar = c(6, 6, 3, 3)) boxplot(pp, axes = FALSE, ylim = c(0, 1), ylab = "", xlab = "") axis(1, at = 1:3, labels = colnames(pp), cex.axis = 1.7) axis(2, cex.axis = 1.1) mtext("Posterior Model Probability", 2, cex = 1.5, line = 3.2) mtext("Number of Factors", 1, cex = 1.4, line = 3.2) par(op) } } \keyword{dataset} bridgesampling/man/bridge_sampler.Rd0000644000176200001440000004736614026404747017312 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bridge_sampler.R \name{bridge_sampler} \alias{bridge_sampler} \alias{bridge_sampler.stanfit} \alias{bridge_sampler.mcmc.list} \alias{bridge_sampler.mcmc} \alias{bridge_sampler.matrix} \alias{bridge_sampler.stanreg} \alias{bridge_sampler.rjags} \alias{bridge_sampler.runjags} \alias{bridge_sampler.MCMC_refClass} \title{Log Marginal Likelihood via Bridge Sampling} \usage{ bridge_sampler(samples, ...) \method{bridge_sampler}{stanfit}( samples = NULL, stanfit_model = samples, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, maxiter = 1000, silent = FALSE, verbose = FALSE, ... ) \method{bridge_sampler}{mcmc.list}( samples = NULL, log_posterior = NULL, ..., data = NULL, lb = NULL, ub = NULL, repetitions = 1, param_types = rep("real", ncol(samples[[1]])), method = "normal", cores = 1, use_neff = TRUE, packages = NULL, varlist = NULL, envir = .GlobalEnv, rcppFile = NULL, maxiter = 1000, silent = FALSE, verbose = FALSE ) \method{bridge_sampler}{mcmc}( samples = NULL, log_posterior = NULL, ..., data = NULL, lb = NULL, ub = NULL, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, packages = NULL, varlist = NULL, envir = .GlobalEnv, rcppFile = NULL, maxiter = 1000, param_types = rep("real", ncol(samples)), silent = FALSE, verbose = FALSE ) \method{bridge_sampler}{matrix}( samples = NULL, log_posterior = NULL, ..., data = NULL, lb = NULL, ub = NULL, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, packages = NULL, varlist = NULL, envir = .GlobalEnv, rcppFile = NULL, maxiter = 1000, param_types = rep("real", ncol(samples)), silent = FALSE, verbose = FALSE ) \method{bridge_sampler}{stanreg}( samples, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, maxiter = 1000, silent = FALSE, verbose = FALSE, ... ) \method{bridge_sampler}{rjags}( samples = NULL, log_posterior = NULL, ..., data = NULL, lb = NULL, ub = NULL, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, packages = NULL, varlist = NULL, envir = .GlobalEnv, rcppFile = NULL, maxiter = 1000, silent = FALSE, verbose = FALSE ) \method{bridge_sampler}{runjags}( samples = NULL, log_posterior = NULL, ..., data = NULL, lb = NULL, ub = NULL, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, packages = NULL, varlist = NULL, envir = .GlobalEnv, rcppFile = NULL, maxiter = 1000, silent = FALSE, verbose = FALSE ) \method{bridge_sampler}{MCMC_refClass}( samples, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, maxiter = 1000, silent = FALSE, verbose = FALSE, ... 
) } \arguments{ \item{samples}{an \code{mcmc.list} object, a fitted \code{stanfit} object, a \code{stanreg} object, an \code{rjags} object, a \code{runjags} object, or a \code{matrix} with posterior samples (\code{colnames} need to correspond to parameter names in \code{lb} and \code{ub}) with posterior samples.} \item{...}{additional arguments passed to \code{log_posterior}. Ignored for the \code{stanfit} and \code{stanreg} methods.} \item{stanfit_model}{for the \code{stanfit} method, an additional object of class \code{"stanfit"} with the same model as \code{samples}, which will be used for evaluating the \code{log_posterior} (i.e., it does not need to contain any samples). The default is to use \code{samples}. In case \code{samples} was compiled in a different R session or on another computer with a different OS or setup, the \code{samples} model usually cannot be used for evaluation. In this case, one can compile the model on the current computer with \code{iter = 0} and pass it here (this usually needs to be done before \code{samples} is loaded).} \item{repetitions}{number of repetitions.} \item{method}{either \code{"normal"} or \code{"warp3"}.} \item{cores}{number of cores used for evaluating \code{log_posterior}. On unix-like systems (where \code{.Platform$OS.type == "unix"} evaluates to \code{TRUE}; e.g., Linux and Mac OS) forking via \code{\link{mclapply}} is used. Hence elements needed for evaluation should be in the \code{\link{.GlobalEnv}}. For other systems (e.g., Windows) \code{\link{makeCluster}} is used and further arguments specified below will be used.} \item{use_neff}{Boolean which determines whether the effective sample size is used in the optimal bridge function. Default is TRUE. If FALSE, the number of samples is used instead. If \code{samples} is a \code{matrix}, it is assumed that the \code{matrix} contains the samples of one chain in order. If \code{samples} come from more than one chain, we recommend to use an \code{mcmc.list} object for optimal performance.} \item{maxiter}{maximum number of iterations for the iterative updating scheme. Default is 1,000 to avoid infinite loops.} \item{silent}{Boolean which determines whether to print the number of iterations of the updating scheme to the console. Default is FALSE.} \item{verbose}{Boolean. Should internal debug information be printed to console? Default is \code{FALSE}.} \item{log_posterior}{function or name of function that takes a parameter vector and the \code{data} as input and returns the log of the unnormalized posterior density (i.e., a scalar value). If the function name is passed, the function should exist in the \code{.GlobalEnv}. For special behavior if \code{cores > 1} see \code{Details}.} \item{data}{data object which is used in \code{log_posterior}.} \item{lb}{named vector with lower bounds for parameters.} \item{ub}{named vector with upper bounds for parameters.} \item{param_types}{character vector of length \code{ncol(samples)} with \code{"real"}, \code{"simplex"} or \code{"circular"}. For all regular bounded or unbounded continuous parameters, this should just be \code{"real"}. However, if there are parameters which lie on a simplex or on the circle, this should be noted here. Simplex parameters are parameters which are bounded below by zero and collectively sum to one, such as weights in a mixture model. For these, the stick-breaking transformation is performed as described in the Stan reference manual. 
The circular variables are given a numerical representation to which the normal distribution is most likely a good fit. Only possible to use with \code{bridge_sampler.matrix}.} \item{packages}{character vector with names of packages needed for evaluating \code{log_posterior} in parallel (only relevant if \code{cores > 1} and \code{.Platform$OS.type != "unix"}).} \item{varlist}{character vector with names of variables needed for evaluating \code{log_posterior} (only needed if \code{cores > 1} and \code{.Platform$OS.type != "unix"} as these objects will be exported to the nodes). These objects need to exist in \code{envir}.} \item{envir}{specifies the environment for \code{varlist} (only needed if \code{cores > 1} and \code{.Platform$OS.type != "unix"} as these objects will be exported to the nodes). Default is \code{\link{.GlobalEnv}}.} \item{rcppFile}{in case \code{cores > 1} and \code{log_posterior} is an \code{Rcpp} function, \code{rcppFile} specifies the path to the cpp file (will be compiled on all cores).} } \value{ if \code{repetitions = 1}, returns a list of class \code{"bridge"} with components: \itemize{ \item \code{logml}: estimate of log marginal likelihood. \item \code{niter}: number of iterations of the iterative updating scheme. \item \code{method}: bridge sampling method that was used to obtain the estimate. \item \code{q11}: log posterior evaluations for posterior samples. \item \code{q12}: log proposal evaluations for posterior samples. \item \code{q21}: log posterior evaluations for samples from proposal. \item \code{q22}: log proposal evaluations for samples from proposal. } if \code{repetitions > 1}, returns a list of class \code{"bridge_list"} with components: \itemize{ \item \code{logml}: numeric vector with estimates of log marginal likelihood. \item \code{niter}: numeric vector with number of iterations of the iterative updating scheme for each repetition. \item \code{method}: bridge sampling method that was used to obtain the estimates. \item \code{repetitions}: number of repetitions. } } \description{ Computes log marginal likelihood via bridge sampling. } \details{ Bridge sampling is implemented as described in Meng and Wong (1996, see equation 4.1) using the "optimal" bridge function. When \code{method = "normal"}, the proposal distribution is a multivariate normal distribution with mean vector equal to the sample mean vector of \code{samples} and covariance matrix equal to the sample covariance matrix of \code{samples}. For a recent tutorial on bridge sampling, see Gronau et al. (in press). When \code{method = "warp3"}, the proposal distribution is a standard multivariate normal distribution and the posterior distribution is "warped" (Meng & Schilling, 2002) so that it has the same mean vector, covariance matrix, and skew as the samples. \code{method = "warp3"} takes approximately twice as long as \code{method = "normal"}. Note that for the \code{matrix} method, the lower and upper bound of a parameter cannot be a function of the bounds of another parameter. Furthermore, constraints that depend on multiple parameters of the model are not supported. This usually excludes, for example, parameters that constitute a covariance matrix or sets of parameters that need to sum to one. 
However, if the retransformations are part of the model itself and the \code{log_posterior} accepts parameters on the real line and performs the appropriate Jacobian adjustments, such as done for \code{stanfit} and \code{stanreg} objects, such constraints are obviously possible (i.e., we currently do not know of any parameter supported within Stan that does not work with the current implementation through a \code{stanfit} object). \subsection{Parallel Computation}{ On unix-like systems forking is used via \code{\link{mclapply}}. Hence elements needed for evaluation of \code{log_posterior} should be in the \code{\link{.GlobalEnv}}. On other OSes (e.g., Windows), things can get more complicated. For normal parallel computation, the \code{log_posterior} function can be passed as both function and function name. If the latter, it needs to exist in the environment specified in the \code{envir} argument. For parallel computation when using an \code{Rcpp} function, \code{log_posterior} can only be passed as the function name (i.e., character). This function needs to result from calling \code{sourceCpp} on the file specified in \code{rcppFile}. Due to the way \code{rstan} currently works, parallel computations with \code{stanfit} and \code{stanreg} objects only work with forking (i.e., NOT on Windows). } } \note{ To be able to use a \code{stanreg} object for \code{samples}, the user crucially needs to have specified the \code{diagnostic_file} when fitting the model in \pkg{rstanarm}. } \section{Warning}{ Note that the results depend strongly on the parameter priors. Therefore, it is strongly advised to think carefully about the priors before calculating marginal likelihoods. For example, the prior choices implemented in \pkg{rstanarm} or \pkg{brms} might not be optimal from a testing point of view. We recommend to use priors that have been chosen from a testing and not a purely estimation perspective. Also note that for testing, the number of posterior samples usually needs to be substantially larger than for estimation. 
} \examples{ ## ------------------------------------------------------------------------ ## Example 1: Estimating the Normalizing Constant of a Two-Dimensional ## Standard Normal Distribution ## ------------------------------------------------------------------------ library(bridgesampling) library(mvtnorm) samples <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) colnames(samples) <- c("x1", "x2") log_density <- function(samples.row, data) { -.5*t(samples.row) \%*\% samples.row } lb <- rep(-Inf, 2) ub <- rep(Inf, 2) names(lb) <- names(ub) <- colnames(samples) bridge_result <- bridge_sampler(samples = samples, log_posterior = log_density, data = NULL, lb = lb, ub = ub, silent = TRUE) # compare to analytical value analytical <- log(2*pi) print(cbind(bridge_result$logml, analytical)) \dontrun{ ## ------------------------------------------------------------------------ ## Example 2: Hierarchical Normal Model ## ------------------------------------------------------------------------ # for a full description of the example, see vignette("bridgesampling_example_jags") library(R2jags) ### generate data ### set.seed(12345) mu <- 0 tau2 <- 0.5 sigma2 <- 1 n <- 20 theta <- rnorm(n, mu, sqrt(tau2)) y <- rnorm(n, theta, sqrt(sigma2)) ### set prior parameters alpha <- 1 beta <- 1 mu0 <- 0 tau20 <- 1 ### functions to get posterior samples ### ### H0: mu = 0 getSamplesModelH0 <- function(data, niter = 52000, nburnin = 2000, nchains = 3) { model <- " model { for (i in 1:n) { theta[i] ~ dnorm(0, invTau2) y[i] ~ dnorm(theta[i], 1/sigma2) } invTau2 ~ dgamma(alpha, beta) tau2 <- 1/invTau2 }" s <- jags(data, parameters.to.save = c("theta", "invTau2"), model.file = textConnection(model), n.chains = nchains, n.iter = niter, n.burnin = nburnin, n.thin = 1) return(s) } ### H1: mu != 0 getSamplesModelH1 <- function(data, niter = 52000, nburnin = 2000, nchains = 3) { model <- " model { for (i in 1:n) { theta[i] ~ dnorm(mu, invTau2) y[i] ~ dnorm(theta[i], 1/sigma2) } mu ~ dnorm(mu0, 1/tau20) invTau2 ~ dgamma(alpha, beta) tau2 <- 1/invTau2 }" s <- jags(data, parameters.to.save = c("theta", "mu", "invTau2"), model.file = textConnection(model), n.chains = nchains, n.iter = niter, n.burnin = nburnin, n.thin = 1) return(s) } ### get posterior samples ### # create data lists for Jags data_H0 <- list(y = y, n = length(y), alpha = alpha, beta = beta, sigma2 = sigma2) data_H1 <- list(y = y, n = length(y), mu0 = mu0, tau20 = tau20, alpha = alpha, beta = beta, sigma2 = sigma2) # fit models samples_H0 <- getSamplesModelH0(data_H0) samples_H1 <- getSamplesModelH1(data_H1) ### functions for evaluating the unnormalized posteriors on log scale ### log_posterior_H0 <- function(samples.row, data) { mu <- 0 invTau2 <- samples.row[[ "invTau2" ]] theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + dgamma(invTau2, data$alpha, data$beta, log = TRUE) } log_posterior_H1 <- function(samples.row, data) { mu <- samples.row[[ "mu" ]] invTau2 <- samples.row[[ "invTau2" ]] theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + dnorm(mu, data$mu0, sqrt(data$tau20), log = TRUE) + dgamma(invTau2, data$alpha, data$beta, log = TRUE) } # specify parameter bounds H0 cn <- colnames(samples_H0$BUGSoutput$sims.matrix) cn <- cn[cn != "deviance"] lb_H0 <- rep(-Inf, length(cn)) ub_H0 <- rep(Inf, length(cn)) names(lb_H0) <- 
names(ub_H0) <- cn lb_H0[[ "invTau2" ]] <- 0 # specify parameter bounds H1 cn <- colnames(samples_H1$BUGSoutput$sims.matrix) cn <- cn[cn != "deviance"] lb_H1 <- rep(-Inf, length(cn)) ub_H1 <- rep(Inf, length(cn)) names(lb_H1) <- names(ub_H1) <- cn lb_H1[[ "invTau2" ]] <- 0 # compute log marginal likelihood via bridge sampling for H0 H0.bridge <- bridge_sampler(samples = samples_H0, data = data_H0, log_posterior = log_posterior_H0, lb = lb_H0, ub = ub_H0, silent = TRUE) print(H0.bridge) # compute log marginal likelihood via bridge sampling for H1 H1.bridge <- bridge_sampler(samples = samples_H1, data = data_H1, log_posterior = log_posterior_H1, lb = lb_H1, ub = ub_H1, silent = TRUE) print(H1.bridge) # compute percentage error print(error_measures(H0.bridge)$percentage) print(error_measures(H1.bridge)$percentage) # compute Bayes factor BF01 <- bf(H0.bridge, H1.bridge) print(BF01) # compute posterior model probabilities (assuming equal prior model probabilities) post1 <- post_prob(H0.bridge, H1.bridge) print(post1) # compute posterior model probabilities (using user-specified prior model probabilities) post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) print(post2) } \dontrun{ ## ------------------------------------------------------------------------ ## Example 3: rstanarm ## ------------------------------------------------------------------------ library(rstanarm) # N.B.: remember to specify the diagnostic_file fit_1 <- stan_glm(mpg ~ wt + qsec + am, data = mtcars, chains = 2, cores = 2, iter = 5000, diagnostic_file = file.path(tempdir(), "df.csv")) bridge_1 <- bridge_sampler(fit_1) fit_2 <- update(fit_1, formula = . ~ . + cyl) bridge_2 <- bridge_sampler(fit_2, method = "warp3") bf(bridge_1, bridge_2) } } \references{ Gronau, Q. F., Singmann, H., & Wagenmakers, E.-J. (2020). bridgesampling: An R Package for Estimating Normalizing Constants. \emph{Journal of Statistical Software, 92}. \doi{10.18637/jss.v092.i10} Gronau, Q. F., Sarafoglou, A., Matzke, D., Ly, A., Boehm, U., Marsman, M., Leslie, D. S., Forster, J. J., Wagenmakers, E.-J., & Steingroever, H. (in press). A tutorial on bridge sampling. \emph{Journal of Mathematical Psychology}. \url{https://arxiv.org/abs/1703.05984} \cr \code{vignette("bridgesampling_tutorial")} Gronau, Q. F., Wagenmakers, E.-J., Heck, D. W., & Matzke, D. (2017). \emph{A simple method for comparing complex models: Bayesian model comparison for hierarchical multinomial processing tree models using Warp-III bridge sampling}. Manuscript submitted for publication. \url{https://psyarxiv.com/yxhfm} Meng, X.-L., & Wong, W. H. (1996). Simulating ratios of normalizing constants via a simple identity: A theoretical exploration. \emph{Statistica Sinica, 6}, 831-860. \url{http://www3.stat.sinica.edu.tw/statistica/j6n4/j6n43/j6n43.htm} Meng, X.-L., & Schilling, S. (2002). Warp bridge sampling. \emph{Journal of Computational and Graphical Statistics, 11(3)}, 552-586. \doi{10.1198/106186002457} Overstall, A. M., & Forster, J. J. (2010). Default Bayesian model determination methods for generalised linear mixed models. \emph{Computational Statistics & Data Analysis, 54}, 3269-3288. \doi{10.1016/j.csda.2010.03.008} } \seealso{ \code{\link{bf}} allows the user to calculate Bayes factors and \code{\link{post_prob}} allows the user to calculate posterior model probabilities from bridge sampling estimates. \code{\link{bridge-methods}} lists some additional methods that automatically invoke the \code{\link{error_measures}} function. } \author{ Quentin F. 
Gronau and Henrik Singmann. Parallel computing (i.e., \code{cores > 1}) and the \code{stanfit} method use code from \code{rstan} by Jiaqing Guo, Jonah Gabry, and Ben Goodrich. Ben Goodrich added the \code{stanreg} method. Kees Mulder added methods for simplex and circular variables. } bridgesampling/man/bridge-methods.Rd0000644000176200001440000000246313663004467017216 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bridge_methods.R \name{bridge-methods} \alias{bridge-methods} \alias{summary.bridge} \alias{summary.bridge_list} \alias{print.summary.bridge} \alias{print.summary.bridge_list} \alias{print.bridge} \alias{print.bridge_list} \title{Methods for bridge and bridge_list objects} \usage{ \method{summary}{bridge}(object, na.rm = TRUE, ...) \method{summary}{bridge_list}(object, na.rm = TRUE, ...) \method{print}{summary.bridge}(x, ...) \method{print}{summary.bridge_list}(x, ...) \method{print}{bridge}(x, ...) \method{print}{bridge_list}(x, na.rm = TRUE, ...) } \arguments{ \item{object, x}{object of class \code{bridge} or \code{bridge_list} as returned from \code{\link{bridge_sampler}}.} \item{na.rm}{logical. Should NA estimates in \code{bridge_list} objects be removed? Passed to \code{\link{error_measures}}.} \item{...}{further arguments, currently ignored.} } \value{ The \code{summary} methods return a \code{data.frame} which contains the log marginal likelihood plus the result returned from invoking \code{\link{error_measures}}. The \code{print} methods simply print and return nothing. } \description{ Methods defined for objects returned from the generic \code{\link{bridge_sampler}} function. } bridgesampling/man/logml.Rd0000644000176200001440000000167613663004467015440 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/logml.R \name{logml} \alias{logml} \alias{logml.bridge} \alias{logml.bridge_list} \title{Log Marginal Likelihoods from Bridge Objects} \usage{ logml(x, ...) \method{logml}{bridge}(x, ...) \method{logml}{bridge_list}(x, fun = median, ...) } \arguments{ \item{x}{Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}.} \item{...}{Further arguments passed to \code{fun}.} \item{fun}{Function which returns a scalar value and is applied to the \code{logml} vector of \code{"bridge_list"} objects. Default is \code{\link{median}}.} } \value{ scalar numeric } \description{ Generic function that returns log marginal likelihood from bridge objects. For objects of class \code{"bridge_list"}, which contains multiple log marginal likelihoods, \code{fun} is performed on the vector and its result returned. } bridgesampling/man/turtles.Rd0000644000176200001440000001200714026404747016015 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/turtles-data.R \docType{data} \encoding{UTF-8} \name{turtles} \alias{turtles} \title{Turtles Data from Janzen, Tucker, and Paukstis (2000)} \format{ A data.frame with 244 rows and 3 variables. } \source{ Janzen, F. J., Tucker, J. K., & Paukstis, G. L. (2000). Experimental analysis of an early life-history stage: Selection on size of hatchling turtles. \emph{Ecology, 81(8)}, 2290-2304. \doi{10.2307/177115} Overstall, A. M., & Forster, J. J. (2010). Default Bayesian model determination methods for generalised linear mixed models. \emph{Computational Statistics & Data Analysis, 54}, 3269-3288. \doi{10.1016/j.csda.2010.03.008} Sinharay, S., & Stern, H. S. (2005). 
An empirical comparison of methods for computing Bayes factors in generalized linear mixed models. \emph{Journal of Computational and Graphical Statistics, 14(2)}, 415-435. \doi{10.1198/106186005X47471} } \usage{ turtles } \description{ This data set contains information about 244 newborn turtles from 31 different clutches. For each turtle, the data set includes information about survival status (column \code{y}; 0 = died, 1 = survived), birth weight in grams (column \code{x}), and clutch (family) membership (column \code{clutch}; an integer between one and 31). The clutches have been ordered according to mean birth weight. } \examples{ \dontrun{ ################################################################################ # BAYESIAN GENERALIZED LINEAR MIXED MODEL (PROBIT REGRESSION) ################################################################################ library(bridgesampling) library(rstan) data("turtles") #------------------------------------------------------------------------------- # plot data #------------------------------------------------------------------------------- # reproduce Figure 1 from Sinharay & Stern (2005) xticks <- pretty(turtles$clutch) yticks <- pretty(turtles$x) plot(1, type = "n", axes = FALSE, ylab = "", xlab = "", xlim = range(xticks), ylim = range(yticks)) points(turtles$clutch, turtles$x, pch = ifelse(turtles$y, 21, 4), cex = 1.3, col = ifelse(turtles$y, "black", "darkred"), bg = "grey", lwd = 1.3) axis(1, cex.axis = 1.4) mtext("Clutch Identifier", side = 1, line = 2.9, cex = 1.8) axis(2, las = 1, cex.axis = 1.4) mtext("Birth Weight (Grams)", side = 2, line = 2.6, cex = 1.8) #------------------------------------------------------------------------------- # Analysis: Natural Selection Study (compute same BF as Sinharay & Stern, 2005) #------------------------------------------------------------------------------- ### H0 (model without random intercepts) ### H0_code <- "data { int N; int y[N]; real x[N]; } parameters { real alpha0_raw; real alpha1_raw; } transformed parameters { real alpha0 = sqrt(10.0) * alpha0_raw; real alpha1 = sqrt(10.0) * alpha1_raw; } model { // priors target += normal_lpdf(alpha0_raw | 0, 1); target += normal_lpdf(alpha1_raw | 0, 1); // likelihood for (i in 1:N) { target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1 * x[i])); } }" ### H1 (model with random intercepts) ### H1_code <- "data { int N; int y[N]; real x[N]; int C; int clutch[N]; } parameters { real alpha0_raw; real alpha1_raw; vector[C] b_raw; real sigma2; } transformed parameters { vector[C] b; real sigma = sqrt(sigma2); real alpha0 = sqrt(10.0) * alpha0_raw; real alpha1 = sqrt(10.0) * alpha1_raw; b = sigma * b_raw; } model { // priors target += - 2 * log(1 + sigma2); // p(sigma2) = 1 / (1 + sigma2) ^ 2 target += normal_lpdf(alpha0_raw | 0, 1); target += normal_lpdf(alpha1_raw | 0, 1); // random effects target += normal_lpdf(b_raw | 0, 1); // likelihood for (i in 1:N) { target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1 * x[i] + b[clutch[i]])); } }" set.seed(1) ### fit models ### stanfit_H0 <- stan(model_code = H0_code, data = list(y = turtles$y, x = turtles$x, N = nrow(turtles)), iter = 15500, warmup = 500, chains = 4, seed = 1) stanfit_H1 <- stan(model_code = H1_code, data = list(y = turtles$y, x = turtles$x, N = nrow(turtles), C = max(turtles$clutch), clutch = turtles$clutch), iter = 15500, warmup = 500, chains = 4, seed = 1) set.seed(1) ### compute (log) marginal likelihoods ### bridge_H0 <- bridge_sampler(stanfit_H0) bridge_H1 <- bridge_sampler(stanfit_H1) 
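### optional check (not part of the original example): the bridge_sampler()
### arguments "repetitions" and "method" documented for the stanfit method can
### be used to re-run the estimation several times and gauge the variability
### of the logml estimates empirically; the object names below are
### illustrative only
# bridge_H0_rep <- bridge_sampler(stanfit_H0, method = "warp3", repetitions = 10)
# bridge_H1_rep <- bridge_sampler(stanfit_H1, method = "warp3", repetitions = 10)
# summary(bridge_H0_rep) # min / IQR / max of the repeated logml estimates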
### compute approximate percentage errors ###
error_measures(bridge_H0)$percentage
error_measures(bridge_H1)$percentage

### summary ###
summary(bridge_H0)
summary(bridge_H1)

### compute Bayes factor ("true" value: BF01 = 1.273) ###
bf(bridge_H0, bridge_H1)
}
}
\keyword{dataset}
bridgesampling/DESCRIPTION
Package: bridgesampling
Type: Package
Title: Bridge Sampling for Marginal Likelihoods and Bayes Factors
Version: 1.1-2
Authors@R: c(person(given="Quentin F.", family="Gronau", role=c("aut", "cre"),
             email="Quentin.F.Gronau@gmail.com", comment=c(ORCID="0000-0001-5510-6943")),
             person(given="Henrik", family="Singmann", role="aut",
             comment=c(ORCID="0000-0002-4842-3657")),
             person(given="Jonathan J.", family="Forster", role="ctb"),
             person(given="Eric-Jan", family="Wagenmakers", role="ths"),
             person(family="The JASP Team", role="ctb"),
             person("Jiqiang", "Guo", role = "ctb"),
             person("Jonah", "Gabry", role = "ctb"),
             person("Ben", "Goodrich", role = c("ctb")),
             person("Kees", "Mulder", role = c("ctb")),
             person("Perry", "de Valpine", role = c("ctb"))
             )
Depends: R (>= 3.0.0)
Imports: mvtnorm, Matrix, Brobdingnag, stringr, coda, parallel, scales,
        utils, methods
Suggests: testthat, Rcpp, RcppEigen, R2jags, rjags, runjags, knitr,
        rmarkdown, R.rsp, BayesFactor, rstan, rstanarm, nimble, MCMCpack
Description: Provides functions for estimating marginal likelihoods, Bayes
        factors, posterior model probabilities, and normalizing constants in
        general, via different versions of bridge sampling (Meng & Wong, 1996,
        <http://www3.stat.sinica.edu.tw/statistica/j6n4/j6n43/j6n43.htm>).
        Gronau, Singmann, & Wagenmakers (2020) <doi:10.18637/jss.v092.i10>.
License: GPL (>= 2)
LazyData: true
RoxygenNote: 7.1.1
VignetteBuilder: knitr, R.rsp
URL: https://github.com/quentingronau/bridgesampling
NeedsCompilation: no
Packaged: 2021-04-15 18:55:13 UTC; singm
Author: Quentin F. Gronau [aut, cre] (<https://orcid.org/0000-0001-5510-6943>),
        Henrik Singmann [aut] (<https://orcid.org/0000-0002-4842-3657>),
        Jonathan J. Forster [ctb],
        Eric-Jan Wagenmakers [ths],
        The JASP Team [ctb],
        Jiqiang Guo [ctb],
        Jonah Gabry [ctb],
        Ben Goodrich [ctb],
        Kees Mulder [ctb],
        Perry de Valpine [ctb]
Maintainer: Quentin F. Gronau <Quentin.F.Gronau@gmail.com>
Repository: CRAN
Date/Publication: 2021-04-16 08:50:02 UTC
bridgesampling/build/
bridgesampling/build/vignette.rds
n; // number of observations
  vector[n] y; // observations
  real alpha;
  real beta;
  real sigma2;
}
parameters {
  real tau2; // group-level variance
  vector[n] theta; // participant effects
}
model {
  target += inv_gamma_lpdf(tau2 | alpha, beta);
  target += normal_lpdf(theta | 0, sqrt(tau2));
  target += normal_lpdf(y | theta, sqrt(sigma2));
}
'

stancodeH1 <- 'data {
  int n; // number of observations
  vector[n] y; // observations
  real mu0;
  real tau20;
  real alpha;
  real beta;
  real sigma2;
}
parameters {
  real mu;
  real tau2; // group-level variance
  vector[n] theta; // participant effects
}
model {
  target += normal_lpdf(mu | mu0, sqrt(tau20));
  target += inv_gamma_lpdf(tau2 | alpha, beta);
  target += normal_lpdf(theta | mu, sqrt(tau2));
  target += normal_lpdf(y | theta, sqrt(sigma2));
}
'

# compile models
stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel")
stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel")

# fit models
stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n,
                                               alpha = alpha,
                                               beta = beta,
                                               sigma2 = sigma2),
                      iter = 50000, warmup = 1000, chains = 3, cores = 1)
stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n,
                                               mu0 = mu0,
                                               tau20 = tau20,
                                               alpha = alpha,
                                               beta = beta,
                                               sigma2 = sigma2),
                      iter = 50000, warmup = 1000, chains = 3, cores = 1)

# compute log marginal likelihood via bridge sampling for H0
H0.bridge <- bridge_sampler(stanfitH0, silent = TRUE)

# compute log marginal likelihood via bridge sampling for H1
H1.bridge <- bridge_sampler(stanfitH1, silent = TRUE)

# compute percentage errors
H0.error <- error_measures(H0.bridge)$percentage
H1.error <- error_measures(H1.bridge)$percentage

# compute Bayes factor
BF01 <- bf(H0.bridge, H1.bridge)

# compute posterior model probabilities (assuming equal prior model probabilities)
post1 <- post_prob(H0.bridge, H1.bridge)

# compute posterior model probabilities (using user-specified prior model
probabilities) post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) # "exact" ml H1 mH1 <- function(data, rel.tol = 1e-10) { y <- data$y n <- data$n mu0 <- data$mu0 tau20 <- data$tau20 alpha <- data$alpha beta <- data$beta sigma2 <- data$sigma2 mH1integrand <- function(tau2, y, sigma2, mu0, tau20, alpha, beta) { (sigma2 + tau2)^(-n/2) * exp(-1/2 * ((n*mean(y)^2 + (n - 1)*sd(y)^2)/(sigma2 + tau2) + mu0^2/tau20 - ((n*mean(y))/(sigma2 + tau2) + mu0/tau20)^2 / (n/(sigma2 + tau2) + 1/tau20))) * (n/(sigma2 + tau2) + 1/tau20)^(-1/2) * tau2^(-alpha - 1) * exp(-beta/tau2) } (2*pi)^(-n/2) * (tau20)^(-1/2) * beta^alpha/gamma(alpha) * integrate(mH1integrand, 0, Inf, rel.tol = rel.tol, y = y, sigma2 = sigma2, mu0 = mu0, tau20 = tau20, alpha = alpha, beta = beta)$value } exact_logmlH1 <- log(mH1(list(y = y, n = n, mu0 = mu0, tau20 = tau20, alpha = alpha, beta = beta, sigma2 = sigma2))) # "exact" ml H1 mH0 <- function(data, rel.tol = 1e-10) { y <- data$y n <- data$n alpha <- data$alpha beta <- data$beta sigma2 <- data$sigma2 mH0integrand <- function(tau2, y, sigma2, alpha, beta) { n <- length(y) (sigma2 + tau2)^(-n/2) * exp(-(n*mean(y)^2 + (n - 1)*sd(y)^2)/ (2*(sigma2 + tau2))) * tau2^(-alpha - 1) * exp(-beta/tau2) } (2*pi)^(-n/2) * beta^alpha/gamma(alpha) * integrate(mH0integrand, 0, Inf, rel.tol = rel.tol, y = y, sigma2 = sigma2, alpha = alpha, beta = beta)$value } exact_logmlH0 <- log(mH0(list(y = y, n = n, alpha = alpha, beta = beta, sigma2 = sigma2))) exact_BF01 <- exp(exact_logmlH0 - exact_logmlH1) H0.bridge.curr <- H0.bridge H1.bridge.curr <- H1.bridge BF01.curr <- BF01 post1.curr <- post1 post2.curr <- post2 load(system.file("extdata/", "vignette_example_stan.RData", package = "bridgesampling")) expect_equal( H0.bridge.curr$logml, expected = exact_logmlH0, tolerance = 0.01 ) expect_equal( H1.bridge.curr$logml, expected = exact_logmlH1, tolerance = 0.01 ) expect_equal( BF01.curr$bf, expected = exact_BF01, tolerance = 0.01 ) expect_equal( H0.bridge.curr$logml, expected = H0.bridge$logml, tolerance = 0.01 ) expect_equal( H1.bridge.curr$logml, expected = H1.bridge$logml, tolerance = 0.01 ) expect_equal( BF01.curr$bf, expected = BF01$bf, tolerance = 0.01 ) expect_equal( post1.curr, expected = post1, tolerance = 0.01 ) expect_equal( post2.curr, expected = post2, tolerance = 0.01 ) } }) bridgesampling/tests/testthat/test-bridge_sampler_Rcpp.R0000644000176200001440000000425613663004467023326 0ustar liggesusers context('basic bridge sampling behavior normal Rcpp') test_that("bridge sampler matches anlytical value normal example", { testthat::skip_on_cran() testthat::skip_on_travis() # library(bridgesampling) library(mvtnorm) if(require(RcppEigen)) { x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) colnames(x) <- c("x1", "x2") lb <- rep(-Inf, 2) ub <- rep(Inf, 2) names(lb) <- names(ub) <- colnames(x) Rcpp::sourceCpp("unnormalized_normal_density.cpp") bridge_normal <- bridge_sampler(samples = x, log_posterior = log_densityRcpp, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE) bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_densityRcpp, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE) expect_equal(bridge_normal$logml, expected = log(2*pi), tolerance = 0.01) expect_equal(bridge_warp3$logml, expected = log(2*pi), tolerance = 0.01) # test dots argument mu <- c(1, 2) x <- rmvnorm(1e4, mean = mu, sigma = diag(2)) colnames(x) <- c("x1", "x2") lb <- rep(-Inf, 2) ub <- rep(Inf, 2) names(lb) <- names(ub) <- colnames(x) 
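    # The Rcpp::sourceCpp() call below compiles the C++ file and exposes
    # log_densityRcpp() to this R session. For orientation, a pure-R sketch of
    # the same unnormalized log density would look like the line below
    # (illustrative only, not used by this test; the name log_density_R is
    # hypothetical):
    #   log_density_R <- function(s, data) -0.5 * sum(s^2)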
Rcpp::sourceCpp("unnormalized_normal_density_mu.cpp") bridge_normal_dots <- bridge_sampler(samples = x, log_posterior = log_densityRcpp_mu, mu, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE) bridge_warp3_dots <- bridge_sampler(samples = x, log_posterior = log_densityRcpp_mu, mu, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE) expect_equal(bridge_normal_dots$logml, expected = log(2*pi), tolerance = 0.01) expect_equal(bridge_warp3_dots$logml, expected = log(2*pi), tolerance = 0.01) } }) bridgesampling/tests/testthat/test-bridge_sampler.R0000644000176200001440000002766113663004467022347 0ustar liggesusers context('basic bridge sampling behavior normal') test_that("bridge sampler matches anlytical value normal example", { # library(bridgesampling) library(mvtnorm) x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) colnames(x) <- c("x1", "x2") log_density <- function(s, data) { -.5*t(s)%*%s } assign(x = "log_density", value = log_density, envir = .GlobalEnv) lb <- rep(-Inf, 2) ub <- rep(Inf, 2) names(lb) <- names(ub) <- colnames(x) # check repetitions > 1 bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE, repetitions = 2) bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE, repetitions = 2) bridge_normal_c <- bridge_sampler(samples = x, log_posterior = "log_density", data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE, repetitions = 2) bridge_warp3_c <- bridge_sampler(samples = x, log_posterior = "log_density", data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE, repetitions = 2) expect_equal(bridge_normal$logml, expected = rep(log(2*pi), length(bridge_normal$logml)), tolerance = 0.01) expect_equal(bridge_warp3$logml, expected = rep(log(2*pi), length(bridge_warp3$logml)), tolerance = 0.01) expect_equal(bridge_normal_c$logml, expected = rep(log(2*pi), length(bridge_normal_c$logml)), tolerance = 0.01) expect_equal(bridge_warp3_c$logml, expected = rep(log(2*pi), length(bridge_warp3_c$logml)), tolerance = 0.01) expect_equal(bf(bridge_normal, bridge_warp3)$bf, expected = rep(1, 2), tolerance = 0.1) # check repetitions = 1 bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE) bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE) bridge_normal_c <- bridge_sampler(samples = x, log_posterior = "log_density", data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE) bridge_warp3_c <- bridge_sampler(samples = x, log_posterior = "log_density", data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE) expect_equal(bridge_normal$logml, expected = log(2*pi), tolerance = 0.01) expect_equal(bridge_warp3$logml, expected = log(2*pi), tolerance = 0.01) expect_equal(bridge_normal_c$logml, expected = log(2*pi), tolerance = 0.01) expect_equal(bridge_warp3_c$logml, expected = log(2*pi), tolerance = 0.01) # check using dots repetitions > 1 mu <- c(1, 2) x <- rmvnorm(1e4, mean = mu, sigma = diag(2)) colnames(x) <- c("x1", "x2") log_density <- function(s, data, ...) 
{ -.5*t(s - mu) %*% (s - mu) } assign(x = "log_density", value = log_density, envir = .GlobalEnv) lb <- rep(-Inf, 2) ub <- rep(Inf, 2) names(lb) <- names(ub) <- colnames(x) bridge_normal_dots <- bridge_sampler(samples = x, log_posterior = log_density, mu, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE, repetitions = 2) bridge_warp3_dots <- bridge_sampler(samples = x, log_posterior = log_density, mu, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE, repetitions = 2) bridge_normal_c_dots <- bridge_sampler(samples = x, log_posterior = "log_density", mu, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE, repetitions = 2) bridge_warp3_c_dots <- bridge_sampler(samples = x, log_posterior = "log_density", mu, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE, repetitions = 2) expect_equal(bridge_normal_dots$logml, expected = rep(log(2*pi), length(bridge_normal_dots$logml)), tolerance = 0.01) expect_equal(bridge_warp3_dots$logml, expected = rep(log(2*pi), length(bridge_warp3_dots$logml)), tolerance = 0.01) expect_equal(bridge_normal_c_dots$logml, expected = rep(log(2*pi), length(bridge_normal_c_dots$logml)), tolerance = 0.01) expect_equal(bridge_warp3_c_dots$logml, expected = rep(log(2*pi), length(bridge_warp3_c_dots$logml)), tolerance = 0.01) # check using dots mu <- c(1, 2) x <- rmvnorm(1e4, mean = mu, sigma = diag(2)) colnames(x) <- c("x1", "x2") log_density <- function(s, data, ...) { -.5*t(s - mu) %*% (s - mu) } assign(x = "log_density", value = log_density, envir = .GlobalEnv) lb <- rep(-Inf, 2) ub <- rep(Inf, 2) names(lb) <- names(ub) <- colnames(x) bridge_normal_dots <- bridge_sampler(samples = x, log_posterior = log_density, mu, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE) bridge_warp3_dots <- bridge_sampler(samples = x, log_posterior = log_density, mu, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE) bridge_normal_c_dots <- bridge_sampler(samples = x, log_posterior = "log_density", mu, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE) bridge_warp3_c_dots <- bridge_sampler(samples = x, log_posterior = "log_density", mu, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE) expect_equal(bridge_normal_dots$logml, expected = log(2*pi), tolerance = 0.01) expect_equal(bridge_warp3_dots$logml, expected = log(2*pi), tolerance = 0.01) expect_equal(bridge_normal_c_dots$logml, expected = log(2*pi), tolerance = 0.01) expect_equal(bridge_warp3_c_dots$logml, expected = log(2*pi), tolerance = 0.01) # check error_measures err <- error_measures(bridge_normal) expect_equal(names(err), c("re2", "cv", "percentage")) expect_is(unlist(err), "character") expect_error(error_measures(bridge_warp3), "not implemented for warp3") ### these are meant to check the bf and post_prob functions and not as a meaningful comparisons bf <- bf(bridge_normal, bridge_warp3) expect_is(bf$bf, "numeric") # without prior_prob post1 <- post_prob(bridge_normal, bridge_warp3, bridge_normal_c, bridge_warp3_c) expect_equal(sum(post1), 1) # with prior_prob post2 <- post_prob(bridge_normal, bridge_warp3, bridge_normal_c, bridge_warp3_c, prior_prob = c(0.2, 0.1, 0.25, 0.45)) expect_equal(sum(post2), 1) # with incorrect prior_prob expect_error(post_prob(bridge_normal, bridge_warp3, bridge_normal_c, bridge_warp3_c, prior_prob = c(0.2, 0.1, 0.25, 0.55)), "do not sum to one") }) context('non-standard parameter spaces') test_that("bridge sampler functions for non-standard parameter spaces", { # Test with only simplex ru <- 
replicate(10, runif(10)) theta <- (ru / rowSums(ru))[, -10] colnames(theta) <- paste0("sim", 1:9) theta_t <- .transform2Real(theta, lb = rep(0, 9), ub = rep(1, 9), theta_types = rep("simplex", 9)) expect_equal(theta_t$transTypes[1], c(sim1 = "simplex")) theta_t_t <- .invTransform2Real(theta_t$theta_t, lb = rep(0, 9), ub = rep(1, 9), theta_types = rep("simplex", 9)) expect_equal(theta, theta_t_t) # tranformations work for different input shapes nsimp <- 4 n <- 100 sum_to_one <- function(x) x / sum(x) ru <- t(replicate(n, c(rnorm(2), # unbounded sum_to_one(runif(nsimp)), # simplex runif(3), # double-bounded abs(rnorm(1)), # lower-bounded rnorm(2) %% (2*pi)))) # circular theta_original <- ru[, -(nsimp + 2)] pt <- c(rep("real", 2), rep("simplex", nsimp - 1), rep("real", 4), rep("circular", 2)) lb <- c(rep(-Inf, 2), rep(0, nsimp - 1), rep(0, 4), rep(0, 2)) ub <- c(rep(Inf, 2), rep(1, nsimp - 1), rep(1, 3), rep(Inf, 1), rep(2*pi, 2)) nm <- c(paste0("unbounded", 1:2), paste0("simplex", 1:(nsimp - 1)), paste0("doublebounded", 1:3), paste0("lower", 1), paste0("circular", 1:2)) colnames(theta_original) <- names(lb) <- names(ub) <- names(pt) <- nm theta_t <- .transform2Real(theta_original, lb, ub, pt) theta_t_t <- .invTransform2Real(theta_t$theta_t, lb, ub, pt) # The modulus is to force the circular variables to be equal if they lie on # the same place on the circle. The modulus is also taken for the linear # variables, for simplicity of programming. expect_equal(theta_original %% (2*pi), theta_t_t %% (2*pi)) # Works with one row theta <- theta_original[1, , drop = FALSE] theta_t <- .transform2Real(theta, lb, ub, pt) theta_t_t <- .invTransform2Real(theta_t$theta_t, lb, ub, pt) # The modulus is to force the circular variables to be equal if they lie on # the same place on the circle. 
expect_equal(theta %% (2*pi), theta_t_t %% (2*pi)) # Test bridge sampler function with non-standard sample spaces bs_ns <- bridge_sampler.matrix( theta_original, data = rnorm(10), log_posterior = function(s, data) -.5*t(s) %*% s, lb = lb, ub = ub, silent = TRUE, verbose = FALSE) expect_true(class(bs_ns) == "bridge") ############ TEST JACOBIAN n <- 2 theta_full <- t(c(.4, .6)) theta <- theta_full[, -n, drop = FALSE] colnames(theta) <- paste0("sim", (1:(n - 1))) y <- bridgesampling:::.transform2Real(theta, lb = rep(0, n - 1), ub = rep(1, n - 1), theta_types = rep("simplex", n - 1))$theta_t tt <- rep("simplex", n - 1) colnames(y) <- paste0("trans_sim", (1:(n - 1))) names(tt) <- paste0("sim", (1:(n - 1))) jacob <- .logJacobian(y, tt, lb = rep(0, n), ub = rep(1, n)) expect_true(is.numeric(jacob)) skip_if_not_installed("MCMCpack") invsimplex <- function(y) { y <- as.matrix(y) n <- length(y) colnames(y) <- paste0("trans_sim", (1:n)) out1 <- .invTransform2Real(y, lb = rep(0, n), ub = rep(1, n), theta_types = rep("simplex", n)) c(out1, 1 - sum(out1)) } invsimplex(100) p_y <- function(y) { y <- as.matrix(y) n <- length(y) tt <- rep("simplex", n) colnames(y) <- paste0("trans_sim", (1:n)) names(tt) <- paste0("sim", (1:n)) MCMCpack::ddirichlet(invsimplex(y), theta_full*10) * exp(.logJacobian(y, tt, lb = rep(0, n), ub = rep(1, n))) } # The jaobian corrects for the transformation expect_equal(integrate(Vectorize(p_y), -100, 100)$value, 1) }) bridgesampling/tests/testthat/test_dat.txt0000644000176200001440000005616113740544625020636 0ustar liggesuserstest_dat <- list(M = 1500L, J = 5L, T = 3, E = 1, G = 3L, N = c(65, 106, 129), ii = c(276, 220, 179, 65, 82, 68, 284, 116, 37, 90, 122, 251, 145, 261, 146, 6, 27, 204, 254, 245, 41, 218, 8, 268, 192, 74, 104, 281, 275, 72, 95, 207, 45, 196, 131, 297, 290, 260, 248, 157, 61, 274, 26, 81, 98, 165, 38, 124, 206, 88, 228, 215, 269, 200, 278, 85, 174, 106, 135, 93, 136, 293, 292, 143, 92, 32, 216, 151, 194, 57, 140, 28, 12, 159, 175, 52, 3, 102, 101, 77, 253, 182, 212, 31, 16, 195, 4, 189, 256, 172, 158, 128, 298, 291, 142, 240, 229, 210, 282, 236, 223, 103, 141, 44, 71, 9, 54, 79, 138, 277, 10, 267, 91, 238, 266, 86, 18, 123, 111, 19, 1, 247, 163, 34, 14, 96, 299, 205, 271, 201, 241, 29, 184, 25, 137, 66, 49, 147, 198, 87, 21, 188, 213, 150, 134, 51, 176, 237, 127, 243, 255, 202, 160, 7, 148, 296, 180, 193, 120, 185, 20, 125, 272, 113, 149, 139, 129, 15, 259, 226, 119, 263, 181, 230, 94, 42, 170, 80, 64, 48, 89, 155, 109, 70, 265, 222, 264, 203, 270, 35, 249, 117, 242, 279, 126, 258, 183, 285, 233, 110, 288, 289, 99, 171, 191, 50, 63, 5, 60, 177, 208, 133, 114, 199, 286, 283, 67, 168, 78, 132, 153, 56, 169, 40, 187, 190, 262, 224, 162, 250, 178, 115, 173, 287, 130, 39, 2, 76, 217, 22, 273, 152, 161, 221, 167, 69, 100, 219, 246, 53, 47, 97, 55, 154, 23, 107, 43, 46, 209, 294, 166, 58, 24, 234, 244, 17, 108, 197, 300, 105, 227, 112, 118, 83, 156, 164, 36, 73, 252, 59, 211, 75, 144, 33, 231, 214, 13, 295, 186, 280, 11, 257, 225, 239, 62, 121, 30, 232, 235, 84, 276, 220, 179, 65, 82, 68, 284, 116, 37, 90, 122, 251, 145, 261, 146, 6, 27, 204, 254, 245, 41, 218, 8, 268, 192, 74, 104, 281, 275, 72, 95, 207, 45, 196, 131, 297, 290, 260, 248, 157, 61, 274, 26, 81, 98, 165, 38, 124, 206, 88, 228, 215, 269, 200, 278, 85, 174, 106, 135, 93, 136, 293, 292, 143, 92, 32, 216, 151, 194, 57, 140, 28, 12, 159, 175, 52, 3, 102, 101, 77, 253, 182, 212, 31, 16, 195, 4, 189, 256, 172, 158, 128, 298, 291, 142, 240, 229, 210, 282, 236, 223, 103, 141, 44, 71, 9, 54, 79, 138, 277, 10, 
267, 91, 238, 266, 86, 18, 123, 111, 19, 1, 247, 163, 34, 14, 96, 299, 205, 271, 201, 241, 29, 184, 25, 137, 66, 49, 147, 198, 87, 21, 188, 213, 150, 134, 51, 176, 237, 127, 243, 255, 202, 160, 7, 148, 296, 180, 193, 120, 185, 20, 125, 272, 113, 149, 139, 129, 15, 259, 226, 119, 263, 181, 230, 94, 42, 170, 80, 64, 48, 89, 155, 109, 70, 265, 222, 264, 203, 270, 35, 249, 117, 242, 279, 126, 258, 183, 285, 233, 110, 288, 289, 99, 171, 191, 50, 63, 5, 60, 177, 208, 133, 114, 199, 286, 283, 67, 168, 78, 132, 153, 56, 169, 40, 187, 190, 262, 224, 162, 250, 178, 115, 173, 287, 130, 39, 2, 76, 217, 22, 273, 152, 161, 221, 167, 69, 100, 219, 246, 53, 47, 97, 55, 154, 23, 107, 43, 46, 209, 294, 166, 58, 24, 234, 244, 17, 108, 197, 300, 105, 227, 112, 118, 83, 156, 164, 36, 73, 252, 59, 211, 75, 144, 33, 231, 214, 13, 295, 186, 280, 11, 257, 225, 239, 62, 121, 30, 232, 235, 84, 276, 220, 179, 65, 82, 68, 284, 116, 37, 90, 122, 251, 145, 261, 146, 6, 27, 204, 254, 245, 41, 218, 8, 268, 192, 74, 104, 281, 275, 72, 95, 207, 45, 196, 131, 297, 290, 260, 248, 157, 61, 274, 26, 81, 98, 165, 38, 124, 206, 88, 228, 215, 269, 200, 278, 85, 174, 106, 135, 93, 136, 293, 292, 143, 92, 32, 216, 151, 194, 57, 140, 28, 12, 159, 175, 52, 3, 102, 101, 77, 253, 182, 212, 31, 16, 195, 4, 189, 256, 172, 158, 128, 298, 291, 142, 240, 229, 210, 282, 236, 223, 103, 141, 44, 71, 9, 54, 79, 138, 277, 10, 267, 91, 238, 266, 86, 18, 123, 111, 19, 1, 247, 163, 34, 14, 96, 299, 205, 271, 201, 241, 29, 184, 25, 137, 66, 49, 147, 198, 87, 21, 188, 213, 150, 134, 51, 176, 237, 127, 243, 255, 202, 160, 7, 148, 296, 180, 193, 120, 185, 20, 125, 272, 113, 149, 139, 129, 15, 259, 226, 119, 263, 181, 230, 94, 42, 170, 80, 64, 48, 89, 155, 109, 70, 265, 222, 264, 203, 270, 35, 249, 117, 242, 279, 126, 258, 183, 285, 233, 110, 288, 289, 99, 171, 191, 50, 63, 5, 60, 177, 208, 133, 114, 199, 286, 283, 67, 168, 78, 132, 153, 56, 169, 40, 187, 190, 262, 224, 162, 250, 178, 115, 173, 287, 130, 39, 2, 76, 217, 22, 273, 152, 161, 221, 167, 69, 100, 219, 246, 53, 47, 97, 55, 154, 23, 107, 43, 46, 209, 294, 166, 58, 24, 234, 244, 17, 108, 197, 300, 105, 227, 112, 118, 83, 156, 164, 36, 73, 252, 59, 211, 75, 144, 33, 231, 214, 13, 295, 186, 280, 11, 257, 225, 239, 62, 121, 30, 232, 235, 84, 276, 220, 179, 65, 82, 68, 284, 116, 37, 90, 122, 251, 145, 261, 146, 6, 27, 204, 254, 245, 41, 218, 8, 268, 192, 74, 104, 281, 275, 72, 95, 207, 45, 196, 131, 297, 290, 260, 248, 157, 61, 274, 26, 81, 98, 165, 38, 124, 206, 88, 228, 215, 269, 200, 278, 85, 174, 106, 135, 93, 136, 293, 292, 143, 92, 32, 216, 151, 194, 57, 140, 28, 12, 159, 175, 52, 3, 102, 101, 77, 253, 182, 212, 31, 16, 195, 4, 189, 256, 172, 158, 128, 298, 291, 142, 240, 229, 210, 282, 236, 223, 103, 141, 44, 71, 9, 54, 79, 138, 277, 10, 267, 91, 238, 266, 86, 18, 123, 111, 19, 1, 247, 163, 34, 14, 96, 299, 205, 271, 201, 241, 29, 184, 25, 137, 66, 49, 147, 198, 87, 21, 188, 213, 150, 134, 51, 176, 237, 127, 243, 255, 202, 160, 7, 148, 296, 180, 193, 120, 185, 20, 125, 272, 113, 149, 139, 129, 15, 259, 226, 119, 263, 181, 230, 94, 42, 170, 80, 64, 48, 89, 155, 109, 70, 265, 222, 264, 203, 270, 35, 249, 117, 242, 279, 126, 258, 183, 285, 233, 110, 288, 289, 99, 171, 191, 50, 63, 5, 60, 177, 208, 133, 114, 199, 286, 283, 67, 168, 78, 132, 153, 56, 169, 40, 187, 190, 262, 224, 162, 250, 178, 115, 173, 287, 130, 39, 2, 76, 217, 22, 273, 152, 161, 221, 167, 69, 100, 219, 246, 53, 47, 97, 55, 154, 23, 107, 43, 46, 209, 294, 166, 58, 24, 234, 244, 17, 108, 197, 300, 105, 227, 112, 118, 83, 156, 164, 
36, 73, 252, 59, 211, 75, 144, 33, 231, 214, 13, 295, 186, 280, 11, 257, 225, 239, 62, 121, 30, 232, 235, 84, 276, 220, 179, 65, 82, 68, 284, 116, 37, 90, 122, 251, 145, 261, 146, 6, 27, 204, 254, 245, 41, 218, 8, 268, 192, 74, 104, 281, 275, 72, 95, 207, 45, 196, 131, 297, 290, 260, 248, 157, 61, 274, 26, 81, 98, 165, 38, 124, 206, 88, 228, 215, 269, 200, 278, 85, 174, 106, 135, 93, 136, 293, 292, 143, 92, 32, 216, 151, 194, 57, 140, 28, 12, 159, 175, 52, 3, 102, 101, 77, 253, 182, 212, 31, 16, 195, 4, 189, 256, 172, 158, 128, 298, 291, 142, 240, 229, 210, 282, 236, 223, 103, 141, 44, 71, 9, 54, 79, 138, 277, 10, 267, 91, 238, 266, 86, 18, 123, 111, 19, 1, 247, 163, 34, 14, 96, 299, 205, 271, 201, 241, 29, 184, 25, 137, 66, 49, 147, 198, 87, 21, 188, 213, 150, 134, 51, 176, 237, 127, 243, 255, 202, 160, 7, 148, 296, 180, 193, 120, 185, 20, 125, 272, 113, 149, 139, 129, 15, 259, 226, 119, 263, 181, 230, 94, 42, 170, 80, 64, 48, 89, 155, 109, 70, 265, 222, 264, 203, 270, 35, 249, 117, 242, 279, 126, 258, 183, 285, 233, 110, 288, 289, 99, 171, 191, 50, 63, 5, 60, 177, 208, 133, 114, 199, 286, 283, 67, 168, 78, 132, 153, 56, 169, 40, 187, 190, 262, 224, 162, 250, 178, 115, 173, 287, 130, 39, 2, 76, 217, 22, 273, 152, 161, 221, 167, 69, 100, 219, 246, 53, 47, 97, 55, 154, 23, 107, 43, 46, 209, 294, 166, 58, 24, 234, 244, 17, 108, 197, 300, 105, 227, 112, 118, 83, 156, 164, 36, 73, 252, 59, 211, 75, 144, 33, 231, 214, 13, 295, 186, 280, 11, 257, 225, 239, 62, 121, 30, 232, 235, 84), jj = c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5), gg = c(2, 3, 2, 3, 3, 2, 3, 1, 3, 2, 1, 1, 2, 2, 3, 3, 3, 3, 3, 2, 3, 3, 2, 1, 2, 2, 2, 3, 3, 1, 3, 2, 3, 3, 2, 3, 2, 3, 3, 2, 2, 1, 2, 2, 3, 3, 3, 2, 3, 3, 3, 1, 3, 1, 3, 3, 2, 1, 2, 2, 3, 1, 2, 2, 2, 2, 3, 3, 2, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 3, 2, 1, 3, 1, 3, 2, 2, 3, 3, 3, 2, 1, 2, 2, 1, 1, 3, 3, 2, 2, 3, 3, 3, 3, 3, 2, 3, 2, 1, 3, 1, 1, 1, 3, 1, 2, 1, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 2, 1, 1, 3, 1, 2, 2, 2, 3, 3, 2, 2, 3, 3, 2, 2, 1, 2, 1, 3, 1, 2, 3, 2, 3, 3, 2, 3, 2, 2, 2, 2, 3, 1, 3, 2, 3, 1, 1, 2, 2, 2, 2, 3, 3, 2, 3, 3, 2, 3, 2, 1, 1, 1, 3, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 2, 3, 3, 2, 1, 2, 1, 1, 1, 3, 2, 3, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 3, 3, 1, 3, 2, 2, 3, 2, 2, 3, 3, 2, 2, 1, 3, 1, 2, 3, 2, 3, 2, 1, 3, 2, 3, 3, 2, 2, 2, 3, 1, 3, 3, 3, 3, 3, 1, 1, 1, 3, 1, 3, 2, 3, 2, 3, 1, 1, 3, 2, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 3, 1, 3, 2, 1, 1, 1, 2, 2, 3, 2, 2, 2, 1, 3, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 3, 2, 3, 3, 2, 3, 1, 3, 2, 1, 1, 2, 2, 3, 3, 3, 3, 3, 2, 3, 3, 2, 1, 2, 2, 2, 3, 3, 1, 3, 2, 3, 3, 2, 3, 2, 3, 3, 2, 2, 1, 2, 2, 3, 3, 3, 2, 3, 3, 3, 1, 3, 1, 3, 3, 2, 1, 2, 2, 3, 1, 2, 
2, 2, 2, 3, 3, 2, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 3, 2, 1, 3, 1, 3, 2, 2, 3, 3, 3, 2, 1, 2, 2, 1, 1, 3, 3, 2, 2, 3, 3, 3, 3, 3, 2, 3, 2, 1, 3, 1, 1, 1, 3, 1, 2, 1, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 2, 1, 1, 3, 1, 2, 2, 2, 3, 3, 2, 2, 3, 3, 2, 2, 1, 2, 1, 3, 1, 2, 3, 2, 3, 3, 2, 3, 2, 2, 2, 2, 3, 1, 3, 2, 3, 1, 1, 2, 2, 2, 2, 3, 3, 2, 3, 3, 2, 3, 2, 1, 1, 1, 3, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 2, 3, 3, 2, 1, 2, 1, 1, 1, 3, 2, 3, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 3, 3, 1, 3, 2, 2, 3, 2, 2, 3, 3, 2, 2, 1, 3, 1, 2, 3, 2, 3, 2, 1, 3, 2, 3, 3, 2, 2, 2, 3, 1, 3, 3, 3, 3, 3, 1, 1, 1, 3, 1, 3, 2, 3, 2, 3, 1, 1, 3, 2, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 3, 1, 3, 2, 1, 1, 1, 2, 2, 3, 2, 2, 2, 1, 3, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 3, 2, 3, 3, 2, 3, 1, 3, 2, 1, 1, 2, 2, 3, 3, 3, 3, 3, 2, 3, 3, 2, 1, 2, 2, 2, 3, 3, 1, 3, 2, 3, 3, 2, 3, 2, 3, 3, 2, 2, 1, 2, 2, 3, 3, 3, 2, 3, 3, 3, 1, 3, 1, 3, 3, 2, 1, 2, 2, 3, 1, 2, 2, 2, 2, 3, 3, 2, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 3, 2, 1, 3, 1, 3, 2, 2, 3, 3, 3, 2, 1, 2, 2, 1, 1, 3, 3, 2, 2, 3, 3, 3, 3, 3, 2, 3, 2, 1, 3, 1, 1, 1, 3, 1, 2, 1, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 2, 1, 1, 3, 1, 2, 2, 2, 3, 3, 2, 2, 3, 3, 2, 2, 1, 2, 1, 3, 1, 2, 3, 2, 3, 3, 2, 3, 2, 2, 2, 2, 3, 1, 3, 2, 3, 1, 1, 2, 2, 2, 2, 3, 3, 2, 3, 3, 2, 3, 2, 1, 1, 1, 3, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 2, 3, 3, 2, 1, 2, 1, 1, 1, 3, 2, 3, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 3, 3, 1, 3, 2, 2, 3, 2, 2, 3, 3, 2, 2, 1, 3, 1, 2, 3, 2, 3, 2, 1, 3, 2, 3, 3, 2, 2, 2, 3, 1, 3, 3, 3, 3, 3, 1, 1, 1, 3, 1, 3, 2, 3, 2, 3, 1, 1, 3, 2, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 3, 1, 3, 2, 1, 1, 1, 2, 2, 3, 2, 2, 2, 1, 3, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 3, 2, 3, 3, 2, 3, 1, 3, 2, 1, 1, 2, 2, 3, 3, 3, 3, 3, 2, 3, 3, 2, 1, 2, 2, 2, 3, 3, 1, 3, 2, 3, 3, 2, 3, 2, 3, 3, 2, 2, 1, 2, 2, 3, 3, 3, 2, 3, 3, 3, 1, 3, 1, 3, 3, 2, 1, 2, 2, 3, 1, 2, 2, 2, 2, 3, 3, 2, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 3, 2, 1, 3, 1, 3, 2, 2, 3, 3, 3, 2, 1, 2, 2, 1, 1, 3, 3, 2, 2, 3, 3, 3, 3, 3, 2, 3, 2, 1, 3, 1, 1, 1, 3, 1, 2, 1, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 2, 1, 1, 3, 1, 2, 2, 2, 3, 3, 2, 2, 3, 3, 2, 2, 1, 2, 1, 3, 1, 2, 3, 2, 3, 3, 2, 3, 2, 2, 2, 2, 3, 1, 3, 2, 3, 1, 1, 2, 2, 2, 2, 3, 3, 2, 3, 3, 2, 3, 2, 1, 1, 1, 3, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 2, 3, 3, 2, 1, 2, 1, 1, 1, 3, 2, 3, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 3, 3, 1, 3, 2, 2, 3, 2, 2, 3, 3, 2, 2, 1, 3, 1, 2, 3, 2, 3, 2, 1, 3, 2, 3, 3, 2, 2, 2, 3, 1, 3, 3, 3, 3, 3, 1, 1, 1, 3, 1, 3, 2, 3, 2, 3, 1, 1, 3, 2, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 3, 1, 3, 2, 1, 1, 1, 2, 2, 3, 2, 2, 2, 1, 3, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 3, 2, 3, 3, 2, 3, 1, 3, 2, 1, 1, 2, 2, 3, 3, 3, 3, 3, 2, 3, 3, 2, 1, 2, 2, 2, 3, 3, 1, 3, 2, 3, 3, 2, 3, 2, 3, 3, 2, 2, 1, 2, 2, 3, 3, 3, 2, 3, 3, 3, 1, 3, 1, 3, 3, 2, 1, 2, 2, 3, 1, 2, 2, 2, 2, 3, 3, 2, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 3, 2, 1, 3, 1, 3, 2, 2, 3, 3, 3, 2, 1, 2, 2, 1, 1, 3, 3, 2, 2, 3, 3, 3, 3, 3, 2, 3, 2, 1, 3, 1, 1, 1, 3, 1, 2, 1, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 2, 1, 1, 3, 1, 2, 2, 2, 3, 3, 2, 2, 3, 3, 2, 2, 1, 2, 1, 3, 1, 2, 3, 2, 3, 3, 2, 3, 2, 2, 2, 2, 3, 1, 3, 2, 3, 1, 1, 2, 2, 2, 2, 3, 3, 2, 3, 3, 2, 3, 2, 1, 1, 1, 3, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 2, 3, 3, 2, 1, 2, 1, 1, 1, 3, 2, 3, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 3, 3, 1, 3, 2, 2, 3, 2, 2, 3, 3, 2, 2, 1, 3, 1, 2, 3, 2, 3, 2, 1, 3, 2, 3, 3, 2, 2, 2, 3, 1, 3, 3, 3, 3, 3, 1, 1, 1, 3, 1, 3, 2, 3, 2, 3, 1, 1, 3, 2, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 3, 1, 3, 2, 1, 1, 1, 2, 2, 3, 2, 2, 2, 1, 3, 2, 2, 3, 3, 3, 3, 3, 2, 2), g_all = c(116, 122, 251, 268, 72, 274, 215, 200, 106, 293, 57, 182, 31, 128, 142, 240, 138, 10, 267, 91, 266, 18, 247, 163, 271, 201, 29, 150, 
51, 237, 20, 149, 139, 64, 48, 89, 109, 233, 288, 289, 99, 63, 5, 60, 133, 114, 199, 286, 78, 250, 115, 76, 100, 55, 154, 23, 43, 24, 234, 83, 73, 211, 75, 144, 280, 276, 179, 68, 90, 145, 261, 245, 8, 192, 74, 104, 207, 131, 290, 157, 61, 26, 81, 124, 174, 135, 93, 292, 143, 92, 32, 194, 140, 28, 12, 101, 253, 195, 4, 158, 298, 291, 282, 236, 9, 79, 86, 205, 184, 25, 137, 147, 198, 188, 213, 134, 127, 255, 7, 296, 180, 193, 120, 272, 129, 15, 259, 226, 181, 42, 80, 70, 222, 203, 35, 117, 126, 285, 110, 191, 177, 208, 283, 153, 56, 40, 187, 224, 162, 173, 130, 2, 22, 161, 221, 167, 209, 166, 17, 227, 156, 59, 33, 231, 13, 295, 186, 257, 225, 235, 84, 220, 65, 82, 284, 37, 146, 6, 27, 204, 254, 41, 218, 281, 275, 95, 45, 196, 297, 260, 248, 98, 165, 38, 206, 88, 228, 269, 278, 85, 136, 216, 151, 159, 175, 52, 3, 102, 77, 212, 16, 189, 256, 172, 229, 210, 223, 103, 141, 44, 71, 54, 277, 238, 123, 111, 19, 1, 34, 14, 96, 299, 241, 66, 49, 87, 21, 176, 243, 202, 160, 148, 185, 125, 113, 119, 263, 230, 94, 170, 155, 265, 264, 270, 249, 242, 279, 258, 183, 171, 50, 67, 168, 132, 169, 190, 262, 178, 287, 39, 217, 273, 152, 69, 219, 246, 53, 47, 97, 107, 46, 294, 58, 244, 108, 197, 300, 105, 112, 118, 164, 36, 252, 214, 11, 239, 62, 121, 30, 232), y = c(4, 3, 1, 2, 2, 2, 4, 4, 3, 2, 4, 1, 3, 2, 3, 2, 1, 2, 4, 4, 1, 3, 1, 2, 4, 2, 3, 4, 1, 3, 3, 3, 3, 2, 4, 4, 4, 4, 3, 3, 4, 2, 1, 2, 2, 3, 3, 3, 4, 2, 1, 4, 4, 4, 3, 4, 3, 2, 2, 3, 3, 4, 4, 2, 1, 2, 3, 3, 2, 3, 3, 2, 3, 2, 3, 3, 2, 2, 1, 4, 4, 2, 3, 2, 3, 3, 3, 4, 3, 2, 4, 4, 4, 4, 2, 3, 4, 1, 3, 4, 4, 3, 3, 2, 2, 1, 4, 2, 3, 3, 1, 3, 2, 3, 2, 2, 2, 4, 3, 4, 1, 4, 4, 2, 1, 3, 4, 3, 4, 4, 3, 2, 2, 1, 2, 3, 3, 4, 2, 2, 4, 3, 1, 3, 3, 3, 4, 1, 4, 2, 3, 2, 4, 1, 4, 4, 3, 2, 2, 3, 3, 3, 4, 3, 3, 2, 4, 2, 4, 4, 2, 3, 4, 2, 4, 2, 3, 2, 2, 2, 2, 3, 3, 2, 3, 4, 3, 2, 3, 3, 4, 3, 3, 4, 3, 4, 4, 4, 3, 2, 4, 4, 2, 2, 4, 3, 2, 3, 4, 4, 3, 1, 3, 2, 3, 3, 1, 3, 2, 2, 3, 3, 3, 1, 3, 4, 2, 4, 4, 4, 4, 2, 3, 3, 4, 3, 1, 4, 3, 3, 4, 4, 4, 4, 3, 2, 2, 3, 3, 4, 1, 2, 2, 3, 1, 4, 2, 2, 4, 3, 2, 3, 3, 3, 3, 1, 2, 3, 4, 3, 3, 1, 2, 3, 4, 2, 3, 4, 3, 2, 1, 4, 3, 3, 4, 3, 3, 4, 3, 3, 2, 4, 4, 1, 3, 3, 4, 4, 3, 3, 3, 4, 3, 3, 2, 1, 4, 3, 2, 3, 3, 4, 3, 4, 4, 2, 1, 4, 3, 3, 1, 4, 2, 3, 4, 3, 3, 2, 4, 3, 2, 4, 4, 3, 4, 4, 4, 4, 4, 3, 3, 4, 2, 4, 4, 3, 3, 3, 4, 1, 4, 3, 4, 3, 3, 4, 3, 3, 2, 3, 1, 4, 4, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 3, 2, 1, 1, 4, 2, 3, 2, 2, 3, 3, 4, 4, 4, 3, 4, 4, 4, 2, 4, 3, 2, 4, 4, 4, 2, 3, 1, 1, 3, 4, 4, 3, 4, 3, 3, 3, 2, 4, 2, 3, 3, 3, 4, 1, 4, 3, 3, 1, 4, 4, 3, 4, 4, 3, 3, 4, 2, 3, 2, 1, 3, 4, 3, 3, 4, 4, 3, 3, 3, 4, 3, 4, 4, 4, 3, 3, 2, 4, 4, 2, 4, 4, 3, 3, 3, 4, 4, 3, 3, 3, 2, 4, 4, 2, 2, 4, 2, 1, 3, 4, 4, 3, 2, 2, 3, 3, 3, 2, 4, 3, 4, 4, 2, 4, 3, 4, 4, 3, 4, 3, 4, 3, 4, 4, 4, 4, 4, 4, 4, 2, 2, 4, 4, 4, 3, 2, 3, 3, 4, 3, 4, 3, 2, 2, 4, 4, 2, 4, 3, 4, 4, 4, 4, 4, 3, 4, 3, 4, 2, 2, 4, 4, 3, 4, 4, 4, 4, 4, 2, 4, 4, 4, 3, 2, 3, 3, 3, 2, 2, 2, 4, 4, 4, 4, 3, 3, 4, 4, 2, 3, 4, 4, 3, 3, 4, 3, 4, 3, 3, 2, 3, 4, 3, 2, 4, 3, 4, 4, 2, 3, 4, 3, 3, 2, 4, 4, 3, 4, 4, 4, 3, 4, 4, 2, 4, 2, 1, 3, 2, 1, 1, 3, 2, 2, 2, 3, 3, 2, 1, 2, 4, 3, 4, 1, 3, 2, 4, 4, 3, 3, 2, 3, 3, 3, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 2, 1, 2, 3, 2, 3, 3, 3, 2, 1, 2, 3, 1, 4, 3, 4, 4, 1, 2, 3, 4, 2, 4, 3, 2, 2, 2, 3, 4, 4, 3, 1, 4, 3, 2, 1, 2, 2, 3, 3, 1, 2, 1, 3, 2, 2, 3, 4, 4, 3, 4, 4, 4, 4, 1, 1, 2, 3, 4, 4, 2, 3, 4, 3, 1, 4, 3, 1, 4, 2, 3, 2, 1, 2, 2, 3, 2, 4, 1, 1, 2, 2, 2, 4, 2, 4, 4, 3, 1, 2, 2, 2, 1, 1, 1, 3, 4, 2, 1, 3, 4, 3, 1, 1, 3, 4, 1, 4, 2, 3, 3, 4, 1, 3, 4, 3, 1, 2, 3, 3, 2, 4, 
4, 1, 3, 4, 3, 4, 4, 4, 2, 3, 4, 4, 2, 4, 3, 1, 3, 1, 2, 3, 4, 2, 4, 3, 4, 4, 3, 4, 1, 2, 2, 3, 4, 1, 4, 4, 3, 2, 4, 3, 4, 4, 1, 2, 2, 4, 4, 4, 1, 1, 3, 4, 4, 2, 2, 2, 2, 1, 1, 3, 1, 3, 4, 1, 4, 4, 4, 4, 2, 3, 3, 3, 3, 1, 4, 3, 2, 4, 1, 4, 4, 3, 4, 4, 3, 2, 4, 1, 4, 2, 2, 3, 3, 2, 3, 4, 4, 3, 2, 3, 4, 2, 1, 3, 2, 4, 2, 3, 4, 3, 3, 3, 2, 3, 2, 2, 3, 2, 4, 3, 2, 1, 2, 2, 4, 4, 3, 2, 4, 4, 3, 4, 2, 2, 3, 4, 4, 2, 4, 1, 1, 2, 1, 4, 1, 2, 1, 2, 1, 3, 1, 3, 2, 3, 2, 3, 2, 1, 4, 1, 3, 4, 3, 1, 3, 2, 2, 2, 3, 2, 2, 2, 4, 2, 4, 3, 1, 2, 1, 1, 2, 3, 2, 3, 2, 2, 4, 2, 1, 3, 3, 4, 2, 4, 1, 1, 1, 1, 3, 4, 2, 1, 2, 3, 1, 1, 2, 3, 2, 1, 4, 4, 2, 1, 3, 1, 2, 2, 1, 1, 1, 1, 1, 1, 4, 4, 3, 3, 1, 4, 2, 4, 2, 3, 1, 2, 3, 4, 1, 3, 4, 4, 2, 4, 1, 1, 1, 1, 3, 1, 1, 3, 1, 1, 2, 3, 1, 1, 4, 1, 2, 1, 2, 4, 3, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 3, 3, 1, 1, 1, 3, 3, 1, 2, 3, 4, 3, 3, 1, 2, 4, 3, 1, 1, 2, 2, 4, 4, 4, 1, 2, 4, 1, 4, 4, 2, 3, 4, 3, 2, 2, 1, 1, 1, 1, 3, 3, 2, 2, 3, 4, 1, 2, 3, 2, 4, 2, 1, 4, 2, 4, 1, 1, 3, 1, 3, 2, 3, 2, 3, 1, 1, 2, 1, 3, 4, 1, 1, 2, 3, 2, 1, 3, 1, 2, 3, 1, 3, 1, 3, 4, 2, 4, 4, 2, 4, 1, 2, 4, 3, 3, 1, 4, 1, 1, 4, 1, 2, 3, 1, 3, 4, 3, 4, 1, 2, 4, 1, 1, 3, 1, 2, 2, 4, 4, 2, 1, 1, 1, 3, 1, 4, 2, 4, 1, 2, 4, 1, 1, 1, 2, 3, 1, 3, 3, 3, 4, 3, 1, 2, 2, 1, 4, 1, 1, 2, 3, 4, 1, 3, 1, 3, 2, 4, 1, 4, 4, 2, 2, 4, 4, 4, 3, 3, 3, 3, 2, 3, 2, 3, 2, 2, 4, 4, 4, 2, 2, 2, 4, 4, 4, 2, 3, 3, 3, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 1, 3, 3, 4, 3, 3, 4, 4, 2, 4, 3, 4, 4, 4, 4, 3, 2, 3, 4, 1, 4, 4, 4, 3, 2, 3, 3, 2, 3, 1, 1, 2, 4, 4, 4, 3, 2, 3, 1, 3, 2, 2, 3, 2, 2, 4, 4, 4, 3, 3, 4, 4, 4, 3, 2, 3, 2, 4, 4, 4, 3, 3, 4, 3, 2, 4, 2, 3, 3, 3, 3, 3, 2, 3, 2, 2, 4, 4, 3, 1, 4, 2, 4, 1, 4, 4, 4, 4, 4, 3, 3, 4, 1, 4, 2, 3, 3, 4, 2, 4, 3, 3, 3, 1, 3, 4, 2, 3, 4, 4, 4, 4, 1, 4, 4, 3, 3, 4, 4, 3, 3, 4, 4, 2, 2, 3, 3, 4, 4, 3, 3, 4, 3, 3, 2, 4, 3, 2, 3, 3, 3, 3, 2, 3, 4, 3, 3, 4, 3, 4, 3, 4, 4, 4, 4, 3, 4, 3, 4, 4, 4, 4, 4, 4, 4, 1, 2, 4, 4, 4, 2, 2, 4, 4, 3, 3, 3, 3, 2, 4, 3, 3, 1, 3, 4, 4, 4, 4, 4, 4, 2, 4, 4, 4, 4, 2, 4, 4, 2, 4, 3, 4, 4, 3, 2, 4, 4, 4, 4, 2, 2, 2, 4, 3, 3, 3, 4, 4, 4, 2, 4, 2, 4, 4, 2, 1, 3, 4, 3, 3, 4, 3, 3, 4, 2, 2, 4, 3, 4, 1, 3, 3, 2, 4, 3, 2, 4, 4, 3, 3, 4, 4, 3, 4, 4, 3, 3, 4, 4), obs_corr = structure(c(1, 1, 1, 0.51900000000000002, 0.626, 0.38700000000000001, 0.23699999999999999, 0.67900000000000005, 0.22800000000000001, 0.23999999999999999, 0.70999999999999996, 0.28199999999999997, 0.59999999999999998, 0.66000000000000003, 0.45500000000000002, 0.51900000000000002, 0.626, 0.38700000000000001, 1, 1, 1, 0.39400000000000002, 0.58199999999999996, 0.22700000000000001, 0.22700000000000001, 0.53900000000000003, 0.30599999999999999, 0.53700000000000003, 0.52600000000000002, 0.72199999999999998, 0.23699999999999999, 0.67900000000000005, 0.22800000000000001, 0.39400000000000002, 0.58199999999999996, 0.22700000000000001, 1, 1, 1, 0.49099999999999999, 0.68000000000000005, 0.52900000000000003, 0.46899999999999997, 0.56999999999999995, 0.44600000000000001, 0.23999999999999999, 0.70999999999999996, 0.28199999999999997, 0.22700000000000001, 0.53900000000000003, 0.30599999999999999, 0.49099999999999999, 0.68000000000000005, 0.52900000000000003, 1, 1, 1, 0.49299999999999999, 0.57199999999999995, 0.42899999999999999, 0.59999999999999998, 0.66000000000000003, 0.45500000000000002, 0.53700000000000003, 0.52600000000000002, 0.72199999999999998, 0.46899999999999997, 0.56999999999999995, 0.44600000000000001, 0.49299999999999999, 0.57199999999999995, 0.42899999999999999, 1, 1, 1), .Dim = c(3L, 5L, 5L))) 
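# Optional sanity check for this list (illustrative sketch only; the tests that
# source() this file do not run it): the index vectors should match the
# dimensions declared in the Stan data block of test-stan_bridge_sampler_bugs.R.
#   stopifnot(length(test_dat$ii) == test_dat$M,
#             length(test_dat$jj) == test_dat$M,
#             length(test_dat$gg) == test_dat$M,
#             length(test_dat$y)  == test_dat$M,
#             length(test_dat$g_all) == sum(test_dat$N))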
bridgesampling/tests/testthat/test-stan_bridge_sampler_basic.R0000644000176200001440000001276714026400750024523 0ustar liggesusers context('bridge_sampler.stanfit works.') ### H0: mu = 0 mH0 <- function(y, sigma2 = 1, alpha = 2, beta = 3, rel.tol = 10^(-10)) { n <- length(y) mH0integrand <- function(tau2, y, sigma2, alpha, beta) { (sigma2 + tau2)^(-n/2) * exp(-(n*mean(y)^2 + (n - 1)*sd(y)^2)/(2*(sigma2 + tau2))) * tau2^(-alpha - 1) * exp(-beta/tau2) } (2*pi)^(-n/2) * beta^alpha/gamma(alpha) * integrate(mH0integrand, 0, Inf, rel.tol = rel.tol, y = y, sigma2 = sigma2, alpha = alpha, beta = beta)$value } test_that("stan_bridge_sampler", { testthat::skip_on_os("windows") if (require(rstan)) { set.seed(12345) mu <- 0 tau2 <- 0.5 sigma2 <- 1 n <- 20 theta <- rnorm(n, mu, sqrt(tau2)) y <- rnorm(n, theta, sqrt(sigma2)) ### set prior parameters ### mu0 <- 0 tau20 <- 1 alpha <- 1 beta <- 1 # models stancodeH0 <- 'data { int n; // number of observations vector[n] y; // observations real alpha; real beta; real sigma2; } parameters { real tau2; // group-level variance vector[n] theta; // participant effects } model { target += inv_gamma_lpdf(tau2 | alpha, beta); target += normal_lpdf(theta | 0, sqrt(tau2)); target += normal_lpdf(y | theta, sqrt(sigma2)); } ' # compile models stanmodelH0 <- suppressWarnings( stan_model(model_code = stancodeH0, model_name="stanmodel") ) # fit models stanobjectH0 <- sampling(stanmodelH0, data = list(y = y, n = n, alpha = alpha, beta = beta, sigma2 = sigma2), iter = 3500, warmup = 500, chains = 4, show_messages = FALSE, refresh = 0) expect_is( H0_bridge_norm <- bridge_sampler(samples = stanobjectH0, method = "normal", silent = TRUE) , "bridge") expect_is( H0_bridge_norm_rep <-bridge_sampler(stanobjectH0, method = "normal", repetitions = 2, silent = TRUE) , "bridge_list") expect_is( H0_bridge_warp3 <- bridge_sampler(stanobjectH0, method = "warp3", silent = TRUE) , "bridge") expect_is( H0_bridge_warp3_rep <- bridge_sampler(stanobjectH0, method = "warp3", repetitions = 2, silent = TRUE) , "bridge_list") expect_equal( H0_bridge_norm$logml, log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), tolerance = 0.1) expect_equal( H0_bridge_warp3$logml, log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), tolerance = 0.1) expect_equal( H0_bridge_norm_rep$logml, rep(log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), 2), tolerance = 0.1) expect_equal( H0_bridge_warp3_rep$logml, rep(log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), 2), tolerance = 0.1) } }) test_that("stan_bridge_sampler in multicore", { testthat::skip_on_cran() testthat::skip_on_travis() testthat::skip_on_os("windows") if (require(rstan)) { set.seed(12345) mu <- 0 tau2 <- 0.5 sigma2 <- 1 n <- 20 theta <- rnorm(n, mu, sqrt(tau2)) y <- rnorm(n, theta, sqrt(sigma2)) ### set prior parameters ### mu0 <- 0 tau20 <- 1 alpha <- 1 beta <- 1 # models stancodeH0 <- 'data { int n; // number of observations vector[n] y; // observations real alpha; real beta; real sigma2; } parameters { real tau2; // group-level variance vector[n] theta; // participant effects } model { target += inv_gamma_lpdf(tau2 | alpha, beta); target += normal_lpdf(theta | 0, sqrt(tau2)); target += normal_lpdf(y | theta, sqrt(sigma2)); } ' # compile models stanmodelH0 <- suppressWarnings( stan_model(model_code = stancodeH0, model_name="stanmodel") ) # fit models stanobjectH0 <- sampling(stanmodelH0, data = list(y = y, n = n, alpha = alpha, beta = beta, sigma2 = sigma2), iter = 2500, warmup = 500, chains = 4, show_messages 
= FALSE, refresh = 0) expect_is( H0_bridge_norm <- bridge_sampler(stanobjectH0, method = "normal", silent = TRUE, cores = 2) , "bridge") expect_is( H0_bridge_warp3 <- bridge_sampler(stanobjectH0, method = "warp3", silent = TRUE, cores = 2) , "bridge") expect_equal( H0_bridge_norm$logml, log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), tolerance = 0.1) expect_equal( H0_bridge_warp3$logml, log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), tolerance = 0.1) } }) bridgesampling/tests/testthat/test-stanreg_bridge_sampler_basic.R0000644000176200001440000000215513663004467025222 0ustar liggesusers context('bridge_sampler.stanreg works.') test_that("stan_bridge_sampler", { if (require(rstanarm)) { fit_1 <- stan_glm(mpg ~ wt + qsec + am, data = mtcars, chains = 2, cores = 2, iter = 5000, diagnostic_file = file.path(tempdir(), "df.csv")) bridge_norm <- bridge_sampler(fit_1) fit_2 <- update(fit_1, formula = . ~ . + cyl) bridge_warp <- bridge_sampler(fit_2, method = "warp3") expect_true(bridge_norm$logml > bridge_warp$logml) } }) test_that("stan_bridge_sampler in multicore", { testthat::skip_on_cran() testthat::skip_on_travis() #testthat::skip_on_os("windows") if (require(rstanarm)) { fit_1 <- stan_glm(mpg ~ wt + qsec + am, data = mtcars, chains = 2, cores = 2, iter = 5000, diagnostic_file = file.path(tempdir(), "df.csv")) bridge_norm <- bridge_sampler(fit_1, cores = 2) fit_2 <- update(fit_1, formula = . ~ . + cyl) bridge_warp <- bridge_sampler(fit_2, method = "warp3", cores = 2) expect_true(bridge_norm$logml > bridge_warp$logml) } }) bridgesampling/tests/testthat/test-bridge_sampler_parallel.R0000644000176200001440000000724213663004467024214 0ustar liggesusers context('basic bridge sampling behavior normal parallel') test_that("bridge sampler matches anlytical value normal example", { testthat::skip_on_cran() testthat::skip_on_travis() # library(bridgesampling) library(mvtnorm) x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) colnames(x) <- c("x1", "x2") log_density <- function(s, data) { -.5*t(s)%*%s } assign("log_density", log_density, envir = .GlobalEnv) lb <- rep(-Inf, 2) ub <- rep(Inf, 2) names(lb) <- names(ub) <- colnames(x) bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "normal", cores = 2, silent = TRUE) bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "warp3", cores = 2, silent = TRUE) bridge_normal_c <- bridge_sampler(samples = x, log_posterior = "log_density", data = NULL, lb = lb, ub = ub, method = "normal", cores = 2, silent = TRUE, envir = sys.frame(sys.nframe())) bridge_warp3_c <- bridge_sampler(samples = x, log_posterior = "log_density", data = NULL, lb = lb, ub = ub, method = "warp3", cores = 2, silent = TRUE, envir = sys.frame(sys.nframe())) expect_equal(bridge_normal$logml, expected = log(2*pi), tolerance = 0.01) expect_equal(bridge_warp3$logml, expected = log(2*pi), tolerance = 0.01) expect_equal(bridge_normal_c$logml, expected = log(2*pi), tolerance = 0.01) expect_equal(bridge_warp3_c$logml, expected = log(2*pi), tolerance = 0.01) # test dots argument mu <- c(1, 2) x <- rmvnorm(1e4, mean = mu, sigma = diag(2)) colnames(x) <- c("x1", "x2") log_density <- function(s, data, ...) 
  { -.5*t(s - mu) %*% (s - mu) }
  assign("log_density", log_density, envir = .GlobalEnv)

  lb <- rep(-Inf, 2)
  ub <- rep(Inf, 2)
  names(lb) <- names(ub) <- colnames(x)

  bridge_normal_dots <- bridge_sampler(samples = x, log_posterior = log_density,
                                       mu, data = NULL, lb = lb, ub = ub,
                                       method = "normal", cores = 2, silent = TRUE)
  bridge_warp3_dots <- bridge_sampler(samples = x, log_posterior = log_density,
                                      mu, data = NULL, lb = lb, ub = ub,
                                      method = "warp3", cores = 2, silent = TRUE)
  bridge_normal_c_dots <- bridge_sampler(samples = x, log_posterior = "log_density",
                                         mu, data = NULL, lb = lb, ub = ub,
                                         method = "normal", cores = 2, silent = TRUE,
                                         envir = sys.frame(sys.nframe()))
  # ls.str(envir = sys.frame(sys.nframe()))
  bridge_warp3_c_dots <- bridge_sampler(samples = x, log_posterior = "log_density",
                                        mu, data = NULL, lb = lb, ub = ub,
                                        method = "warp3", cores = 2, silent = TRUE,
                                        envir = sys.frame(sys.nframe()))

  expect_equal(bridge_normal_dots$logml, expected = log(2*pi), tolerance = 0.01)
  expect_equal(bridge_warp3_dots$logml, expected = log(2*pi), tolerance = 0.01)
  expect_equal(bridge_normal_c_dots$logml, expected = log(2*pi), tolerance = 0.01)
  expect_equal(bridge_warp3_c_dots$logml, expected = log(2*pi), tolerance = 0.01)
})
bridgesampling/tests/testthat/unnormalized_normal_density_mu.cpp0000644000176200001440000000121013663004467025271 0ustar liggesusers
// load Rcpp
#include <Rcpp.h>
#include <RcppEigen.h>  // also pulls in the Eigen headers used below
using namespace Rcpp;
using Eigen::VectorXd;
using Eigen::Map;

//------------------------------------------------------------------------------
// unnormalized standard multivariate normal density function (log)
//------------------------------------------------------------------------------

// [[Rcpp::depends(RcppEigen)]]
// [[Rcpp::export]]
double log_densityRcpp_mu(NumericVector x, SEXP data, NumericVector mu) {
  VectorXd xe(as<Map<VectorXd> >(x));
  VectorXd mue(as<Map<VectorXd> >(mu));
  return -0.5*(xe - mue).transpose()*(xe - mue);
}
bridgesampling/tests/testthat/test-bridge_sampler_Rcpp_parallel.R0000644000176200001440000000552213663004467025177 0ustar liggesusers
context('basic bridge sampling behavior normal Rcpp parallel')

test_that("bridge sampler matches analytical value normal example", {
  testthat::skip_on_cran()
  testthat::skip_on_travis()

  # library(bridgesampling)
  library(mvtnorm)

  if(require(RcppEigen)) {
    x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2))
    colnames(x) <- c("x1", "x2")
    lb <- rep(-Inf, 2)
    ub <- rep(Inf, 2)
    names(lb) <- names(ub) <- colnames(x)

    Rcpp::sourceCpp(file = "unnormalized_normal_density.cpp")
    Rcpp::sourceCpp(file = "unnormalized_normal_density.cpp", env = .GlobalEnv)

    bridge_normal <- bridge_sampler(samples = x, log_posterior = "log_densityRcpp",
                                    data = NULL, lb = lb, ub = ub,
                                    method = "normal", packages = "RcppEigen",
                                    rcppFile = "unnormalized_normal_density.cpp",
                                    cores = 2, silent = TRUE)
    bridge_warp3 <- bridge_sampler(samples = x, log_posterior = "log_densityRcpp",
                                   data = NULL, lb = lb, ub = ub,
                                   method = "warp3", packages = "RcppEigen",
                                   rcppFile = "unnormalized_normal_density.cpp",
                                   cores = 2, silent = TRUE)

    expect_equal(bridge_normal$logml, expected = log(2*pi), tolerance = 0.01)
    expect_equal(bridge_warp3$logml, expected = log(2*pi), tolerance = 0.01)

    # test dots argument
    mu <- c(1, 2)
    x <- rmvnorm(1e4, mean = mu, sigma = diag(2))
    colnames(x) <- c("x1", "x2")
    lb <- rep(-Inf, 2)
    ub <- rep(Inf, 2)
    names(lb) <- names(ub) <- colnames(x)

    Rcpp::sourceCpp(file = "unnormalized_normal_density_mu.cpp")
    Rcpp::sourceCpp(file = "unnormalized_normal_density_mu.cpp", env = .GlobalEnv)

    bridge_normal_dots <-
      bridge_sampler(samples = x, log_posterior = "log_densityRcpp_mu",
                     mu, data = NULL, lb = lb, ub = ub,
                     method = "normal", packages = "RcppEigen",
                     rcppFile = "unnormalized_normal_density_mu.cpp",
                     cores = 2, silent = TRUE)
    bridge_warp3_dots <- bridge_sampler(samples = x, log_posterior = "log_densityRcpp_mu",
                                        mu, data = NULL, lb = lb, ub = ub,
                                        method = "warp3", packages = "RcppEigen",
                                        rcppFile = "unnormalized_normal_density_mu.cpp",
                                        cores = 2, silent = TRUE)

    expect_equal(bridge_normal_dots$logml, expected = log(2*pi), tolerance = 0.01)
    expect_equal(bridge_warp3_dots$logml, expected = log(2*pi), tolerance = 0.01)
  }
})
bridgesampling/tests/testthat/unnormalized_normal_density.cpp0000644000176200001440000000107213663004467024576 0ustar liggesusers
// load Rcpp
#include <Rcpp.h>
#include <RcppEigen.h>  // also pulls in the Eigen headers used below
using namespace Rcpp;
using Eigen::VectorXd;
using Eigen::Map;

//------------------------------------------------------------------------------
// unnormalized standard multivariate normal density function (log)
//------------------------------------------------------------------------------

// [[Rcpp::depends(RcppEigen)]]
// [[Rcpp::export]]
double log_densityRcpp(NumericVector x, SEXP data) {
  VectorXd xe(as<Map<VectorXd> >(x));
  return -0.5*xe.transpose()*xe;
}
bridgesampling/tests/testthat/test-vignette_example_nimble.R0000644000176200001440000001462013663004467024245 0ustar liggesusers
context('test vignette bridgesampling_example_nimble.Rmd')

test_that("bridge sampler yields correct results", {
  testthat::skip_on_cran()
  testthat::skip_on_travis()

  # library(bridgesampling)
  if (require(nimble)) {

    ### generate data ###
    set.seed(12345)
    mu <- 0
    tau2 <- 0.5
    sigma2 <- 1
    n <- 20
    theta <- rnorm(n, mu, sqrt(tau2))
    y <- rnorm(n, theta, sqrt(sigma2))

    ### set prior parameters ###
    mu0 <- 0
    tau20 <- 1
    alpha <- 1
    beta <- 1

    # models
    codeH0 <- nimbleCode({
      invTau2 ~ dgamma(1, 1)
      tau2 <- 1/invTau2
      for (i in 1:20) {
        theta[i] ~ dnorm(0, sd = sqrt(tau2))
        y[i] ~ dnorm(theta[i], sd = 1)
      }
    })
    codeH1 <- nimbleCode({
      mu ~ dnorm(0, sd = 1)
      invTau2 ~ dgamma(1, 1)
      tau2 <- 1/invTau2
      for (i in 1:20) {
        theta[i] ~ dnorm(mu, sd = sqrt(tau2))
        y[i] ~ dnorm(theta[i], sd = 1)
      }
    })

    ## steps for H0:
    modelH0 <- nimbleModel(codeH0)
    modelH0$setData(y = y) # set data
    cmodelH0 <- compileNimble(modelH0) # make compiled version from generated C++

    ## steps for H1:
    modelH1 <- nimbleModel(codeH1)
    modelH1$setData(y = y) # set data
    cmodelH1 <- compileNimble(modelH1) # make compiled version from generated C++

    # build MCMC functions, skipping customization of the configuration.
    mcmcH0 <- buildMCMC(modelH0,
                        monitors = modelH0$getNodeNames(stochOnly = TRUE,
                                                        includeData = FALSE))
    mcmcH1 <- buildMCMC(modelH1,
                        monitors = modelH1$getNodeNames(stochOnly = TRUE,
                                                        includeData = FALSE))

    # compile the MCMC function via generated C++
    cmcmcH0 <- compileNimble(mcmcH0, project = modelH0)
    cmcmcH1 <- compileNimble(mcmcH1, project = modelH1)

    # run the MCMC. This is a wrapper for cmcmc$run() and extraction of samples.
# the object samplesH1 is actually not needed as the samples are also in cmcmcH1 samplesH0 <- runMCMC(cmcmcH0, niter = 1e5, nburnin = 1000, nchains = 2, progressBar = FALSE) samplesH1 <- runMCMC(cmcmcH1, niter = 1e5, nburnin = 1000, nchains = 2, progressBar = FALSE) # compute log marginal likelihood via bridge sampling for H0 H0.bridge <- bridge_sampler(cmcmcH0, silent = TRUE) # compute log marginal likelihood via bridge sampling for H1 H1.bridge <- bridge_sampler(cmcmcH1, silent = TRUE) # compute percentage errors H0.error <- error_measures(H0.bridge)$percentage H1.error <- error_measures(H1.bridge)$percentage # compute Bayes factor BF01 <- bf(H0.bridge, H1.bridge) # compute posterior model probabilities (assuming equal prior model probabilities) post1 <- post_prob(H0.bridge, H1.bridge) # compute posterior model probabilities (using user-specified prior model probabilities) post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) # "exact" ml H1 mH1 <- function(data, rel.tol = 1e-10) { y <- data$y n <- data$n mu0 <- data$mu0 tau20 <- data$tau20 alpha <- data$alpha beta <- data$beta sigma2 <- data$sigma2 mH1integrand <- function(tau2, y, sigma2, mu0, tau20, alpha, beta) { (sigma2 + tau2)^(-n/2) * exp(-1/2 * ((n*mean(y)^2 + (n - 1)*sd(y)^2)/(sigma2 + tau2) + mu0^2/tau20 - ((n*mean(y))/(sigma2 + tau2) + mu0/tau20)^2 / (n/(sigma2 + tau2) + 1/tau20))) * (n/(sigma2 + tau2) + 1/tau20)^(-1/2) * tau2^(-alpha - 1) * exp(-beta/tau2) } (2*pi)^(-n/2) * (tau20)^(-1/2) * beta^alpha/gamma(alpha) * integrate(mH1integrand, 0, Inf, rel.tol = rel.tol, y = y, sigma2 = sigma2, mu0 = mu0, tau20 = tau20, alpha = alpha, beta = beta)$value } exact_logmlH1 <- log(mH1(list(y = y, n = n, mu0 = mu0, tau20 = tau20, alpha = alpha, beta = beta, sigma2 = sigma2))) # "exact" ml H1 mH0 <- function(data, rel.tol = 1e-10) { y <- data$y n <- data$n alpha <- data$alpha beta <- data$beta sigma2 <- data$sigma2 mH0integrand <- function(tau2, y, sigma2, alpha, beta) { n <- length(y) (sigma2 + tau2)^(-n/2) * exp(-(n*mean(y)^2 + (n - 1)*sd(y)^2)/ (2*(sigma2 + tau2))) * tau2^(-alpha - 1) * exp(-beta/tau2) } (2*pi)^(-n/2) * beta^alpha/gamma(alpha) * integrate(mH0integrand, 0, Inf, rel.tol = rel.tol, y = y, sigma2 = sigma2, alpha = alpha, beta = beta)$value } exact_logmlH0 <- log(mH0(list(y = y, n = n, alpha = alpha, beta = beta, sigma2 = sigma2))) exact_BF01 <- exp(exact_logmlH0 - exact_logmlH1) H0.bridge.curr <- H0.bridge H1.bridge.curr <- H1.bridge BF01.curr <- BF01 post1.curr <- post1 post2.curr <- post2 # load(system.file("extdata/", "vignette_example_nimble.RData", # package = "bridgesampling")) expect_equal( H0.bridge.curr$logml, expected = exact_logmlH0, tolerance = 0.01 ) expect_equal( H1.bridge.curr$logml, expected = exact_logmlH1, tolerance = 0.01 ) expect_equal( BF01.curr$bf, expected = exact_BF01, tolerance = 0.01 ) expect_equal( H0.bridge.curr$logml, expected = H0.bridge$logml, tolerance = 0.01 ) expect_equal( H1.bridge.curr$logml, expected = H1.bridge$logml, tolerance = 0.01 ) expect_equal( BF01.curr$bf, expected = BF01$bf, tolerance = 0.01 ) expect_equal( post1.curr, expected = post1, tolerance = 0.01 ) expect_equal( post2.curr, expected = post2, tolerance = 0.01 ) } }) bridgesampling/tests/testthat/test-bf.R0000644000176200001440000000473513663004467017754 0ustar liggesusers context('bridge sampling bf function') test_that("bf various basic checks", { # library(bridgesampling) library(mvtnorm) x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) colnames(x) <- c("x1", "x2") log_density <- function(s, data) { 
-.5*t(s)%*%s } lb <- rep(-Inf, 2) ub <- rep(Inf, 2) names(lb) <- names(ub) <- colnames(x) # repetitions = 1 bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE) bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE) expect_error(bf(bridge_normal, 4), "class 'bridge' or 'bridge_list'") BF <- bf(bridge_normal, bridge_warp3) log_BF <- bf(bridge_normal, bridge_warp3, log = TRUE) expect_output(print(BF), "Estimated Bayes factor") expect_output(print(log_BF), "Estimated log Bayes factor") BF2 <- bayes_factor(bridge_normal, bridge_warp3) log_BF2 <- bayes_factor(bridge_normal, bridge_warp3, log = TRUE) expect_output(print(BF2), "Estimated Bayes factor") expect_output(print(log_BF2), "Estimated log Bayes factor") # repetitions > 1 bridge_normal_mult <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE, repetitions = 2) bridge_warp3_mult <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE, repetitions = 2) BF_mult <- bf(bridge_normal_mult, bridge_warp3_mult) log_BF_mult <- bf(bridge_normal_mult, bridge_warp3_mult, log = TRUE) expect_output(print(BF_mult), "based on medians") expect_output(print(log_BF_mult), "based on medians") ## bf with multi and singular objects expect_is(suppressWarnings(bf(bridge_normal_mult, bridge_normal)), "bf_bridge_list") expect_is(bf(bridge_normal, bridge_normal_mult), "bf_bridge") expect_error(bf(bridge_normal_mult, 4), "class 'bridge' or 'bridge_list'") # default BF <- bf(1, 2) log_BF <- bf(1, 2, log = TRUE) expect_output(print(BF), "Bayes factor") expect_output(print(log_BF), "Log Bayes factor") }) bridgesampling/tests/testthat/test-post_prob.R0000644000176200001440000000337013663004467021366 0ustar liggesusers context('post_prob with lists') test_that("post_prob works with lists and with NAs.", { bridge_o <- structure(list(logml = c(4291.14352476047, 4293.29076119542, 4291.96372581169, 4293.02187182362, NA, NA, 4290.9761730488, 4293.32075269401, 4293.5762219227, 4294.02761288449), niter = c(104, 16, 52, 8, 1000, 1000, 167, 16, 21, 44), method = "normal", repetitions = 10), .Names = c("logml", "niter", "method", "repetitions"), class = "bridge_list") H0L <- structure(list(logml = c(-20.8088381186739, -20.8072772698116, -20.808454454621, -20.8083419072281, -20.8087870541247, -20.8084887398113, -20.8086023582344, -20.8079083169745, -20.8083048489095, -20.8090050811436 ), niter = c(4, 4, 4, 4, 4, 4, 4, 4, 4, 4), method = "normal", repetitions = 10), .Names = c("logml", "niter", "method", "repetitions"), class = "bridge_list") H1L <- structure(list(logml = c(-17.961665507006, -17.9611290723151, -17.9607509604499, -17.9608629535992, -17.9602093576442, -17.9600223300432, -17.9610157118017, -17.9615557696561, -17.9608437034849, -17.9606743200309 ), niter = c(4, 4, 4, 4, 4, 4, 4, 4, 3, 4), method = "normal", repetitions = 10), .Names = c("logml", "niter", "method", "repetitions"), class = "bridge_list") H0 <- structure(list(logml = -20.8084543022433, niter = 4, method = "normal"), .Names = c("logml", "niter", "method"), class = "bridge") expect_is(post_prob(H1L, H0L), "matrix") expect_warning(post_prob(H1L, H0L, H0), "recycled") expect_warning(post_prob(H1L, H0L, 4), "ignored") expect_warning(post_prob(H0, H0L, 4), "ignored") expect_warning(post_prob(H1L, H0L, bridge_o), 
"NA") expect_error(post_prob(H1L, 4, 5, 6), "one object") expect_error(post_prob(H0, 4, 5, 6), "one object") }) bridgesampling/tests/testthat/test-bridge_sampler_summary_method.R0000644000176200001440000000535713663004467025462 0ustar liggesusers context('bridge sampling summary method') test_that("bridge sampler summary method correctly displayed", { # library(bridgesampling) library(mvtnorm) x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) colnames(x) <- c("x1", "x2") log_density <- function(s, data) { -.5*t(s)%*%s } lb <- rep(-Inf, 2) ub <- rep(Inf, 2) names(lb) <- names(ub) <- colnames(x) # repetitions = 1 bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE) bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE) s_normal <- summary(bridge_normal) s_warp3 <- summary(bridge_warp3) expect_equal(names(s_normal), c("Logml_Estimate", "Relative_Mean_Squared_Error", "Coefficient_of_Variation", "Percentage_Error", "Method", "Repetitions")) expect_equal(names(s_warp3), c("Logml_Estimate", "Method", "Repetitions")) expect_output(print(s_normal), 'All error measures are approximate.') expect_output(print(s_warp3), 'No error measures are available for method = "warp3"') # repetitions > 1 bridge_normal_2 <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE, repetitions = 2) bridge_warp3_2 <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE, repetitions = 2) s_normal_2 <- summary(bridge_normal_2) s_warp3_2 <- summary(bridge_warp3_2) expect_equal(names(s_normal_2), c("Logml_Estimate", "Min", "Max", "Interquartile_Range", "Method", "Repetitions")) expect_equal(names(s_warp3_2), c("Logml_Estimate", "Min", "Max", "Interquartile_Range", "Method", "Repetitions")) expect_output(print(s_normal_2), 'All error measures are based on 2 estimates.') expect_output(print(s_warp3_2), 'All error measures are based on 2 estimates.') }) bridgesampling/tests/testthat/test-bridge_sampler_print_method.R0000644000176200001440000000405013663004467025106 0ustar liggesusers context('bridge sampling print method') test_that("bridge sampler print method correctly displayed", { # library(bridgesampling) library(mvtnorm) x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) colnames(x) <- c("x1", "x2") log_density <- function(s, data) { -.5*t(s)%*%s } lb <- rep(-Inf, 2) ub <- rep(Inf, 2) names(lb) <- names(ub) <- colnames(x) # repetitions = 1 bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE) bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE) expect_output(print(bridge_normal), "Bridge sampling estimate of the log marginal likelihood") expect_output(print(bridge_warp3), "Bridge sampling estimate of the log marginal likelihood") # repetitions > 1 bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "normal", silent = TRUE, repetitions = 2) bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, data = NULL, lb = lb, ub = ub, method = "warp3", silent = TRUE, repetitions = 2) expect_output(print(bridge_normal), "Median of") expect_output(print(bridge_warp3), "Median of") }) 
test_that("prints with NAs with warning.", { bridge_o <- structure(list(logml = c(4291.14352476047, 4293.29076119542, 4291.96372581169, 4293.02187182362, NA, NA, 4290.9761730488, 4293.32075269401, 4293.5762219227, 4294.02761288449), niter = c(104, 16, 52, 8, 1000, 1000, 167, 16, 21, 44), method = "normal", repetitions = 10), .Names = c("logml", "niter", "method", "repetitions"), class = "bridge_list") expect_warning(print(bridge_o), "NA") }) bridgesampling/tests/testthat/test-stan_bridge_sampler_bugs.R0000644000176200001440000001366614026163110024375 0ustar liggesusers context('Stan Bridge Sampler Bugs') test_that("subscript out of bounds error", { ## https://github.com/quentingronau/bridgesampling/issues/26 stan_mod = " data{ int M; int J; int T; int E; int G; int N[G]; int ii[M]; int jj[M]; int gg[M]; int g_all[sum(N)]; int y[M]; matrix[J,J] obs_corr[G]; } transformed data{ int N_all = sum(N); } parameters{ ordered[T] thresholds_raw[G,J]; matrix[E,J] lam[G]; matrix[N_all,E] eta; matrix[N_all,J] ystar_raw; } transformed parameters { ordered[T] thresholds[G,J]; for(g in 1:G) for(j in 1:J) thresholds[g,j] = thresholds_raw[g,j] * 5; } model{ matrix[N_all,J] ystar; int pos = 1; target += std_normal_lpdf(to_vector(ystar_raw)); target += std_normal_lpdf(to_vector(eta)); for(g in 1:G){ int g_ids[N[g]] = segment(g_all,pos,N[g]); target += normal_lpdf(to_vector(eta)| 0,5); for(j in 1:J) target += std_normal_lpdf(thresholds_raw[g,j]); ystar[g_ids,] = eta[g_ids,] * lam[g] + ystar_raw[g_ids,]; pos += N[g]; } for(m in 1:M) target += ordered_logistic_lpmf(y[m] | ystar[ii[m],jj[m]], thresholds[gg[m],jj[m]]); } " testthat::skip_on_cran() testthat::skip_on_travis() testthat::skip_if_not_installed("rstan") library("rstan") # source("tests/testthat/test_dat.txt") source("test_dat.txt") suppressWarnings( mod <- stan(model_code=stan_mod,data=test_dat, chains = 2, refresh = 0) ) expect_warning(object = bridge_sampler(mod, silent=TRUE), regexp = "Infinite value in iterative scheme, returning NA.") }) test_that("bridge_sampler.stanfit multicore works for one-parameter model.", { skip_on_cran() skip_on_travis() skip_on_os("windows") if (require(rstan)) { set.seed(12345) # compute difference scores n <- 10 y <- rnorm(n) # models stancodeH0 <- ' data { int n; // number of observations vector[n] y; // observations } parameters { real sigma2; // variance parameter } model { target += log(1/sigma2); // Jeffreys prior on sigma2 target += normal_lpdf(y | 0, sqrt(sigma2)); // likelihood } ' # compile models suppressWarnings( stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") ) # fit models stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n), iter = 10000, warmup = 1000, chains = 4, control = list(adapt_delta = 0.95), refresh = 0) ######### bridge sampling ########### suppressWarnings(H0 <- bridge_sampler(stanfitH0, cores = 2, silent = TRUE)) expect_s3_class(H0, "bridge") } }) test_that("turtle example",{ skip_on_cran() skip_on_travis() if (require(rstan)) { data("turtles") ### m1 (model with random intercepts) ### m1_code_nc <- "data { int nobs; int y[nobs]; real x[nobs]; int m; int clutch[nobs]; } parameters { real alpha0_raw; real alpha1_raw; vector[m] b_raw; real sigma2; } transformed parameters { vector[m] b; real sigma = sqrt(sigma2); real alpha0 = sqrt(10.0)*alpha0_raw; real alpha1 = sqrt(10.0)*alpha1_raw; b = b_raw*sigma; } model { // priors target += -2*log(1 + sigma2); // p(sigma2) = 1/(1 + sigma2)^2 target += normal_lpdf(alpha0_raw | 0, 1); target += normal_lpdf(alpha1_raw | 0, 
1); // random effects target += normal_lpdf(b_raw | 0, 1); // likelihood for (i in 1:nobs) target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1*x[i] + b[clutch[i]])); }" suppressWarnings( stanobject_m1_nc <- stan(model_code = m1_code_nc, data = list(y = turtles$y, x = turtles$x, nobs = nrow(turtles), m = max(turtles$clutch), clutch = turtles$clutch), iter = 10500, warmup = 500, chains = 4, refresh = 0) ) bs_m1_nc <- bridge_sampler(stanobject_m1_nc, method = "warp3", repetitions = 25, silent=TRUE) m0_code_nc <- "data { int nobs; int y[nobs]; real x[nobs]; } parameters { real alpha0_raw; real alpha1_raw; } transformed parameters { real alpha0 = sqrt(10.0)*alpha0_raw; real alpha1 = sqrt(10.0)*alpha1_raw; } model { // priors target += normal_lpdf(alpha0_raw | 0, 1); target += normal_lpdf(alpha1_raw | 0, 1); // likelihood for (i in 1:nobs) target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1*x[i])); }" suppressWarnings( stanobject_m0_nc <- stan(model_code = m0_code_nc, data = list(y = turtles$y, x = turtles$x, nobs = nrow(turtles), m = max(turtles$clutch), clutch = turtles$clucth), iter = 10500, warmup = 500, chains = 4, refresh = 0) ) bs_m0_nc <- bridge_sampler(stanobject_m0_nc, method = "warp3", repetitions = 25, silent=TRUE) expect_equal(bf(bs_m0_nc, bs_m1_nc)$bf, rep(1.27, 25), tolerance = 0.02) } }) bridgesampling/tests/testthat/test-vignette_stan_ttest.R0000644000176200001440000001060213663004467023450 0ustar liggesusers context('test vignette bridgesampling_stan_ttest.Rmd') test_that("bridge sampler yields correct results", { testthat::skip_on_cran() testthat::skip_on_travis() # library(bridgesampling) if (require(rstan) && require(BayesFactor)) { set.seed(12345) # Sleep data from t.test example data(sleep) # compute difference scores y <- sleep$extra[sleep$group == 2] - sleep$extra[sleep$group == 1] n <- length(y) # models stancodeH0 <- ' data { int n; // number of observations vector[n] y; // observations } parameters { real sigma2; // variance parameter } model { target += log(1/sigma2); // Jeffreys prior on sigma2 target += normal_lpdf(y | 0, sqrt(sigma2)); // likelihood } ' stancodeH1 <- ' data { int n; // number of observations vector[n] y; // observations real r; // Cauchy prior scale } parameters { real delta; real sigma2;// variance parameter } model { target += cauchy_lpdf(delta | 0, r); // Cauchy prior on delta target += log(1/sigma2); // Jeffreys prior on sigma2 target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood } ' # compile models stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel") # fit models stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n), iter = 20000, warmup = 1000, chains = 4, cores = 1, control = list(adapt_delta = .99)) stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, r = 1/sqrt(2)), iter = 20000, warmup = 1000, chains = 4, cores = 1, control = list(adapt_delta = .99)) set.seed(12345) suppressWarnings(H0 <- bridge_sampler(stanfitH0, silent = TRUE)) H1 <- bridge_sampler(stanfitH1, silent = TRUE) # compute percentage errors H0.error <- error_measures(H0)$percentage H1.error <- error_measures(H1)$percentage # compute Bayes factor BF10 <- bf(H1, H0) # BayesFactor result BF10.BayesFactor <- extractBF(ttestBF(y), onlybf = TRUE, logbf = FALSE) # one-sided test stancodeHplus <- ' data { int n; // number of observations vector[n] y; // observations real r; // Cauchy prior scale } parameters { real delta; // constrained to be 
positive real sigma2;// variance parameter } model { target += cauchy_lpdf(delta | 0, r) - cauchy_lccdf(0 | 0, r); // Cauchy prior on delta target += log(1/sigma2); // Jeffreys prior on sigma2 target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood } ' # compile and fit model stanmodelHplus <- stan_model(model_code = stancodeHplus, model_name="stanmodel") stanfitHplus <- sampling(stanmodelHplus, data = list(y = y, n = n, r = 1/sqrt(2)), iter = 30000, warmup = 1000, chains = 4, control = list(adapt_delta = .99)) Hplus <- bridge_sampler(stanfitHplus, silent = TRUE) Hplus.error <- error_measures(Hplus)$percentage # compute Bayes factor BFplus0 <- bf(Hplus, H0) BFplus0.BayesFactor <- extractBF(ttestBF(y, nullInterval = c(0, Inf)), onlybf = TRUE, logbf = FALSE)[1] H0.curr <- H0 H1.curr <- H1 Hplus.curr <- Hplus BF10.curr <- BF10 BFplus0.curr <- BFplus0 load(system.file("extdata/", "vignette_stan_ttest.RData", package = "bridgesampling")) expect_equal( H0.curr$logml, expected = H0$logml, tolerance = 0.01 ) expect_equal( H1.curr$logml, expected = H1$logml, tolerance = 0.01 ) expect_equal( BF10.curr$bf, expected = BF10$bf, tolerance = 0.01 ) expect_equal( BF10.curr$bf, expected = BF10.BayesFactor, tolerance = 0.03 ) expect_equal( BFplus0.curr$bf, expected = BFplus0$bf, tolerance = 0.01 ) expect_equal( BFplus0.curr$bf, expected = BFplus0.BayesFactor, tolerance = 0.03 ) } }) bridgesampling/tests/testthat/test-nimble_bridge_sampler.R0000644000176200001440000000335313663004467023665 0ustar liggesusers context('bridge_sampler.nimble works.') test_that("nimble support works", { testthat::skip_on_cran() testthat::skip_on_travis() testthat::skip_if_not_installed("nimble") if (require(nimble)) { set.seed(12345) mu <- 0 tau2 <- 0.5 sigma2 <- 1 n <- 20 theta <- rnorm(n, mu, sqrt(tau2)) y <- rnorm(n, theta, sqrt(sigma2)) # create model codeH1 <- nimbleCode({ mu ~ dnorm(0, sd = 1) invTau2 ~ dgamma(1, 1) tau2 <- 1/invTau2 for (i in 1:20) { theta[i] ~ dnorm(mu, sd = sqrt(tau2)) y[i] ~ dnorm(theta[i], sd = 1) } }) modelH1 <- nimbleModel(codeH1) modelH1$setData(y = y) # set data # make compiled version from generated C++ cmodelH1 <- compileNimble(modelH1) # build an MCMC, skipping customization of the configuration. mcmcH1 <- buildMCMC(modelH1, monitors = modelH1$getNodeNames(stochOnly = TRUE, includeData = FALSE)) # compile the MCMC via generated C++ cmcmcH1 <- compileNimble(mcmcH1, project = modelH1) # run the MCMC. This is a wrapper for cmcmc$run() and extraction of samples. 
# the object samplesH1 is actually not needed as the samples are also in cmcmcH1 samplesH1 <- runMCMC(cmcmcH1, niter = 1e5, nburnin = 1000, nchains = 2, progressBar = FALSE) # bridge sampling bridge_H1 <- bridge_sampler(samples = cmcmcH1, cores = 1, method = "warp3", repetitions = 2) expect_equal(bridge_H1$logml, rep(-37.7983064265064, 2), tolerance = 0.01) } }) bridgesampling/tests/testthat/test-bridge_sampler_mcmc.list.R0000644000176200001440000003140113663004467024303 0ustar liggesusers context('test bridge_sampler mcmc.list method') test_that("bridge sampler matches analytical value", { testthat::skip_on_cran() testthat::skip_on_travis() # library(bridgesampling) if (require(R2jags) && require(runjags)) { ### generate data ### set.seed(12345) mu <- 0 tau2 <- 0.5 sigma2 <- 1 n <- 20 theta <- rnorm(n, mu, sqrt(tau2)) y <- rnorm(n, theta, sqrt(sigma2)) ### set prior parameters ### mu0 <- 0 tau20 <- 1 alpha <- 1 beta <- 1 ### function to get posterior samples ### # H1: mu != 0 getSamplesModelH1 <- function(data, niter = 12000, nburnin = 2000, nchains = 3) { model <- " model { for (i in 1:n) { theta[i] ~ dnorm(mu, invTau2) y[i] ~ dnorm(theta[i], 1/sigma2) } mu ~ dnorm(mu0, 1/tau20) invTau2 ~ dgamma(alpha, beta) tau2 <- 1/invTau2 }" s <- jags(data, parameters.to.save = c("theta", "mu", "invTau2"), model.file = textConnection(model), n.chains = nchains, n.iter = niter, n.burnin = nburnin, n.thin = 1, progress.bar = "none") return(s) } getSamplesModelH1_runjags <- function(data, niter = 12000, nburnin = 2000, nchains = 3) { model <- " model { for (i in 1:n) { theta[i] ~ dnorm(mu, invTau2) y[i] ~ dnorm(theta[i], 1/sigma2) } mu ~ dnorm(mu0, 1/tau20) invTau2 ~ dgamma(alpha, beta) tau2 <- 1/invTau2 }" s <- suppressWarnings(runjags::run.jags(model = model, data = data, monitor = c("theta", "mu", "invTau2"), n.chains = 3, burnin = 2000, sample = 10000, silent.jags = TRUE)) return(s) } ### get posterior samples ### # create data list for Jags data_H1 <- list(y = y, n = length(y), mu0 = mu0, tau20 = tau20, alpha = alpha, beta = beta, sigma2 = sigma2) # fit model samples_H1 <- getSamplesModelH1(data_H1) samples_runjags <- getSamplesModelH1_runjags(data_H1) ### function for evaluating the unnormalized posterior on log scale ### log_posterior_H1 <- function(samples.row, data) { mu <- samples.row[[ "mu" ]] invTau2 <- samples.row[[ "invTau2" ]] theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + dnorm(mu, data$mu0, sqrt(data$tau20), log = TRUE) + dgamma(invTau2, data$alpha, data$beta, log = TRUE) } # specify parameter bounds cn <- colnames(samples_H1$BUGSoutput$sims.matrix) lb_H1 <- rep(-Inf, length(cn) - 1) ub_H1 <- rep(Inf, length(cn) - 1) names(lb_H1) <- names(ub_H1) <- cn[cn != "deviance"] lb_H1[[ "invTau2" ]] <- 0 samples1 <- coda::as.mcmc(samples_H1) samples1 <- samples1[,cn != "deviance"] # mcmc.list bridge_normal <- bridge_sampler(samples = samples1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "normal", silent = TRUE, repetitions = 2) bridge_warp3 <- bridge_sampler(samples = samples1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "warp3", silent = TRUE, repetitions = 2) bridge_normal_m <- bridge_sampler(samples = samples1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "normal", silent = TRUE, repetitions = 2, cores = 2) bridge_warp3_m <- bridge_sampler(samples = 
samples1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "warp3", silent = TRUE, repetitions = 2, cores = 2) # mcmc bridge_normal_s <- bridge_sampler(samples = samples1[[1]], log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "normal", silent = TRUE, repetitions = 2) bridge_warp3_s <- bridge_sampler(samples = samples1[[1]], log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "warp3", silent = TRUE, repetitions = 2) bridge_normal_m_s <- bridge_sampler(samples = samples1[[1]], log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "normal", silent = TRUE, repetitions = 2, cores = 2) bridge_warp3_m_s <- bridge_sampler(samples = samples1[[1]], log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "warp3", silent = TRUE, repetitions = 2, cores = 2) # rjags bridge_normal_j <- bridge_sampler(samples = samples_H1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "normal", silent = TRUE, repetitions = 2) bridge_warp3_j <- bridge_sampler(samples = samples_H1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "warp3", silent = TRUE, repetitions = 2) bridge_normal_jm <- bridge_sampler(samples = samples_H1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "normal", silent = TRUE, repetitions = 2, cores = 2) bridge_warp3_jm <- bridge_sampler(samples = samples_H1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "warp3", silent = TRUE, repetitions = 2, cores = 2) # runjags bridge_normal_r <- bridge_sampler(samples = samples_runjags, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "normal", silent = TRUE, repetitions = 2) bridge_warp3_r <- bridge_sampler(samples = samples_runjags, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "warp3", silent = TRUE, repetitions = 2) bridge_normal_rm <- bridge_sampler(samples = samples_runjags, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "normal", silent = TRUE, repetitions = 2, cores = 2) bridge_warp3_rm <- bridge_sampler(samples = samples_runjags, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H1, method = "warp3", silent = TRUE, repetitions = 2, cores = 2) # "exact" ml mH1 <- function(data, rel.tol = 1e-10) { y <- data$y n <- data$n mu0 <- data$mu0 tau20 <- data$tau20 alpha <- data$alpha beta <- data$beta sigma2 <- data$sigma2 mH1integrand <- function(tau2, y, sigma2, mu0, tau20, alpha, beta) { (sigma2 + tau2)^(-n/2) * exp(-1/2 * ((n*mean(y)^2 + (n - 1)*sd(y)^2)/(sigma2 + tau2) + mu0^2/tau20 - ((n*mean(y))/(sigma2 + tau2) + mu0/tau20)^2 / (n/(sigma2 + tau2) + 1/tau20))) * (n/(sigma2 + tau2) + 1/tau20)^(-1/2) * tau2^(-alpha - 1) * exp(-beta/tau2) } (2*pi)^(-n/2) * (tau20)^(-1/2) * beta^alpha/gamma(alpha) * integrate(mH1integrand, 0, Inf, rel.tol = rel.tol, y = y, sigma2 = sigma2, mu0 = mu0, tau20 = tau20, alpha = alpha, beta = beta)$value } exact_logml <- log(mH1(data_H1)) expect_equal(class(samples1), expected = "mcmc.list") expect_equal( bridge_normal$logml, expected = rep(exact_logml, length(bridge_normal$logml)), tolerance = 0.01 ) expect_equal( bridge_warp3$logml, expected = rep(exact_logml, length(bridge_warp3$logml)), tolerance = 0.01 ) expect_equal( bridge_normal_m$logml, expected = rep(exact_logml, length(bridge_normal_m$logml)), tolerance = 
0.01 ) expect_equal( bridge_warp3_m$logml, expected = rep(exact_logml, length(bridge_warp3_m$logml)), tolerance = 0.01 ) expect_equal(class(samples1[[1]]), expected = "mcmc") expect_equal( bridge_normal_s$logml, expected = rep(exact_logml, length(bridge_normal_s$logml)), tolerance = 0.01 ) expect_equal( bridge_warp3_s$logml, expected = rep(exact_logml, length(bridge_warp3_s$logml)), tolerance = 0.01 ) expect_equal( bridge_normal_m_s$logml, expected = rep(exact_logml, length(bridge_normal_m_s$logml)), tolerance = 0.01 ) expect_equal( bridge_warp3_m_s$logml, expected = rep(exact_logml, length(bridge_warp3_m_s$logml)), tolerance = 0.01 ) expect_equal(class(samples_H1), expected = "rjags") expect_equal( bridge_normal_j$logml, expected = rep(exact_logml, length(bridge_normal_j$logml)), tolerance = 0.01 ) expect_equal( bridge_warp3_j$logml, expected = rep(exact_logml, length(bridge_warp3_j$logml)), tolerance = 0.01 ) expect_equal( bridge_normal_jm$logml, expected = rep(exact_logml, length(bridge_normal_jm$logml)), tolerance = 0.01 ) expect_equal( bridge_warp3_jm$logml, expected = rep(exact_logml, length(bridge_warp3_jm$logml)), tolerance = 0.01 ) expect_equal(class(samples_runjags), expected = "runjags") expect_equal( bridge_normal_r$logml, expected = rep(exact_logml, length(bridge_normal_r$logml)), tolerance = 0.01 ) expect_equal( bridge_warp3_r$logml, expected = rep(exact_logml, length(bridge_warp3_r$logml)), tolerance = 0.01 ) expect_equal( bridge_normal_rm$logml, expected = rep(exact_logml, length(bridge_normal_rm$logml)), tolerance = 0.01 ) expect_equal( bridge_warp3_rm$logml, expected = rep(exact_logml, length(bridge_warp3_rm$logml)), tolerance = 0.01 ) ### check that wrong lb and ub produce errors: ub_H0 <- ub_H1[-2] lb_H0 <- lb_H1[-1] expect_error( bridge_sampler( samples = samples_runjags, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H0 ), "ub does not contain all parameters" ) expect_error( bridge_sampler( samples = samples_runjags, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H0, ub = ub_H1 ), "lb does not contain all parameters" ) expect_error( bridge_sampler( samples = samples1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H0 ), "ub does not contain all parameters" ) expect_error( bridge_sampler( samples = samples1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H0, ub = ub_H1 ), "lb does not contain all parameters" ) expect_error( bridge_sampler( samples = samples_H1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H0, ub = ub_H1 ), "lb does not contain all parameters" ) expect_error( bridge_sampler( samples = samples_H1, log_posterior = log_posterior_H1, data = data_H1, lb = lb_H1, ub = ub_H0 ), "ub does not contain all parameters" ) } }) bridgesampling/tests/testthat.R0000644000176200001440000000011713663004467016376 0ustar liggesusersSys.setenv("R_TESTS" = "") library(testthat) test_check("bridgesampling") bridgesampling/vignettes/0000755000176200001440000000000014036106021015242 5ustar liggesusersbridgesampling/vignettes/bridgesampling_paper_extended.pdf.asis0000644000176200001440000000020313663004467024744 0ustar liggesusers%\VignetteIndexEntry{bridgesampling: An R Package for Estimating Normalizing Constants (Extended)} %\VignetteEngine{R.rsp::asis} bridgesampling/vignettes/bridgesampling_paper.pdf.asis0000644000176200001440000000020613663004467023067 0ustar liggesusers%\VignetteIndexEntry{bridgesampling: An R Package for Estimating Normalizing Constants (JSS version)} 
%\VignetteEngine{R.rsp::asis} bridgesampling/vignettes/bridgesampling_example_jags.Rmd0000644000176200001440000002533413663004467023443 0ustar liggesusers--- title: "Hierarchical Normal Example (JAGS)" author: "Quentin F. Gronau" date: "`r Sys.Date()`" show_toc: true output: knitr:::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Hierarchical Normal Example JAGS} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- In this vignette, we explain how one can compute marginal likelihoods, Bayes factors, and posterior model probabilities using a simple hierarchical normal model implemented in `JAGS`. This vignette uses the same models and data as the [`Stan` vignette](bridgesampling_example_stan.html). ## Model and Data The model that we will use assumes that each of the $n$ observations $y_i$ (where $i$ indexes the observation, $i = 1,2,...,n$) is normally distributed with corresponding mean $\theta_i$ and a common known variance $\sigma^2$: $y_i \sim \mathcal{N}(\theta_i, \sigma^2)$. Each $\theta_i$ is drawn from a normal group-level distribution with mean $\mu$ and variance $\tau^2$: $\theta_i \sim \mathcal{N}(\mu, \tau^2)$. For the group-level mean $\mu$, we use a normal prior distribution of the form $\mathcal{N}(\mu_0, \tau^2_0)$. For the group-level variance $\tau^2$, we use an inverse-gamma prior of the form $\text{Inv-Gamma}(\alpha, \beta)$. We will use `JAGS` to fit the model which parametrizes the normal distribution in terms of the precision (i.e., one over the variance). Consequently, we implement this inverse-gamma prior on $\tau^2$ by placing a gamma prior of the form $\text{Gamma}(\alpha, \beta)$ on the precision; we call this precision parameter `invTau2` in the code. In this example, we are interested in comparing the null model $\mathcal{H}_0$, which posits that the group-level mean $\mu = 0$, to the alternative model $\mathcal{H}_1$, which allows $\mu$ to be different from zero. First, we generate some data from the null model: ```{r} library(bridgesampling) ### generate data ### set.seed(12345) mu <- 0 tau2 <- 0.5 sigma2 <- 1 n <- 20 theta <- rnorm(n, mu, sqrt(tau2)) y <- rnorm(n, theta, sqrt(sigma2)) ``` Next, we specify the prior parameters $\mu_0$, $\tau^2_0$, $\alpha$, and $\beta$: ```{r,eval=FALSE} ### set prior parameters ### mu0 <- 0 tau20 <- 1 alpha <- 1 beta <- 1 ``` ## Fitting the Models Now we can fit the null and the alternative model in `JAGS` (note that it is necessary to install `JAGS` for this). One usually requires a larger number of posterior sample for estimating the marginal likelihood than for simply estimating the model parameters. This is the reason for using a comparatively large number of samples (i.e., 50,000 post burn-in samples per chain) for this comparatively simple model. 
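Concretely, with the settings used in the chunk below (`niter = 52000`, `nburnin = 2000`, and three chains), the number of retained draws works out as follows (a small side calculation, not part of the model code):

```{r, eval=FALSE}
# retained posterior draws implied by the settings used below
52000 - 2000        # 50,000 post burn-in draws per chain
3 * (52000 - 2000)  # 150,000 draws in total across the three chains
```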
```{r, eval=FALSE} library(R2jags) ### functions to get posterior samples ### # H0: mu = 0 getSamplesModelH0 <- function(data, niter = 52000, nburnin = 2000, nchains = 3) { model <- " model { for (i in 1:n) { theta[i] ~ dnorm(0, invTau2) y[i] ~ dnorm(theta[i], 1/sigma2) } invTau2 ~ dgamma(alpha, beta) tau2 <- 1/invTau2 }" s <- jags(data, parameters.to.save = c("theta", "invTau2"), model.file = textConnection(model), n.chains = nchains, n.iter = niter, n.burnin = nburnin, n.thin = 1) return(s) } # H1: mu != 0 getSamplesModelH1 <- function(data, niter = 52000, nburnin = 2000, nchains = 3) { model <- " model { for (i in 1:n) { theta[i] ~ dnorm(mu, invTau2) y[i] ~ dnorm(theta[i], 1/sigma2) } mu ~ dnorm(mu0, 1/tau20) invTau2 ~ dgamma(alpha, beta) tau2 <- 1/invTau2 }" s <- jags(data, parameters.to.save = c("theta", "mu", "invTau2"), model.file = textConnection(model), n.chains = nchains, n.iter = niter, n.burnin = nburnin, n.thin = 1) return(s) } ### get posterior samples ### # create data lists for JAGS data_H0 <- list(y = y, n = length(y), alpha = alpha, beta = beta, sigma2 = sigma2) data_H1 <- list(y = y, n = length(y), mu0 = mu0, tau20 = tau20, alpha = alpha, beta = beta, sigma2 = sigma2) # fit models samples_H0 <- getSamplesModelH0(data_H0) samples_H1 <- getSamplesModelH1(data_H1) ``` ## Specifying the Unnormalized Log Posterior Function The next step is to write the corresponding `log_posterior` (i.e., unnormalized posterior) function for both models. This function takes one draw from the joint posterior and the data object as input and returns the log of the unnormalized joint posterior density. When using MCMC software such as `JAGS` or `Stan`, specifying this function is relatively simple. As a rule of thumb, one only needs to look for all places where a "`~`" sign appears in the model code. The log of the densities on the right-hand side of these "`~`" symbols needs to be evaluated for the relevant quantities and then these log densities values are summed. For example, in the null model, there are three "`~`" signs. Starting at the data-level, we need to evaluate the log of the normal density with mean $\theta_i$ and variance $\sigma^2$ for all $y_i$ and then sum the resulting log density values. Next, we move one step up in the model and evaluate the log of the group-level density for all $\theta_i$. Hence, we evaluate the log of the normal density for $\theta_i$ with mean $\mu = 0$ and variance $\tau^2$ (remember that `JAGS` parametrizes the normal distribution in terms of the precision `invTau2` = $1/\tau^2$; in contrast, `R` parametrizes it in terms of the standard deviation) and sum the resulting log density values. The result of this summation is added to the result of the previous summation for the data-level normal distribution. Finally, we need to evaluate the log of the prior density for `invTau2`. This means that we compute the log density of the gamma distribution with parameters $\alpha$ and $\beta$ for the sampled `invTau2` value and add the resulting log density value to the result of summing the data-level and group-level log densities. The unnormalized log posterior for the alternative model can be obtained in a similar fashion. 
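Written out, the quantity that needs to be evaluated for the null model is the log of the unnormalized joint posterior density

$$p(\theta, 1/\tau^2 \mid y) \propto \prod_{i=1}^n \mathcal{N}(y_i \mid \theta_i, \sigma^2) \times \prod_{i=1}^n \mathcal{N}(\theta_i \mid 0, \tau^2) \times \text{Gamma}(1/\tau^2 \mid \alpha, \beta),$$

and for the alternative model the additional factor $\mathcal{N}(\mu \mid \mu_0, \tau^2_0)$ is multiplied in and the group-level mean $0$ is replaced by $\mu$.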
The resulting functions look as follows: ```{r,eval=FALSE} ### functions for evaluating the unnormalized posteriors on log scale ### log_posterior_H0 <- function(samples.row, data) { mu <- 0 invTau2 <- samples.row[[ "invTau2" ]] theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + dgamma(invTau2, data$alpha, data$beta, log = TRUE) } log_posterior_H1 <- function(samples.row, data) { mu <- samples.row[[ "mu" ]] invTau2 <- samples.row[[ "invTau2" ]] theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + dnorm(mu, data$mu0, sqrt(data$tau20), log = TRUE) + dgamma(invTau2, data$alpha, data$beta, log = TRUE) } ``` ## Specifying the Parameter Bounds The final step before computing the log marginal likelihoods is to specify the parameter bounds. In this example, for both models, all parameters can range from $-\infty$ to $\infty$ except the precision `invTau2` which has a lower bound of zero. These boundary vectors need to be named and the names need to match the order of the parameters. ```{r,eval=FALSE} # specify parameter bounds H0 cn <- colnames(samples_H0$BUGSoutput$sims.matrix) cn <- cn[cn != "deviance"] lb_H0 <- rep(-Inf, length(cn)) ub_H0 <- rep(Inf, length(cn)) names(lb_H0) <- names(ub_H0) <- cn lb_H0[[ "invTau2" ]] <- 0 # specify parameter bounds H1 cn <- colnames(samples_H1$BUGSoutput$sims.matrix) cn <- cn[cn != "deviance"] lb_H1 <- rep(-Inf, length(cn)) ub_H1 <- rep(Inf, length(cn)) names(lb_H1) <- names(ub_H1) <- cn lb_H1[[ "invTau2" ]] <- 0 ``` Note that currently, the lower and upper bound of a parameter cannot be a function of the bounds of another parameter. Furthermore, constraints that depend on multiple parameters of the model are not supported. This excludes, for example, parameters that constitute a covariance matrix or sets of parameters that need to sum to one. ## Computing the (Log) Marginal Likelihoods Now we are ready to compute the log marginal likelihoods using the `bridge_sampler` function. We use `silent = TRUE` to suppress printing the number of iterations to the console: ```{r, echo=FALSE} load(system.file("extdata/", "vignette_example_jags.RData", package = "bridgesampling")) ``` ```{r,eval=FALSE} # compute log marginal likelihood via bridge sampling for H0 H0.bridge <- bridge_sampler(samples = samples_H0, data = data_H0, log_posterior = log_posterior_H0, lb = lb_H0, ub = ub_H0, silent = TRUE) # compute log marginal likelihood via bridge sampling for H1 H1.bridge <- bridge_sampler(samples = samples_H1, data = data_H1, log_posterior = log_posterior_H1, lb = lb_H1, ub = ub_H1, silent = TRUE) ``` We obtain: ```{r} print(H0.bridge) print(H1.bridge) ``` We can use the `error_measures` function to compute an approximate percentage error of the estimates: ```{r,eval=FALSE} # compute percentage errors H0.error <- error_measures(H0.bridge)$percentage H1.error <- error_measures(H1.bridge)$percentage ``` We obtain: ```{r} print(H0.error) print(H1.error) ``` ## Bayesian Model Comparison To compare the null model and the alternative model, we can compute the Bayes factor by using the `bf` function. 
In our case, we compute $\text{BF}_{01}$, that is, the Bayes factor which quantifies how much more likely the data are under the null versus the alternative model: ```{r} # compute Bayes factor BF01 <- bf(H0.bridge, H1.bridge) print(BF01) ``` In this case, the Bayes factor is close to one, indicating that there is not much evidence for either model. We can also compute posterior model probabilities by using the `post_prob` function: ```{r} # compute posterior model probabilities (assuming equal prior model probabilities) post1 <- post_prob(H0.bridge, H1.bridge) print(post1) ``` When the argument `prior_prob` is not specified, as is the case here, the prior model probabilities of all models under consideration are set equal (i.e., in this case with two models to 0.5). However, if we had prior knowledge about how likely both models are, we could use the `prior_prob` argument to specify different prior model probabilities: ```{r} # compute posterior model probabilities (using user-specified prior model probabilities) post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) print(post2) ``` bridgesampling/vignettes/bridgesampling_example_nimble.Rmd0000644000176200001440000001632713663004467023767 0ustar liggesusers--- title: "Hierarchical Normal Example (nimble)" author: "Quentin F. Gronau, Henrik Singmann & Perry de Valpine" date: "`r Sys.Date()`" show_toc: true output: knitr:::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Hierarchical Normal Example Nimble} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- In this vignette, we explain how one can compute marginal likelihoods, Bayes factors, and posterior model probabilities using a simple hierarchical normal model implemented in `nimble`. The [`nimble` documentation](https://r-nimble.org/html_manual/cha-welcome-nimble.html) provides a comprehensive overview. This vignette uses the same models and data as the [`Stan` vignette](bridgesampling_example_stan.html) and [`Jags` vignette](bridgesampling_example_jags.html). ## Model and Data The model that we will use assumes that each of the $n$ observations $y_i$ (where $i$ indexes the observation, $i = 1,2,...,n$) is normally distributed with corresponding mean $\theta_i$ and a common known variance $\sigma^2$: $y_i \sim \mathcal{N}(\theta_i, \sigma^2)$. Each $\theta_i$ is drawn from a normal group-level distribution with mean $\mu$ and variance $\tau^2$: $\theta_i \sim \mathcal{N}(\mu, \tau^2)$. For the group-level mean $\mu$, we use a normal prior distribution of the form $\mathcal{N}(\mu_0, \tau^2_0)$. For the group-level variance $\tau^2$, we use an inverse-gamma prior of the form $\text{Inv-Gamma}(\alpha, \beta)$. In this example, we are interested in comparing the null model $\mathcal{H}_0$, which posits that the group-level mean $\mu = 0$, to the alternative model $\mathcal{H}_1$, which allows $\mu$ to be different from zero. First, we generate some data from the null model: ```{r} library(bridgesampling) ### generate data ### set.seed(12345) mu <- 0 tau2 <- 0.5 sigma2 <- 1 n <- 20 theta <- rnorm(n, mu, sqrt(tau2)) y <- rnorm(n, theta, sqrt(sigma2)) ``` Next, we specify the prior parameters $\mu_0$, $\tau^2_0$, $\alpha$, and $\beta$: ```{r,eval=FALSE} ### set prior parameters ### mu0 <- 0 tau20 <- 1 alpha <- 1 beta <- 1 ``` ## Specifying the Models Next, we implement the models in `nimble`. This requires to first transform the code into a `nimbleModel`, then we need to set the data, and then we can compile the model. 
Given that `nimble` is build on BUGS, the similarity between the `nimble` code and the [`Jags` code](bridgesampling_example_jags.html) is not too surprising. ```{r, eval=FALSE} library("nimble") # models codeH0 <- nimbleCode({ invTau2 ~ dgamma(1, 1) tau2 <- 1/invTau2 for (i in 1:20) { theta[i] ~ dnorm(0, sd = sqrt(tau2)) y[i] ~ dnorm(theta[i], sd = 1) } }) codeH1 <- nimbleCode({ mu ~ dnorm(0, sd = 1) invTau2 ~ dgamma(1, 1) tau2 <- 1/invTau2 for (i in 1:20) { theta[i] ~ dnorm(mu, sd = sqrt(tau2)) y[i] ~ dnorm(theta[i], sd = 1) } }) ## steps for H0: modelH0 <- nimbleModel(codeH0) modelH0$setData(y = y) # set data cmodelH0 <- compileNimble(modelH0) # make compiled version from generated C++ ## steps for H1: modelH1 <- nimbleModel(codeH1) modelH1$setData(y = y) # set data cmodelH1 <- compileNimble(modelH1) # make compiled version from generated C++ ``` ## Fitting the Models Fitting a model with `nimble` requires one to first create an MCMC function from the (compiled or uncompiled) model. This function then needs to be compiled again. With this object we can then create the samples. Note that nimble uses a reference object semantic so we do not actually need the samples object, as the samples will be saved in the MCMC function objects. But as `runMCMC` returns them anyway, we nevertheless save them. One usually requires a larger number of posterior samples for estimating the marginal likelihood than for simply estimating the model parameters. This is the reason for using a comparatively large number of samples for these simple models. ```{r, eval=FALSE} # build MCMC functions, skipping customization of the configuration. mcmcH0 <- buildMCMC(modelH0, monitors = modelH0$getNodeNames(stochOnly = TRUE, includeData = FALSE)) mcmcH1 <- buildMCMC(modelH1, monitors = modelH1$getNodeNames(stochOnly = TRUE, includeData = FALSE)) # compile the MCMC function via generated C++ cmcmcH0 <- compileNimble(mcmcH0, project = modelH0) cmcmcH1 <- compileNimble(mcmcH1, project = modelH1) # run the MCMC. This is a wrapper for cmcmc$run() and extraction of samples. # the object samplesH1 is actually not needed as the samples are also in cmcmcH1 samplesH0 <- runMCMC(cmcmcH0, niter = 1e5, nburnin = 1000, nchains = 2, progressBar = FALSE) samplesH1 <- runMCMC(cmcmcH1, niter = 1e5, nburnin = 1000, nchains = 2, progressBar = FALSE) ``` ## Computing the (Log) Marginal Likelihoods Computing the (log) marginal likelihoods via the `bridge_sampler` function is now easy: we only need to pass the compiled MCMC function objects (of class `"MCMC_refClass"`) which contain all information necessary. We use `silent = TRUE` to suppress printing the number of iterations to the console: ```{r, echo=FALSE} load(system.file("extdata/", "vignette_example_nimble.RData", package = "bridgesampling")) ``` ```{r,eval=FALSE} # compute log marginal likelihood via bridge sampling for H0 H0.bridge <- bridge_sampler(cmcmcH0, silent = TRUE) # compute log marginal likelihood via bridge sampling for H1 H1.bridge <- bridge_sampler(cmcmcH1, silent = TRUE) ``` We obtain: ```{r} print(H0.bridge) print(H1.bridge) ``` We can use the `error_measures` function to compute an approximate percentage error of the estimates: ```{r,eval=FALSE} # compute percentage errors H0.error <- error_measures(H0.bridge)$percentage H1.error <- error_measures(H1.bridge)$percentage ``` We obtain: ```{r} print(H0.error) print(H1.error) ``` ## Bayesian Model Comparison To compare the null model and the alternative model, we can compute the Bayes factor by using the `bf` function. 
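If needed, the draws can also be retrieved later directly from the compiled MCMC objects (after the chunk below has been run). A minimal sketch, using the accessor for the stored samples described in the `nimble` manual, could look like this:

```{r, eval=FALSE}
# retrieve the stored draws from the compiled MCMC object (after runMCMC below)
stored_samples_H1 <- as.matrix(cmcmcH1$mvSamples)
head(stored_samples_H1)
```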
In our case, we compute $\text{BF}_{01}$, that is, the Bayes factor which quantifies how much more likely the data are under the null versus the alternative model: ```{r} # compute Bayes factor BF01 <- bf(H0.bridge, H1.bridge) print(BF01) ``` In this case, the Bayes factor is close to one, indicating that there is not much evidence for either model. We can also compute posterior model probabilities by using the `post_prob` function: ```{r} # compute posterior model probabilities (assuming equal prior model probabilities) post1 <- post_prob(H0.bridge, H1.bridge) print(post1) ``` When the argument `prior_prob` is not specified, as is the case here, the prior model probabilities of all models under consideration are set equal (i.e., in this case with two models to 0.5). However, if we had prior knowledge about how likely both models are, we could use the `prior_prob` argument to specify different prior model probabilities: ```{r} # compute posterior model probabilities (using user-specified prior model probabilities) post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) print(post2) ``` bridgesampling/vignettes/bridgesampling_tutorial.pdf.asis0000644000176200001440000000012413663004467023622 0ustar liggesusers%\VignetteIndexEntry{A Tutorial on Bridge Sampling} %\VignetteEngine{R.rsp::asis} bridgesampling/vignettes/bridgesampling_stan_ttest.Rmd0000644000176200001440000002123713663004467023172 0ustar liggesusers--- title: "Bayesian One-Sample T-Test (Stan)" author: "Quentin F. Gronau" date: "`r Sys.Date()`" show_toc: true output: knitr:::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Bayesian One-Sample T-Test Stan} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- In this vignette, we explain how we can compute the (log) marginal likelihood and the Bayes factor for models fitted in `Stan`. This approach has the advantage that the user only needs to pass the fitted `stanfit` object which contains all information that is necessary to compute the (log) marginal likelihood. Here we show how one can conduct a Bayesian one-sample t-test as implemented in the `BayesFactor` package (Morey & Rouder, 2015). ## Model The Bayesian one-sample t-test makes the assumption that the observations are normally distributed with mean $\mu$ and variance $\sigma^2$. The model is then reparametrized in terms of the standardized effect size $\delta = \mu/\sigma$. For the standardized effect size, a Cauchy prior with location zero and scale $r = 1/\sqrt{2}$ is used. For the variance $\sigma^2$, Jeffreys's prior is used: $p(\sigma^2) \propto 1/\sigma^2$. In this example, we are interested in comparing the null model $\mathcal{H}_0$, which posits that the effect size $\delta$ is zero, to the alternative hypothesis $\mathcal{H}_1$, which assigns $\delta$ the above described Cauchy prior. ## Data In this example, we will analyze the `sleep` data set from the `t.test` example. This data set shows the effect of two soporific drugs (increase in hours of sleep compared to control) on 10 patients. These data can be analyzed via a one-sample t-test by first computing the difference scores and then conducting the t-test using these difference scores as data. The difference scores are calculated as follows: ```{r} library(bridgesampling) set.seed(12345) # Sleep data from t.test example data(sleep) # compute difference scores y <- sleep$extra[sleep$group == 2] - sleep$extra[sleep$group == 1] n <- length(y) ``` ## Specifying the Models Next, we implement the models in `Stan`. 
Note that to compute the (log) marginal likelihood for a `Stan` model, we need to specify the model in a certain way. Instad of using `"~"` signs for specifying distributions, we need to directly use the (log) density functions. The reason for this is that when using the `"~"` sign, constant terms are dropped which are not needed for sampling from the posterior. However, for computing the marginal likelihood, these constants need to be retained. For instance, instead of writing `y ~ normal(mu, sigma)` we would need to write `target += normal_lpdf(y | mu, sigma)`. The models can then be specified and compiled as follows (note that it is necessary to install `rstan` for this): ```{r, eval=FALSE} library(rstan) # models stancodeH0 <- ' data { int n; // number of observations vector[n] y; // observations } parameters { real sigma2; // variance parameter } model { target += log(1/sigma2); // Jeffreys prior on sigma2 target += normal_lpdf(y | 0, sqrt(sigma2)); // likelihood } ' stancodeH1 <- ' data { int n; // number of observations vector[n] y; // observations real r; // Cauchy prior scale } parameters { real delta; real sigma2;// variance parameter } model { target += cauchy_lpdf(delta | 0, r); // Cauchy prior on delta target += log(1/sigma2); // Jeffreys prior on sigma2 target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood } ' # compile models stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel") ``` ## Fitting the Models Now we can fit the null and the alternative model in `Stan`. One usually requires a larger number of posterior samples for estimating the marginal likelihood than for simply estimating the model parameters. This is the reason for using a comparatively large number of samples for these simple models. ```{r, eval=FALSE} # fit models stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n), iter = 20000, warmup = 1000, chains = 4, cores = 1, control = list(adapt_delta = .99)) stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, r = 1/sqrt(2)), iter = 20000, warmup = 1000, chains = 4, cores = 1, control = list(adapt_delta = .99)) ``` ## Computing the (Log) Marginal Likelihoods Computing the (log) marginal likelihoods via the `bridge_sampler` function is now easy: we only need to pass the `stanfit` objects which contain all information necessary. We use `silent = TRUE` to suppress printing the number of iterations to the console: ```{r, echo=FALSE} load(system.file("extdata/", "vignette_stan_ttest.RData", package = "bridgesampling")) ``` ```{r, eval=FALSE} H0 <- bridge_sampler(stanfitH0, silent = TRUE) H1 <- bridge_sampler(stanfitH1, silent = TRUE) ``` We obtain: ```{r} print(H0) print(H1) ``` We can use the `error_measures` function to compute an approximate percentage error of the estimates: ```{r,eval=FALSE} # compute percentage errors H0.error <- error_measures(H0)$percentage H1.error <- error_measures(H1)$percentage ``` We obtain: ```{r} print(H0.error) print(H1.error) ``` ## Computing the Bayes Factor To compare the null model and the alternative model, we can compute the Bayes factor by using the `bf` function. 
In our case, we compute $\text{BF}_{10}$, that is, the Bayes factor which quantifies how much more likely the data are under the alternative versus the null hypothesis: ```{r} # compute Bayes factor BF10 <- bf(H1, H0) print(BF10) ``` We can compare the bridge sampling result to the `BayesFactor` package result: ```{r, eval=FALSE} library(BayesFactor) BF10.BayesFactor <- extractBF(ttestBF(y), onlybf = TRUE) ``` We obtain: ```{r, message=FALSE} print(BF10.BayesFactor) ``` ## One-sided Test We can also conduct one-sided tests. For instance, we could test the hypothesis that the effect size is positive versus the null hypothesis. Since we already fitted the null model and computed its marginal likelihood, we only need to slightly adjust the alternative model to reflect the directed hypothesis. To achieve this, we need to truncate the Cauchy prior distribution for $\delta$ at zero and then renormalize the (log) density. This is easily achieved via the `Stan` function `cauchy_lccdf` which corresponds to the log of the complementary cumulative distribution function of the Cauchy distribution. Thus, `cauchy_lccdf(0 | 0, r)` gives us the log of the area greater than zero which is required for renormalizing the truncated Cauchy prior. The model can then be specified and fitted as follows: ```{r, eval=FALSE} stancodeHplus <- ' data { int n; // number of observations vector[n] y; // observations real r; // Cauchy prior scale } parameters { real delta; // constrained to be positive real sigma2;// variance parameter } model { target += cauchy_lpdf(delta | 0, r) - cauchy_lccdf(0 | 0, r); // Cauchy prior on delta target += log(1/sigma2); // Jeffreys prior on sigma2 target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood } ' # compile and fit model stanmodelHplus <- stan_model(model_code = stancodeHplus, model_name="stanmodel") stanfitHplus <- sampling(stanmodelHplus, data = list(y = y, n = n, r = 1/sqrt(2)), iter = 30000, warmup = 1000, chains = 4, control = list(adapt_delta = .99)) ``` The (log) marginal likelihood is then computed as follows: ```{r,eval=FALSE} Hplus <- bridge_sampler(stanfitHplus, silent = TRUE) ``` We obtain: ```{r} print(Hplus) ``` We can again use the `error_measures` function to compute an approximate percentage error of the estimate: ```{r,eval=FALSE} Hplus.error <- error_measures(Hplus)$percentage ``` We obtain: ```{r} print(Hplus.error) ``` The one-sided Bayes factor in favor of a positive effect versus the null hypothesis can be computed as follows: ```{r} # compute Bayes factor BFplus0 <- bf(Hplus, H0) print(BFplus0) ``` We can compare the bridge sampling result to the `BayesFactor` package result: ```{r, eval=FALSE} BFplus0.BayesFactor <- extractBF(ttestBF(y, nullInterval = c(0, Inf)), onlybf = TRUE)[1] ``` We obtain: ```{r} print(BFplus0.BayesFactor) ``` ## References Richard D. Morey and Jeffrey N. Rouder (2015). BayesFactor: Computation of Bayes Factors for Common Designs. R package version 0.9.12-2. \url{https://CRAN.R-project.org/package=BayesFactor} bridgesampling/vignettes/bridgesampling_example_stan.Rmd0000644000176200001440000001634113663004467023462 0ustar liggesusers--- title: "Hierarchical Normal Example (Stan)" author: "Quentin F. 
Gronau" date: "`r Sys.Date()`" show_toc: true output: knitr:::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Hierarchical Normal Example Stan} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- In this vignette, we explain how one can compute marginal likelihoods, Bayes factors, and posterior model probabilities using a simple hierarchical normal model implemented in `Stan`. This vignette uses the same models and data as the [`Jags` vignette](bridgesampling_example_jags.html). ## Model and Data The model that we will use assumes that each of the $n$ observations $y_i$ (where $i$ indexes the observation, $i = 1,2,...,n$) is normally distributed with corresponding mean $\theta_i$ and a common known variance $\sigma^2$: $y_i \sim \mathcal{N}(\theta_i, \sigma^2)$. Each $\theta_i$ is drawn from a normal group-level distribution with mean $\mu$ and variance $\tau^2$: $\theta_i \sim \mathcal{N}(\mu, \tau^2)$. For the group-level mean $\mu$, we use a normal prior distribution of the form $\mathcal{N}(\mu_0, \tau^2_0)$. For the group-level variance $\tau^2$, we use an inverse-gamma prior of the form $\text{Inv-Gamma}(\alpha, \beta)$. In this example, we are interested in comparing the null model $\mathcal{H}_0$, which posits that the group-level mean $\mu = 0$, to the alternative model $\mathcal{H}_1$, which allows $\mu$ to be different from zero. First, we generate some data from the null model: ```{r} library(bridgesampling) ### generate data ### set.seed(12345) mu <- 0 tau2 <- 0.5 sigma2 <- 1 n <- 20 theta <- rnorm(n, mu, sqrt(tau2)) y <- rnorm(n, theta, sqrt(sigma2)) ``` Next, we specify the prior parameters $\mu_0$, $\tau^2_0$, $\alpha$, and $\beta$: ```{r,eval=FALSE} ### set prior parameters ### mu0 <- 0 tau20 <- 1 alpha <- 1 beta <- 1 ``` ## Specifying the Models Next, we implement the models in `Stan`. Note that to compute the (log) marginal likelihood for a `Stan` model, we need to specify the model in a certain way. Instad of using `"~"` signs for specifying distributions, we need to directly use the (log) density functions. The reason for this is that when using the `"~"` sign, constant terms are dropped which are not needed for sampling from the posterior. However, for computing the marginal likelihood, these constants need to be retained. For instance, instead of writing `y ~ normal(mu, sigma)` we would need to write `target += normal_lpdf(y | mu, sigma)`. 
The models can then be specified and compiled as follows (note that it is necessary to install `rstan` for this): ```{r, eval=FALSE} library(rstan) # models stancodeH0 <- 'data { int n; // number of observations vector[n] y; // observations real alpha; real beta; real sigma2; } parameters { real tau2; // group-level variance vector[n] theta; // participant effects } model { target += inv_gamma_lpdf(tau2 | alpha, beta); target += normal_lpdf(theta | 0, sqrt(tau2)); target += normal_lpdf(y | theta, sqrt(sigma2)); } ' stancodeH1 <- 'data { int n; // number of observations vector[n] y; // observations real mu0; real tau20; real alpha; real beta; real sigma2; } parameters { real mu; real tau2; // group-level variance vector[n] theta; // participant effects } model { target += normal_lpdf(mu | mu0, sqrt(tau20)); target += inv_gamma_lpdf(tau2 | alpha, beta); target += normal_lpdf(theta | mu, sqrt(tau2)); target += normal_lpdf(y | theta, sqrt(sigma2)); } ' # compile models stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel") ``` ## Fitting the Models Now we can fit the null and the alternative model in `Stan`. One usually requires a larger number of posterior samples for estimating the marginal likelihood than for simply estimating the model parameters. This is the reason for using a comparatively large number of samples for these simple models. ```{r, eval=FALSE} # fit models stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n, alpha = alpha, beta = beta, sigma2 = sigma2), iter = 50000, warmup = 1000, chains = 3, cores = 1) stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, mu0 = mu0, tau20 = tau20, alpha = alpha, beta = beta, sigma2 = sigma2), iter = 50000, warmup = 1000, chains = 3, cores = 1) ``` ## Computing the (Log) Marginal Likelihoods Computing the (log) marginal likelihoods via the `bridge_sampler` function is now easy: we only need to pass the `stanfit` objects which contain all information necessary. We use `silent = TRUE` to suppress printing the number of iterations to the console: ```{r, echo=FALSE} load(system.file("extdata/", "vignette_example_stan.RData", package = "bridgesampling")) ``` ```{r,eval=FALSE} # compute log marginal likelihood via bridge sampling for H0 H0.bridge <- bridge_sampler(stanfitH0, silent = TRUE) # compute log marginal likelihood via bridge sampling for H1 H1.bridge <- bridge_sampler(stanfitH1, silent = TRUE) ``` We obtain: ```{r} print(H0.bridge) print(H1.bridge) ``` We can use the `error_measures` function to compute an approximate percentage error of the estimates: ```{r,eval=FALSE} # compute percentage errors H0.error <- error_measures(H0.bridge)$percentage H1.error <- error_measures(H1.bridge)$percentage ``` We obtain: ```{r} print(H0.error) print(H1.error) ``` ## Bayesian Model Comparison To compare the null model and the alternative model, we can compute the Bayes factor by using the `bf` function. In our case, we compute $\text{BF}_{01}$, that is, the Bayes factor which quantifies how much more likely the data are under the null versus the alternative model: ```{r} # compute Bayes factor BF01 <- bf(H0.bridge, H1.bridge) print(BF01) ``` In this case, the Bayes factor is close to one, indicating that there is not much evidence for either model. 
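For numerically more extreme comparisons it can be convenient to report the Bayes factor on the log scale, which `bf` supports via its `log` argument; the result corresponds to the difference of the two log marginal likelihood estimates. A quick illustration:

```{r, eval=FALSE}
# Bayes factor on the log scale
logBF01 <- bf(H0.bridge, H1.bridge, log = TRUE)
print(logBF01)
```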
We can also compute posterior model probabilities by using the `post_prob` function: ```{r} # compute posterior model probabilities (assuming equal prior model probabilities) post1 <- post_prob(H0.bridge, H1.bridge) print(post1) ``` When the argument `prior_prob` is not specified, as is the case here, the prior model probabilities of all models under consideration are set equal (i.e., in this case with two models to 0.5). However, if we had prior knowledge about how likely both models are, we could use the `prior_prob` argument to specify different prior model probabilities: ```{r} # compute posterior model probabilities (using user-specified prior model probabilities) post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) print(post2) ``` bridgesampling/NEWS0000644000176200001440000002135014026401035013734 0ustar liggesusers *********************************** ** bridgesampling VERSION 1.1-0 ** *********************************** Changes in bridgesampling Version 1.1-0 Released March 2021 Significant User Visible Changes and New Features o Bugfixes o Fixed subscript out of bounds error, see https://github.com/quentingronau/bridgesampling/issues/26 o Deactivated stan tests on Windows to avoid CRAN check issues. *********************************** ** bridgesampling VERSION 1.0-0 ** *********************************** Changes in bridgesampling Version 1.0-0 Released February 2020 Significant User Visible Changes and New Features o Included citation file and references to JSS article *********************************** ** bridgesampling VERSION 0.8-x ** *********************************** Changes in bridgesampling Version 0.8-x Released December 2019 Significant User Visible Changes and New Features o Disabled use of mvnfast and revetred back to mvtnorn. see also: https://github.com/quentingronau/bridgesampling/issues/20 Bugfixes o Version 0.7-x introduced a bug that prevented a rerunning of the iterative scheme based on harmonic mean in case maxit was reached. This bug should now be removed. See: https://github.com/quentingronau/bridgesampling/issues/18 *********************************** ** bridgesampling VERSION 0.7-x ** *********************************** Changes in bridgesampling Version 0.7-x Released June 2019 Significant User Visible Changes and New Features o Better error message in case all samples from proposal distribution evaluate to NA with log_posterior. Bugfixes o bridge_sampler more robust in case of extreme numerical values while running iterative scheme. Addresses #14. o Better error message in case samples are outside parameter bounds. *********************************** ** bridgesampling VERSION 0.6-x ** *********************************** Changes in bridgesampling Version 0.6-x Released October 2018 Significant User Visible Changes and New Features o Added nimble vignette (Hierarchical Normal Example) o Added accepted JSS version of introductory paper, but kept existing version as extended version. Bugfixes o R CMD check on the package tar.gz should now run without packages that are in suggests installed (if the corresponding environment variable is set). Also, all vignettes should compile without suggested packages (achieved by precalculating the results and then loading them). 
*********************************** ** bridgesampling VERSION 0.5-x ** *********************************** Changes in bridgesampling Version 0.5-x Released August 2018 Significant User Visible Changes and New Features o Added support for nimble objects (http://r-nimble.org/) via bridge_sampler.MCMC_refClass method. Thanks to Perry de Valpine for his help in creating this method. o The print methods for the bf() function now try to deparse the model names from the user input and use these names instead of x1 and x2. o Added support for simplex and circular parameters which can be specified using the new argument param_types of the bridge_sampler function (thanks to Kees Mulder) Bugfixes o *********************************** ** bridgesampling VERSION 0.4-x ** *********************************** Changes in bridgesampling Version 0.4-x Released December 2017 Significant User Visible Changes and New Features o More informative error messages for methods due to checking of input values: - bridge_sampler() methods now check lb and ub. - bf() methods check class of x2. - post_prob() checks if only one object of appropriate class is passed. o Promoted error_measures() to generic function with methods for both repetitions = 1 and repetitions > 1. In the latter case median and IQR are reported. The only situation where we can not report error measures is if repetitions = 1 and method = "warp3". o Added summary() (and corresponding print.summary) methods for bridge and bridge_list objects. These methods now always invoke error_measures() and return a data.frame with both log marginal likelihood and error information. These methods are described in ?`bridge-methods`. o Updated bridgesampling vignette to latest version. Bugfixes o Retroactively updated the NEWS file. *********************************** ** bridgesampling VERSION 0.3-x ** *********************************** Changes in bridgesampling Version 0.3-x Released October 2017 Significant User Visible Changes and New Features o Added a variety of new methods for bridge_sampler() that automatically extract the posterior samples, but also require a log_posterior function. Specifically, bridge_sampler() now has methods of this kind for the following objects: matrix, mcmc.list, rjags, and runjags. o Added stanreg method to bridge_sampler() which allows to pass objects from rstanarm. Note that this method requires to specify the diagnostic_file option, see examples. Thanks to Ben Goodrich for the pull request. o Added new vignette introducing the package: bridgesampling: An R Package for Estimating Normalizing Constants o Added two new data sets plus code used in the new vignette, see ?ier and ?turtles o Added bayes_factor() as alias for bf(), as bf() is an existing function in package brms. o Added use_neff argument to bridge_sampler() which allows to determine whether the effective sample size or the actual sample size is used for bridge sampling. Bugfixes o bridge_sampler() for stan objects on windows should not fail anymore if cores > 1. Instead, cores will be set to 1 (with warnings). *********************************** ** bridgesampling VERSION 0.2-x ** *********************************** Changes in bridgesampling Version 0.2-x Released June 2017 Significant User Visible Changes and New Features o Added stan_bridge_sampler(), which allows one to obtain the marginal likelihood directly from a fitted stanfit object that contains posterior samples. 
Note that it may be necessary to compile a new stanfit object without samples if the one with samples was compiled in a different session/pc. See new vignettes for examples. o Added repetitions argument to bridge sampler functions which allows to compute independent bridge sample estimates (based on the same posterior samples). bridge_sampler() now returns object of class "bridge" for calculations with repetitions = 1, but an object of class "bridge_list" if repetitions > 1, the latter contains the full list of estimates (but no q vectors). o Renamed compute_post_prob() to post_prob(), which is now a generic function with methods for bridge objects. The default method allows just logml values. For "bridge_list" objects (i.e., with repetitions > 1) a matrix of posterior probabilities with rows for each repetition is returned. o added new generic function logml() which returns the log marginal likelihood as a scalar value. o Multicore computations (i.e., cores > 1) on Unix-like systems (e.g., Mac OS, Linux) are now performed with forking via parallel::mclapply(). Bugfixes o compute_post_prob() now works even when exp(logml) initially returns Inf (solution works via brobdingnag). o Bridge sampler more robust due to various small improvements and produces more informative error messages should it fail. o If log_prob() returns NA, these values are replaced with -Inf on the log scale (which assumes a likelihood of 0). With warning. bridgesampling/R/0000755000176200001440000000000013740627754013451 5ustar liggesusersbridgesampling/R/turtles-data.R0000644000176200001440000000264214026403772016207 0ustar liggesusers#' Turtles Data from Janzen, Tucker, and Paukstis (2000) #' #' This data set contains information about 244 newborn turtles from 31 #' different clutches. For each turtle, the data set includes information about #' survival status (column \code{y}; 0 = died, 1 = survived), birth weight in #' grams (column \code{x}), and clutch (family) membership (column #' \code{clutch}; an integer between one and 31). The clutches have been ordered #' according to mean birth weight. #' #' @docType data #' @keywords dataset #' @name turtles #' @usage turtles #' @format A data.frame with 244 rows and 3 variables. #' @source Janzen, F. J., Tucker, J. K., & Paukstis, G. L. (2000). Experimental #' analysis of an early life-history stage: Selection on size of hatchling #' turtles. \emph{Ecology, 81(8)}, 2290-2304. #' \doi{10.2307/177115} #' #' Overstall, A. M., & Forster, J. J. (2010). Default Bayesian model #' determination methods for generalised linear mixed models. #' \emph{Computational Statistics & Data Analysis, 54}, 3269-3288. #' \doi{10.1016/j.csda.2010.03.008} #' #' Sinharay, S., & Stern, H. S. (2005). An empirical comparison of methods for #' computing Bayes factors in generalized linear mixed models. \emph{Journal #' of Computational and Graphical Statistics, 14(2)}, 415-435. #' \doi{10.1198/106186005X47471} #' @encoding UTF-8 #' #' @example examples/example.turtles.R NULL bridgesampling/R/bridge_methods.R0000644000176200001440000001221013663004467016551 0ustar liggesusers#' Methods for bridge and bridge_list objects #' #' Methods defined for objects returned from the generic \code{\link{bridge_sampler}} function. #' #' @param object,x object of class \code{bridge} or \code{bridge_list} as returned from \code{\link{bridge_sampler}}. #' @param na.rm logical. Should NA estimates in \code{bridge_list} objects be removed? Passed to \code{\link{error_measures}}. #' @param ...
further arguments, currently ignored. #' #' @return #' The \code{summary} methods return a \code{data.frame} which contains the log marginal likelihood plus the result returned from invoking \code{\link{error_measures}}. #' #' The \code{print} methods simply print and return nothing. #' #' #' @name bridge-methods NULL # summary methods #' @rdname bridge-methods #' @method summary bridge #' @export summary.bridge <- function(object, na.rm = TRUE, ...) { if( ! (object$method %in% c("normal", "warp3"))) { stop('object$method needs to be either "normal" or "warp3".', call. = FALSE) } if (object$method == "normal") { em <- error_measures(object) out <- data.frame("Logml_Estimate" = object$logml, "Relative_Mean_Squared_Error" = em$re2, "Coefficient_of_Variation" = em$cv, "Percentage_Error" = em$percentage, "Method" = object$method, "Repetitions" = 1, stringsAsFactors = FALSE) } else if (object$method == "warp3") { out <- data.frame("Logml_Estimate" = object$logml, "Method" = object$method, "Repetitions" = 1) } class(out) <- c("summary.bridge", "data.frame") return(out) } #' @rdname bridge-methods #' @method summary bridge_list #' @export summary.bridge_list <- function(object, na.rm = TRUE, ...) { if( ! (object$method %in% c("normal", "warp3"))) { stop('object$method needs to be either "normal" or "warp3".', call. = FALSE) } em <- error_measures(object, na.rm = na.rm) out <- data.frame("Logml_Estimate" = median(object$logml, na.rm = na.rm), "Min" = em$min, "Max" = em$max, "Interquartile_Range" = em$IQR, "Method" = object$method, "Repetitions" = object$repetitions) class(out) <- c("summary.bridge_list", "data.frame") return(out) } # print summary methods #' @rdname bridge-methods #' @method print summary.bridge #' @export print.summary.bridge <- function(x, ...) { if (x[["Method"]] == "normal") { cat('\nBridge sampling log marginal likelihood estimate \n(method = "', as.character(x[["Method"]]), '", repetitions = ', x[["Repetitions"]], '):\n\n ', x[["Logml_Estimate"]], '\n\nError Measures:\n\n Relative Mean-Squared Error: ', x[["Relative_Mean_Squared_Error"]], '\n Coefficient of Variation: ', x[["Coefficient_of_Variation"]], '\n Percentage Error: ', x[["Percentage_Error"]], '\n\nNote:\nAll error measures are approximate.\n\n', sep = "") } else if (x[["Method"]] == "warp3") { cat('\nBridge sampling log marginal likelihood estimate \n(method = "', as.character(x[["Method"]]), '", repetitions = ', x[["Repetitions"]], '):\n\n ', x[["Logml_Estimate"]], '\n\nNote:\nNo error measures are available for method = "warp3"', '\nwith repetitions = 1.', '\nWe recommend to run the warp3 procedure multiple times to', '\nassess the uncertainty of the estimate.\n\n', sep = "") } } #' @rdname bridge-methods #' @method print summary.bridge_list #' @export print.summary.bridge_list <- function(x, ...) { cat('\nBridge sampling log marginal likelihood estimate \n(method = "', as.character(x[["Method"]]), '", repetitions = ', x[["Repetitions"]], '):\n\n ', x[["Logml_Estimate"]], '\n\nError Measures:\n\n Min: ', x[["Min"]], '\n Max: ', x[["Max"]], '\n Interquartile Range: ', x[["Interquartile_Range"]], '\n\nNote:\nAll error measures are based on ', x[["Repetitions"]], ' estimates.\n\n', sep = "") } # print methods #' @rdname bridge-methods #' @method print bridge #' @export print.bridge <- function(x, ...) 
{ cat("Bridge sampling estimate of the log marginal likelihood: ", round(x$logml, 5), "\nEstimate obtained in ", x$niter, " iteration(s) via method \"", x$method, "\".\n", sep = "") } #' @rdname bridge-methods #' @method print bridge_list #' @export print.bridge_list <- function(x, na.rm = TRUE, ...) { cat("Median of ", x$repetitions, " bridge sampling estimates\nof the log marginal likelihood: ", round(median(x$logml, na.rm = na.rm), 5), "\nRange of estimates: ", round(range(x$logml, na.rm = na.rm)[1], 5), " to ", round(range(x$logml, na.rm = na.rm)[2], 5), "\nInterquartile range: ", round(stats::IQR(x$logml, na.rm = na.rm), 5), "\nMethod: ", x$method, "\n", sep = "") if (any(is.na(x$logml))) warning(sum(is.na(x$logml))," bridge sampling estimate(s) are NAs.", call. = FALSE) } bridgesampling/R/bridge_sampler_internals.R0000644000176200001440000002245513740627754020652 0ustar liggesusers# Helper function to represent circular variables (such as mean directions) as # "gapless" numerical representations. .gaplessCircular <- function(th) { # Mean direction md <- atan2(sum(sin(th)), sum(cos(th))) # Shift th so that it is unlikely to have a gap. ((th - md + pi) %% (2*pi)) - pi + md } #### for matrix method ###### .transform2Real <- function(theta, lb, ub, theta_types = rep("real", ncol(theta))) { ### transform samples to real line theta_t <- theta transTypes <- character(ncol(theta)) cn <- colnames(theta) names(theta_types) <- names(transTypes) <- cn # Because the simplex transform must be done on all simplex parameters at # once, do it before the loop. This transformation follows the Stan reference # manual. For simplex variables, we expect one parameter less than the number # of weights due to the contstraint sum(simplex_theta) == 1. is_simplex_theta <- theta_types == "simplex" if (any(is_simplex_theta)) { # Select the simplex variables simplex_theta <- theta[, is_simplex_theta, drop = FALSE] # Simplex dimensionality simdim <- ncol(simplex_theta) cs <- cbind(0L, t(apply(simplex_theta, 1L, cumsum))[, -simdim, drop = FALSE]) # Get the break proportions. z_k <- (simplex_theta / (1L - cs)) y_k <- log(z_k) - log(1L - z_k) + matrix(log(simdim:1L), nrow(theta), simdim, byrow = TRUE) theta_t[, is_simplex_theta] <- y_k transTypes[is_simplex_theta] <- "simplex" } for (i in seq_len(ncol(theta))) { p <- cn[i] if (theta_types[[p]] == "circular") { transTypes[[p]] <- "circular" theta_t[,i] <- .gaplessCircular(theta[,i]) } else if (theta_types[[p]] == "real") { if (any(theta[,i] < lb[[p]])) { stop("Parameter values (samples) cannot be smaller than lb: ", p, call. = FALSE) } if (any(theta[,i] > ub[[p]])) { stop("Parameter values (samples) cannot be larger than ub: ", p, call. = FALSE) } if (lb[[p]] < ub[[p]] && is.infinite(lb[[p]]) && is.infinite(ub[[p]])) { transTypes[[p]] <- "unbounded" theta_t[,i] <- theta[,i] } else if (lb[[p]] < ub[[p]] && is.finite(lb[[p]]) && is.infinite(ub[[p]])) { transTypes[[p]] <- "lower-bounded" theta_t[,i] <- log(theta[,i] - lb[[p]]) } else if (lb[[p]] < ub[[p]] && is.infinite(lb[[p]]) && is.finite(ub[[p]])) { transTypes[[p]] <- "upper-bounded" theta_t[,i] <- log(ub[[p]] - theta[,i]) } else if (lb[[p]] < ub[[p]] && is.finite(lb[[p]]) && is.finite(ub[[p]])) { transTypes[[p]] <- "double-bounded" theta_t[,i] <- qnorm( (theta[,i] - lb[[p]])/(ub[[p]] - lb[[p]]) ) # Finally, give an error except for simplex variables, which are already # transformed. 
} else if (theta_types[p] != "simplex") stop(paste("Could not transform parameters, possibly due to invalid", "lower and/or upper prior bounds.")) } } colnames(theta_t) <- paste0("trans_", colnames(theta)) return(list(theta_t = theta_t, transTypes = transTypes)) } .invTransform2Real <- function(theta_t, lb, ub, theta_types = rep("real", ncol(theta))) { ### transform transformed samples back to original scales theta <- theta_t colnames(theta) <- stringr::str_sub(colnames(theta), 7) cn <- colnames(theta) names(theta_types) <- cn # Because the simplex transform must be done on all simplex parameters at # once, do it before the loop. This transformation follows the Stan reference # manual. For simplex variables, we expect one parameter less than the number # of weights due to the contstraint sum(simplex_theta) == 1. is_simplex_theta <- theta_types == "simplex" if (any(is_simplex_theta)) { # Select the simplex variables simplex_theta <- theta_t[, is_simplex_theta, drop = FALSE] # Simplex dimensionality simdim <- ncol(simplex_theta) logitz <- simplex_theta - matrix(log(simdim:1L), nrow(theta), simdim, byrow = TRUE) z_k <- exp(logitz) / (1 + exp(logitz)) x_k <- z_k if (simdim > 1) { for (k in 2:simdim) { x_k[, k] <- (1 - rowSums(x_k[, 1:(k - 1), drop = FALSE])) * z_k[, k] } } theta[, is_simplex_theta] <- x_k } # Note that the circular variables are not transformed back, because they are # simply a different numerical representation. for (i in seq_len(ncol(theta_t))) { p <- cn[i] if (theta_types[[p]] == "real") { if (lb[[p]] < ub[[p]] && is.infinite(lb[[p]]) && is.infinite(ub[[p]])) { theta[,i] <- theta_t[,i] } else if (lb[[p]] < ub[[p]] && is.finite(lb[[p]]) && is.infinite(ub[[p]])) { theta[,i] <- exp(theta_t[,i]) + lb[[p]] } else if (lb[[p]] < ub[[p]] && is.infinite(lb[[p]]) && is.finite(ub[[p]])) { theta[,i] <- ub[[p]] - exp(theta_t[,i]) } else if (lb[[p]] < ub[[p]] && is.finite(lb[[p]]) && is.finite(ub[[p]])) { theta[,i] <- pnorm(theta_t[,i])*(ub[[p]] - lb[[p]]) + lb[[p]] } else { stop("Could not transform parameters, possibly due to invalid lower and/or upper prior bounds.") } } } return(theta) } .logJacobian <- function(theta_t, transTypes, lb, ub) { ### compute log of Jacobian logJ <- matrix(nrow = nrow(theta_t), ncol = ncol(theta_t)) cn <- stringr::str_sub(colnames(theta_t), 7) # Separate the computations for the simplex is_simplex_theta <- transTypes == "simplex" if (any(is_simplex_theta)) { # Select the simplex variables simplex_theta <- theta_t[, is_simplex_theta, drop = FALSE] # Simplex dimensionality, this is K - 1 simdim <- ncol(simplex_theta) logitz <- simplex_theta - matrix(log(simdim:1L), nrow(theta_t), simdim, byrow = TRUE) z_k <- exp(logitz) / (1 + exp(logitz)) x_k <- z_k # Sum_x_k is the length of the remaining stick at step k. 
At the start, the # whole stick is still left sum_x_k <- matrix(nrow = nrow(theta_t), ncol = simdim) sum_x_k[, 1] <- 1 if (simdim > 1) { for (k in 2:simdim) { rsx <- rowSums(x_k[, 1:(k - 1), drop = FALSE]) x_k[, k] <- (1 - rsx) * z_k[, k] sum_x_k[, k] <- (1 - rsx) } } logJ[, is_simplex_theta] <- log(z_k) + log(1 - z_k) + log(sum_x_k) } for (i in seq_len( ncol(theta_t) )) { p <- cn[i] if (transTypes[[p]] == "unbounded") { logJ[,i] <- 0 } else if (transTypes[[p]] == "lower-bounded") { logJ[,i] <- theta_t[,i] } else if (transTypes[[p]] == "upper-bounded") { logJ[,i] <- theta_t[,i] } else if (transTypes[[p]] == "double-bounded") { logJ[,i] <- log(ub[[p]] - lb[[p]]) + dnorm(theta_t[,i], log = TRUE) } else if (transTypes[[p]] == "circular") { logJ[,i] <- 0 } } return(.rowSums(logJ, m = nrow(logJ), n = ncol(logJ))) } .split_matrix <- function(matrix, cores) { out <- vector("list", cores) borders <- ceiling(seq(from = 0, to = nrow(matrix), length.out = cores + 1)) for (i in seq_len(cores)) { out[[i]] <- matrix[(borders[i] + 1):borders[i + 1], , drop = FALSE] } out } .run.iterative.scheme <- function(q11, q12, q21, q22, r0, tol, L, method, maxiter, silent, criterion, neff) { ### run iterative updating scheme (using "optimal" bridge function, ### see Meng & Wong, 1996) if (method == "normal") { l1 <- q11 - q12 # log(l) l2 <- q21 - q22 # log(ltilde) } else if (method == "warp3") { l1 <- -log(2) + determinant(L)$modulus + (q11 - q12) # log(l) l2 <- -log(2) + determinant(L)$modulus + (q21 - q22) # log(ltilde) } ## for dbugging: # save( # l1, l2, # r0, tol, L, # method, maxiter, silent, # criterion, neff, # file = "iterative_scheme.rda" # ) lstar <- median(l1) n.1 <- length(l1) n.2 <- length(l2) s1 <- neff/(neff + n.2) s2 <- n.2/(neff + n.2) r <- r0 r_vals <- r logml <- log(r) + lstar logml_vals <- logml criterion_val <- 1 + tol e <- as.brob( exp(1) ) i <- 1 while (i <= maxiter && criterion_val > tol) { if (! silent) cat(paste0("Iteration: ", i, "\n")) rold <- r logmlold <- logml numi <- e^(l2 - lstar)/(s1 * e^(l2 - lstar) + s2 * r) deni <- 1/(s1 * e^(l1 - lstar) + s2 * r) if (any(is.infinite(as.numeric(numi))) || any(is.infinite(as.numeric((deni))))) { warning("Infinite value in iterative scheme, returning NA.\n Try rerunning with more samples.", call. = FALSE) return(list(logml = NA, niter = i)) } r <- (n.1/n.2) * sum(numi)/sum(deni) r_vals <- c(r_vals, r) logml <- log(r) + lstar logml_vals <- c(logml_vals, logml) criterion_val <- switch(criterion, "r" = abs((r - rold)/r), "logml" = abs((logml - logmlold)/logml)) i <- i + 1 } if (i >= maxiter) { return(list(logml = NA, niter = i-1, r_vals = r_vals)) } return(list(logml = logml, niter = i-1)) } bridgesampling/R/bf.R0000644000176200001440000001701214026403662014160 0ustar liggesusers#' Generic function that computes Bayes factor(s) from marginal likelihoods. \code{bayes_factor()} is simply an (S3 generic) alias for \code{bf()}. #' @export #' @title Bayes Factor(s) from Marginal Likelihoods #' @param x1 Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. Additionally, the default method assumes that \code{x1} is a single numeric log marginal likelihood (e.g., from \code{\link{logml}}) and will throw an error otherwise. #' @param x2 Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. Additionally, the default method assumes that \code{x2} is a single numeric log marginal likelihood (e.g., from \code{\link{logml}}) and will throw an error otherwise. 
#' @param log Boolean. If \code{TRUE}, the function returns the log of the Bayes factor. Default is \code{FALSE}. #' @param ... currently not used here, but can be used by other methods. #' @details Computes the Bayes factor (Kass & Raftery, 1995) in favor of the model associated with \code{x1} over the model associated with \code{x2}. #' @return For the default method returns a list of class \code{"bf_default"} with components: #' \itemize{ #' \item \code{bf}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2}. #' \item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. #' } #' #' #' For the method for \code{"bridge"} objects returns a list of class \code{"bf_bridge"} with components: #' \itemize{ #' \item \code{bf}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2}. #' \item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. #' } #' #' #' For the method for \code{"bridge_list"} objects returns a list of class \code{"bf_bridge_list"} with components: #' \itemize{ #' \item \code{bf}: a numeric vector consisting of Bayes factors where each element gives the Bayes factor for one set of logmls in favor of the model associated with \code{x1} over the model associated with \code{x2}. The length of this vector is given by the \code{"bridge_list"} element with the most \code{repetitions}. Elements with fewer repetitions will be recycled (with warning). #' \item \code{bf_median_based}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2} that is based on the median values of the logml estimates. #' \item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. #' } #' @author Quentin F. Gronau #' @note For examples, see \code{\link{bridge_sampler}} and the accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} \cr \code{vignette("bridgesampling_example_stan")} #' @references #' Kass, R. E., & Raftery, A. E. (1995). Bayes factors. \emph{Journal of the American Statistical Association, 90(430)}, 773-795. \doi{10.1080/01621459.1995.10476572} #' @importFrom methods is bf <- function(x1, x2, log = FALSE, ...) { UseMethod("bf", x1) } #' @rdname bf #' @export bayes_factor <- function(x1, x2, log = FALSE, ...) { UseMethod("bayes_factor", x1) } #' @rdname bf #' @export bayes_factor.default <- function(x1, x2, log = FALSE, ...) { bf(x1, x2, log = log, ...) } .bf_calc <- function(logml1, logml2, log) { bf <- logml1 - logml2 if (! log) bf <- exp(bf) return(bf) } #' @rdname bf #' @export bf.bridge <- function(x1, x2, log = FALSE, ...) { if (!inherits(x2, c("bridge", "bridge_list"))) stop("x2 needs to be of class 'bridge' or 'bridge_list'.", call. = FALSE) bf <- .bf_calc(logml(x1), logml(x2), log = log) out <- list(bf = bf, log = log) class(out) <- "bf_bridge" try({ mc <- match.call() name1 <- deparse(mc[["x1"]]) name2 <- deparse(mc[["x2"]]) attr(out, "model_names") <- c(name1, name2) }, silent = TRUE) return(out) } #' @rdname bf #' @export bf.bridge_list <- function(x1, x2, log = FALSE, ...) { if (!inherits(x2, c("bridge", "bridge_list"))) stop("x2 needs to be of class 'bridge' or 'bridge_list'.", call. 
= FALSE) logml1 <- x1$logml logml2 <- x2$logml median1 <- median(logml1, na.rm = TRUE) median2 <- median(logml2, na.rm = TRUE) len1 <- length(logml1) len2 <- length(logml2) max_len <- max(c(len1, len2)) if (!all(c(len1, len2) == max_len)) { warning("Not all objects provide ", max_len, " logmls. Some values are recycled.", call. = FALSE) logml1 <- rep(logml1, length.out = max_len) logml2 <- rep(logml2, length.out = max_len) } bf <- .bf_calc(logml1, logml2, log = log) bf_median_based <- .bf_calc(median1, median2, log = log) out <- list(bf = bf, bf_median_based = bf_median_based, log = log) class(out) <- "bf_bridge_list" try({ mc <- match.call() name1 <- deparse(mc[["x1"]]) name2 <- deparse(mc[["x2"]]) attr(out, "model_names") <- c(name1, name2) }, silent = TRUE) return(out) } #' @rdname bf #' @export bf.default <- function(x1, x2, log = FALSE, ...) { if (!is.numeric(c(x1, x2))) { stop("logml values need to be numeric", call. = FALSE) } if (length(x1) > 1 || length(x2) > 1) { stop("Both logmls need to be scalar values.", call. = FALSE) } bf <- .bf_calc(x1, x2, log = log) out <- list(bf = bf, log = log) class(out) <- "bf_default" try({ mc <- match.call() name1 <- deparse(mc[["x1"]]) name2 <- deparse(mc[["x2"]]) attr(out, "model_names") <- c(name1, name2) }, silent = TRUE) return(out) } ######## Methods for bf objects: #' @method print bf_bridge #' @export print.bf_bridge <- function(x, ...) { if(!is.null(attr(x, "model_names"))) { model_names <- attr(x, "model_names") } else { model_names <- c("x1", "x2") } cat("Estimated ", if (x$log) "log " else NULL , "Bayes factor in favor of ", model_names[1], " over ", model_names[2], ": ", formatC(x$bf, digits = 5, format = "f"), "\n", sep = "") } #' @method print bf_bridge_list #' @export print.bf_bridge_list <- function(x, na.rm = TRUE,...) { if(!is.null(attr(x, "model_names"))) { model_names <- attr(x, "model_names") } else { model_names <- c("x1", "x2") } cat("Estimated ", if (x$log) "log " else NULL , "Bayes factor (based on medians of log marginal likelihood estimates)\n", " in favor of ", model_names[1], " over ", model_names[2], ": ", formatC(x$bf_median_based, digits = 5, format = "f"), "\nRange of estimates: ", formatC(range(x$bf, na.rm=na.rm)[1], digits = 5, format = "f"), " to ", formatC(range(x$bf, na.rm = na.rm)[2], digits = 5, format = "f"), "\nInterquartile range: ", formatC(stats::IQR(x$bf, na.rm = na.rm), digits = 5, format = "f"), "\n", sep = "") if (any(is.na(x$bf))) warning(sum(is.na(x$bf)), " log Bayes factor estimate(s) are NAs.", call. = FALSE) } #' @method print bf_default #' @export print.bf_default <- function(x, ...) 
{ if(!is.null(attr(x, "model_names"))) { model_names <- attr(x, "model_names") } else { model_names <- c("Model 1", "Model 2") } cat(if (x$log) "Log " else NULL , "Bayes factor in favor of ", model_names[1], " over ", model_names[2], ": ", formatC(x$bf, digits = 5, format = "f"), "\n", sep = "") } bridgesampling/R/bridge_sampler_warp3.R0000644000176200001440000002773013740627724017705 0ustar liggesusers .bridge.sampler.warp3 <- function( samples_4_fit, # matrix with already transformed samples for fitting the # proposal (rows are samples), colnames are "trans_x" where # x is the parameter name samples_4_iter, # matrix with already transformed samples for the # iterative scheme (rows are samples), colnames are "trans_x" # where x is the parameter name neff, # effective sample size of samples_4_iter (i.e., already transformed samples), scalar log_posterior, ..., data, lb, ub, transTypes, # types of transformations (unbounded/lower/upperbounded) for the different parameters (named character vector) param_types, # Sample space for transformations (real, circular, simplex) cores, repetitions, packages, varlist, envir, rcppFile, maxiter, silent, verbose, r0, tol1, tol2) { if (is.null(neff)) neff <- nrow(samples_4_iter) n_post <- nrow(samples_4_iter) # get mean & covariance matrix and generate samples from proposal m <- apply(samples_4_fit, 2, mean) V_tmp <- cov(samples_4_fit) V <- as.matrix(nearPD(V_tmp)$mat) # make sure that V is positive-definite L <- t(chol(V)) # sample from multivariate normal distribution and evaluate for posterior samples and generated samples q12 <- dmvnorm((samples_4_iter - matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE)) %*% t(solve(L)), sigma = diag(ncol(samples_4_fit)), log = TRUE) q22 <- vector(mode = "list", length = repetitions) gen_samples <- vector(mode = "list", length = repetitions) for (i in seq_len(repetitions)) { gen_samples[[i]] <- rmvnorm(n_post, sigma = diag(ncol(samples_4_fit))) colnames(gen_samples[[i]]) <- colnames(samples_4_iter) q22[[i]] <- dmvnorm(gen_samples[[i]], sigma = diag(ncol(samples_4_fit)), log = TRUE) } e <- as.brob( exp(1) ) # evaluate log of likelihood times prior for posterior samples and generated samples q21 <- vector(mode = "list", length = repetitions) if (cores == 1) { q11 <- log(e^(apply(.invTransform2Real(samples_4_iter, lb, ub, param_types), 1, log_posterior, data = data,...) + .logJacobian(samples_4_iter, transTypes, lb, ub)) + e^(apply(.invTransform2Real(matrix(2*m, nrow = n_post, ncol = length(m), byrow = TRUE) - samples_4_iter, lb, ub, param_types), 1, log_posterior, data = data, ...) + .logJacobian(matrix(2*m, nrow = n_post, ncol = length(m), byrow = TRUE) - samples_4_iter, transTypes, lb, ub))) for (i in seq_len(repetitions)) { q21[[i]] <- log(e^(apply(.invTransform2Real(matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE) - gen_samples[[i]] %*% t(L), lb, ub, param_types), 1, log_posterior, data = data, ...) + .logJacobian(matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE) - gen_samples[[i]] %*% t(L), transTypes, lb, ub)) + e^(apply(.invTransform2Real(matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE) + gen_samples[[i]] %*% t(L), lb, ub, param_types), 1, log_posterior, data = data, ...) 
+ .logJacobian(matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE) + gen_samples[[i]] %*% t(L), transTypes, lb, ub))) } } else if (cores > 1) { if ( .Platform$OS.type == "unix") { split1a <- .split_matrix(matrix=.invTransform2Real(samples_4_iter, lb, ub, param_types), cores=cores) split1b <- .split_matrix(matrix=.invTransform2Real( matrix(2*m, nrow = n_post, ncol = length(m), byrow = TRUE) - samples_4_iter, lb, ub, param_types ), cores=cores) q11a <- parallel::mclapply(split1a, FUN = function(x) apply(x, 1, log_posterior, data = data, ...), mc.preschedule = FALSE, mc.cores = cores) q11b <- parallel::mclapply(split1b, FUN = function(x) apply(x, 1, log_posterior, data = data, ...), mc.preschedule = FALSE, mc.cores = cores) q11 <- log(e^(unlist(q11a) + .logJacobian(samples_4_iter, transTypes, lb, ub)) + e^(unlist(q11b) + .logJacobian(matrix(2*m, nrow = n_post, ncol = length(m), byrow = TRUE) - samples_4_iter, transTypes, lb, ub))) for (i in seq_len(repetitions)) { tmp_mat2 <- gen_samples[[i]] %*% t(L) split2a <- .split_matrix(matrix=.invTransform2Real( matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE) - tmp_mat2, lb, ub, param_types ), cores=cores) split2b <- .split_matrix(matrix=.invTransform2Real( matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE) + tmp_mat2, lb, ub, param_types ), cores=cores) q21a <- parallel::mclapply(split2a, FUN = function(x) apply(x, 1, log_posterior, data = data, ...), mc.preschedule = FALSE, mc.cores = cores) q21b <- parallel::mclapply(split2b, FUN = function(x) apply(x, 1, log_posterior, data = data, ...), mc.preschedule = FALSE, mc.cores = cores) q21[[i]] <- log(e^(unlist(q21a) + .logJacobian(matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE) - tmp_mat2, transTypes, lb, ub)) + e^(unlist(q21b) + .logJacobian(matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE) + tmp_mat2, transTypes, lb, ub))) } } else { cl <- parallel::makeCluster(cores, useXDR = FALSE) sapply(packages, function(x) parallel::clusterCall(cl = cl, "require", package = x, character.only = TRUE)) parallel::clusterExport(cl = cl, varlist = varlist, envir = envir) if ( ! is.null(rcppFile)) { parallel::clusterExport(cl = cl, varlist = "rcppFile", envir = parent.frame()) parallel::clusterCall(cl = cl, "require", package = "Rcpp", character.only = TRUE) parallel::clusterEvalQ(cl = cl, Rcpp::sourceCpp(file = rcppFile)) } else if (is.character(log_posterior)) { parallel::clusterExport(cl = cl, varlist = log_posterior, envir = envir) } q11 <- log(e^(parallel::parRapply(cl = cl, x = .invTransform2Real(samples_4_iter, lb, ub, param_types), log_posterior, data = data, ...) + .logJacobian(samples_4_iter, transTypes, lb, ub)) + e^(parallel::parRapply(cl = cl, x = .invTransform2Real(matrix(2*m, nrow = n_post, ncol = length(m), byrow = TRUE) - samples_4_iter, lb, ub, param_types), log_posterior, data = data, ...) + .logJacobian(matrix(2*m, nrow = n_post, ncol = length(m), byrow = TRUE) - samples_4_iter, transTypes, lb, ub))) for (i in seq_len(repetitions)) { q21[[i]] <- log(e^(parallel::parRapply(cl = cl, x = .invTransform2Real(matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE) - gen_samples[[i]] %*% t(L), lb, ub, param_types), log_posterior, data = data, ...) + .logJacobian(matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE) - gen_samples[[i]] %*% t(L), transTypes, lb, ub)) + e^(parallel::parRapply(cl = cl, x = .invTransform2Real(matrix(m, nrow = n_post, ncol = length(m),byrow = TRUE) + gen_samples[[i]] %*% t(L), lb, ub, param_types), log_posterior, data = data, ...) 
+ .logJacobian(matrix(m, nrow = n_post, ncol = length(m), byrow = TRUE) + gen_samples[[i]] %*% t(L), transTypes, lb, ub))) } parallel::stopCluster(cl) } } if (any(is.infinite(q11))) { warning(sum(is.infinite(q11)), " of the ", length(q11)," log_prob() evaluations on the warp-transformed posterior draws produced -Inf/Inf.", call. = FALSE) } for (i in seq_len(repetitions)) { if (any(is.infinite(q21[[i]]))) { warning(sum(is.infinite(q21[[i]])), " of the ", length(q21[[i]])," log_prob() evaluations on the warp-transformed proposal draws produced -Inf/Inf.", call. = FALSE) } } if (any(is.na(q11))) { warning(sum(is.na(q11)), " evaluation(s) of log_prob() on the warp-transformed posterior draws produced NA and have been replaced by -Inf.", call. = FALSE) q11[is.na(q11)] <- -Inf } for (i in seq_len(repetitions)) { if (any(is.na(q21[[i]]))) { warning(sum(is.na(q21[[i]])), " evaluation(s) of log_prob() on the warp-transformed proposal draws produced NA and have been replaced by -Inf.", call. = FALSE) q21[[i]][is.na(q21[[i]])] <- -Inf } } if(verbose) { print("summary(q12): (log_dens of proposal for posterior samples)") print(summary(q12)) print("summary(q22): (log_dens of proposal for generated samples)") print(lapply(q22, summary)) print("summary(q11): (log_dens of posterior for posterior samples)") print(summary(q11)) print("summary(q21): (log_dens of posterior for generated samples)") print(lapply(q21, summary)) } logml <- numeric(repetitions) niter <- numeric(repetitions) # run iterative updating scheme to compute log of marginal likelihood for (i in seq_len(repetitions)) { tmp <- .run.iterative.scheme(q11 = q11, q12 = q12, q21 = q21[[i]], q22 = q22[[i]], r0 = r0, tol = tol1, L = L, method = "warp3", maxiter = maxiter, silent = silent, criterion = "r", neff = neff) if (is.na(tmp$logml) & !is.null(tmp$r_vals)) { warning("logml could not be estimated within maxiter, rerunning with adjusted starting value. \nEstimate might be more variable than usual.", call. = FALSE) lr <- length(tmp$r_vals) # use geometric mean as starting value r0_2 <- sqrt(tmp$r_vals[[lr - 1]] * tmp$r_vals[[lr]]) tmp <- .run.iterative.scheme(q11 = q11, q12 = q12, q21 = q21[[i]], q22 = q22[[i]], r0 = r0_2, tol = tol2, L = L, method = "warp3", maxiter = maxiter, silent = silent, criterion = "logml", neff = neff) tmp$niter <- maxiter + tmp$niter } logml[i] <- tmp$logml niter[i] <- tmp$niter if (niter[i] == maxiter) warning("logml could not be estimated within maxiter, returning NA.", call. = FALSE) } if (repetitions == 1) { out <- list(logml = logml, niter = niter, method = "warp3", q11 = q11, q12 = q12, q21 = q21[[1]], q22 = q22[[1]]) class(out) <- "bridge" } else if (repetitions > 1) { out <- list(logml = logml, niter = niter, method = "warp3", repetitions = repetitions) class(out) <- "bridge_list" } return(out) } bridgesampling/R/ier-data.R0000644000176200001440000000211213663004467015260 0ustar liggesusers#' Standardized International Exchange Rate Changes from 1975 to 1986 #' #' This data set contains the changes in monthly international exchange rates for pounds sterling from January 1975 to December 1986 obtained from West and Harrison (1997, pp. 612-615). Currencies tracked are US Dollar (column \code{us_dollar}), Canadian Dollar (column \code{canadian_dollar}), Japanese Yen (column \code{yen}), French Franc (column \code{franc}), Italian Lira (column \code{lira}), and the (West) German Mark (column \code{mark}). Each series has been standardized with respect to its sample mean and standard deviation.
#' #' @docType data #' @keywords dataset #' @name ier #' @usage ier #' @format A matrix with 143 rows and 6 columns. #' @source West, M., Harrison, J. (1997). \emph{Bayesian forecasting and dynamic models} (2nd ed.). Springer-Verlag, New York. #' #' Lopes, H. F., West, M. (2004). Bayesian model assessment in factor analysis. \emph{Statistica Sinica, 14}, 41-67. \url{https://www.jstor.org/stable/24307179} #' @encoding UTF-8 #' #' @example examples/example.ier.R NULL bridgesampling/R/bridge_sampler_normal.R0000644000176200001440000002052713740627613020133 0ustar liggesusers .bridge.sampler.normal <- function( samples_4_fit, # matrix with already transformed samples for fitting the # proposal (rows are samples), colnames are "trans_x" where # x is the parameter name samples_4_iter, # matrix with already transformed samples for the # iterative scheme (rows are samples), colnames are "trans_x" # where x is the parameter name neff, # effective sample size of samples_4_iter (i.e., already transformed samples), scalar log_posterior, ..., data, lb, ub, transTypes, # types of transformations (unbounded/lower/upperbounded) for the different parameters (named character vector) param_types, # Sample space for transformations (real, circular, simplex) cores, repetitions, packages, varlist, envir, rcppFile, maxiter, silent, verbose, r0, tol1, tol2) { if (is.null(neff)) neff <- nrow(samples_4_iter) n_post <- nrow(samples_4_iter) # get mean & covariance matrix and generate samples from proposal m <- apply(samples_4_fit, 2, mean) V_tmp <- cov(samples_4_fit) V <- as.matrix(nearPD(V_tmp)$mat) # make sure that V is positive-definite # sample from multivariate normal distribution and evaluate for posterior samples and generated samples q12 <- dmvnorm(samples_4_iter, mean = m, sigma = V, log = TRUE) gen_samples <- vector(mode = "list", length = repetitions) q22 <- vector(mode = "list", length = repetitions) for (i in seq_len(repetitions)) { gen_samples[[i]] <- rmvnorm(n_post, mean = m, sigma = V) colnames(gen_samples[[i]]) <- colnames(samples_4_iter) q22[[i]] <- dmvnorm(gen_samples[[i]], mean = m, sigma = V, log = TRUE) } # evaluate log of likelihood times prior for posterior samples and generated samples q21 <- vector(mode = "list", length = repetitions) if (cores == 1) { q11 <- apply(.invTransform2Real(samples_4_iter, lb, ub, param_types), 1, log_posterior, data = data, ...) + .logJacobian(samples_4_iter, transTypes, lb, ub) for (i in seq_len(repetitions)) { q21[[i]] <- apply(.invTransform2Real(gen_samples[[i]], lb, ub, param_types), 1, log_posterior, data = data, ...) 
+ .logJacobian(gen_samples[[i]], transTypes, lb, ub) } } else if (cores > 1) { if ( .Platform$OS.type == "unix") { split1 <- .split_matrix(matrix=.invTransform2Real(samples_4_iter, lb, ub, param_types), cores=cores) q11 <- parallel::mclapply(split1, FUN = function(x) apply(x, 1, log_posterior, data = data, ...), mc.preschedule = FALSE, mc.cores = cores) q11 <- unlist(q11) + .logJacobian(samples_4_iter, transTypes, lb, ub) for (i in seq_len(repetitions)) { split2 <- .split_matrix(matrix=.invTransform2Real(gen_samples[[i]], lb, ub, param_types), cores = cores) q21[[i]] <- parallel::mclapply(split2, FUN = function(x) apply(x, 1, log_posterior, data = data, ...), mc.preschedule = FALSE, mc.cores = cores) q21[[i]] <- unlist(q21[[i]]) + .logJacobian(gen_samples[[i]], transTypes, lb, ub) } } else { cl <- parallel::makeCluster(cores, useXDR = FALSE) sapply(packages, function(x) parallel::clusterCall(cl = cl, "require", package = x, character.only = TRUE)) parallel::clusterExport(cl = cl, varlist = varlist, envir = envir) if ( ! is.null(rcppFile)) { parallel::clusterExport(cl = cl, varlist = "rcppFile", envir = parent.frame()) parallel::clusterCall(cl = cl, "require", package = "Rcpp", character.only = TRUE) parallel::clusterEvalQ(cl = cl, Rcpp::sourceCpp(file = rcppFile)) } else if (is.character(log_posterior)) { parallel::clusterExport(cl = cl, varlist = log_posterior, envir = envir) } q11 <- parallel::parRapply(cl = cl, x = .invTransform2Real(samples_4_iter, lb, ub, param_types), log_posterior, data = data, ...) + .logJacobian(samples_4_iter, transTypes, lb, ub) for (i in seq_len(repetitions)) { q21[[i]] <- parallel::parRapply(cl = cl, x = .invTransform2Real(gen_samples[[i]], lb, ub, param_types), log_posterior, data = data, ...) + .logJacobian(gen_samples[[i]], transTypes, lb, ub) } parallel::stopCluster(cl) } } if(verbose) { print("summary(q12): (log_dens of proposal (i.e., with dmvnorm) for posterior samples)") print(summary(q12)) print("summary(q22): (log_dens of proposal (i.e., with dmvnorm) for generated samples)") print(lapply(q22, summary)) print("summary(q11): (log_dens of posterior (i.e., with log_posterior) for posterior samples)") print(summary(q11)) print("summary(q21): (log_dens of posterior (i.e., with log_posterior) for generated samples)") print(lapply(q21, summary)) .PROPOSALS <- vector("list", repetitions) # for (i in seq_len(repetitions)) { # .PROPOSALS[[i]] <- .invTransform2Real(gen_samples[[i]], lb, ub, param_types) # } # assign(".PROPOSALS", .PROPOSALS, pos = .GlobalEnv) # message("All proposal samples written to .GlobalEnv as .PROPOSALS") } if (any(is.infinite(q11))) { warning(sum(is.infinite(q11)), " of the ", length(q11)," log_prob() evaluations on the posterior draws produced -Inf/Inf.", call. = FALSE) } for (i in seq_len(repetitions)) { if (any(is.infinite(q21[[i]]))) { warning(sum(is.infinite(q21[[i]])), " of the ", length(q21[[i]])," log_prob() evaluations on the proposal draws produced -Inf/Inf.", call. = FALSE) } } if (any(is.na(q11))) { warning(sum(is.na(q11)), " evaluation(s) of log_prob() on the posterior draws produced NA and have been replaced by -Inf.", call. = FALSE) q11[is.na(q11)] <- -Inf } for (i in seq_len(repetitions)) { if (all(is.na(q21[[i]]))) { stop("Evaluations of log_prob() on all proposal draws produced NA.\n", "E.g., rounded to 3 digits (use verbose = TRUE for all proposal samples):\n", deparse(round( .invTransform2Real(gen_samples[[i]], lb, ub, param_types)[1,], 3), width.cutoff = 500L), call. 
= FALSE) } if (any(is.na(q21[[i]]))) { warning(sum(is.na(q21[[i]])), " evaluation(s) of log_prob() on the proposal draws produced NA and have been replaced by -Inf.", call. = FALSE) q21[[i]][is.na(q21[[i]])] <- -Inf } } logml <- numeric(repetitions) niter <- numeric(repetitions) # run iterative updating scheme to compute log of marginal likelihood for (i in seq_len(repetitions)) { tmp <- .run.iterative.scheme(q11 = q11, q12 = q12, q21 = q21[[i]], q22 = q22[[i]], r0 = r0, tol = tol1, L = NULL, method = "normal", maxiter = maxiter, silent = silent, criterion = "r", neff = neff) if (is.na(tmp$logml) & !is.null(tmp$r_vals)) { warning("logml could not be estimated within maxiter, rerunning with adjusted starting value. \nEstimate might be more variable than usual.", call. = FALSE) lr <- length(tmp$r_vals) # use geometric mean as starting value r0_2 <- sqrt(tmp$r_vals[[lr - 1]] * tmp$r_vals[[lr]]) tmp <- .run.iterative.scheme(q11 = q11, q12 = q12, q21 = q21[[i]], q22 = q22[[i]], r0 = r0_2, tol = tol2, L = NULL, method = "normal", maxiter = maxiter, silent = silent, criterion = "logml", neff = neff) tmp$niter <- maxiter + tmp$niter } logml[i] <- tmp$logml niter[i] <- tmp$niter if (niter[i] == maxiter) warning("logml could not be estimated within maxiter, returning NA.", call. = FALSE) } if (repetitions == 1) { out <- list(logml = logml, niter = niter, method = "normal", q11 = q11, q12 = q12, q21 = q21[[1]], q22 = q22[[1]]) class(out) <- "bridge" } else if (repetitions > 1) { out <- list(logml = logml, niter = niter, method = "normal", repetitions = repetitions) class(out) <- "bridge_list" } return(out) } bridgesampling/R/bridge_sampler.R0000644000176200001440000010005714026403722016547 0ustar liggesusers#'Computes log marginal likelihood via bridge sampling. #'@title Log Marginal Likelihood via Bridge Sampling #'@name bridge_sampler #'@param samples an \code{mcmc.list} object, a fitted \code{stanfit} object, a #' \code{stanreg} object, an \code{rjags} object, a \code{runjags} object, or a #' \code{matrix} with posterior samples (\code{colnames} need to correspond to #' parameter names in \code{lb} and \code{ub}) with posterior samples. #'@param log_posterior function or name of function that takes a parameter #' vector and the \code{data} as input and returns the log of the unnormalized #' posterior density (i.e., a scalar value). If the function name is passed, #' the function should exist in the \code{.GlobalEnv}. For special behavior if #' \code{cores > 1} see \code{Details}. #'@param ... additional arguments passed to \code{log_posterior}. Ignored for #' the \code{stanfit} and \code{stanreg} methods. #'@param data data object which is used in \code{log_posterior}. #'@param stanfit_model for the \code{stanfit} method, an additional object of #' class \code{"stanfit"} with the same model as \code{samples}, which will be #' used for evaluating the \code{log_posterior} (i.e., it does not need to #' contain any samples). The default is to use \code{samples}. In case #' \code{samples} was compiled in a different R session or on another computer #' with a different OS or setup, the \code{samples} model usually cannot be #' used for evaluation. In this case, one can compile the model on the current #' computer with \code{iter = 0} and pass it here (this usually needs to be #' done before \code{samples} is loaded). #'@param lb named vector with lower bounds for parameters. #'@param ub named vector with upper bounds for parameters. #'@param repetitions number of repetitions. 
#'@param method either \code{"normal"} or \code{"warp3"}. #'@param cores number of cores used for evaluating \code{log_posterior}. On #' unix-like systems (where \code{.Platform$OS.type == "unix"} evaluates to #' \code{TRUE}; e.g., Linux and Mac OS) forking via \code{\link{mclapply}} is #' used. Hence elements needed for evaluation should be in the #' \code{\link{.GlobalEnv}}. For other systems (e.g., Windows) #' \code{\link{makeCluster}} is used and further arguments specified below will #' be used. #'@param use_neff Boolean which determines whether the effective sample size is #' used in the optimal bridge function. Default is TRUE. If FALSE, the number #' of samples is used instead. If \code{samples} is a \code{matrix}, it is #' assumed that the \code{matrix} contains the samples of one chain in order. #' If \code{samples} come from more than one chain, we recommend to use an #' \code{mcmc.list} object for optimal performance. #'@param packages character vector with names of packages needed for evaluating #' \code{log_posterior} in parallel (only relevant if \code{cores > 1} and #' \code{.Platform$OS.type != "unix"}). #'@param varlist character vector with names of variables needed for evaluating #' \code{log_posterior} (only needed if \code{cores > 1} and #' \code{.Platform$OS.type != "unix"} as these objects will be exported to the #' nodes). These objects need to exist in \code{envir}. #'@param envir specifies the environment for \code{varlist} (only needed if #' \code{cores > 1} and \code{.Platform$OS.type != "unix"} as these objects #' will be exported to the nodes). Default is \code{\link{.GlobalEnv}}. #'@param rcppFile in case \code{cores > 1} and \code{log_posterior} is an #' \code{Rcpp} function, \code{rcppFile} specifies the path to the cpp file #' (will be compiled on all cores). #'@param maxiter maximum number of iterations for the iterative updating scheme. #' Default is 1,000 to avoid infinite loops. #'@param param_types character vector of length \code{ncol(samples)} with #' \code{"real"}, \code{"simplex"} or \code{"circular"}. For all regular #' bounded or unbounded continuous parameters, this should just be #' \code{"real"}. However, if there are parameters which lie on a simplex or on #' the circle, this should be noted here. Simplex parameters are parameters #' which are bounded below by zero and collectively sum to one, such as weights #' in a mixture model. For these, the stick-breaking transformation is #' performed as described in the Stan reference manual. The circular variables #' are given a numerical representation to which the normal distribution is #' most likely a good fit. Only possible to use with #' \code{bridge_sampler.matrix}. #'@param silent Boolean which determines whether to print the number of #' iterations of the updating scheme to the console. Default is FALSE. #'@param verbose Boolean. Should internal debug information be printed to #' console? Default is \code{FALSE}. #'@details Bridge sampling is implemented as described in Meng and Wong (1996, #' see equation 4.1) using the "optimal" bridge function. When \code{method = #' "normal"}, the proposal distribution is a multivariate normal distribution #' with mean vector equal to the sample mean vector of \code{samples} and #' covariance matrix equal to the sample covariance matrix of \code{samples}. #' For a recent tutorial on bridge sampling, see Gronau et al. (in press). 
#' #' When \code{method = "warp3"}, the proposal distribution is a standard #' multivariate normal distribution and the posterior distribution is "warped" #' (Meng & Schilling, 2002) so that it has the same mean vector, covariance #' matrix, and skew as the samples. \code{method = "warp3"} takes approximately #' twice as long as \code{method = "normal"}. #' #' Note that for the \code{matrix} method, the lower and upper bound of a #' parameter cannot be a function of the bounds of another parameter. #' Furthermore, constraints that depend on multiple parameters of the model are #' not supported. This usually excludes, for example, parameters that #' constitute a covariance matrix or sets of parameters that need to sum to #' one. #' #' However, if the retransformations are part of the model itself and the #' \code{log_posterior} accepts parameters on the real line and performs the #' appropriate Jacobian adjustments, such as done for \code{stanfit} and #' \code{stanreg} objects, such constraints are obviously possible (i.e., we #' currently do not know of any parameter supported within Stan that does not #' work with the current implementation through a \code{stanfit} object). #' #' \subsection{Parallel Computation}{ On unix-like systems forking is used via #' \code{\link{mclapply}}. Hence elements needed for evaluation of #' \code{log_posterior} should be in the \code{\link{.GlobalEnv}}. #' #' On other OSes (e.g., Windows), things can get more complicated. For normal #' parallel computation, the \code{log_posterior} function can be passed as #' both function and function name. If the latter, it needs to exist in the #' environment specified in the \code{envir} argument. For parallel computation #' when using an \code{Rcpp} function, \code{log_posterior} can only be passed #' as the function name (i.e., character). This function needs to result from #' calling \code{sourceCpp} on the file specified in \code{rcppFile}. #' #' Due to the way \code{rstan} currently works, parallel computations with #' \code{stanfit} and \code{stanreg} objects only work with forking (i.e., NOT #' on Windows). } #'@return if \code{repetitions = 1}, returns a list of class \code{"bridge"} #' with components: \itemize{ \item \code{logml}: estimate of log marginal #' likelihood. \item \code{niter}: number of iterations of the iterative #' updating scheme. \item \code{method}: bridge sampling method that was used #' to obtain the estimate. \item \code{q11}: log posterior evaluations for #' posterior samples. \item \code{q12}: log proposal evaluations for posterior #' samples. \item \code{q21}: log posterior evaluations for samples from #' proposal. \item \code{q22}: log proposal evaluations for samples from #' proposal. } if \code{repetitions > 1}, returns a list of class #' \code{"bridge_list"} with components: \itemize{ \item \code{logml}: numeric #' vector with estimates of log marginal likelihood. \item \code{niter}: #' numeric vector with number of iterations of the iterative updating scheme #' for each repetition. \item \code{method}: bridge sampling method that was #' used to obtain the estimates. \item \code{repetitions}: number of #' repetitions. } #'@section Warning: Note that the results depend strongly on the parameter #' priors. Therefore, it is strongly advised to think carefully about the #' priors before calculating marginal likelihoods. For example, the prior #' choices implemented in \pkg{rstanarm} or \pkg{brms} might not be optimal #' from a testing point of view. 
We recommend using priors that have been #' chosen from a testing and not a purely estimation perspective. #' #' Also note that for testing, the number of posterior samples usually needs to #' be substantially larger than for estimation. #'@note To be able to use a \code{stanreg} object for \code{samples}, the user #' crucially needs to have specified the \code{diagnostic_file} when fitting #' the model in \pkg{rstanarm}. #'@author Quentin F. Gronau and Henrik Singmann. Parallel computing (i.e., #' \code{cores > 1}) and the \code{stanfit} method use code from \code{rstan} #' by Jiqiang Guo, Jonah Gabry, and Ben Goodrich. Ben Goodrich added the #' \code{stanreg} method. Kees Mulder added methods for simplex and circular #' variables. #'@references #' Gronau, Q. F., Singmann, H., & Wagenmakers, E.-J. (2020). bridgesampling: An #' R Package for Estimating Normalizing Constants. \emph{Journal of Statistical #' Software, 92}. \doi{10.18637/jss.v092.i10} #' #' Gronau, Q. F., Sarafoglou, A., Matzke, D., Ly, A., Boehm, U., #' Marsman, M., Leslie, D. S., Forster, J. J., Wagenmakers, E.-J., & #' Steingroever, H. (in press). A tutorial on bridge sampling. \emph{Journal of #' Mathematical Psychology}. \url{https://arxiv.org/abs/1703.05984} \cr #' \code{vignette("bridgesampling_tutorial")} #' #' Gronau, Q. F., Wagenmakers, E.-J., Heck, D. W., & Matzke, D. (2017). \emph{A #' simple method for comparing complex models: Bayesian model comparison for #' hierarchical multinomial processing tree models using Warp-III bridge #' sampling}. Manuscript submitted for publication. #' \url{https://psyarxiv.com/yxhfm} #' #' Meng, X.-L., & Wong, W. H. (1996). Simulating ratios of normalizing #' constants via a simple identity: A theoretical exploration. \emph{Statistica #' Sinica, 6}, 831-860. #' \url{http://www3.stat.sinica.edu.tw/statistica/j6n4/j6n43/j6n43.htm} #' #' Meng, X.-L., & Schilling, S. (2002). Warp bridge sampling. \emph{Journal of #' Computational and Graphical Statistics, 11(3)}, 552-586. #' \doi{10.1198/106186002457} #' #' Overstall, A. M., & Forster, J. J. (2010). Default Bayesian model #' determination methods for generalised linear mixed models. #' \emph{Computational Statistics & Data Analysis, 54}, 3269-3288. #' \doi{10.1016/j.csda.2010.03.008} #'@example examples/example.bridge_sampler.R #' #'@seealso \code{\link{bf}} allows the user to calculate Bayes factors and #' \code{\link{post_prob}} allows the user to calculate posterior model #' probabilities from bridge sampling estimates. \code{\link{bridge-methods}} #' lists some additional methods that automatically invoke the #' \code{\link{error_measures}} function. #' #'@importFrom mvtnorm rmvnorm dmvnorm #'@importFrom Matrix nearPD #'@import Brobdingnag #'@importFrom stringr str_sub #'@importFrom stats qnorm pnorm dnorm median cov var #'@export bridge_sampler <- function(samples, ...) { UseMethod("bridge_sampler", samples) } #' @rdname bridge_sampler #' @export bridge_sampler.stanfit <- function(samples = NULL, stanfit_model = samples, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, maxiter = 1000, silent = FALSE, verbose = FALSE, ...) { # cores > 1 only for unix: if (!(.Platform$OS.type == "unix") & (cores != 1)) { warning("cores > 1 only possible on Unix/MacOs. Uses 'cores = 1' instead.", call.
= FALSE) cores <- 1L } # convert samples into matrix if (!requireNamespace("rstan")) stop("package rstan required") ex <- rstan::extract(samples, permuted = FALSE) skeleton <- .create_skeleton(samples@sim$pars_oi, samples@par_dims[samples@sim$pars_oi]) upars <- apply(ex, 1:2, FUN = function(theta) { rstan::unconstrain_pars(stanfit_model, .rstan_relist(theta, skeleton)) }) if (length(dim(upars)) == 2) { # for one parameter models dim(upars) <- c(1, dim(upars)) } nr <- dim(upars)[2] samples4fit_index <- seq_len(nr) %in% seq_len(round(nr/2)) # split samples in two parts samples_4_fit <- apply(upars[,samples4fit_index,,drop=FALSE], 1, rbind) samples_4_iter_stan <- upars[,!samples4fit_index,,drop=FALSE] samples_4_iter_tmp <- vector("list", dim(upars)[3]) for (i in seq_along(samples_4_iter_tmp)) { samples_4_iter_tmp[[i]] <- coda::as.mcmc(t(samples_4_iter_stan[,,i])) } samples_4_iter_tmp <- coda::as.mcmc.list(samples_4_iter_tmp) if (use_neff) { neff <- tryCatch(median(coda::effectiveSize(samples_4_iter_tmp)), error = function(e) { warning("effective sample size cannot be calculated, has been replaced by number of samples.", call. = FALSE) return(NULL) }) } else { neff <- NULL } samples_4_iter <- apply(samples_4_iter_stan, 1, rbind) parameters <- paste0("x", (seq_len(dim(upars)[1]))) transTypes <- rep("unbounded", length(parameters)) names(transTypes) <- parameters # prepare lb and ub lb <- rep(-Inf, length(parameters)) ub <- rep(Inf, length(parameters)) names(lb) <- names(ub) <- parameters colnames(samples_4_iter) <- paste0("trans_", parameters) colnames(samples_4_fit) <- paste0("trans_", parameters) # run bridge sampling if (cores == 1) { bridge_output <- do.call(what = paste0(".bridge.sampler.", method), args = list(samples_4_fit = samples_4_fit, samples_4_iter = samples_4_iter, neff = neff, log_posterior = .stan_log_posterior, data = list(stanfit = stanfit_model), lb = lb, ub = ub, param_types = rep("real", ncol(samples_4_fit)), transTypes = transTypes, repetitions = repetitions, cores = cores, packages = "rstan", maxiter = maxiter, silent = silent, verbose = verbose, r0 = 0.5, tol1 = 1e-10, tol2 = 1e-4)) } else { bridge_output <- do.call(what = paste0(".bridge.sampler.", method), args = list(samples_4_fit = samples_4_fit, samples_4_iter = samples_4_iter, neff = neff, log_posterior = .stan_log_posterior, data = list(stanfit = stanfit_model), lb = lb, ub = ub, param_types = rep("real", ncol(samples_4_fit)), transTypes = transTypes, repetitions = repetitions, varlist = "stanfit", envir = sys.frame(sys.nframe()), cores = cores, packages = "rstan", maxiter = maxiter, silent = silent, verbose = verbose, r0 = 0.5, tol1 = 1e-10, tol2 = 1e-4)) } return(bridge_output) } #' @rdname bridge_sampler #' @export bridge_sampler.mcmc.list <- function(samples = NULL, log_posterior = NULL, ..., data = NULL, lb = NULL, ub = NULL, repetitions = 1, param_types = rep("real", ncol(samples[[1]])), method = "normal", cores = 1, use_neff = TRUE, packages = NULL, varlist = NULL, envir = .GlobalEnv, rcppFile = NULL, maxiter = 1000, silent = FALSE, verbose = FALSE) { # split samples in two parts nr <- nrow(samples[[1]]) samples4fit_index <- seq_len(nr) %in% seq_len(round(nr/2)) samples_4_fit_tmp <- samples[samples4fit_index,,drop=FALSE] samples_4_fit_tmp <- do.call("rbind", samples_4_fit_tmp) # check lb and ub if (!is.numeric(lb)) stop("lb needs to be numeric", call. = FALSE) if (!is.numeric(ub)) stop("ub needs to be numeric", call. 
= FALSE) if (!all(colnames(samples_4_fit_tmp) %in% names(lb))) stop("lb does not contain all parameters.", call. = FALSE) if (!all(colnames(samples_4_fit_tmp) %in% names(ub))) stop("ub does not contain all parameters.", call. = FALSE) # transform parameters to real line tmp <- .transform2Real(samples_4_fit_tmp, lb, ub) samples_4_fit <- tmp$theta_t transTypes <- tmp$transTypes samples_4_iter_tmp <- lapply(samples[!samples4fit_index,,drop=FALSE], function(x) .transform2Real(x, lb = lb, ub = ub)$theta_t) # compute effective sample size if (use_neff) { samples_4_iter_tmp <- coda::mcmc.list(lapply(samples_4_iter_tmp, coda::mcmc)) neff <- tryCatch(median(coda::effectiveSize(samples_4_iter_tmp)), error = function(e) { warning("effective sample size cannot be calculated, has been replaced by number of samples.", call. = FALSE) return(NULL) }) } else { neff <- NULL } # convert to matrix samples_4_iter <- do.call("rbind", samples_4_iter_tmp) # run bridge sampling out <- do.call(what = paste0(".bridge.sampler.", method), args = list(samples_4_fit = samples_4_fit, samples_4_iter = samples_4_iter, neff = neff, log_posterior = log_posterior, "..." = ..., data = data, lb = lb, ub = ub, transTypes = transTypes, repetitions = repetitions, cores = cores, packages = packages, varlist = varlist, envir = envir, param_types = param_types, rcppFile = rcppFile, maxiter = maxiter, silent = silent, verbose = verbose, r0 = 0.5, tol1 = 1e-10, tol2 = 1e-4)) return(out) } #' @rdname bridge_sampler #' @export bridge_sampler.mcmc <- function(samples = NULL, log_posterior = NULL, ..., data = NULL, lb = NULL, ub = NULL, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, packages = NULL, varlist = NULL, envir = .GlobalEnv, rcppFile = NULL, maxiter = 1000, param_types = rep("real", ncol(samples)), silent = FALSE, verbose = FALSE) { samples <- as.matrix(samples) bridge_output <- bridge_sampler(samples = samples, log_posterior = log_posterior, ..., data = data, lb = lb, ub = ub, repetitions = repetitions, method = method, cores = cores, use_neff = use_neff, packages = packages, varlist = varlist, envir = envir, rcppFile = rcppFile, maxiter = maxiter, param_types = param_types, silent = silent, verbose = verbose) return(bridge_output) } #' @export #' @rdname bridge_sampler bridge_sampler.matrix <- function(samples = NULL, log_posterior = NULL, ..., data = NULL, lb = NULL, ub = NULL, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, packages = NULL, varlist = NULL, envir = .GlobalEnv, rcppFile = NULL, maxiter = 1000, param_types = rep("real", ncol(samples)), silent = FALSE, verbose = FALSE) { # see Meng & Wong (1996), equation 4.1 # Check simplex computation is_simplex_param <- param_types == "simplex" if (any(is_simplex_param)) { simplex_samples <- samples[, is_simplex_param] if (any(!(round(rowSums(simplex_samples), 6) == 1L))) { stop(paste("Simplex parameters do not sum to one. This could be due to having multiple separate sets of simplex parameters, which are not supported. ")) } # Remove the last simplex variable because it is superfluous. 
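# (The elements of a simplex sum to one, so the last simplex variable is a
# deterministic function of the remaining ones; it can therefore be dropped here
# before the remaining parameters are mapped to the real line below.)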
last_sim <- which(is_simplex_param)[sum(is_simplex_param)] samples <- samples[, -last_sim] param_types <- param_types[-last_sim] lb <- lb[-last_sim] ub <- ub[-last_sim] } # transform parameters to real line tmp <- .transform2Real(samples, lb, ub, theta_types = param_types) theta_t <- tmp$theta_t transTypes <- tmp$transTypes # split samples for proposal/iterative scheme nr <- nrow(samples) samples4fit_index <- seq_len(nr) %in% seq_len(round(nr/2)) # split samples in two parts samples_4_fit <- theta_t[samples4fit_index, ,drop = FALSE] samples_4_iter <- theta_t[!samples4fit_index, , drop = FALSE] # compute effective sample size if (use_neff) { neff <- tryCatch(median(coda::effectiveSize(coda::mcmc(samples_4_iter))), error = function(e) { warning("effective sample size cannot be calculated, has been replaced by number of samples.", call. = FALSE) return(NULL) }) } else { neff <- NULL } out <- do.call(what = paste0(".bridge.sampler.", method), args = list(samples_4_fit = samples_4_fit, samples_4_iter = samples_4_iter, neff = neff, log_posterior = log_posterior, "..." = ..., data = data, lb = lb, ub = ub, transTypes = transTypes, param_types = param_types, repetitions = repetitions, cores = cores, packages = packages, varlist = varlist, envir = envir, rcppFile = rcppFile, maxiter = maxiter, silent = silent, verbose = verbose, r0 = 0.5, tol1 = 1e-10, tol2 = 1e-4)) return(out) } #' @rdname bridge_sampler #' @export #' @importFrom utils read.csv bridge_sampler.stanreg <- function(samples, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, maxiter = 1000, silent = FALSE, verbose = FALSE, ...) { df <- eval(samples$call$diagnostic_file) if (is.null(df)) stop("the 'diagnostic_file' option must be specified in the call to ", samples$stan_function, " to use the 'bridge_sampler'") sf <- samples$stanfit chains <- ncol(sf) if (chains > 1) df <- sapply(1:chains, FUN = function(j) sub("\\.csv$", paste0("_", j, ".csv"), df)) samples_list <- lapply(df, FUN = function(f) { d <- read.csv(f, comment.char = "#") excl <- c("lp__", "accept_stat__", "stepsize__" ,"treedepth__", "n_leapfrog__", "divergent__", "energy__") d <- d[,!(colnames(d) %in% excl), drop = FALSE] coda::as.mcmc(as.matrix(d[, 1:rstan::get_num_upars(sf), drop = FALSE])) }) samples <- coda::as.mcmc.list(samples_list) lb <- rep(-Inf, ncol(samples[[1]])) ub <- rep( Inf, ncol(samples[[1]])) names(lb) <- names(ub) <- colnames(samples[[1]]) # cores > 1 only for unix: if (!(.Platform$OS.type == "unix") & (cores != 1)) { warning("cores > 1 only possible on Unix/MacOs. Uses 'core = 1' instead.", call. 
= FALSE) cores <- 1L } if (cores == 1) { bridge_output <- bridge_sampler(samples = samples, log_posterior = .stan_log_posterior, data = list(stanfit = sf), lb = lb, ub = ub, repetitions = repetitions, method = method, cores = cores, use_neff = use_neff, packages = "rstan", maxiter = maxiter, silent = silent, verbose = verbose) } else { bridge_output <- bridge_sampler(samples = samples, log_posterior = .stan_log_posterior, data = list(stanfit = sf), lb = lb, ub = ub, repetitions = repetitions, varlist = "stanfit", envir = sys.frame(sys.nframe()), method = method, cores = cores, use_neff = use_neff, packages = "rstan", maxiter = maxiter, silent = silent, verbose = verbose) } return(bridge_output) } #' @rdname bridge_sampler #' @export bridge_sampler.rjags <- function(samples = NULL, log_posterior = NULL, ..., data = NULL, lb = NULL, ub = NULL, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, packages = NULL, varlist = NULL, envir = .GlobalEnv, rcppFile = NULL, maxiter = 1000, silent = FALSE, verbose = FALSE) { # convert to mcmc.list cn <- colnames(samples$BUGSoutput$sims.matrix) samples <- coda::as.mcmc(samples) samples <- samples[,cn != "deviance", drop = FALSE] # run bridge sampling out <- bridge_sampler(samples = samples, log_posterior = log_posterior, ..., data = data, lb = lb, ub = ub, repetitions = repetitions, method = method, cores = cores, use_neff = use_neff, packages = packages, varlist = varlist, envir = envir, rcppFile = rcppFile, maxiter = maxiter, silent = silent, verbose = verbose) return(out) } #' @rdname bridge_sampler #' @export bridge_sampler.runjags <- function(samples = NULL, log_posterior = NULL, ..., data = NULL, lb = NULL, ub = NULL, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, packages = NULL, varlist = NULL, envir = .GlobalEnv, rcppFile = NULL, maxiter = 1000, silent = FALSE, verbose = FALSE) { # convert to mcmc.list samples <- coda::as.mcmc.list(samples) # run bridge sampling out <- bridge_sampler(samples = samples, log_posterior = log_posterior, ..., data = data, lb = lb, ub = ub, repetitions = repetitions, method = method, cores = cores, use_neff = use_neff, packages = packages, varlist = varlist, envir = envir, rcppFile = rcppFile, maxiter = maxiter, silent = silent, verbose = verbose) return(out) } #' @rdname bridge_sampler #' @export bridge_sampler.MCMC_refClass <- function(samples, repetitions = 1, method = "normal", cores = 1, use_neff = TRUE, maxiter = 1000, silent = FALSE, verbose = FALSE, ...) { if (!requireNamespace("nimble")) stop("package nimble required") ## functions for nimble support .log_posterior_nimble <- ".log_posterior_nimble <- nimble::nimbleFunction( # based on code by Perry de Valpine ## setup code is executed in R and specializes an instance ## of the nimbleFunction to a particular model or nodes setup = function(model, nodes) { calcNodes <- model$getDependencies(nodes) }, ## run code is called repeatedly and can be converted into C++ run = function(sample = double(1)) { values(model, nodes) <<- sample out <- model$calculate(calcNodes) return(out) returnType(double(0)) } )" eval(parse(text = .log_posterior_nimble)) ## trick for avoiding R CMD check NOTEs .nimble_bounds <- function(samples, model, which) { if ( ! (which %in% c("lower", "upper")) ) { stop('"which" needs to be either "lower" or "upper"\n', call. 
= FALSE) } cn <- colnames(samples) bounds <- numeric(length(cn)) names(bounds) <- cn for (i in seq_along(cn)) { bounds[[cn[i]]] <- model$getBound(cn[i], which) } return(bounds) } # cores > 1 only for unix: if (!(.Platform$OS.type == "unix") & (cores != 1)) { warning("cores > 1 only possible on Unix/MacOs. Uses 'core = 1' instead.", call. = FALSE) cores <- 1L } mcmc_samples <- as.matrix(samples$mvSamples) if (all(is.na(mcmc_samples))) { stop("nimble object does not contain samples. Call runMCMC() first.", call. = FALSE) } # make sure that samples is a list if (is.matrix(mcmc_samples)) { # TRUE in case nchains = 1 mcmc_samples <- list(mcmc_samples) } # convert samples to mcmc.list samples_mcmc <- lapply(mcmc_samples, FUN = coda::as.mcmc) samples_mcmc_list <- coda::as.mcmc.list(samples_mcmc) ## get model name from MCMC_refClass object mod_name <- ls(samples$nimbleProject$models)[1] nimble_model <- samples$nimbleProject$models[[mod_name]] # compile log_posterior for bridge sampling log_posterior_tmp <- .log_posterior_nimble(model = nimble_model, nodes = colnames(mcmc_samples[[1]])) suppressMessages( clog_posterior <- nimble::compileNimble(log_posterior_tmp, project = nimble_model)) # wrapper to match required format for log_posterior log_posterior <- function(x, data) { clog_posterior$run(x) } out <- bridge_sampler(samples = samples_mcmc_list, log_posterior = log_posterior, ..., data = NULL, lb = .nimble_bounds(mcmc_samples[[1]], nimble_model, "lower"), ub = .nimble_bounds(mcmc_samples[[1]], nimble_model, "upper"), repetitions = repetitions, method = method, cores = cores, use_neff = use_neff, packages = "nimble", maxiter = maxiter, silent = silent, verbose = verbose) return(out) } bridgesampling/R/error_measures.R0000644000176200001440000001025614026403742016630 0ustar liggesusers#' Computes error measures for estimated marginal likelihood. #' @export #' @title Error Measures for Estimated Marginal Likelihood #' @param bridge_object an object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. #' @param na.rm a logical indicating whether missing values in logml estimates should be removed. Ignored for the \code{bridge} method. #' @param ... additional arguments (currently ignored). #' @details Computes error measures for marginal likelihood bridge sampling estimates. The approximate errors for a \code{bridge_object} of class \code{"bridge"} that has been obtained with \code{method = "normal"} and \code{repetitions = 1} are based on Fruehwirth-Schnatter (2004). #' Not applicable in case the object of class \code{"bridge"} has been obtained with \code{method = "warp3"} and \code{repetitions = 1}. #' To assess the uncertainty of the estimate in this case, it is recommended to run the \code{"warp3"} procedure multiple times. #' @return If \code{bridge_object} is of class \code{"bridge"} and has been obtained with \code{method = "normal"} and \code{repetitions = 1}, returns a list with components: #' \itemize{ #' \item \code{re2}: approximate relative mean-squared error for marginal likelihood estimate. #' \item \code{cv}: approximate coefficient of variation for marginal likelihood estimate (assumes that bridge estimate is unbiased). #' \item \code{percentage}: approximate percentage error of marginal likelihood estimate. #' } #' If \code{bridge_object} is of class \code{"bridge_list"}, returns a list with components: #' \itemize{ #' \item \code{min}: minimum of the log marginal likelihood estimates. 
#' \item \code{max}: maximum of the log marginal likelihood estimates. #' \item \code{IQR}: interquartile range of the log marginal likelihood estimates. #' } #' @author Quentin F. Gronau #' @note For examples, see \code{\link{bridge_sampler}} and the accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} \cr \code{vignette("bridgesampling_example_stan")} #' #' @seealso The \code{summary} methods for \code{bridge} and \code{bridge_list} objects automatically invoke this function, see \code{\link{bridge-methods}}. #' #' @references #' Fruehwirth-Schnatter, S. (2004). Estimating marginal likelihoods for mixture and Markov switching models using bridge sampling techniques. \emph{The Econometrics Journal, 7}, 143-167. \doi{10.1111/j.1368-423X.2004.00125.x} #' @import Brobdingnag #' @importFrom coda spectrum0.ar #' @export error_measures <- function (bridge_object, ...) { UseMethod("error_measures", bridge_object) } #' @rdname error_measures #' @export error_measures.bridge <- function(bridge_object,...) { if (bridge_object$method == "warp3") { stop(paste0("error_measures not implemented for warp3 method with", "\n repetitions = 1.", "\n We recommend to run the warp3 procedure multiple times", "\n to assess the uncertainty of the estimate.")) } e <- as.brob( exp(1) ) ml <- e^(bridge_object$logml) g_p <- e^(bridge_object$q12) g_g <- e^(bridge_object$q22) priorTimesLik_p <- e^(bridge_object$q11) priorTimesLik_g <- e^(bridge_object$q21) p_p <- priorTimesLik_p/ml p_g <- priorTimesLik_g/ml N1 <- length(p_p) N2 <- length(g_g) s1 <- N1/(N1 + N2) s2 <- N2/(N1 + N2) f1 <- as.numeric( p_g/(s1*p_g + s2*g_g) ) f2 <- as.numeric( g_p/(s1*p_p + s2*g_p) ) rho_f2 <- spectrum0.ar( f2 )$spec term1 <- 1/N2 * var( f1 ) / mean( f1 )^2 term2 <- rho_f2/N1 * var( f2 ) / mean( f2 )^2 re2 <- term1 + term2 # convert to coefficient of variation (assumes that bridge estimate is unbiased) cv <- sqrt(re2) # convert to percentage error percentage <- scales::percent(cv) return(list(re2 = re2, cv = cv, percentage = percentage)) } #' @rdname error_measures #' @export error_measures.bridge_list <- function(bridge_object, na.rm = TRUE, ...) { return(list(min = min(bridge_object$logml, na.rm = na.rm), max = max(bridge_object$logml, na.rm = na.rm), IQR = stats::IQR(bridge_object$logml, na.rm = na.rm))) } bridgesampling/R/bridge_sampler_tools.R0000644000176200001440000000152213663004467017775 0ustar liggesusers #-------------------------------------------------------------------------- # functions for Stan support via rstan #-------------------------------------------------------------------------- # taken from rstan: .rstan_relist <- function (x, skeleton) { lst <- utils::relist(x, skeleton) for (i in seq_along(skeleton)) dim(lst[[i]]) <- dim(skeleton[[i]]) lst } # taken from rstan: .create_skeleton <- function (pars, dims) { lst <- lapply(seq_along(pars), function(i) { len_dims <- length(dims[[i]]) if (len_dims < 1) return(0) return(array(0, dim = dims[[i]])) }) names(lst) <- pars lst } .stan_log_posterior <- function(s.row, data) { out <- tryCatch(rstan::log_prob(object = data$stanfit, upars = s.row), error = function(e) -Inf) if (is.na(out)) out <- -Inf return(out) } bridgesampling/R/post_prob.R0000644000176200001440000001247713663004467015620 0ustar liggesusers#' Generic function that computes posterior model probabilities from marginal #' likelihoods. 
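#' Specifically, given log marginal likelihood estimates \eqn{\log p(y \mid M_i)} and prior
#' model probabilities \eqn{\pi_i}, the posterior probability of model \eqn{M_i} is
#' \deqn{p(M_i \mid y) = \frac{\pi_i \exp(\log p(y \mid M_i))}{\sum_j \pi_j \exp(\log p(y \mid M_j))}}
#' where the exponentiation is carried out on the \pkg{Brobdingnag} scale to avoid
#' numerical under- and overflow.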
#' @export #' @title Posterior Model Probabilities from Marginal Likelihoods #' @param x Object of class \code{"bridge"} or \code{"bridge_list"} as returned #' from \code{\link{bridge_sampler}}. Additionally, the default method assumes #' that all passed objects are numeric log marginal likelihoods (e.g., from #' \code{\link{logml}}) and will throw an error otherwise. #' @param ... further objects of class \code{"bridge"} or \code{"bridge_list"} #' as returned from \code{\link{bridge_sampler}}. Or numeric values for the #' default method. #' @param prior_prob numeric vector with prior model probabilities. If omitted, #' a uniform prior is used (i.e., all models are equally likely a priori). The #' default \code{NULL} corresponds to equal prior model weights. #' @param model_names If \code{NULL} (the default) will use model names derived #' from deparsing the call. Otherwise will use the passed values as model #' names. #' #' @return For the default method and the method for \code{"bridge"} objects, a #' named numeric vector with posterior model probabilities (i.e., which sum to #' one). #' #' For the method for \code{"bridge_list"} objects, a matrix consisting of #' posterior model probabilities where each row sums to one and gives the #' model probabilities for one set of logmls. The (named) columns correspond #' to the models and the number of rows is given by the \code{"bridge_list"} #' element with the most \code{repetitions}. Elements with fewer repetitions #' will be recycled (with warning). #' @author Quentin F. Gronau and Henrik Singmann #' @note For realistic examples, see \code{\link{bridge_sampler}} and the #' accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} #' \cr \code{vignette("bridgesampling_example_stan")} #' @example examples/example.post_prob.R #' @importFrom methods is post_prob <- function (x, ..., prior_prob = NULL, model_names = NULL) { UseMethod("post_prob", x) } #' @rdname post_prob #' @export post_prob.bridge <- function(x, ..., prior_prob = NULL, model_names = NULL) { dots <- list(...) mc <- match.call() modb <- vapply(dots, inherits, NA, what = c("bridge", "bridge_list")) if (is.null(model_names)) model_names <- c(deparse(mc[["x"]]), vapply(which(modb), function(x) deparse(mc[[x+2]]), "")) if (sum(modb) == 0) stop("Only one object of class 'bridge' or 'bridge_list' passed.", call. = FALSE) if (sum(modb) != length(dots)) warning("Objects not of class 'bridge' or 'bridge_list' are ignored.", call. = FALSE) logml <- vapply(c(list(x), dots[modb]), logml, FUN.VALUE = 0) .post_prob_calc(logml=logml, model_names = model_names, prior_prob=prior_prob) } #' @rdname post_prob #' @export post_prob.bridge_list <- function(x, ..., prior_prob = NULL, model_names = NULL) { dots <- list(...) mc <- match.call() modb <- vapply(dots, inherits, NA, what = c("bridge", "bridge_list")) if (is.null(model_names)) model_names <- c(deparse(mc[["x"]]), vapply(which(modb), function(x) deparse(mc[[x+2]]), "")) if (sum(modb) == 0) stop("Only one object of class 'bridge' or 'bridge_list' passed.", call. = FALSE) if (sum(modb) != length(dots)) warning("Objects not of class 'bridge' or 'bridge_list' are ignored.", call. = FALSE) logml <- lapply(c(list(x), dots[modb]), "[[", i = "logml") len <- vapply(logml, length, FUN.VALUE = 0) if (!all(len == max(len))) { warning("Not all objects provide ", max(len), " logmls. Some values are recycled.", call. 
= FALSE) logml <- lapply(logml, function(x) rep(x, length.out = max(len))) } t(apply(as.data.frame(logml), 1, .post_prob_calc, model_names = model_names, prior_prob=prior_prob)) } #' @rdname post_prob #' @export post_prob.default <- function(x, ..., prior_prob = NULL, model_names = NULL) { dots <- list(...) mc <- match.call() if (is.null(model_names)) model_names <- c(rep(deparse(mc[["x"]]), length(x)), rep(vapply(seq_along(dots), function(x) deparse(mc[[x+2]]), ""), times = vapply(dots, length, 0))) logml <- c(x, unlist(dots)) if (!is.numeric(logml)) { stop("logml values need to be numeric", call. = FALSE) } .post_prob_calc(logml=logml, model_names = model_names, prior_prob=prior_prob) } .post_prob_calc <- function(logml, model_names, prior_prob) { e <- as.brob(exp(1)) if(is.null(prior_prob)) prior_prob <- rep(1/length(logml), length(logml)) if(!isTRUE(all.equal(sum(prior_prob), 1))) stop("Prior model probabilities do not sum to one.", call. = FALSE) if(length(logml) != length(prior_prob)) stop("Number of objects/logml-values needs to match number of elements in prior_prob.", call. = FALSE) if(any(is.na(logml))) { post_prob <- rep(NA_real_, length(logml)) warning("NAs in logml values. No posterior probabilities calculated.", call. = FALSE) } else { post_prob <- as.numeric(e^logml*prior_prob / sum(e^logml*prior_prob)) if(!isTRUE(all.equal(sum(post_prob), 1))) warning("Posterior model probabilities do not sum to one.", call. = FALSE) } names(post_prob) <- make.unique(as.character(model_names)) return(post_prob) } bridgesampling/R/logml.R0000644000176200001440000000204513663004467014711 0ustar liggesusers#' Generic function that returns log marginal likelihood from bridge objects. For objects of class \code{"bridge_list"}, which contains multiple log marginal likelihoods, \code{fun} is performed on the vector and its result returned. #' @title Log Marginal Likelihoods from Bridge Objects #' @param x Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. #' @param fun Function which returns a scalar value and is applied to the \code{logml} vector of \code{"bridge_list"} objects. Default is \code{\link{median}}. #' @param ... Further arguments passed to \code{fun}. #' @return scalar numeric #' @export logml <- function (x, ...) { UseMethod("logml", x) } #' @rdname logml #' @export logml.bridge <- function (x, ...) { x$logml } #' @rdname logml #' @export logml.bridge_list <- function (x, fun = median, ...) { out <- fun(x$logml, ...) 
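# `fun` (median by default) should reduce the vector of logml values to a single
# scalar; if it returns more than one value, only the first element is used below.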
if (length(out) != 1) { warning("fun returns results of length != 1, only first used.") out <- out[1] } out } bridgesampling/MD50000644000176200001440000001212014036247673013561 0ustar liggesusersa4783a67f4928e407b405fac7c9e49b1 *DESCRIPTION 1e64168fe16c395d08666dcae98fe5f3 *NAMESPACE 45704053d3cd5ba81a8ce255702dfda4 *NEWS 70e51c92df398949ae6190e655ed6e28 *R/bf.R 3d0797935c827c4495bd8cbfbae6c96f *R/bridge_methods.R a98660617ef6bb6c3298d42b4e7c95cf *R/bridge_sampler.R 72c396f70546d3e39ec1183d76091fe4 *R/bridge_sampler_internals.R 6d76162f242a5d6c4a2614edc758b578 *R/bridge_sampler_normal.R b0b0acacf1b94708f6b038711d3f160e *R/bridge_sampler_tools.R 1fcfdd86e0e361f303cb7a5158314bec *R/bridge_sampler_warp3.R a2b441e4bcc52ef80c542c8d8fb64131 *R/error_measures.R aacc2662e1c31c60e1fa76957e34e4cf *R/ier-data.R 8641cf36ba874cb2642ee7a827d6d242 *R/logml.R 6d07d9c18a308e31b21e8353a8ad3e94 *R/post_prob.R 6c40bf872e9dee0c75e859145d789dd4 *R/turtles-data.R 9071a82fe635acb2239949d9fd4cd49a *build/partial.rdb aa2404c7289154795d2f9e5dd1a7bedb *build/vignette.rds 0c51ed14d5576619c9f4c39aae27732a *data/ier.rda 0fe9eaacba1bdb2098f8fdde83f65c32 *data/turtles.rda c8af7b819cb641ab0f3f04fc44ccf1d7 *inst/CITATION 014733970d49356e2b6aee04796d6769 *inst/doc/bridgesampling_example_jags.R 68a47d0e6b39d06e187b6a3f91a68f96 *inst/doc/bridgesampling_example_jags.Rmd 627e3a5379b2c49077253067b20566f8 *inst/doc/bridgesampling_example_jags.html 32bb579b7078f951e40027a2a75d371f *inst/doc/bridgesampling_example_nimble.R 4fc29092a4fe010e7272d462f4498284 *inst/doc/bridgesampling_example_nimble.Rmd 1705c63ae5fe6b697f99b9f5aab7bdee *inst/doc/bridgesampling_example_nimble.html d2d1a72d9301853ff2d737e2b298f8ac *inst/doc/bridgesampling_example_stan.R a4fbba20f8468d3c14c926c5714ccfe3 *inst/doc/bridgesampling_example_stan.Rmd be80c7b2253a5563d8f2165228c42fbe *inst/doc/bridgesampling_example_stan.html 4b0fd56d15935e710e534f861adc3bf9 *inst/doc/bridgesampling_paper.pdf 9b7eec34adaa91fba9f7d06acac932a1 *inst/doc/bridgesampling_paper.pdf.asis 345279ef0cf2077986b5b415a48bcccd *inst/doc/bridgesampling_paper_extended.pdf 0e85e0567ff8e5e676c2cab97c7d05d0 *inst/doc/bridgesampling_paper_extended.pdf.asis 51d5a2714f843e28fde1044d229f3c00 *inst/doc/bridgesampling_stan_ttest.R adf63ecae42af914bc72d882863af693 *inst/doc/bridgesampling_stan_ttest.Rmd a427cfc7efbd7dd3e03440fbe3711aaf *inst/doc/bridgesampling_stan_ttest.html f92f2238d522938e94589d5b3e3928ce *inst/doc/bridgesampling_tutorial.pdf f5e5226ea108a5baaae6011e5b9613b8 *inst/doc/bridgesampling_tutorial.pdf.asis 434644c66d15f79cf041d83ccedbac7e *inst/extdata/vignette_example_jags.RData 57b34d4773334a565b2ff3e61c04769a *inst/extdata/vignette_example_nimble.RData e05efd47379e0f72f294bb6071ac487b *inst/extdata/vignette_example_stan.RData f0cf681aedd2e8400030847494c168dd *inst/extdata/vignette_stan_ttest.RData 602022e4e41f431522c90bd0ed35939d *man/bf.Rd b28b9cd03b1c46733b4c204eb7ad971c *man/bridge-methods.Rd dae99cd3cd4d5608acd7b2d7389c3022 *man/bridge_sampler.Rd ff312a18bc07498f9dac9f0ea6258743 *man/error_measures.Rd 65b75e3d1f2e762c2877eccded045d9e *man/ier.Rd b100717efbd8a2df5d79e3020746d272 *man/logml.Rd 369ab42095c64b0ce29c7ccfb550baf3 *man/post_prob.Rd f279a1308a32ca55b5d5d2fd7c52cfd5 *man/turtles.Rd 9421b4d9679b13b57cc825b7a1f6a0f1 *tests/testthat.R fb06595ad90c150c66fa8d603e76c547 *tests/testthat/test-bf.R 75ca5496bf61f76c4a98fc2405ca8d98 *tests/testthat/test-bridge_sampler.R 5a0a26507ab149adcdfe1ab064095e1a *tests/testthat/test-bridge_sampler_Rcpp.R 619c9e44f54d979fa917b8a971849ac6 
*tests/testthat/test-bridge_sampler_Rcpp_parallel.R 7c23845a34df8ef1949aec02435b4e56 *tests/testthat/test-bridge_sampler_mcmc.list.R 063ed84bf3315678455e41e384c60438 *tests/testthat/test-bridge_sampler_parallel.R b1632abe9000b9d55e815d7da74cc12e *tests/testthat/test-bridge_sampler_print_method.R 333303dc0acf151e8f463e5c349dd303 *tests/testthat/test-bridge_sampler_summary_method.R d8d2bb28e6a26122ed5b849bbe6407b1 *tests/testthat/test-nimble_bridge_sampler.R 238b5d2cd0c8b3df11c6766ba0aa9496 *tests/testthat/test-post_prob.R 787015ddff1730ce7fe593ae5f5457b6 *tests/testthat/test-stan_bridge_sampler_basic.R 09148038fcaedf1920424e5a43d2b153 *tests/testthat/test-stan_bridge_sampler_bugs.R ad31d1913116deaa02f61ed8a56d6e8d *tests/testthat/test-stanreg_bridge_sampler_basic.R a41100dc3d2c74624c7bf3efa1bc1b5a *tests/testthat/test-vignette_example_jags.R a50ad82940ff8bb77491e077ed01af21 *tests/testthat/test-vignette_example_nimble.R 0b1ea3f34c83b3d8afbe7cbfc15374c3 *tests/testthat/test-vignette_example_stan.R c6e6bbc8d656da74766dc271b7eb9753 *tests/testthat/test-vignette_stan_ttest.R 401c56bd63239b0ea3d21295698d3c65 *tests/testthat/test_dat.txt dd38a25b9f17bf3b889da6286ebdba6d *tests/testthat/unnormalized_normal_density.cpp 12b88d9c16f53d5791635e8de5c35bad *tests/testthat/unnormalized_normal_density_mu.cpp 68a47d0e6b39d06e187b6a3f91a68f96 *vignettes/bridgesampling_example_jags.Rmd 4fc29092a4fe010e7272d462f4498284 *vignettes/bridgesampling_example_nimble.Rmd a4fbba20f8468d3c14c926c5714ccfe3 *vignettes/bridgesampling_example_stan.Rmd 9b7eec34adaa91fba9f7d06acac932a1 *vignettes/bridgesampling_paper.pdf.asis 0e85e0567ff8e5e676c2cab97c7d05d0 *vignettes/bridgesampling_paper_extended.pdf.asis adf63ecae42af914bc72d882863af693 *vignettes/bridgesampling_stan_ttest.Rmd f5e5226ea108a5baaae6011e5b9613b8 *vignettes/bridgesampling_tutorial.pdf.asis bridgesampling/inst/0000755000176200001440000000000014036106017014214 5ustar liggesusersbridgesampling/inst/doc/0000755000176200001440000000000014036106017014761 5ustar liggesusersbridgesampling/inst/doc/bridgesampling_example_stan.R0000644000176200001440000001044614036106015022636 0ustar liggesusers## ----------------------------------------------------------------------------- library(bridgesampling) ### generate data ### set.seed(12345) mu <- 0 tau2 <- 0.5 sigma2 <- 1 n <- 20 theta <- rnorm(n, mu, sqrt(tau2)) y <- rnorm(n, theta, sqrt(sigma2)) ## ----eval=FALSE--------------------------------------------------------------- # ### set prior parameters ### # mu0 <- 0 # tau20 <- 1 # alpha <- 1 # beta <- 1 ## ---- eval=FALSE-------------------------------------------------------------- # library(rstan) # # # models # stancodeH0 <- 'data { # int n; // number of observations # vector[n] y; // observations # real alpha; # real beta; # real sigma2; # } # parameters { # real tau2; // group-level variance # vector[n] theta; // participant effects # } # model { # target += inv_gamma_lpdf(tau2 | alpha, beta); # target += normal_lpdf(theta | 0, sqrt(tau2)); # target += normal_lpdf(y | theta, sqrt(sigma2)); # } # ' # stancodeH1 <- 'data { # int n; // number of observations # vector[n] y; // observations # real mu0; # real tau20; # real alpha; # real beta; # real sigma2; # } # parameters { # real mu; # real tau2; // group-level variance # vector[n] theta; // participant effects # } # model { # target += normal_lpdf(mu | mu0, sqrt(tau20)); # target += inv_gamma_lpdf(tau2 | alpha, beta); # target += normal_lpdf(theta | mu, sqrt(tau2)); # target += normal_lpdf(y | theta, sqrt(sigma2)); # } 
# ' # # compile models # stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") # stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel") ## ---- eval=FALSE-------------------------------------------------------------- # # fit models # stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n, # alpha = alpha, # beta = beta, # sigma2 = sigma2), # iter = 50000, warmup = 1000, chains = 3, cores = 1) # stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, # mu0 = mu0, # tau20 = tau20, # alpha = alpha, # beta = beta, # sigma2 = sigma2), # iter = 50000, warmup = 1000, chains = 3, cores = 1) ## ---- echo=FALSE-------------------------------------------------------------- load(system.file("extdata/", "vignette_example_stan.RData", package = "bridgesampling")) ## ----eval=FALSE--------------------------------------------------------------- # # compute log marginal likelihood via bridge sampling for H0 # H0.bridge <- bridge_sampler(stanfitH0, silent = TRUE) # # # compute log marginal likelihood via bridge sampling for H1 # H1.bridge <- bridge_sampler(stanfitH1, silent = TRUE) ## ----------------------------------------------------------------------------- print(H0.bridge) print(H1.bridge) ## ----eval=FALSE--------------------------------------------------------------- # # compute percentage errors # H0.error <- error_measures(H0.bridge)$percentage # H1.error <- error_measures(H1.bridge)$percentage ## ----------------------------------------------------------------------------- print(H0.error) print(H1.error) ## ----------------------------------------------------------------------------- # compute Bayes factor BF01 <- bf(H0.bridge, H1.bridge) print(BF01) ## ----------------------------------------------------------------------------- # compute posterior model probabilities (assuming equal prior model probabilities) post1 <- post_prob(H0.bridge, H1.bridge) print(post1) ## ----------------------------------------------------------------------------- # compute posterior model probabilities (using user-specified prior model probabilities) post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) print(post2) bridgesampling/inst/doc/bridgesampling_paper_extended.pdf.asis0000644000176200001440000000020313663004467024456 0ustar liggesusers%\VignetteIndexEntry{bridgesampling: An R Package for Estimating Normalizing Constants (Extended)} %\VignetteEngine{R.rsp::asis} bridgesampling/inst/doc/bridgesampling_paper.pdf.asis0000644000176200001440000000020613663004467022601 0ustar liggesusers%\VignetteIndexEntry{bridgesampling: An R Package for Estimating Normalizing Constants (JSS version)} %\VignetteEngine{R.rsp::asis} bridgesampling/inst/doc/bridgesampling_example_jags.Rmd0000644000176200001440000002533413663004467023155 0ustar liggesusers--- title: "Hierarchical Normal Example (JAGS)" author: "Quentin F. Gronau" date: "`r Sys.Date()`" show_toc: true output: knitr:::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Hierarchical Normal Example JAGS} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- In this vignette, we explain how one can compute marginal likelihoods, Bayes factors, and posterior model probabilities using a simple hierarchical normal model implemented in `JAGS`. This vignette uses the same models and data as the [`Stan` vignette](bridgesampling_example_stan.html). 
## Model and Data The model that we will use assumes that each of the $n$ observations $y_i$ (where $i$ indexes the observation, $i = 1,2,...,n$) is normally distributed with corresponding mean $\theta_i$ and a common known variance $\sigma^2$: $y_i \sim \mathcal{N}(\theta_i, \sigma^2)$. Each $\theta_i$ is drawn from a normal group-level distribution with mean $\mu$ and variance $\tau^2$: $\theta_i \sim \mathcal{N}(\mu, \tau^2)$. For the group-level mean $\mu$, we use a normal prior distribution of the form $\mathcal{N}(\mu_0, \tau^2_0)$. For the group-level variance $\tau^2$, we use an inverse-gamma prior of the form $\text{Inv-Gamma}(\alpha, \beta)$. We will use `JAGS` to fit the model which parametrizes the normal distribution in terms of the precision (i.e., one over the variance). Consequently, we implement this inverse-gamma prior on $\tau^2$ by placing a gamma prior of the form $\text{Gamma}(\alpha, \beta)$ on the precision; we call this precision parameter `invTau2` in the code. In this example, we are interested in comparing the null model $\mathcal{H}_0$, which posits that the group-level mean $\mu = 0$, to the alternative model $\mathcal{H}_1$, which allows $\mu$ to be different from zero. First, we generate some data from the null model: ```{r} library(bridgesampling) ### generate data ### set.seed(12345) mu <- 0 tau2 <- 0.5 sigma2 <- 1 n <- 20 theta <- rnorm(n, mu, sqrt(tau2)) y <- rnorm(n, theta, sqrt(sigma2)) ``` Next, we specify the prior parameters $\mu_0$, $\tau^2_0$, $\alpha$, and $\beta$: ```{r,eval=FALSE} ### set prior parameters ### mu0 <- 0 tau20 <- 1 alpha <- 1 beta <- 1 ``` ## Fitting the Models Now we can fit the null and the alternative model in `JAGS` (note that it is necessary to install `JAGS` for this). One usually requires a larger number of posterior sample for estimating the marginal likelihood than for simply estimating the model parameters. This is the reason for using a comparatively large number of samples (i.e., 50,000 post burn-in samples per chain) for this comparatively simple model. ```{r, eval=FALSE} library(R2jags) ### functions to get posterior samples ### # H0: mu = 0 getSamplesModelH0 <- function(data, niter = 52000, nburnin = 2000, nchains = 3) { model <- " model { for (i in 1:n) { theta[i] ~ dnorm(0, invTau2) y[i] ~ dnorm(theta[i], 1/sigma2) } invTau2 ~ dgamma(alpha, beta) tau2 <- 1/invTau2 }" s <- jags(data, parameters.to.save = c("theta", "invTau2"), model.file = textConnection(model), n.chains = nchains, n.iter = niter, n.burnin = nburnin, n.thin = 1) return(s) } # H1: mu != 0 getSamplesModelH1 <- function(data, niter = 52000, nburnin = 2000, nchains = 3) { model <- " model { for (i in 1:n) { theta[i] ~ dnorm(mu, invTau2) y[i] ~ dnorm(theta[i], 1/sigma2) } mu ~ dnorm(mu0, 1/tau20) invTau2 ~ dgamma(alpha, beta) tau2 <- 1/invTau2 }" s <- jags(data, parameters.to.save = c("theta", "mu", "invTau2"), model.file = textConnection(model), n.chains = nchains, n.iter = niter, n.burnin = nburnin, n.thin = 1) return(s) } ### get posterior samples ### # create data lists for JAGS data_H0 <- list(y = y, n = length(y), alpha = alpha, beta = beta, sigma2 = sigma2) data_H1 <- list(y = y, n = length(y), mu0 = mu0, tau20 = tau20, alpha = alpha, beta = beta, sigma2 = sigma2) # fit models samples_H0 <- getSamplesModelH0(data_H0) samples_H1 <- getSamplesModelH1(data_H1) ``` ## Specifying the Unnormalized Log Posterior Function The next step is to write the corresponding `log_posterior` (i.e., unnormalized posterior) function for both models. 
This function takes one draw from the joint posterior and the data object as input and returns the log of the unnormalized joint posterior density. When using MCMC software such as `JAGS` or `Stan`, specifying this function is relatively simple. As a rule of thumb, one only needs to look for all places where a "`~`" sign appears in the model code. The log of the densities on the right-hand side of these "`~`" symbols needs to be evaluated for the relevant quantities and then these log densities values are summed. For example, in the null model, there are three "`~`" signs. Starting at the data-level, we need to evaluate the log of the normal density with mean $\theta_i$ and variance $\sigma^2$ for all $y_i$ and then sum the resulting log density values. Next, we move one step up in the model and evaluate the log of the group-level density for all $\theta_i$. Hence, we evaluate the log of the normal density for $\theta_i$ with mean $\mu = 0$ and variance $\tau^2$ (remember that `JAGS` parametrizes the normal distribution in terms of the precision `invTau2` = $1/\tau^2$; in contrast, `R` parametrizes it in terms of the standard deviation) and sum the resulting log density values. The result of this summation is added to the result of the previous summation for the data-level normal distribution. Finally, we need to evaluate the log of the prior density for `invTau2`. This means that we compute the log density of the gamma distribution with parameters $\alpha$ and $\beta$ for the sampled `invTau2` value and add the resulting log density value to the result of summing the data-level and group-level log densities. The unnormalized log posterior for the alternative model can be obtained in a similar fashion. The resulting functions look as follows: ```{r,eval=FALSE} ### functions for evaluating the unnormalized posteriors on log scale ### log_posterior_H0 <- function(samples.row, data) { mu <- 0 invTau2 <- samples.row[[ "invTau2" ]] theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + dgamma(invTau2, data$alpha, data$beta, log = TRUE) } log_posterior_H1 <- function(samples.row, data) { mu <- samples.row[[ "mu" ]] invTau2 <- samples.row[[ "invTau2" ]] theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + dnorm(mu, data$mu0, sqrt(data$tau20), log = TRUE) + dgamma(invTau2, data$alpha, data$beta, log = TRUE) } ``` ## Specifying the Parameter Bounds The final step before computing the log marginal likelihoods is to specify the parameter bounds. In this example, for both models, all parameters can range from $-\infty$ to $\infty$ except the precision `invTau2` which has a lower bound of zero. These boundary vectors need to be named and the names need to match the order of the parameters. 
```{r,eval=FALSE} # specify parameter bounds H0 cn <- colnames(samples_H0$BUGSoutput$sims.matrix) cn <- cn[cn != "deviance"] lb_H0 <- rep(-Inf, length(cn)) ub_H0 <- rep(Inf, length(cn)) names(lb_H0) <- names(ub_H0) <- cn lb_H0[[ "invTau2" ]] <- 0 # specify parameter bounds H1 cn <- colnames(samples_H1$BUGSoutput$sims.matrix) cn <- cn[cn != "deviance"] lb_H1 <- rep(-Inf, length(cn)) ub_H1 <- rep(Inf, length(cn)) names(lb_H1) <- names(ub_H1) <- cn lb_H1[[ "invTau2" ]] <- 0 ``` Note that currently, the lower and upper bound of a parameter cannot be a function of the bounds of another parameter. Furthermore, constraints that depend on multiple parameters of the model are not supported. This excludes, for example, parameters that constitute a covariance matrix or sets of parameters that need to sum to one. ## Computing the (Log) Marginal Likelihoods Now we are ready to compute the log marginal likelihoods using the `bridge_sampler` function. We use `silent = TRUE` to suppress printing the number of iterations to the console: ```{r, echo=FALSE} load(system.file("extdata/", "vignette_example_jags.RData", package = "bridgesampling")) ``` ```{r,eval=FALSE} # compute log marginal likelihood via bridge sampling for H0 H0.bridge <- bridge_sampler(samples = samples_H0, data = data_H0, log_posterior = log_posterior_H0, lb = lb_H0, ub = ub_H0, silent = TRUE) # compute log marginal likelihood via bridge sampling for H1 H1.bridge <- bridge_sampler(samples = samples_H1, data = data_H1, log_posterior = log_posterior_H1, lb = lb_H1, ub = ub_H1, silent = TRUE) ``` We obtain: ```{r} print(H0.bridge) print(H1.bridge) ``` We can use the `error_measures` function to compute an approximate percentage error of the estimates: ```{r,eval=FALSE} # compute percentage errors H0.error <- error_measures(H0.bridge)$percentage H1.error <- error_measures(H1.bridge)$percentage ``` We obtain: ```{r} print(H0.error) print(H1.error) ``` ## Bayesian Model Comparison To compare the null model and the alternative model, we can compute the Bayes factor by using the `bf` function. In our case, we compute $\text{BF}_{01}$, that is, the Bayes factor which quantifies how much more likely the data are under the null versus the alternative model: ```{r} # compute Bayes factor BF01 <- bf(H0.bridge, H1.bridge) print(BF01) ``` In this case, the Bayes factor is close to one, indicating that there is not much evidence for either model. We can also compute posterior model probabilities by using the `post_prob` function: ```{r} # compute posterior model probabilities (assuming equal prior model probabilities) post1 <- post_prob(H0.bridge, H1.bridge) print(post1) ``` When the argument `prior_prob` is not specified, as is the case here, the prior model probabilities of all models under consideration are set equal (i.e., in this case with two models to 0.5). However, if we had prior knowledge about how likely both models are, we could use the `prior_prob` argument to specify different prior model probabilities: ```{r} # compute posterior model probabilities (using user-specified prior model probabilities) post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) print(post2) ``` bridgesampling/inst/doc/bridgesampling_example_nimble.Rmd0000644000176200001440000001632713663004467023501 0ustar liggesusers--- title: "Hierarchical Normal Example (nimble)" author: "Quentin F. 
Gronau, Henrik Singmann & Perry de Valpine" date: "`r Sys.Date()`" show_toc: true output: knitr:::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Hierarchical Normal Example Nimble} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- In this vignette, we explain how one can compute marginal likelihoods, Bayes factors, and posterior model probabilities using a simple hierarchical normal model implemented in `nimble`. The [`nimble` documentation](https://r-nimble.org/html_manual/cha-welcome-nimble.html) provides a comprehensive overview. This vignette uses the same models and data as the [`Stan` vignette](bridgesampling_example_stan.html) and [`Jags` vignette](bridgesampling_example_jags.html). ## Model and Data The model that we will use assumes that each of the $n$ observations $y_i$ (where $i$ indexes the observation, $i = 1,2,...,n$) is normally distributed with corresponding mean $\theta_i$ and a common known variance $\sigma^2$: $y_i \sim \mathcal{N}(\theta_i, \sigma^2)$. Each $\theta_i$ is drawn from a normal group-level distribution with mean $\mu$ and variance $\tau^2$: $\theta_i \sim \mathcal{N}(\mu, \tau^2)$. For the group-level mean $\mu$, we use a normal prior distribution of the form $\mathcal{N}(\mu_0, \tau^2_0)$. For the group-level variance $\tau^2$, we use an inverse-gamma prior of the form $\text{Inv-Gamma}(\alpha, \beta)$. In this example, we are interested in comparing the null model $\mathcal{H}_0$, which posits that the group-level mean $\mu = 0$, to the alternative model $\mathcal{H}_1$, which allows $\mu$ to be different from zero. First, we generate some data from the null model: ```{r} library(bridgesampling) ### generate data ### set.seed(12345) mu <- 0 tau2 <- 0.5 sigma2 <- 1 n <- 20 theta <- rnorm(n, mu, sqrt(tau2)) y <- rnorm(n, theta, sqrt(sigma2)) ``` Next, we specify the prior parameters $\mu_0$, $\tau^2_0$, $\alpha$, and $\beta$: ```{r,eval=FALSE} ### set prior parameters ### mu0 <- 0 tau20 <- 1 alpha <- 1 beta <- 1 ``` ## Specifying the Models Next, we implement the models in `nimble`. This requires to first transform the code into a `nimbleModel`, then we need to set the data, and then we can compile the model. Given that `nimble` is build on BUGS, the similarity between the `nimble` code and the [`Jags` code](bridgesampling_example_jags.html) is not too surprising. ```{r, eval=FALSE} library("nimble") # models codeH0 <- nimbleCode({ invTau2 ~ dgamma(1, 1) tau2 <- 1/invTau2 for (i in 1:20) { theta[i] ~ dnorm(0, sd = sqrt(tau2)) y[i] ~ dnorm(theta[i], sd = 1) } }) codeH1 <- nimbleCode({ mu ~ dnorm(0, sd = 1) invTau2 ~ dgamma(1, 1) tau2 <- 1/invTau2 for (i in 1:20) { theta[i] ~ dnorm(mu, sd = sqrt(tau2)) y[i] ~ dnorm(theta[i], sd = 1) } }) ## steps for H0: modelH0 <- nimbleModel(codeH0) modelH0$setData(y = y) # set data cmodelH0 <- compileNimble(modelH0) # make compiled version from generated C++ ## steps for H1: modelH1 <- nimbleModel(codeH1) modelH1$setData(y = y) # set data cmodelH1 <- compileNimble(modelH1) # make compiled version from generated C++ ``` ## Fitting the Models Fitting a model with `nimble` requires one to first create an MCMC function from the (compiled or uncompiled) model. This function then needs to be compiled again. With this object we can then create the samples. Note that nimble uses a reference object semantic so we do not actually need the samples object, as the samples will be saved in the MCMC function objects. But as `runMCMC` returns them anyway, we nevertheless save them. 
One usually requires a larger number of posterior samples for estimating the marginal likelihood than for simply estimating the model parameters. This is the reason for using a comparatively large number of samples for these simple models. ```{r, eval=FALSE} # build MCMC functions, skipping customization of the configuration. mcmcH0 <- buildMCMC(modelH0, monitors = modelH0$getNodeNames(stochOnly = TRUE, includeData = FALSE)) mcmcH1 <- buildMCMC(modelH1, monitors = modelH1$getNodeNames(stochOnly = TRUE, includeData = FALSE)) # compile the MCMC function via generated C++ cmcmcH0 <- compileNimble(mcmcH0, project = modelH0) cmcmcH1 <- compileNimble(mcmcH1, project = modelH1) # run the MCMC. This is a wrapper for cmcmc$run() and extraction of samples. # the object samplesH1 is actually not needed as the samples are also in cmcmcH1 samplesH0 <- runMCMC(cmcmcH0, niter = 1e5, nburnin = 1000, nchains = 2, progressBar = FALSE) samplesH1 <- runMCMC(cmcmcH1, niter = 1e5, nburnin = 1000, nchains = 2, progressBar = FALSE) ``` ## Computing the (Log) Marginal Likelihoods Computing the (log) marginal likelihoods via the `bridge_sampler` function is now easy: we only need to pass the compiled MCMC function objects (of class `"MCMC_refClass"`) which contain all information necessary. We use `silent = TRUE` to suppress printing the number of iterations to the console: ```{r, echo=FALSE} load(system.file("extdata/", "vignette_example_nimble.RData", package = "bridgesampling")) ``` ```{r,eval=FALSE} # compute log marginal likelihood via bridge sampling for H0 H0.bridge <- bridge_sampler(cmcmcH0, silent = TRUE) # compute log marginal likelihood via bridge sampling for H1 H1.bridge <- bridge_sampler(cmcmcH1, silent = TRUE) ``` We obtain: ```{r} print(H0.bridge) print(H1.bridge) ``` We can use the `error_measures` function to compute an approximate percentage error of the estimates: ```{r,eval=FALSE} # compute percentage errors H0.error <- error_measures(H0.bridge)$percentage H1.error <- error_measures(H1.bridge)$percentage ``` We obtain: ```{r} print(H0.error) print(H1.error) ``` ## Bayesian Model Comparison To compare the null model and the alternative model, we can compute the Bayes factor by using the `bf` function. In our case, we compute $\text{BF}_{01}$, that is, the Bayes factor which quantifies how much more likely the data are under the null versus the alternative model: ```{r} # compute Bayes factor BF01 <- bf(H0.bridge, H1.bridge) print(BF01) ``` In this case, the Bayes factor is close to one, indicating that there is not much evidence for either model. We can also compute posterior model probabilities by using the `post_prob` function: ```{r} # compute posterior model probabilities (assuming equal prior model probabilities) post1 <- post_prob(H0.bridge, H1.bridge) print(post1) ``` When the argument `prior_prob` is not specified, as is the case here, the prior model probabilities of all models under consideration are set equal (i.e., in this case with two models to 0.5). 
However, if we had prior knowledge about how likely both models are, we could use the `prior_prob` argument to specify different prior model probabilities: ```{r} # compute posterior model probabilities (using user-specified prior model probabilities) post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) print(post2) ``` bridgesampling/inst/doc/bridgesampling_paper_extended.pdf0000644000176200001440000213177414036106016023530 0ustar liggesusers [binary PDF vignette content omitted]
Tw)Dm\#6RUocyɱHS"6r9T*J*7U_4Tښi%n?:WU4}I?t@-kX f3T$rmX?QS'p@ũ|I,0e k-n$ 1 m2;]Cy ٣pm[6hX5y,x&-2yP%DC beC',aTM8BB̓nȞp5BjE~ҩӹD臹>ᵒ,.pn!J.%|Z  jl&0_j:ו-Lc_hiq\b||B2$nxڻ\P_<٩ xt# ))a[:A aE4l ZSFqҞcwƩ *ch{4KـPUl '[0U4o5R@UJQs$Jt~;;:dAźw=)0* 8 N8ЩJ=`.ۄE3[$%t0']OONՈ0oi+f'hW]1 3nC.7Ԣ !T10 [PGg" x+Rʹ qs/|`AtH((Mi'Z>7lkWӛ Q=4ƨ-\+0n BlGGi@ ^TcҜO%mENZ!4$8Gx$ endstream endobj 309 0 obj << /Length 4619 /Filter /FlateDecode >> stream x\Y#~_!IhdpbNF`F3+{$9~{G[lIG,vyu|U$g|_^}03Y1{y7bc2oJLPk1߯-m?oﱛ+H-a벻^__/;y?^47Zy #z]zb#ҊU LuW+Z>ni}M=(HMqq/-q1 Kdn!3 hI/d&Po H.&.cO|`lT=\'q*8G 74EZ $&/P'7[$/j" Zd4 A^g{$cKk"EߺQ_n 8i8w,!mS(j-%]c++U$]r?_@χЎ T.-=) ʂrvIp&XX 0 ic{lBfi];)>`Kb]}\r,jX~n D8W0$;`3=ZWoi|P0%_!+^=v}SS_s9Hd9!=ov]XET0 Qhu8.odug'bS%Ɨ_qôuWE?2`؃8ǬoR'X9Ho_%Q+&k7$7Iӵ"?X<}fx@r2e06q&vɸR%Iyw1X=wAq8 Ug:!&RBDf%t֣"F%&{ɰVc`(Q ˡ>aP%z '<'iIs!$@ܞ 3g5T{:QY#Hm[!I\HptGa~g NI.d1/1 y;0) s~f"[DR*!KisY>"(+v ޭfነzb%ϼ T܂ A_F|)"59Ζ(}-i$NBL.=VαO崞9:šuRlj%]W ^{_qPXyG5!>obunj+D9zBdNۺ&7dtϟH#!*M1nSe;|S=tq넅Sp!r;DGbJkpt> Ǿrj4H m]Q^QV-SsFl`iIG"L/) ]xXkJuO<,j `v:Uq]dr'ykK,DGۣp h"ùQ(cԜln6c7Fw|niǾciڻ'x-݆,]x7_]ġ(,IDw .H%Fo&mFZ\9Xcng29+6)EA#C%QLD*;HQz]3Lk|ZSL5eK5D$S\AlO~8Ƭ,6&Pq!bP=hS JAtyGN=U}^O  cbs8hql㎍VN`оfʸׂ3b"f0 8gPv>0 r-c՗ cXz5o^ωLߖGeq;5%e! ;HB']!D[ۜ B^PH^gƬB<Έ\ i^yK>½GzQ?6'cw|6jJ.=;T#5!_3ܦ<6#jgnIx;MiїF2ov/'ܦmӑ {{ZG0A vK!k 9Zj/B.Yt2+ fEE+LT`0Cmv4|>k9x {™f ?|>0\\pq)݀㔥AQv0?uν5Lb3n݀JOe퐷72˪'Lwh D>d2dEN&/$l!lү*Cxl˦DNA u'9A݉ fO{^H;xֱ5rpX8 mw al7o`/ U+@uŪ؈,/1fWVoݙS\OF}Pĝѱ[.Sh96o Mڬ6Kq``'vU T2u6$;}/Css"3Д.j_Qt+ M"AH]#\gʤx=:M[ -] {R >`r> J錢70\:NB]//f8`6&@pm}ݤ3t|`f9x<&^>䋈Ŀ#]ފHo+b;{K"XnSۄ[-^{AM:I҅2aR,wO= Kwe}I%CΗ )Y4-jGg ]>뭆nV).X5\k7n)U҅@:"^DB0Fg ${6qчwG@l(9c].]8vx* endstream endobj 203 0 obj << /Type /ObjStm /N 100 /First 898 /Length 3066 /Filter /FlateDecode >> stream xZksG_UaRRelPdCBQ[l%Jr ,2sϽ=:(AhQFabF3l{G%Bv(HΠ4"g܏VhmpMLG'J%Xx6ʨO9X+'J|FPAQm:F{aL-ɨm*-D6 `0q2_VGl+x*T:Z @ /:ZL (TҜL."1Fɖђq0g!]p^s%$%\e ޸1[XD/"2D$|guXE^,KǣYcd>bwX{W@%`KBRFDU$܊9y1yL+ER\Mh]ҞErMAqHT0e@b,@)t \V-R`HCa-4@ !S`s"v(Y.bN^@LX+ R+x8}r`Oqrv|:U6?|ipǏ;իOjQ i:<;_hճj.eݛ7Fe@?Ԙ DFH ZPTߏ^D'~4uwu&@ɋ@R k F*(A:V`pvzZav{hu;㓄 >`h^(^*֣y;mq|(j $) HQLezt30J =_Lddn;ǁIj_coKQl!0xónLp(#~H{n*H2+(}䆐e - E8ES?u.ss`ȺfuF;{ IVHwXcEu/Wǩx>SbB My'd2:Ia;E}2(ZfXjAw!w ޭۙK\_Ouew֫Džqݝƅ^4Gý.d9zc0)4svgF(>Nwh-vA\\c{5>y'ZT;vqQz?6^~{wj\Ӵ߇qtyS7=8=|(I0%0gȪ`O1S ?G«8e=҂`T΃xI}!ovNzS}~p FOIx,iw8kY|`k  mՖ[Vw{a!~>Eq2_pF] #;?޷ܴQ>_B*?uOj<Ok<)n@7eu0jy!MN"uh`mcIylrXo=6`Z#Y1*!P<-q[Za+pX8H~T^{?NFg`k[ `0 ͹}O2xzr;v%8S[\$1v]fld0ZG\4;905,?汔~+=6*Y#o>uKL%zj]a!0Yֹsx*=>Yڈ+Vn#ܶY{3q)XYiҶkKߖai D#, > stream x\[~ׯ`剪Hޝ[rR[[ {S=E{Hɡey[7ʖe M;z_^=gFtM'gf*μvvzr~ZaC?Zg(Q ';o](|%_~G-/r5y k/LruBkT%%M>ՐtqUӁYf r9& Y;;gs UHiUiBZ6 hG2o`'ՙY.>ETLA}_;ôATЈK"+cO}4?lHo '?ﱫ.8xGaA:@~=X< XVvTP X,DBr#9@{.Kq ~C;{)ytз#q-sbH">pX; ۢz='Eƛ0Xl{wAQɜi10!CkX %JF:CF5 1qZb !:VTPpefך$N ߭saMAFezT?.Mv.A?>^0ekgk \\(t>wq9!\.qI zߡ1S4Ҋ4|]u83~^u~vM۴OA'-|G`2泀 j/l4k]8kq$ckd̶*3a4?Bv`'%~JiPmGr+%zBryبkF0-L{Pjl=$EFщk' @[ f5oM8`îP@?֗+(kf1SW((2_WQBq;~u P7Zim8AkWs^ڀWYi~VzJ-xgէo燃tفcnxދ -`!x*?~.$Ƨ/*E No2EuL,E[z/KlnSōsE[e,З+~ %[R唇k8/UP.~WElhc|Hf%>Ywh+''߅TJRrB6T#5+BG*?eE_'G.=9v0\0Ұ x !RߦO"Z@zc gb`?8ovͣ=~ˉ.཮gh=YH )peQZ{}ok Wi-v'Kveil4"ŇPp%w)R82O6K#Ɲs&UYIo꽰E| [b1if?uɽ||Ӧv(xU]>Tا?{uk 4nL&S8S߯rX74@j)(')3X0UFtf B@酔Cb s@EGkKMo(DOf >ű6-ʃ8iki<8oO^Ą~ g1 "혻&c |CowY&̦#e1'$?<ǫڰ|6TlQ>'s`A6KE6Di(d}4PK9]I3xFYhiL]t9.PjS|(kow<te[s4Jڳ K!Te_⬮2-7wD3h(>;~̑JuL1P/%|p #릋8ȋ*Ct\ަKo'h . 
pK[>E\iک>rw(Cf(y$iuvD>VacRJbm'VSQk.t0UB+[j'FMpol8$L8Pk,si=6Ӧ*6~^58:#|}CXgN|^q?#(nKuZ7 #1QlTӸIppEIȪ$*î7* OlVotS.7zGU('s., @UT0uO`y#X!XXT=*+A,jyT8&o±Ñ˃6g6X%ƀה|ʤ$q)aȷq \*W-+p|Xjkg~Zu>V8OouiX1^: 2 M_ca0,˸H|u؀TʉӫFnijtrJU\r V/pQ+&W;չ̷À5\FqA=ߐw(i{%ΚK_Z4׹hvxCi|y8:4fqU#dټ0#XY pV_Ņ=JIhANƂs$dq+"$d߱w^]4P, ln-aYY:/&%ߥm;Mg%^E`edFIiݥcZGsܦDeSyN/|ۇc4r-*{Ϗg۟"Hy'!_z!7䋇؅[/tF.OIu)ݣ"a\vQdv# ]M.QmNoF $P0T܇/MM&c꾣Ȅ>J_\wUnb}IL]Uٌ86$2`&r(F^ MU|M% °yH(GϷц_>]?ȇ>ePw8ECxi|gBw endstream endobj 366 0 obj << /Length 4607 /Filter /FlateDecode >> stream xڽ[Yo#~_A%Os78 IQERP_:{M&fr߿g_jfʢ+;3x3kͬsjܔW5*˸i-[Z]_+z~ww[S9/tȅy8Zʶ+\puH ~'T^ZB{$s28R=`aG{Z')B=q 7p.7C}oBL-'] 3 w̭]̼ZT 4Eq +;о!#O4 w5H-I[b<> oĀL~/st)ԀX}䍂y[; Ua5=}B 뻱^]3Ǚt*mg0Msy9RpL+ x]_HGb2ej$9. eH3"G̰KqpEp lڣ3pަ[ubsfe;3'ɭwE%BQ^ي!.*4CT"U9ڲp_A'Q;uApbm^p Ԑ7<#Rj$O*F$i=HV0-T_tŃup-[%#y5AVgɠCM#<7Bb4< tc!F@eYPXG*)JhwDml8+_Q" ce!tc-Rx"ykAͤeEgRxi9WrB)ƜC0_]5|4󚈣Gۼ-SAZPRev Y!"DrjԽy_3[q&a|w4Y[ޗAyG$#ȶ]6<ڎN]8D,g= g&!U"IWc{zGnl~-|8sA+qA/ `ovAS qfSDvP62]cs V/a,E_a>ġ~@͒mJ;TnH >]DҖ=n>KN<ך܅/۔qTuĥ⛝RG~f s(?7 nhv\ '1 𓂸^Țtdossc䰆s₂)}XT##O4ZScb}{z)) »h>rGBvp%w˚X qhܐw뇄uS>=Zv tAHЁjvcdze`)) \R5:KΡE>,l[ fQI[ 0e7k~TzN|$צn1*kpF<`#RCt:ҕ\(gL<8N/xɅ'4*ًx2^8ր?Rԁ:eU>Ȓ$cgQ3 oMt "Kq2ab"ZfbRLrr80'EL6 >T\1ᕶfTKZy2F% xs8F&v'֭rq2cQYH =+K J9ؽ+~+1+W`֙•>h&6 "6M`T. &?B˄wDҎx'o6ϿDVa&V#pe nڤiҐMvP(R5?P+| g q3o)>OʘD4&`/PQ_BRF+FF"Z/[Y˜yO9Wtsp _Y^C# <p5^' 8lƖbixJ6EׄdUEل"D8c k1W'Qim.dqlDfJPHCj)`P?ANGS0d[CgQݩřݠZ/y[4cr2Ø]8ߗU)BQ.J#;3wyk2OJNKiݦM91%Z4dCVךdma:NQ v6՘eٽi t?=( ')5ϠN!GsM?%$G7f*sƆx۬%,J&uÛ@Y٢״"M=B~ ݳ+) `hO,^nbRKhiYejϼr,4z6$E>PDA!#4 ǽKfx%Ysƙm3 z<8yMXr{ 6p>'\/%}q!#y%;Ejn-kuVI6 0 %k`&I03:!Z^VzN9a)4W?m6R1^ّozF2d!nl8X h"ݽWjJ,WMęR{s+{]Ѽ5oY"J(ڝ6r[f̱ysmvT8r824щH#XyEO(.l&nBIk"rP,1l6oFpB\`{·'^RbD:tH-{*"#yM&KOn-#(p gCe#Oy=_UX:0R^(^^B} ՜thB7R%B" FMEx#pI,m?H3 5HDQ[ cth9=)UužV%E ] z@j!B"? UQ7m„m6(:cY4VgG'p5۰Igoʀ D8  \w%lךxxґv<&NU5FMx\a3[Ƴu;Ni1G;QR܍T5D|{gՍ,^ Z6y=A} x6*+ +NR 9usHWq)Ѫ^FX(k7ֽNW=$Y|W?p@qԊ٢2G\0oUƨEδ/&U g_xRRעspD[[/|ҠJa|T^'69p$bON63维t98e{K| seL%lʺo@HEr7X I5H[Ϯ܁m0f&[>%4'm1+@V;^8^|[{U35L~'F|P̜pe۪w(g+Иtnw}m'dw FU*x\>Y}Ž?E>ݑW6ڬ@x_v>]3_:dr4ـ7֯H3rDOz`~xN3o?Hh+Ki`}igd?}cm ?t2G/,x^U43=ǭCdU]rmH &HI}gai_3'G=#`z1O]5&CCyrG{xsg(BGNt5x~*'rMzyq獥CFՇ27sq/3m}) u1p@&O`+8ZU!#&_a{Feu9ư7%%;DI?Dy7t3O endstream endobj 379 0 obj << /Length 1994 /Filter /FlateDecode >> stream xڝXoP3(|h}H҂hY(*l)ڿ$K}fvfvMLt1yq99;/]dMRFב\RFEV$ϢE!~;m6SWh3L hm[-r ,`n' ˫5 !`=͙Vd#jdHy^j+qDC@8Eà8i},"° ҊѦ{ \㫿w苮kIJyN3vO !^[#[Y. 96dÞ\w;nqhlb: g O2b} ;kZ~v{)o4ië@=`̳1(҈%x7޼ e3¦h(+9KB侬N+18AhAQ!bkbf97c !zDz;spϏqJ罼q~s:{ K&e{R ҝD~)zh˴|$2=H ? lJKƉq,)!ML:<"|J|H\5f-OG -rf!.EG%#@FuY\ ?ݚӲWz.k ssA͖.4gݜSB'^9;?AUklVoK˟J4-qF:ώ? jUYȣKNEju']5R/e -fПD~jZ0^zR"&>4TZ+J;/-ϘHVs^FdTPLjPo5R`nzyp"sY>=rMPeO3^I~I(* ^J|L3s9w}dWetOTmd[~s. 9')(w/ C3ET&U\@L"4by83j)5%YiÜSO~Wg̶7n7*~Tg+QVDZs8`_FU?a|im3__`ȻOw +x=^lT(-P2dtrj/XǕlѻV[6 endstream endobj 346 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./sleep-eps-converted-to.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 384 0 R /BBox [0 0 500 420] /Resources << /ProcSet [ /PDF /Text ] /ExtGState << /R7 385 0 R >>/Font << /R8 386 0 R>> >> /Length 4104 /Filter /FlateDecode >> stream x\ɮc+^, A;ۋrb9qF~?TqԈ@ЏGN^dz;?ߞ$9 Tt7>t5ģsw~=ᛶ?|:Z9y5~ ʏ'r?_ϿY磞]NՇṡ+՟߽|~ZGko>?Azĸ!pz dh=:2z V]_(G\!9egwtJ:w77+H6f}LH54&jٷҵ164!Ѱ r$8dGE`S]>?Z1r.$-^"aq4^%jR$a(!r<{G 鈐X}dإ,x٣н|!=}O; a!q`Tj+!Ohf?/>.`( P6#>*2fġ*"pm('pi$9Gu H8t `}҄ l`!]% Iii"EZRnSӼ GFL2(fzf%OJStm'$-6Q%Q \OԬWO NFR{TlP Va67V"Wgz*C>l!Dbf|t@x~ aFh dH#L  G|bH! 
gPJ2KeuCFȔ !&MS**:$)H N7"rs)Ni:(3DA*q x8fQIi4z!Q%S0&=%+,P2ě Q%HAPyx:XE!`RpͣEC"|yXO!c0fmZƵF SIؼ*`)9TfJ _ԓҁ`.og qQI U5yވH1MF-2-a x,TG͊ded^6HΧBS 3v [tMagދiM!fܸ"LTpam%LO[؁ҏhXbi_R _+JG,ztZB67!j(:0fb4C}yK0I,f)f +4W`Dv[i*Y)xKB3&fbdp&yXnfIH_6s a@"C'/N$(0r~5dH@ɢLH*y0y: F'FPXbT5Ny~LH xΉ'Wg"lWOpb=ʸ1gvg] 'h$)y|4~0}$j@H g_0: Wƙ\*ZQX%BMU6ܳ{VsjY=g5jY=g5Wr/AG^N;$^H^7Fs'`1 gʡ'F{H-MU-2IܙөWRH6(8* Xuj &r#O`L^Ċi=Hť"Fʔ8%46BvqZ2V:OV5`täjI JĖdғ,a:jysSZ< =!*@;gjV1" 0%hIGoI'H 'o;f $C^HLej7ƳZnƎxoVf2&J7ZV-4X+OLVHeo @>evHB|`%FϚ Y@L~bD-A!Ȓ1qQ %K@4K=UT"lkf@֤N&h"/z9Ñ 6I=RRV`^YuT;0lY7O֥:j@Q6M#ULB 5Z>kS3+U)TMv(8f jXR$y7$tӔk):9)pRD ^4_:m9  6 D=j:-0cj L6o+`)!uӧ\,ؘ>lZj~*^hQqKC$0>p)rr-CR%ЊO!sT[SWVo%9 k^^βRF˦/Z0-FE-GGn? Zy‘Yf P3 lY=w\&#I"75R˶#]r-tH77,+@k9c5}/o;"JSadT|F+Pe-lE?Fhb~,YjFH 6 WԼp6BzőH`+^G=0 ]q?.^̐3{ѧ+DQețLDԕ"/g&[k3s"̛|HL7^l)sמ#.ZwHr#CW1=G\˖` |Q>b |=sqyкKM| ^6b+͚=F\vi"VL-޵G+I U=f_@~?tvgFHWӇ?~8|ÿ_p8'.:<kG.߷ р endstream endobj 388 0 obj << /Filter /FlateDecode /Length 273 >> stream x]1n0 EwB7˒ Z%Ceɪ=ARu*9MF;OWj>N&)RhL9(h=e *p) ء;0v ZFD@p]d+ɖ.AUkT '"`$)^~'5z> stream xZy|TE>ut;/}!!!4!K@@0l1Da " nȪqPAd~nc|oN-:$@0 y =Ffon}vՌ׃|@ϛ~o05 ǭϊ-з6Sb"1mOmqW|<1m`Fs,=#|W;B`u[x{#[;:\l{4]`e Gb=%A1PR8 2؁&8AA2i Yaohm lokOt (Ӑ Epbu-EwFamL]2Ig y;:&t0KWCm3v#Ѷhgd ]F/=}ǢF#)&X`{=JT' uYɚ"Cp?"p?wтF,řB']s:Ld$ 2@ޥҨT=u'u=J/e`z5&2r>z8z7z\r5`A3FA>~/!k$2  z4Mmv ,HPpUaU]#gv\mhLGJ!P88b6 64 ~~aN9RA?6_ꪰ qH\CeqP4cL>X-vwۺ5ˇ sA0^D4LIU]n&a.TAte C#\x5ƒQB _Ax !\r}KQ>]p#mPBBxč \qCW^*ԹRD߇p5yG]hv_Ax !|:$"<߇p5Oyoቨ“~!'7p:72p'@|U^(3eH@E~lԧ|X,J%VXLf 7 ޷2C EC@l,ưãi'^"JFfiV(l_&A,  O'a&8ɳ!K'9j'c@K1!;{/ 0B2C^r~<H_ɘ &::pqe cUa񛝎ya׹&Xp,YO_r8gJcz^g1O *Eqƙ 1δ?9SR3v jvzJvT`8Lv-FzF972P&Wɒ`a'P8=*Is+[\/'p6Nx Uy.L-sPx6L^B0y^*|>|h10ٽ_#J($$Y=0j-OO]x_gYlz]-.9q_2ML[C֊wkoT;{aV{g[+z;|a5ckau:+;[(xFEQQ%"%γr;::qM?!4rz5T6qZ,]\>o>; ^8ẠL24O~0)b> 𥿰G~fj/x]|^I~P0zXPT5NCYٹ?$ #n?n=J_Njm/1ۂc *f2;q[dŧS,s-K-K/ZXH!]m8֙(Hb]EBJ5Wd\%,JIvzwOs iu:S1TW\ssF,ʣ*uj?XOxF;j4D$nh,ԕNƹQt='K$2, ¨8ze|_rqՋ[fZiAk_0`e׌^yd3-nS;y. 
3j4tM+ Wz>P .g4_AvmCN$" j臌`ۃU, K EjG >(+@/Xr5~ЃOlto'g׼n:7FGaRrn쏤)|in^PJ9E8JFXH~G8Ӌ'N&j4Jψv-,Jgi^M!Z9 $_opkg#5C}\Vaɴ#Ot~̸'>,JLY-*rO(Pq t;HJ@_Rx躻d|HCM8Fkw%Az]~5 h?ahF#UKdiJcTC/5S4r~c#]HIv.3葺#{æM"y"ed.ֱ'bbΔ_#nu揑wI.M̟>=߼}h_DdClsrp8qℸ +>f$XЯ= ;.^w:8Vf@fV* j'Y,6;:%rq+?3VsVM 74Xh$Ia$?d45[j9b]mgep,FeLiuON~GҴGpsr_~eO޳3cCϸw~S a:pS룽 jQJ-U>^MI$KЂE&H g\[vL6FV* +CV6v.kvhlgnijMhBP"5vHEL\ԺIj*ԺD> VD'mO;?pcƭ<=򬐚H-qQp9èp1cg- 3c,gx} c Wxa@FŽI3Ͱ4KedNV$iFR$;& JKnE,FmO4/ v7noi $#%eVD"d.*i.}>s{r{=-18Q/7S=wy~ꒇ5Md"{uO}'/Pi(-(4J˯̖pżN%my\ܡQ9Jҩ@Ɂ$/ͣp,opw&9@Yʕ\ mGu-`+ ;@|eVc+Km+4"Y}Y} aQ/lHbD6.@L `ub0+-J5%j" k|NlU[0ppQ/;Gh˛ߒ;fPݻA#LU6-ۘf~s#6v5%c䶇 v:}+g3N6-557ShzJVy M"g$2>3 dS$o2lJS=BJNMJ웕zwjqbu#ݖ##F3d3$l&ِ'*g^O$($kvG86+9ZCq-d[fgr>5Wi/˒;vkPOqHRy .16dR^6)V@`Pr̦7 ĕ"N} x=rdoc bXmRt#<:eԉrSTIĕxOŎ#]wأfWJ?8GqSr&O2wwo)mC,IG~sçKRYF d5h<0仮KlJ:%RtYq&i=\`B6@$8-s{]3U>7MAݦޕB+CRRS4c7ߧN̐tyP?'GkF5юO%7)ak(& ,Ǐ@ٵ,xKy3=2$N,)atHr`N'32r2x$ e.QΞ.g[3S|AK\Tх\3nt Sc>kd[> mRr D߷lZѲ\Þ+?ϓQ0k?,㑖yy씒[{gg;gP@ЀEkpz~Ld<Fo7Q~eתe(S;53[SY,!hq4ձ#MbGtǗhe`5bD,j/&FWT ˘ؒ|N!CzmJƑ=[~R@u9%NԽgn^:ywH){$Timd̞Ͱ\5YcxՓǂ t1c4a^9孌YSk{}W#'jL$t|"IԴdz_*PdT6fOEzϾ$1&z=HV6ySBYP4E1Rpr ZZpZ=GuuxǨkyuj޾~K=  L&Zuq+=l}hwB&2Hy= _Ӷñӷ-MicJ@}L## 8, Ceev:˘`9*/agdN1?yOrc_oMVjS=;#R-@ZqxJ'\]>f=|B&ǐ?t$0U;p[CtZ~ȉ>z[A/]i ;D*~뗫<뗾rSFm|{ĪKyot~_I֍8HɊl=;}GXAI|B ߠA^v z,3x| T7koi5Au$W =r"nz}4$ /b@VsM 丯ZnOޠEqmoHQ]b,W$).c?ճ~Os<wZu.?* r b " "t*p8K(I#UM֎G$=o~G)ՋA9svD10C-U '0]@`jk0a$hT6k ](>{C{MO,=FJ߽xۏ|ʇH/oh endstream endobj 398 0 obj << /Length 2524 /Filter /FlateDecode >> stream xَ}BAG\6p~pC0:v%jVv< oO]}QhCɮ'o^V@QWjp;jPdETlp; U2Seq Xa[xSoO_-ῖXQ α΢2Wb Fxt?YnA"x~<{a~CX>ɖRjxo1R@g1ѫGsëOMj5I%CuMC2@ʺWNme !jQCD=dNe.@ׂe%~ g?/elo;VvO 70ƅW/}Bʗra[ !{!q*FCDՉut/υe daSYd;LeCzD;?~ɬ]M]2kMl GQMzDRQ{Dlq kɿM@$ S/͚r8s<lLLhO'ySqF_oXl*$< endstream endobj 404 0 obj << /Length 1616 /Filter /FlateDecode >> stream xr6=_ .vt;n֬;}tƆi^08nd::d{۳7ӳ1ۊ숍+~`E?&ƏcflSSI/:5Zbo,-\Hwi;05W#ǡg`Zwc'4RY?No.O%p'pw 3,籑V8_M\ !5;5c2򽨁w&,2'>~}? J &WWԦ/PYlfMS喏L0Y뽲3C68b-b|WPGsn0x^>Ank4m΢ٻ5XҶv[+Km=nUʠ:yw wv\H~vTojP7LH_ӂքvv5Ow*ށX+06d'u]!jk@BPaAe>Ώ9s`{s(EiK{q"RHls|B='f| iG2`xx8z*uOV0QF'gn`p?q,[pxdxFn m7}, Utۇ1v' DC^g N ʆq)[A(1mi*.$d^h ךF iZz$.f]"tnNBqK?NF$r@T oW$5m|&-"X6 Jvu&Pw%"(!D75y&#ij3?q\'X)GZPżꗌ sYGY g.6P3.7.XHʵDfٞpXhU|',!;r|s#lIgr<.c4NX-hh'y3OD4n D4RAe)cLj^8xT"$):׉W3p7A(0z.#d\œ-QQ6'NmmܑN֕HPLr ]p> b;"N'S5jҸ8{:!=NWkOڃc{(s>p^>v`ifjCݑ>t3{bt] O^+AiٖH%]7~bŃeU<2.,gʗXV,V\A‹V6Mٌ4X)s7WMأwz ۺ@ilfE:HTWJ hOJș.ZS6z¡&}hS愡'|yf!u* 2AŞCNt5 o.y} ٽP.z-A7Nyg;v4A,9G?/Gj???ǸM\lF"kQE[]'+zV膒^\Mן"&wkbyRBI.JBkM*dFHɿbk nbv 13DW4#4ծX_׭R]ZMisT27 gR1w:)k ~/ endstream endobj 411 0 obj << /Length 2217 /Filter /FlateDecode >> stream xko6{~P\X$EhM^+vQȎ-׊诿ᐢl+9A6w_]L>]PR`E%+BPJ,o-Z=y7Kڷc%JDMgG[UHNx q; d<%<_r(O"H'Sà"gަ&ӶXZ/mogȽnXp[h # (\5}FC: 4N8w  gfԷX*lGyKaVNlf8z@H9/wU+kP s$o잧 E06Ԭ<Ru2>gë lXvDQn ]>f/\؄Ʈ;b {j#^X(e`>"˴'1VFMei|:S]iֶ? 
cHDH{B &')QB%Sz@<7TF|}aJ Wԅ@ԥ$r)J#L'B1gOcTw0cM~??L Ή7eMp\ HrQ^ YW)5'6 VXɿ(2;W!%`xN a8AE7> b9ܳ9sNEy$$?e-YrUbG:42,;dP]yU&x=x`|uocceGMhܞdP=G>?h8mz.%HtE萲䧋žlx\yrgVeo}@O:0X՟(EN)LYc'5ق}aQFeus4^,Қ#WxI9N"P0$Wۺ4v6>D.™+|nc^p thy@A18pncvۻP<&nwG~?aJҏ9̀UgvF;[=:=v}^zq : k$1^8^ m|ڙYӻ;/x٨hc;r N)" .oa*LURcH|Xxх X%/Sr360Nk.F< ,peAU+WSPK BU5"\OJCR˸0կqY4 -eTRhmQ'uNbiyU~Uux_K# endstream endobj 424 0 obj << /Length 3493 /Filter /FlateDecode >> stream xڭZ[o~e87^N@cyZQMֽXv\g439wg_]=Uc/LYek.n/LEsQh]x;ץ{LW,&f|ӯ~Fg[uk)z'}E~lV/8rշ2!Uma=aiSԅ*~;/`d)\ \?]Jq+x)|.mjp)B>:다|n09| MNsEI-Jg0fMl&~~M|/+ N z҃6wyqɺsf_{z*~.ӇV0BmkLaG1ZpZhr߿`s$ۢ4LqAOǮ6}Y"7;yň0HkLQނOFޭbDEX+_9ϐ_.C uB?VqKƾF>.|bX=Y!8@U#ށHf(φE^$Ԕrv}4&W :_aTVpƟ͊fxtzk+|z cvUQ-[lBc,Z_JU4 nv '?;U8ۆ/,5 B8[M-yj$83ۥn!_}P,<tE׺ =oIǴ;a=)aN6ҟ "lvvi-+JmWAŌ{yz@-2RӍ_kP PY, UU]L- ܃8/ۑ#6'ۖTXdwg TMuBh9kP£(jD.SȾnHhIs+^,vFbhq2 z{& HM>a騴l~kyVM=3uNX*wB Iw7t=V5yЪMm[!IPO T5垢R_467d} RG_ؓ=A#o~y= s:!w{ p{A>XȦXNf.ˠ8N Zґn- J,cYrڅ4ȹmy(c`6|]-uw*!J+E'݂kmҚQȠ:E8hRJp Eơ$ϸL ?Y_\Dkw0ls'&m#58V~ݞӮ{`L {G1V=g6h (a Q;OL5d߸`4+*z~s(RrPY6iQOR0ΙIӮ1$1X+~ij۶(0y@u_n C3i؂ lBsˆI662Lby /aǭv)sHGݝ$^bK2##GQiT(fӎy l`Jt' !䟛Sώ"AP<(vΑaFif &M96v\9lObߠ8JRʛx-Ow/~F1-:}*H((1b> 1/h\;0m|dEu )pY|mkF^GOb6qǜBӌ~k`$ ٥:$+ii Plh)cP}s7?ՌѰRZ\: '㲅ksCC  jjx9٪ٲ y6.#BZmR2! ~64(uvOBE4i8j` ڌƴhoh(O<&*yYKf~T:&F4{JQ"%iܼ% l32sj'Sz}TH!*,eO@*ӘaMصÇ%?6mxE \L<,L&Lj2BgN='!g2&"ƮXś>+Ȱ5_; R g+9 R8DV XjB7%ԫ n=!wjm9}vE Ÿ/uUNƲDwoPT߱l=FˋͶKξYILs$e f 7fnj2I.9IBuM$($gf_h)ZK ~O6t"% ?:=mw] _>井uʗj0?L)D!qy2i=E9X}P-t^ZUhx 1f'+y( O`0gG=M'E;kK1>SqK,0. Uc_¶M'%ylJ EįGj1;9s(9B'kg#ȯ.C\/ rP|ճ7 endstream endobj 317 0 obj << /Type /ObjStm /N 100 /First 914 /Length 3193 /Filter /FlateDecode >> stream x[rF}WZ_RTVd{ި,gM%&c$(,!l̀AOO鞑 b`1յYKl4xRY3)4U 2}b*Np[L:QQX z9L%zyCdzc2%h``%2R)J3@?zheij쨝/̫-zi<BAY&Z _;?xjlNPWf4@; jLj3CD@F0+h)HDe4f6ì6沖9AZǜR3G=9Z"sp "3uOR.b7XTwi[#X4**PH,2G$ 酈!EuAhhd,G4$XޣBD51FG%Nh!F"IV ݕ W9P,tF $5k)ڐJZ.CMvTL X #"0Q4^JRk:jY)kH42SX{0rx]Cz@%<~<^],{X$;YoINm+e/3[)! =*0YűvccQM>+!Es^ԳzDTrC(zT>re*X.U{TV)P=Ey23.x)ɤIayTbJ@4K8Y"5"VJ.ߗ&EMZ2& 0\;M@@cknA&"VL8.hDHwQ݈c7rC$`aO{/`Yb9@Gȏ Aykc ˭͡}p)+P-%"" ߫Qh\η$ ?3]xѷ">(W_ ^}+j$ tBHZ9( :1;IcnSgb=(ESCf岚u,Z~bIm\ᭃ<`mvCɉ$"mRwJە+z7!X=)xZΪli7MQ- R{5Trl0Y8'(NJS$GF\/2LCD _i6p{<: #.2!hS ނBP8l_⢨ȁ`x aUm.r 9}9)6;kD )JU뾵;|< @&w9lL/[Ylk/mP?kÑ \ЭiLao򢂹Gr3ww;w;w;w}Wl_?mٹ7ӹ7ӹ7ߍkqm_vw^Rmٍߩ[6k![ z0H<#v *[mDz' 'SIÛy\dٴeUyYew,?Wӳ/.)p0^ O<oD@ۻ>TU >*Ǐz8}ܵ'8p)Sf;7t ۦ1.0( Rn<qJIX]awzu"czqIў[ !k¢p'uWf[KbOIG{CD;P>qѳe}:.{_>(* \s> -믆$rjʘB%E쐈գ ˍ9av ƌ,wIpuJ}d=Һ 7&hu:hLhͼt1fQ+ÉuP "?MPxyg %G\O>ʧMYm 9$fU<;CMWymd?NsJO,)# m_lSʻ}>#tqmFՍܖǻ!z[- rlb2<r!򦼸L{#.\+)q5 :gt$ґإ[zmѐإLCb@CaDp'oF4fGrX _ȵ"rXrs^41dڢ6- nmomC7 ڶˍxcImヮ\7t "hϐ\e;Nnf( \WR~.x]&Hk55$MK'Ҧ;qǻ8fpCY'pHy뮻FY;ިU+Rj%[HtԖ+;u+W22.{r]]-gyuҏDnӽl̓$ۮA1`]/F+P6:J1*Y %u"_> stream xZ[~_aI"uIl4HRMmǍ/SɳH)^΅fY6ꫛ/Vvj5UjV2s;Y'[hef*`jVI k{kvߞaLO [ SUy'Zj߇'os5dvIdRmb W5amչUuu',ee"r\hK/󆅪Jx.j.a%|H=J}o)G SU/ϋ)+?f?w3)nw-I .rXRֱ4= -1wWC"`tfVik6<-Wi8y{vBb/L^c\&]Nz CJ~7}#=ޱ U1YX(DF1N.._E nryHBQbENV| :4ۓWfVvX1C\lݒ5/KyUdUp_c+L"& =H/L^"0HE-&q 蜵|>@&SiQn"6/P&Ӣp!#b??,RgASY0 N}a%=1zy@*ٖEQ@=q$! {QWDf@p[a~- M-E|8! "$ZkZy 1`e.)h3pb|g*|AxO9k_Gwb6X T,j29y!;6k `sQVe(iB1`G)[pH: ]FdxR,u@Y]f-E奕 tqSژ⵮Ѝ]dTRC"4YZ0"?~QFo@<;UQLiVN~>,զ^'|p筰J_Q;S=U%OOFA!3,\A,tV@ 4NEס.Tv0 j.mHAmvI;_|]eliodњGIUG[y+wshp%K:HUv`U1/>̓K7W4R;8_'A^ص{p'o@onܡePlN6{OLQk Gyez??C冦I ^0;/^Ykm)f3?#XBܓz7DaF> G g8dL+CBlDzn51~g:}E8)䏍<[ǯγS܆aB55ƃl_ BmOc()Lw,qfs);Ӕ:UuTN^w#:~d>]~QNj$/1ǭt}?qk;mw߇+4[FfDפV̬?n]g75E!+/=#L5EkO.F85 Zx9g (T&i䈖U@"^dvas|f>?zRu{pg5Cu r\Rw?^fY2kNxU+b"]9]"i$g텆:-:Qj*YvjQ~:T&8>/mS| B* j7O.l! 
'q]KXl]q&(-ng.;k:8NH d;Z9pbwef0F@|UMDX+ "ȇ/*WUyj2Ib1o> Wi-ًk14gjVu)άԳUR={m  ?ݪ6 /V~Op-xcCY!?(RW9H0ŞW od|c*5^n "໏@\˞k'Z:vIrԚRNk@sA " ׺ GWmJfisJdo'H˽h%+YzؓYPzGGlQV>s%|ƯoNv8Jj>`xGg(A}APts+EjH׺-wWf+>֑4@dS_2ȝf7b >\K;e J/L{ endstream endobj 468 0 obj << /Length 2716 /Filter /FlateDecode >> stream xr7򮯘քy ._T֪C5"󡐔_\@@w7Td><ИL^g7.jevyv[:`AL5*# Ywy?uk_|Olծ S~+k+qy;#L˹i!w.+#u g\噆ALRgҪr&[,h n?UfYxsο>e@=௪]a9diTQ)bnZj]͙!1r.HhxRM_B;kIQwfB٦ZU:TMS?l"[ʡ&ܤs-S].,bWr'ؚ|1:'d`.Ÿ[ "*J} 1u~tCߤ+\eaxZ֦̎aK4}2D.x%<),}%+߷¢ aBul' a߃)Xj@!|#F6E wZa$qgd g%s\%z]հ[b)S\xd ' ql-D)]Bp}nf-V1/e:x:GnC7,˷p-x7M)G 1u{+67MFO\9 E0q![aMz>ҵ(v'<-2ck9xi/*_GV7Gv m(tTC$ɨ[BT#ѥ*\W (P&iGYml )0 y' " \ðڮ`BZˎ:r*4~ߊA v ΞlZm)`,1EJLF^z!ܐ"uX5/YsCn[vVBclݰ. [^ 6u\ͨ3TϽ~*_HɭE[c X'9#)R/S*o.$^WI3{2/{9z=2x}tb+4V%$8DzKxDߑ*B4vxmxa|Xb%cW-PnC3jc{6pJD'a\qBxEY2cGp=&F|tY|]p,dUe2T:>HQp9U[W2L1~L}|!W ҝյ JX! y+Qy+*c 0|@ &=5T_O4VU'Ah(gSQ)Wj*T5O"4kHyR{l 2jSG6F7%#T,䁶r9`é] ED`q6>[{8p>WRI~҇$wc+IW3.pH e8=$<9忛o;ߔ#CɅ}Lx},1`(-sEpq??u!]6.z(O-Ui|Զ$ub| ?z=FR%^%ֽ}ZJR0q u=,MR*G] =(nV,ӡd=RHY鰳`o(#1|MҪX3|CPb֨f8ΞsI!REw~8 Szr8XVi}Ҁ.ՎfC ЍCZ|p=#HYR6%}hz;!o=q-_%Z&[vP&)h̺i2 HizTI+Qo~` A{-_aOh5Δ# @ЎLHz5+*zn6P d%wf&8AXu+$;zA݆Ǝz;tZjKr|2Xw']3{ozw"figv\㰞@y5Pz vKHM"7&2 fuhmMy$-DP_dH@6`ꏇ_$ endstream endobj 442 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./turtles-eps-converted-to.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 472 0 R /BBox [0 0 520 400] /Resources << /ProcSet [ /PDF /Text ] /ExtGState << /R7 473 0 R >>/Font << /R8 474 0 R>> >> /Length 6166 /Filter /FlateDecode >> stream x]dqSr1w"heSd]'@>g*XǗo~os~?_掜Ra [ ߽Opz;z "Hjxb@1#DhX潶_aVQl@ ?5ƀjN$b0uHP+_`ԷLn] uI-0 Uhx^dELsT4XXL|D+醍0w< fTg,YyE,9Y\P^z@ֈܥūZxDˤ2F?F_r>>J-c!R4KP٪7S[GU;x #)F/jv ͐ xe8hg@(ɂiQ &ݨLC̄D{`#Ovxe!; "Hj55kk洆k & p*uDNuu|U}Oq(V`ٶr踮eoPnEAź rPȳ([P!iT㯸nZ169(>E,+iiXM})G >1{R;n_VcgEy]ۏjTgwMKrcѯi%}~SO' k;.m4IADh,iq¶Ĭkɍ& G1[21,ñ9 src )D2̃U,苑T4pnSИT0eA(aw+U:.% թpYC qI2!vݸdL}KٷdVR@X$T&RpY` ݋.l2`Fb-X[A!Rl:Vd $oAI2ĖI`7)ۄ)tע$5 \GVɸdզ߂m+s!o% smV,p|M@ bnb58!'\^Xsh $չ,r$gb)tQ3([Pc|N˜|.>EW](\7E~SErs(!`kMk)Sfk Odˑ|\ʒ|\b,Jbk1FZLPv-&S0k40ZD>q~u&ri&M9Qn[Νf jKlۙu rA(YT=~SPd,(-4Q˥Z.>Mriz33Sy 3xv+,|Xu3D4JX0J yY3>JFA7ӆmYZfzj\ߗr5$RT$E7:eh߀q/o .R2aJ&+sr',S?Y֥Sh!(gXE2Ŷ$t<4[S3u^5۔qyQ= ~6vZV,kDgڌ ZE<"bcq,eixhK9sYFKlC{!Kl׶!E(mv!ťaBE܂i3HP#E<"%f\o·䄛65__-|L e G_~4P#dAϠ̭(g:z3RPoFj|Ƞ*DWG y*+_~FK(噷J Kl=f4wl$yZP&{Z˓gr l-H<6( t<BixظYqJ ~k7Z\kV0gT+;j}6"8lonak6.[  Q(ƪn RB_4 P~ ^Oŏ(QF(#%e|"G,~æ&̠mlQӺ~QSS;K/f+x5:1"Y(CgϜ3-gCIQd /PƊ&]0xb?[/P9|@Qۗ xRU9*ʜ ߴyBP7-x1$ZPE t~YVmO-q-Kx6YDiۘ7hnHlVDQfO"^8\JUrrF KHh;mˑ!*%yAPij Gޗf}?U}|ei=qζٖ `H#~oqH=91ۋ,Dqުd8˻͞#E 7{GIf,r9SKK<$_A 2 \M'QM4=iE$ 8[pLl)Ђ4S=0AȲ  hT@˦:*(Ȟz0's99Sp 4_?5ii062g '=Bw:sr0\uf_c7fe7oUΤrUv: k)òʹ1fZьBA(T֌BՌ *N/jNx6/F2ɻjM}n sf>!<(F}P4HA9]IFfT^ZPo%j4 Ga;cA̴rRPA Z,uw`d瑶9'ՑQ3U5yfX:'MrViĶ9t9N4]ػ;ϋ֔˸ o$01+ʪ~=,Ȯ׏pD"^!;-KcA~ă'% 鶴"ooh9 R-+(9=L}r?` _55u⏱QyQq~DN"Rw%yqvYŽ}ix^dJ|WUF|?@u4K2, yCG5X A>~fJ:asUϬveCcQƑYяB5ft_~_ǿz/(6CUd endstream endobj 476 0 obj << /Filter /FlateDecode /Length 232 >> stream x]=n0 wB78 -钡AL" 3}dH{ǜV|%|Ӫcsr.)+9u#j< i(>|NNs',3݊T}qSNQIv8e :p|;(`jߺ1G'aŌ :)cЁN̘H8G/y%ýVʫ'p.)_e)Qrz endstream endobj 477 0 obj << /Filter /FlateDecode /Length1 13288 /Length 8867 >> stream x{w|TUsn{3&$$!QHC)B0(5"`AeqYE .oBf~Ͻ ~^ιs=O=Zuy ^ym<{d=rr=6o:w̖ws-&8/No8eW;W NY @5g/=9>l-s&O̪fO}.^csM=}t?w~5#)а'!Ķ7@#L /ٳN. )!,`lL8Fnx b CUHS ^Mpl0bO,z˓`e1H"X 'g#; n}Ì>t<0\=o%&2ju%g\XJAtjO@NϠ7e s'_O$>O|wx p އpNxY  :b#yd2 x?/AI)-eT5MPQ-NcCxDO_/3,gL533mxf0l1۟Ύf[uzz2dd]. pqw^F0TIwc]SZq.rAnx~ܺ :fؕ ܂sSL98-;wBrQ C?՗]NfMF^(r$,CS2*}UO뷀[G-[F_Zn>gMTh@s-oHmӀ 1HimaB64HbU *@ q؝!ڌ >V23!͋ ON\%:hJ3OhjmF'm_+654yyqaf 3p.1YRaoV4. 
o;ЀrQe3,4!͹iXORs ɚv*X#ӆhwC iZ|^EL rT "paO "Eſ᾿ ᒟGi.C/~hO Qၿ_p#\4W W_FxpO EF5Uyk#{p/#|COoDjo!< \ny" 7]C8joP!q$(MW: -%2'[w.Sh!>L;10 1&Zi/cv'FGmoLjG/N+Fğkdd'y< ~v$|{v텹Z0bNvWy~;p1Yr3rciw1 kQcڟ u芑m1' )b1jGG";<xLPwZkT8̃Zù\`_L'>v`q,r m cdI&9\􇇺`ˣ_Vr7q<.윁^+x\h/sKNP?Kx6FF9(6<3<o%!p!Fwh%ILxϜ&L<]xNljkhn3Sf_aZyCѦhT>׷]򟙬\lI?(tO(|Q֤$[:+gԕ߶*P]۠c1rk݂Z60o'Q|8o>; &^xP2}ISz[ӏL,>sr^mS9!vƘa_pD4`0/j^Ò*Qs,ErWS|ngW'uu \-2@L*K ;%Kf1DfV#Z3rmW3zmAWW-|)`6ѥ,5_>DZw~э5O[/??r\:D~=uߋɪWZNߵvx+8Wy>kwtx!SŌag9ou-q$k(>ke]j]xB Q39hXZEǽЛ椨j_Ko;CYiWiz,Pk1F=U:U(5/oLbѐn,ԕSp>,{tF'Q 6 ܃WԊQ0G$Y7hV] #sDf-۸~Ȫoƿ{ꦡcfqE9 NG݃uMə 3%d.#AܡT#,UZ6+1jYQT~u^T0ASs+EH s?% : ;6%D!\٘v,`O2(C ,Fˈ@$ȹ>b"TutÁgvzG4`֦?:j$. y 5q 3ſnz x:^|b3x{Ϳ|~- G5m me}IKulҽC0"-/ϘA` ^2PR=oꭆdd QXNn,I% +6 VW?%B;qYiȣ_Q E" ` "[]U;Qi/eCLvW {>L.H&ۜcQ %Tʢ~Tl%*ֆ0Dʨ4E_ȼ❶,IfWclT2$.H&ԩ:/o%I-cT"Hgs.{`pgUYj_1:QĴ$9&3l`OYV~kh q8 svYw) ti#zY\s\[Vޯ0kI~b ++nT3E3* =Iqʩ9/̇4wY9D޹ܺE!_D~d_E3$Df~!JJuJ8B lb j`Tr B&-.&IfMͤdDcUVvDc5s`_:7[x#]#Ǜo{륤2fcb(I$_%z䨍EUcTO1{7Pjx(Yf4amV{wO㼠i fqHeMYWJ@fDТj]F̂EmH.v-۵{ڽ.9ʋoSB9ir=+2W_; Et!SnUQj`TFCT5a">KH$}O~V{VG̳T63wu:8$s[O9:>p0~`2E `jZw%v6_ Wm^f)čZUk FS:唚ȍQ|6/6n6ii+ȼF(Fo : :䓀t:/*i|޷r R+b+qC.S=Ƒ ~3%g:4G$"ª;sI'5ȸhQY H Yc `8Y$lKme [jwD&G)By] ?%GFYk3:sGÃ2)&{QyLISȼ]o IIIwlSup!|eRǸ0G’0LJO_5Me:Iп+Ť]~=c36کat)9i6̊ڰY*R]1X-VV'}YAFLf$FAeyW֡M5;QM) +S\L5haD4>t5èx j /lVD `cgyHVvD{Ulj[s6Ob-N]&Xv~*_y6KJ9"dP:$xZ(gCLjYV緗J BMM> ?7Bz2!w;2kQ% W?,0>A-^d#fհrռFT*}`}0UcQ\s:$UtȨۗd[1 Gm ]!0ǒvk!7Ca&Hz+& 0DAenU>ٮ X]D/ XFg}䇕7挍=~З_Cr "?Nk<5D뢅J*7RIQNT١<))*f JQE*mF  U`HJPdO/0ٕd:V\\ES'M>G< #?g?3+dtA?4Igg46o .]ֺƳ&6kmV*ʟ'hlŚ#J#s";1eT|=Ǒ,GgLCx 1n*DY(0lGc{c=M  :%>POn}rߪo&m^"Pې?>~.n]zl5osv׶Weg#SvŇwddPCFq #YV6 j$tvumMZs`0"Y Dg-J*\1*WBؾ"HyUPD%E<-(&Z)jBrT((n=fƄ:IbHѢ>1+ћz=%~VJO}_H+~폩ÏnMŮȠʋ$r?n\[Xm HѢʱڙLLjRNޭfz@w)+3\-MjLpZ?mKzX8 NXDDHc##d^m"{0' [<:<ޡ _jvtDxؼ umk}}//;s}]YݐL/wmPWynvnvaauKYT/it:j5['LoR'}r5s2^nM1D322ъ (Rrf% (^&9^c%<\QK-r Vqtp%˰bUȑR'չ&sȨRG- a K3NKe&1 ͠YϬɲ<ڥ5zeG xcpqBOGP89%& ŸeiqA|?ⓢEdd(1~_V`|[Bq /^B_D G AzAV׾ǹ7;]RªӲc$aUsY\V렳LYa֖PA5;G0qX֮^Bt, \rm_0U^PJ3%@0ƈ Wɵ3 1He9<S=U!p-bD'qݥs/27p벧 wTȾR YNz dGTfj4{(K[  Vc61 mhqgW۸pIXr}/}?̓/=cF6LN|$MފKVU$nv5YxFy5c4њ٦&Jm*)K)WI}Gt12$\D\5#ӎxguv%mJψ:v/(~OvY^$[FT¬ I|W޼T7Bs+H3=j_,ax%i]_~'Y^ٵ1z#ԓWyrNdo ?`0*qN+@5@)|-J!2ܮpk4335\1SHi{*ujΒt*+\r*K4;)f ~Z>3 SЙ%y)WZ0p=c:Xd9plk$`|uYJ-*F63SIC})@~RŅ</@ 6K^_}*K?"1pp 4y ߰;=o:ߨk=%^?l^h+\ڴ@cIN،A7:$*#{ϰd\74kjvmq*MOyHH`U9Dm*:1O+|~(:N% ʨ6l~ĵr2 Y~T8`(FT%n9E1ZgQG}=Z0exFjcMO;1k̇s>ARFaCW N%$+zot+Yaqk$zթѩcSS++} RvdiL&K63 Y!|)RƤ-t8ږ.* A7涘,AsP l*wPY֜k$URk$~q/xy"tDNJ2o xUn/Hl:l,^QxRARd c:jr$9yp(f~p d G$m6q( -&+$AyŞ)[ow݀>63*CP>Ͼ>ˑvlΘ17U)iVl}nGҬzgvFN fz(*x-ȇP^0R4Rj0[n8j5"fA_VBW?U%Xm9q%H 7w5-4/MĿ('HGB*@Џf0>Ht[%;$,n)oOBKʷUK5b@)B\~:'@MfVa'({fVrLZ/l{b.17  =B.y\o:A<`W #O/+\E z_ièZ4FtrЊzznztŸcImPtKsӂ_{do A9sD1 \,%ga[b0#%X^e20m8NhwPa= v4l? 
bQ}-Ƕ$_MCǫKmr+>߹?`Uzu  endstream endobj 483 0 obj << /Length 1722 /Filter /FlateDecode >> stream xYKsHW8ɰ桙!ŁbsK(JۅY?YYRlmQQO?dgԑG'G_(QR2WN:z9tuߝ))b*E&3.*Yg2|+*fW=<#ɶ{QeX9GQA,fr_he|ҦRNO+.aaL^Yw`I g< JrU^x6RnO,yðbZHo"(Ji.\6E>]\fN c\ ~Ś ?YqɭI~R9з&Vr5r'yJRhtB7"11Mj40.J=#aJMk_A5R&4&2L05ǔN1θe5 J[pV p65շH!4%[LiD^iv7v|v;zߙgc*CUmbGT.T-B?gXk$Ed2LIgV6b.eLn\t>ˢߥT+Hux=yP>Q9Ճr z}JCzsŌȡA'p.V,s?qtyLX$(|fXSzF SGXb G%S3U+*@ߤ5"mL|V eۊ j%eCѐ,Mb}"kh҈]żuR5},9v>w`(!W*|Bx\Wڑ:a.ds[ǼlCcff@6A3aMtGebZڦk]Rd*P8"L3$Oe KmNRd,1 +`X5\klĬnq]7&c*yҏ ݫS wX |c;^X&wTƀb: kTYSJVo| v{VG%HÙDMTCz n{Y?c/ܰEYϥr :颶ۧn7a^p<6w)ؕ\bKY$|ݓ+VIcXW RJ0QHh+j<KʲXO6C22 t17<^Sf5n+^'!u%2a#҉4c0NU}OYKn՞X݄~z'̵ZRW7ge1[Zĩ% s܁_Tj7l.?$*ο۪=?7 7+_=9)0Z6/߆ET }ֺa+}>$ onAG[f.n[Avz7L endstream endobj 492 0 obj << /Length 2613 /Filter /FlateDecode >> stream xn_1kq}G Dz:䁖ًhzg(ꮞ^KY stwU]Ua,F lCe_&JKxdF۱j;升p.7R;&{Qya!wc8=0!*-,)G"'OD[|<&s#%\+J-\74OTcMl\ DRLbF-F\XC",I\I4wӯh_⇛na~x{?9\_sf>(É8 Q 'ŀzX<(K!Dw/ ρn>Q,J)=*C# $>t]Ɉroxi`h:Ǔα1PCagÒ-oO?v5F$o]8 9? 1E1nsbpeRQ7qڼ Y*nOhL>k&&4{(9:{4pA?50 #aܴ"܄$g3)o*˝)&| &dX`i5& 5V\yj6ajMnF`B;,nf#L\O. KQ?MeF<)Ɨp8d'iM!7Ľ+Dzp!:pd" \. Ox +DfL"$b$j*b-ݺェL{HT`{}  Wl0,H'X'jި(%boXRU; @F܂2ҙ1=ąwayLm'|o ܗ ׳q#::,9,H.h69 ΢rBR(;) Ɯ)MŭKz,`ojWn= 5[iG]"g*6tƽٖJA,:b۞PPJ,j/8n'c.X9K =% 5p.?y)VSɆN`Reyd/7Ȼ(N`Heo閖2fҴ]8 ;oCO|s& +X5ii˴yXwK^\]q9zluEirSp[ݹqy0%y.Z\z.&'p Wow^ endstream endobj 506 0 obj << /Length 2766 /Filter /FlateDecode >> stream xZ[o#~Km@ Ic[Iu9䘲4!;ggW3=_iѪэ]\j3J5]g>p:fFպ*4kyn|6kOwvw |i(iH:$p4PuY2応/ mH刔iEWc`+~t\o;&Z/ț +q-ܑ| g0"HuV&$;im vL4˵qy}j3ɔ/|^h!.s>~͇̍=2_4˷[WNW|n+@*^&0R"vԺ5OY"{TQ֬]a#q{oZa4[|gGx#3Lg*Ҩ {;d*>7)XD_PhmUYf%ixc),~i)alohиP{ߐ3Ҵ:8^5n"? HY{ђ!.,z()xhMlA.igǃA#אvF2. bxW9 `=X ǯfh[y cԠfC@"+}lG^bem&Am9@ґ=*g~pcȑ'*="(=&\/V|XgQ. \Uֆ\b^]E@a&cB4f69IK4N{ \1ˌQ61yE CtKvJCwĔNKk[9=Rl:`;7}ePu;H`oU^Kô}#V ^2E$uSywP?p@~vZDg䴈Ӣ>|b÷"['G TџWr0XM2h*Sƹd|/d;TL=iKȼc% OR<YBz낓 .5K^1̓L^aS72=| ~x$JT0_8iC#3FO׼]!Ƿ0?p%9UTRPՑCw`&k2MuS`DMnHa3cˆ(80yFQ@ p"bnxZ LxM UoY~UL9kT96ԨrJҪ ~DZKiߟӑ0ӗG)pRtgGIW%U[kYGٍB`Z_' ^V0P4H9_hZ 89I&ceX9' 赟]o[T`I\)Fy0;_W2#7 +8O?ŗ 74j^ ]dn|u:vc*K(T^# SS8*Ie_]<|Q:r}4CʒKV0F5faQFYc#bn҄,gXcO.V9LSgR1Qڣ K\q-/ #M T ΪgHBFndC9>=:7G '̪JOҫS~P`RԕҠ \4zZ?rLT } :TE]t9 kb| @i'GISU{2- TPSN-FD*;B͸.ӧ8ǽ#jU[e_8TUhlk`֨`9ʦ6HNk?&6+ h?tFeLB#VV^PfnjGǁ\rf("$<ڝgk:H٦Ѭ`1ngo~N28@SA1!Hu5TgM 1 e ?V'ln8ȶ 7QI]!)`1:,XeSj4M9@M,M l I} Z  endstream endobj 537 0 obj << /Length 3943 /Filter /FlateDecode >> stream x[obO38H&VPjPPZJxe#wgWr%7o{0zr=ѓrɋNVnjb IUTql69~wbYjz sɜpM_֫v ?yWJoy䵧plWD)L~go+Χ5QZoOl=pfO^"ށ+ea{Ļ58呖 O<7.K "jS 15*MZU5Cb .&P4J7XʔrUN7'ڦ'֔~$ZֺB9_"s 2OvTزZ_E*&-_~ ʚ XOeR:8ՀhE3.{jJ :^x̻FU$2U0"z {kRϼ5#mJP*PIM,dVRN*dZ9c$Pj_uz:>rrybOVXYy4kCYcoƪnW$ =ӽ ɜP/?`iŶ\ 㥷 玹B!KQ%_lL;ySaJ5. CDZ۔ j͂hAU=6A@]h1BEl .ux?b"y MP= 04 !'d/q7>Ad L%p!i'v܎3xXx* 0ۛ0@bs2t隀lƛ>,5?tf7RM$nl&]xU_"En/H$6LS|Q@897XihIS7,*\5#dC |$1!gw 8mC8\%9W g-k`K,ctS}b944```qA<=2@:D$\Pډ$ŭ '[ FJ2 H;Ɠ JWOؗBfzEχXhjBym?TJR8R2x|.dU1m(Q~\ 5c̾RT&IM˰QښیSu0LC69և^zz1l=o,\Qfȩewbm=! lU;l =`"f~{ dZG19mE#*k|1mW(+Wqc\{D !? 
yL]*$A Jy5QwǺݲ xϏWIi\oCA𖯃n>} ~~!2n{?z Ĉ(u;(KUV;^A5}'[y\$!xSa壟qҞѨŖ8K`ʼsp2݂\Yki`*-9B$Gh:wh2wL\^k6@,QeSm(`"Dx+L&)YH*QLRehD*W$NU 윂`Xrh?e&|UC{Tʖ ƃ=c˫]W@[|:)!KaE/eB2yIA}3  3B&2IJ9:# VM R!ipK ].[(71<`M!Ykؒ]CsWQG#`SvUJ~ 2 >#Q՞,J^KR]֪1QIiC빏w!2)lIGC ^;Ωi)U󿆛d[ݹ(IPةhVGcɧ ^isXݞ/Lz$EUcGҿcC!~CtE-o}r~>;#MJ%az5fz`|No͑9׳X>jęh OZoE dܣ\IV\K_EjYzj5f*'$;9}_fgoBm"lBŌ ARy.6,Im?Z'5QqT(gh:)*9,40 rh<3yr4u1;\^I.ӻp ?a߳c J M DŽ4xqgv9??2.f0ai‡ GB: wCc7ؐ4;!P?DcxВM~ѝ< 6bA މB-xC\ExGly鷂tCsSc,Mft#2;`UO!DFw-7qED-1fDIyeFN"숟.e83 s?])BEPw??dd<p:1Wnwyӄ٣_Jtͤ6PYe|r|t`AM^OӬT:֓]JJ*U*ƟCB+̹5.!%/4I״^hl'o4EqĖ# iőe͚ql'LڋyƷjW=9ÿݚ"rЈF%s_/C1UOIΖhj6g|Iir;]qҏ-J'7t-CC'~$;/S171ⓐ$\8;0oo5VboU~h%"^I}CGv#<5›9nxf) '{`Z:`Q6lAo;"k<Ў [7yu+7nt4UT\@!m AÍA}ن60 Cf)2O?Mk\Eˏ>MGi$oBBNc ?w endstream endobj 547 0 obj << /Length 1041 /Filter /FlateDecode >> stream xڕV[oH~ϯ𣭭 =jM.̂% F誣,Uԉ*w Jb!K-ܯIid< c WeO]%t9jQt/K_qc9Log-v{N6'0.mO"I+e0eN oBNjGxZGF%P,{v E a?M/;mwn߃N/ X(̏qL)d~gj,h10Ѯ`.!ܲ[NR|4k(f"霜NDʅy"W;kKux=14DeBsg;&4 V_YZaRSYypEM*q{s!i^H{;S@:=ʺSHce}QEur=LMگ? ruiٗjEZ{ ,g174~܈ȺƎ{n'ۧm7N8Sα;{i endstream endobj 512 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (./ier-eps-converted-to.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 550 0 R /BBox [0 0 640 780] /Resources << /ProcSet [ /PDF /Text ] /ExtGState << /R7 551 0 R >>/Font << /R8 552 0 R>> >> /Length 6189 /Filter /FlateDecode >> stream x՜M%qﯸKkNqdaY dEqr:U"_ɀ֨E|Gx?ןOo۴|ooGmhե/GQU %#iE~۳oZ⦥5߄8#זqv!]\FGlֶ1]75) u7ntYu/瑦O8~j0%}$rgRwr,-W$ V28;$E5F;%Yhuײ̅ʺRYZ[jI7ݾV^i=hUJI[tr촒sob9؝9ɚ(Et2^fѽyzh#0eeɲV[>OFdۇSuμ¿}h>h69<'cKŗSɞzm[69p9iW}>ɴ/(o>~Tgm[L6?~W/ןKG r|eh*6>Nk]3"ASHbI,M7wb -m2 C%,[.i@0iȺJ s͝w44ek :-g@5lFr BTZa/*Q݀),[?sҢ+2,)*ِ1T,JʍPsT%*т,ر*[ߤ9.8AܴXr]^N&݄ХZ,buw*s-E9VUb5T[r V=b՚\ Vͧ@Z Ī gX=b1֭ ,;UժjUVꊡZH} j"بVw] r WwpTZ:ԪŦZutUkqjujsSV9ju7p@B0(Vw1bu/]U|Ъ{H UXNjպiZ VYn9!V0y^^a CQNU=x%@V,e-%@ZsիkS.ZuDhUkrV%GhUgXEH%ϠUwĪ9QŪY VbuO i*7)Vw)VBjCZ,l-{Rx VwUbu7PUkN\eʖ |(W|RWrf\\i\J^u5Uzk-z}EkV{WjqB!XNk@ZkhPnUgPA')iMwmzsS5Q%T+K%yjjby@UC@kTiḢT57 I4NbS[,mp[PR227Uz*_ |5KQQjV:Lyj׮>YS X XSM+@4*V9 $`aХ|:N%/TR|^eIt)VjbJW%ARdRdCnʢ=JWy]e%验 OMU*{xT$_žoxU*"^uf:p4 Ve^3Ox5羹\9Si/j^HAU |5==HH%VTnWӼӐل|5]UjYN*Ct`MP6U k ZU9 B,j WFe_5mUs2˝:mU-7hU-w꼶W]DW׶ږ;_u^r端F}lulUg٪1jJ}}6W.[VSx/[d2mo>0*:U]sx*Lz#UMNyHUy(DU'J_SUƨjzmq@U9:1ߓ=ڙP@IU TuP5M]*T$$+EB4T(U6XUʁd "4k-Tu H?@U@4OJ@UQ(R%,~r0AU]\)9SJ;ql!UM-@AbNFUESbTXuqOuO]TĩK4S-޸u 4 8uW2UO#hUu*N$$NMsT, FQajЪ{$S,Oչy x j&#hNMu44uc+T)\3 q:eWSszi8UøNMfzj$NMخ b345ϝk&,M om%Mj O*K<4@xBL$P mB)U;N$oTJս6Rt\@WtUUL*oB2*WUχUDXTd'W$$WGj mBZJ1SM͉L2uGmT-߈T-KTs*#Ru]j<ZAƨSrRuRC^jLU˼\sLP2$^"$jwu e%0kBb `Nyw}zsچ;c5'w}#Jw}7Z*w}}9>6]r]5/Jw}w^OwJW=Cͫu\H8SSRJxQW2VRw畱zzzmzVU2wU9JŋտY j[y뻆dU&~[01PqPC85EY+ȪTNd5_gU9Ნd5Eu 5C01j;"j('^yZ D!DaHmhU @D!Jj,Ih5KoLuM_yS2Ѫ5hu,ɪsY g>^]:uᶡ[Uʉ$HJIVmIVClz%X WٿTEJRMD B Pd`U)*x+!#XuU 'Y ^xcj,c`U<6UC$!%@g`U^C*Yߩ!U tīKPU|PUt?PUP5#*T]iJ/UÔHUT ABrPUoQj 0/OOUάtTuM%IU]GAU]*^AUÔJU%q0(ߨRuV -ڜ&}Wj&>!*Ueέ!3jBj+-! 
zC7'J U%d\#axLj C茪D&}곤-Īt;`=0;HO\U&m LD UWfdնfBUY+ BU7 O{%TՌPU aOIᣊ5PUg"TUnTJ1f2# RՐ΀i j+/j?7KzZN͋TX}^\kcWoUOU_T>T}NL+#WoTOT_P>P}>\$>u Ԥ;ԍ8/t/d&.nl e:n(Du+;zRTe)' @k@k@@k kr.uZV"ڒGjS YjI(K*Huԥ{@l )Kw(Q]րG_0HcAT8A'AU꫷ק^^zz}7'^^zz}uq꫷ק^^zz}Uu</u~ z^piS8W+_Yק۫_YWז5Dr~[H<[w endstream endobj 554 0 obj << /Filter /FlateDecode /Length 270 >> stream x]1n0 EwB7R ^%Cl > stream x[y|Tսs{g,Y' d XeQH`,"*""C@XJh( }F>*;Cޜ̹| PC00ёb(fc&͚8'., _Nj}ci֩snupr납E9ӦL-Jpʦa"LvִYyFuvKϚxa2¶1g풰8>g9"mh:F)Lޘ;ߙJ~1du`3lz&Fx̀d<  q oT ypnPgf GW@j!Xo% a \u5tOQp#l7f Sg@y'54 50{$JM+T'I5|I%{SROF㽘%0eGS_Dr𩭰w}#3I} )Sf}xxBtJ @N0T2/HE~8Ygں_+1c#F6:|fîQM>~dQhkk7Ř1*ˍY1³Wjuz h~ztUlM\!,]#K{6!qFmzSubW oLOъ¨.{^XM]Mj]Ͽ.:YN/!4ȟ2*D|3$=ď-)9Lq?cy ĸW16]r],bY Gš.p_mp ኟpp#\{Cx5W8_JSI0: <!CM)I_=_/"Nwty*p|LT)+yJ]gtygŃm)9)Z?0R cm`sQ|xo!$Μ:< ~ `L+eaQΧ aaW/;8;)?`-8)XWz rmP]}Eoί;MՇ\q߃R:n8 ;6. eM4nm}]^/PTJ[8fUw!WU!qOV:!,> CFݵ0bn}E[(* ٕ4& TbӯǀaBa)AҌw^-23ii}`6iy BYi?Sf2KVٷUΑ;k/ySx?[y:2n]$t0B '?y7/J߼UϊAGȀ2Ni~ `t FEjʉZ2X\FנB""U RTBL2 e}}ll|Ws+7\>u<60ЁMNMdW;_T::qO^8mԯ UaOPyP)24zA <, K"N2v5_˖z:>ʽ 4%/t@9/RƿGЊ [=DN &90[1#́`S V䄖Ci!c+/ްeӾGgs'珘~/%w%[Kfmth3Q9ü4y#o"3I= J@1mCe4&) w䃴?-*ZT¶ܻ<`EFA((Vty\i9 iʳgGr_2G4|/!i ҜWof|cT0QɅab*AP@\AH]߫`il2"SWXԟ}}3:w+ݾ=|)sEmpDss$h!]:~IJeMT._vk4ݒ;cg=lt$?to?D]GGL9UjBz2CdWr_3*eY^@vle4h$.JJ. .s,2]FQ!ɉWJJt%&>,%o%V7DW&LrdBY}ǒO}9U@!vl,Z=W ? "úQa"H.&t~Q<{ 2aktx[ώfns/t/!˩#ʹ--rd dN;m7qLy=}*rLM(OyfVբEVa-8Y* *0ȍ(V6VEqLij%@y*WDu_^ "j<ॿ>tOQo$|/l+lg4CJG\vՐڏ'/>†)Cˊ.َh#=Q:e9GSi,rf\npHAC${Pf5 FЇCV5&M u^c҇Y'*|o(Wӗ Su ]:_ɻWέw|bmEn7H]ߐQIe>^HgО "%03ʡe򂜑YPZ,vŀ#*QDt%)VOچk;_UaE?^73$5^mBrTqˇ./~>kH2DNJZtC9l<zD`fI:1u:Y 6؟0fpFFa2FUQm'C/3oc'<Ytt26]Y27\N^2+s}ΓN6`89B됷]!*C6[0ok(^% ^~BkɎDZi툼#ԃrceNF-+S2YAv q>J&oa`U%#irsrs!--5MJ-3A Ivн'_snxBnQ Z b (Ãw.?0Ƒ~-e pj%5{t dk[ *C<(J*iN^%g|ܥU*siAvJZYkyNp2-8-.r(TYWU9|vζ,GspF/aT@e#LFMa--AwAq-iR2,VfG|Зɇ\Z2MBZ"HjtN#tF5~\( l577M+V4`RݷJbϧϷTn]N^%,֢u_A[29e3r Uȳ=4go776nLlpf2RmҊuQD_6CLPݗi`YN 2BH6[6!N 83rXȀji hu;L.Cb^6l3e^e讹مT0<ŻYVɸ1 nRPQl:5Z'=u+{J34uER%W"mt=b#2?`]:L=ɚI.|dȬ,[!Td%AVcJHLOnPPQQXd(q? dgɋ$\Rc&y L}g#碑ldu t ceT:xâ :K؜8.ḧ}»ك{sldQO}Js[d鉷qc'+sX@>2 aIhP=z>5wCsf"(+vz"/(L ?4!`i.ͭE+$\}G%VyݭeA;3R JpeIȑ+Ki6iC%RԷLҐYUNDJ %[$_f:Al }.:|}^folcj͘?O"jR|P͛^:k/^B <Ak&Mtn.N0 }`v|#w{NslPp\Jt:o { p ߺ0FήbP1ZY""4>y(#"Pb#ALê۶z{Vۘ(*VOX<8yEKv!3T`#щkI}M8¨C'k02BgA"Ɣxol}hDK鑼@Lࡻ3Ӂm %^ޱ#h*R=7.D]AIjp-ZfR۹7V: d=x0,V̥әd\In)"+/<'}3~cE ! 8JFmqZ:zXҳ ,f]V,г":HZӢ;O;h?HC?&}t}G>J^ק>l_+y2/}&OX5vh2{ߓHHԤN2/CP?DHAz#ƍ9pV T5d5d Mͺ%@@@5/0/ս-Քo,g(#7h S`s :]ulUFxFKy"vl Y|(l/xB*u]':B5+"c&8ߪu>|4aHx| >`ͥ>#ތL25j!`@ d٘uNiO.M)&u5$Y[f${(ȷ!6ycXyݼc@ xd;L_>2b\ر7ea3融\=nZɫ{?BEmO}EP'zV#̅r-͌lTh1F[|+\\"K05k$O2AyU%n*ݠeUUKсزs̪ǒ_HkHSiM}^Dy@!$3B`LSkj̒K6kig2 M̬L:ˌHa B#2=B5`$bO@7[Qߌ8i#W{8G8E(HdX@i0? CFAȅr\2xra"!e. 
(Ilzg!=_]1U$K|H%ݜ]C>u7#W2n:4[FJ>_ɯ6of0rj5[*)畎.WpۭHf-GοX(Ϣ횄|FM}KԳ\^O;"E]Рf>kBS]uSPs<3<9B+V3c&+ ob^O+ j,1R$y-f44S7ҞߓOFGs6'%vm7f7?H;_LvI1ce#և `%Gϲۄq5${VQOʥWxDCvf \AA]%W`:ݣ mnO/7O -g΀ݡwK;wvSBJADlH m9T0Rј5O.~rϓхw>U=#Ѿ}C&5 c>9,ϲWB#G ##W`o_d^lΥ7O=K~^E*FS6ؕRPX6O6w+b.]~]3g gpP5e&d_^^ n\i+?z< ]3Z22"gƜ$8 UcI>y_J*Xu]Fّtي\Qnci; B*=#L++&U+Kb(4v2vON|b$\a ib?&[&qr/S d&R6{2}.?&Ѳ=s͔^fnsיS(]Lx9CЇ3H \r2cT*p2nҏ)xBfW˩"6׶*ժlH7WU`_l fŷ-b!P%T<2KJ߱Yr*YkN4;ȱDuv&o~cQ+f!F|9#GYll#ognyCE\ 7U}mտ4j␎tӿ0T1< c^C*2Q(zFUu+v@m5cw»R.]^15/r'`)f3(ZAպ j⥀k/&-F8hQ?j|=87ΐv.J8)g$%\-uMGϯ8=԰wL0 Tӓp#Q1qc FL%w!lJLط'1؋q8^vߊ8Xc׹m ,HWϾwvrsFХz_H{͔!C·t endstream endobj 429 0 obj << /Type /ObjStm /N 100 /First 908 /Length 3084 /Filter /FlateDecode >> stream x[ko[_oK0Kp:I,A]|PG#{l7)0y$!h3^hB'>a [#L@p gc5" 9֞#B5:ovHBl`AF't";ukchKE'Sx͘岰Jqfy-m%籮O'ltY{@N)Mdl2L18Q:vi2g/J{Vߑ<JQoA 9HӼ@^kL>o^Mƃ7-R4D:KhslG)JE"L'A;-|=B64ki"?+|x!SU5uoһ2^fO 79)RE}[F IIY*K݀&@-@@"bʤw8;6J-`% ãÒX sG|~gXiLT?H, ;Iyqh ^Erlef j \}^P"VHOI*WRUa;]KyWɯ3g ^LK脺 CH C7GKowV᫁@M]P:G)dNă''t0~,u~Ol8gx85 ZoJb|xݐ1􇓱>fkѤp0ǯ'ofX׼|BŷӶcgi{ 9=amo?a*Up !?|O{5 gI=IAŨOPEcS,k]\,V[[.gHΪuIZ%QϏO:<.A6iq1(6}\qjyU:썎[s~?-ptTHp_kIa [cs%=GE7v4m<ܖ' ll|jB RG'Vm:Ajj܀(39KY-8*&s2pvi`,1y*`>^!23,JJ2d !* kaUO5z k+nV(2N#e!ٙ/!R=MWv(]t5+%7+ T$`*4Ueֻom_|-ǼsC_VFڡ\'#z9 x/7 +HPqY*iMK]&u;g_^C xۤtzWgNcGz[1lm( endstream endobj 561 0 obj << /Length 2748 /Filter /FlateDecode >> stream xڽZ[o~ׯ RpH)N b 0 ]jKʊf8⮤-5Ù3|>J-4g'^VYҤNk#UdI$.Y6T-9L`I/% [4x k72זbl[b'ߦFysZq J4WvξR #3KNR֪_F9 E1ZR^$|KD@W{5[1$f+ꈶhToG8fo=S͎Alg>]OxzP|ASԊXW|,`}額GqV9b#4/ |U?ɢ0 T.;ڹx70)6L-u DiJC ٨LouBޅe7.||<þS烨&`zOWɄRcg*={<ċ{T 2gm 6}Nk |Dq$e> f=mRU:eTgJ ګlm}.:x[r@?~%Anq*] .u)auy۞d<񽊗~nQԙ]!2FV}6Ւ.&ő*)ߜVajp zKBə >]6an%'p]'ɻhawEf݄f޲LyNjTqjG˞#MT(%玝6$y uF)4+"w7˔syW%YnK=ъ=ؼt/)ި^g*2ImRf̤RNB-JR7S 6vpdz[?Wϼή@\̼+&Mg1tZCs0 t^OMSY#:Ttpcc4'V z9R3ytטзEXSK8cW 3 j$Sy}1si~\%.jijP:Q]SfkM\ve8@ɱ^K0>E*{$JXE)i ~h~ 1JLY1Y1b 6w[YWXF ;H4Vi߃ jBTZ{}^^A&rZ8@d Lʮrᮎzwb#l֮l W|?p͝Am{6v@58.0K/6j8p/}'V__,GP<5 j" @8 r/t_;ƒ^|̕RNgnTK^{ ?Tx endstream endobj 567 0 obj << /Length 1714 /Filter /FlateDecode >> stream xڭYo6_at/V"%#XPV{( CWJBb˵}w#E}m K"wQ׋pɫɋ7R/d(0"T"buT[ CLf \.b ~64NB ,e , "K=,4*ZZ>x +hH~<3ry+pec:?#^&1pif4o'Jz RXYC=׆䞧oY d$trH"ks٪@sl.(PY녚gXъ16 +'״ɹ~ᨺ ć/C)rv4w;/dKZQLEr6'ROVOVɪ5^(A˕+rѿAdBؐ9١4(RM|]p ocw,ɖf-&ddGpB ,8ףL#FcPYF̙ KgO!'Iw3+gW'> endstream endobj 588 0 obj << /Length 3302 /Filter /FlateDecode >> stream xڽk Y'R(5HKr".Bk˻˾w%޽(kQp839L'tͷw7ߖvbҤJ+3[NLnrsTY>[LO5S?[Gheev 6w!<3Yk6#rOOH! HCR.zj;76 4nV(Emd1LK3"!`m K |r@qw7Sۓt.vjTgV >rnyKGDJi d'w3@k4O!)$lK*lqLZL3 _ E95$sٗc [91tjŖj im-Z{&kxX֠YZMvb_4 )K=lD:V`ږ_i0DuZm+^¸WH#lhhכN8Y{29yy\)i5oM&bh ~,1,6/p$;%ဣ |PGB2u0?x7VN>c Wҗk3MS7@Tt\h+;vaE ;zWQ0 9XvE >⋘|C+ >RpxgCOL&]6±AZ3kI>r=/I#+bFA[FVلO {{I2K!\ } lI%Op`aCB<lǖY!A {QZ×O2渐P9pꛞS*6]|!Bjˮ#RQ}OY tM)0O^pbwgJ(u(`~ B Xt/EHi>pf-]]eU#łMlUQQp] -2zzvЯdz"IN8gl 3K dB T~~"f>ςAjl:%&9IV(F4Bý"M_45d؈Cfthe!dx_Gd쎳dYBfG%Hmzu1kz5+VKAI'ef[nഭt`yv&irlT=Elc0wIQ WpR|ޟ>ze][5=rƴp~Ju~S ;ev3.H-y mZ *Akn e+ 7%:w4mSubWzHYu].QeWjz5Pq~r&L Md*Q\<5}^fa Z&1=l:abcnsHQ)S1,N*IK7"|q rROR}2F}%T՝]b|H2" Β^b4wXHz`@r z; ꌀތM( 麽"p!RSi"N! )Ofey x6&#f+)S yDs ]Đ ]0La) x2{:)Ts 8NC > $QhSx~|<"~^H2ďSh#b(P"TUvag ,"^gfD/SFƒlHP=KayPTc9PT;5%q*+2wXD\A0^ěL] HU/$5]&g~ԛRR'|BAvI<[ۣk!W|wnt ׭pNvؾ~/ :<;1mM{pgל)S2YC OCqc\lCvf J{f!Uṯpwp2&D|ޒyb}Y<" :˥`y-y"Mp\9U|֚O*r ;I*Nֱu@̢wtTL-Zz n4DF I+h 5v\K5 O+`|jm8_ド-(|z(W]]94}"p \qU=Cb]}Wv)MT쐥Zwף%Ia8f@__"JUm,=ee쯱NXSQush4K^EF^3fWpzӞuPڹ_ %GfG>+V\7U?)a  o'%e<# PD槶Bш<ȯ9_r^9`|$hf 5+լp[me:^ٻ6w#Uϱ&pAd _w  _pճ`ron~1dfRX|&d]IV'ڀR`}'̗ ` l Td0G!ڼi5Q\PPB2B_W+;eF~ :߳O#%^I[$JXm:(Im]yMAЅvb+O-|[R!ea5EN/$U;u-#0̖u. 
+VB/+ endstream endobj 609 0 obj << /Length 4156 /Filter /FlateDecode >> stream xڵ[Ys~ׯS`p9۱֮J %  wWs0BzZ{Fn._|aҤNk]TfQeRgzii_Vi63UTiPKv?w?n ?*.R[SV:٤vOp}Ye藫ΖU^ 8i6;,^{.WPĿnsҤJ a(#/n}X~2l.VeR浲6bg_aȲZ$Ǐf.W.-a37+/k'fŠf 82CJMdLN{e;0ٜM6&AUw0*=m4T4 dG,),;2/P@`vl Xa X;!oi|` ɪ|ѣ&뿾XDtܟC61eʱ#%LdҦteǍ/~0͠(,)mX/~%]lHL2[VsYw-~'2HZeJzA 723U.[Ԭʂ4x͖__ְc';;h:_~ת(ӛNҞ6 ﰑ6 K mv:NZ-=ʌ'#RyE|ʺg],"}FY$ͳ@WQHe]-a9%@-A}m9lr_TjK蘧e=LȊ$?4{,VNH/) aX\ tH)_"Kte#]9ΐ;E%f]ybL(%it45b$ˊѲY瞗@T%I*…!WJPsFFUsH.HIĞR}GFfXJuzh=;oh{`[~z_*޵ئup,%Ffɽ:w[ڏhތbC8YĤkb4 ȫH:):BUqggFva G6b^G&YnlQѨCMmUL7Na&zj.)#L∴6uV=v;yDz[cwY A>۷RC% :N/k  s]! * G=)X%3LwpAd<^!+r)Ų#1oߴEx?`?"@5kC*Pe.ZTʼn|b&n1á&O) R(k~+{߱'IrU4W:nФd2Өho4 =WpsipU>iQl"TH{# ("cf1)6$,\j\ߎ̀ϙ @`+31#Q^@r^e#|7e쩫Y {MlǮW[ڲ%;c¹x\_¥I,$=0ԷݵL{!?)nlw܂]wpd0Ф( 4 KVdbNo:FlJ tF{asW^f;`Hm߰=SViL8lI,][1)51FP{KVJ ٕ1PDG'k*r#Ƣ 힝Ea,D)53˵-GO#5 zX BOx=8  99 O\V/@)=qomb|451ā ۼZdxk85B߭}^$*Bhs{!g5躰y6)+1YIhzMwN]d%\Z2i96H(6X8qЬcînmrynŨ; |WX("8g̃!9b7(.sȻ-')aEo/)=Ew}\q,gȩ: ^q[+P1M,":ˤH &+xv ;׹ĩǨpW.)tC_ų :,/ ב'uU-d7´$YQ{E]pG|mfw ",?pU>qM$QH8#9nI׼״ߛΙ8 EF]FSefV H0͏qvvѠsh/9MOY|0K1d-=ʙ!䡥Ηc54pO-LQrTiג#>` #x,dJ;mq=fbu+")ze8|u]SaC cpr3uؒga O'BVAs$sK{5㓈Eޞ$@TB)-Jſ!¥6( ]Hs4VKN2MX՜Ls"8ZPv7!4M-з,]B&y>oږ;s^C-4z V8 F%I;XCC,OWl`f'8k $ "P9 =&4cQ,'[jn|FecbG3-vhK5U]΀Բ9IR%bo-/{U}5x8G˓4y ųO FGIZz;B"5PB9"I.N*?ȶPn>Rw \)nXS*IҺ 7IUx ,~c~NVNROM4١V\H ZBt|Z qĝP+-E츖JA O%m;xS WxM25 kIƶ|̛;:TB=޴ \zoiGN.T5"c_GIJ͟:㱖@@ (ڊvYV8ܱ&ՠVZytk ,I6Z^G`&Y8Eb$aֳa.kz7腔s>_h9'ە"I xsw'q2TK"ݒ~: +Ħ(gx]ŕ,BPG~ߨ);xdl;XoxȊ\ߩq+uh,9(%ΗGׁ'az|b?'_'7/\4(q`A8W7- k=އt9܉) e#9&q;rNRH 3Hb, bLRnzBdU@ɝ+W"oW/,=?H2x%5|,Spzaס>Д-| N99U,&j@\ {n4g^&Y 擷͢^iU쯺c7@~#i *[҄cEhO1UGT^5x,^˱vCYt&Çe Jj IZ ;4~ܱ$bb8GK39~[CcEOͣ`5Od0j`԰[DVQxմ;3+͒(ƔčNHġHZ O"  RVg0 Ț< Gm#gN 0#OD wEH8݇6&8tO.Qm-tQf|ZX/d&̂4zDx<(\g#X6B6U<^{B|IDgF9I>۾ZNjAjOB·i'g8ʸ&Sʝ:Wc#j/R@_: endstream endobj 634 0 obj << /Length 3646 /Filter /FlateDecode >> stream xڥZKW|V-a`OrX+b-U X.#`W׫TZD,y_fGan&,fyMgWqpOsez[k cۦ:57{H |ֻ\W-oo%|*ZH˕)[W}8W`24r~ГPU[neBAmoBJ1ж^*k~&[enT&cƍ-TMQ$Us76,.5z -ڶk>~g'^<8H )q+ GIf r.п8F:\l wOe6v<%N)A2'DS#x%P< aETB[̔%bi1AYh< =6~ɛ#NHe {:/WiWK ҒmFQW#', M.r\Mr;}L9gr_qau{!r 勵&qF }Q ;2"sB!a4z{BHm|aȤfR{ʀMtrNhYQ)c^PȀo4S¹Ītw3~QFT zJ-س' 2)5Bxu}@dhAY@[TY!"]^RNnyH5XC1/M=rO%ӃA@hF PQ#W47#`{m ?䚈Y?:t/b@8qp-k&3>d`t='6n:#+nG67qK<0sVEaP$q1:9 TGk=W<@V.|%K 9-:2:fpGV (YNt[s.Qz? z;*[ذ(SWUaUO3$c3{Vo va"]a4x?mr[rX-N=%{ab^;qa;08}^L+uޜTqP69nHÌFs^Pu'f֨0ђݗ`5 <:m}"Nqqlb'ԏ8+`tDŽɋL5+^Mzf$+a^a ܄m;/O o+_ox2jeaa<5DĈ(4C/"!^<9+xh'a<’cQ|lAJ]i6o Khnн Si~$'<Rh+M"Ђ>t2%,GOP,_In7jSu҂<.,;>,{ _aaGR|#>lR(%Qޫ ~t\0Ћ yn:lJ5'c)*Lgv]!6]9\f9x+f~Gg]Vf\~h t |۾cbb0_[wr7_`DRyd_ct tهQ("P)RQY endstream endobj 647 0 obj << /Length 3363 /Filter /FlateDecode >> stream xZKs8WKW 9U{'d-ggv@[^IN@P&e@6>|uC 9fp{eTIn u`܌7`$*ʸH 腬nU^ÿjgSӛDX".85q.8M67[::yq傛OyXQIC4` Oϔct`%Z?t1ڽ!9+>i YyT2#V|r"OΗ%*yt%/A#>粙 ;i87Hp \5}"W}&q XfY]NH+,{ !\;r>RӉRˡ3\j6g Gcm5: '֔nA} >4[}P$k>a感s# /Dq<G-Od>>r$@Z ic۰d ^HAEEN}< g1Ԏ9aCiw(lgKjk1q~_. 
y0i pk 6^W#:+vt,,|GG3F34V15pTH`y#DM\Z&O@ӍD S٠lh ˲/] W-rJ3Q KV|\bΖ\V3-d)_N o_3e-3G2/S¼5-xuJw soܶy#;բBdr/ѷFrB]?[j[2uL5KyժrԳݱoUqatf8eߋo|rh*b!b4jg,uK)qV>j\>*smuXŦ4-yxw.xظ2{L+ ܏&,o!UvH> stream xZYSI~PÎ]W` xg"v `AjZGo^U}0O:22t~}ūLTqFW#tRFqѿSu {:pCL#(FX4__5/x˹r͙a:+4R|;[$ev\LiYqq={u\{&# ۣehbM$7h,Lt}@r|ᷦ Q`c6#v14XPV,lm} 62,΋^WP/ A5[ I8(9^w$SUnLsg:$#5s(&*~~5m෿w۶ìb<VP h Y)vFN16WuBO7:,J$Jbۨ`S6Mm(oF&PHNItvg\x-01?єlID5+i@4hVwPmK£# e ig\Jq]o\xk.JSЩx͋%*Jm?n=<{"2ҏ ߇ܓQ^rԴL%= Nn"҉~̐@S*UbGV숅_xgܑG ~ЅD9%g Uo]CZenji&᡺eփsNlVyجM5lۇDo<+u-be8`BwFU# qbuS{k=r*3wF%.8݊:r42-\o~btJO%3F9N>2e3];m}wy8YUy_c.=Q=7YV[]]Bƃ=&>ՃZ5bReT:*3 [">ء CcM~ )|%i~D^qؚq'+Z%W~s`*0eo&} &NM:&ZpR ŕ,>D 䩵))7p zd쪰z)+m݆}<|9B)iF UFۯ4pg:EmgAV4wrtdF݃EB+^PZpn)ͺ(x ms[>yWн=m Rsȟ^ID*192Y/FTEio6{r&c3ŴĂ9ymI[2\xx+MdvSWG:qOMy*N)!RVxk_>#:"'JVSy2\.Vyۙ86Lk13>&''7w> ic=}PIb0n>Ƈiؒe-zAKqXTwYџqʁkk唲  ھjiHy*%Y[}MvUG|$3çݽ\gi;aWŜ$S |~߆?-ZL˂e t￝0+V!̷Bug2|PMnjU- y{Tj`k1ǁՙ-$#[W|ߪak=*1QR7@ K ?of\&B;EoJa:?Qwu M’LK` mJ[O|.>vmldS:\ Bnbs |@G(uЩQW\{Ρ48B|qW>}ڙw&%W|gQbW^ `x~(dM]dcȮZu$d$ Ě1Ah?)b5Scot .kQã@}J*ɊmuC9+{Qry\HfC>mؑ%:R&$_/torQ ``wξͬO'@r{ZxgMώlE|lYcWt.w: %5SmsKRf؞pkkUޥ+P$& fI)Irx=$>)gD͋E@^tWț2 nvGZ.b5+f|Zr>;68UB8 mtlKј~iߛd6JLw@=~?A\K9|?V J1h>6ƃjPdO A&W9x Ba_p("|@Sy|Q> stream xZr}Wq -ed;uʛ,'D˴eRKR+_C MI 3ƙFq&xo &{3 !m)w6#Zob}(&9){!fCUAEr2DAE!IɄ UP 6 %1TFڿ> ^HѰD} z=éh_@]2#LQ?M8p]@qa\$} bD`P KHO&29 x/.xEJ4sRQ@ .‰ZSK*r0A3S;0HL^o%#n:=1XaC dlRHɫVoRq* p/ddlrPӡ1\ۋ@Yϋ)>C!'/ /"nJ` S"L^2 z8s9:,hAWL*A ð(5;- .+7eН }_pQ}H,zqC@.^ń`ٕZ.@~oyj>ݙW_1‰g''o~J1lV ѪքSogi3| ^[E(C>_pQ#Ӽ|4ۯSivyp:A T5lo']~}?=}5GNcR 1'-z덡xؼ?Gx)qJ :ݴ1~kqד{~yV*VL[k[WjVSϥ`8.) :_\Xd'KmV}˴p&Gꇉ+Y.WFiIk@ K{f5!aESg@W贝i'SF-X2}@mGxp6 /,@x!%,xȽ)lS b JP w̻F? H;%]Caf q72=8aGĘ^h!1H DJZ~@DOTlы & ׋,Cr_`.. nifZE;&7MY!%R-6xU&Kox;a\QY Xn  8AmAĔK/&vs|ܼJu+JWPU.Nyj\{NY%,|7ʬ{n3ʛjk[)1WJ̕ s\0W*̕ sWͮlꓪOhuKH\xkذ mFEE3_?9yxkVw:hf4cp]檔,E*&h%I8t=!:#H\{+fй̝T' Ӹ-oHg ӏao:m;^f)3G5)yFFAWew_މ1ڔ7As{b_vdG|G~HHZj"FzO*atO_NG'd,̤u ]/HqZ0&z$+8$t=ȟm;De9c?bR>ϜT&cT l(?}2-zj lXd+&Fz ͂lF!c+Xx޴p^_/ӷeґ=t7+}eǡ+K~l8oVpD3yCW~4q%5&Kd<) Zӣitc@O^;x':ȧgĸ5VDBmUj+ENkٺTAQϜbU1lJ7!콛4_}*w6&EIFiE8PN Ey܆Tý:OӗS- I ݩ1uQ}>/P۝\ ymd#\9FV!+\X\m#to+Y#v6fۺ}T07vzk;3CD}n9w*YX-뭖ﮧxT.oK6eL6j-ȒDMz endstream endobj 670 0 obj << /Length 3526 /Filter /FlateDecode >> stream xZIsFWrD,۲VIvRdDQDB%mMHeR9h^:ۙd;O vTYv_j'?wb MEg9B>~sorvSsI&s#{ƩnmwdNNA{*9V8o_.d.:S ݹ9$qsO]`>'8nnlIKtuT:K_J^}rPylKC-Xy*9].}24RiV>924-fHCͭOV:[XN0 S\As 1PB`븐K΄HJ*#K"*}BBKYE|D,SyEeke\]P;gEV}@GZ.׍JWV#SkQB'Ӛpo>7ETQTԄ"&ߐЍgD^|ܟvG LDdj2i5'Q9oyVXQkiD9'durW}CQrj Twy)~B.y5Rauх%ՅdpH5I3S=Lx %ՍZ!ǍNBqWu2MwA-5lI3mYUG]]GA޶ulZFѱ(:Ѩk~ZE"%/SP7N֦ z͔;aFS_~d v#GD,xet("9" ˪p&-jm!*gl΢V`:A9Vh~L!g~7!#EόmK/k2G#:d1i/0S~EdY[ש^UEUa~AAE(W`ɘ}jo,fx 6K}Ќ:\%oCIO1E xkhާK t7!y+*ܗ:Dh7z8@̗ufmO[ЭYʩO'`*c"Dfg pYdJJxTiNθۈVQ6!R"WVQf-R,թ˧Ex; C7fg)5,n}0R>][g(\cm t )3[o|,L2 5|ߒJϗ7d[Ǣ(96y>I%5G;'MvmAކ 7܂bִ-TB/j#(X?h8>G[,͛&_C&\aYM=6b{ekSfFVY8Fҕhbεu?%'|X݆\_M v6ҟgc/' '? V f<{ U[Z`5 R6UrLe=#;n ,ob{`ƷoLW.c#< N.]&)9>PkRqvn8JEr$&dL8u0X_GOU HÍ6~ܼ`\M(U; 1>xPQ E S1XQ x^()Q&Dw? 
Ԓ[D]Į.NnI̡cUa8CXHmb Ijq}XTMjaR'>gz-pۧjHEyl}r:n^qʉ||17;U7Pɷ|iҢe-92J LҬ)4m&tXJ.{<:B֛^!\N?aSu;]ܼV~uj}GkتJWyF,wpy\ބ|CT_]߂ kw찙J勒./mU|oEF~z4b!_H\yx-7^'Gd`7gW2S9i[qw<G=FQ%ƛںa*>T71u3 8.e"_"R"蠎33qh|},2\/c9o.>*J4>5XCe蝸* HqKBrou"E2ȱG$:cQoch6!p7Xj:*5YewFra%V.JB}ǃ &>L_ endstream endobj 676 0 obj << /Length 1038 /Filter /FlateDecode >> stream xڥVKs6WpzgL |Tc\JNǢ%J#v4@K K$@~ ddt ]C*)}k8GzjեTL#sΪ3j:u>yP..Oe6"LMH*K|7ȧU3ayJN];{Wwr`,!ɜUQȎme6)N/aWjc-9N1zWQߟbْR3E5k6ygek$PPAɏ4ѷKjA}e:xVWpa3 LL(`!IN<4M kdk}fu,`(DZ'*yzO1?$DVPigh"9R^B*&o1ErԸa;`vP4JQS.hcqvs֖?UUa/ջ Yi`Qk.J,^S05Pr/Er)SBxIcQanگ%9҆iEbں2F,lf6j"qHX]j?dޔE=Yt^[Թve>@#&0fFtxMC˗2r˞Ѐm!}cJ,[FU`K/K*toQ{z5MbBjXXs du,nZ07e eO׎*<} b.-h}*f 4pܴ/rM_CXޒ}yKX;F;, &i  >n풎 q#PUdܴP#]HTL {'vZu<_1 moEn3vXbcn='C`'= ?1I۩On3: ?L͛iP<%d L?@׿jM_],0t>= ~M厚Wقet::W endstream endobj 714 0 obj << /Length1 1721 /Length2 10174 /Length3 0 /Length 11268 /Filter /FlateDecode >> stream xڍP-[c=w݂[I\;3_^uUYkj uf1s{3 3; ?@BQ ƁBCvhv2p$M]^;' C{'~9H;x:-\^xpق@S;%#PA.NŁݝ֙R vANn sJ?;cAhX[:/s~p39^ ?4`u7v;gS lg C@ei&oCS)bjbGi1UK t;88![d喥%mmAv.(;/dm`;sM:jځ]ArPY\\lll<#b^K&@`   ;; t,v(D,/w{^`dB/s{;?̗U\OJG]ώ։{9\lv'Ro)߫81 e+tP3 Z %Zz$RjR,z9cg/Yf7#+杴ؤZY= ^ɢlUާEhZQ)eIkfe5ϭ&|ųA X>hл6sVNZR؍EdgyB[^ݭi'skTDA6m5.va\'*lZqz g`91EG{ R^.zmzGoM-X=eݲ(50ɴ C$Y(/tPfp].&Ꞙ)b?{|n/FZ#.&4(&sTGeŭ9,FtU^4hG铎\}<<72=MPOU%@ Qv&)o vi=ďΚoyn&Dj<`|=V6뢹߾>d{S-p4FB `#ߑBP$[9@ϻCvB {FQΟ?vdUڪ=Ie˴6ees}A) CEJ4.kǸAePǪD9SlZ(|B&xDdzoY>k*ٟ˲G  oO #Bw$u$\Z45q]é릷 _5\s$}|jp4Oz'acZIVF}@y,&=" ύA'v;ÎV8dxv=_i-)%-+'aqKz Koo9.,,A"DuOP 8E<=M_|Mά-dGg<}즩B4g? 斿hw>a('Hx!2[kB[Jb[S͸wֈ?ÉE1Hw]5'͕ʳzX~˭[w;Ô#YN 3ڂl@}@I`=Wz@>!I 久@}.gm=mYcZolnw9J\2a~isJKTOQ 23ɤ;ZԽy:F0cP][rɇ'}  v!TV4!Ɍنλj k̓rגlɯ/4s5ŘJR8LIe$4xd*y)_[DKvq|[u(Z(*-r|P['ZTݰ9?x<{3%v'F۽ {S~^Q 5$$">Y(D_ 4a(å$3GuK©@ZZg_;H.A@b5oIxR"_'ͅ uX:׵7603gG9? 𕧅' ~``3lJqI^M6H.(أf,EZl#\iAU ᥱw#n,+Mż}lgZln0r[Md'A&n)k󆲢tDf`#ec٤=;<%`6^r~mFGI8|Ӆ]7C_ى 8<"`4tW%3~{_&Z6";4jcf@?- >|&Kdw9CȬx`7&` uK5C֫aWpm ^oud/RYj zݰKXٱPpk;KH{v#7%Њ=$hhF iK+4B<ɯm\.=a'QmN)d*nV S3d= <V$so&NڶE; `n68d qBz䞙ô("emDЫ'uyJH1@0NJѿO%ڐ7vQ(* |"џ~& -F.v(N{ "Yi0ؐ3? 5DܜN[xyJS/"ߓh9uv{bU\wED/w"Bi"WU/!1}R\jx*ޑ h(`b-S43V_ &M}ODO׻$*)sLWootC~gZ%/<=\fcd_œ4#hhp}+JdCv 9U";\tO4Q蔛uxW7Ϫyܢ8I~* Isl!xXb79P*1Lj5elW !nU\}*F*Ym/Bqʉibgahnj t .%^ɹ-9uA>2SꨂG/L U$X5LѬԘv݌̴,"M҄& O @DG21WaUtzY?%:S, '8R͠ÇPS2(1/u/m諣A+ 3 w A^b (g7WW#_8>D3wG'=G`< kuv?S*!2iڻLJF9{F߱w/-8VtU?CQ}cGG^/nmm3qCxo53u]3*TP]Ԛwp.#q*BtfL+| c"syF0&X6ɩNG}M kEk%ҞoZ^3N|[1AZ<| qG}r+;pc)38@5a!skb![}ysЃUĿ\Wl%"J,xIh2Ka>lJ`U#9d k1AK(lKj 蘣AѤN-+>| y9gD4L7ρ[㔘VEZ^7=6xPEH|%a_ A% ,!%W'Y1&c%RWo<Ix|l[[W}iwGq]Ÿf͕_ş=LX7FAZu>ihcfH!?.V wGLԘd_Jw O7;>9_ϖ6Ӛ)K J1g!=40bԘQU!evS4!fS_|LHx]m B`vت PI|w8mۘ79$)K:{ t-e_3ƥ%iUܜ; F|4w/@wOY@W,䬾\}sxGщi)/!Hf\<ʼXYzXѮbYĜ z(Ky1<NJo>L풎/Q|}pB-m9浍e}|쑯vo}δ`t*UC4R-- ;ݲa?KE}W1U<]49E|P6%W8V{^sTI#q6,@6=NK-/`f%> F]nxZ+M q`LЙ`8-kMWӖF>XWO܃WZ%c(H_Džabx;0pXZF[Q\G6JS<}Wv5y^GT &@M؈ ;TҜ}*$|L w96*%ɶqKڷ ŞL,.^B6&wb]k+o-qI^|aFzR9^OJ0hY1HN"KD'j[%oU/p$?f?V 9aak׈|\A"'eye.v([|m"I߮AD=_jWWu 4VoMoyF-?1TڬKzۮ( 9hX^A!bMm-ԧMNClP￯6k&,h;@UӍQEyJm[D"y? 
&NWVdK㠂ՍY\_Ba$\.Fdbסsz] ,C)'2g+]5eԻU KxQT2%,^di;HBj&ƯF06Y-;WjVMټ8$w$7p,f.h^VV؟7]8bJrթ5- 5K3Kn wt HSչ|`f#HfT.jڶ챌Ic481"$=)B\S\+#W?I7 XW$XG)ҭ4܎׉Vn,R-)NjVLд5&t昢52+ &jHQ5t˚" a]V4ArMSj ᘚ8l>*S~s5 hѣWIn|qblO,ei>,5v;ԈIn\sǺ ;On#4pc$LEKv}?97$?L;եN|IԍEHs3˯ں :]V;J`BfjYOˏ#9 ԯBh/Qs} S.dmt"#DتqouE+|\NJtQGάO G]`,cdU=F?$Z  j/r>C W)=Dkx?}&O4GPBm>}<Ӿл4EVзW4p6:X5p([fj[h)<\ {.yoR}%'T}Ob1Eh ZI F,b˅;W;3ZW8(In㶃dٯ &ڵSBL+hǞ.mMpd,oH0%`O?KƟ#~ a+n0:.(HwbX٭8#da588tLi;LdDTp1e%CLme(dY'f@I֨3ŗ\^@K-dR[)9!t450G{tvμ xs)8xrNg,G τױYxF&<ٹx_{R1ɫb jB$PEaNXwHl[N>=>M҉*MVѻ:;{|45h?]~ fkIh_hKДZb2b!{sf1k1O2)"(&Ľorй9YMS۰,.B]8ܖ@ зa 6Tm,PQ_Ne1ӊa !3LYTЧ*cLR)C Ea vҥupé73k63dOLЏ٥T h32OK;CinhA O&it}(OȘ i5qui>C|BE] {ke=1F Q5 NDKaFXR=qd2.y8*pᜧNbMLׯiM)05r$cFq ⼮/o:e qլwy~U[PpnqD.`awxf\p.v@O8G@^=fkgF#d(' endstream endobj 716 0 obj << /Length1 2074 /Length2 16029 /Length3 0 /Length 17301 /Filter /FlateDecode >> stream xڌP]k cݝKp l58 4w` ˙339սEi}Ţ$UVc5s0I9ػ22ŴYY,,L,,lV)5A.V|`;2 ;Q f `errXXxCtpHݭL9{ <{<hLi 2ځLE%=)`jr4|L@;&g !Z%@rv*4&xJ 5sW3.2ٻٛj nm#+MM^Vs+[ࣔ+hohntZM JU+w}.V.L.V6Kڛ;ف]]Odw/_e92k[9d%yY\,,,\<ijWu/Gпkqtp2qn ?*̬L]& +{? ;[yXǏ '_G̬-)Bs0rr8Y\ w7w{g B __J t}N__&//3rohgeo亹o._s5:YW.[V.RV 3e+WS˿o_fkeRvpj0vڼ_.3/}y7_[~@gg!#N:<5f&{w{q~sgN,o `/f0KAf?,qw/Y}2?ځ,bOϻk"w` rݻ;cs|Ͽnrޥ&/'d `CxO)VKw!{--?w?{5-{{½;=??޻e鳾''_9.ww̮ΠtW^Nww^zv?{av4z@ SySOֵE <wg(wh}="äVe;ߊ lK܈,Ä&=>%N?##dT}q lurAVø+] U٫Gx.fT4K$k ڕev=gD.3{[Üj:K.. 9˜ulȸOsxdʸΟJ3:1(rICFYV/Djf?N4:|pX? bluds=?@3>/)}ĞS':>GS5uo_/'rAKAg^C5 V$Qo++{R#'Y61Ӳ,Au9L;B IG3R©>a䙗Re3Sjg(t1B̠l>C& ||$]=tCA`^9dGqP'Kϝ%9dٺȒ"|l$s`ϝճͿmIb$>?yyV;7[2^[k-0_Z҅/ eE]|!x;LE)O#4/SX}p^lg;ʚ~U^~ڇq՚Oqk˻æEޣ*eM9}QK]G}L-t+x؍h\U_z1Tpg1(w9^Tڴr7\ Y!D@G4ր2Eڪ-Ĥ;(g430@t$ԛߚzyz1/ixџkT[~'>~Kh 'H]wX[A"$j,(i[,uք167|՛H7r eo@JXaSA[y[!<Ŋ=3\0=H+6u!dt3!'ltrZLta8?59%H|Fo*@/%72ft^];os!\lJAbD.Q)?yyߚӰTIUNeqo_Ky9Qӟ6:x*9z[LULh?yxy`~9 *^}HCD;sY^hb44>'$5ݛ垨>dXŅEmI?5q$b:2!jBpCr1"=< gHn(HRO4ng x& _,‰9,Rj=,viN{RK9Npnӗ(NdEx>bޤoK3/h ]wa*񝶔qbe8/^LӋ9T)~$YÏ8rY]|{¸%ÀD+|n/'DUWl`Eo.IKP)ͽ2 &6vS5(Gm!Č\m${vQ#dTȤҏNQ}W&wU,l:94afsZdzx.z1bt %0*-!%1Nв*O")&,ԅ(5+ y!Gd2LǎR62rO"bg}e; Lpp"k`Cm:+FT@?Cuh&hRGZ-&\+$؎z1">;l7d>M~FelIRcoDd-C}y Wkg0 8:DJO[ " 31- u26gM1>]__5j”V#`zw&܊8.C Slk ]f&gzU0E^VBz(QR+]Scp0z<b1L~ga2)Iݶ&ݎC }}{!,_ØZհsN?]d3H N@;Y$@sZs 5}% |/^`[pdF"VȿDsi8+ĠD$5G qwڴ1@Y *~ECzB//ca mlEvυH+y.nѓfuθ5¿mCRF5̼V9PlVYZ,V[v>6c}$\x,ŏLim6t ~?79"ւ%TYA MPWIC/`yl#,񣴤ED [@)%8BqƩuLp|o]pQ*@Mb<:͜SD4.o ܀CkNcb<څAC.KhTܽwo"7sDgIurfE|ܜATXwjZ%2E e aaOMuW/Ã劷&v_kub6'#|?Lʙ#:ٛѬ,gz_5:HʉA V\)?׾|_HF]8i{IRtЋ2!\U?-Q˳(2 IVlXc2Qݱ8Gz|wx[ׄA)^c7ЊPJBꔂUJ]LIW|>ө$_((oMmg@U0ڷYg:ZFTd׽ RIa=`|!P`2܁5.xU^i)p:H^םzh!1XpC-`)y>KRyp,Y](!5̵~ ic_.63ԥ IxZm_4Qѫ,!$u\@ ᰻DF_+,uuxGLm^+ *nf+O> :}TV$1;r ((o@T(b=o6$7ik&A 6ed ʘMXML3WcS /rv4H88{V$;C,Yw9sCS|[|]-g+O[ig:S+oJq!Uh áFqn0Ri@YNRm^ɂ"n/f4BCDtX| SKW$Mo4̴NT"/l9JF9Atm<ԉlSEC"__=>ikxƲ:G4y>iʨn-;6lji/|YtAIy]ڭ5$)(?Xtz3/!B>|paƑ^)7юJ:D" ҭuc qI3cY+9>Iʮ BuNt*; hG1347wX/spǏ4)+;?_hM 0 ^Iw-NbӾd8N>iڥҼEMgwd."M4P@:(夶J!_gvg)GL:e `^vlBTB~`isEN"B8If e5 kJW^İJecx3<ШY{fe<{gú Ek3">fCe̠9׌IXQM[3dcx7=/aMwKs;Fd!.bE~{1,>Cg L$DlqPkw:5ˆy[#\NIL_dNꧾ}wڟ#30w@?ݢ!$ Re5Os 60+nmN_L9dCbqHPQȀFa@'L>>Q^GQe*[7%:sqէ bS3F:9T51Rvx#p:Ea z,z]O\Ɲ~#1z X:2#^Mt7^&u^ZZ{ ń\yD 뭇)$)ӷ LW }r_"Kbe!=e1В;O!ͤd E~)՘fB {9{R?0H+Vw~#΋^ nd̰m]w򞙆?go%{_9.W_Q3^DR)2m4*Mdu-#K7/4ӛ.+դ7]vVgf2P~y1K Bl"@Šbx/ĵMZٜ<63hwq?w.=Ӯ BB^0`x!oct,hJFI E!9)%ðwyDJ v8T|Ӧ>Pk OIYX*hs! 
V}4mx#߹>t߰&EQ[:wx묤N *grz"VZYJ@w},R+_c\yb^9aǵ/"V8:r`|·6M:Uh'V3HJlbZWSdfV| VXH#<"IWN%Ȝzѽ])C1(θbrhVS!$vɈE f?J&Ⱔ jL\,j\j>8޴po.>,67kٟDsCz1 ?5m{I-UcףqC;&7{vOW@B/=5Ʉ;VzeSXl\[_S嗯!t̞b~0곙^/Uxfr '2>~Zοmוt+ú9+ſϤufT} ,8J4C&EZj:-Vu,@Q7qpL`&}nEUAoX>{jJ m4IYaj|z0x#mDܲ#Mvd&O ~>5m2) ~}2AY&(4=KćNNB孥yI:D>7koڸtnG>1u ==\ܣЭ{Vvr qS[3J/ @ jm~>Q&^Q!96|fdpi%4Q|un{AIjR Kju*ݜ%n>rL{N ^+wS1hN;{E3hL.T AP`+g,)Ѡ凧6ՊKF ]ZXm,3r_H5>R޵U8hڼAO_15lc})sAe+rmfZ">z8cL9++B-,m@'A/V WWB/t"A=?^n:˜T,q+A uIgKeGJ~LyE [9oE;Iޱm*`l4嗦m-՞;agr%'siW6p7<5FsEƞi_+b~+J=^FAkY"鶂2!mf<#(%=X|(mh@KQQI <} g1fqSz<eN0-x]|T!y|_ˆ~ΏѬ%CђLzO+AGV<(p 2ۑ% OZM qgo?vf}"9W cx)d y y_9Kb -mW8QYj |T օP>`xG(=,9!~ƙM#*{ S8{BnƌK//%ɅڊɄ MGDRO~;[EՄ?|޴?ut/DT7Κ3q%*ȧ7<6qk$4%^5*FT0Βҕ 7kWcA;[kRr}NɃNL!9eXN@& Q?wtO'܁W0qGkIЧWG^,m 7'.2ɹn덪;ф¥yU| Aؘ 8|т2Uԝmf ]&OCޱ6tZmL21/0f"-uydJGÚ^RcKȁE8yhSFAXO2RZ )z-4Ze/o8"K3y8Zԑabkl4d+->*јY?fXN|5yIHDHOwSQ=v" hŻH2b@c*-I:Sk L98BI1{4֖8ϳ2"jEpj@p0r762RSum\eރ +Pk:BܶK?)sG@|}Θ,߽urmB]WP da,&Tg!\ZT!UWqADtp`y/"d e +ƠH(QOFC^1*d(>O{ZeelI 2PWh}WjN|I'Gcё EI %.3CϪ%jT˱]A~8˦ìr\\.N7r0zt'UP0!@y1o>Z3n:Mim(&}ꑰ}"m"}趋:,b=cSk}'fKR;Gs7t jV;IEo~x%#WYMx>>0" }DbTZ&Tcm *ROPPMڕ Ϳ}ɦ$'zI%IEd"~3':n!@xm EQ*h srUXr>8&xSB_y >S /{L 4pQ ۅܻdnKпȷ<] PfR|#v'x͋/32ҷIrc`TqCeWhM{z!:4Lc"N6I= xaT4l4zů a@oBOfr]a@85]J}]%\G.M}?9!O~J_k崦/^\i0:R.]#o"S ш7Zfl2FR˟ߧi|D3A CKLhŷ~y[; hn(ZkҕtDCa!8"]r sfz .W?М{uY}'ӕ)'ڡ#-j\|y]5·FMdػ8m~`k>GUWM55a{\^v/8(ud++'B5qֵn>`V +Z W hNr$j ;iÿmc3Va{`Wm̢_ 0C⇐P.AdzQ|WPQoDF,ZEH)puY_oǪ?I)k .\cm ?5ܤl,bXI ,ZeH sh'MyhaWpwGr攂XF2"u!Uy> hvӏ9ܦ ^ht $W>lP^|HN3.=b," ٫ʜ]q΅L ѽ<'kx^ŲCvH%&qF\('.='Al=tau4hSW t,~Cff@vFFKb}qjfgos4h_-n P 8|<$f(첽[p3͑oU 4Q6oq+ WRAea"gEr JdP˘O_v. hK0B7>y#g(_H}Eb,T|F^gfUQZU xfu!ƌSиM*cP!}"DM:բvZ3 <#>I\iB&%')bu\P?,6.wEM/?L4qG"+??8LSσ%Md 닣 M}B*6aDo2G?!aœhs8y>Ze9K~*j̖gx)G*t/ɫp/ZF3&brhs:_nZT1:T! MpwpSxsp}r+(lffY5VAsDt-o ɠc1pw!.l^ɏ W\P?2c5Jnq fx''WF utHXMr?;op9DQJ#TR۾Ky M^f@r&5Z5Rw*s}(̒_I6 ş_|ۋҕ7hO4YG) 1uM)Ӈ=Qii=Pyi<8U7A$eɀB.!րwd1Bۊ .$%(^Eܕ'QP5 ױK/Ja߈6r#jz=Fv*ծ[P*mwUT bmN[Ԭfu8)軤h lm) VDHom"_η\ (GB*>Hܨ/tnk$ [ie_NfK?&iAa2zz5LV(vYp}l-af8; =a.qc͆WP5؈ WݏoW }*wC$vAM,oU J%@Y_L>F?B MXV>~ÉIyif6ua;=j_'R[])"JxL) ] VwE )y2kw%/`7=BR@#qr:żl+b nR٪}{MkxsūKa$_1~ L:r\Gjji`NÑ8rW}$ -3$m%pSmPpuӉ)e w=<7FK"d&AE Q룐QizB73@ҏ횊Mj[FW- ǿhk|S˅g".gfQa(~-ˤ'^_•@:͵MqN>z 9 +(. 
ប_kXluƣ0"樞SU[raLrj8Id͒=G٪0VEV ˃6#|ROekG${C4tB̩Y:s$\D^ȍo*{S KasH{WU4!^?V1\ńe]Γhdtx}3'XUf  (vgoS=E Sb|}!(lvgF_)/l&4܇u?g.[#d[YaI*J\0e+I#`w=;VWh),fLlb\iD/ob2PYXnS530H F$,UNYGˬuȉ X5Idv>_ߤ ֚30Cr&2S!s[۔'0e7SkDz{/|{KZ^ه-;TG"gxb(j-g\~Uȧu*,S$yiUSd-)i!.[c~1tw?!֬y=]i@;5z3q֊:ʿv&|=#{/(!"UU {z62 OI0|=M.?u-QaYVT '[nagI?J kNf(A{WaТ]0ɫ됸m}NTgΏWN&bFY9CDxNjZtrJuшS^_"OʻMcV]X7}s1Ο2=L<|R`»7\1Ruw,1 ehoG+K3dTRiI)R6Z]R*WRBp3+K8jnY1YT׋Bb_ 􌌁+pi\0*Pm NtAv&\w4M4sw'FC+9J"[Gj JxY9o5vnV'_]{\" SMx%ZuobH6+šxx2 JE"٪t5h1QGb.s"mAy|7ңڐ(o;\ʃIcAq0EߝIgU*FMJo~^ AУ/: RBv) TD}WO?t }/PŜu G_K&>.ެ_TMkgkh/ gfEv5zvK86x\N4[EUͲR!˖yXMW#4n9srK;]P0\7u סߑ}>d0*kj8}ły1Ɣ?L5 P=U "//F M^tj<6,f%1NI$hdCV9k8ҟx=YJn>!{-3Rc:h50*5ܪ NyŌ hZ*UIk,p<^Z6{۟]r+4$,e`=Zd]d!q1= dppU1˛/}T&tGP:]c(M@t, TqLw| ٶ-=̞)|.]ƓD L1-w(RQMuk]V+6o.sFCт ̬GokK=ƷFG[,]NOZ1#qǒ ,%MJ7aȕ)گ%,zZX0ORN?No9 9lCQH+rb-yD!ǤVJO^XL[a-flE^T0c=Miܙ^/ ׳o4ZZfd&J'pМq9لk7m37HOq!xࢯ/p~Rt*t`?G& Ez`S֘m庐z :Y}c#=@6Es1A&fAE6]ҷ{8ͨ<"r?ĿZ"D<<6k> ք5/ˌu{%U#DN0]>((D`╯%GXVLm'RP ]6$HlԍX?|uZ{S\ _Z({?w]Jvt1Kl{-cɃWnbFCez%ʂ*N v׆T7 6ﵦTW/qf$G]-bt *Ga@6q(O_7 w0L.GN*m3 gaC3baӗe $Cܝ]K& V䨤GS ~Z9C@.\5<`d*R8iBi2+m}:`'RP2Ӌ]jzEO{d8*'(۬TpѴfY>iȳ؃[5J&Dʬ^B,*#CyJ JF}(Do?)RҠQ Oj*vJ*{2Efs܅S֡HAB-mAtm !V["Z?mϐ?9P˟2- Om)q~95I;8'CkYk/qO<ǐZqЌޏǞ XkbZv* tKx_%vt dkBQ+aƯeP=gw86}^ 9My9稂Z:Y;w %LkY&l?*7q~"SbxI"Y#Mp`ER,YAZ쪌]ґ#kә,p xܫqىrm`AsNJ8m|} 5= 8OJy]3%H=yh۴C sťkWj=mi;8ɩ.>:QiEUػ۱ɰ)ƚgŨi$: {oc9z&9ʡBNAL \m&i┷ûİjthVGROJaQpJk-} i]T"Ժ*hy(04K!4奈q}c0i6X腤 Iֽ{^yHjM* 11X׻.~&u@K;Ul m1CLՍ',{]rSVI<*V#~.JZe#*Su0/уXF]2 ٣dG=yǷ6}=|,C"-gmsY75lGX0W-ri=|#Wn;Oo̥C^u OuǘۢT1~0"dyy0Li1fц;&b* {j3Alb3a.kӍm~g)4K{aӈU߄0b;Ѩ_F.*_cs=wU7mI>+.\gVy R=R$N;NR/<E, B*ŚN콧4B4*FG H5w\nŝ}Vmj89Ke ;?o|젶ª<VfZW[w;edALzZt&$K80~ Ժ̙o˞ΐ|rdlkrAWc<PAmzb۠bF%́o~i/L'tsab(bE/2WPB@̝~ͱBNχX6q +6<'w[?,37/b>޺)nȠfh`~>t(13*rs[4 2usV qKL}gp\*ǃFe^\;knhC;ļL5?'j ١6ID="Df*bBW yg Qctq|7 endstream endobj 757 0 obj << /Length1 1470 /Length2 6712 /Length3 0 /Length 7696 /Filter /FlateDecode >> stream xڍw4]׶hщ[މe> c1Z!zB(At!%z]<}_֬5s_{_{ϹY3vP5$/$`!@04!PFA!k }MF" /8 $K IH@0$T 0@[@"Hw?}_=7 $%%;Pt` A;CݮW0E+3.-(#q@0zBQPWˀ gk쀡3"?Y]+C}ĂF*&JFSI b @JJ FUG8"?ަ*S\7\:kBtn _ ;'_YWwEj^po?qd\ }=Q@7jC`^nUGCA{#aj0_ m^48 Cz~-_w?geDJrs (>&cNG?Vi5cfG4$'GYy ş(5ў-XħCVOw~fgxymZ9}OR&fi{bp(+&tb}$ EͪK"iQ+s8'0Xzg(i55f .=|w:Sb&\gȤ +h{HSquRPɺS%I-c;\O&"2mPE}ʱVuIt-2Cp8}<5棚a˜{fi|$!al ᠯZC?ceEI:ZrK w<{T[B0w-6Қhg4d$Fʛ13e[u+v+)*W3ȋIk;YwuUGsw/74Ŵ7t$V;թ1%@6ɡjJ_ dԚERw"f^bjS <S.<zYl"S#%~SQy ='-r8g˵cELu2둩b27nZW{;/Bse4=U+t}I @[;MG+, eŸl[hzeq}9K :Pe2lOפ!hۤ"hrZ+i_QpQd:(Bq::8=~X:ǽc5y.- ًX<}$O jPsҌ9נcl^œHf3R{v*}Sӭspx',8u a"C[sebӠJܘIJH_X2J\532hxf0UWؠuҎxE(DQ)q޶-Z 4lO uzDUQ4Ib-īL{΄m:d7%Ev[iaZ/9h܂4ۊum7-":TI*?|[s~@7J"7Ss_^?gݻuuO\)ANt|6y}tF _ұiX='%Z)"f`ɿTO}ߛ!HT8I=*<]}i73f4PWKYZ7#v'&ͦQCKsտsmڝSsbv)[ ܢP#( ?:*^rb1[G~siHӚPhkXD&Z٨0'ەҾbk/Ȳ~BQSo<\Tel+$(3T88r1jSRV &]aL7x ub7|-M}V>mRGMU[Ai*LBӇzci`ޔEP\[3U4_ݥ2 &8aϳ'0sHl_e .uc˩sXp5!ؐPhOJCO%$nPU ~L2Z97-~cAZ7u#稏5cΖ\]KgY' lE֒ȂP<\Cc\uҊ Hdvim=S 9WTswUTDav?@5rL)bL{.aJt`p!bfj>$dUHwHB`*n`amm=%6%,1olbАt}Q6uN0˽ If}exfe Ĥā Ȥ&\"z y+U0qL{ZRF#MtXyVOB;1`;7:gke;Ymg9} &d@]z7ьՕ2B*ZVIW"w, @b|[~9 }xr1dƤ1OG!}T}AcX he%HLFoqNAֻq ^O=hQ/>k u;O:X %gBmƻOO%n۟ crƓb X16t7yhȷH4M> -84sјTM-+Z)nHe h9դē&GrJ?ޒg즰mҿ!ߛ ߉QbM7XB14cםHo<~r8Ja"4<"Ҥ3j./Hgr6{?-?>{?5)őUfWOX-y}\,ՄUtI .nZ͍=u7r~w7b=t/XZQQstƕzګrbL!xC>ޭg DbtsF&N2;/-f:F7R˲ũ Cd\1fG}eե'(Sk〶n^-ěf]{]e<|>?IT _kO^M$;edC˖}mi2̗)a;bMY11E!aZZ0m@A9L.qM¸ _K?Žw?K[yiT,MLhY,և p9ӢDʻo"8k]\j8Q JpN(dPa9;3;!B\X%*cVQ#0RN]=ɿ\^%v.d|,pA =Rc{ͯgx*%Jp۰JY\*κ)Q)ө5:%M,78G/f'J %UP%BnQڬסq\C*BA5ћE?GsjihQeg8 *X.~^4BJNqݯ_Pq!zmd+Q.nHٔ%NGj cKϼQ![@1_p'&#t`㔴ֲRxC䑊-Ç|4m/~2)Tybdv>9jjė|˹2k<,fÛgOkJx&mNFL'n{ sj^ď1z-($OzUdE*;mwjltn HV}h‚EIM]a{#=ʮO 9mpHP@ChK1=K 
QH;5}p娜Em ?p|3Zn;M+J;#}$%/dkѮ XmdZS>f+]hu$m܄ОvVrA7S˽H5ݔRrtgu6:g9+@1sF0@}H?gnl-BiCE?rSӓчg>zgN٬;, *\EH jHco½ Gz졖mc}XpUd:=pMi@B&w3ih֩/~3*cPIL68}iZaClDSE.z` endstream endobj 759 0 obj << /Length1 1419 /Length2 6374 /Length3 0 /Length 7333 /Filter /FlateDecode >> stream xڍvTk64H34"]JC -]RR* !HKw |=;k֚yg >, eWB9 )@^S__@ !B MawqE@A_ k&PsC`a,&!H?@qGM@ w%G9y ll`IIq逬8-D( 裠8_%mh')AA>@zpW;Ђ8L&@"\Qh v Pu# \jNp?`?>`U; ^GJhO4qB|;X]~wdu uA8]\_# *seEG<v%՟^wDy8eX#aֿ9 :" A]lh@I3 *or_Or!?>w8vq-B0!h np$nǾ|'` d~M/ *3199'/& 0HH~.A\UGk 5c8xB]pCr3(zURrC"0 pMZ74Q2po1h50GUѐk!: ^#U P?l72$rEz`b҂_:\);VοTt`$&$*@\\ ^ח|m>k-I  8)x~5ʅ׍\-q0 %mIzX€o_@\\0׽-8%2 {xZ)47;+\9V?CyYVFן8td—wtӬ-+p&E%ص( C1 F,R<ӽEficʕhMb?C2/mKgJtX 4o>ThrƆE 8ko(DHِDŽ:M6sk10^ ,jsctɓܪ Q>ҤoG\NSUԷY0XqBF^P<(eavKpA]~{,#=&En8)Sp@