brms/0000755000176200001440000000000014504354272011223 5ustar liggesusersbrms/NAMESPACE0000644000176200001440000004244514464713266012462 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method("+",bform) S3method("+",brmsprior) S3method("+",stanvars) S3method(.compute_point_estimate,brmsprep) S3method(.compute_point_estimate,mvbrmsprep) S3method(.extract_par_terms,brmsfit) S3method(.extract_par_terms,brmsterms) S3method(.extract_par_terms,mvbrmsterms) S3method(.ndraws,brmsprep) S3method(.ndraws,mvbrmsprep) S3method(.thin_draws,brmsprep) S3method(.thin_draws,mvbrmsprep) S3method(.tidy_index,brmsterms) S3method(.tidy_index,mvbrmsterms) S3method(LOO,brmsfit) S3method(VarCorr,brmsfit) S3method(WAIC,brmsfit) S3method(add_criterion,brmsfit) S3method(add_ic,brmsfit) S3method(as.array,brmsfit) S3method(as.data.frame,brmsfit) S3method(as.matrix,brmsfit) S3method(as.mcmc,brmsfit) S3method(as_draws,brmsfit) S3method(as_draws_array,brmsfit) S3method(as_draws_df,brmsfit) S3method(as_draws_list,brmsfit) S3method(as_draws_matrix,brmsfit) S3method(as_draws_rvars,brmsfit) S3method(autocor,brmsfit) S3method(bayes_R2,brmsfit) S3method(bayes_factor,brmsfit) S3method(bridge_sampler,brmsfit) S3method(brmsterms,brmsformula) S3method(brmsterms,default) S3method(brmsterms,mvbrmsformula) S3method(c,brmsprior) S3method(c,stanvars) S3method(coef,brmsfit) S3method(compute_xi,brmsfit) S3method(compute_xi,brmsprep) S3method(compute_xi,mvbrmsprep) S3method(conditional_effects,brmsfit) S3method(conditional_effects,brmsterms) S3method(conditional_effects,mvbrmsterms) S3method(conditional_smooths,brmsfit) S3method(conditional_smooths,brmsterms) S3method(conditional_smooths,btl) S3method(conditional_smooths,default) S3method(conditional_smooths,mvbrmsterms) S3method(control_params,brmsfit) S3method(data_predictor,brmsterms) S3method(data_predictor,btl) S3method(data_predictor,btnl) S3method(data_predictor,mvbrmsterms) S3method(data_response,brmsterms) 
S3method(data_response,mvbrmsterms) S3method(def_scale_prior,brmsterms) S3method(def_scale_prior,mvbrmsterms) S3method(dpar_family,default) S3method(dpar_family,mixfamily) S3method(duplicated,brmsprior) S3method(exclude_pars,brmsfit) S3method(exclude_pars,brmsterms) S3method(exclude_pars,btl) S3method(exclude_pars,default) S3method(exclude_pars,mvbrmsterms) S3method(exclude_terms,brmsfit) S3method(exclude_terms,brmsformula) S3method(exclude_terms,mvbrmsformula) S3method(expose_functions,brmsfit) S3method(family,brmsfit) S3method(family_bounds,brmsterms) S3method(family_bounds,mvbrmsterms) S3method(family_info,brmsfamily) S3method(family_info,brmsfit) S3method(family_info,brmsformula) S3method(family_info,brmsterms) S3method(family_info,btl) S3method(family_info,btnl) S3method(family_info,default) S3method(family_info,family) S3method(family_info,list) S3method(family_info,mixfamily) S3method(family_info,mvbrmsformula) S3method(family_info,mvbrmsterms) S3method(fitted,brmsfit) S3method(fixef,brmsfit) S3method(formula,brmsfit) S3method(getCall,brmsfit) S3method(get_ad_vars,brmsterms) S3method(get_ad_vars,mvbrmsterms) S3method(get_all_effects,brmsterms) S3method(get_all_effects,btl) S3method(get_all_effects,btnl) S3method(get_all_effects,default) S3method(get_all_effects,mvbrmsterms) S3method(get_data2_autocor,brmsformula) S3method(get_data2_autocor,mvbrmsformula) S3method(get_effect,brmsfit) S3method(get_effect,brmsformula) S3method(get_effect,brmsterms) S3method(get_effect,btl) S3method(get_effect,btnl) S3method(get_effect,default) S3method(get_effect,mvbrmsformula) S3method(get_effect,mvbrmsterms) S3method(get_element,default) S3method(get_element,mvbrmsformula) S3method(get_element,mvbrmsterms) S3method(get_group_vars,brmsfit) S3method(get_group_vars,brmsterms) S3method(get_group_vars,default) S3method(get_group_vars,mvbrmsterms) S3method(get_int_vars,brmsterms) S3method(get_int_vars,mvbrmsterms) S3method(get_re,brmsterms) S3method(get_re,btl) 
S3method(get_re,default) S3method(get_re,mvbrmsterms) S3method(get_unused_arg_vars,brmsformula) S3method(get_unused_arg_vars,brmsterms) S3method(get_unused_arg_vars,mvbrmsformula) S3method(get_unused_arg_vars,mvbrmsterms) S3method(hypothesis,brmsfit) S3method(hypothesis,default) S3method(kfold,brmsfit) S3method(launch_shinystan,brmsfit) S3method(logLik,brmsfit) S3method(log_lik,brmsfit) S3method(log_lik,brmsprep) S3method(log_lik,mvbrmsprep) S3method(log_posterior,brmsfit) S3method(loo,brmsfit) S3method(loo_R2,brmsfit) S3method(loo_compare,brmsfit) S3method(loo_linpred,brmsfit) S3method(loo_model_weights,brmsfit) S3method(loo_moment_match,brmsfit) S3method(loo_predict,brmsfit) S3method(loo_predictive_interval,brmsfit) S3method(loo_subsample,brmsfit) S3method(marginal_effects,brmsfit) S3method(marginal_smooths,brmsfit) S3method(mcmc_plot,brmsfit) S3method(model.frame,brmsfit) S3method(model_weights,brmsfit) S3method(nchains,brmsfit) S3method(ndraws,brmsfit) S3method(neff_ratio,brmsfit) S3method(ngrps,brmsfit) S3method(niterations,brmsfit) S3method(nobs,brmsfit) S3method(nsamples,brmsfit) S3method(nuts_params,brmsfit) S3method(nvariables,brmsfit) S3method(pairs,brmsfit) S3method(parnames,brmsfit) S3method(parnames,default) S3method(plot,brmsMarginalEffects) S3method(plot,brms_conditional_effects) S3method(plot,brmsfit) S3method(plot,brmshypothesis) S3method(post_prob,brmsfit) S3method(posterior_average,brmsfit) S3method(posterior_epred,brmsfit) S3method(posterior_epred,brmsprep) S3method(posterior_epred,mvbrmsprep) S3method(posterior_interval,brmsfit) S3method(posterior_linpred,brmsfit) S3method(posterior_predict,brmsfit) S3method(posterior_predict,brmsprep) S3method(posterior_predict,mvbrmsprep) S3method(posterior_samples,brmsfit) S3method(posterior_samples,default) S3method(posterior_smooths,brmsfit) S3method(posterior_smooths,btl) S3method(posterior_smooths,btnl) S3method(posterior_summary,brmsfit) S3method(posterior_summary,default) S3method(pp_average,brmsfit) 
S3method(pp_check,brmsfit) S3method(pp_mixture,brmsfit) S3method(predict,brmsfit) S3method(predictive_error,brmsfit) S3method(predictive_interval,brmsfit) S3method(predictor,bprepl) S3method(predictor,bprepnl) S3method(prepare_predictions,brmsfit) S3method(prepare_predictions,brmsterms) S3method(prepare_predictions,btl) S3method(prepare_predictions,btnl) S3method(prepare_predictions,default) S3method(prepare_predictions,mvbrmsterms) S3method(print,brmsMarginalEffects) S3method(print,brms_conditional_effects) S3method(print,brmsfamily) S3method(print,brmsfit) S3method(print,brmsformula) S3method(print,brmshypothesis) S3method(print,brmsmodel) S3method(print,brmsprior) S3method(print,brmssummary) S3method(print,cor_arma) S3method(print,cor_brms_formula) S3method(print,cor_car) S3method(print,cor_cosy) S3method(print,cor_empty) S3method(print,cor_fixed) S3method(print,cor_sar) S3method(print,cov_fixed) S3method(print,customfamily) S3method(print,iclist) S3method(print,loolist) S3method(print,mixfamily) S3method(print,mvbrmsformula) S3method(prior_draws,brmsfit) S3method(prior_draws,default) S3method(prior_predictor,btl) S3method(prior_predictor,btnl) S3method(prior_predictor,default) S3method(prior_summary,brmsfit) S3method(projpred::get_refmodel,brmsfit) S3method(r_eff_log_lik,"function") S3method(r_eff_log_lik,matrix) S3method(ranef,brmsfit) S3method(reloo,brmsfit) S3method(reloo,loo) S3method(rename_predictor,brmsterms) S3method(rename_predictor,btl) S3method(rename_predictor,default) S3method(rename_predictor,mvbrmsterms) S3method(rescale_old_mo,brmsfit) S3method(rescale_old_mo,brmsterms) S3method(rescale_old_mo,btl) S3method(rescale_old_mo,btnl) S3method(rescale_old_mo,mvbrmsterms) S3method(residuals,brmsfit) S3method(rhat,brmsfit) S3method(stan_log_lik,brmsterms) S3method(stan_log_lik,family) S3method(stan_log_lik,mixfamily) S3method(stan_log_lik,mvbrmsterms) S3method(stan_predictor,brmsterms) S3method(stan_predictor,btl) S3method(stan_predictor,btnl) 
S3method(stan_predictor,mvbrmsterms) S3method(stancode,brmsfit) S3method(standata,brmsfit) S3method(standata_basis,brmsterms) S3method(standata_basis,btl) S3method(standata_basis,btnl) S3method(standata_basis,default) S3method(standata_basis,mvbrmsterms) S3method(stanplot,brmsfit) S3method(summarise_families,brmsformula) S3method(summarise_families,mvbrmsformula) S3method(summarise_links,brmsformula) S3method(summarise_links,mvbrmsformula) S3method(summary,brmsfit) S3method(summary,customfamily) S3method(summary,family) S3method(summary,mixfamily) S3method(tidy_acef,"NULL") S3method(tidy_acef,acef) S3method(tidy_acef,brmsterms) S3method(tidy_acef,btl) S3method(tidy_acef,btnl) S3method(tidy_acef,default) S3method(tidy_acef,mvbrmsterms) S3method(unclass_draws,default) S3method(unclass_draws,draws_df) S3method(update,brmsfit) S3method(update,brmsfit_multiple) S3method(update,brmsformula) S3method(update,mvbrmsformula) S3method(update_old_family,brmsfamily) S3method(update_old_family,brmsformula) S3method(update_old_family,customfamily) S3method(update_old_family,default) S3method(update_old_family,mixfamily) S3method(update_old_family,mvbrmsformula) S3method(update_re_terms,brmsformula) S3method(update_re_terms,formula) S3method(update_re_terms,mvbrmsformula) S3method(valid_dpars,brmsfit) S3method(valid_dpars,brmsformula) S3method(valid_dpars,brmsterms) S3method(valid_dpars,default) S3method(valid_dpars,mixfamily) S3method(valid_dpars,mvbrmsformula) S3method(valid_dpars,mvbrmsterms) S3method(validate_formula,brmsformula) S3method(validate_formula,default) S3method(validate_formula,mvbrmsformula) S3method(validate_special_prior,brmsprior) S3method(validate_special_prior,brmsterms) S3method(validate_special_prior,btl) S3method(validate_special_prior,btnl) S3method(validate_special_prior,default) S3method(validate_special_prior,mvbrmsterms) S3method(variables,brmsfit) S3method(vars_keep_na,brmsterms) S3method(vars_keep_na,mvbrmsterms) S3method(vcov,brmsfit) 
S3method(waic,brmsfit) export("add_ic<-") export(Beta) export(LOO) export(R2D2) export(VarCorr) export(WAIC) export(acat) export(acformula) export(add_criterion) export(add_ic) export(add_loo) export(add_rstan_model) export(add_waic) export(ar) export(arma) export(as.brmsprior) export(as.mcmc) export(as_draws) export(as_draws_array) export(as_draws_df) export(as_draws_list) export(as_draws_matrix) export(as_draws_rvars) export(asym_laplace) export(autocor) export(bayes_R2) export(bayes_factor) export(bernoulli) export(beta_binomial) export(bf) export(bridge_sampler) export(brm) export(brm_multiple) export(brmsfamily) export(brmsfit_needs_refit) export(brmsformula) export(brmsterms) export(car) export(categorical) export(combine_models) export(compare_ic) export(conditional_effects) export(conditional_smooths) export(control_params) export(cor_ar) export(cor_arma) export(cor_arr) export(cor_bsts) export(cor_car) export(cor_cosy) export(cor_errorsar) export(cor_fixed) export(cor_icar) export(cor_lagsar) export(cor_ma) export(cor_sar) export(cosy) export(cox) export(cratio) export(cs) export(cse) export(cumulative) export(custom_family) export(dasym_laplace) export(data_predictor) export(data_response) export(dbeta_binomial) export(ddirichlet) export(density_ratio) export(dexgaussian) export(dfrechet) export(dgen_extreme_value) export(dhurdle_gamma) export(dhurdle_lognormal) export(dhurdle_negbinomial) export(dhurdle_poisson) export(dinv_gaussian) export(dirichlet) export(dlogistic_normal) export(dmulti_normal) export(dmulti_student_t) export(do_call) export(dshifted_lnorm) export(dskew_normal) export(dstudent_t) export(dvon_mises) export(dwiener) export(dzero_inflated_beta) export(dzero_inflated_beta_binomial) export(dzero_inflated_binomial) export(dzero_inflated_negbinomial) export(dzero_inflated_poisson) export(empty_prior) export(exgaussian) export(exponential) export(expose_functions) export(expp1) export(extract_draws) export(fcor) export(fixef) export(frechet) 
export(gen_extreme_value) export(geometric) export(get_dpar) export(get_prior) export(get_y) export(gp) export(gr) export(horseshoe) export(hurdle_cumulative) export(hurdle_gamma) export(hurdle_lognormal) export(hurdle_negbinomial) export(hurdle_poisson) export(hypothesis) export(inv_logit_scaled) export(is.brmsfit) export(is.brmsfit_multiple) export(is.brmsformula) export(is.brmsprior) export(is.brmsterms) export(is.cor_arma) export(is.cor_brms) export(is.cor_car) export(is.cor_cosy) export(is.cor_fixed) export(is.cor_sar) export(is.mvbrmsformula) export(is.mvbrmsterms) export(kfold) export(kfold_predict) export(lasso) export(launch_shinystan) export(lf) export(log_lik) export(log_posterior) export(logistic_normal) export(logit_scaled) export(logm1) export(lognormal) export(loo) export(loo_R2) export(loo_compare) export(loo_linpred) export(loo_model_weights) export(loo_moment_match) export(loo_predict) export(loo_predictive_interval) export(loo_subsample) export(ma) export(make_conditions) export(make_stancode) export(make_standata) export(marginal_effects) export(marginal_smooths) export(mcmc_plot) export(me) export(mi) export(mixture) export(mm) export(mmc) export(mo) export(model_weights) export(multinomial) export(mvbf) export(mvbind) export(mvbrmsformula) export(nchains) export(ndraws) export(neff_ratio) export(negbinomial) export(ngrps) export(niterations) export(nlf) export(nsamples) export(nuts_params) export(nvariables) export(opencl) export(parnames) export(parse_bf) export(pasym_laplace) export(pbeta_binomial) export(pexgaussian) export(pfrechet) export(pgen_extreme_value) export(phurdle_gamma) export(phurdle_lognormal) export(phurdle_negbinomial) export(phurdle_poisson) export(pinv_gaussian) export(post_prob) export(posterior_average) export(posterior_epred) export(posterior_interval) export(posterior_linpred) export(posterior_predict) export(posterior_samples) export(posterior_smooths) export(posterior_summary) export(posterior_table) 
export(pp_average) export(pp_check) export(pp_expect) export(pp_mixture) export(predictive_error) export(predictive_interval) export(prepare_predictions) export(prior) export(prior_) export(prior_draws) export(prior_samples) export(prior_string) export(prior_summary) export(pshifted_lnorm) export(pskew_normal) export(pstudent_t) export(pvon_mises) export(pzero_inflated_beta) export(pzero_inflated_beta_binomial) export(pzero_inflated_binomial) export(pzero_inflated_negbinomial) export(pzero_inflated_poisson) export(qasym_laplace) export(qfrechet) export(qgen_extreme_value) export(qshifted_lnorm) export(qskew_normal) export(qstudent_t) export(ranef) export(rasym_laplace) export(rbeta_binomial) export(rdirichlet) export(recompile_model) export(reloo) export(rename_pars) export(resp_cat) export(resp_cens) export(resp_dec) export(resp_index) export(resp_mi) export(resp_rate) export(resp_se) export(resp_subset) export(resp_thres) export(resp_trials) export(resp_trunc) export(resp_vint) export(resp_vreal) export(resp_weights) export(restructure) export(rexgaussian) export(rfrechet) export(rgen_extreme_value) export(rhat) export(rinv_gaussian) export(rlogistic_normal) export(rmulti_normal) export(rmulti_student_t) export(rows2labels) export(rshifted_lnorm) export(rskew_normal) export(rstudent_t) export(rvon_mises) export(rwiener) export(s) export(sar) export(save_pars) export(set_mecor) export(set_nl) export(set_prior) export(set_rescor) export(shifted_lognormal) export(skew_normal) export(sratio) export(stancode) export(standata) export(stanplot) export(stanvar) export(student) export(t2) export(theme_black) export(theme_default) export(threading) export(unstr) export(update_adterms) export(validate_newdata) export(validate_prior) export(variables) export(von_mises) export(waic) export(weibull) export(wiener) export(zero_inflated_beta) export(zero_inflated_beta_binomial) export(zero_inflated_binomial) export(zero_inflated_negbinomial) export(zero_inflated_poisson) 
export(zero_one_inflated_beta) import(Rcpp) import(abind) import(ggplot2) import(methods) import(parallel) import(stats) importFrom(bayesplot,log_posterior) importFrom(bayesplot,neff_ratio) importFrom(bayesplot,nuts_params) importFrom(bayesplot,pp_check) importFrom(bayesplot,theme_default) importFrom(bridgesampling,bayes_factor) importFrom(bridgesampling,bridge_sampler) importFrom(bridgesampling,post_prob) importFrom(coda,as.mcmc) importFrom(grDevices,devAskNewPage) importFrom(graphics,plot) importFrom(loo,.compute_point_estimate) importFrom(loo,.ndraws) importFrom(loo,.thin_draws) importFrom(loo,is.loo) importFrom(loo,kfold) importFrom(loo,loo) importFrom(loo,loo_compare) importFrom(loo,loo_model_weights) importFrom(loo,loo_moment_match) importFrom(loo,loo_subsample) importFrom(loo,waic) importFrom(nlme,VarCorr) importFrom(nlme,fixef) importFrom(nlme,ranef) importFrom(posterior,as_draws) importFrom(posterior,as_draws_array) importFrom(posterior,as_draws_df) importFrom(posterior,as_draws_list) importFrom(posterior,as_draws_matrix) importFrom(posterior,as_draws_rvars) importFrom(posterior,nchains) importFrom(posterior,ndraws) importFrom(posterior,niterations) importFrom(posterior,nvariables) importFrom(posterior,rhat) importFrom(posterior,subset_draws) importFrom(posterior,summarize_draws) importFrom(posterior,variables) importFrom(rlang,.data) importFrom(rstantools,bayes_R2) importFrom(rstantools,log_lik) importFrom(rstantools,loo_R2) importFrom(rstantools,loo_linpred) importFrom(rstantools,loo_predict) importFrom(rstantools,loo_predictive_interval) importFrom(rstantools,nsamples) importFrom(rstantools,posterior_epred) importFrom(rstantools,posterior_interval) importFrom(rstantools,posterior_linpred) importFrom(rstantools,posterior_predict) importFrom(rstantools,predictive_error) importFrom(rstantools,predictive_interval) importFrom(rstantools,prior_summary) importFrom(shinystan,launch_shinystan) importMethodsFrom(rstan,summary) 
brms/README.md0000644000176200001440000003644114464713225012514 0ustar liggesusers brms Logo[Stan Logo](https://mc-stan.org/) # brms [![R-CMD-check](https://github.com/paul-buerkner/brms/workflows/R-CMD-check/badge.svg)](https://github.com/paul-buerkner/brms/actions) [![Coverage Status](https://codecov.io/github/paul-buerkner/brms/coverage.svg?branch=master)](https://app.codecov.io/github/paul-buerkner/brms?branch=master) [![CRAN Version](https://www.r-pkg.org/badges/version/brms)](https://cran.r-project.org/package=brms) [![Downloads](https://cranlogs.r-pkg.org/badges/brms?color=brightgreen)](https://CRAN.R-project.org/package=brms) ## Overview The **brms** package provides an interface to fit Bayesian generalized (non-)linear multivariate multilevel models using Stan, which is a C++ package for performing full Bayesian inference (see ). The formula syntax is very similar to that of the package lme4 to provide a familiar and simple interface for performing regression analyses. A wide range of response distributions are supported, allowing users to fit – among others – linear, robust linear, count data, survival, response times, ordinal, zero-inflated, and even self-defined mixture models all in a multilevel context. Further modeling options include non-linear and smooth terms, auto-correlation structures, censored data, missing value imputation, and quite a few more. In addition, all parameters of the response distribution can be predicted in order to perform distributional regression. Multivariate models (i.e., models with multiple response variables) can be fit, as well. Prior specifications are flexible and explicitly encourage users to apply prior distributions that actually reflect their beliefs. Model fit can easily be assessed and compared with posterior predictive checks, cross-validation, and Bayes factors. 
## Resources - [Introduction to brms](https://doi.org/10.18637/jss.v080.i01) (Journal of Statistical Software) - [Advanced multilevel modeling with brms](https://journal.r-project.org/archive/2018/RJ-2018-017/index.html) (The R Journal) - [Website](https://paul-buerkner.github.io/brms/) (Website of brms with documentation and vignettes) - [Blog posts](https://paul-buerkner.github.io/software/brms-blogposts.html) (List of blog posts about brms) - [Ask a question](https://discourse.mc-stan.org/) (Stan Forums on Discourse) - [Open an issue](https://github.com/paul-buerkner/brms/issues) (GitHub issues for bug reports and feature requests) ## How to use brms ``` r library(brms) ``` As a simple example, we use poisson regression to model the seizure counts in epileptic patients to investigate whether the treatment (represented by variable `Trt`) can reduce the seizure counts and whether the effect of the treatment varies with the (standardized) baseline number of seizures a person had before treatment (variable `zBase`). As we have multiple observations per person, a group-level intercept is incorporated to account for the resulting dependency in the data. 
``` r fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson()) ``` The results (i.e., posterior draws) can be investigated using ``` r summary(fit1) #> Family: poisson #> Links: mu = log #> Formula: count ~ zAge + zBase * Trt + (1 | patient) #> Data: epilepsy (Number of observations: 236) #> Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; #> total post-warmup draws = 4000 #> #> Group-Level Effects: #> ~patient (Number of levels: 59) #> Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS #> sd(Intercept) 0.58 0.07 0.46 0.73 1.01 768 1579 #> #> Population-Level Effects: #> Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS #> Intercept 1.77 0.11 1.54 1.99 1.00 753 1511 #> zAge 0.09 0.08 -0.07 0.26 1.00 830 1429 #> zBase 0.70 0.12 0.47 0.95 1.00 678 1389 #> Trt1 -0.26 0.16 -0.59 0.05 1.01 709 1356 #> zBase:Trt1 0.05 0.17 -0.29 0.37 1.01 721 1404 #> #> Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS #> and Tail_ESS are effective sample size measures, and Rhat is the potential #> scale reduction factor on split chains (at convergence, Rhat = 1). ``` On the top of the output, some general information on the model is given, such as family, formula, number of iterations and chains. Next, group-level effects are displayed separately for each grouping factor in terms of standard deviations and (in case of more than one group-level effect per grouping factor; not displayed here) correlations between group-level effects. On the bottom of the output, population-level effects (i.e. regression coefficients) are displayed. If incorporated, autocorrelation effects and family specific parameters (e.g., the residual standard deviation ‘sigma’ in normal models) are also given. 
In general, every parameter is summarized using the mean (‘Estimate’) and the standard deviation (‘Est.Error’) of the posterior distribution as well as two-sided 95% credible intervals (‘l-95% CI’ and ‘u-95% CI’) based on quantiles. We see that the coefficient of `Trt` is negative with a zero overlapping 95%-CI. This indicates that, on average, the treatment may reduce seizure counts by some amount but the evidence based on the data and applied model is not very strong and still insufficient by standard decision rules. Further, we find little evidence that the treatment effect varies with the baseline number of seizures. The last three values (‘ESS_bulk’, ‘ESS_tail’, and ‘Rhat’) provide information on how well the algorithm could estimate the posterior distribution of this parameter. If ‘Rhat’ is considerably greater than 1, the algorithm has not yet converged and it is necessary to run more iterations and / or set stronger priors. To visually investigate the chains as well as the posterior distributions, we can use the `plot` method. If we just want to see results of the regression coefficients of `Trt` and `zBase`, we go for ``` r plot(fit1, variable = c("b_Trt1", "b_zBase")) ``` A more detailed investigation can be performed by running `launch_shinystan(fit1)`. To better understand the relationship of the predictors with the response, I recommend the `conditional_effects` method: ``` r plot(conditional_effects(fit1, effects = "zBase:Trt")) ``` This method uses some prediction functionality behind the scenes, which can also be called directly. Suppose that we want to predict responses (i.e. seizure counts) of a person in the treatment group (`Trt = 1`) and in the control group (`Trt = 0`) with average age and average number of previous seizures. 
Than we can use ``` r newdata <- data.frame(Trt = c(0, 1), zAge = 0, zBase = 0) predict(fit1, newdata = newdata, re_formula = NA) #> Estimate Est.Error Q2.5 Q97.5 #> [1,] 5.90325 2.486249 2 11 #> [2,] 4.59025 2.180262 1 9 ``` We need to set `re_formula = NA` in order not to condition of the group-level effects. While the `predict` method returns predictions of the responses, the `fitted` method returns predictions of the regression line. ``` r fitted(fit1, newdata = newdata, re_formula = NA) #> Estimate Est.Error Q2.5 Q97.5 #> [1,] 5.918847 0.6762827 4.666180 7.308699 #> [2,] 4.554778 0.5144053 3.630642 5.659664 ``` Both methods return the same estimate (up to random error), while the latter has smaller variance, because the uncertainty in the regression line is smaller than the uncertainty in each response. If we want to predict values of the original data, we can just leave the `newdata` argument empty. Suppose, we want to investigate whether there is overdispersion in the model, that is residual variation not accounted for by the response distribution. For this purpose, we include a second group-level intercept that captures possible overdispersion. ``` r fit2 <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson()) ``` We can then go ahead and compare both models via approximate leave-one-out (LOO) cross-validation. ``` r loo(fit1, fit2) #> Output of model 'fit1': #> #> Computed from 4000 by 236 log-likelihood matrix #> #> Estimate SE #> elpd_loo -671.6 35.8 #> p_loo 94.6 13.6 #> looic 1343.3 71.6 #> ------ #> Monte Carlo SE of elpd_loo is NA. #> #> Pareto k diagnostic values: #> Count Pct. Min. n_eff #> (-Inf, 0.5] (good) 209 88.6% 546 #> (0.5, 0.7] (ok) 18 7.6% 125 #> (0.7, 1] (bad) 7 3.0% 23 #> (1, Inf) (very bad) 2 0.8% 12 #> See help('pareto-k-diagnostic') for details. 
#> #> Output of model 'fit2': #> #> Computed from 4000 by 236 log-likelihood matrix #> #> Estimate SE #> elpd_loo -596.2 14.1 #> p_loo 108.5 7.3 #> looic 1192.3 28.3 #> ------ #> Monte Carlo SE of elpd_loo is NA. #> #> Pareto k diagnostic values: #> Count Pct. Min. n_eff #> (-Inf, 0.5] (good) 84 35.6% 755 #> (0.5, 0.7] (ok) 96 40.7% 171 #> (0.7, 1] (bad) 50 21.2% 25 #> (1, Inf) (very bad) 6 2.5% 10 #> See help('pareto-k-diagnostic') for details. #> #> Model comparisons: #> elpd_diff se_diff #> fit2 0.0 0.0 #> fit1 -75.5 26.3 ``` The `loo` output when comparing models is a little verbose. We first see the individual LOO summaries of the two models and then the comparison between them. Since higher `elpd` (i.e., expected log posterior density) values indicate better fit, we see that the model accounting for overdispersion (i.e., `fit2`) fits substantially better. However, we also see in the individual LOO outputs that there are several problematic observations for which the approximations may have not have been very accurate. To deal with this appropriately, we need to fall back to other methods such as `reloo` or `kfold` but this requires the model to be refit several times which takes too long for the purpose of a quick example. The post-processing methods we have shown above are just the tip of the iceberg. For a full list of methods to apply on fitted model objects, type `methods(class = "brmsfit")`. ## Citing brms and related software Developing and maintaining open source software is an important yet often underappreciated contribution to scientific progress. Thus, whenever you are using open source software (or software in general), please make sure to cite it appropriately so that developers get credit for their work. When using brms, please cite one or more of the following publications: - Bürkner P. C. (2017). brms: An R Package for Bayesian Multilevel Models using Stan. *Journal of Statistical Software*. 80(1), 1-28. 
doi.org/10.18637/jss.v080.i01 - Bürkner P. C. (2018). Advanced Bayesian Multilevel Modeling with the R Package brms. *The R Journal*. 10(1), 395-411. doi.org/10.32614/RJ-2018-017 - Bürkner P. C. (2021). Bayesian Item Response Modeling in R with brms and Stan. *Journal of Statistical Software*, 100(5), 1-54. doi.org/10.18637/jss.v100.i05 As brms is a high-level interface to Stan, please additionally cite Stan (see also ): - Stan Development Team. YEAR. Stan Modeling Language Users Guide and Reference Manual, VERSION. - Carpenter B., Gelman A., Hoffman M. D., Lee D., Goodrich B., Betancourt M., Brubaker M., Guo J., Li P., and Riddell A. (2017). Stan: A probabilistic programming language. *Journal of Statistical Software*. 76(1). doi.org/10.18637/jss.v076.i01 Further, brms relies on several other R packages and, of course, on R itself. To find out how to cite R and its packages, use the `citation` function. There are some features of brms which specifically rely on certain packages. The **rstan** package together with **Rcpp** makes Stan conveniently accessible in R. Visualizations and posterior-predictive checks are based on **bayesplot** and **ggplot2**. Approximate leave-one-out cross-validation using `loo` and related methods is done via the **loo** package. Marginal likelihood based methods such as `bayes_factor` are realized by means of the **bridgesampling** package. Splines specified via the `s` and `t2` functions rely on **mgcv**. If you use some of these features, please also consider citing the related packages. ## FAQ ### How do I install brms? To install the latest release version from CRAN use ``` r install.packages("brms") ``` The current developmental version can be downloaded from GitHub via ``` r if (!requireNamespace("remotes")) { install.packages("remotes") } remotes::install_github("paul-buerkner/brms") ``` Because brms is based on Stan, a C++ compiler is required. The program Rtools (available on ) comes with a C++ compiler for Windows. 
On Mac, you should install Xcode. For further instructions on how to get the compilers running, see the prerequisites section on . ### I am new to brms. Where can I start? Detailed instructions and case studies are given in the package’s extensive vignettes. See `vignette(package = "brms")` for an overview. For documentation on formula syntax, families, and prior distributions see `help("brm")`. ### Where do I ask questions, propose a new feature, or report a bug? Questions can be asked on the [Stan forums](https://discourse.mc-stan.org/) on Discourse. To propose a new feature or report a bug, please open an issue on [GitHub](https://github.com/paul-buerkner/brms). ### How can I extract the generated Stan code? If you have already fitted a model, just apply the `stancode` method on the fitted model object. If you just want to generate the Stan code without any model fitting, use the `make_stancode` function. ### Can I avoid compiling models? When you fit your model for the first time with brms, there is currently no way to avoid compilation. However, if you have already fitted your model and want to run it again, for instance with more draws, you can do this without recompilation by using the `update` method. For more details see `help("update.brmsfit")`. ### What is the difference between brms and rstanarm? The rstanarm package is similar to brms in that it also allows to fit regression models using Stan for the backend estimation. Contrary to brms, rstanarm comes with precompiled code to save the compilation time (and the need for a C++ compiler) when fitting a model. However, as brms generates its Stan code on the fly, it offers much more flexibility in model specification than rstanarm. Also, multilevel models are currently fitted a bit more efficiently in brms. For detailed comparisons of brms with other common R packages implementing multilevel models, see `vignette("brms_multilevel")` and `vignette("brms_overview")`. 
brms/data/0000755000176200001440000000000014157566641012145 5ustar liggesusersbrms/data/epilepsy.rda0000644000176200001440000000542713442002443014454 0ustar liggesusersBZh91AY&SY 4>w@h4L*zzAꪦ?O424hh24&&44hL! MF@ M  FL@рahd2ih4L@""&&ELLhڛDGbjD6)4̦G=LDOO&d4z=Fe4<Ѧi3)R%w4jm{\ ̛<0gc1ɑYrr~.&5:kYtsKvN"xLSUPv' ƗSi|SXdgj=w([zDܗ˫n̶}fnMɞr2& Ѿcdh&֛.k &SPg::Pjތ_.olYQ4MQfٰ3L I\79cV83SJi9l4Ɖ8Pf1333i1ߵ->¤-U$ p* FB*IQ\^8 RI%Ta0IQY-kF*82+B8ۦY%i"ʹtlie}dzs-UV,G$#OG#z4d2FTr!TBFh) 4CQ $i@CY Dii! HBC d2B$'9iM]UP%l123456789:9:;<=>?@ABCDDEFGHIJKLMNOPRSTUVWXYZ[\]^_`abcddefghiS5Uu6V KO͐B,U})@]B@x>]y#<-L $̤%TBJfLoƜ""""""""""""""""""""""""*5~-@)zI $JO$,T"BLbRWI!#*@ ‰! ֕'[GdJ{!dD*8rW$TG:WŨ$PPPT'^sĘI3 Eʏ*LEj9#t,tPFTh2^7gl-TmGKF2J- F1.TkHۅȴg #wGxØ5+^=zM&۞ sܪI$I$]vи\/˥۷n!UYB(TUUE {.{)e )`EYeMir {^ZZ۲Z[qJR 0 1x^zUUWd oL0ֲֲֲֲYjU*4 YY]uZ*+Yp0J뮺UUZʡ!!!!!!!!4@”0,0 ",UU[/I$I$@2[Bд/KKKmUUU}1ZI$I$ƀҁm KmUUV$I$I?`Ja/뮵UU\ *I$I$P ,Ey` r'F:c:AkD4ώ~=#xGfSpp=ޝ6 1QԚάOr'2A >6f3%'Y^U%𴪲UMAklMz "*"cQ/UDI(Q@¡b-hK PQ% Eh BT% T+-CJ;M>k 7IV'8Poǽ/#!~--:q~F]֍,|U#\Q7eo+tWb\{nc?T ђ|c|~811Oh NM0't^GcYv'x=0 zN :GmOKy{;S'DN?@|9# ϳY?g0Hek m;\kWVd > 啪HO4s7#)b|ڔ_a˚}dv% {3:pɵ9" v oZS?H"ŹV-瞼y+=i3OH/G"aCW0ݖy%Ke^o 僨%L}𮔯vJt!Q}AIƓ+*8i,Ri- Af@N\:ӷ;;H|8SVvtyGݞGNj!=@͉?k>ٝAMrjMA3AMl &йϟPN"`Gt=L.E8i o,3v\_bd9ŻZۜ`` w-8pS wfra͓:!9jGt2ī|830b8n蘓/ ܔ_9r;AU;S|O0b2`G2`P珸q%mfEx& ”ebbRY F}#Ju ˢ-x< -`!ҽR@؍6 +瘄Sogi?qRWoX90=l{)']CJ?{"(Őbj>Exm̔!ԴBIa9xo?F{*V4F6/r:4M]jI.X^ZhDEJ)_ H`hჃIR(Zqr D M-j׊><̃L4nbhV36IB)p7$M_l$# 0`H0qٹ`X8Bg|cD Tv$ūR$v5$Xbix_2S!0Yj**"ł$@PY"X1U`,X1F( RR#"jL'SHa Hmdd< h'/pg4:r$xLX5bԚ2?fR_!uʮjRbE°q AZ 㲆LحEΟJ4f[DTA(S229}՜b h۔1P.WLВ.p Jbrms/man/0000755000176200001440000000000014504270214011767 5ustar liggesusersbrms/man/draws-brms.Rd0000644000176200001440000000465614213413565014357 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior.R \name{draws-brms} \alias{draws-brms} \alias{as_draws} \alias{as_draws_matrix} \alias{as_draws_array} \alias{as_draws_df} \alias{as_draws_rvars} 
\alias{as_draws_list} \alias{as_draws.brmsfit} \alias{as_draws_matrix.brmsfit} \alias{as_draws_array.brmsfit} \alias{as_draws_df.brmsfit} \alias{as_draws_list.brmsfit} \alias{as_draws_rvars.brmsfit} \title{Transform \code{brmsfit} to \code{draws} objects} \usage{ \method{as_draws}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) \method{as_draws_matrix}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) \method{as_draws_array}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) \method{as_draws_df}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) \method{as_draws_list}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) \method{as_draws_rvars}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) } \arguments{ \item{x}{A \code{brmsfit} object or another \R object for which the methods are defined.} \item{variable}{A character vector providing the variables to extract. By default, all variables are extracted.} \item{regex}{Logical; Should variable should be treated as a (vector of) regular expressions? Any variable in \code{x} matching at least one of the regular expressions will be selected. Defaults to \code{FALSE}.} \item{inc_warmup}{Should warmup draws be included? Defaults to \code{FALSE}.} \item{...}{Arguments passed to individual methods (if applicable).} } \description{ Transform a \code{brmsfit} object to a format supported by the \pkg{posterior} package. } \details{ To subset iterations, chains, or draws, use the \code{\link[posterior:subset_draws]{subset_draws}} method after transforming the \code{brmsfit} to a \code{draws} object. 
} \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson()) # extract posterior draws in an array format (draws_fit <- as_draws_array(fit)) posterior::summarize_draws(draws_fit) # extract only certain variables as_draws_array(fit, variable = "r_patient") as_draws_array(fit, variable = "^b_", regex = TRUE) # extract posterior draws in a random variables format as_draws_rvars(fit) } } \seealso{ \code{\link[posterior:draws]{draws}} \code{\link[posterior:subset_draws]{subset_draws}} } brms/man/restructure.Rd0000644000176200001440000000173714431360315014656 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/restructure.R \name{restructure} \alias{restructure} \title{Restructure Old \code{brmsfit} Objects} \usage{ restructure(x, ...) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{...}{Currently ignored.} } \value{ A \code{brmsfit} object compatible with the latest version of \pkg{brms}. } \description{ Restructure old \code{brmsfit} objects to work with the latest \pkg{brms} version. This function is called internally when applying post-processing methods. However, in order to avoid unnecessary run time caused by the restructuring, I recommend explicitly calling \code{restructure} once per model after updating \pkg{brms}. } \details{ If you are restructuring an old spline model (fitted with brms < 2.19.3) to avoid prediction inconsistencies between machines (see GitHub issue #1465), please make sure to \code{restructure} your model on the machine on which it was originally fitted. } brms/man/make_conditions.Rd0000644000176200001440000000212614213413565015432 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conditional_effects.R \name{make_conditions} \alias{make_conditions} \title{Prepare Fully Crossed Conditions} \usage{ make_conditions(x, vars, ...) 
} \arguments{ \item{x}{An \R object from which to extract the variables that should be part of the conditions.} \item{vars}{Names of the variables that should be part of the conditions.} \item{...}{Arguments passed to \code{\link{rows2labels}}.} } \value{ A \code{data.frame} where each row indicates a condition. } \description{ This is a helper function to prepare fully crossed conditions primarily for use with the \code{conditions} argument of \code{\link{conditional_effects}}. Automatically creates labels for each row in the \code{cond__} column. } \details{ For factor-like variables, all levels are used as conditions. For numeric variables, \code{mean + (-1:1) * SD} are used as conditions. } \examples{ df <- data.frame(x = c("a", "b"), y = rnorm(10)) make_conditions(df, vars = c("x", "y")) } \seealso{ \code{\link{conditional_effects}}, \code{\link{rows2labels}} } brms/man/sar.Rd0000644000176200001440000000350214213413565013050 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{sar} \alias{sar} \title{Spatial simultaneous autoregressive (SAR) structures} \usage{ sar(M, type = "lag") } \arguments{ \item{M}{An object specifying the spatial weighting matrix. Can be either the spatial weight matrix itself or an object of class \code{listw} or \code{nb}, from which the spatial weighting matrix can be computed.} \item{type}{Type of the SAR structure. Either \code{"lag"} (for SAR of the response values) or \code{"error"} (for SAR of the residuals). More information is provided in the 'Details' section.} } \value{ An object of class \code{'sar_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up a spatial simultaneous autoregressive (SAR) term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with SAR terms. 
} \details{ The \code{lagsar} structure implements SAR of the response values: \deqn{y = \rho W y + \eta + e} The \code{errorsar} structure implements SAR of the residuals: \deqn{y = \eta + u, u = \rho W u + e} In the above equations, \eqn{\eta} is the predictor term and \eqn{e} are independent normally or t-distributed residuals. Currently, only families \code{gaussian} and \code{student} support SAR structures. } \examples{ \dontrun{ data(oldcol, package = "spdep") fit1 <- brm(CRIME ~ INC + HOVAL + sar(COL.nb, type = "lag"), data = COL.OLD, data2 = list(COL.nb = COL.nb), chains = 2, cores = 2) summary(fit1) plot(fit1) fit2 <- brm(CRIME ~ INC + HOVAL + sar(COL.nb, type = "error"), data = COL.OLD, data2 = list(COL.nb = COL.nb), chains = 2, cores = 2) summary(fit2) plot(fit2) } } \seealso{ \code{\link{autocor-terms}} } brms/man/AsymLaplace.Rd0000644000176200001440000000266514275436221014471 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{AsymLaplace} \alias{AsymLaplace} \alias{dasym_laplace} \alias{pasym_laplace} \alias{qasym_laplace} \alias{rasym_laplace} \title{The Asymmetric Laplace Distribution} \usage{ dasym_laplace(x, mu = 0, sigma = 1, quantile = 0.5, log = FALSE) pasym_laplace( q, mu = 0, sigma = 1, quantile = 0.5, lower.tail = TRUE, log.p = FALSE ) qasym_laplace( p, mu = 0, sigma = 1, quantile = 0.5, lower.tail = TRUE, log.p = FALSE ) rasym_laplace(n, mu = 0, sigma = 1, quantile = 0.5) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of locations.} \item{sigma}{Vector of scales.} \item{quantile}{Asymmetry parameter corresponding to quantiles in quantile regression (hence the name).} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). 
Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{p}{Vector of probabilities.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, quantile function and random generation for the asymmetric Laplace distribution with location \code{mu}, scale \code{sigma} and asymmetry parameter \code{quantile}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/coef.brmsfit.Rd0000644000176200001440000000344614213413565014653 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{coef.brmsfit} \alias{coef.brmsfit} \title{Extract Model Coefficients} \usage{ \method{coef}{brmsfit}(object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link{fixef.brmsfit}} and \code{\link{ranef.brmsfit}}.} } \value{ A list of 3D arrays (one per grouping factor). If \code{summary} is \code{TRUE}, the 1st dimension contains the factor levels, the 2nd dimension contains the summary statistics (see \code{\link{posterior_summary}}), and the 3rd dimension contains the group-level effects. 
If \code{summary} is \code{FALSE}, the 1st dimension contains the posterior draws, the 2nd dimension contains the factor levels, and the 3rd dimension contains the group-level effects. } \description{ Extract model coefficients, which are the sum of population-level effects and corresponding group-level effects } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), data = epilepsy, family = gaussian(), chains = 2) ## extract population and group-level coefficients separately fixef(fit) ranef(fit) ## extract combined coefficients coef(fit) } } brms/man/get_dpar.Rd0000644000176200001440000000337714213413565014062 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-helpers.R \name{get_dpar} \alias{get_dpar} \title{Draws of a Distributional Parameter} \usage{ get_dpar(prep, dpar, i = NULL, inv_link = NULL) } \arguments{ \item{prep}{A 'brmsprep' or 'mvbrmsprep' object created by \code{\link[brms:prepare_predictions.brmsfit]{prepare_predictions}}.} \item{dpar}{Name of the distributional parameter.} \item{i}{The observation numbers for which predictions shall be extracted. If \code{NULL} (the default), all observation will be extracted. Ignored if \code{dpar} is not predicted.} \item{inv_link}{Should the inverse link function be applied? If \code{NULL} (the default), the value is chosen internally. In particular, \code{inv_link} is \code{TRUE} by default for custom families.} } \value{ If the parameter is predicted and \code{i} is \code{NULL} or \code{length(i) > 1}, an \code{S x N} matrix. If the parameter it not predicted or \code{length(i) == 1}, a vector of length \code{S}. Here \code{S} is the number of draws and \code{N} is the number of observations or length of \code{i} if specified. } \description{ Get draws of a distributional parameter from a \code{brmsprep} or \code{mvbrmsprep} object. This function is primarily useful when developing custom families or packages depending on \pkg{brms}. 
This function lets callers easily handle both the case when the distributional parameter is predicted directly, via a (non-)linear predictor or fixed to a constant. See the vignette \code{vignette("brms_customfamilies")} for an example use case. } \examples{ \dontrun{ posterior_predict_my_dist <- function(i, prep, ...) { mu <- brms::get_dpar(prep, "mu", i = i) mypar <- brms::get_dpar(prep, "mypar", i = i) my_rng(mu, mypar) } } } brms/man/mi.Rd0000644000176200001440000000404514224021465012667 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-sp.R \name{mi} \alias{mi} \title{Predictors with Missing Values in \pkg{brms} Models} \usage{ mi(x, idx = NA) } \arguments{ \item{x}{The variable containing missing values.} \item{idx}{An optional variable containing indices of observations in `x` that are to be used in the model. This is mostly relevant in partially subsetted models (via \code{resp_subset}) but may also have other applications that I haven't thought of.} } \description{ Specify predictor term with missing values in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model. For documentation on how to specify missing values in response variables, see \code{\link{resp_mi}}. } \details{ For detailed documentation see \code{help(brmsformula)}. 
} \examples{ \dontrun{ data("nhanes", package = "mice") N <- nrow(nhanes) # simple model with missing data bform1 <- bf(bmi | mi() ~ age * mi(chl)) + bf(chl | mi() ~ age) + set_rescor(FALSE) fit1 <- brm(bform1, data = nhanes) summary(fit1) plot(conditional_effects(fit1, resp = "bmi"), ask = FALSE) loo(fit1, newdata = na.omit(fit1$data)) # simulate some measurement noise nhanes$se <- rexp(N, 2) # measurement noise can be handled within 'mi' terms # with or without the presence of missing values bform2 <- bf(bmi | mi() ~ age * mi(chl)) + bf(chl | mi(se) ~ age) + set_rescor(FALSE) fit2 <- brm(bform2, data = nhanes) summary(fit2) plot(conditional_effects(fit2, resp = "bmi"), ask = FALSE) # 'mi' terms can also be used when some responses are subsetted nhanes$sub <- TRUE nhanes$sub[1:2] <- FALSE nhanes$id <- 1:N nhanes$idx <- sample(3:N, N, TRUE) # this requires the addition term 'index' being specified # in the subsetted part of the model bform3 <- bf(bmi | mi() ~ age * mi(chl, idx)) + bf(chl | mi(se) + subset(sub) + index(id) ~ age) + set_rescor(FALSE) fit3 <- brm(bform3, data = nhanes) summary(fit3) plot(conditional_effects(fit3, resp = "bmi"), ask = FALSE) } } \seealso{ \code{\link{brmsformula}} } brms/man/threading.Rd0000644000176200001440000000451614213413565014236 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/backends.R \name{threading} \alias{threading} \title{Threading in Stan} \usage{ threading(threads = NULL, grainsize = NULL, static = FALSE) } \arguments{ \item{threads}{Number of threads to use in within-chain parallelization.} \item{grainsize}{Number of observations evaluated together in one chunk on one of the CPUs used for threading. If \code{NULL} (the default), \code{grainsize} is currently chosen as \code{max(100, N / (2 * threads))}, where \code{N} is the number of observations in the data. This default is experimental and may change in the future without prior notice.} \item{static}{Logical. 
Apply the static (non-adaptive) version of \code{reduce_sum}? Defaults to \code{FALSE}. Setting it to \code{TRUE} is required to achieve exact reproducibility of the model results (if the random seed is set as well).} } \value{ A \code{brmsthreads} object which can be passed to the \code{threads} argument of \code{brm} and related functions. } \description{ Use threads for within-chain parallelization in \pkg{Stan} via the \pkg{brms} interface. Within-chain parallelization is experimental! We recommend its use only if you are experienced with Stan's \code{reduce_sum} function and have a slow running model that cannot be sped up by any other means. } \details{ The adaptive scheduling procedure used by \code{reduce_sum} will prevent the results to be exactly reproducible even if you set the random seed. If you need exact reproducibility, you have to set argument \code{static = TRUE} which may reduce efficiency a bit. To ensure that chunks (whose size is defined by \code{grainsize}) require roughly the same amount of computing time, we recommend storing observations in random order in the data. At least, please avoid sorting observations after the response values. This is because the latter often cause variations in the computing time of the pointwise log-likelihood, which makes up a big part of the parallelized code. 
} \examples{ \dontrun{ # this model just serves as an illustration # threading may not actually speed things up here fit <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = negbinomial(), chains = 1, threads = threading(2, grainsize = 100), backend = "cmdstanr") summary(fit) } } brms/man/Frechet.Rd0000644000176200001440000000235414275436221013651 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{Frechet} \alias{Frechet} \alias{dfrechet} \alias{pfrechet} \alias{qfrechet} \alias{rfrechet} \title{The Frechet Distribution} \usage{ dfrechet(x, loc = 0, scale = 1, shape = 1, log = FALSE) pfrechet(q, loc = 0, scale = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) qfrechet(p, loc = 0, scale = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) rfrechet(n, loc = 0, scale = 1, shape = 1) } \arguments{ \item{x, q}{Vector of quantiles.} \item{loc}{Vector of locations.} \item{scale}{Vector of scales.} \item{shape}{Vector of shapes.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{p}{Vector of probabilities.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, quantile function and random generation for the Frechet distribution with location \code{loc}, scale \code{scale}, and shape \code{shape}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. 
} brms/man/predictive_error.brmsfit.Rd0000644000176200001440000000513214417771074017310 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictive_error.R \name{predictive_error.brmsfit} \alias{predictive_error.brmsfit} \alias{predictive_error} \title{Posterior Draws of Predictive Errors} \usage{ \method{predictive_error}{brmsfit}( object, newdata = NULL, re_formula = NULL, re.form = NULL, method = "posterior_predict", resp = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{re.form}{Alias of \code{re_formula}.} \item{method}{Method used to obtain predictions. Can be set to \code{"posterior_predict"} (the default), \code{"posterior_epred"}, or \code{"posterior_linpred"}. For more details, see the respective function documentations.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. 
Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An S x N \code{array} of predictive error draws, where S is the number of posterior draws and N is the number of observations. } \description{ Compute posterior draws of predictive errors, that is, observed minus predicted responses. Can be performed for the data used to fit the model (posterior predictive checks) or for new data. } \examples{ \dontrun{ ## fit a model fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, cores = 2) ## extract predictive errors pe <- predictive_error(fit) str(pe) } } brms/man/posterior_epred.brmsfit.Rd0000644000176200001440000001000514277205374017140 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_epred.R \name{posterior_epred.brmsfit} \alias{posterior_epred.brmsfit} \alias{pp_expect} \alias{posterior_epred} \title{Draws from the Expected Value of the Posterior Predictive Distribution} \usage{ \method{posterior_epred}{brmsfit}( object, newdata = NULL, re_formula = NULL, re.form = NULL, resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. 
If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{re.form}{Alias of \code{re_formula}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{dpar}{Optional name of a predicted distributional parameter. If specified, expected predictions of this parameter are returned.} \item{nlpar}{Optional name of a predicted non-linear parameter. If specified, expected predictions of this parameter are returned.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An \code{array} of draws. For categorical and ordinal models, the output is an S x N x C array. Otherwise, the output is an S x N matrix, where S is the number of posterior draws, N is the number of observations, and C is the number of categories. In multivariate models, an additional dimension is added to the output which indexes along the different response variables. } \description{ Compute posterior draws of the expected value of the posterior predictive distribution. Can be performed for the data used to fit the model (posterior predictive checks) or for new data. By definition, these predictions have smaller variance than the posterior predictions performed by the \code{\link{posterior_predict.brmsfit}} method. 
This is because only the uncertainty in the expected value of the posterior predictive distribution is incorporated in the draws computed by \code{posterior_epred} while the residual error is ignored there. However, the estimated means of both methods averaged across draws should be very similar. } \details{ \code{NA} values within factors in \code{newdata}, are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding. In multilevel models, it is possible to allow new levels of grouping factors to be used in the predictions. This can be controlled via argument \code{allow_new_levels}. New levels can be sampled in multiple ways, which can be controlled via argument \code{sample_new_levels}. Both of these arguments are documented in \code{\link{prepare_predictions}} along with several other useful arguments to control specific aspects of the predictions. } \examples{ \dontrun{ ## fit a model fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) ## compute expected predictions ppe <- posterior_epred(fit) str(ppe) } } brms/man/update.brmsfit.Rd0000644000176200001440000000606214264526224015221 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/update.R \name{update.brmsfit} \alias{update.brmsfit} \title{Update \pkg{brms} models} \usage{ \method{update}{brmsfit}(object, formula., newdata = NULL, recompile = NULL, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{formula.}{Changes to the formula; for details see \code{\link{update.formula}} and \code{\link{brmsformula}}.} \item{newdata}{Optional \code{data.frame} to update the model with new data. Data-dependent default priors will not be updated automatically.} \item{recompile}{Logical, indicating whether the Stan model should be recompiled. 
If \code{NULL} (the default), \code{update} tries to figure out internally, if recompilation is necessary. Setting it to \code{FALSE} will cause all Stan code changing arguments to be ignored.} \item{...}{Other arguments passed to \code{\link{brm}}.} } \description{ This method allows to update an existing \code{brmsfit} object. } \details{ When updating a \code{brmsfit} created with the \pkg{cmdstanr} backend in a different \R session, a recompilation will be triggered because by default, \pkg{cmdstanr} writes the model executable to a temporary directory. To avoid that, set option \code{"cmdstanr_write_stan_file_dir"} to a nontemporary path of your choice before creating the original \code{brmsfit} (see section 'Examples' below). } \examples{ \dontrun{ fit1 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), data = kidney, family = gaussian("log")) summary(fit1) ## remove effects of 'disease' fit2 <- update(fit1, formula. = ~ . - disease) summary(fit2) ## remove the group specific term of 'patient' and ## change the data (just take a subset in this example) fit3 <- update(fit1, formula. = ~ . 
- (1|patient), newdata = kidney[1:38, ]) summary(fit3) ## use another family and add population-level priors fit4 <- update(fit1, family = weibull(), init = "0", prior = set_prior("normal(0,5)")) summary(fit4) ## to avoid a recompilation when updating a 'cmdstanr'-backend fit in a fresh ## R session, set option 'cmdstanr_write_stan_file_dir' before creating the ## initial 'brmsfit' ## CAUTION: the following code creates some files in the current working ## directory: two 'model_.stan' files, one 'model_(.exe)' ## executable, and one 'fit_cmdstanr_.rds' file set.seed(7) fname <- paste0("fit_cmdstanr_", sample.int(.Machine$integer.max, 1)) options(cmdstanr_write_stan_file_dir = getwd()) fit_cmdstanr <- brm(rate ~ conc + state, data = Puromycin, backend = "cmdstanr", file = fname) # now restart the R session and run the following (after attaching 'brms') set.seed(7) fname <- paste0("fit_cmdstanr_", sample.int(.Machine$integer.max, 1)) fit_cmdstanr <- brm(rate ~ conc + state, data = Puromycin, backend = "cmdstanr", file = fname) upd_cmdstanr <- update(fit_cmdstanr, formula. = rate ~ conc) } } brms/man/plot.brmsfit.Rd0000644000176200001440000000476614213413565014723 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot.brmsfit} \alias{plot.brmsfit} \title{Trace and Density Plots for MCMC Draws} \usage{ \method{plot}{brmsfit}( x, pars = NA, combo = c("dens", "trace"), N = 5, variable = NULL, regex = FALSE, fixed = FALSE, theme = NULL, plot = TRUE, ask = TRUE, newpage = TRUE, ... ) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{pars}{Deprecated alias of \code{variable}. Names of the parameters to plot, as given by a character vector or a regular expression.} \item{combo}{A character vector with at least two elements. 
Each element of \code{combo} corresponds to a column in the resulting graphic and should be the name of one of the available \code{\link[bayesplot:MCMC-overview]{MCMC}} functions (omitting the \code{mcmc_} prefix).} \item{N}{The number of parameters plotted per page.} \item{variable}{Names of the variables (parameters) to plot, as given by a character vector or a regular expression (if \code{regex = TRUE}). By default, a hopefully not too large selection of variables is plotted.} \item{regex}{Logical; Indicates whether \code{variable} should be treated as regular expressions. Defaults to \code{FALSE}.} \item{fixed}{(Deprecated) Indicates whether parameter names should be matched exactly (\code{TRUE}) or treated as regular expressions (\code{FALSE}). Default is \code{FALSE} and only works with argument \code{pars}.} \item{theme}{A \code{\link[ggplot2:theme]{theme}} object modifying the appearance of the plots. For some basic themes see \code{\link[ggplot2:ggtheme]{ggtheme}} and \code{\link[bayesplot:theme_default]{theme_default}}.} \item{plot}{Logical; indicates if plots should be plotted directly in the active graphic device. Defaults to \code{TRUE}.} \item{ask}{Logical; indicates if the user is prompted before a new page is plotted. Only used if \code{plot} is \code{TRUE}.} \item{newpage}{Logical; indicates if the first set of plots should be plotted to a new page. Only used if \code{plot} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link[bayesplot:MCMC-combos]{mcmc_combo}}.} } \value{ An invisible list of \code{\link[gtable:gtable]{gtable}} objects. 
} \description{ Trace and Density Plots for MCMC Draws } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|visit), data = epilepsy, family = "poisson") plot(fit) ## plot population-level effects only plot(fit, variable = "^b_", regex = TRUE) } } brms/man/summary.brmsfit.Rd0000644000176200001440000000305514213413565015430 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{summary.brmsfit} \alias{summary.brmsfit} \title{Create a summary of a fitted model represented by a \code{brmsfit} object} \usage{ \method{summary}{brmsfit}( object, priors = FALSE, prob = 0.95, robust = FALSE, mc_se = FALSE, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{priors}{Logical; Indicating if priors should be included in the summary. Default is \code{FALSE}.} \item{prob}{A value between 0 and 1 indicating the desired probability to be covered by the uncertainty intervals. The default is 0.95.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead.} \item{mc_se}{Logical; Indicating if the uncertainty in \code{Estimate} caused by the MCMC sampling should be shown in the summary. Defaults to \code{FALSE}.} \item{...}{Other potential arguments} } \description{ Create a summary of a fitted model represented by a \code{brmsfit} object } \details{ The convergence diagnostics \code{Rhat}, \code{Bulk_ESS}, and \code{Tail_ESS} are described in detail in Vehtari et al. (2020). } \references{ Aki Vehtari, Andrew Gelman, Daniel Simpson, Bob Carpenter, and Paul-Christian Bürkner (2020). Rank-normalization, folding, and localization: An improved R-hat for assessing convergence of MCMC. *Bayesian Analysis*. 1–28. 
doi:10.1214/20-BA1221
See also \code{\link{brmsformula}} and \code{\link{addition-terms}} for more details.} \item{loop}{Logical; Should the likelihood be evaluated via a loop (\code{TRUE}; the default) over observations in Stan? If \code{FALSE}, the Stan code will be written in a vectorized manner over observations if possible.} \item{specials}{A character vector of special options to enable for this custom family. Currently for internal use only.} \item{threshold}{Optional threshold type for custom ordinal families. Ignored for non-ordinal families.} \item{log_lik}{Optional function to compute log-likelihood values of the model in \R. This is only relevant if one wants to ensure compatibility with method \code{\link[brms:log_lik.brmsfit]{log_lik}}.} \item{posterior_predict}{Optional function to compute posterior prediction of the model in \R. This is only relevant if one wants to ensure compatibility with method \code{\link[brms:posterior_predict.brmsfit]{posterior_predict}}.} \item{posterior_epred}{Optional function to compute expected values of the posterior predictive distribution of the model in \R. This is only relevant if one wants to ensure compatibility with method \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}}.} \item{predict}{Deprecated alias of `posterior_predict`.} \item{fitted}{Deprecated alias of `posterior_epred`.} \item{env}{An \code{\link{environment}} in which certain post-processing functions related to the custom family can be found, if there were not directly passed to \code{custom_family}. This is only relevant if one wants to ensure compatibility with the methods \code{\link[brms:log_lik.brmsfit]{log_lik}}, \code{\link[brms:posterior_predict.brmsfit]{posterior_predict}}, or \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}}. By default, \code{env} is the environment from which \code{custom_family} is called.} } \value{ An object of class \code{customfamily} inheriting from class \code{\link{brmsfamily}}. 
} \description{ Define custom families (i.e. response distribution) for use in \pkg{brms} models. It allows users to benefit from the modeling flexibility of \pkg{brms}, while applying their self-defined likelihood functions. All of the post-processing methods for \code{brmsfit} objects can be made compatible with custom families. See \code{vignette("brms_customfamilies")} for more details. For a list of built-in families see \code{\link{brmsfamily}}. } \details{ The corresponding probability density or mass \code{Stan} functions need to have the same name as the custom family. That is if a family is called \code{myfamily}, then the \pkg{Stan} functions should be called \code{myfamily_lpdf} or \code{myfamily_lpmf} depending on whether it defines a continuous or discrete distribution. } \examples{ \dontrun{ ## demonstrate how to fit a beta-binomial model ## generate some fake data phi <- 0.7 n <- 300 z <- rnorm(n, sd = 0.2) ntrials <- sample(1:10, n, replace = TRUE) eta <- 1 + z mu <- exp(eta) / (1 + exp(eta)) a <- mu * phi b <- (1 - mu) * phi p <- rbeta(n, a, b) y <- rbinom(n, ntrials, p) dat <- data.frame(y, z, ntrials) # define a custom family beta_binomial2 <- custom_family( "beta_binomial2", dpars = c("mu", "phi"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = "vint1[n]" ) # define the corresponding Stan density function stan_density <- " real beta_binomial2_lpmf(int y, real mu, real phi, int N) { return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi); } " stanvars <- stanvar(scode = stan_density, block = "functions") # fit the model fit <- brm(y | vint(ntrials) ~ z, data = dat, family = beta_binomial2, stanvars = stanvars) summary(fit) # define a *vectorized* custom family (no loop over observations) # notice also that 'vint' no longer has an observation index beta_binomial2_vec <- custom_family( "beta_binomial2", dpars = c("mu", "phi"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = "vint1", loop = FALSE ) # define the 
corresponding Stan density function stan_density_vec <- " real beta_binomial2_lpmf(int[] y, vector mu, real phi, int[] N) { return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi); } " stanvars_vec <- stanvar(scode = stan_density_vec, block = "functions") # fit the model fit_vec <- brm(y | vint(ntrials) ~ z, data = dat, family = beta_binomial2_vec, stanvars = stanvars_vec) summary(fit_vec) } } \seealso{ \code{\link{brmsfamily}}, \code{\link{brmsformula}}, \code{\link{stanvar}} } brms/man/s.Rd0000644000176200001440000000343014363466275012541 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-sm.R \name{s} \alias{s} \alias{t2} \title{Defining smooths in \pkg{brms} formulas} \usage{ s(...) t2(...) } \arguments{ \item{...}{Arguments passed to \code{\link[mgcv:s]{mgcv::s}} or \code{\link[mgcv:t2]{mgcv::t2}}.} } \description{ Functions used in definition of smooth terms within a model formulas. The function does not evaluate a (spline) smooth - it exists purely to help set up a model using spline based smooths. } \details{ The function defined here are just simple wrappers of the respective functions of the \pkg{mgcv} package. When using them, please cite the appropriate references obtained via \code{citation("mgcv")}. \pkg{brms} uses the "random effects" parameterization of smoothing splines as explained in \code{\link[mgcv:gamm]{mgcv::gamm}}. A nice tutorial on this topic can be found in Pedersen et al. (2019). The answers provided in this \href{https://discourse.mc-stan.org/t/better-priors-non-flat-for-gams-brms/23012/4}{Stan discourse post} may also be helpful. 
} \examples{ \dontrun{ # simulate some data dat <- mgcv::gamSim(1, n = 200, scale = 2) # fit univariate smooths for all predictors fit1 <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat, chains = 2) summary(fit1) plot(conditional_smooths(fit1), ask = FALSE) # fit a more complicated smooth model fit2 <- brm(y ~ t2(x0, x1) + s(x2, by = x3), data = dat, chains = 2) summary(fit2) plot(conditional_smooths(fit2), ask = FALSE) } } \references{ Pedersen, E. J., Miller, D. L., Simpson, G. L., & Ross, N. (2019). Hierarchical generalized additive models in ecology: an introduction with mgcv. PeerJ. } \seealso{ \code{\link{brmsformula}}, \code{\link[mgcv:s]{mgcv::s}}, \code{\link[mgcv:t2]{mgcv::t2}} } brms/man/get_y.Rd0000644000176200001440000000163214213413565013374 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-response.R \name{get_y} \alias{get_y} \title{Extract response values} \usage{ get_y(x, resp = NULL, sort = FALSE, warn = FALSE, ...) } \arguments{ \item{x}{A \code{\link{brmsfit}} object.} \item{resp}{Optional names of response variables for which to extract values.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{warn}{For internal use only.} \item{...}{Further arguments passed to \code{\link{standata}}.} } \value{ Returns a vector of response values for univariate models and a matrix of response values with one column per response variable for multivariate models. } \description{ Extract response values from a \code{\link{brmsfit}} object. 
} \keyword{internal} brms/man/post_prob.brmsfit.Rd0000644000176200001440000000533114213413565015741 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bridgesampling.R \name{post_prob.brmsfit} \alias{post_prob.brmsfit} \alias{post_prob} \title{Posterior Model Probabilities from Marginal Likelihoods} \usage{ \method{post_prob}{brmsfit}(x, ..., prior_prob = NULL, model_names = NULL) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{prior_prob}{Numeric vector with prior model probabilities. If omitted, a uniform prior is used (i.e., all models are equally likely a priori). The default \code{NULL} corresponds to equal prior model weights.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \description{ Compute posterior model probabilities from marginal likelihoods. The \code{brmsfit} method is just a thin wrapper around the corresponding method for \code{bridge} objects. } \details{ Computing the marginal likelihood requires samples of all variables defined in Stan's \code{parameters} block to be saved. Otherwise \code{post_prob} cannot be computed. Thus, please set \code{save_all_pars = TRUE} in the call to \code{brm}, if you are planning to apply \code{post_prob} to your models. The computation of model probabilities based on bridge sampling requires a lot more posterior samples than usual. A good conservative rule of thump is perhaps 10-fold more samples (read: the default of 4000 samples may not be enough in many cases). If not enough posterior samples are provided, the bridge sampling algorithm tends to be unstable leading to considerably different results each time it is run. 
We thus recommend running \code{post_prob} multiple times to check the stability of the results. More details are provided under \code{\link[bridgesampling:post_prob]{bridgesampling::post_prob}}. } \examples{ \dontrun{ # model with the treatment effect fit1 <- brm( count ~ zAge + zBase + Trt, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_all_pars = TRUE ) summary(fit1) # model without the treatent effect fit2 <- brm( count ~ zAge + zBase, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_all_pars = TRUE ) summary(fit2) # compute the posterior model probabilities post_prob(fit1, fit2) # specify prior model probabilities post_prob(fit1, fit2, prior_prob = c(0.8, 0.2)) } } \seealso{ \code{ \link[brms:bridge_sampler.brmsfit]{bridge_sampler}, \link[brms:bayes_factor.brmsfit]{bayes_factor} } } brms/man/brm_multiple.Rd0000644000176200001440000002505514415240707014765 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brm_multiple.R \name{brm_multiple} \alias{brm_multiple} \title{Run the same \pkg{brms} model on multiple datasets} \usage{ brm_multiple( formula, data, family = gaussian(), prior = NULL, data2 = NULL, autocor = NULL, cov_ranef = NULL, sample_prior = c("no", "yes", "only"), sparse = NULL, knots = NULL, stanvars = NULL, stan_funs = NULL, silent = 1, recompile = FALSE, combine = TRUE, fit = NA, algorithm = getOption("brms.algorithm", "sampling"), seed = NA, file = NULL, file_compress = TRUE, file_refit = getOption("brms.file_refit", "never"), ... ) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to that classes): A symbolic description of the model to be fitted. 
The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{A \emph{list} of data.frames each of which will be used to fit a separate model. Alternatively, a \code{mids} object from the \pkg{mice} package.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family. Every family function has a \code{link} argument allowing to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{prior}{One or more \code{brmsprior} objects created by \code{\link{set_prior}} or related functions and combined using the \code{c} method or the \code{+} operator. See also \code{\link{get_prior}} for more help.} \item{data2}{A \emph{list} of named lists each of which will be used to fit a separate model. Each of the named lists contains objects representing data which cannot be passed via argument \code{data} (see \code{\link{brm}} for examples). The length of the outer list should match the length of the list passed to the \code{data} argument.} \item{autocor}{(Deprecated) An optional \code{\link{cor_brms}} object describing the correlation structure within the response variable (i.e., the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for a description of the available correlation structures. Defaults to \code{NULL}, corresponding to no correlations. In multivariate models, \code{autocor} might also be a list of autocorrelation structures. It is now recommend to specify autocorrelation terms directly within \code{formula}. 
See \code{\link{brmsformula}} for more details.} \item{cov_ranef}{(Deprecated) A list of matrices that are proportional to the (within) covariance structure of the group-level effects. The names of the matrices should correspond to columns in \code{data} that are used as grouping factors. All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among others to model pedigrees and phylogenetic effects. It is now recommended to specify those matrices in the formula interface using the \code{\link{gr}} and related functions. See \code{vignette("brms_phylogenetics")} for more details.} \item{sample_prior}{Indicate if draws from priors should be drawn additionally to the posterior draws. Options are \code{"no"} (the default), \code{"yes"}, and \code{"only"}. Among others, these draws can be used to calculate Bayes factors for point hypotheses via \code{\link{hypothesis}}. Please note that improper priors are not sampled, including the default improper priors used by \code{brm}. See \code{\link{set_prior}} on how to set (proper) priors. Please also note that prior draws for the overall intercept are not obtained by default for technical reasons. See \code{\link{brmsformula}} how to obtain prior draws for the intercept. If \code{sample_prior} is set to \code{"only"}, draws are drawn solely from the priors ignoring the likelihood, which allows among others to generate draws from the prior predictive distribution. In this case, all parameters must have proper priors.} \item{sparse}{(Deprecated) Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. Sampling speed is currently not improved or even slightly decreased. 
It is now recommended to use the \code{sparse} argument of \code{\link{brmsformula}} and related functions.} \item{knots}{Optional list containing user specified knot values to be used for basis construction of smoothing terms. See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{stanvars}{An optional \code{stanvars} object generated by function \code{\link{stanvar}} to define additional variables for use in \pkg{Stan}'s program blocks.} \item{stan_funs}{(Deprecated) An optional character string containing self-defined \pkg{Stan} functions, which will be included in the functions block of the generated \pkg{Stan} code. It is now recommended to use the \code{stanvars} argument for this purpose instead.} \item{silent}{Verbosity level between \code{0} and \code{2}. If \code{1} (the default), most of the informational messages of compiler and sampler are suppressed. If \code{2}, even more messages are suppressed. The actual sampling progress is still printed. Set \code{refresh = 0} to turn this off as well. If using \code{backend = "rstan"} you can also set \code{open_progress = FALSE} to prevent opening additional progress bars.} \item{recompile}{Logical, indicating whether the Stan model should be recompiled for every imputed data set. Defaults to \code{FALSE}. If \code{NULL}, \code{brm_multiple} tries to figure out internally, if recompilation is necessary, for example because data-dependent priors have changed. Using the default of no recompilation should be fine in most cases.} \item{combine}{Logical; Indicates if the fitted models should be combined into a single fitted model object via \code{\link{combine_models}}. Defaults to \code{TRUE}.} \item{fit}{An instance of S3 class \code{brmsfit_multiple} derived from a previous fit; defaults to \code{NA}. If \code{fit} is of class \code{brmsfit_multiple}, the compiled model associated with the fitted result is re-used and all arguments modifying the model code or data are ignored. 
It is not recommended to use this argument directly, but to call the \code{\link[brms:update.brmsfit_multiple]{update}} method, instead.} \item{algorithm}{Character string naming the estimation approach to use. Options are \code{"sampling"} for MCMC (the default), \code{"meanfield"} for variational inference with independent normal distributions, \code{"fullrank"} for variational inference with a multivariate normal distribution, or \code{"fixed_param"} for sampling from fixed parameter values. Can be set globally for the current \R session via the \code{"brms.algorithm"} option (see \code{\link{options}}).} \item{seed}{The seed for random number generation to make results reproducible. If \code{NA} (the default), \pkg{Stan} will set the seed randomly.} \item{file}{Either \code{NULL} or a character string. In the latter case, the fitted model object is saved via \code{\link{saveRDS}} in a file named after the string supplied in \code{file}. The \code{.rds} extension is added automatically. If the file already exists, \code{brm} will load and return the saved model object instead of refitting the model. Unless you specify the \code{file_refit} argument as well, the existing files won't be overwritten, you have to manually remove the file in order to refit and save the model under an existing file name. The file name is stored in the \code{brmsfit} object for later usage.} \item{file_compress}{Logical or a character string, specifying one of the compression algorithms supported by \code{\link{saveRDS}}. If the \code{file} argument is provided, this compression will be used when saving the fitted model object.} \item{file_refit}{Modifies when the fit stored via the \code{file} argument is re-used. Can be set globally for the current \R session via the \code{"brms.file_refit"} option (see \code{\link{options}}). For \code{"never"} (default) the fit is always loaded if it exists and fitting is skipped. For \code{"always"} the model is always refitted. 
If set to \code{"on_change"}, brms will refit the model if model, data or algorithm as passed to Stan differ from what is stored in the file. This also covers changes in priors, \code{sample_prior}, \code{stanvars}, covariance structure, etc. If you believe there was a false positive, you can use \code{\link{brmsfit_needs_refit}} to see why refit is deemed necessary. Refit will not be triggered for changes in additional parameters of the fit (e.g., initial values, number of iterations, control arguments, ...). A known limitation is that a refit will be triggered if within-chain parallelization is switched on/off.} \item{...}{Further arguments passed to \code{\link{brm}}.} } \value{ If \code{combine = TRUE} a \code{brmsfit_multiple} object, which inherits from class \code{brmsfit} and behaves essentially the same. If \code{combine = FALSE} a list of \code{brmsfit} objects. } \description{ Run the same \pkg{brms} model on multiple datasets and then combine the results into one fitted model object. This is useful in particular for multiple missing value imputation, where the same model is fitted on multiple imputed data sets. Models can be run in parallel using the \pkg{future} package. } \details{ The combined model may issue false positive convergence warnings, as the MCMC chains corresponding to different datasets may not necessarily overlap, even if each of the original models did converge. To find out whether each of the original models converged, investigate \code{fit$rhats}, where \code{fit} denotes the output of \code{brm_multiple}. 
} \examples{ \dontrun{ library(mice) imp <- mice(nhanes2) # fit the model using mice and lm fit_imp1 <- with(lm(bmi ~ age + hyp + chl), data = imp) summary(pool(fit_imp1)) # fit the model using brms fit_imp2 <- brm_multiple(bmi ~ age + hyp + chl, data = imp, chains = 1) summary(fit_imp2) plot(fit_imp2, pars = "^b_") # investigate convergence of the original models fit_imp2$rhats # use the future package for parallelization library(future) plan(multiprocess) fit_imp3 <- brm_multiple(bmi~age+hyp+chl, data = imp, chains = 1) summary(fit_imp3) } } \author{ Paul-Christian Buerkner \email{paul.buerkner@gmail.com} } brms/man/do_call.Rd0000644000176200001440000000170714160105076013661 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/misc.R \name{do_call} \alias{do_call} \title{Execute a Function Call} \usage{ do_call(what, args, pkg = NULL, envir = parent.frame()) } \arguments{ \item{what}{Either a function or a non-empty character string naming the function to be called.} \item{args}{A list of arguments to the function call. The names attribute of \code{args} gives the argument names.} \item{pkg}{Optional name of the package in which to search for the function if \code{what} is a character string.} \item{envir}{An environment within which to evaluate the call.} } \value{ The result of the (evaluated) function call. } \description{ Execute a function call similar to \code{\link{do.call}}, but without deparsing function arguments. For large number of arguments (i.e., more than a few thousand) this function currently is somewhat inefficient and should be used with care in this case. 
} \keyword{internal} brms/man/lasso.Rd0000644000176200001440000000152614424715563013417 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{lasso} \alias{lasso} \title{(Defunct) Set up a lasso prior in \pkg{brms}} \usage{ lasso(df = 1, scale = 1) } \arguments{ \item{df}{Degrees of freedom of the chi-square prior of the inverse tuning parameter. Defaults to \code{1}.} \item{scale}{Scale of the lasso prior. Defaults to \code{1}.} } \value{ An error indicating that the lasso prior is no longer supported. } \description{ This functionality is no longer supported as of brms version 2.19.2. Please use the \code{\link{horseshoe}} or \code{\link{R2D2}} shrinkage priors instead. } \references{ Park, T., & Casella, G. (2008). The Bayesian Lasso. Journal of the American Statistical Association, 103(482), 681-686. } \seealso{ \code{\link{set_prior}}, \code{\link{horseshoe}}, \code{\link{R2D2}} } brms/man/R2D2.Rd0000644000176200001440000000544014424734053012741 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{R2D2} \alias{R2D2} \title{R2D2 Priors in \pkg{brms}} \usage{ R2D2(mean_R2 = 0.5, prec_R2 = 2, cons_D2 = 0.5, autoscale = TRUE, main = FALSE) } \arguments{ \item{mean_R2}{Mean of the Beta prior on the coefficient of determination R^2.} \item{prec_R2}{Precision of the Beta prior on the coefficient of determination R^2.} \item{cons_D2}{Concentration vector of the Dirichlet prior on the variance decomposition parameters. Lower values imply more shrinkage.} \item{autoscale}{Logical; indicating whether the R2D2 prior should be scaled using the residual standard deviation \code{sigma} if possible and sensible (defaults to \code{TRUE}). 
Autoscaling is not applied for distributional parameters or when the model does not contain the parameter \code{sigma}.} \item{main}{Logical (defaults to \code{FALSE}); only relevant if the R2D2 prior spans multiple parameter classes. In this case, only arguments given in the single instance where \code{main} is \code{TRUE} will be used. Arguments given in other instances of the prior will be ignored. See the Examples section below.} } \description{ Function used to set up R2D2 priors for population-level effects in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up the model. } \details{ Currently, the following classes support the R2D2 prior: \code{b} (overall regression coefficients), \code{sds} (SDs of smoothing splines), \code{sdgp} (SDs of Gaussian processes), \code{ar} (autoregressive coefficients), \code{ma} (moving average coefficients), \code{sderr} (SD of latent residuals), \code{sdcar} (SD of spatial CAR structures), \code{sd} (SD of varying coefficients). Even when the R2D2 prior is applied to multiple parameter classes at once, the concentration vector (argument \code{cons_D2}) has to be provided jointly in the the one instance of the prior where \code{main = TRUE}. The order in which the elements of concentration vector correspond to the classes' coefficients is the same as the order of the classes provided above. } \examples{ set_prior(R2D2(mean_R2 = 0.8, prec_R2 = 10)) # specify the R2D2 prior across multiple parameter classes set_prior(R2D2(mean_R2 = 0.8, prec_R2 = 10, main = TRUE), class = "b") + set_prior(R2D2(), class = "sd") } \references{ Zhang, Y. D., Naughton, B. P., Bondell, H. D., & Reich, B. J. (2020). Bayesian regression using a prior on the model fit: The R2-D2 shrinkage prior. Journal of the American Statistical Association. \url{https://arxiv.org/pdf/1609.00046.pdf} Aguilar J. E. & Bürkner P. C. (2022). Intuitive Joint Priors for Bayesian Linear Multilevel Models: The R2D2M2 prior. 
ArXiv preprint. \url{https://arxiv.org/pdf/2208.07132.pdf} } \seealso{ \code{\link{set_prior}} } brms/man/recompile_model.Rd0000644000176200001440000000153114136566260015427 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/backends.R \name{recompile_model} \alias{recompile_model} \title{Recompile Stan models in \code{brmsfit} objects} \usage{ recompile_model(x, recompile = NULL) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{recompile}{Logical, indicating whether the Stan model should be recompiled. If \code{NULL} (the default), \code{recompile_model} tries to figure out internally, if recompilation is necessary. Setting it to \code{FALSE} will cause \code{recompile_model} to always return the \code{brmsfit} object unchanged.} } \value{ A (possibly updated) \code{brmsfit} object. } \description{ Recompile the Stan model inside a \code{brmsfit} object, if necessary. This does not change the model, it simply recreates the executable so that sampling is possible again. } brms/man/brmsfit_needs_refit.Rd0000644000176200001440000000277014213413565016306 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-helpers.R \name{brmsfit_needs_refit} \alias{brmsfit_needs_refit} \title{Check if cached fit can be used.} \usage{ brmsfit_needs_refit( fit, sdata = NULL, scode = NULL, data = NULL, algorithm = NULL, silent = FALSE, verbose = FALSE ) } \arguments{ \item{fit}{Old \code{brmsfit} object (e.g., loaded from file).} \item{sdata}{New Stan data (result of a call to \code{\link{make_standata}}). Pass \code{NULL} to avoid this data check.} \item{scode}{New Stan code (result of a call to \code{\link{make_stancode}}). Pass \code{NULL} to avoid this code check.} \item{data}{New data to check consistency of factor level names. Pass \code{NULL} to avoid this data check.} \item{algorithm}{New algorithm. Pass \code{NULL} to avoid algorithm check.} \item{silent}{Logical. 
If \code{TRUE}, no messages will be given.} \item{verbose}{Logical. If \code{TRUE} detailed report of the differences is printed to the console.} } \value{ A boolean indicating whether a refit is needed. } \description{ Checks whether a given cached fit can be used without refitting when \code{file_refit = "on_change"} is used. This function is internal and exposed only to facilitate debugging problems with cached fits. The function may change or be removed in future versions and scripts should not use it. } \details{ Use with \code{verbose = TRUE} to get additional info on how the stored fit differs from the given data and code. } \keyword{internal} brms/man/ExGaussian.Rd0000644000176200001440000000243014275436221014333 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{ExGaussian} \alias{ExGaussian} \alias{dexgaussian} \alias{pexgaussian} \alias{rexgaussian} \title{The Exponentially Modified Gaussian Distribution} \usage{ dexgaussian(x, mu, sigma, beta, log = FALSE) pexgaussian(q, mu, sigma, beta, lower.tail = TRUE, log.p = FALSE) rexgaussian(n, mu, sigma, beta) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of means of the combined distribution.} \item{sigma}{Vector of standard deviations of the gaussian component.} \item{beta}{Vector of scales of the exponential component.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, and random generation for the exponentially modified Gaussian distribution with mean \code{mu} and standard deviation \code{sigma} of the gaussian component, as well as scale \code{beta} of the exponential component. 
} \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/get_prior.Rd0000644000176200001440000000763214275447604014276 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{get_prior} \alias{get_prior} \title{Overview on Priors for \pkg{brms} Models} \usage{ get_prior( formula, data, family = gaussian(), autocor = NULL, data2 = NULL, knots = NULL, drop_unused_levels = TRUE, sparse = NULL, ... ) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to that classes): A symbolic description of the model to be fitted. The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{An object of class \code{data.frame} (or one that can be coerced to that class) containing data of all variables used in the model.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family. Every family function has a \code{link} argument allowing to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{autocor}{(Deprecated) An optional \code{\link{cor_brms}} object describing the correlation structure within the response variable (i.e., the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for a description of the available correlation structures. Defaults to \code{NULL}, corresponding to no correlations. In multivariate models, \code{autocor} might also be a list of autocorrelation structures. 
It is now recommended to specify autocorrelation terms directly within \code{formula}. See \code{\link{brmsformula}} for more details.} \item{data2}{A named \code{list} of objects containing data, which cannot be passed via argument \code{data}. Required for some objects used in autocorrelation structures to specify dependency structures as well as for within-group covariance matrices.} \item{knots}{Optional list containing user specified knot values to be used for basis construction of smoothing terms. See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{drop_unused_levels}{Should unused factor levels in the data be dropped? Defaults to \code{TRUE}.} \item{sparse}{(Deprecated) Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. Sampling speed is currently not improved or even slightly decreased. It is now recommended to use the \code{sparse} argument of \code{\link{brmsformula}} and related functions.} \item{...}{Other arguments for internal usage only.} } \value{ A data.frame with columns \code{prior}, \code{class}, \code{coef}, and \code{group} and several rows, each providing information on a parameter (or parameter class) on which priors can be specified. The prior column is empty except for internal default priors. } \description{ Get information on all parameters (and parameter classes) for which priors may be specified including default priors. 
} \examples{ ## get all parameters and parameter classes to define priors on (prior <- get_prior(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson())) ## define a prior on all population-level effects at once prior$prior[1] <- "normal(0,10)" ## define a specific prior on the population-level effect of Trt prior$prior[5] <- "student_t(10, 0, 5)" ## verify that the priors indeed found their way into Stan's model code make_stancode(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson(), prior = prior) } \seealso{ \code{\link{set_prior}} } brms/man/pp_mixture.brmsfit.Rd0000644000176200001440000000746614213413565016131 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pp_mixture.R \name{pp_mixture.brmsfit} \alias{pp_mixture.brmsfit} \alias{pp_mixture} \title{Posterior Probabilities of Mixture Component Memberships} \usage{ \method{pp_mixture}{brmsfit}( x, newdata = NULL, re_formula = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, log = FALSE, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) pp_mixture(x, ...) } \arguments{ \item{x}{An \R object usually of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. 
If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{log}{Logical; Indicates whether to return probabilities on the log-scale.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ If \code{summary = TRUE}, an N x E x K array, where N is the number of observations, K is the number of mixture components, and E is equal to \code{length(probs) + 2}. If \code{summary = FALSE}, an S x N x K array, where S is the number of posterior draws. } \description{ Compute the posterior probabilities of mixture component memberships for each observation including uncertainty estimates. } \details{ The returned probabilities can be written as \eqn{P(Kn = k | Yn)}, that is the posterior probability that observation n originates from component k. They are computed using Bayes' Theorem \deqn{P(Kn = k | Yn) = P(Yn | Kn = k) P(Kn = k) / P(Yn),} where \eqn{P(Yn | Kn = k)} is the (posterior) likelihood of observation n for component k, \eqn{P(Kn = k)} is the (posterior) mixing probability of component k (i.e. parameter \code{theta}), and \deqn{P(Yn) = \sum (k=1,...,K) P(Yn | Kn = k) P(Kn = k)} is a normalizing constant. 
} \examples{ \dontrun{ ## simulate some data set.seed(1234) dat <- data.frame( y = c(rnorm(100), rnorm(50, 2)), x = rnorm(150) ) ## fit a simple normal mixture model mix <- mixture(gaussian, nmix = 2) prior <- c( prior(normal(0, 5), Intercept, nlpar = mu1), prior(normal(0, 5), Intercept, nlpar = mu2), prior(dirichlet(2, 2), theta) ) fit1 <- brm(bf(y ~ x), dat, family = mix, prior = prior, chains = 2, init = 0) summary(fit1) ## compute the membership probabilities ppm <- pp_mixture(fit1) str(ppm) ## extract point estimates for each observation head(ppm[, 1, ]) ## classify every observation according to ## the most likely component apply(ppm[, 1, ], 1, which.max) } } brms/man/posterior_linpred.brmsfit.Rd0000644000176200001440000000623314273177713017507 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_epred.R \name{posterior_linpred.brmsfit} \alias{posterior_linpred.brmsfit} \alias{posterior_linpred} \title{Posterior Draws of the Linear Predictor} \usage{ \method{posterior_linpred}{brmsfit}( object, transform = FALSE, newdata = NULL, re_formula = NULL, re.form = NULL, resp = NULL, dpar = NULL, nlpar = NULL, incl_thres = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{transform}{Logical; if \code{FALSE} (the default), draws of the linear predictor are returned. If \code{TRUE}, draws of the transformed linear predictor, that is, after applying the inverse link function are returned.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. 
If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{re.form}{Alias of \code{re_formula}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{dpar}{Name of a predicted distributional parameter for which draws are to be returned. By default, draws of the main distributional parameter(s) \code{"mu"} are returned.} \item{nlpar}{Optional name of a predicted non-linear parameter. If specified, expected predictions of this parameters are returned.} \item{incl_thres}{Logical; only relevant for ordinal models when \code{transform} is \code{FALSE}, and ignored otherwise. Shall the thresholds and category-specific effects be included in the linear predictor? For backwards compatibility, the default is to not include them.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \description{ Compute posterior draws of the linear predictor, that is draws before applying any link functions or other transformations. Can be performed for the data used to fit the model (posterior predictive checks) or for new data. 
} \examples{ \dontrun{ ## fit a model fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) ## extract linear predictor values pl <- posterior_linpred(fit) str(pl) } } \seealso{ \code{\link{posterior_epred.brmsfit}} } brms/man/validate_newdata.Rd0000644000176200001440000000410714213413565015561 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-helpers.R \name{validate_newdata} \alias{validate_newdata} \title{Validate New Data} \usage{ validate_newdata( newdata, object, re_formula = NULL, allow_new_levels = FALSE, newdata2 = NULL, resp = NULL, check_response = TRUE, incl_autocor = TRUE, group_vars = NULL, req_vars = NULL, ... ) } \arguments{ \item{newdata}{A \code{data.frame} containing new data to be validated.} \item{object}{A \code{brmsfit} object.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{allow_new_levels}{A flag indicating if new levels of group-level effects are allowed (defaults to \code{FALSE}). Only relevant if \code{newdata} is provided.} \item{newdata2}{A named \code{list} of objects containing new data, which cannot be passed via argument \code{newdata}. Required for some objects used in autocorrelation structures, or \code{\link{stanvars}}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{check_response}{Logical; Indicates if response variables should be checked as well. Defaults to \code{TRUE}.} \item{incl_autocor}{A flag indicating if correlation structures originally specified via \code{autocor} should be included in the predictions. Defaults to \code{TRUE}.} \item{group_vars}{Optional names of grouping variables to be validated. 
Defaults to all grouping variables in the model.} \item{req_vars}{Optional names of variables required in \code{newdata}. If \code{NULL} (the default), all variables in the original data are required (unless ignored for some other reason).} \item{...}{Currently ignored.} } \value{ A validated \code{'data.frame'} based on \code{newdata}. } \description{ Validate new data passed to post-processing methods of \pkg{brms}. Unless you are a package developer, you will rarely need to call \code{validate_newdata} directly. } brms/man/cs.Rd0000644000176200001440000000160714224021465012670 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-cs.R \name{cs} \alias{cs} \alias{cse} \title{Category Specific Predictors in \pkg{brms} Models} \usage{ cs(expr) } \arguments{ \item{expr}{Expression containing predictors, for which category specific effects should be estimated. For evaluation, \R formula syntax is applied.} } \description{ Category Specific Predictors in \pkg{brms} Models } \details{ For detailed documentation see \code{help(brmsformula)} as well as \code{vignette("brms_overview")}. This function is almost solely useful when called in formulas passed to the \pkg{brms} package. } \examples{ \dontrun{ fit <- brm(rating ~ period + carry + cs(treat), data = inhaler, family = sratio("cloglog"), prior = set_prior("normal(0,5)"), chains = 2) summary(fit) plot(fit, ask = FALSE) } } \seealso{ \code{\link{brmsformula}} } brms/man/stanvar.Rd0000644000176200001440000000640714430677576013767 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stanvars.R \name{stanvar} \alias{stanvar} \alias{stanvars} \title{User-defined variables passed to Stan} \usage{ stanvar( x = NULL, name = NULL, scode = NULL, block = "data", position = "start", pll_args = NULL ) } \arguments{ \item{x}{An \R object containing data to be passed to Stan. 
Only required if \code{block = 'data'} and ignored otherwise.} \item{name}{Optional character string providing the desired variable name of the object in \code{x}. If \code{NULL} (the default) the variable name is directly inferred from \code{x}.} \item{scode}{Line of Stan code to define the variable in Stan language. If \code{block = 'data'}, the Stan code is inferred based on the class of \code{x} by default.} \item{block}{Name of one of Stan's program blocks in which the variable should be defined. Can be \code{'data'}, \code{'tdata'} (transformed data), \code{'parameters'}, \code{'tparameters'} (transformed parameters), \code{'model'}, \code{'likelihood'} (part of the model block where the likelihood is given), \code{'genquant'} (generated quantities) or \code{'functions'}.} \item{position}{Name of the position within the block where the Stan code should be placed. Currently allowed are \code{'start'} (the default) and \code{'end'} of the block.} \item{pll_args}{Optional Stan code to be put into the header of \code{partial_log_lik} functions. This ensures that the variables specified in \code{scode} can be used in the likelihood even when within-chain parallelization is activated via \code{\link{threading}}.} } \value{ An object of class \code{stanvars}. } \description{ Prepare user-defined variables to be passed to one of Stan's program blocks. This is primarily useful for defining more complex priors, for refitting models without recompilation despite changing priors, or for defining custom Stan functions. } \details{ The \code{stanvar} function is not vectorized. Instead, multiple \code{stanvars} objects can be added together via \code{+} (see Examples). 
} \examples{ bprior <- prior(normal(mean_intercept, 10), class = "Intercept") stanvars <- stanvar(5, name = "mean_intercept") make_stancode(count ~ Trt, epilepsy, prior = bprior, stanvars = stanvars) # define a multi-normal prior with known covariance matrix bprior <- prior(multi_normal(M, V), class = "b") stanvars <- stanvar(rep(0, 2), "M", scode = " vector[K] M;") + stanvar(diag(2), "V", scode = " matrix[K, K] V;") make_stancode(count ~ Trt + zBase, epilepsy, prior = bprior, stanvars = stanvars) # define a hierarchical prior on the regression coefficients bprior <- set_prior("normal(0, tau)", class = "b") + set_prior("target += normal_lpdf(tau | 0, 10)", check = FALSE) stanvars <- stanvar(scode = "real tau;", block = "parameters") make_stancode(count ~ Trt + zBase, epilepsy, prior = bprior, stanvars = stanvars) # ensure that 'tau' is passed to the likelihood of a threaded model # not necessary for this example but may be necessary in other cases stanvars <- stanvar(scode = "real tau;", block = "parameters", pll_args = "real tau") make_stancode(count ~ Trt + zBase, epilepsy, stanvars = stanvars, threads = threading(2)) } brms/man/brmsterms.Rd0000644000176200001440000000404314213413565014302 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsterms.R \name{brmsterms} \alias{brmsterms} \alias{parse_bf} \alias{brmsterms.default} \alias{brmsterms.brmsformula} \alias{brmsterms.mvbrmsformula} \title{Parse Formulas of \pkg{brms} Models} \usage{ brmsterms(formula, ...) \method{brmsterms}{default}(formula, ...) \method{brmsterms}{brmsformula}(formula, check_response = TRUE, resp_rhs_all = TRUE, ...) \method{brmsterms}{mvbrmsformula}(formula, ...) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to one of those classes): A symbolic description of the model to be fitted. 
The details of model specification are explained in \code{\link{brmsformula}}.} \item{...}{Further arguments passed to or from other methods.} \item{check_response}{Logical; Indicates whether the left-hand side of \code{formula} (i.e. response variables and addition arguments) should be parsed. If \code{FALSE}, \code{formula} may also be one-sided.} \item{resp_rhs_all}{Logical; Indicates whether to also include response variables on the right-hand side of formula \code{.$allvars}, where \code{.} represents the output of \code{brmsterms}.} } \value{ An object of class \code{brmsterms} or \code{mvbrmsterms} (for multivariate models), which is a \code{list} containing all required information initially stored in \code{formula} in an easier to use format, basically a list of formulas (not an abstract syntax tree). } \description{ Parse formula objects for use in \pkg{brms}. } \details{ This is the main formula parsing function of \pkg{brms}. It should usually not be called directly, but is exported to allow package developers making use of the formula syntax implemented in \pkg{brms}. As long as no other packages depend on these functions, they may be changed without deprecation warnings, when new features make this necessary. } \seealso{ \code{\link{brm}}, \code{\link{brmsformula}}, \code{\link{mvbrmsformula}} } brms/man/mmc.Rd0000644000176200001440000000202414213413565013035 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-re.R \name{mmc} \alias{mmc} \title{Multi-Membership Covariates} \usage{ mmc(...) } \arguments{ \item{...}{One or more terms containing covariates corresponding to the grouping levels specified in \code{\link{mm}}.} } \value{ A matrix with covariates as columns. } \description{ Specify covariates that vary over different levels of multi-membership grouping factors thus requiring special treatment. This function is almost solely useful, when called in combination with \code{\link{mm}}. 
Outside of multi-membership terms it will behave very much like \code{\link{cbind}}. } \examples{ \dontrun{ # simulate some data dat <- data.frame( y = rnorm(100), x1 = rnorm(100), x2 = rnorm(100), g1 = sample(1:10, 100, TRUE), g2 = sample(1:10, 100, TRUE) ) # multi-membership model with level specific covariate values dat$xc <- (dat$x1 + dat$x2) / 2 fit <- brm(y ~ xc + (1 + mmc(x1, x2) | mm(g1, g2)), data = dat) summary(fit) } } \seealso{ \code{\link{mm}} } brms/man/loo_model_weights.brmsfit.Rd0000644000176200001440000000243314213413565017435 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{loo_model_weights.brmsfit} \alias{loo_model_weights.brmsfit} \alias{loo_model_weights} \title{Model averaging via stacking or pseudo-BMA weighting.} \usage{ \method{loo_model_weights}{brmsfit}(x, ..., model_names = NULL) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \value{ A named vector of model weights. } \description{ Compute model weights for \code{brmsfit} objects via stacking or pseudo-BMA weighting. For more details, see \code{\link[loo:loo_model_weights]{loo::loo_model_weights}}. 
} \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler, family = "gaussian") # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "gaussian") loo_model_weights(fit1, fit2) } } brms/man/rows2labels.Rd0000644000176200001440000000154614213413565014530 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conditional_effects.R \name{rows2labels} \alias{rows2labels} \title{Convert Rows to Labels} \usage{ rows2labels(x, digits = 2, sep = " & ", incl_vars = TRUE, ...) } \arguments{ \item{x}{A \code{data.frame} for which to extract labels.} \item{digits}{Minimal number of decimal places shown in the labels of numeric variables.} \item{sep}{A single character string defining the separator between variables used in the labels.} \item{incl_vars}{Indicates if variable names should be part of the labels. Defaults to \code{TRUE}.} \item{...}{Currently unused.} } \value{ A character vector of the same length as the number of rows of \code{x}. } \description{ Convert information in rows to labels for each row. } \seealso{ \code{\link{make_conditions}}, \code{\link{conditional_effects}} } brms/man/mixture.Rd0000644000176200001440000000673214213413565013770 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/families.R \name{mixture} \alias{mixture} \title{Finite Mixture Families in \pkg{brms}} \usage{ mixture(..., flist = NULL, nmix = 1, order = NULL) } \arguments{ \item{...}{One or more objects providing a description of the response distributions to be combined in the mixture model. These can be family functions, calls to family functions or character strings naming the families. 
For details of supported families see \code{\link{brmsfamily}}.} \item{flist}{Optional list of objects, which are treated in the same way as objects passed via the \code{...} argument.} \item{nmix}{Optional numeric vector specifying the number of times each family is repeated. If specified, it must have the same length as the number of families passed via \code{...} and \code{flist}.} \item{order}{Ordering constraint to identify mixture components. If \code{'mu'} or \code{TRUE}, population-level intercepts of the mean parameters are ordered in non-ordinal models and fixed to the same value in ordinal models (see details). If \code{'none'} or \code{FALSE}, no ordering constraint is applied. If \code{NULL} (the default), \code{order} is set to \code{'mu'} if all families are the same and \code{'none'} otherwise. Other ordering constraints may be implemented in the future.} } \value{ An object of class \code{mixfamily}. } \description{ Set up a finite mixture family for use in \pkg{brms}. } \details{ Most families supported by \pkg{brms} can be used to form mixtures. The response variable has to be valid for all components of the mixture family. Currently, the number of mixture components has to be specified by the user. It is not yet possible to estimate the number of mixture components from the data. Ordering intercepts in mixtures of ordinal families is not possible as each family has itself a set of vector of intercepts (i.e. ordinal thresholds). Instead, \pkg{brms} will fix the vector of intercepts across components in ordinal mixtures, if desired, so that users can try to identify the mixture model via selective inclusion of predictors. For most mixture models, you may want to specify priors on the population-level intercepts via \code{\link{set_prior}} to improve convergence. In addition, it is sometimes necessary to set \code{init = 0} in the call to \code{\link{brm}} to allow chains to initialize properly. 
For more details on the specification of mixture models, see \code{\link{brmsformula}}. } \examples{ \dontrun{ ## simulate some data set.seed(1234) dat <- data.frame( y = c(rnorm(200), rnorm(100, 6)), x = rnorm(300), z = sample(0:1, 300, TRUE) ) ## fit a simple normal mixture model mix <- mixture(gaussian, gaussian) prior <- c( prior(normal(0, 7), Intercept, dpar = mu1), prior(normal(5, 7), Intercept, dpar = mu2) ) fit1 <- brm(bf(y ~ x + z), dat, family = mix, prior = prior, chains = 2) summary(fit1) pp_check(fit1) ## use different predictors for the components fit2 <- brm(bf(y ~ 1, mu1 ~ x, mu2 ~ z), dat, family = mix, prior = prior, chains = 2) summary(fit2) ## fix the mixing proportions fit3 <- brm(bf(y ~ x + z, theta1 = 1, theta2 = 2), dat, family = mix, prior = prior, init = 0, chains = 2) summary(fit3) pp_check(fit3) ## predict the mixing proportions fit4 <- brm(bf(y ~ x + z, theta2 ~ x), dat, family = mix, prior = prior, init = 0, chains = 2) summary(fit4) pp_check(fit4) ## compare model fit LOO(fit1, fit2, fit3, fit4) } } brms/man/mcmc_plot.brmsfit.Rd0000644000176200001440000000550414213413565015711 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{mcmc_plot.brmsfit} \alias{mcmc_plot.brmsfit} \alias{stanplot} \alias{stanplot.brmsfit} \alias{mcmc_plot} \title{MCMC Plots Implemented in \pkg{bayesplot}} \usage{ \method{mcmc_plot}{brmsfit}( object, pars = NA, type = "intervals", variable = NULL, regex = FALSE, fixed = FALSE, ... ) mcmc_plot(object, ...) } \arguments{ \item{object}{An \R object typically of class \code{brmsfit}} \item{pars}{Deprecated alias of \code{variable}. Names of the parameters to plot, as given by a character vector or a regular expression.} \item{type}{The type of the plot. 
Supported types are (as names) \code{hist}, \code{dens}, \code{hist_by_chain}, \code{dens_overlay}, \code{violin}, \code{intervals}, \code{areas}, \code{acf}, \code{acf_bar},\code{trace}, \code{trace_highlight}, \code{scatter}, \code{rhat}, \code{rhat_hist}, \code{neff}, \code{neff_hist} \code{nuts_acceptance}, \code{nuts_divergence}, \code{nuts_stepsize}, \code{nuts_treedepth}, and \code{nuts_energy}. For an overview on the various plot types see \code{\link[bayesplot:MCMC-overview]{MCMC-overview}}.} \item{variable}{Names of the variables (parameters) to plot, as given by a character vector or a regular expression (if \code{regex = TRUE}). By default, a hopefully not too large selection of variables is plotted.} \item{regex}{Logical; Indicates whether \code{variable} should be treated as regular expressions. Defaults to \code{FALSE}.} \item{fixed}{(Deprecated) Indicates whether parameter names should be matched exactly (\code{TRUE}) or treated as regular expressions (\code{FALSE}). Default is \code{FALSE} and only works with argument \code{pars}.} \item{...}{Additional arguments passed to the plotting functions. See \code{\link[bayesplot:MCMC-overview]{MCMC-overview}} for more details.} } \value{ A \code{\link[ggplot2:ggplot]{ggplot}} object that can be further customized using the \pkg{ggplot2} package. } \description{ Convenient way to call MCMC plotting functions implemented in the \pkg{bayesplot} package. } \details{ Also consider using the \pkg{shinystan} package available via method \code{\link{launch_shinystan}} in \pkg{brms} for flexible and interactive visual analysis. 
} \examples{ \dontrun{ model <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = "poisson") # plot posterior intervals mcmc_plot(model) # only show population-level effects in the plots mcmc_plot(model, variable = "^b_", regex = TRUE) # show histograms of the posterior distributions mcmc_plot(model, type = "hist") # plot some diagnostics of the sampler mcmc_plot(model, type = "neff") mcmc_plot(model, type = "rhat") # plot some diagnostics specific to the NUTS sampler mcmc_plot(model, type = "nuts_acceptance") mcmc_plot(model, type = "nuts_divergence") } } brms/man/pairs.brmsfit.Rd0000644000176200001440000000307014213413565015046 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{pairs.brmsfit} \alias{pairs.brmsfit} \title{Create a matrix of output plots from a \code{brmsfit} object} \usage{ \method{pairs}{brmsfit}(x, pars = NA, variable = NULL, regex = FALSE, fixed = FALSE, ...) } \arguments{ \item{x}{An object of class \code{brmsfit}} \item{pars}{Deprecated alias of \code{variable}. Names of the parameters to plot, as given by a character vector or a regular expression.} \item{variable}{Names of the variables (parameters) to plot, as given by a character vector or a regular expression (if \code{regex = TRUE}). By default, a hopefully not too large selection of variables is plotted.} \item{regex}{Logical; Indicates whether \code{variable} should be treated as regular expressions. Defaults to \code{FALSE}.} \item{fixed}{(Deprecated) Indicates whether parameter names should be matched exactly (\code{TRUE}) or treated as regular expressions (\code{FALSE}). Default is \code{FALSE} and only works with argument \code{pars}.} \item{...}{Further arguments to be passed to \code{\link[bayesplot:MCMC-scatterplots]{mcmc_pairs}}.} } \description{ A \code{\link[graphics:pairs]{pairs}} method that is customized for MCMC output. 
} \details{ For a detailed description see \code{\link[bayesplot:MCMC-scatterplots]{mcmc_pairs}}. } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|visit), data = epilepsy, family = "poisson") pairs(fit, variable = variables(fit)[1:3]) pairs(fit, variable = "^sd_", regex = TRUE) } } brms/man/addition-terms.Rd0000644000176200001440000001206414275420414015210 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ad.R \name{addition-terms} \alias{addition-terms} \alias{se} \alias{weights} \alias{trials} \alias{thres} \alias{cat} \alias{dec} \alias{cens} \alias{trunc} \alias{index} \alias{rate} \alias{subset} \alias{vreal} \alias{vint} \alias{resp_se} \alias{resp_weights} \alias{resp_trials} \alias{resp_thres} \alias{resp_cat} \alias{resp_dec} \alias{resp_cens} \alias{resp_trunc} \alias{resp_mi} \alias{resp_index} \alias{resp_rate} \alias{resp_subset} \alias{resp_vreal} \alias{resp_vint} \title{Additional Response Information} \usage{ resp_se(x, sigma = FALSE) resp_weights(x, scale = FALSE) resp_trials(x) resp_thres(x, gr = NA) resp_cat(x) resp_dec(x) resp_cens(x, y2 = NA) resp_trunc(lb = -Inf, ub = Inf) resp_mi(sdy = NA) resp_index(x) resp_rate(denom) resp_subset(x) resp_vreal(...) resp_vint(...) } \arguments{ \item{x}{A vector; Ideally a single variable defined in the data (see Details). Allowed values depend on the function: \code{resp_se} and \code{resp_weights} require positive numeric values. \code{resp_trials}, \code{resp_thres}, and \code{resp_cat} require positive integers. \code{resp_dec} requires \code{0} and \code{1}, or alternatively \code{'lower'} and \code{'upper'}. \code{resp_subset} requires \code{0} and \code{1}, or alternatively \code{FALSE} and \code{TRUE}. \code{resp_cens} requires \code{'left'}, \code{'none'}, \code{'right'}, and \code{'interval'} (or equivalently \code{-1}, \code{0}, \code{1}, and \code{2}) to indicate left, no, right, or interval censoring. 
\code{resp_index} does not make any requirements other than the value being unique for each observation.} \item{sigma}{Logical; Indicates whether the residual standard deviation parameter \code{sigma} should be included in addition to the known measurement error. Defaults to \code{FALSE} for backwards compatibility, but setting it to \code{TRUE} is usually the better choice.} \item{scale}{Logical; Indicates whether weights should be scaled so that the average weight equals one. Defaults to \code{FALSE}.} \item{gr}{A vector of grouping indicators.} \item{y2}{A vector specifying the upper bounds in interval censoring. Will be ignored for non-interval censored observations. However, it should NOT be \code{NA} even for non-interval censored observations to avoid accidental exclusion of these observations.} \item{lb}{A numeric vector or single numeric value specifying the lower truncation bound.} \item{ub}{A numeric vector or single numeric value specifying the upper truncation bound.} \item{sdy}{Optional known measurement error of the response treated as standard deviation. If specified, handles measurement error and (completely) missing values at the same time using the plausible-values-technique.} \item{denom}{A vector of positive numeric values specifying the denominator values from which the response rates are computed.} \item{...}{For \code{resp_vreal}, vectors of real values. For \code{resp_vint}, vectors of integer values. In Stan, these variables will be named \code{vreal1}, \code{vreal2}, ..., and \code{vint1}, \code{vint2}, ..., respectively.} } \value{ A list of additional response information to be processed further by \pkg{brms}. } \description{ Provide additional information on the response variable in \pkg{brms} models, such as censoring, truncation, or known measurement error. Detailed documentation on the use of each of these functions can be found in the Details section of \code{\link{brmsformula}} (under "Additional response information"). 
} \details{ These functions are almost solely useful when called in formulas passed to the \pkg{brms} package. Within formulas, the \code{resp_} prefix may be omitted. More information is given in the 'Details' section of \code{\link{brmsformula}} (under "Additional response information"). It is highly recommended to use a single data variable as input for \code{x} (instead of a more complicated expression) to make sure all post-processing functions work as expected. } \examples{ \dontrun{ ## Random effects meta-analysis nstudies <- 20 true_effects <- rnorm(nstudies, 0.5, 0.2) sei <- runif(nstudies, 0.05, 0.3) outcomes <- rnorm(nstudies, true_effects, sei) data1 <- data.frame(outcomes, sei) fit1 <- brm(outcomes | se(sei, sigma = TRUE) ~ 1, data = data1) summary(fit1) ## Probit regression using the binomial family n <- sample(1:10, 100, TRUE) # number of trials success <- rbinom(100, size = n, prob = 0.4) x <- rnorm(100) data2 <- data.frame(n, success, x) fit2 <- brm(success | trials(n) ~ x, data = data2, family = binomial("probit")) summary(fit2) ## Survival regression modeling the time between the first ## and second recurrence of an infection in kidney patients. fit3 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), data = kidney, family = lognormal()) summary(fit3) ## Poisson model with truncated counts fit4 <- brm(count | trunc(ub = 104) ~ zBase * Trt, data = epilepsy, family = poisson()) summary(fit4) } } \seealso{ \code{\link{brm}}, \code{\link{brmsformula}} } brms/man/posterior_table.Rd0000644000176200001440000000154614213413565015466 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{posterior_table} \alias{posterior_table} \title{Table Creation for Posterior Draws} \usage{ posterior_table(x, levels = NULL) } \arguments{ \item{x}{A matrix of posterior draws where rows indicate draws and columns indicate parameters.} \item{levels}{Optional values of possible posterior values. 
Defaults to all unique values in \code{x}.} } \value{ A matrix where rows indicate parameters and columns indicate the unique values of posterior draws. } \description{ Create a table for unique values of posterior draws. This is usually only useful when summarizing predictions of ordinal models. } \examples{ \dontrun{ fit <- brm(rating ~ period + carry + treat, data = inhaler, family = cumulative()) pr <- predict(fit, summary = FALSE) posterior_table(pr) } } brms/man/save_pars.Rd0000644000176200001440000000375614500561321014252 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/exclude_pars.R \name{save_pars} \alias{save_pars} \title{Control Saving of Parameter Draws} \usage{ save_pars(group = TRUE, latent = FALSE, all = FALSE, manual = NULL) } \arguments{ \item{group}{A flag to indicate if group-level coefficients for each level of the grouping factors should be saved (default is \code{TRUE}). Set to \code{FALSE} to save memory. Alternatively, \code{group} may also be a character vector naming the grouping factors for which to save draws of coefficients.} \item{latent}{A flag to indicate if draws of latent variables obtained by using \code{me} and \code{mi} terms should be saved (default is \code{FALSE}). Saving these draws allows to better use methods such as \code{posterior_predict} with the latent variables but leads to very large \R objects even for models of moderate size and complexity. Alternatively, \code{latent} may also be a character vector naming the latent variables for which to save draws.} \item{all}{A flag to indicate if draws of all variables defined in Stan's \code{parameters} block should be saved (default is \code{FALSE}). Saving these draws is required in order to apply the certain methods such as \code{bridge_sampler} and \code{bayes_factor}.} \item{manual}{A character vector naming Stan variable names which should be saved. 
These names should match the variable names inside the Stan code before renaming. This feature is meant for power users only and will rarely be useful outside of very special cases.} } \value{ A list of class \code{"save_pars"}. } \description{ Control which (draws of) parameters should be saved in a \pkg{brms} model. The output of this function is meant for usage in the \code{save_pars} argument of \code{\link{brm}}. } \examples{ \dontrun{ # don't store group-level coefficients fit <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), save_pars = save_pars(group = FALSE)) variables(fit) } } brms/man/opencl.Rd0000644000176200001440000000241514213413565013545 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/backends.R \name{opencl} \alias{opencl} \title{GPU support in Stan via OpenCL} \usage{ opencl(ids = NULL) } \arguments{ \item{ids}{(integer vector of length 2) The platform and device IDs of the OpenCL device to use for fitting. If you don't know the IDs of your OpenCL device, \code{c(0,0)} is most likely what you need.} } \value{ A \code{brmsopencl} object which can be passed to the \code{opencl} argument of \code{brm} and related functions. } \description{ Use OpenCL for GPU support in \pkg{Stan} via the \pkg{brms} interface. Only some \pkg{Stan} functions can be run on a GPU at this point and so a lot of \pkg{brms} models won't benefit from OpenCL for now. } \details{ For more details on OpenCL in \pkg{Stan}, check out \url{https://mc-stan.org/docs/2_26/cmdstan-guide/parallelization.html#opencl} as well as \url{https://mc-stan.org/docs/2_26/stan-users-guide/opencl.html}. 
} \examples{ \dontrun{ # this model just serves as an illustration # OpenCL may not actually speed things up here fit <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), chains = 2, cores = 2, opencl = opencl(c(0, 0)), backend = "cmdstanr") summary(fit) } } brms/man/fixef.brmsfit.Rd0000644000176200001440000000310214213413565015025 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{fixef.brmsfit} \alias{fixef.brmsfit} \alias{fixef} \title{Extract Population-Level Estimates} \usage{ \method{fixef}{brmsfit}( object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), pars = NULL, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{pars}{Optional names of coefficients to extract. By default, all coefficients are extracted.} \item{...}{Currently ignored.} } \value{ If \code{summary} is \code{TRUE}, a matrix returned by \code{\link{posterior_summary}} for the population-level effects. If \code{summary} is \code{FALSE}, a matrix with one row per posterior draw and one column per population-level effect. } \description{ Extract the population-level ('fixed') effects from a \code{brmsfit} object. 
} \examples{ \dontrun{ fit <- brm(time | cens(censored) ~ age + sex + disease, data = kidney, family = "exponential") fixef(fit) # extract only some coefficients fixef(fit, pars = c("age", "sex")) } } brms/man/cor_cosy.Rd0000644000176200001440000000231014213413565014077 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_cosy} \alias{cor_cosy} \alias{cor_cosy-class} \title{(Deprecated) Compound Symmetry (COSY) Correlation Structure} \usage{ cor_cosy(formula = ~1) } \arguments{ \item{formula}{A one sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer valued. When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} } \value{ An object of class \code{cor_cosy}, representing a compound symmetry correlation structure. } \description{ This function is deprecated. Please see \code{\link{cosy}} for the new syntax. This functions is a constructor for the \code{cor_cosy} class, representing a compound symmetry structure corresponding to uniform correlation. } \examples{ cor_cosy(~ visit | patient) } brms/man/loss.Rd0000644000176200001440000000302614213413565013244 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{loss} \alias{loss} \title{Cumulative Insurance Loss Payments} \format{ A data frame of 55 observations containing information on the following 4 variables. 
\describe{ \item{AY}{Origin year of the insurance (1991 to 2000)} \item{dev}{Deviation from the origin year in months} \item{cum}{Cumulative loss payments} \item{premium}{Achieved premiums for the given origin year} } } \source{ Gesmann M. & Morris J. (2020). Hierarchical Compartmental Reserving Models. \emph{CAS Research Papers}. } \usage{ loss } \description{ This dataset, discussed in Gesmann & Morris (2020), contains cumulative insurance loss payments over the course of ten years. } \examples{ \dontrun{ # non-linear model to predict cumulative loss payments fit_loss <- brm( bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE), data = loss, family = gaussian(), prior = c( prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta") ), control = list(adapt_delta = 0.9) ) # basic summaries summary(fit_loss) conditional_effects(fit_loss) # plot predictions per origin year conditions <- data.frame(AY = unique(loss$AY)) rownames(conditions) <- unique(loss$AY) me_loss <- conditional_effects( fit_loss, conditions = conditions, re_formula = NULL, method = "predict" ) plot(me_loss, ncol = 5, points = TRUE) } } \keyword{datasets} brms/man/is.brmsformula.Rd0000644000176200001440000000050614160105076015223 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{is.brmsformula} \alias{is.brmsformula} \title{Checks if argument is a \code{brmsformula} object} \usage{ is.brmsformula(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{brmsformula} object } brms/man/as.brmsprior.Rd0000644000176200001440000000061414430676246014715 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{as.brmsprior} \alias{as.brmsprior} \title{Transform into a brmsprior object} \usage{ as.brmsprior(x) } \arguments{ \item{x}{An object to be 
transformed.} } \value{ A \code{brmsprior} object if the transformation was possible. } \description{ Try to transform an object into a \code{brmsprior} object. } brms/man/MultiStudentT.Rd0000644000176200001440000000227114275436221015054 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{MultiStudentT} \alias{MultiStudentT} \alias{dmulti_student_t} \alias{rmulti_student_t} \title{The Multivariate Student-t Distribution} \usage{ dmulti_student_t(x, df, mu, Sigma, log = FALSE, check = FALSE) rmulti_student_t(n, df, mu, Sigma, check = FALSE) } \arguments{ \item{x}{Vector or matrix of quantiles. If \code{x} is a matrix, each row is taken to be a quantile.} \item{df}{Vector of degrees of freedom.} \item{mu}{Location vector with length equal to the number of dimensions.} \item{Sigma}{Covariance matrix.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{check}{Logical; Indicates whether several input checks should be performed. Defaults to \code{FALSE} to improve efficiency.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density function and random generation for the multivariate Student-t distribution with location vector \code{mu}, covariance matrix \code{Sigma}, and degrees of freedom \code{df}. 
} \details{ See the Stan user's manual \url{https://mc-stan.org/documentation/} for details on the parameterization } brms/man/update_adterms.Rd0000644000176200001440000000202213565500267015265 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{update_adterms} \alias{update_adterms} \title{Update Formula Addition Terms} \usage{ update_adterms(formula, adform, action = c("update", "replace")) } \arguments{ \item{formula}{Two-sided formula to be updated.} \item{adform}{One-sided formula containing addition terms to update \code{formula} with.} \item{action}{Indicates what should happen to the existing addition terms in \code{formula}. If \code{"update"} (the default), old addition terms that have no corresponding term in \code{adform} will be kept. If \code{"replace"}, all old addition terms will be removed.} } \value{ An object of class \code{formula}. } \description{ Update additions terms used in formulas of \pkg{brms}. See \code{\link{addition-terms}} for details. } \examples{ form <- y | trials(size) ~ x update_adterms(form, ~ trials(10)) update_adterms(form, ~ weights(w)) update_adterms(form, ~ weights(w), action = "replace") update_adterms(y ~ x, ~ trials(10)) } brms/man/loo_R2.brmsfit.Rd0000644000176200001440000000356714417767011015103 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo_predict.R \name{loo_R2.brmsfit} \alias{loo_R2.brmsfit} \alias{loo_R2} \title{Compute a LOO-adjusted R-squared for regression models} \usage{ \method{loo_R2}{brmsfit}( object, resp = NULL, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{summary}{Should summary statistics be returned instead of the raw values? 
Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}} and \code{\link[brms:log_lik.brmsfit]{log_lik}}, which are used in the computation of the R-squared values.} } \value{ If \code{summary = TRUE}, an M x C matrix is returned (M = number of response variables and c = \code{length(probs) + 2}) containing summary statistics of the LOO-adjusted R-squared values. If \code{summary = FALSE}, the posterior draws of the LOO-adjusted R-squared values are returned in an S x M matrix (S is the number of draws). } \description{ Compute a LOO-adjusted R-squared for regression models } \examples{ \dontrun{ fit <- brm(mpg ~ wt + cyl, data = mtcars) summary(fit) loo_R2(fit) # compute R2 with new data nd <- data.frame(mpg = c(10, 20, 30), wt = c(4, 3, 2), cyl = c(8, 6, 4)) loo_R2(fit, newdata = nd) } } brms/man/brms-package.Rd0000644000176200001440000000745214213413565014627 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brms-package.R \docType{package} \name{brms-package} \alias{brms-package} \alias{brms} \title{Bayesian Regression Models using 'Stan'} \description{ \if{html}{ \figure{stanlogo.png}{options: width="50" alt="https://mc-stan.org/about/logo/"} \emph{Stan Development Team} } The \pkg{brms} package provides an interface to fit Bayesian generalized multivariate (non-)linear multilevel models using \pkg{Stan}, which is a C++ package for obtaining full Bayesian inference (see \url{https://mc-stan.org/}). 
The formula syntax is an extended version of the syntax applied in the \pkg{lme4} package to provide a familiar and simple interface for performing regression analyses. } \details{ The main function of \pkg{brms} is \code{\link{brm}}, which uses formula syntax to specify a wide range of complex Bayesian models (see \code{\link{brmsformula}} for details). Based on the supplied formulas, data, and additional information, it writes the Stan code on the fly via \code{\link{make_stancode}}, prepares the data via \code{\link{make_standata}}, and fits the model using \pkg{\link[rstan:rstan]{Stan}}. Subsequently, a large number of post-processing methods can be applied: To get an overview on the estimated parameters, \code{\link[brms:summary.brmsfit]{summary}} or \code{\link[brms:conditional_effects.brmsfit]{conditional_effects}} are perfectly suited. Detailed visual analyses can be performed by applying the \code{\link{pp_check}} and \code{\link{stanplot}} methods, which both rely on the \pkg{\link[bayesplot:bayesplot-package]{bayesplot}} package. Model comparisons can be done via \code{\link{loo}} and \code{\link{waic}}, which make use of the \pkg{\link[loo:loo-package]{loo}} package as well as via \code{\link{bayes_factor}} which relies on the \pkg{bridgesampling} package. For a full list of methods to apply, type \code{methods(class = "brmsfit")}. Because \pkg{brms} is based on \pkg{Stan}, a C++ compiler is required. The program Rtools (available on \url{https://cran.r-project.org/bin/windows/Rtools/}) comes with a C++ compiler for Windows. On Mac, you should use Xcode. For further instructions on how to get the compilers running, see the prerequisites section at the \href{https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started}{RStan-Getting-Started} page. 
When comparing other packages fitting multilevel models to \pkg{brms}, keep in mind that the latter needs to compile models before actually fitting them, which will require between 20 and 40 seconds depending on your machine, operating system and overall model complexity. Thus, fitting smaller models may be relatively slow as compilation time makes up the majority of the whole running time. For larger / more complex models however, fitting my take several minutes or even hours, so that the compilation time won't make much of a difference for these models. See \code{vignette("brms_overview")} and \code{vignette("brms_multilevel")} for a general introduction and overview of \pkg{brms}. For a full list of available vignettes, type \code{vignette(package = "brms")}. } \references{ Paul-Christian Buerkner (2017). brms: An R Package for Bayesian Multilevel Models Using Stan. \emph{Journal of Statistical Software}, 80(1), 1-28. \code{doi:10.18637/jss.v080.i01} Paul-Christian Buerkner (2018). Advanced Bayesian Multilevel Modeling with the R Package brms. \emph{The R Journal}. 10(1), 395–411. \code{doi:10.32614/RJ-2018-017} The Stan Development Team. \emph{Stan Modeling Language User's Guide and Reference Manual}. \url{https://mc-stan.org/users/documentation/}. Stan Development Team (2020). RStan: the R interface to Stan. R package version 2.21.2. \url{https://mc-stan.org/} } \seealso{ \code{\link{brm}}, \code{\link{brmsformula}}, \code{\link{brmsfamily}}, \code{\link{brmsfit}} } brms/man/ma.Rd0000644000176200001440000000325514361545260012667 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{ma} \alias{ma} \title{Set up MA(q) correlation structures} \usage{ ma(time = NA, gr = NA, q = 1, cov = FALSE) } \arguments{ \item{time}{An optional time variable specifying the time ordering of the observations. 
By default, the existing order of the observations in the data is used.} \item{gr}{An optional grouping variable. If specified, the correlation structure is assumed to apply only to observations within the same grouping level.} \item{q}{A non-negative integer specifying the moving average (MA) order of the ARMA structure. Default is \code{1}.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. If the model family does not have natural residuals, latent residuals are added automatically. If \code{FALSE} (the default), a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{'arma_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up a moving average (MA) term of order q in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with MA terms. } \examples{ \dontrun{ data("LakeHuron") LakeHuron <- as.data.frame(LakeHuron) fit <- brm(x ~ ma(p = 2), data = LakeHuron) summary(fit) } } \seealso{ \code{\link{autocor-terms}}, \code{\link{arma}}, \code{\link{ar}} } brms/man/print.brmsfit.Rd0000644000176200001440000000133514213413565015066 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{print.brmsfit} \alias{print.brmsfit} \alias{print.brmssummary} \title{Print a summary for a fitted model represented by a \code{brmsfit} object} \usage{ \method{print}{brmsfit}(x, digits = 2, ...) } \arguments{ \item{x}{An object of class \code{brmsfit}} \item{digits}{The number of significant digits for printing out the summary; defaults to 2. 
The effective sample size is always rounded to integers.} \item{...}{Additional arguments that would be passed to method \code{summary} of \code{brmsfit}.} } \description{ Print a summary for a fitted model represented by a \code{brmsfit} object } \seealso{ \code{\link{summary.brmsfit}} } brms/man/inv_logit_scaled.Rd0000644000176200001440000000077313565500270015576 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/numeric-helpers.R \name{inv_logit_scaled} \alias{inv_logit_scaled} \title{Scaled inverse logit-link} \usage{ inv_logit_scaled(x, lb = 0, ub = 1) } \arguments{ \item{x}{A numeric or complex vector.} \item{lb}{Lower bound defaulting to \code{0}.} \item{ub}{Upper bound defaulting to \code{1}.} } \value{ A numeric or complex vector between \code{lb} and \code{ub}. } \description{ Computes \code{inv_logit(x) * (ub - lb) + lb} } brms/man/posterior_samples.brmsfit.Rd0000644000176200001440000000462314213413565017507 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_samples.R \name{posterior_samples.brmsfit} \alias{posterior_samples.brmsfit} \alias{posterior_samples} \title{(Deprecated) Extract Posterior Samples} \usage{ \method{posterior_samples}{brmsfit}( x, pars = NA, fixed = FALSE, add_chain = FALSE, subset = NULL, as.matrix = FALSE, as.array = FALSE, ... ) posterior_samples(x, pars = NA, ...) } \arguments{ \item{x}{An \code{R} object typically of class \code{brmsfit}} \item{pars}{Names of parameters for which posterior samples should be returned, as given by a character vector or regular expressions. By default, all posterior samples of all parameters are extracted.} \item{fixed}{Indicates whether parameter names should be matched exactly (\code{TRUE}) or treated as regular expressions (\code{FALSE}). Default is \code{FALSE}.} \item{add_chain}{A flag indicating if the returned \code{data.frame} should contain two additional columns. 
The \code{chain} column indicates the chain in which each sample was generated, the \code{iter} column indicates the iteration number within each chain.} \item{subset}{A numeric vector indicating the rows (i.e., posterior samples) to be returned. If \code{NULL} (the default), all posterior samples are returned.} \item{as.matrix}{Should the output be a \code{matrix} instead of a \code{data.frame}? Defaults to \code{FALSE}.} \item{as.array}{Should the output be an \code{array} instead of a \code{data.frame}? Defaults to \code{FALSE}.} \item{...}{Arguments passed to individual methods (if applicable).} } \value{ A data.frame (matrix or array) containing the posterior samples. } \description{ Extract posterior samples of specified parameters. The \code{posterior_samples} method is deprecated. We recommend using the more modern and consistent \code{\link[brms:draws-brms]{as_draws_*}} extractor functions of the \pkg{posterior} package instead. } \examples{ \dontrun{ fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "cumulative") # extract posterior samples of population-level effects samples1 <- posterior_samples(fit, pars = "^b") head(samples1) # extract posterior samples of group-level standard deviations samples2 <- posterior_samples(fit, pars = "^sd_") head(samples2) } } \seealso{ \code{\link[brms:draws-brms]{as_draws}}, \code{\link[brms:as.data.frame.brmsfit]{as.data.frame}} } brms/man/get_refmodel.brmsfit.Rd0000644000176200001440000000607614433142215016370 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/projpred.R \name{get_refmodel.brmsfit} \alias{get_refmodel.brmsfit} \title{Projection Predictive Variable Selection: Get Reference Model} \usage{ get_refmodel.brmsfit( object, newdata = NULL, resp = NULL, cvfun = NULL, dis = NULL, latent = FALSE, brms_seed = NULL, ... 
) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{cvfun}{Optional cross-validation function (see \code{\link[projpred:get_refmodel]{get_refmodel}} for details). If \code{NULL} (the default), \code{cvfun} is defined internally based on \code{\link{kfold.brmsfit}}.} \item{dis}{Passed to argument \code{dis} of \code{\link[projpred:init_refmodel]{init_refmodel}}, but leave this at \code{NULL} unless \pkg{projpred} complains about it.} \item{latent}{See argument \code{latent} of \code{\link[projpred:extend_family]{extend_family}}. Setting this to \code{TRUE} requires a \pkg{projpred} version >= 2.4.0.} \item{brms_seed}{A seed used to infer seeds for \code{\link{kfold.brmsfit}} and for sampling group-level effects for new levels (in multilevel models). If \code{NULL}, then \code{\link{set.seed}} is not called at all. If not \code{NULL}, then the pseudorandom number generator (PRNG) state is reset (to the state before calling this function) upon exiting this function.} \item{...}{Further arguments passed to \code{\link[projpred:init_refmodel]{init_refmodel}}.} } \value{ A \code{refmodel} object to be used in conjunction with the \pkg{projpred} package. } \description{ The \code{get_refmodel.brmsfit} method can be used to create the reference model structure which is needed by the \pkg{projpred} package for performing a projection predictive variable selection. 
This method is called automatically when performing variable selection via \code{\link[projpred:varsel]{varsel}} or \code{\link[projpred:cv_varsel]{cv_varsel}}, so you will rarely need to call it manually yourself. } \details{ Note that the \code{extract_model_data} function used internally by \code{get_refmodel.brmsfit} ignores arguments \code{wrhs} and \code{orhs}. This is relevant for \code{\link[projpred:predict.refmodel]{predict.refmodel}}, for example. } \examples{ \dontrun{ # fit a simple model fit <- brm(count ~ zAge + zBase * Trt, data = epilepsy, family = poisson()) summary(fit) # The following code requires the 'projpred' package to be installed: library(projpred) # perform variable selection without cross-validation vs <- varsel(fit) summary(vs) plot(vs) # perform variable selection with cross-validation cv_vs <- cv_varsel(fit) summary(cv_vs) plot(cv_vs) } } brms/man/is.brmsfit.Rd0000644000176200001440000000046414160105076014343 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-class.R \name{is.brmsfit} \alias{is.brmsfit} \title{Checks if argument is a \code{brmsfit} object} \usage{ is.brmsfit(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{brmsfit} object } brms/man/epilepsy.Rd0000644000176200001440000000365614213413565014127 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{epilepsy} \alias{epilepsy} \title{Epileptic seizure counts} \format{ A data frame of 236 observations containing information on the following 9 variables. 
\describe{ \item{Age}{The age of the patients in years} \item{Base}{The seizure count at 8-weeks baseline} \item{Trt}{Either \code{0} or \code{1} indicating if the patient received anti-convulsant therapy} \item{patient}{The patient number} \item{visit}{The session number from \code{1} (first visit) to \code{4} (last visit)} \item{count}{The seizure count between two visits} \item{obs}{The observation number, that is a unique identifier for each observation} \item{zAge}{Standardized \code{Age}} \item{zBase}{Standardized \code{Base}} } } \source{ Thall, P. F., & Vail, S. C. (1990). Some covariance models for longitudinal count data with overdispersion. \emph{Biometrics, 46(2)}, 657-671. \cr Breslow, N. E., & Clayton, D. G. (1993). Approximate inference in generalized linear mixed models. \emph{Journal of the American Statistical Association}, 88(421), 9-25. } \usage{ epilepsy } \description{ Breslow and Clayton (1993) analyze data initially provided by Thall and Vail (1990) concerning seizure counts in a randomized trial of anti-convulsant therapy in epilepsy. Covariates are treatment, 8-week baseline seizure counts, and age of the patients in years. } \examples{ \dontrun{ ## poisson regression without random effects. fit1 <- brm(count ~ zAge + zBase * Trt, data = epilepsy, family = poisson()) summary(fit1) plot(fit1) ## poisson regression with varying intercepts of patients ## as well as normal priors for overall effects parameters. 
fit2 <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), prior = set_prior("normal(0,5)")) summary(fit2) plot(fit2) } } \keyword{datasets} brms/man/cor_arr.Rd0000644000176200001440000000162514160105076013712 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_arr} \alias{cor_arr} \title{(Defunct) ARR correlation structure} \usage{ cor_arr(formula = ~1, r = 1) } \arguments{ \item{formula}{A one sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer valued. When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} \item{r}{No longer supported.} } \description{ The ARR correlation structure is no longer supported. 
} \keyword{internal} brms/man/GenExtremeValue.Rd0000644000176200001440000000254114403575116015327 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{GenExtremeValue} \alias{GenExtremeValue} \alias{dgen_extreme_value} \alias{pgen_extreme_value} \alias{qgen_extreme_value} \alias{rgen_extreme_value} \title{The Generalized Extreme Value Distribution} \usage{ dgen_extreme_value(x, mu = 0, sigma = 1, xi = 0, log = FALSE) pgen_extreme_value( q, mu = 0, sigma = 1, xi = 0, lower.tail = TRUE, log.p = FALSE ) qgen_extreme_value( p, mu = 0, sigma = 1, xi = 0, lower.tail = TRUE, log.p = FALSE ) rgen_extreme_value(n, mu = 0, sigma = 1, xi = 0) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of locations.} \item{sigma}{Vector of scales.} \item{xi}{Vector of shapes.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{p}{Vector of probabilities.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, and random generation for the generalized extreme value distribution with location \code{mu}, scale \code{sigma} and shape \code{xi}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/emmeans-brms-helpers.Rd0000644000176200001440000000605214253041545016313 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/emmeans.R \name{emmeans-brms-helpers} \alias{emmeans-brms-helpers} \alias{recover_data.brmsfit} \alias{emm_basis.brmsfit} \title{Support Functions for \pkg{emmeans}} \usage{ recover_data.brmsfit( object, data, resp = NULL, dpar = NULL, nlpar = NULL, re_formula = NA, epred = FALSE, ... 
) emm_basis.brmsfit( object, trms, xlev, grid, vcov., resp = NULL, dpar = NULL, nlpar = NULL, re_formula = NA, epred = FALSE, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{data, trms, xlev, grid, vcov.}{Arguments required by \pkg{emmeans}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{dpar}{Optional name of a predicted distributional parameter. If specified, expected predictions of this parameters are returned.} \item{nlpar}{Optional name of a predicted non-linear parameter. If specified, expected predictions of this parameters are returned.} \item{re_formula}{Optional formula containing group-level effects to be considered in the prediction. If \code{NULL}, include all group-level effects; if \code{NA} (default), include no group-level effects.} \item{epred}{Logical. If \code{TRUE} compute predictions of the posterior predictive distribution's mean (see \code{\link{posterior_epred.brmsfit}}) while ignoring arguments \code{dpar} and \code{nlpar}. Defaults to \code{FALSE}. If you have specified a response transformation within the formula, you need to set \code{epred} to \code{TRUE} for \pkg{emmeans} to detect this transformation.} \item{...}{Additional arguments passed to \pkg{emmeans}.} } \description{ Functions required for compatibility of \pkg{brms} with \pkg{emmeans}. Users are not required to call these functions themselves. Instead, they will be called automatically by the \code{emmeans} function of the \pkg{emmeans} package. } \details{ In order to ensure compatibility of most \pkg{brms} models with \pkg{emmeans}, predictions are not generated 'manually' via a design matrix and coefficient vector, but rather via \code{\link{posterior_linpred.brmsfit}}. This appears to generally work well, but note that it produces an `.@linfct` slot that contains the computed predictions as columns instead of the coefficients. 
} \examples{ \dontrun{ fit1 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), data = kidney, family = lognormal()) summary(fit1) # summarize via 'emmeans' library(emmeans) rg <- ref_grid(fit1) em <- emmeans(rg, "disease") summary(em, point.est = mean) # obtain estimates for the posterior predictive distribution's mean epred <- emmeans(fit1, "disease", epred = TRUE) summary(epred, point.est = mean) # model with transformed response variable fit2 <- brm(log(mpg) ~ factor(cyl), data = mtcars) summary(fit2) # results will be on the log scale by default emmeans(fit2, ~ cyl) # log transform is detected and can be adjusted automatically emmeans(fit2, ~ cyl, epred = TRUE, type = "response") } } brms/man/LogisticNormal.Rd0000644000176200001440000000216514275436221015217 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{LogisticNormal} \alias{LogisticNormal} \alias{dlogistic_normal} \alias{rlogistic_normal} \title{The (Multivariate) Logistic Normal Distribution} \usage{ dlogistic_normal(x, mu, Sigma, refcat = 1, log = FALSE, check = FALSE) rlogistic_normal(n, mu, Sigma, refcat = 1, check = FALSE) } \arguments{ \item{x}{Vector or matrix of quantiles. If \code{x} is a matrix, each row is taken to be a quantile.} \item{mu}{Mean vector with length equal to the number of dimensions.} \item{Sigma}{Covariance matrix.} \item{refcat}{A single integer indicating the reference category. Defaults to \code{1}.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{check}{Logical; Indicates whether several input checks should be performed. Defaults to \code{FALSE} to improve efficiency.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density function and random generation for the (multivariate) logistic normal distribution with latent mean vector \code{mu} and covariance matrix \code{Sigma}. 
} brms/man/cor_ar.Rd0000644000176200001440000000376014213413565013536 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_ar} \alias{cor_ar} \title{(Deprecated) AR(p) correlation structure} \usage{ cor_ar(formula = ~1, p = 1, cov = FALSE) } \arguments{ \item{formula}{A one sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer valued. When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} \item{p}{A non-negative integer specifying the autoregressive (AR) order of the ARMA structure. Default is 1.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. If the model family does not have natural residuals, latent residuals are added automatically. If \code{FALSE} (the default) a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{cor_arma} containing solely autoregression terms. } \description{ This function is deprecated. Please see \code{\link{ar}} for the new syntax. This function is a constructor for the \code{cor_arma} class, allowing for autoregression terms only. } \details{ AR refers to autoregressive effects of residuals, which is what is typically understood as autoregressive effects. 
However, one may also model autoregressive effects of the response variable, which is called ARR in \pkg{brms}. } \examples{ cor_ar(~visit|patient, p = 2) } \seealso{ \code{\link{cor_arma}} } brms/man/brmsformula.Rd0000644000176200001440000010346614361545260014630 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{brmsformula} \alias{brmsformula} \alias{bf} \title{Set up a model formula for use in \pkg{brms}} \usage{ brmsformula( formula, ..., flist = NULL, family = NULL, autocor = NULL, nl = NULL, loop = NULL, center = NULL, cmc = NULL, sparse = NULL, decomp = NULL, unused = NULL ) } \arguments{ \item{formula}{An object of class \code{formula} (or one that can be coerced to that class): a symbolic description of the model to be fitted. The details of model specification are given in 'Details'.} \item{...}{Additional \code{formula} objects to specify predictors of non-linear and distributional parameters. Formulas can either be named directly or contain names on their left-hand side. Alternatively, it is possible to fix parameters to certain values by passing numbers or character strings in which case arguments have to be named to provide the parameter names. See 'Details' for more information.} \item{flist}{Optional list of formulas, which are treated in the same way as formulas passed via the \code{...} argument.} \item{family}{Same argument as in \code{\link{brm}}. If \code{family} is specified in \code{brmsformula}, it will overwrite the value specified in other functions.} \item{autocor}{An optional \code{formula} which contains autocorrelation terms as described in \code{\link{autocor-terms}} or alternatively a \code{\link{cor_brms}} object (deprecated). If \code{autocor} is specified in \code{brmsformula}, it will overwrite the value specified in other functions.} \item{nl}{Logical; Indicates whether \code{formula} should be treated as specifying a non-linear model. 
By default, \code{formula} is treated as an ordinary linear model formula.} \item{loop}{Logical; Only used in non-linear models. Indicates if the computation of the non-linear formula should be done inside (\code{TRUE}) or outside (\code{FALSE}) a loop over observations. Defaults to \code{TRUE}.} \item{center}{Logical; Indicates if the population-level design matrix should be centered, which usually increases sampling efficiency. See the 'Details' section for more information. Defaults to \code{TRUE} for distributional parameters and to \code{FALSE} for non-linear parameters.} \item{cmc}{Logical; Indicates whether automatic cell-mean coding should be enabled when removing the intercept by adding \code{0} to the right-hand of model formulas. Defaults to \code{TRUE} to mirror the behavior of standard \R formula parsing.} \item{sparse}{Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. Sampling speed is currently not improved or even slightly decreased.} \item{decomp}{Optional name of the decomposition used for the population-level design matrix. Defaults to \code{NULL} that is no decomposition. Other options currently available are \code{"QR"} for the QR decomposition that helps in fitting models with highly correlated predictors.} \item{unused}{An optional \code{formula} which contains variables that are unused in the model but should still be stored in the model's data frame. This can be useful, for example, if those variables are required for post-processing the model.} } \value{ An object of class \code{brmsformula}, which is essentially a \code{list} containing all model formulas as well as some additional information. 
} \description{ Set up a model formula for use in the \pkg{brms} package allowing to define (potentially non-linear) additive multilevel models for all parameters of the assumed response distribution. } \details{ \bold{General formula structure} The \code{formula} argument accepts formulas of the following syntax: \code{response | aterms ~ pterms + (gterms | group)} The \code{pterms} part contains effects that are assumed to be the same across observations. We call them 'population-level' or 'overall' effects, or (adopting frequentist vocabulary) 'fixed' effects. The optional \code{gterms} part may contain effects that are assumed to vary across grouping variables specified in \code{group}. We call them 'group-level' or 'varying' effects, or (adopting frequentist vocabulary) 'random' effects, although the latter name is misleading in a Bayesian context. For more details type \code{vignette("brms_overview")} and \code{vignette("brms_multilevel")}. \bold{Group-level terms} Multiple grouping factors each with multiple group-level effects are possible. (Of course we can also run models without any group-level effects.) Instead of \code{|} you may use \code{||} in grouping terms to prevent correlations from being modeled. Equivalently, the \code{cor} argument of the \code{\link{gr}} function can be used for this purpose, for example, \code{(1 + x || g)} is equivalent to \code{(1 + x | gr(g, cor = FALSE))}. It is also possible to model different group-level terms of the same grouping factor as correlated (even across different formulas, e.g., in non-linear models) by using \code{||} instead of \code{|}. All group-level terms sharing the same ID will be modeled as correlated. If, for instance, one specifies the terms \code{(1+x|i|g)} and \code{(1+z|i|g)} somewhere in the formulas passed to \code{brmsformula}, correlations between the corresponding group-level effects will be estimated. 
In the above example, \code{i} is not a variable in the data but just a symbol to indicate correlations between multiple group-level terms. Equivalently, the \code{id} argument of the \code{\link{gr}} function can be used as well, for example, \code{(1 + x | gr(g, id = "i"))}. If levels of the grouping factor belong to different sub-populations, it may be reasonable to assume a different covariance matrix for each of the sub-populations. For instance, the variation within the treatment group and within the control group in a randomized control trial might differ. Suppose that \code{y} is the outcome, and \code{x} is the factor indicating the treatment and control group. Then, we could estimate different hyper-parameters of the varying effects (in this case a varying intercept) for treatment and control group via \code{y ~ x + (1 | gr(subject, by = x))}. You can specify multi-membership terms using the \code{\link{mm}} function. For instance, a multi-membership term with two members could be \code{(1 | mm(g1, g2))}, where \code{g1} and \code{g2} specify the first and second member, respectively. Moreover, if a covariate \code{x} varies across the levels of the grouping-factors \code{g1} and \code{g2}, we can save the respective covariate values in the variables \code{x1} and \code{x2} and then model the varying effect as \code{(1 + mmc(x1, x2) | mm(g1, g2))}. \bold{Special predictor terms} Flexible non-linear smooth terms can modeled using the \code{\link{s}} and \code{\link{t2}} functions in the \code{pterms} part of the model formula. This allows to fit generalized additive mixed models (GAMMs) with \pkg{brms}. The implementation is similar to that used in the \pkg{gamm4} package. For more details on this model class see \code{\link[mgcv:gam]{gam}} and \code{\link[mgcv:gamm]{gamm}}. Gaussian process terms can be fitted using the \code{\link{gp}} function in the \code{pterms} part of the model formula. 
Similar to smooth terms, Gaussian processes can be used to model complex non-linear relationships, for instance temporal or spatial autocorrelation. However, they are computationally demanding and are thus not recommended for very large datasets or approximations need to be used. The \code{pterms} and \code{gterms} parts may contain four non-standard effect types namely monotonic, measurement error, missing value, and category specific effects, which can be specified using terms of the form \code{mo(predictor)}, \code{me(predictor, sd_predictor)}, \code{mi(predictor)}, and \code{cs()}, respectively. Category specific effects can only be estimated in ordinal models and are explained in more detail in the package's main vignette (type \code{vignette("brms_overview")}). The other three effect types are explained in the following. A monotonic predictor must either be integer valued or an ordered factor, which is the first difference to an ordinary continuous predictor. More importantly, predictor categories (or integers) are not assumed to be equidistant with respect to their effect on the response variable. Instead, the distance between adjacent predictor categories (or integers) is estimated from the data and may vary across categories. This is realized by parameterizing as follows: One parameter takes care of the direction and size of the effect similar to an ordinary regression parameter, while an additional parameter vector estimates the normalized distances between consecutive predictor categories. A main application of monotonic effects are ordinal predictors that can this way be modeled without (falsely) treating them as continuous or as unordered categorical predictors. For more details and examples see \code{vignette("brms_monotonic")}. Quite often, predictors are measured and as such naturally contain measurement error. 
Although most researchers are well aware of this problem, measurement error in predictors is ignored in most regression analyses, possibly because only few packages allow for modeling it. Notably, measurement error can be handled in structural equation models, but many more general regression models (such as those featured by \pkg{brms}) cannot be transferred to the SEM framework. In \pkg{brms}, effects of noise-free predictors can be modeled using the \code{me} (for 'measurement error') function. If, say, \code{y} is the response variable and \code{x} is a measured predictor with known measurement error \code{sdx}, we can simply include it on the right-hand side of the model formula via \code{y ~ me(x, sdx)}. This can easily be extended to more general formulas. If \code{x2} is another measured predictor with corresponding error \code{sdx2} and \code{z} is a predictor without error (e.g., an experimental setting), we can model all main effects and interactions of the three predictors in the well known manner: \code{y ~ me(x, sdx) * me(x2, sdx2) * z}. The \code{me} function is soft deprecated in favor of the more flexible and consistent \code{mi} function (see below). When a variable contains missing values, the corresponding rows will be excluded from the data by default (row-wise exclusion). However, quite often we want to keep these rows and instead estimate the missing values. There are two approaches for this: (a) Impute missing values before the model fitting for instance via multiple imputation (see \code{\link{brm_multiple}} for a way to handle multiple imputed datasets). (b) Impute missing values on the fly during model fitting. The latter approach is explained in the following. Using a variable with missing values as predictors requires two things, First, we need to specify that the predictor contains missings that should to be imputed. 
If, say, \code{y} is the primary response, \code{x} is a predictor with missings and \code{z} is a predictor without missings, we go for \code{y ~ mi(x) + z}. Second, we need to model \code{x} as an additional response with corresponding predictors and the addition term \code{mi()}. In our example, we could write \code{x | mi() ~ z}. Measurement error may be included via the \code{sdy} argument, say, \code{x | mi(sdy = se) ~ z}. See \code{\link{mi}} for examples with real data. \bold{Autocorrelation terms} Autocorrelation terms can be directly specified inside the \code{pterms} part as well. Details can be found in \code{\link{autocor-terms}}. \bold{Additional response information} Another special of the \pkg{brms} formula syntax is the optional \code{aterms} part, which may contain multiple terms of the form \code{fun()} separated by \code{+} each providing special information on the response variable. \code{fun} can be replaced with either \code{se}, \code{weights}, \code{subset}, \code{cens}, \code{trunc}, \code{trials}, \code{cat}, \code{dec}, \code{rate}, \code{vreal}, or \code{vint}. Their meanings are explained below (see also \code{\link{addition-terms}}). For families \code{gaussian}, \code{student} and \code{skew_normal}, it is possible to specify standard errors of the observations, thus allowing to perform meta-analysis. Suppose that the variable \code{yi} contains the effect sizes from the studies and \code{sei} the corresponding standard errors. Then, fixed and random effects meta-analyses can be conducted using the formulas \code{yi | se(sei) ~ 1} and \code{yi | se(sei) ~ 1 + (1|study)}, respectively, where \code{study} is a variable uniquely identifying every study. If desired, meta-regression can be performed via \code{yi | se(sei) ~ 1 + mod1 + mod2 + (1|study)} or \cr \code{yi | se(sei) ~ 1 + mod1 + mod2 + (1 + mod1 + mod2|study)}, where \code{mod1} and \code{mod2} represent moderator variables. 
By default, the standard errors replace the parameter \code{sigma}. To model \code{sigma} in addition to the known standard errors, set argument \code{sigma} in function \code{se} to \code{TRUE}, for instance, \code{yi | se(sei, sigma = TRUE) ~ 1}. For all families, weighted regression may be performed using \code{weights} in the \code{aterms} part. Internally, this is implemented by multiplying the log-posterior values of each observation by their corresponding weights. Suppose that variable \code{wei} contains the weights and that \code{yi} is the response variable. Then, formula \code{yi | weights(wei) ~ predictors} implements a weighted regression. For multivariate models, \code{subset} may be used in the \code{aterms} part, to use different subsets of the data in different univariate models. For instance, if \code{sub} is a logical variable and \code{y} is the response of one of the univariate models, we may write \code{y | subset(sub) ~ predictors} so that \code{y} is predicted only for those observations for which \code{sub} evaluates to \code{TRUE}. For log-linear models such as poisson models, \code{rate} may be used in the \code{aterms} part to specify the denominator of a response that is expressed as a rate. The numerator is given by the actual response variable and has a distribution according to the family as usual. Using \code{rate(denom)} is equivalent to adding \code{offset(log(denom))} to the linear predictor of the main parameter but the former is arguably more convenient and explicit. With the exception of categorical and ordinal families, left, right, and interval censoring can be modeled through \code{y | cens(censored) ~ predictors}. 
The censoring variable (named \code{censored} in this example) should contain the values \code{'left'}, \code{'none'}, \code{'right'}, and \code{'interval'} (or equivalently \code{-1}, \code{0}, \code{1}, and \code{2}) to indicate that the corresponding observation is left censored, not censored, right censored, or interval censored. For interval censored data, a second variable (let's call it \code{y2}) has to be passed to \code{cens}. In this case, the formula has the structure \code{y | cens(censored, y2) ~ predictors}. While the lower bounds are given in \code{y}, the upper bounds are given in \code{y2} for interval censored data. Intervals are assumed to be open on the left and closed on the right: \code{(y, y2]}. With the exception of categorical and ordinal families, the response distribution can be truncated using the \code{trunc} function in the addition part. If the response variable is truncated between, say, 0 and 100, we can specify this via \code{yi | trunc(lb = 0, ub = 100) ~ predictors}. Instead of numbers, variables in the data set can also be passed allowing for varying truncation points across observations. Defining only one of the two arguments in \code{trunc} leads to one-sided truncation. For all continuous families, missing values in the responses can be imputed within Stan by using the addition term \code{mi}. This is mostly useful in combination with \code{mi} predictor terms as explained above under 'Special predictor terms'. For families \code{binomial} and \code{zero_inflated_binomial}, addition should contain a variable indicating the number of trials underlying each observation. In \code{lme4} syntax, we may write for instance \code{cbind(success, n - success)}, which is equivalent to \code{success | trials(n)} in \pkg{brms} syntax. If the number of trials is constant across all observations, say \code{10}, we may also write \code{success | trials(10)}. 
\bold{Please note that the \code{cbind()} syntax will not work in \pkg{brms} in the expected way because this syntax is reserved for other purposes.} For all ordinal families, \code{aterms} may contain a term \code{thres(number)} to specify the number thresholds (e.g, \code{thres(6)}), which should be equal to the total number of response categories - 1. If not given, the number of thresholds is calculated from the data. If different threshold vectors should be used for different subsets of the data, the \code{gr} argument can be used to provide the grouping variable (e.g, \code{thres(6, gr = item)}, if \code{item} is the grouping variable). In this case, the number of thresholds can also be a variable in the data with different values per group. A deprecated quasi alias of \code{thres()} is \code{cat()} with which the total number of response categories (i.e., number of thresholds + 1) can be specified. In Wiener diffusion models (family \code{wiener}) the addition term \code{dec} is mandatory to specify the (vector of) binary decisions corresponding to the reaction times. Non-zero values will be treated as a response on the upper boundary of the diffusion process and zeros will be treated as a response on the lower boundary. Alternatively, the variable passed to \code{dec} might also be a character vector consisting of \code{'lower'} and \code{'upper'}. All families support the \code{index} addition term to uniquely identify each observation of the corresponding response variable. Currently, \code{index} is primarily useful in combination with the \code{subset} addition and \code{\link{mi}} terms. For custom families, it is possible to pass an arbitrary number of real and integer vectors via the addition terms \code{vreal} and \code{vint}, respectively. An example is provided in \code{vignette('brms_customfamilies')}. To pass multiple vectors of the same data type, provide them separated by commas inside a single \code{vreal} or \code{vint} statement. 
Multiple addition terms of different types may be specified at the same time using the \code{+} operator. For example, the formula \code{formula = yi | se(sei) + cens(censored) ~ 1} implies a censored meta-analytic model. The addition argument \code{disp} (short for dispersion) has been removed in version 2.0. You may instead use the distributional regression approach by specifying \code{sigma ~ 1 + offset(log(xdisp))} or \code{shape ~ 1 + offset(log(xdisp))}, where \code{xdisp} is the variable being previously passed to \code{disp}. \bold{Parameterization of the population-level intercept} By default, the population-level intercept (if incorporated) is estimated separately and not as part of population-level parameter vector \code{b} As a result, priors on the intercept also have to be specified separately. Furthermore, to increase sampling efficiency, the population-level design matrix \code{X} is centered around its column means \code{X_means} if the intercept is incorporated. This leads to a temporary bias in the intercept equal to \code{}, where \code{<,>} is the scalar product. The bias is corrected after fitting the model, but be aware that you are effectively defining a prior on the intercept of the centered design matrix not on the real intercept. You can turn off this special handling of the intercept by setting argument \code{center} to \code{FALSE}. For more details on setting priors on population-level intercepts, see \code{\link{set_prior}}. This behavior can be avoided by using the reserved (and internally generated) variable \code{Intercept}. Instead of \code{y ~ x}, you may write \code{y ~ 0 + Intercept + x}. This way, priors can be defined on the real intercept, directly. In addition, the intercept is just treated as an ordinary population-level effect and thus priors defined on \code{b} will also apply to it. Note that this parameterization may be less efficient than the default parameterization discussed above. 
\bold{Formula syntax for non-linear models} In \pkg{brms}, it is possible to specify non-linear models of arbitrary complexity. The non-linear model can just be specified within the \code{formula} argument. Suppose, that we want to predict the response \code{y} through the predictor \code{x}, where \code{x} is linked to \code{y} through \code{y = alpha - beta * lambda^x}, with parameters \code{alpha}, \code{beta}, and \code{lambda}. This is certainly a non-linear model being defined via \code{formula = y ~ alpha - beta * lambda^x} (addition arguments can be added in the same way as for ordinary formulas). To tell \pkg{brms} that this is a non-linear model, we set argument \code{nl} to \code{TRUE}. Now we have to specify a model for each of the non-linear parameters. Let's say we just want to estimate those three parameters with no further covariates or random effects. Then we can pass \code{alpha + beta + lambda ~ 1} or equivalently (and more flexible) \code{alpha ~ 1, beta ~ 1, lambda ~ 1} to the \code{...} argument. This can, of course, be extended. If we have another predictor \code{z} and observations nested within the grouping factor \code{g}, we may write for instance \code{alpha ~ 1, beta ~ 1 + z + (1|g), lambda ~ 1}. The formula syntax described above applies here as well. In this example, we are using \code{z} and \code{g} only for the prediction of \code{beta}, but we might also use them for the other non-linear parameters (provided that the resulting model is still scientifically reasonable). By default, non-linear covariates are treated as real vectors in Stan. However, if the data of the covariates is of type `integer` in \R (which can be enforced by the `as.integer` function), the Stan type will be changed to an integer array. That way, covariates can also be used for indexing purposes in Stan. Non-linear models may not be uniquely identified and / or show bad convergence. For this reason it is mandatory to specify priors on the non-linear parameters. 
For instructions on how to do that, see \code{\link{set_prior}}. For some examples of non-linear models, see \code{vignette("brms_nonlinear")}. \bold{Formula syntax for predicting distributional parameters} It is also possible to predict parameters of the response distribution such as the residual standard deviation \code{sigma} in gaussian models or the hurdle probability \code{hu} in hurdle models. The syntax closely resembles that of a non-linear parameter, for instance \code{sigma ~ x + s(z) + (1+x|g)}. For some examples of distributional models, see \code{vignette("brms_distreg")}. Parameter \code{mu} exists for every family and can be used as an alternative to specifying terms in \code{formula}. If both \code{mu} and \code{formula} are given, the right-hand side of \code{formula} is ignored. Accordingly, specifying terms on the right-hand side of both \code{formula} and \code{mu} at the same time is deprecated. In future versions, \code{formula} might be updated by \code{mu}. The following are distributional parameters of specific families (all other parameters are treated as non-linear parameters): \code{sigma} (residual standard deviation or scale of the \code{gaussian}, \code{student}, \code{skew_normal}, \code{lognormal} \code{exgaussian}, and \code{asym_laplace} families); \code{shape} (shape parameter of the \code{Gamma}, \code{weibull}, \code{negbinomial}, and related zero-inflated / hurdle families); \code{nu} (degrees of freedom parameter of the \code{student} and \code{frechet} families); \code{phi} (precision parameter of the \code{beta} and \code{zero_inflated_beta} families); \code{kappa} (precision parameter of the \code{von_mises} family); \code{beta} (mean parameter of the exponential component of the \code{exgaussian} family); \code{quantile} (quantile parameter of the \code{asym_laplace} family); \code{zi} (zero-inflation probability); \code{hu} (hurdle probability); \code{zoi} (zero-one-inflation probability); \code{coi} (conditional 
one-inflation probability); \code{disc} (discrimination) for ordinal models; \code{bs}, \code{ndt}, and \code{bias} (boundary separation, non-decision time, and initial bias of the \code{wiener} diffusion model). By default, distributional parameters are modeled on the log scale if they can be positive only or on the logit scale if they can only be within the unit interval. Alternatively, one may fix distributional parameters to certain values. However, this is mainly useful when models become too complicated and otherwise have convergence issues. We thus suggest to be generally careful when making use of this option. The \code{quantile} parameter of the \code{asym_laplace} distribution is a good example where it is useful. By fixing \code{quantile}, one can perform quantile regression for the specified quantile. For instance, \code{quantile = 0.25} allows predicting the 25\%-quantile. Furthermore, the \code{bias} parameter in drift-diffusion models is assumed to be \code{0.5} (i.e. no bias) in many applications. To achieve this, simply write \code{bias = 0.5}. Other possible applications are the Cauchy distribution as a special case of the Student-t distribution with \code{nu = 1}, or the geometric distribution as a special case of the negative binomial distribution with \code{shape = 1}. Furthermore, the parameter \code{disc} ('discrimination') in ordinal models is fixed to \code{1} by default and not estimated, but may be modeled as any other distributional parameter if desired (see examples). For reasons of identification, \code{'disc'} can only be positive, which is achieved by applying the log-link. In categorical models, distributional parameters do not have fixed names. Instead, they are named after the response categories (excluding the first one, which serves as the reference category), with the prefix \code{'mu'}. 
If, for instance, categories are named \code{cat1}, \code{cat2}, and \code{cat3}, the distributional parameters will be named \code{mucat2} and \code{mucat3}. Some distributional parameters currently supported by \code{brmsformula} have to be positive (a negative standard deviation or precision parameter does not make any sense) or are bounded between 0 and 1 (for zero-inflated / hurdle probabilities, quantiles, or the initial bias parameter of drift-diffusion models). However, linear predictors can be positive or negative, and thus the log link (for positive parameters) or logit link (for probability parameters) are used by default to ensure that distributional parameters are within their valid intervals. This implies that, by default, effects for such distributional parameters are estimated on the log / logit scale and one has to apply the inverse link function to get to the effects on the original scale. Alternatively, it is possible to use the identity link to predict parameters on their original scale, directly. However, this is much more likely to lead to problems in the model fitting, if the parameter actually has a restricted range. See also \code{\link{brmsfamily}} for an overview of valid link functions. \bold{Formula syntax for mixture models} The specification of mixture models closely resembles that of non-mixture models. If not specified otherwise (see below), all mean parameters of the mixture components are predicted using the right-hand side of \code{formula}. All types of predictor terms allowed in non-mixture models are allowed in mixture models as well. Distributional parameters of mixture distributions have the same name as those of the corresponding ordinary distributions, but with a number at the end to indicate the mixture component. For instance, if you use family \code{mixture(gaussian, gaussian)}, the distributional parameters are \code{sigma1} and \code{sigma2}. Distributional parameters of the same class can be fixed to the same value. 
For the above example, we could write \code{sigma2 = "sigma1"} to make sure that both components have the same residual standard deviation, which is in turn estimated from the data. In addition, there are two types of special distributional parameters. The first are named \code{mu}, that allow for modeling different predictors for the mean parameters of different mixture components. For instance, if you want to predict the mean of the first component using predictor \code{x} and the mean of the second component using predictor \code{z}, you can write \code{mu1 ~ x} as well as \code{mu2 ~ z}. The second are named \code{theta}, which constitute the mixing proportions. If the mixing proportions are fixed to certain values, they are internally normalized to form a probability vector. If one seeks to predict the mixing proportions, all but one of them has to be predicted, while the remaining one is used as the reference category to identify the model. The so-called 'softmax' transformation is applied on the linear predictor terms to form a probability vector. For more information on mixture models, see the documentation of \code{\link{mixture}}. \bold{Formula syntax for multivariate models} Multivariate models may be specified using \code{mvbind} notation or with help of the \code{\link{mvbf}} function. Suppose that \code{y1} and \code{y2} are response variables and \code{x} is a predictor. Then \code{mvbind(y1, y2) ~ x} specifies a multivariate model. The effects of all terms specified at the RHS of the formula are assumed to vary across response variables. For instance, two parameters will be estimated for \code{x}, one for the effect on \code{y1} and another for the effect on \code{y2}. This is also true for group-level effects. When writing, for instance, \code{mvbind(y1, y2) ~ x + (1+x|g)}, group-level effects will be estimated separately for each response. To model these effects as correlated across responses, use the ID syntax (see above). 
For the present example, this would look as follows: \code{mvbind(y1, y2) ~ x + (1+x|2|g)}. Of course, you could also use any value other than \code{2} as ID. It is also possible to specify different formulas for different responses. If, for instance, \code{y1} should be predicted by \code{x} and \code{y2} should be predicted by \code{z}, we could write \code{mvbf(y1 ~ x, y2 ~ z)}. Alternatively, multiple \code{brmsformula} objects can be added to specify a joint multivariate model (see 'Examples'). } \examples{ # multilevel model with smoothing terms brmsformula(y ~ x1*x2 + s(z) + (1+x1|1) + (1|g2)) # additionally predict 'sigma' brmsformula(y ~ x1*x2 + s(z) + (1+x1|1) + (1|g2), sigma ~ x1 + (1|g2)) # use the shorter alias 'bf' (formula1 <- brmsformula(y ~ x + (x|g))) (formula2 <- bf(y ~ x + (x|g))) # will be TRUE identical(formula1, formula2) # incorporate censoring bf(y | cens(censor_variable) ~ predictors) # define a simple non-linear model bf(y ~ a1 - a2^x, a1 + a2 ~ 1, nl = TRUE) # predict a1 and a2 differently bf(y ~ a1 - a2^x, a1 ~ 1, a2 ~ x + (x|g), nl = TRUE) # correlated group-level effects across parameters bf(y ~ a1 - a2^x, a1 ~ 1 + (1 |2| g), a2 ~ x + (x |2| g), nl = TRUE) # alternative but equivalent way to specify the above model bf(y ~ a1 - a2^x, a1 ~ 1 + (1 | gr(g, id = 2)), a2 ~ x + (x | gr(g, id = 2)), nl = TRUE) # define a multivariate model bf(mvbind(y1, y2) ~ x * z + (1|g)) # define a zero-inflated model # also predicting the zero-inflation part bf(y ~ x * z + (1+x|ID1|g), zi ~ x + (1|ID1|g)) # specify a predictor as monotonic bf(y ~ mo(x) + more_predictors) # for ordinal models only # specify a predictor as category specific bf(y ~ cs(x) + more_predictors) # add a category specific group-level intercept bf(y ~ cs(x) + (cs(1)|g)) # specify parameter 'disc' bf(y ~ person + item, disc ~ item) # specify variables containing measurement error bf(y ~ me(x, sdx)) # specify predictors on all parameters of the wiener diffusion model # the main 
formula models the drift rate 'delta' bf(rt | dec(decision) ~ x, bs ~ x, ndt ~ x, bias ~ x) # fix the bias parameter to 0.5 bf(rt | dec(decision) ~ x, bias = 0.5) # specify different predictors for different mixture components mix <- mixture(gaussian, gaussian) bf(y ~ 1, mu1 ~ x, mu2 ~ z, family = mix) # fix both residual standard deviations to the same value bf(y ~ x, sigma2 = "sigma1", family = mix) # use the '+' operator to specify models bf(y ~ 1) + nlf(sigma ~ a * exp(b * x), a ~ x) + lf(b ~ z + (1|g), dpar = "sigma") + gaussian() # specify a multivariate model using the '+' operator bf(y1 ~ x + (1|g)) + gaussian() + cor_ar(~1|g) + bf(y2 ~ z) + poisson() # specify correlated residuals of a gaussian and a poisson model form1 <- bf(y1 ~ 1 + x + (1|c|obs), sigma = 1) + gaussian() form2 <- bf(y2 ~ 1 + x + (1|c|obs)) + poisson() # model missing values in predictors bf(bmi ~ age * mi(chl)) + bf(chl | mi() ~ age) + set_rescor(FALSE) # model sigma as a function of the mean bf(y ~ eta, nl = TRUE) + lf(eta ~ 1 + x) + nlf(sigma ~ tau * sqrt(eta)) + lf(tau ~ 1) } \seealso{ \code{\link{mvbrmsformula}}, \code{\link{brmsformula-helpers}} } brms/man/draws-index-brms.Rd0000644000176200001440000000156314160105076015452 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior.R \name{draws-index-brms} \alias{draws-index-brms} \alias{variables} \alias{nvariables} \alias{niterations} \alias{nchains} \alias{ndraws} \alias{Index} \alias{variables,} \alias{iterations,} \alias{chains,} \alias{and} \alias{draws.} \alias{variables.brmsfit} \alias{nvariables.brmsfit} \alias{niterations.brmsfit} \alias{nchains.brmsfit} \alias{ndraws.brmsfit} \title{Index \code{brmsfit} objects} \usage{ \method{variables}{brmsfit}(x, ...) \method{nvariables}{brmsfit}(x, ...) 
\method{niterations}{brmsfit}(x) \method{nchains}{brmsfit}(x) \method{ndraws}{brmsfit}(x) } \arguments{ \item{x}{A \code{brmsfit} object or another \R object for which the methods are defined.} \item{...}{Arguments passed to individual methods (if applicable).} } \description{ Index \code{brmsfit} objects } brms/man/brm.Rd0000644000176200001440000005644614424476075013073 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brm.R \name{brm} \alias{brm} \title{Fit Bayesian Generalized (Non-)Linear Multivariate Multilevel Models} \usage{ brm( formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sample_prior = "no", sparse = NULL, knots = NULL, drop_unused_levels = TRUE, stanvars = NULL, stan_funs = NULL, fit = NA, save_pars = NULL, save_ranef = NULL, save_mevars = NULL, save_all_pars = NULL, init = NULL, inits = NULL, chains = 4, iter = 2000, warmup = floor(iter/2), thin = 1, cores = getOption("mc.cores", 1), threads = getOption("brms.threads", NULL), opencl = getOption("brms.opencl", NULL), normalize = getOption("brms.normalize", TRUE), control = NULL, algorithm = getOption("brms.algorithm", "sampling"), backend = getOption("brms.backend", "rstan"), future = getOption("future", FALSE), silent = 1, seed = NA, save_model = NULL, stan_model_args = list(), file = NULL, file_compress = TRUE, file_refit = getOption("brms.file_refit", "never"), empty = FALSE, rename = TRUE, ... ) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to that classes): A symbolic description of the model to be fitted. 
The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{An object of class \code{data.frame} (or one that can be coerced to that class) containing data of all variables used in the model.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family. Every family function has a \code{link} argument allowing to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{prior}{One or more \code{brmsprior} objects created by \code{\link{set_prior}} or related functions and combined using the \code{c} method or the \code{+} operator. See also \code{\link{get_prior}} for more help.} \item{autocor}{(Deprecated) An optional \code{\link{cor_brms}} object describing the correlation structure within the response variable (i.e., the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for a description of the available correlation structures. Defaults to \code{NULL}, corresponding to no correlations. In multivariate models, \code{autocor} might also be a list of autocorrelation structures. It is now recommend to specify autocorrelation terms directly within \code{formula}. See \code{\link{brmsformula}} for more details.} \item{data2}{A named \code{list} of objects containing data, which cannot be passed via argument \code{data}. Required for some objects used in autocorrelation structures to specify dependency structures as well as for within-group covariance matrices.} \item{cov_ranef}{(Deprecated) A list of matrices that are proportional to the (within) covariance structure of the group-level effects. 
The names of the matrices should correspond to columns in \code{data} that are used as grouping factors. All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among others to model pedigrees and phylogenetic effects. It is now recommended to specify those matrices in the formula interface using the \code{\link{gr}} and related functions. See \code{vignette("brms_phylogenetics")} for more details.} \item{sample_prior}{Indicate if draws from priors should be drawn additionally to the posterior draws. Options are \code{"no"} (the default), \code{"yes"}, and \code{"only"}. Among others, these draws can be used to calculate Bayes factors for point hypotheses via \code{\link{hypothesis}}. Please note that improper priors are not sampled, including the default improper priors used by \code{brm}. See \code{\link{set_prior}} on how to set (proper) priors. Please also note that prior draws for the overall intercept are not obtained by default for technical reasons. See \code{\link{brmsformula}} how to obtain prior draws for the intercept. If \code{sample_prior} is set to \code{"only"}, draws are drawn solely from the priors ignoring the likelihood, which allows among others to generate draws from the prior predictive distribution. In this case, all parameters must have proper priors.} \item{sparse}{(Deprecated) Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. Sampling speed is currently not improved or even slightly decreased. It is now recommended to use the \code{sparse} argument of \code{\link{brmsformula}} and related functions.} \item{knots}{Optional list containing user specified knot values to be used for basis construction of smoothing terms. 
See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{drop_unused_levels}{Should unused factor levels in the data be dropped? Defaults to \code{TRUE}.} \item{stanvars}{An optional \code{stanvars} object generated by function \code{\link{stanvar}} to define additional variables for use in \pkg{Stan}'s program blocks.} \item{stan_funs}{(Deprecated) An optional character string containing self-defined \pkg{Stan} functions, which will be included in the functions block of the generated \pkg{Stan} code. It is now recommended to use the \code{stanvars} argument for this purpose instead.} \item{fit}{An instance of S3 class \code{brmsfit} derived from a previous fit; defaults to \code{NA}. If \code{fit} is of class \code{brmsfit}, the compiled model associated with the fitted result is re-used and all arguments modifying the model code or data are ignored. It is not recommended to use this argument directly, but to call the \code{\link[brms:update.brmsfit]{update}} method, instead.} \item{save_pars}{An object generated by \code{\link{save_pars}} controlling which parameters should be saved in the model. The argument has no impact on the model fitting itself.} \item{save_ranef}{(Deprecated) A flag to indicate if group-level effects for each level of the grouping factor(s) should be saved (default is \code{TRUE}). Set to \code{FALSE} to save memory. The argument has no impact on the model fitting itself.} \item{save_mevars}{(Deprecated) A flag to indicate if draws of latent noise-free variables obtained by using \code{me} and \code{mi} terms should be saved (default is \code{FALSE}). Saving these draws allows to better use methods such as \code{predict} with the latent variables but leads to very large \R objects even for models of moderate size and complexity.} \item{save_all_pars}{(Deprecated) A flag to indicate if draws from all variables defined in Stan's \code{parameters} block should be saved (default is \code{FALSE}). 
Saving these draws is required in order to apply the methods \code{bridge_sampler}, \code{bayes_factor}, and \code{post_prob}.} \item{init}{Initial values for the sampler. If \code{NULL} (the default) or \code{"random"}, Stan will randomly generate initial values for parameters in a reasonable range. If \code{0}, all parameters are initialized to zero on the unconstrained space. This option is sometimes useful for certain families, as it happens that default random initial values cause draws to be essentially constant. Generally, setting \code{init = 0} is worth a try, if chains do not initialize or behave well. Alternatively, \code{init} can be a list of lists containing the initial values, or a function (or function name) generating initial values. The latter options are mainly implemented for internal testing but are available to users if necessary. If specifying initial values using a list or a function then currently the parameter names must correspond to the names used in the generated Stan code (not the names used in \R). For more details on specifying initial values you can consult the documentation of the selected \code{backend}.} \item{inits}{(Deprecated) Alias of \code{init}.} \item{chains}{Number of Markov chains (defaults to 4).} \item{iter}{Number of total iterations per chain (including warmup; defaults to 2000).} \item{warmup}{A positive integer specifying number of warmup (aka burnin) iterations. This also specifies the number of iterations used for stepsize adaptation, so warmup draws should not be used for inference. The number of warmup should not be larger than \code{iter} and the default is \code{iter/2}.} \item{thin}{Thinning rate. Must be a positive integer. 
Set \code{thin > 1} to save memory and computation time if \code{iter} is large.} \item{cores}{Number of cores to use when executing the chains in parallel, which defaults to 1 but we recommend setting the \code{mc.cores} option to be as many processors as the hardware and RAM allow (up to the number of chains). For non-Windows OS in non-interactive \R sessions, forking is used instead of PSOCK clusters.} \item{threads}{Number of threads to use in within-chain parallelization. For more control over the threading process, \code{threads} may also be a \code{brmsthreads} object created by \code{\link{threading}}. Within-chain parallelization is experimental! We recommend its use only if you are experienced with Stan's \code{reduce_sum} function and have a slow running model that cannot be sped up by any other means. Can be set globally for the current \R session via the \code{"brms.threads"} option (see \code{\link{options}}).} \item{opencl}{The platform and device IDs of the OpenCL device to use for fitting using GPU support. If you don't know the IDs of your OpenCL device, \code{c(0,0)} is most likely what you need. For more details, see \code{\link{opencl}}. Can be set globally for the current \R session via the \code{"brms.opencl"} option} \item{normalize}{Logical. Indicates whether normalization constants should be included in the Stan code (defaults to \code{TRUE}). Setting it to \code{FALSE} requires Stan version >= 2.25 to work. If \code{FALSE}, sampling efficiency may be increased but some post processing functions such as \code{\link{bridge_sampler}} will not be available. Can be controlled globally for the current \R session via the `brms.normalize` option.} \item{control}{A named \code{list} of parameters to control the sampler's behavior. It defaults to \code{NULL} so all the default values are used. The most important control parameters are discussed in the 'Details' section below. 
For a comprehensive overview see \code{\link[rstan:stan]{stan}}.} \item{algorithm}{Character string naming the estimation approach to use. Options are \code{"sampling"} for MCMC (the default), \code{"meanfield"} for variational inference with independent normal distributions, \code{"fullrank"} for variational inference with a multivariate normal distribution, or \code{"fixed_param"} for sampling from fixed parameter values. Can be set globally for the current \R session via the \code{"brms.algorithm"} option (see \code{\link{options}}).} \item{backend}{Character string naming the package to use as the backend for fitting the Stan model. Options are \code{"rstan"} (the default) or \code{"cmdstanr"}. Can be set globally for the current \R session via the \code{"brms.backend"} option (see \code{\link{options}}). Details on the \pkg{rstan} and \pkg{cmdstanr} packages are available at \url{https://mc-stan.org/rstan/} and \url{https://mc-stan.org/cmdstanr/}, respectively. Additionally a \code{"mock"} backend is available to make testing \pkg{brms} and packages that depend on it easier. The \code{"mock"} backend does not actually do any fitting, it only checks the generated Stan code for correctness and then returns whatever is passed in an additional \code{mock_fit} argument as the result of the fit.} \item{future}{Logical; If \code{TRUE}, the \pkg{\link[future:future]{future}} package is used for parallel execution of the chains and argument \code{cores} will be ignored. Can be set globally for the current \R session via the \code{"future"} option. The execution type is controlled via \code{\link[future:plan]{plan}} (see the examples section below).} \item{silent}{Verbosity level between \code{0} and \code{2}. If \code{1} (the default), most of the informational messages of compiler and sampler are suppressed. If \code{2}, even more messages are suppressed. The actual sampling progress is still printed. Set \code{refresh = 0} to turn this off as well. 
If using \code{backend = "rstan"} you can also set \code{open_progress = FALSE} to prevent opening additional progress bars.} \item{seed}{The seed for random number generation to make results reproducible. If \code{NA} (the default), \pkg{Stan} will set the seed randomly.} \item{save_model}{Either \code{NULL} or a character string. In the latter case, the model's Stan code is saved via \code{\link{cat}} in a text file named after the string supplied in \code{save_model}.} \item{stan_model_args}{A \code{list} of further arguments passed to \code{\link[rstan:stan_model]{rstan::stan_model}} for \code{backend = "rstan"} or to \code{cmdstanr::cmdstan_model} for \code{backend = "cmdstanr"}, which allows to change how models are compiled.} \item{file}{Either \code{NULL} or a character string. In the latter case, the fitted model object is saved via \code{\link{saveRDS}} in a file named after the string supplied in \code{file}. The \code{.rds} extension is added automatically. If the file already exists, \code{brm} will load and return the saved model object instead of refitting the model. Unless you specify the \code{file_refit} argument as well, the existing files won't be overwritten, you have to manually remove the file in order to refit and save the model under an existing file name. The file name is stored in the \code{brmsfit} object for later usage.} \item{file_compress}{Logical or a character string, specifying one of the compression algorithms supported by \code{\link{saveRDS}}. If the \code{file} argument is provided, this compression will be used when saving the fitted model object.} \item{file_refit}{Modifies when the fit stored via the \code{file} argument is re-used. Can be set globally for the current \R session via the \code{"brms.file_refit"} option (see \code{\link{options}}). For \code{"never"} (default) the fit is always loaded if it exists and fitting is skipped. For \code{"always"} the model is always refitted. 
If set to \code{"on_change"}, brms will refit the model if model, data or algorithm as passed to Stan differ from what is stored in the file. This also covers changes in priors, \code{sample_prior}, \code{stanvars}, covariance structure, etc. If you believe there was a false positive, you can use \code{\link{brmsfit_needs_refit}} to see why refit is deemed necessary. Refit will not be triggered for changes in additional parameters of the fit (e.g., initial values, number of iterations, control arguments, ...). A known limitation is that a refit will be triggered if within-chain parallelization is switched on/off.} \item{empty}{Logical. If \code{TRUE}, the Stan model is not created and compiled and the corresponding \code{'fit'} slot of the \code{brmsfit} object will be empty. This is useful if you have estimated a brms-created Stan model outside of \pkg{brms} and want to feed it back into the package.} \item{rename}{For internal use only.} \item{...}{Further arguments passed to Stan. For \code{backend = "rstan"} the arguments are passed to \code{\link[rstan]{sampling}} or \code{\link[rstan]{vb}}. For \code{backend = "cmdstanr"} the arguments are passed to the \code{cmdstanr::sample} or \code{cmdstanr::variational} method.} } \value{ An object of class \code{brmsfit}, which contains the posterior draws along with many other useful information about the model. Use \code{methods(class = "brmsfit")} for an overview on available methods. } \description{ Fit Bayesian generalized (non-)linear multivariate multilevel models using Stan for full Bayesian inference. A wide range of distributions and link functions are supported, allowing users to fit -- among others -- linear, robust linear, count data, survival, response times, ordinal, zero-inflated, hurdle, and even self-defined mixture models all in a multilevel context. 
Further modeling options include non-linear and smooth terms, auto-correlation structures, censored data, meta-analytic standard errors, and quite a few more. In addition, all parameters of the response distributions can be predicted in order to perform distributional regression. Prior specifications are flexible and explicitly encourage users to apply prior distributions that actually reflect their beliefs. In addition, model fit can easily be assessed and compared with posterior predictive checks and leave-one-out cross-validation. } \details{ Fit a generalized (non-)linear multivariate multilevel model via full Bayesian inference using Stan. A general overview is provided in the vignettes \code{vignette("brms_overview")} and \code{vignette("brms_multilevel")}. For a full list of available vignettes see \code{vignette(package = "brms")}. \bold{Formula syntax of brms models} Details of the formula syntax applied in \pkg{brms} can be found in \code{\link{brmsformula}}. \bold{Families and link functions} Details of families supported by \pkg{brms} can be found in \code{\link{brmsfamily}}. \bold{Prior distributions} Priors should be specified using the \code{\link[brms:set_prior]{set_prior}} function. Its documentation contains detailed information on how to correctly specify priors. To find out on which parameters or parameter classes priors can be defined, use \code{\link[brms:get_prior]{get_prior}}. Default priors are chosen to be non or very weakly informative so that their influence on the results will be negligible and you usually don't have to worry about them. However, after getting more familiar with Bayesian statistics, I recommend you to start thinking about reasonable informative priors for your model parameters: Nearly always, there is at least some prior information available that can be used to improve your inference. 
\bold{Adjusting the sampling behavior of \pkg{Stan}} In addition to choosing the number of iterations, warmup draws, and chains, users can control the behavior of the NUTS sampler, by using the \code{control} argument. The most important reason to use \code{control} is to decrease (or eliminate at best) the number of divergent transitions that cause a bias in the obtained posterior draws. Whenever you see the warning "There were x divergent transitions after warmup." you should really think about increasing \code{adapt_delta}. To do this, write \code{control = list(adapt_delta = <x>)}, where \code{<x>} should usually be a value between \code{0.8} (current default) and \code{1}. Increasing \code{adapt_delta} will slow down the sampler but will decrease the number of divergent transitions threatening the validity of your posterior draws. Another problem arises when the depth of the tree being evaluated in each iteration is exceeded. This is less common than having divergent transitions, but may also bias the posterior draws. When it happens, \pkg{Stan} will throw out a warning suggesting to increase \code{max_treedepth}, which can be accomplished by writing \code{control = list(max_treedepth = <x>)} with a positive integer \code{<x>} that should usually be larger than the current default of \code{10}. For more details on the \code{control} argument see \code{\link[rstan:stan]{stan}}. 
} \examples{ \dontrun{ # Poisson regression for the number of seizures in epileptic patients # using normal priors for population-level effects # and half-cauchy priors for standard deviations of group-level effects prior1 <- prior(normal(0,10), class = b) + prior(cauchy(0,2), class = sd) fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), prior = prior1) # generate a summary of the results summary(fit1) # plot the MCMC chains as well as the posterior distributions plot(fit1, ask = FALSE) # predict responses based on the fitted model head(predict(fit1)) # plot conditional effects for each predictor plot(conditional_effects(fit1), ask = FALSE) # investigate model fit loo(fit1) pp_check(fit1) # Ordinal regression modeling patient's rating of inhaler instructions # category specific effects are estimated for variable 'treat' fit2 <- brm(rating ~ period + carry + cs(treat), data = inhaler, family = sratio("logit"), prior = set_prior("normal(0,5)"), chains = 2) summary(fit2) plot(fit2, ask = FALSE) WAIC(fit2) # Survival regression modeling the time between the first # and second recurrence of an infection in kidney patients. 
fit3 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), data = kidney, family = lognormal()) summary(fit3) plot(fit3, ask = FALSE) plot(conditional_effects(fit3), ask = FALSE) # Probit regression using the binomial family ntrials <- sample(1:10, 100, TRUE) success <- rbinom(100, size = ntrials, prob = 0.4) x <- rnorm(100) data4 <- data.frame(ntrials, success, x) fit4 <- brm(success | trials(ntrials) ~ x, data = data4, family = binomial("probit")) summary(fit4) # Non-linear Gaussian model fit5 <- brm( bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE), data = loss, family = gaussian(), prior = c( prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta") ), control = list(adapt_delta = 0.9) ) summary(fit5) conditional_effects(fit5) # Normal model with heterogeneous variances data_het <- data.frame( y = c(rnorm(50), rnorm(50, 1, 2)), x = factor(rep(c("a", "b"), each = 50)) ) fit6 <- brm(bf(y ~ x, sigma ~ 0 + x), data = data_het) summary(fit6) plot(fit6) conditional_effects(fit6) # extract estimated residual SDs of both groups sigmas <- exp(as.data.frame(fit6, variable = "^b_sigma_", regex = TRUE)) ggplot(stack(sigmas), aes(values)) + geom_density(aes(fill = ind)) # Quantile regression predicting the 25\%-quantile fit7 <- brm(bf(y ~ x, quantile = 0.25), data = data_het, family = asym_laplace()) summary(fit7) conditional_effects(fit7) # use the future package for more flexible parallelization library(future) plan(multiprocess) fit7 <- update(fit7, future = TRUE) # fit a model manually via rstan scode <- make_stancode(count ~ Trt, data = epilepsy) sdata <- make_standata(count ~ Trt, data = epilepsy) stanfit <- rstan::stan(model_code = scode, data = sdata) # feed the Stan model back into brms fit8 <- brm(count ~ Trt, data = epilepsy, empty = TRUE) fit8$fit <- stanfit fit8 <- rename_pars(fit8) summary(fit8) } } \references{ Paul-Christian Buerkner 
(2017). brms: An R Package for Bayesian Multilevel Models Using Stan. \emph{Journal of Statistical Software}, 80(1), 1-28. \code{doi:10.18637/jss.v080.i01} Paul-Christian Buerkner (2018). Advanced Bayesian Multilevel Modeling with the R Package brms. \emph{The R Journal}. 10(1), 395–411. \code{doi:10.32614/RJ-2018-017} } \seealso{ \code{\link{brms}}, \code{\link{brmsformula}}, \code{\link{brmsfamily}}, \code{\link{brmsfit}} } \author{ Paul-Christian Buerkner \email{paul.buerkner@gmail.com} } brms/man/inhaler.Rd0000644000176200001440000000317214213413565013710 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{inhaler} \alias{inhaler} \title{Clarity of inhaler instructions} \format{ A data frame of 572 observations containing information on the following 5 variables. \describe{ \item{subject}{The subject number} \item{rating}{The rating of the inhaler instructions on a scale ranging from 1 to 4} \item{treat}{A contrast to indicate which of the two inhaler devices was used} \item{period}{A contrast to indicate the time of administration} \item{carry}{A contrast to indicate possible carry over effects} } } \source{ Ezzet, F., & Whitehead, J. (1991). A random effects model for ordinal responses from a crossover trial. \emph{Statistics in Medicine}, 10(6), 901-907. } \usage{ inhaler } \description{ Ezzet and Whitehead (1991) analyze data from a two-treatment, two-period crossover trial to compare 2 inhalation devices for delivering the drug salbutamol in 286 asthma patients. Patients were asked to rate the clarity of leaflet instructions accompanying each device, using a 4-point ordinal scale. 
} \examples{ \dontrun{ ## ordinal regression with family "sratio" fit1 <- brm(rating ~ treat + period + carry, data = inhaler, family = sratio(), prior = set_prior("normal(0,5)")) summary(fit1) plot(fit1) ## ordinal regression with family "cumulative" ## and random intercept over subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, family = cumulative(), prior = set_prior("normal(0,5)")) summary(fit2) plot(fit2) } } \keyword{datasets} brms/man/brmsfit-class.Rd0000644000176200001440000000543214427444030015036 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-class.R \docType{class} \name{brmsfit-class} \alias{brmsfit-class} \alias{brmsfit} \title{Class \code{brmsfit} of models fitted with the \pkg{brms} package} \description{ Models fitted with the \code{\link[brms:brms-package]{brms}} package are represented as a \code{brmsfit} object, which contains the posterior draws (samples), model formula, Stan code, relevant data, and other information. } \details{ See \code{methods(class = "brmsfit")} for an overview of available methods. 
} \section{Slots}{ \describe{ \item{\code{formula}}{A \code{\link{brmsformula}} object.} \item{\code{data}}{A \code{data.frame} containing all variables used in the model.} \item{\code{data2}}{A \code{list} of data objects which cannot be passed via \code{data}.} \item{\code{prior}}{A \code{\link{brmsprior}} object containing information on the priors used in the model.} \item{\code{stanvars}}{A \code{\link{stanvars}} object.} \item{\code{model}}{The model code in \pkg{Stan} language.} \item{\code{ranef}}{A \code{data.frame} containing the group-level structure.} \item{\code{exclude}}{The names of the parameters for which draws are not saved.} \item{\code{algorithm}}{The name of the algorithm used to fit the model.} \item{\code{backend}}{The name of the backend used to fit the model.} \item{\code{threads}}{An object of class `brmsthreads` created by \code{\link{threading}}.} \item{\code{opencl}}{An object of class `brmsopencl` created by \code{\link{opencl}}.} \item{\code{stan_args}}{Named list of additional control arguments that were passed to the Stan backend directly.} \item{\code{fit}}{An object of class \code{\link[rstan:stanfit-class]{stanfit}} among others containing the posterior draws.} \item{\code{basis}}{An object that contains a small subset of the Stan data created at fitting time, which is needed to process new data correctly.} \item{\code{criteria}}{An empty \code{list} for adding model fit criteria after estimation of the model.} \item{\code{file}}{Optional name of a file in which the model object was stored in or loaded from.} \item{\code{version}}{The versions of \pkg{brms} and \pkg{rstan} with which the model was fitted.} \item{\code{family}}{(Deprecated) A \code{\link{brmsfamily}} object.} \item{\code{autocor}}{(Deprecated) An \code{\link{cor_brms}} object containing the autocorrelation structure if specified.} \item{\code{cov_ranef}}{(Deprecated) A \code{list} of customized group-level covariance matrices.} \item{\code{stan_funs}}{(Deprecated) 
A character string of length one or \code{NULL}.} \item{\code{data.name}}{(Deprecated) The name of \code{data} as specified by the user.} }} \seealso{ \code{\link{brms}}, \code{\link{brm}}, \code{\link{brmsformula}}, \code{\link{brmsfamily}} } brms/man/unstr.Rd0000644000176200001440000000206114361545260013437 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{unstr} \alias{unstr} \title{Set up UNSTR correlation structures} \usage{ unstr(time, gr) } \arguments{ \item{time}{An optional time variable specifying the time ordering of the observations. By default, the existing order of the observations in the data is used.} \item{gr}{An optional grouping variable. If specified, the correlation structure is assumed to apply only to observations within the same grouping level.} } \value{ An object of class \code{'unstr_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up an unstructured (UNSTR) correlation term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with UNSTR terms. } \examples{ \dontrun{ # add an unstructured correlation matrix for visits within the same patient fit <- brm(count ~ Trt + unstr(visit, patient), data = epilepsy) summary(fit) } } \seealso{ \code{\link{autocor-terms}} } brms/man/cor_fixed.Rd0000644000176200001440000000151514213413565014227 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_fixed} \alias{cor_fixed} \alias{cov_fixed} \title{(Deprecated) Fixed user-defined covariance matrices} \usage{ cor_fixed(V) } \arguments{ \item{V}{Known covariance matrix of the response variable. If a vector is passed, it will be used as diagonal entries (variances) and covariances will be set to zero.} } \value{ An object of class \code{cor_fixed}. } \description{ This function is deprecated. 
Please see \code{\link{fcor}} for the new syntax. Define a fixed covariance matrix of the response variable for instance to model multivariate effect sizes in meta-analysis. } \examples{ \dontrun{ dat <- data.frame(y = rnorm(3)) V <- cbind(c(0.5, 0.3, 0.2), c(0.3, 1, 0.1), c(0.2, 0.1, 0.2)) fit <- brm(y~1, data = dat, autocor = cor_fixed(V)) } } brms/man/gr.Rd0000644000176200001440000000416414160105076012674 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-re.R \name{gr} \alias{gr} \title{Set up basic grouping terms in \pkg{brms}} \usage{ gr(..., by = NULL, cor = TRUE, id = NA, cov = NULL, dist = "gaussian") } \arguments{ \item{...}{One or more terms containing grouping factors.} \item{by}{An optional factor variable, specifying sub-populations of the groups. For each level of the \code{by} variable, a separate variance-covariance matrix will be fitted. Levels of the grouping factor must be nested in levels of the \code{by} variable.} \item{cor}{Logical. If \code{TRUE} (the default), group-level terms will be modelled as correlated.} \item{id}{Optional character string. All group-level terms across the model with the same \code{id} will be modeled as correlated (if \code{cor} is \code{TRUE}). See \code{\link{brmsformula}} for more details.} \item{cov}{An optional matrix which is proportional to the within-group covariance matrix of the group-level effects. All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among others, to model pedigrees and phylogenetic effects. See \code{vignette("brms_phylogenetics")} for more details. By default, levels of the same grouping factor are modeled as independent of each other.} \item{dist}{Name of the distribution of the group-level effects. Currently \code{"gaussian"} is the only option.} } \description{ Function used to set up a basic grouping term in \pkg{brms}.
The function does not evaluate its arguments -- it exists purely to help set up a model with grouping terms. \code{gr} is called implicitly inside the package and there is usually no need to call it directly. } \examples{ \dontrun{ # model using basic lme4-style formula fit1 <- brm(count ~ Trt + (1|patient), data = epilepsy) summary(fit1) # equivalent model using 'gr' which is called anyway internally fit2 <- brm(count ~ Trt + (1|gr(patient)), data = epilepsy) summary(fit2) # include Trt as a by variable fit3 <- brm(count ~ Trt + (1|gr(patient, by = Trt)), data = epilepsy) summary(fit3) } } \seealso{ \code{\link{brmsformula}} } brms/man/MultiNormal.Rd0000644000176200001440000000211014275436221014522 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{MultiNormal} \alias{MultiNormal} \alias{dmulti_normal} \alias{rmulti_normal} \title{The Multivariate Normal Distribution} \usage{ dmulti_normal(x, mu, Sigma, log = FALSE, check = FALSE) rmulti_normal(n, mu, Sigma, check = FALSE) } \arguments{ \item{x}{Vector or matrix of quantiles. If \code{x} is a matrix, each row is taken to be a quantile.} \item{mu}{Mean vector with length equal to the number of dimensions.} \item{Sigma}{Covariance matrix.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{check}{Logical; Indicates whether several input checks should be performed. Defaults to \code{FALSE} to improve efficiency.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density function and random generation for the multivariate normal distribution with mean vector \code{mu} and covariance matrix \code{Sigma}. 
} \details{ See the Stan user's manual \url{https://mc-stan.org/documentation/} for details on the parameterization } brms/man/standata.brmsfit.Rd0000644000176200001440000000316614213413565015535 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make_standata.R \name{standata.brmsfit} \alias{standata.brmsfit} \alias{standata} \title{Extract data passed to Stan} \usage{ \method{standata}{brmsfit}( object, newdata = NULL, re_formula = NULL, newdata2 = NULL, new_objects = NULL, incl_autocor = TRUE, ... ) standata(object, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{newdata2}{A named \code{list} of objects containing new data, which cannot be passed via argument \code{newdata}. Required for some objects used in autocorrelation structures, or \code{\link{stanvars}}.} \item{new_objects}{Deprecated alias of \code{newdata2}.} \item{incl_autocor}{A flag indicating if correlation structures originally specified via \code{autocor} should be included in the predictions. Defaults to \code{TRUE}.} \item{...}{More arguments passed to \code{\link{make_standata}} and \code{\link{validate_newdata}}.} } \value{ A named list containing the data originally passed to Stan. } \description{ Extract all data that was used by Stan to fit the model. 
} brms/man/prepare_predictions.Rd0000644000176200001440000001217314361545260016332 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/prepare_predictions.R \name{prepare_predictions.brmsfit} \alias{prepare_predictions.brmsfit} \alias{prepare_predictions} \alias{extract_draws} \title{Prepare Predictions} \usage{ \method{prepare_predictions}{brmsfit}( x, newdata = NULL, re_formula = NULL, allow_new_levels = FALSE, sample_new_levels = "uncertainty", incl_autocor = TRUE, oos = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, nsamples = NULL, subset = NULL, nug = NULL, smooths_only = FALSE, offset = TRUE, newdata2 = NULL, new_objects = NULL, point_estimate = NULL, ndraws_point_estimate = 1, ... ) prepare_predictions(x, ...) } \arguments{ \item{x}{An \R object typically of class \code{'brmsfit'}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{allow_new_levels}{A flag indicating if new levels of group-level effects are allowed (defaults to \code{FALSE}). Only relevant if \code{newdata} is provided.} \item{sample_new_levels}{Indicates how to sample new levels for grouping factors specified in \code{re_formula}. This argument is only relevant if \code{newdata} is provided and \code{allow_new_levels} is set to \code{TRUE}. If \code{"uncertainty"} (default), each posterior sample for a new level is drawn from the posterior draws of a randomly chosen existing level. 
Each posterior sample for a new level may be drawn from a different existing level such that the resulting set of new posterior draws represents the variation across existing levels. If \code{"gaussian"}, sample new levels from the (multivariate) normal distribution implied by the group-level standard deviations and correlations. This option may be useful for conducting Bayesian power analysis or predicting new levels in situations where relatively few levels were observed in the old_data. If \code{"old_levels"}, directly sample new levels from the existing levels, where a new level is assigned all of the posterior draws of the same (randomly chosen) existing level.} \item{incl_autocor}{A flag indicating if correlation structures originally specified via \code{autocor} should be included in the predictions. Defaults to \code{TRUE}.} \item{oos}{Optional indices of observations for which to compute out-of-sample rather than in-sample predictions. Only required in models that make use of response values to make predictions, that is, currently only ARMA models.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{nsamples}{Deprecated alias of \code{ndraws}.} \item{subset}{Deprecated alias of \code{draw_ids}.} \item{nug}{Small positive number for Gaussian process terms only. For numerical reasons, the covariance matrix of a Gaussian process might not be positive definite. Adding a very small number to the matrix's diagonal often solves this problem.
If \code{NULL} (the default), \code{nug} is chosen internally.} \item{smooths_only}{Logical; If \code{TRUE} only predictions related to the smooth terms will be computed.} \item{offset}{Logical; Indicates if offsets should be included in the predictions. Defaults to \code{TRUE}.} \item{newdata2}{A named \code{list} of objects containing new data, which cannot be passed via argument \code{newdata}. Required for some objects used in autocorrelation structures, or \code{\link{stanvars}}.} \item{new_objects}{Deprecated alias of \code{newdata2}.} \item{point_estimate}{Shall the returned object contain only point estimates of the parameters instead of their posterior draws? Defaults to \code{NULL} in which case no point estimate is computed. Alternatively, may be set to \code{"mean"} or \code{"median"}. This argument is primarily implemented to ensure compatibility with the \code{\link{loo_subsample}} method.} \item{ndraws_point_estimate}{Only used if \code{point_estimate} is not \code{NULL}. How often shall the point estimate's value be repeated? Defaults to \code{1}.} \item{...}{Further arguments passed to \code{\link{validate_newdata}}.} } \value{ An object of class \code{'brmsprep'} or \code{'mvbrmsprep'}, depending on whether a univariate or multivariate model is passed. } \description{ This method helps in preparing \pkg{brms} models for certain post-processing tasks, most notably various forms of predictions. Unless you are a package developer, you will rarely need to call \code{prepare_predictions} directly.
} brms/man/me.Rd0000644000176200001440000000313014213413565012661 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-sp.R \name{me} \alias{me} \title{Predictors with Measurement Error in \pkg{brms} Models} \usage{ me(x, sdx, gr = NULL) } \arguments{ \item{x}{The variable measured with error.} \item{sdx}{Known measurement error of \code{x} treated as standard deviation.} \item{gr}{Optional grouping factor to specify which values of \code{x} correspond to the same value of the latent variable. If \code{NULL} (the default) each observation will have its own value of the latent variable.} } \description{ (Soft deprecated) Specify predictors with measurement error. The function does not evaluate its arguments -- it exists purely to help set up a model. } \details{ For detailed documentation see \code{help(brmsformula)}. \code{me} terms are soft deprecated in favor of the more general and consistent \code{\link{mi}} terms. By default, latent noise-free variables are assumed to be correlated. To change that, add \code{set_mecor(FALSE)} to your model formula object (see examples). 
} \examples{ \dontrun{ # sample some data N <- 100 dat <- data.frame( y = rnorm(N), x1 = rnorm(N), x2 = rnorm(N), sdx = abs(rnorm(N, 1)) ) # fit a simple error-in-variables model fit1 <- brm(y ~ me(x1, sdx) + me(x2, sdx), data = dat, save_pars = save_pars(latent = TRUE)) summary(fit1) # turn off modeling of correlations bform <- bf(y ~ me(x1, sdx) + me(x2, sdx)) + set_mecor(FALSE) fit2 <- brm(bform, data = dat, save_pars = save_pars(latent = TRUE)) summary(fit2) } } \seealso{ \code{\link{brmsformula}}, \code{\link{brmsformula-helpers}} } brms/man/control_params.Rd0000644000176200001440000000135214213413565015307 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diagnostics.R \name{control_params} \alias{control_params} \alias{control_params.brmsfit} \title{Extract Control Parameters of the NUTS Sampler} \usage{ control_params(x, ...) \method{control_params}{brmsfit}(x, pars = NULL, ...) } \arguments{ \item{x}{An \R object} \item{...}{Currently ignored.} \item{pars}{Optional names of the control parameters to be returned. If \code{NULL} (the default) all control parameters are returned. See \code{\link[rstan:stan]{stan}} for more details.} } \value{ A named \code{list} with control parameter values. } \description{ Extract control parameters of the NUTS sampler such as \code{adapt_delta} or \code{max_treedepth}. } brms/man/bayes_R2.brmsfit.Rd0000644000176200001440000000437014417767011015406 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayes_R2.R \name{bayes_R2.brmsfit} \alias{bayes_R2.brmsfit} \alias{bayes_R2} \title{Compute a Bayesian version of R-squared for regression models} \usage{ \method{bayes_R2}{brmsfit}( object, resp = NULL, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{resp}{Optional names of response variables. 
If specified, predictions are performed only for the specified response variables.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}}, which is used in the computation of the R-squared values.} } \value{ If \code{summary = TRUE}, an M x C matrix is returned (M = number of response variables and c = \code{length(probs) + 2}) containing summary statistics of the Bayesian R-squared values. If \code{summary = FALSE}, the posterior draws of the Bayesian R-squared values are returned in an S x M matrix (S is the number of draws). } \description{ Compute a Bayesian version of R-squared for regression models } \details{ For an introduction to the approach, see Gelman et al. (2018) and \url{https://github.com/jgabry/bayes_R2/}. } \examples{ \dontrun{ fit <- brm(mpg ~ wt + cyl, data = mtcars) summary(fit) bayes_R2(fit) # compute R2 with new data nd <- data.frame(mpg = c(10, 20, 30), wt = c(4, 3, 2), cyl = c(8, 6, 4)) bayes_R2(fit, newdata = nd) } } \references{ Andrew Gelman, Ben Goodrich, Jonah Gabry & Aki Vehtari. (2018). R-squared for Bayesian regression models, \emph{The American Statistician}. 
\code{10.1080/00031305.2018.1549100} (Preprint available at \url{https://stat.columbia.edu/~gelman/research/published/bayes_R2_v3.pdf}) } brms/man/kidney.Rd0000644000176200001440000000351414213413565013551 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{kidney} \alias{kidney} \title{Infections in kidney patients} \format{ A data frame of 76 observations containing information on the following 7 variables. \describe{ \item{time}{The time to first or second recurrence of the infection, or the time of censoring} \item{recur}{A factor of levels \code{1} or \code{2} indicating if the infection recurred for the first or second time for this patient} \item{censored}{Either \code{0} or \code{1}, where \code{0} indicates no censoring of recurrence time and \code{1} indicates right censoring} \item{patient}{The patient number} \item{age}{The age of the patient} \item{sex}{The sex of the patient} \item{disease}{A factor of levels \code{other, GN, AN}, and \code{PKD} specifying the type of disease} } } \source{ McGilchrist, C. A., & Aisbett, C. W. (1991). Regression with frailty in survival analysis. \emph{Biometrics}, 47(2), 461-466. } \usage{ kidney } \description{ This dataset, originally discussed in McGilchrist and Aisbett (1991), describes the first and second (possibly right censored) recurrence time of infection in kidney patients using portable dialysis equipment. In addition, information on the risk variables age, sex and disease type is provided. 
} \examples{ \dontrun{ ## performing surivival analysis using the "weibull" family fit1 <- brm(time | cens(censored) ~ age + sex + disease, data = kidney, family = weibull, init = "0") summary(fit1) plot(fit1) ## adding random intercepts over patients fit2 <- brm(time | cens(censored) ~ age + sex + disease + (1|patient), data = kidney, family = weibull(), init = "0", prior = set_prior("cauchy(0,2)", class = "sd")) summary(fit2) plot(fit2) } } \keyword{datasets} brms/man/data_response.Rd0000644000176200001440000000076614213413565015123 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-response.R \name{data_response} \alias{data_response} \title{Prepare Response Data} \usage{ data_response(x, ...) } \arguments{ \item{x}{An \R object.} \item{...}{Further arguments passed to or from other methods.} } \value{ A named list of data related to response variables. } \description{ Prepare data related to response variables in \pkg{brms}. Only exported for use in package development. } \keyword{internal} brms/man/validate_prior.Rd0000644000176200001440000000671114275447604015305 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{validate_prior} \alias{validate_prior} \title{Validate Prior for \pkg{brms} Models} \usage{ validate_prior( prior, formula, data, family = gaussian(), sample_prior = "no", data2 = NULL, knots = NULL, drop_unused_levels = TRUE, ... ) } \arguments{ \item{prior}{One or more \code{brmsprior} objects created by \code{\link{set_prior}} or related functions and combined using the \code{c} method or the \code{+} operator. See also \code{\link{get_prior}} for more help.} \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to that classes): A symbolic description of the model to be fitted. 
The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{An object of class \code{data.frame} (or one that can be coerced to that class) containing data of all variables used in the model.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family. Every family function has a \code{link} argument allowing to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{sample_prior}{Indicate if draws from priors should be drawn additionally to the posterior draws. Options are \code{"no"} (the default), \code{"yes"}, and \code{"only"}. Among others, these draws can be used to calculate Bayes factors for point hypotheses via \code{\link{hypothesis}}. Please note that improper priors are not sampled, including the default improper priors used by \code{brm}. See \code{\link{set_prior}} on how to set (proper) priors. Please also note that prior draws for the overall intercept are not obtained by default for technical reasons. See \code{\link{brmsformula}} how to obtain prior draws for the intercept. If \code{sample_prior} is set to \code{"only"}, draws are drawn solely from the priors ignoring the likelihood, which allows among others to generate draws from the prior predictive distribution. In this case, all parameters must have proper priors.} \item{data2}{A named \code{list} of objects containing data, which cannot be passed via argument \code{data}. 
Required for some objects used in autocorrelation structures to specify dependency structures as well as for within-group covariance matrices.} \item{knots}{Optional list containing user specified knot values to be used for basis construction of smoothing terms. See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{drop_unused_levels}{Should unused factors levels in the data be dropped? Defaults to \code{TRUE}.} \item{...}{Other arguments for internal usage only.} } \value{ An object of class \code{brmsprior}. } \description{ Validate priors supplied by the user. Return a complete set of priors for the given model, including default priors. } \examples{ prior1 <- prior(normal(0,10), class = b) + prior(cauchy(0,2), class = sd) validate_prior(prior1, count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson()) } \seealso{ \code{\link{get_prior}}, \code{\link{set_prior}}. } brms/man/Hurdle.Rd0000644000176200001440000000320214275473342013512 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{Hurdle} \alias{Hurdle} \alias{dhurdle_poisson} \alias{phurdle_poisson} \alias{dhurdle_negbinomial} \alias{phurdle_negbinomial} \alias{dhurdle_gamma} \alias{phurdle_gamma} \alias{dhurdle_lognormal} \alias{phurdle_lognormal} \title{Hurdle Distributions} \usage{ dhurdle_poisson(x, lambda, hu, log = FALSE) phurdle_poisson(q, lambda, hu, lower.tail = TRUE, log.p = FALSE) dhurdle_negbinomial(x, mu, shape, hu, log = FALSE) phurdle_negbinomial(q, mu, shape, hu, lower.tail = TRUE, log.p = FALSE) dhurdle_gamma(x, shape, scale, hu, log = FALSE) phurdle_gamma(q, shape, scale, hu, lower.tail = TRUE, log.p = FALSE) dhurdle_lognormal(x, mu, sigma, hu, log = FALSE) phurdle_lognormal(q, mu, sigma, hu, lower.tail = TRUE, log.p = FALSE) } \arguments{ \item{x}{Vector of quantiles.} \item{hu}{hurdle probability} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{q}{Vector of 
quantiles.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x).} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{mu, lambda}{location parameter} \item{shape}{shape parameter} \item{sigma, scale}{scale parameter} } \description{ Density and distribution functions for hurdle distributions. } \details{ The density of a hurdle distribution can be specified as follows. If \eqn{x = 0} set \eqn{f(x) = \theta}. Else set \eqn{f(x) = (1 - \theta) * g(x) / (1 - G(0))} where \eqn{g(x)} and \eqn{G(x)} are the density and distribution function of the non-hurdle part, respectively. } brms/man/combine_models.Rd0000644000176200001440000000173014213413565015243 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brm_multiple.R \name{combine_models} \alias{combine_models} \title{Combine Models fitted with \pkg{brms}} \usage{ combine_models(..., mlist = NULL, check_data = TRUE) } \arguments{ \item{...}{One or more \code{brmsfit} objects.} \item{mlist}{Optional list of one or more \code{brmsfit} objects.} \item{check_data}{Logical; indicates if the data should be checked for being the same across models (defaults to \code{TRUE}). Setting it to \code{FALSE} may be useful for instance when combining models fitted on multiple imputed data sets.} } \value{ A \code{brmsfit} object. } \description{ Combine multiple \code{brmsfit} objects, which fitted the same model. This is useful, for instance, when having manually run models in parallel. } \details{ This function just takes the first model and replaces its \code{stanfit} object (slot \code{fit}) by the combined \code{stanfit} objects of all models.
} brms/man/mvbind.Rd0000644000176200001440000000102713565500267013547 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{mvbind} \alias{mvbind} \title{Bind response variables in multivariate models} \usage{ mvbind(...) } \arguments{ \item{...}{Same as in \code{\link{cbind}}} } \description{ Can be used to specify a multivariate \pkg{brms} model within a single formula. Outside of \code{\link{brmsformula}}, it just behaves like \code{\link{cbind}}. } \examples{ bf(mvbind(y1, y2) ~ x) } \seealso{ \code{\link{brmsformula}}, \code{\link{mvbrmsformula}} } brms/man/theme_black.Rd0000644000176200001440000000231414213413565014521 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggplot-themes.R \name{theme_black} \alias{theme_black} \title{(Deprecated) Black Theme for \pkg{ggplot2} Graphics} \usage{ theme_black(base_size = 12, base_family = "") } \arguments{ \item{base_size}{base font size} \item{base_family}{base font family} } \value{ A \code{theme} object used in \pkg{ggplot2} graphics. } \description{ A black theme for ggplot graphics inspired by a blog post of Jon Lefcheck (\url{https://jonlefcheck.net/2013/03/11/black-theme-for-ggplot2-2/}). } \details{ When using \code{theme_black} in plots powered by the \pkg{bayesplot} package such as \code{pp_check} or \code{stanplot}, I recommend using the \code{"viridisC"} color scheme (see examples). 
} \examples{ \dontrun{ # change default ggplot theme ggplot2::theme_set(theme_black()) # change default bayesplot color scheme bayesplot::color_scheme_set("viridisC") # fit a simple model fit <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), chains = 2) summary(fit) # create various plots plot(marginal_effects(fit), ask = FALSE) pp_check(fit) mcmc_plot(fit, type = "hex", variable = c("b_Intercept", "b_Trt1")) } } brms/man/ranef.brmsfit.Rd0000644000176200001440000000351314213413565015025 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{ranef.brmsfit} \alias{ranef.brmsfit} \alias{ranef} \title{Extract Group-Level Estimates} \usage{ \method{ranef}{brmsfit}( object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), pars = NULL, groups = NULL, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{pars}{Optional names of coefficients to extract. By default, all coefficients are extracted.} \item{groups}{Optional names of grouping variables for which to extract effects.} \item{...}{Currently ignored.} } \value{ A list of 3D arrays (one per grouping factor). If \code{summary} is \code{TRUE}, the 1st dimension contains the factor levels, the 2nd dimension contains the summary statistics (see \code{\link{posterior_summary}}), and the 3rd dimension contains the group-level effects. 
If \code{summary} is \code{FALSE}, the 1st dimension contains the posterior draws, the 2nd dimension contains the factor levels, and the 3rd dimension contains the group-level effects. } \description{ Extract the group-level ('random') effects of each level from a \code{brmsfit} object. } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), data = epilepsy, family = gaussian(), chains = 2) ranef(fit) } } brms/man/make_stancode.Rd0000644000176200001440000001532714275447604015101 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make_stancode.R \name{make_stancode} \alias{make_stancode} \title{Stan Code for \pkg{brms} Models} \usage{ make_stancode( formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sparse = NULL, sample_prior = "no", stanvars = NULL, stan_funs = NULL, knots = NULL, drop_unused_levels = TRUE, threads = getOption("brms.threads", NULL), normalize = getOption("brms.normalize", TRUE), save_model = NULL, ... ) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to that classes): A symbolic description of the model to be fitted. The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{An object of class \code{data.frame} (or one that can be coerced to that class) containing data of all variables used in the model.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family. Every family function has a \code{link} argument allowing to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. 
By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{prior}{One or more \code{brmsprior} objects created by \code{\link{set_prior}} or related functions and combined using the \code{c} method or the \code{+} operator. See also \code{\link{get_prior}} for more help.} \item{autocor}{(Deprecated) An optional \code{\link{cor_brms}} object describing the correlation structure within the response variable (i.e., the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for a description of the available correlation structures. Defaults to \code{NULL}, corresponding to no correlations. In multivariate models, \code{autocor} might also be a list of autocorrelation structures. It is now recommend to specify autocorrelation terms directly within \code{formula}. See \code{\link{brmsformula}} for more details.} \item{data2}{A named \code{list} of objects containing data, which cannot be passed via argument \code{data}. Required for some objects used in autocorrelation structures to specify dependency structures as well as for within-group covariance matrices.} \item{cov_ranef}{(Deprecated) A list of matrices that are proportional to the (within) covariance structure of the group-level effects. The names of the matrices should correspond to columns in \code{data} that are used as grouping factors. All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among others to model pedigrees and phylogenetic effects. It is now recommended to specify those matrices in the formula interface using the \code{\link{gr}} and related functions. See \code{vignette("brms_phylogenetics")} for more details.} \item{sparse}{(Deprecated) Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. 
Sampling speed is currently not improved or even slightly decreased. It is now recommended to use the \code{sparse} argument of \code{\link{brmsformula}} and related functions.} \item{sample_prior}{Indicate if draws from priors should be drawn additionally to the posterior draws. Options are \code{"no"} (the default), \code{"yes"}, and \code{"only"}. Among others, these draws can be used to calculate Bayes factors for point hypotheses via \code{\link{hypothesis}}. Please note that improper priors are not sampled, including the default improper priors used by \code{brm}. See \code{\link{set_prior}} on how to set (proper) priors. Please also note that prior draws for the overall intercept are not obtained by default for technical reasons. See \code{\link{brmsformula}} how to obtain prior draws for the intercept. If \code{sample_prior} is set to \code{"only"}, draws are drawn solely from the priors ignoring the likelihood, which allows among others to generate draws from the prior predictive distribution. In this case, all parameters must have proper priors.} \item{stanvars}{An optional \code{stanvars} object generated by function \code{\link{stanvar}} to define additional variables for use in \pkg{Stan}'s program blocks.} \item{stan_funs}{(Deprecated) An optional character string containing self-defined \pkg{Stan} functions, which will be included in the functions block of the generated \pkg{Stan} code. It is now recommended to use the \code{stanvars} argument for this purpose instead.} \item{knots}{Optional list containing user specified knot values to be used for basis construction of smoothing terms. See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{drop_unused_levels}{Should unused factors levels in the data be dropped? Defaults to \code{TRUE}.} \item{threads}{Number of threads to use in within-chain parallelization. For more control over the threading process, \code{threads} may also be a \code{brmsthreads} object created by \code{\link{threading}}. 
Within-chain parallelization is experimental! We recommend its use only if you are experienced with Stan's \code{reduce_sum} function and have a slow running model that cannot be sped up by any other means. Can be set globally for the current \R session via the \code{"brms.threads"} option (see \code{\link{options}}).} \item{normalize}{Logical. Indicates whether normalization constants should be included in the Stan code (defaults to \code{TRUE}). Setting it to \code{FALSE} requires Stan version >= 2.25 to work. If \code{FALSE}, sampling efficiency may be increased but some post processing functions such as \code{\link{bridge_sampler}} will not be available. Can be controlled globally for the current \R session via the `brms.normalize` option.} \item{save_model}{Either \code{NULL} or a character string. In the latter case, the model's Stan code is saved via \code{\link{cat}} in a text file named after the string supplied in \code{save_model}.} \item{...}{Other arguments for internal usage only.} } \value{ A character string containing the fully commented \pkg{Stan} code to fit a \pkg{brms} model. } \description{ Generate Stan code for \pkg{brms} models } \examples{ make_stancode(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "cumulative") make_stancode(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = "poisson") } brms/man/reloo.brmsfit.Rd0000644000176200001440000000624714361545260015063 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reloo.R \name{reloo.brmsfit} \alias{reloo.brmsfit} \alias{reloo.loo} \alias{reloo} \title{Compute exact cross-validation for problematic observations} \usage{ \method{reloo}{brmsfit}( x, loo, k_threshold = 0.7, newdata = NULL, resp = NULL, check = TRUE, recompile = NULL, future_args = list(), ... ) \method{reloo}{loo}(x, fit, ...) reloo(x, ...) 
} \arguments{ \item{x}{An \R object of class \code{brmsfit} or \code{loo} depending on the method.} \item{loo}{An \R object of class \code{loo}.} \item{k_threshold}{The threshold at which Pareto \eqn{k} estimates are treated as problematic. Defaults to \code{0.7}. See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}} for more details.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{check}{Logical; If \code{TRUE} (the default), some checks are performed to verify that the \code{loo} object was generated from the \code{brmsfit} object passed to argument \code{fit}.} \item{recompile}{Logical, indicating whether the Stan model should be recompiled. This may be necessary if you are running \code{reloo} on another machine than the one used to fit the model.} \item{future_args}{A list of further arguments passed to \code{\link[future:future]{future}} for additional control over parallel execution if activated.} \item{...}{Further arguments passed to \code{\link{update.brmsfit}} and \code{\link{log_lik.brmsfit}}.} \item{fit}{An \R object of class \code{brmsfit}.} } \value{ An object of the class \code{loo}. } \description{ Compute exact cross-validation for problematic observations for which approximate leave-one-out cross-validation may return incorrect results. Models for problematic observations can be run in parallel using the \pkg{future} package. 
} \details{ Warnings about Pareto \eqn{k} estimates indicate observations for which the approximation to LOO is problematic (this is described in detail in Vehtari, Gelman, and Gabry (2017) and the \pkg{\link[loo:loo-package]{loo}} package documentation). If there are \eqn{J} observations with \eqn{k} estimates above \code{k_threshold}, then \code{reloo} will refit the original model \eqn{J} times, each time leaving out one of the \eqn{J} problematic observations. The pointwise contributions of these observations to the total ELPD are then computed directly and substituted for the previous estimates from these \eqn{J} observations that are stored in the original \code{loo} object. } \examples{ \dontrun{ fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson()) # throws warning about some pareto k estimates being too high (loo1 <- loo(fit1)) (reloo1 <- reloo(fit1, loo = loo1, chains = 1)) } } \seealso{ \code{\link{loo}}, \code{\link{kfold}} } brms/man/posterior_interval.brmsfit.Rd0000644000176200001440000000236214213413565017665 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{posterior_interval.brmsfit} \alias{posterior_interval.brmsfit} \alias{posterior_interval} \title{Compute posterior uncertainty intervals} \usage{ \method{posterior_interval}{brmsfit}(object, pars = NA, variable = NULL, prob = 0.95, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{pars}{Deprecated alias of \code{variable}. For reasons of backwards compatibility, \code{pars} is interpreted as a vector of regular expressions by default unless \code{fixed = TRUE} is specified.} \item{variable}{A character vector providing the variables to extract. By default, all variables are extracted.} \item{prob}{A value between 0 and 1 indicating the desired probability to be covered by the uncertainty intervals. 
The default is 0.95.} \item{...}{More arguments passed to \code{\link{as.matrix.brmsfit}}.} } \value{ A \code{matrix} with lower and upper interval bounds as columns and as many rows as selected variables. } \description{ Compute posterior uncertainty intervals for \code{brmsfit} objects. } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt, data = epilepsy, family = negbinomial()) posterior_interval(fit) } } brms/man/launch_shinystan.brmsfit.Rd0000644000176200001440000000214514213413565017304 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/launch_shinystan.R \name{launch_shinystan.brmsfit} \alias{launch_shinystan.brmsfit} \alias{launch_shinystan} \title{Interface to \pkg{shinystan}} \usage{ \method{launch_shinystan}{brmsfit}(object, rstudio = getOption("shinystan.rstudio"), ...) } \arguments{ \item{object}{A fitted model object typically of class \code{brmsfit}.} \item{rstudio}{Only relevant for RStudio users. The default (\code{rstudio=FALSE}) is to launch the app in the default web browser rather than RStudio's pop-up Viewer. 
Users can change the default to \code{TRUE} by setting the global option \cr \code{options(shinystan.rstudio = TRUE)}.} \item{...}{Optional arguments to pass to \code{\link[shiny:runApp]{runApp}}} } \value{ An S4 shinystan object } \description{ Provide an interface to \pkg{shinystan} for models fitted with \pkg{brms} } \examples{ \dontrun{ fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "gaussian") launch_shinystan(fit) } } \seealso{ \code{\link[shinystan:launch_shinystan]{launch_shinystan}} } brms/man/cor_arma.Rd0000644000176200001440000000405214213413565014047 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_arma} \alias{cor_arma} \alias{cor_arma-class} \title{(Deprecated) ARMA(p,q) correlation structure} \usage{ cor_arma(formula = ~1, p = 0, q = 0, r = 0, cov = FALSE) } \arguments{ \item{formula}{A one sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer valued. When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} \item{p}{A non-negative integer specifying the autoregressive (AR) order of the ARMA structure. Default is 0.} \item{q}{A non-negative integer specifying the moving average (MA) order of the ARMA structure. Default is 0.} \item{r}{No longer supported.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. 
If the model family does not have natural residuals, latent residuals are added automatically. If \code{FALSE} (the default) a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{cor_arma}, representing an autoregression-moving-average correlation structure. } \description{ This function is deprecated. Please see \code{\link{arma}} for the new syntax. This function is a constructor for the \code{cor_arma} class, representing an autoregression-moving average correlation structure of order (p, q). } \examples{ cor_arma(~ visit | patient, p = 2, q = 2) } \seealso{ \code{\link{cor_ar}}, \code{\link{cor_ma}} } brms/man/model_weights.brmsfit.Rd0000644000176200001440000000374114213413565016567 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_weights.R \name{model_weights.brmsfit} \alias{model_weights.brmsfit} \alias{model_weights} \title{Model Weighting Methods} \usage{ \method{model_weights}{brmsfit}(x, ..., weights = "stacking", model_names = NULL) model_weights(x, ...) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{weights}{Name of the criterion to compute weights from. Should be one of \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"stacking"} (current default), or \code{"bma"}, \code{"pseudobma"}. For the former three options, Akaike weights will be computed based on the information criterion values returned by the respective methods. For \code{"stacking"} and \code{"pseudobma"}, method \code{\link{loo_model_weights}} will be used to obtain weights. 
For \code{"bma"}, method \code{\link{post_prob}} will be used to compute Bayesian model averaging weights based on log marginal likelihood values (make sure to specify reasonable priors in this case). For some methods, \code{weights} may also be a numeric vector of pre-specified weights.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \value{ A numeric vector of weights for the models. } \description{ Compute model weights in various ways, for instance, via stacking of posterior predictive distributions, Akaike weights, or marginal likelihoods. } \examples{ \dontrun{ # model with 'treat' as predictor fit1 <- brm(rating ~ treat + period + carry, data = inhaler) summary(fit1) # model without 'treat' as predictor fit2 <- brm(rating ~ period + carry, data = inhaler) summary(fit2) # obtain Akaike weights based on the WAIC model_weights(fit1, fit2, weights = "waic") } } brms/man/log_lik.brmsfit.Rd0000644000176200001440000000714514213413565015357 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/log_lik.R \name{log_lik.brmsfit} \alias{log_lik.brmsfit} \alias{log_lik} \alias{logLik.brmsfit} \title{Compute the Pointwise Log-Likelihood} \usage{ \method{log_lik}{brmsfit}( object, newdata = NULL, re_formula = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, pointwise = FALSE, combine = TRUE, add_point_estimate = FALSE, cores = NULL, ... ) } \arguments{ \item{object}{A fitted model object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. 
This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{pointwise}{A flag indicating whether to compute the full log-likelihood matrix at once (the default), or just return the likelihood function along with all data and draws required to compute the log-likelihood separately for each observation. The latter option is rarely useful when calling \code{log_lik} directly, but rather when computing \code{\link{waic}} or \code{\link{loo}}.} \item{combine}{Only relevant in multivariate models. Indicates if the log-likelihoods of the submodels should be combined per observation (i.e. added together; the default) or if the log-likelihoods should be returned separately.} \item{add_point_estimate}{For internal use only. Ensures compatibility with the \code{\link{loo_subsample}} method.} \item{cores}{Number of cores (defaults to \code{1}). On non-Windows systems, this argument can be set globally via the \code{mc.cores} option.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ Usually, an S x N matrix containing the pointwise log-likelihood draws, where S is the number of draws and N is the number of observations in the data. 
For multivariate models and if \code{combine} is \code{FALSE}, an S x N x R array is returned, where R is the number of response variables. If \code{pointwise = TRUE}, the output is a function with a \code{draws} attribute containing all relevant data and posterior draws. } \description{ Compute the Pointwise Log-Likelihood } \details{ \code{NA} values within factors in \code{newdata}, are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding. In multilevel models, it is possible to allow new levels of grouping factors to be used in the predictions. This can be controlled via argument \code{allow_new_levels}. New levels can be sampled in multiple ways, which can be controlled via argument \code{sample_new_levels}. Both of these arguments are documented in \code{\link{prepare_predictions}} along with several other useful arguments to control specific aspects of the predictions. } brms/man/gp.Rd0000644000176200001440000001226014213413565012672 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-gp.R \name{gp} \alias{gp} \title{Set up Gaussian process terms in \pkg{brms}} \usage{ gp( ..., by = NA, k = NA, cov = "exp_quad", iso = TRUE, gr = TRUE, cmc = TRUE, scale = TRUE, c = NULL ) } \arguments{ \item{...}{One or more predictors for the GP.} \item{by}{A numeric or factor variable of the same length as each predictor. In the numeric vector case, the elements multiply the values returned by the GP. In the factor variable case, a separate GP is fitted for each factor level.} \item{k}{Optional number of basis functions for computing approximate GPs. If \code{NA} (the default), exact GPs are computed.} \item{cov}{Name of the covariance kernel. 
By default, the exponentiated-quadratic kernel \code{"exp_quad"} is used.} \item{iso}{A flag to indicate whether an isotropic (\code{TRUE}; the default) or a non-isotropic GP should be used. In the former case, the same amount of smoothing is applied to all predictors. In the latter case, predictors may have different smoothing. Ignored if only a single predictor is supplied.} \item{gr}{Logical; Indicates if auto-grouping should be used (defaults to \code{TRUE}). If enabled, observations sharing the same predictor values will be represented by the same latent variable in the GP. This will improve sampling efficiency drastically if the number of unique predictor combinations is small relative to the number of observations.} \item{cmc}{Logical; Only relevant if \code{by} is a factor. If \code{TRUE} (the default), cell-mean coding is used for the \code{by}-factor, that is one GP per level is estimated. If \code{FALSE}, contrast GPs are estimated according to the contrasts set for the \code{by}-factor.} \item{scale}{Logical; If \code{TRUE} (the default), predictors are scaled so that the maximum Euclidean distance between two points is 1. This often improves sampling speed and convergence. Scaling also affects the estimated length-scale parameters in that they resemble those of scaled predictors (not of the original predictors) if \code{scale} is \code{TRUE}.} \item{c}{Numeric value only used in approximate GPs. Defines the multiplicative constant of the predictors' range over which predictions should be computed. A good default could be \code{c = 5/4} but we are still working on providing better recommendations.} } \value{ An object of class \code{'gp_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up a Gaussian process (GP) term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with GP terms. 
} \details{ A GP is a stochastic process, which describes the relation between one or more predictors \eqn{x = (x_1, ..., x_d)} and a response \eqn{f(x)}, where \eqn{d} is the number of predictors. A GP is the generalization of the multivariate normal distribution to an infinite number of dimensions. Thus, it can be interpreted as a prior over functions. The values of \eqn{f( )} at any finite set of locations are jointly multivariate normal, with a covariance matrix defined by the covariance kernel \eqn{k_p(x_i, x_j)}, where \eqn{p} is the vector of parameters of the GP: \deqn{(f(x_1), \ldots f(x_n) \sim MVN(0, (k_p(x_i, x_j))_{i,j=1}^n) .} The smoothness and general behavior of the function \eqn{f} depends only on the choice of covariance kernel. For a more detailed introduction to Gaussian processes, see \url{https://en.wikipedia.org/wiki/Gaussian_process}. Below, we describe the currently supported covariance kernels: \itemize{ \item{"exp_quad": }{The exponentiated-quadratic kernel is defined as \eqn{k(x_i, x_j) = sdgp^2 \exp(- || x_i - x_j ||^2 / (2 lscale^2))}, where \eqn{|| . ||} is the Euclidean norm, \eqn{sdgp} is a standard deviation parameter, and \eqn{lscale} is characteristic length-scale parameter. The latter practically measures how close two points \eqn{x_i} and \eqn{x_j} have to be to influence each other substantially.} } In the current implementation, \code{"exp_quad"} is the only supported covariance kernel. More options will follow in the future. 
} \examples{ \dontrun{ # simulate data using the mgcv package dat <- mgcv::gamSim(1, n = 30, scale = 2) # fit a simple GP model fit1 <- brm(y ~ gp(x2), dat, chains = 2) summary(fit1) me1 <- conditional_effects(fit1, ndraws = 200, spaghetti = TRUE) plot(me1, ask = FALSE, points = TRUE) # fit a more complicated GP model fit2 <- brm(y ~ gp(x0) + x1 + gp(x2) + x3, dat, chains = 2) summary(fit2) me2 <- conditional_effects(fit2, ndraws = 200, spaghetti = TRUE) plot(me2, ask = FALSE, points = TRUE) # fit a multivariate GP model fit3 <- brm(y ~ gp(x1, x2), dat, chains = 2) summary(fit3) me3 <- conditional_effects(fit3, ndraws = 200, spaghetti = TRUE) plot(me3, ask = FALSE, points = TRUE) # compare model fit LOO(fit1, fit2, fit3) # simulate data with a factor covariate dat2 <- mgcv::gamSim(4, n = 90, scale = 2) # fit separate gaussian processes for different levels of 'fac' fit4 <- brm(y ~ gp(x2, by = fac), dat2, chains = 2) summary(fit4) plot(conditional_effects(fit4), points = TRUE) } } \seealso{ \code{\link{brmsformula}} } brms/man/bayes_factor.brmsfit.Rd0000644000176200001440000000420014213413565016365 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bridgesampling.R \name{bayes_factor.brmsfit} \alias{bayes_factor.brmsfit} \alias{bayes_factor} \title{Bayes Factors from Marginal Likelihoods} \usage{ \method{bayes_factor}{brmsfit}(x1, x2, log = FALSE, ...) } \arguments{ \item{x1}{A \code{brmsfit} object} \item{x2}{Another \code{brmsfit} object based on the same responses.} \item{log}{Report Bayes factors on the log-scale?} \item{...}{Additional arguments passed to \code{\link[brms:bridge_sampler.brmsfit]{bridge_sampler}}.} } \description{ Compute Bayes factors from marginal likelihoods. } \details{ Computing the marginal likelihood requires samples of all variables defined in Stan's \code{parameters} block to be saved. Otherwise \code{bayes_factor} cannot be computed. 
Thus, please set \code{save_all_pars = TRUE} in the call to \code{brm}, if you are planning to apply \code{bayes_factor} to your models. The computation of Bayes factors based on bridge sampling requires a lot more posterior samples than usual. A good conservative rule of thumb is perhaps 10-fold more samples (read: the default of 4000 samples may not be enough in many cases). If not enough posterior samples are provided, the bridge sampling algorithm tends to be unstable, leading to considerably different results each time it is run. We thus recommend running \code{bayes_factor} multiple times to check the stability of the results. More details are provided under \code{\link[bridgesampling:bf]{bridgesampling::bayes_factor}}. } \examples{ \dontrun{ # model with the treatment effect fit1 <- brm( count ~ zAge + zBase + Trt, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_all_pars = TRUE ) summary(fit1) # model without the treatment effect fit2 <- brm( count ~ zAge + zBase, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_all_pars = TRUE ) summary(fit2) # compute the bayes factor bayes_factor(fit1, fit2) } } \seealso{ \code{ \link[brms:bridge_sampler.brmsfit]{bridge_sampler}, \link[brms:post_prob.brmsfit]{post_prob} } } brms/man/predictive_interval.brmsfit.Rd0000644000176200001440000000161714160105076017773 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_predict.R \name{predictive_interval.brmsfit} \alias{predictive_interval.brmsfit} \alias{predictive_interval} \title{Predictive Intervals} \usage{ \method{predictive_interval}{brmsfit}(object, prob = 0.9, ...) } \arguments{ \item{object}{An \R object of class \code{brmsfit}.} \item{prob}{A number p (0 < p < 1) indicating the desired probability mass to include in the intervals. 
Defaults to \code{0.9}.} \item{...}{Further arguments passed to \code{\link{posterior_predict}}.} } \value{ A matrix with 2 columns for the lower and upper bounds of the intervals, respectively, and as many rows as observations being predicted. } \description{ Compute intervals from the posterior predictive distribution. } \examples{ \dontrun{ fit <- brm(count ~ zBase, data = epilepsy, family = poisson()) predictive_interval(fit) } } brms/man/expose_functions.brmsfit.Rd0000644000176200001440000000165214213413565017327 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{expose_functions.brmsfit} \alias{expose_functions.brmsfit} \alias{expose_functions} \title{Expose user-defined \pkg{Stan} functions} \usage{ \method{expose_functions}{brmsfit}(x, vectorize = FALSE, env = globalenv(), ...) expose_functions(x, ...) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{vectorize}{Logical; Indicates if the exposed functions should be vectorized via \code{\link{Vectorize}}. Defaults to \code{FALSE}.} \item{env}{Environment where the functions should be made available. Defaults to the global environment.} \item{...}{Further arguments passed to \code{\link[rstan:expose_stan_functions]{expose_stan_functions}}.} } \description{ Export user-defined \pkg{Stan} function and optionally vectorize them. For more details see \code{\link[rstan:expose_stan_functions]{expose_stan_functions}}. 
} brms/man/VonMises.Rd0000644000176200001440000000211214275436221014024 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{VonMises} \alias{VonMises} \alias{dvon_mises} \alias{pvon_mises} \alias{rvon_mises} \title{The von Mises Distribution} \usage{ dvon_mises(x, mu, kappa, log = FALSE) pvon_mises(q, mu, kappa, lower.tail = TRUE, log.p = FALSE, acc = 1e-20) rvon_mises(n, mu, kappa) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of location values.} \item{kappa}{Vector of precision values.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{acc}{Accuracy of numerical approximations.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, and random generation for the von Mises distribution with location \code{mu}, and precision \code{kappa}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/add_rstan_model.Rd0000644000176200001440000000145214213413565015404 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-helpers.R \name{add_rstan_model} \alias{add_rstan_model} \title{Add compiled \pkg{rstan} models to \code{brmsfit} objects} \usage{ add_rstan_model(x, overwrite = FALSE) } \arguments{ \item{x}{A \code{brmsfit} object to be updated.} \item{overwrite}{Logical. If \code{TRUE}, overwrite any existing \code{\link[rstan:stanmodel-class]{stanmodel}}. Defaults to \code{FALSE}.} } \value{ A (possibly updated) \code{brmsfit} object. } \description{ Compile a \code{\link[rstan:stanmodel-class]{stanmodel}} and add it to a \code{brmsfit} object. 
This enables some advanced functionality of \pkg{rstan}, most notably \code{\link[rstan:log_prob]{log_prob}} and friends, to be used with brms models fitted with other Stan backends. } brms/man/conditional_effects.brmsfit.Rd0000644000176200001440000003115114361545260017735 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conditional_effects.R \name{conditional_effects.brmsfit} \alias{conditional_effects.brmsfit} \alias{marginal_effects} \alias{marginal_effects.brmsfit} \alias{conditional_effects} \alias{plot.brms_conditional_effects} \title{Display Conditional Effects of Predictors} \usage{ \method{conditional_effects}{brmsfit}( x, effects = NULL, conditions = NULL, int_conditions = NULL, re_formula = NA, prob = 0.95, robust = TRUE, method = "posterior_epred", spaghetti = FALSE, surface = FALSE, categorical = FALSE, ordinal = FALSE, transform = NULL, resolution = 100, select_points = 0, too_far = 0, probs = NULL, ... ) conditional_effects(x, ...) \method{plot}{brms_conditional_effects}( x, ncol = NULL, points = getOption("brms.plot_points", FALSE), rug = getOption("brms.plot_rug", FALSE), mean = TRUE, jitter_width = 0, stype = c("contour", "raster"), line_args = list(), cat_args = list(), errorbar_args = list(), surface_args = list(), spaghetti_args = list(), point_args = list(), rug_args = list(), facet_args = list(), theme = NULL, ask = TRUE, plot = TRUE, ... ) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{effects}{An optional character vector naming effects (main effects or interactions) for which to compute conditional plots. Interactions are specified by a \code{:} between variable names. If \code{NULL} (the default), plots are generated for all main effects and two-way interactions estimated in the model. 
When specifying \code{effects} manually, \emph{all} two-way interactions (including grouping variables) may be plotted even if not originally modeled.} \item{conditions}{An optional \code{data.frame} containing variable values to condition on. Each effect defined in \code{effects} will be plotted separately for each row of \code{conditions}. Values in the \code{cond__} column will be used as titles of the subplots. If \code{cond__} is not given, the row names will be used for this purpose instead. It is recommended to only define a few rows in order to keep the plots clear. See \code{\link{make_conditions}} for an easy way to define conditions. If \code{NULL} (the default), numeric variables will be conditionalized by using their means and factors will get their first level assigned. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{int_conditions}{An optional named \code{list} whose elements are vectors of values of the variables specified in \code{effects}. At these values, predictions are evaluated. The names of \code{int_conditions} have to match the variable names exactly. Additionally, the elements of the vectors may be named themselves, in which case their names appear as labels for the conditions in the plots. Instead of vectors, functions returning vectors may be passed and are applied on the original values of the corresponding variable. If \code{NULL} (the default), predictions are evaluated at the \eqn{mean} and at \eqn{mean +/- sd} for numeric predictors and at all categories for factor-like predictors.} \item{re_formula}{A formula containing group-level effects to be considered in the conditional predictions. 
If \code{NULL}, include all group-level effects; if \code{NA} (default), include no group-level effects.} \item{prob}{A value between 0 and 1 indicating the desired probability to be covered by the uncertainty intervals. The default is 0.95.} \item{robust}{If \code{TRUE} (the default) the median is used as the measure of central tendency. If \code{FALSE} the mean is used instead.} \item{method}{Method used to obtain predictions. Can be set to \code{"posterior_epred"} (the default), \code{"posterior_predict"}, or \code{"posterior_linpred"}. For more details, see the respective function documentations.} \item{spaghetti}{Logical. Indicates if predictions should be visualized via spaghetti plots. Only applied for numeric predictors. If \code{TRUE}, it is recommended to set argument \code{ndraws} to a relatively small value (e.g., \code{100}) in order to reduce computation time.} \item{surface}{Logical. Indicates if interactions or two-dimensional smooths should be visualized as a surface. Defaults to \code{FALSE}. The surface type can be controlled via argument \code{stype} of the related plotting method.} \item{categorical}{Logical. Indicates if effects of categorical or ordinal models should be shown in terms of probabilities of response categories. Defaults to \code{FALSE}.} \item{ordinal}{(Deprecated) Please use argument \code{categorical}. Logical. Indicates if effects in ordinal models should be visualized as a raster with the response categories on the y-axis. Defaults to \code{FALSE}.} \item{transform}{A function or a character string naming a function to be applied on the predicted responses before summary statistics are computed. Only allowed if \code{method = "posterior_predict"}.} \item{resolution}{Number of support points used to generate the plots. Higher resolution leads to smoother plots. Defaults to \code{100}. 
If \code{surface} is \code{TRUE}, this implies \code{10000} support points for interaction terms, so it might be necessary to reduce \code{resolution} when only few RAM is available.} \item{select_points}{Positive number. Only relevant if \code{points} or \code{rug} are set to \code{TRUE}: Actual data points of numeric variables that are too far away from the values specified in \code{conditions} can be excluded from the plot. Values are scaled into the unit interval and then points more than \code{select_points} from the values in \code{conditions} are excluded. By default, all points are used.} \item{too_far}{Positive number. For surface plots only: Grid points that are too far away from the actual data points can be excluded from the plot. \code{too_far} determines what is too far. The grid is scaled into the unit square and then grid points more than \code{too_far} from the predictor variables are excluded. By default, all grid points are used. Ignored for non-surface plots.} \item{probs}{(Deprecated) The quantiles to be used in the computation of uncertainty intervals. Please use argument \code{prob} instead.} \item{...}{Further arguments such as \code{draw_ids} or \code{ndraws} passed to \code{\link{posterior_predict}} or \code{\link{posterior_epred}}.} \item{ncol}{Number of plots to display per column for each effect. If \code{NULL} (default), \code{ncol} is computed internally based on the number of rows of \code{conditions}.} \item{points}{Logical. Indicates if the original data points should be added via \code{\link{geom_jitter}}. Default is \code{FALSE}. Can be controlled globally via the \code{brms.plot_points} option. Note that only those data points will be added that match the specified conditions defined in \code{conditions}. For categorical predictors, the conditions have to match exactly. For numeric predictors, argument \code{select_points} is used to determine, which points do match a condition.} \item{rug}{Logical. 
Indicates if a rug representation of predictor values should be added via \code{\link{geom_rug}}. Default is \code{FALSE}. Depends on \code{select_points} in the same way as \code{points} does. Can be controlled globally via the \code{brms.plot_rug} option.} \item{mean}{Logical. Only relevant for spaghetti plots. If \code{TRUE} (the default), display the mean regression line on top of the regression lines for each sample.} \item{jitter_width}{Only used if \code{points = TRUE}: Amount of horizontal jittering of the data points. Mainly useful for ordinal models. Defaults to \code{0}, that is, no jittering.} \item{stype}{Indicates how surface plots should be displayed. Either \code{"contour"} or \code{"raster"}.} \item{line_args}{Only used in plots of continuous predictors: A named list of arguments passed to \code{\link{geom_smooth}}.} \item{cat_args}{Only used in plots of categorical predictors: A named list of arguments passed to \code{\link{geom_point}}.} \item{errorbar_args}{Only used in plots of categorical predictors: A named list of arguments passed to \code{\link{geom_errorbar}}.} \item{surface_args}{Only used in surface plots: A named list of arguments passed to \code{\link{geom_contour}} or \code{\link{geom_raster}} (depending on argument \code{stype}).} \item{spaghetti_args}{Only used in spaghetti plots: A named list of arguments passed to \code{\link{geom_smooth}}.} \item{point_args}{Only used if \code{points = TRUE}: A named list of arguments passed to \code{\link{geom_jitter}}.} \item{rug_args}{Only used if \code{rug = TRUE}: A named list of arguments passed to \code{\link{geom_rug}}.} \item{facet_args}{Only used if multiple conditions are provided: A named list of arguments passed to \code{\link{facet_wrap}}.} \item{theme}{A \code{\link[ggplot2:theme]{theme}} object modifying the appearance of the plots. 
For some basic themes see \code{\link[ggplot2:ggtheme]{ggtheme}} and \code{\link[bayesplot:theme_default]{theme_default}}.} \item{ask}{Logical; indicates if the user is prompted before a new page is plotted. Only used if \code{plot} is \code{TRUE}.} \item{plot}{Logical; indicates if plots should be plotted directly in the active graphic device. Defaults to \code{TRUE}.} } \value{ An object of class \code{'brms_conditional_effects'} which is a named list with one data.frame per effect containing all information required to generate conditional effects plots. Among others, these data.frames contain some special variables, namely \code{estimate__} (predicted values of the response), \code{se__} (standard error of the predicted response), \code{lower__} and \code{upper__} (lower and upper bounds of the uncertainty interval of the response), as well as \code{cond__} (used in faceting when \code{conditions} contains multiple rows). The corresponding \code{plot} method returns a named list of \code{\link{ggplot}} objects, which can be further customized using the \pkg{ggplot2} package. } \description{ Display conditional effects of one or more numeric and/or categorical predictors including two-way interaction effects. } \details{ When creating \code{conditional_effects} for a particular predictor (or interaction of two predictors), one has to choose the values of all other predictors to condition on. By default, the mean is used for continuous variables and the reference category is used for factors, but you may change these values via argument \code{conditions}. This also has an implication for the \code{points} argument: In the created plots, only those points will be shown that correspond to the factor levels actually used in the conditioning, in order not to create the false impression of bad model fit, where it is just due to conditioning on certain factor levels. 
To fully change colors of the created plots, one has to amend both \code{scale_colour} and \code{scale_fill}. See \code{\link{scale_colour_grey}} or \code{\link{scale_colour_gradient}} for more details. } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1 | patient), data = epilepsy, family = poisson()) ## plot all conditional effects plot(conditional_effects(fit), ask = FALSE) ## change colours to grey scale library(ggplot2) ce <- conditional_effects(fit, "zBase:Trt") plot(ce, plot = FALSE)[[1]] + scale_color_grey() + scale_fill_grey() ## only plot the conditional interaction effect of 'zBase:Trt' ## for different values for 'zAge' conditions <- data.frame(zAge = c(-1, 0, 1)) plot(conditional_effects(fit, effects = "zBase:Trt", conditions = conditions)) ## also incorporate group-level effects variance over patients ## also add data points and a rug representation of predictor values plot(conditional_effects(fit, effects = "zBase:Trt", conditions = conditions, re_formula = NULL), points = TRUE, rug = TRUE) ## change handling of two-way interactions int_conditions <- list( zBase = setNames(c(-2, 1, 0), c("b", "c", "a")) ) conditional_effects(fit, effects = "Trt:zBase", int_conditions = int_conditions) conditional_effects(fit, effects = "Trt:zBase", int_conditions = list(zBase = quantile)) ## fit a model to illustrate how to plot 3-way interactions fit3way <- brm(count ~ zAge * zBase * Trt, data = epilepsy) conditions <- make_conditions(fit3way, "zAge") conditional_effects(fit3way, "zBase:Trt", conditions = conditions) ## only include points close to the specified values of zAge ce <- conditional_effects( fit3way, "zBase:Trt", conditions = conditions, select_points = 0.1 ) plot(ce, points = TRUE) } } brms/man/horseshoe.Rd0000644000176200001440000001416614424733633014277 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{horseshoe} \alias{horseshoe} \title{Regularized horseshoe priors in 
\pkg{brms}} \usage{ horseshoe( df = 1, scale_global = 1, df_global = 1, scale_slab = 2, df_slab = 4, par_ratio = NULL, autoscale = TRUE, main = FALSE ) } \arguments{ \item{df}{Degrees of freedom of student-t prior of the local shrinkage parameters. Defaults to \code{1}.} \item{scale_global}{Scale of the student-t prior of the global shrinkage parameter. Defaults to \code{1}. In linear models, \code{scale_global} will internally be multiplied by the residual standard deviation parameter \code{sigma}.} \item{df_global}{Degrees of freedom of student-t prior of the global shrinkage parameter. Defaults to \code{1}. If \code{df_global} is greater than \code{1}, the shape of the prior will no longer resemble a horseshoe and it may be more appropriately called a hierarchical shrinkage prior in this case.} \item{scale_slab}{Scale of the Student-t slab. Defaults to \code{2}. The original unregularized horseshoe prior is obtained by setting \code{scale_slab} to infinite, which we can approximate in practice by setting it to a very large real value.} \item{df_slab}{Degrees of freedom of the student-t slab. Defaults to \code{4}.} \item{par_ratio}{Ratio of the expected number of non-zero coefficients to the expected number of zero coefficients. If specified, \code{scale_global} is ignored and internally computed as \code{par_ratio / sqrt(N)}, where \code{N} is the total number of observations in the data.} \item{autoscale}{Logical; indicating whether the horseshoe prior should be scaled using the residual standard deviation \code{sigma} if possible and sensible (defaults to \code{TRUE}). Autoscaling is not applied for distributional parameters or when the model does not contain the parameter \code{sigma}.} \item{main}{Logical (defaults to \code{FALSE}); only relevant if the horseshoe prior spans multiple parameter classes. In this case, only arguments given in the single instance where \code{main} is \code{TRUE} will be used. 
Arguments given in other instances of the prior will be ignored. See the Examples section below.} } \value{ A character string obtained by \code{match.call()} with additional arguments. } \description{ Function used to set up regularized horseshoe priors and related hierarchical shrinkage priors for population-level effects in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up the model. } \details{ The horseshoe prior is a special shrinkage prior initially proposed by Carvalho et al. (2009). It is symmetric around zero with fat tails and an infinitely large spike at zero. This makes it ideal for sparse models that have many regression coefficients, although only a minority of them is non-zero. The horseshoe prior can be applied on all population-level effects at once (excluding the intercept) by using \code{set_prior("horseshoe(1)")}. The \code{1} implies that the student-t prior of the local shrinkage parameters has 1 degree of freedom. This may, however, lead to an increased number of divergent transitions in \pkg{Stan}. Accordingly, increasing the degrees of freedom to slightly higher values (e.g., \code{3}) may often be a better option, although the prior no longer resembles a horseshoe in this case. Further, the scale of the global shrinkage parameter plays an important role in the amount of shrinkage applied. It defaults to \code{1}, but this may result in too little shrinkage (Piironen & Vehtari, 2016). It is thus possible to change the scale using argument \code{scale_global} of the horseshoe prior, for instance \code{horseshoe(1, scale_global = 0.5)}. In linear models, \code{scale_global} will internally be multiplied by the residual standard deviation parameter \code{sigma}. See Piironen and Vehtari (2016) for recommendations on how to properly set the global scale. The degrees of freedom of the global shrinkage prior may also be adjusted via argument \code{df_global}. 
Piironen and Vehtari (2017) recommend specifying the ratio of the expected number of non-zero coefficients to the expected number of zero coefficients \code{par_ratio} rather than \code{scale_global} directly. As proposed by Piironen and Vehtari (2017), an additional regularization is applied that only affects non-zero coefficients. The amount of regularization can be controlled via \code{scale_slab} and \code{df_slab}. To make sure that shrinkage can equally affect all coefficients, predictors should be on the same scale. Generally, models with horseshoe priors are more likely than other models to have divergent transitions so that increasing \code{adapt_delta} from \code{0.8} to values closer to \code{1} will often be necessary. See the documentation of \code{\link{brm}} for instructions on how to increase \code{adapt_delta}. Currently, the following classes support the horseshoe prior: \code{b} (overall regression coefficients), \code{sds} (SDs of smoothing splines), \code{sdgp} (SDs of Gaussian processes), \code{ar} (autoregressive coefficients), \code{ma} (moving average coefficients), \code{sderr} (SD of latent residuals), \code{sdcar} (SD of spatial CAR structures), \code{sd} (SD of varying coefficients). } \examples{ set_prior(horseshoe(df = 3, par_ratio = 0.1)) # specify the horseshoe prior across multiple parameter classes set_prior(horseshoe(df = 3, par_ratio = 0.1, main = TRUE), class = "b") + set_prior(horseshoe(), class = "sd") } \references{ Carvalho, C. M., Polson, N. G., & Scott, J. G. (2009). Handling sparsity via the horseshoe. Artificial Intelligence and Statistics. \url{http://proceedings.mlr.press/v5/carvalho09a} Piironen J. & Vehtari A. (2017). On the Hyperprior Choice for the Global Shrinkage Parameter in the Horseshoe Prior. Artificial Intelligence and Statistics. \url{https://arxiv.org/pdf/1610.05559v1.pdf} Piironen, J., and Vehtari, A. (2017). Sparsity information and regularization in the horseshoe and other shrinkage priors. 
Electronic Journal of Statistics. \url{https://arxiv.org/abs/1707.01694} } \seealso{ \code{\link{set_prior}} } brms/man/density_ratio.Rd0000644000176200001440000000275114160105076015141 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hypothesis.R \name{density_ratio} \alias{density_ratio} \title{Compute Density Ratios} \usage{ density_ratio(x, y = NULL, point = 0, n = 4096, ...) } \arguments{ \item{x}{Vector of draws from the first distribution, usually the posterior distribution of the quantity of interest.} \item{y}{Optional vector of draws from the second distribution, usually the prior distribution of the quantity of interest. If \code{NULL} (the default), only the density of \code{x} will be evaluated.} \item{point}{Numeric values at which to evaluate and compare the densities. Defaults to \code{0}.} \item{n}{Single numeric value. Influences the accuracy of the density estimation. See \code{\link[stats:density]{density}} for details.} \item{...}{Further arguments passed to \code{\link[stats:density]{density}}.} } \value{ A vector of length equal to \code{length(point)}. If \code{y} is provided, the density ratio of \code{x} against \code{y} is returned. Else, only the density of \code{x} is returned. } \description{ Compute the ratio of two densities at given points based on draws of the corresponding distributions. } \details{ In order to achieve sufficient accuracy in the density estimation, more draws than usual are required. That is you may need an effective sample size of 10,000 or more to reliably estimate the densities. 
} \examples{ x <- rnorm(10000) y <- rnorm(10000, mean = 1) density_ratio(x, y, point = c(0, 1)) } brms/man/brmsfamily.Rd0000644000176200001440000003213714453716015014440 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/families.R \name{brmsfamily} \alias{brmsfamily} \alias{student} \alias{bernoulli} \alias{beta_binomial} \alias{negbinomial} \alias{geometric} \alias{lognormal} \alias{shifted_lognormal} \alias{skew_normal} \alias{exponential} \alias{weibull} \alias{frechet} \alias{gen_extreme_value} \alias{exgaussian} \alias{wiener} \alias{Beta} \alias{dirichlet} \alias{logistic_normal} \alias{von_mises} \alias{asym_laplace} \alias{cox} \alias{hurdle_poisson} \alias{hurdle_negbinomial} \alias{hurdle_gamma} \alias{hurdle_lognormal} \alias{hurdle_cumulative} \alias{zero_inflated_beta} \alias{zero_one_inflated_beta} \alias{zero_inflated_poisson} \alias{zero_inflated_negbinomial} \alias{zero_inflated_binomial} \alias{zero_inflated_beta_binomial} \alias{categorical} \alias{multinomial} \alias{cumulative} \alias{sratio} \alias{cratio} \alias{acat} \title{Special Family Functions for \pkg{brms} Models} \usage{ brmsfamily( family, link = NULL, link_sigma = "log", link_shape = "log", link_nu = "logm1", link_phi = "log", link_kappa = "log", link_beta = "log", link_zi = "logit", link_hu = "logit", link_zoi = "logit", link_coi = "logit", link_disc = "log", link_bs = "log", link_ndt = "log", link_bias = "logit", link_xi = "log1p", link_alpha = "identity", link_quantile = "logit", threshold = "flexible", refcat = NULL, bhaz = NULL ) student(link = "identity", link_sigma = "log", link_nu = "logm1") bernoulli(link = "logit") beta_binomial(link = "logit", link_phi = "log") negbinomial(link = "log", link_shape = "log") geometric(link = "log") lognormal(link = "identity", link_sigma = "log") shifted_lognormal(link = "identity", link_sigma = "log", link_ndt = "log") skew_normal(link = "identity", link_sigma = "log", link_alpha = "identity") 
exponential(link = "log") weibull(link = "log", link_shape = "log") frechet(link = "log", link_nu = "logm1") gen_extreme_value(link = "identity", link_sigma = "log", link_xi = "log1p") exgaussian(link = "identity", link_sigma = "log", link_beta = "log") wiener( link = "identity", link_bs = "log", link_ndt = "log", link_bias = "logit" ) Beta(link = "logit", link_phi = "log") dirichlet(link = "logit", link_phi = "log", refcat = NULL) logistic_normal(link = "identity", link_sigma = "log", refcat = NULL) von_mises(link = "tan_half", link_kappa = "log") asym_laplace(link = "identity", link_sigma = "log", link_quantile = "logit") cox(link = "log", bhaz = NULL) hurdle_poisson(link = "log", link_hu = "logit") hurdle_negbinomial(link = "log", link_shape = "log", link_hu = "logit") hurdle_gamma(link = "log", link_shape = "log", link_hu = "logit") hurdle_lognormal(link = "identity", link_sigma = "log", link_hu = "logit") hurdle_cumulative( link = "logit", link_hu = "logit", link_disc = "log", threshold = "flexible" ) zero_inflated_beta(link = "logit", link_phi = "log", link_zi = "logit") zero_one_inflated_beta( link = "logit", link_phi = "log", link_zoi = "logit", link_coi = "logit" ) zero_inflated_poisson(link = "log", link_zi = "logit") zero_inflated_negbinomial(link = "log", link_shape = "log", link_zi = "logit") zero_inflated_binomial(link = "logit", link_zi = "logit") zero_inflated_beta_binomial( link = "logit", link_phi = "log", link_zi = "logit" ) categorical(link = "logit", refcat = NULL) multinomial(link = "logit", refcat = NULL) cumulative(link = "logit", link_disc = "log", threshold = "flexible") sratio(link = "logit", link_disc = "log", threshold = "flexible") cratio(link = "logit", link_disc = "log", threshold = "flexible") acat(link = "logit", link_disc = "log", threshold = "flexible") } \arguments{ \item{family}{A character string naming the distribution family of the response variable to be used in the model. 
Currently, the following families are supported: \code{gaussian}, \code{student}, \code{binomial}, \code{bernoulli}, \code{beta-binomial}, \code{poisson}, \code{negbinomial}, \code{geometric}, \code{Gamma}, \code{skew_normal}, \code{lognormal}, \code{shifted_lognormal}, \code{exgaussian}, \code{wiener}, \code{inverse.gaussian}, \code{exponential}, \code{weibull}, \code{frechet}, \code{Beta}, \code{dirichlet}, \code{von_mises}, \code{asym_laplace}, \code{gen_extreme_value}, \code{categorical}, \code{multinomial}, \code{cumulative}, \code{cratio}, \code{sratio}, \code{acat}, \code{hurdle_poisson}, \code{hurdle_negbinomial}, \code{hurdle_gamma}, \code{hurdle_lognormal}, \code{hurdle_cumulative}, \code{zero_inflated_binomial}, \code{zero_inflated_beta_binomial}, \code{zero_inflated_beta}, \code{zero_inflated_negbinomial}, \code{zero_inflated_poisson}, and \code{zero_one_inflated_beta}.} \item{link}{A specification for the model link function. This can be a name/expression or character string. 
See the 'Details' section for more information on link functions supported by each family.} \item{link_sigma}{Link of auxiliary parameter \code{sigma} if being predicted.} \item{link_shape}{Link of auxiliary parameter \code{shape} if being predicted.} \item{link_nu}{Link of auxiliary parameter \code{nu} if being predicted.} \item{link_phi}{Link of auxiliary parameter \code{phi} if being predicted.} \item{link_kappa}{Link of auxiliary parameter \code{kappa} if being predicted.} \item{link_beta}{Link of auxiliary parameter \code{beta} if being predicted.} \item{link_zi}{Link of auxiliary parameter \code{zi} if being predicted.} \item{link_hu}{Link of auxiliary parameter \code{hu} if being predicted.} \item{link_zoi}{Link of auxiliary parameter \code{zoi} if being predicted.} \item{link_coi}{Link of auxiliary parameter \code{coi} if being predicted.} \item{link_disc}{Link of auxiliary parameter \code{disc} if being predicted.} \item{link_bs}{Link of auxiliary parameter \code{bs} if being predicted.} \item{link_ndt}{Link of auxiliary parameter \code{ndt} if being predicted.} \item{link_bias}{Link of auxiliary parameter \code{bias} if being predicted.} \item{link_xi}{Link of auxiliary parameter \code{xi} if being predicted.} \item{link_alpha}{Link of auxiliary parameter \code{alpha} if being predicted.} \item{link_quantile}{Link of auxiliary parameter \code{quantile} if being predicted.} \item{threshold}{A character string indicating the type of thresholds (i.e. intercepts) used in an ordinal model. \code{"flexible"} provides the standard unstructured thresholds, \code{"equidistant"} restricts the distance between consecutive thresholds to the same value, and \code{"sum_to_zero"} ensures the thresholds sum to zero.} \item{refcat}{Optional name of the reference response category used in \code{categorical}, \code{multinomial}, \code{dirichlet} and \code{logistic_normal} models. If \code{NULL} (the default), the first category is used as the reference. 
If \code{NA}, all categories will be predicted, which requires strong priors or carefully specified predictor terms in order to lead to an identified model.} \item{bhaz}{Currently for experimental purposes only.} } \description{ Family objects provide a convenient way to specify the details of the models used by many model fitting functions. The family functions presented here are for use with \pkg{brms} only and will **not** work with other model fitting functions such as \code{glm} or \code{glmer}. However, the standard family functions as described in \code{\link[stats:family]{family}} will work with \pkg{brms}. You can also specify custom families for use in \pkg{brms} with the \code{\link{custom_family}} function. } \details{ Below, we list common use cases for the different families. This list is not meant to be exhaustive. \itemize{ \item{Family \code{gaussian} can be used for linear regression.} \item{Family \code{student} can be used for robust linear regression that is less influenced by outliers.} \item{Family \code{skew_normal} can handle skewed responses in linear regression.} \item{Families \code{poisson}, \code{negbinomial}, and \code{geometric} can be used for regression of unbounded count data.} \item{Families \code{bernoulli}, \code{binomial}, and \code{beta_binomial} can be used for binary regression (i.e., most commonly logistic regression).} \item{Families \code{categorical} and \code{multinomial} can be used for multi-logistic regression when there are more than two possible outcomes.} \item{Families \code{cumulative}, \code{cratio} ('continuation ratio'), \code{sratio} ('stopping ratio'), and \code{acat} ('adjacent category') lead to ordinal regression.} \item{Families \code{Gamma}, \code{weibull}, \code{exponential}, \code{lognormal}, \code{frechet}, \code{inverse.gaussian}, and \code{cox} (Cox proportional hazards model) can be used (among others) for time-to-event regression also known as survival regression.} \item{Families 
\code{weibull}, \code{frechet}, and \code{gen_extreme_value} ('generalized extreme value') allow for modeling extremes.} \item{Families \code{beta}, \code{dirichlet}, and \code{logistic_normal} can be used to model responses representing rates or probabilities.} \item{Family \code{asym_laplace} allows for quantile regression when fixing the auxiliary \code{quantile} parameter to the quantile of interest.} \item{Family \code{exgaussian} ('exponentially modified Gaussian') and \code{shifted_lognormal} are especially suited to model reaction times.} \item{Family \code{wiener} provides an implementation of the Wiener diffusion model. For this family, the main formula predicts the drift parameter 'delta' and all other parameters are modeled as auxiliary parameters (see \code{\link{brmsformula}} for details).} \item{Families \code{hurdle_poisson}, \code{hurdle_negbinomial}, \code{hurdle_gamma}, \code{hurdle_lognormal}, \code{zero_inflated_poisson}, \code{zero_inflated_negbinomial}, \code{zero_inflated_binomial}, \code{zero_inflated_beta_binomial}, \code{zero_inflated_beta}, \code{zero_one_inflated_beta}, and \code{hurdle_cumulative} allow to estimate zero-inflated and hurdle models. These models can be very helpful when there are many zeros in the data (or ones in case of one-inflated models) that cannot be explained by the primary distribution of the response.} } Below, we list all possible links for each family. The first link mentioned for each family is the default. 
\itemize{ \item{Families \code{gaussian}, \code{student}, \code{skew_normal}, \code{exgaussian}, \code{asym_laplace}, and \code{gen_extreme_value} support the links (as names) \code{identity}, \code{log}, \code{inverse}, and \code{softplus}.} \item{Families \code{poisson}, \code{negbinomial}, \code{geometric}, \code{zero_inflated_poisson}, \code{zero_inflated_negbinomial}, \code{hurdle_poisson}, and \code{hurdle_negbinomial} support \code{log}, \code{identity}, \code{sqrt}, and \code{softplus}.} \item{Families \code{binomial}, \code{bernoulli}, \code{beta_binomial}, \code{zero_inflated_binomial}, \code{zero_inflated_beta_binomial}, \code{Beta}, \code{zero_inflated_beta}, and \code{zero_one_inflated_beta} support \code{logit}, \code{probit}, \code{probit_approx}, \code{cloglog}, \code{cauchit}, \code{identity}, and \code{log}.} \item{Families \code{cumulative}, \code{cratio}, \code{sratio}, \code{acat}, and \code{hurdle_cumulative} support \code{logit}, \code{probit}, \code{probit_approx}, \code{cloglog}, and \code{cauchit}.} \item{Families \code{categorical}, \code{multinomial}, and \code{dirichlet} support \code{logit}.} \item{Families \code{Gamma}, \code{weibull}, \code{exponential}, \code{frechet}, and \code{hurdle_gamma} support \code{log}, \code{identity}, \code{inverse}, and \code{softplus}.} \item{Families \code{lognormal} and \code{hurdle_lognormal} support \code{identity} and \code{inverse}.} \item{Family \code{logistic_normal} supports \code{identity}.} \item{Family \code{inverse.gaussian} supports \code{1/mu^2}, \code{inverse}, \code{identity}, \code{log}, and \code{softplus}.} \item{Family \code{von_mises} supports \code{tan_half} and \code{identity}.} \item{Family \code{cox} supports \code{log}, \code{identity}, and \code{softplus} for the proportional hazards parameter.} \item{Family \code{wiener} supports \code{identity}, \code{log}, and \code{softplus} for the main parameter which represents the drift rate.} } Please note that when calling the 
\code{\link[stats:family]{Gamma}} family function of the \pkg{stats} package, the default link will be \code{inverse} instead of \code{log} although the latter is the default in \pkg{brms}. Also, when using the family functions \code{gaussian}, \code{binomial}, \code{poisson}, and \code{Gamma} of the \pkg{stats} package (see \code{\link[stats:family]{family}}), special link functions such as \code{softplus} or \code{cauchit} won't work. In this case, you have to use \code{brmsfamily} to specify the family with the corresponding link function. } \examples{ # create a family object (fam1 <- student("log")) # alternatively use the brmsfamily function (fam2 <- brmsfamily("student", "log")) # both lead to the same object identical(fam1, fam2) } \seealso{ \code{\link[brms:brm]{brm}}, \code{\link[stats:family]{family}}, \code{\link{custom_family}} } brms/man/is.brmsterms.Rd0000644000176200001440000000055614160105076014715 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsterms.R \name{is.brmsterms} \alias{is.brmsterms} \title{Checks if argument is a \code{brmsterms} object} \usage{ is.brmsterms(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{brmsterms} object } \seealso{ \code{\link[brms:brmsterms]{brmsterms}} } brms/man/cor_ma.Rd0000644000176200001440000000337314213413565013531 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_ma} \alias{cor_ma} \title{(Deprecated) MA(q) correlation structure} \usage{ cor_ma(formula = ~1, q = 1, cov = FALSE) } \arguments{ \item{formula}{A one sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer valued. 
When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} \item{q}{A non-negative integer specifying the moving average (MA) order of the ARMA structure. Default is 1.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. If the model family does not have natural residuals, latent residuals are added automatically. If \code{FALSE} (the default) a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{cor_arma} containing solely moving average terms. } \description{ This function is deprecated. Please see \code{\link{ma}} for the new syntax. This function is a constructor for the \code{cor_arma} class, allowing for moving average terms only. 
} \examples{ cor_ma(~visit|patient, q = 2) } \seealso{ \code{\link{cor_arma}} } brms/man/is.mvbrmsformula.Rd0000644000176200001440000000052014160105076015562 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{is.mvbrmsformula} \alias{is.mvbrmsformula} \title{Checks if argument is a \code{mvbrmsformula} object} \usage{ is.mvbrmsformula(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{mvbrmsformula} object } brms/man/data_predictor.Rd0000644000176200001440000000077514213413565015260 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-predictor.R \name{data_predictor} \alias{data_predictor} \title{Prepare Predictor Data} \usage{ data_predictor(x, ...) } \arguments{ \item{x}{An \R object.} \item{...}{Further arguments passed to or from other methods.} } \value{ A named list of data related to predictor variables. } \description{ Prepare data related to predictor variables in \pkg{brms}. Only exported for use in package development. } \keyword{internal} brms/man/brmshypothesis.Rd0000644000176200001440000000417114213413565015351 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hypothesis.R \name{brmshypothesis} \alias{brmshypothesis} \alias{print.brmshypothesis} \alias{plot.brmshypothesis} \title{Descriptions of \code{brmshypothesis} Objects} \usage{ \method{print}{brmshypothesis}(x, digits = 2, chars = 20, ...) \method{plot}{brmshypothesis}( x, N = 5, ignore_prior = FALSE, chars = 40, colors = NULL, theme = NULL, ask = TRUE, plot = TRUE, ... ) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{digits}{Minimal number of significant digits, see \code{\link[base:print.default]{print.default}}.} \item{chars}{Maximum number of characters of each hypothesis to print or plot. If \code{NULL}, print the full hypotheses. 
Defaults to \code{20}.} \item{...}{Currently ignored.} \item{N}{The number of parameters plotted per page.} \item{ignore_prior}{A flag indicating if prior distributions should also be plotted. Only used if priors were specified on the relevant parameters.} \item{colors}{Two values specifying the colors of the posterior and prior density respectively. If \code{NULL} (the default) colors are taken from the current color scheme of the \pkg{bayesplot} package.} \item{theme}{A \code{\link[ggplot2:theme]{theme}} object modifying the appearance of the plots. For some basic themes see \code{\link[ggplot2:ggtheme]{ggtheme}} and \code{\link[bayesplot:theme_default]{theme_default}}.} \item{ask}{Logical; indicates if the user is prompted before a new page is plotted. Only used if \code{plot} is \code{TRUE}.} \item{plot}{Logical; indicates if plots should be plotted directly in the active graphic device. Defaults to \code{TRUE}.} } \description{ A \code{brmshypothesis} object contains posterior draws as well as summary statistics of non-linear hypotheses as returned by \code{\link{hypothesis}}. } \details{ The two most important elements of a \code{brmshypothesis} object are \code{hypothesis}, which is a data.frame containing the summary estimates of the hypotheses, and \code{samples}, which is a data.frame containing the corresponding posterior draws. } \seealso{ \code{\link{hypothesis}} } brms/man/logm1.Rd0000644000176200001440000000066314160105076013303 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/numeric-helpers.R \name{logm1} \alias{logm1} \title{Logarithm with a minus one offset.} \usage{ logm1(x, base = exp(1)) } \arguments{ \item{x}{A numeric or complex vector.} \item{base}{A positive or complex number: the base with respect to which logarithms are computed. Defaults to \emph{e} = \code{exp(1)}.} } \description{ Computes \code{log(x - 1)}. 
} brms/man/figures/0000755000176200001440000000000014504270214013433 5ustar liggesusersbrms/man/figures/stanlogo.png0000644000176200001440000003745413271032273016005 0ustar liggesusersPNG  IHDRwx+sBIT|d pHYs&:4tEXtSoftwarewww.inkscape.org< IDATxw|ՙsf$w+$` ؘbQCB ے'7B6R6uq6e7M ل$ ؒm Л$W43+WY{nyޯ/lIw >̙31 B2J #7¡# #a@H P U] ]tuW~V-nUt+ˊ@l sy#/f TILxC&f~I&`= PX]&b.{gʘɋE 邞0t hrh=OuO\Κ;gnnՓ zt2L¾xYIqAsb?l_3bw끳1s+WAŮmZ􇕻OA_LӀsxH`9pO_Can5 j.͠gڪ' UX;i\}2Ə A|g2xEnE٬Z;),V%sNLbALpCfX3j8w5O+~WϪg}1~X%L]PSyL1|/cʽ atC=؟{9ROLl;-!/aKH> `<` 4u-7%ʽNiܻ ;)x+֑|1c^"Qs.ȇ} hLOSq#cʽ-p+5o P;)7Ŵ0o܋|F dS |1J7(`-Nczʽ,a؈#~ܔgw3VE`ܗyBwS o0{V,sQ?|}1K"{/Y.+q5Jy9NZx "j9UX\oӶwa^2[xmoG!F@ǘ,٤׆2O2X{Lã)A¿6ҲwrdgK?%F#c]JF>;H9rϓJ?#ti;/evyʁ{4Qs%AFb_ .YB*2wc K ^;Kri*oC}1@J;-ߙ 0=Q=S8NRJܳZRGӠ_.[|s~5wS JBja킾 ˘ʎ7՞6rfjߣOASdb1E 8y)PF҄we߁ʑ{-aї1hnY@ʽG1a8wc Jл,Exq@f_VsaLy%p",CþYTFnwc r =U[(H_,N?+LpleK鲑;ɕt\/;}g1&V.NpTi/}2W徘 z+YɒdFɒzd˽ ^ r,C<{hyt$CʶR}&{)R{)-`ʲQ} VĘUnw*{9+EԾW¦mDe_鑲*&j&` J5Kgw Sʦܛ -=;r\Ȕ(&j?s^KY;)M%_¿aʚMޕ )|wSvv 9A;[Y;)?%9r^#ࣾ@,]NߙLy+ro?@;)i1"ДܴLfnrdP%Xs(%5roS5sJE2^n1Ţdʽ+\O 7oV.܂9/E)*%Q~ <5}" 9~wc˽F)&̞*"ܔ.%AQd mĉ_1( $W69I1ٗ۩{AP!Ug[S2r_ȸG,003.;1&rA5 Z[hL}15O89}4 5U|'1GNK}؍)/9-SypZ.a0B"Fq xN\t뮐z˘ÑY{sO;hnLƏ Vӕ|bYモc= [܊"&p 4a/71 b؍)5xSLSXHwyܛȋ@U6kЖ&u-y4,er$S''L&o+Hl;#wV7؍g9;U0i S#'!> |*[6rof\<?dϫevA)TiB2k$~*?+/z1ٷqnT 2 =϶vO][V-e쨈j`@6gzF3 9zk|g1X)d"]8&B8ɄY=&(Vy ؍f L z2ȉ'v]qx pe9Xhg7f?ׁ͋la\v(o#y/Vy'r{)w9$`*N2S+'r4/2zYҟ+H35O`F5&sSZjXOG+i2_#b3e­L<"E<]$EO9`-x]Eh]L^o ˜@j'(LKΡSA|BKĻ1sKЇzg;}12ۀ'+:ݡ')LVuG`j=ˋɻ$-T pCNQS]T[?^}'(7&(-3K"_ie&?1G"'55'_{DVA+)\Z]U]yBJ ~ړq rES>lV|*=NHftɚ*7.zX=ZD8Vlk> Cꩂ:8;) NՇ U4p{a۽~\n68Ȕ%Nu]#1\;y͵ԥb*SaG~ȅq{&kuϋLo88d3ɂW}M\Vy߳bYc# z:l8͘Cp!ɥa]!kLOll}(UK86">8]f88"3zOMqpn˽R&IoRHe5h!JR&W=[3߹ɂeF6m;t;rBA$sh 9pTzZÖωHW 6hJKcC}ae%V55ss 3dގw)u̦.%YOd?P-vxΑmph66*7H߬ Z|qȟ&jM4y"i;wBڣ8`&pv1HN͚nF\2Jpe|TQ{<{X87A'eLq.Kn޴ V!e9(3Ag>ksw$g*6Le2|#m#W.5g8$ru9c~;D?WթzUvB$:̍?}ș 5Cs@,!zKqtu;ZE.>v#azblbɳCh=<#?QB + \1>}`;U]oorF&VˠԞ -L˸JTo9vO >ҮɻSW\8G!` \'.慃س}ޠQc;Bڈ.$S#!=g&`TOjuW>fX|a_uLz7:.ΊǎQ#ԖBm)}LxE< zZ>s~c撷"8o8* 
83d1Y9z6}0SP"~c_N.ʘqQ`+yc"!5sorדRږ_]~q+UAѱcZ2Jޘ~C'*3 88jB{*zx`.ΑmR2HLX'u{~ V<+EGhn%(/c␩Q@13ݞLF>zn7=η3zsnv\]ld+$!p|r|ƌ) `o7E\½ 4(dBߚ81.wE&o)cB-scae~*m[Dy0ˆcc 6ecʀ1j2Vd]wRuOJݛ\2WrK'Ȉq(({p)aw 2&ϡe{j{ڵo5 q k*:88ҾMQdr,%Ikw?t^n+a vQ ##jqH͛1]a(5M+ { >w s8`R؍v;Miџ*;9m]<FK<-ܘ;YȸG'dҗ]bJcj0(և<~d^ҒyKAĈD8&maSƅ [=/Vw]Edt8P+wpVg :*Xś%_R, eN r/E\Qh?5%V0Q|RÉMQゎ .`x,XVfˎ"i)PF694/\|YivT8W]sA !S P Δ=ӵ<qVgtӣЖ^[nHžw sU*483p^N{8HIKvb'^q / !τ123  A6;W|1sd831]Ɍ(dXsx;*:Xk-EB`&;*T8. &R67ފNBlRxw]Bf)qzgA Agĺx$"R P`j”b^ bۍ-f?/u}_#e{9%dsed 'D!6Ɓ-4X'>;͡WrJ=mSGM.aetM" wWtp~Gm%MQ, Ч|1:y]5U8!VE'ɲ jS3f(mABKK솗25@:*f_TbD`<@Eiq Ͷ /;hnQLAT8Mԧ+9% 9v,§"kX ]7)V\QACΐV%o(:mGҢ E 6r76H)qȔ^vѝ0IDAT .Rgv|G1r+Y0~RLҢuYԊb&{jTbRccbeKX^%0fdaqJne2WgD&Hhu1ĊH)ʃN.IWlrU>ØBLNx9Hhski+1Q cN陂tM,Ua^S(뜲&iu VE!+` zZ=K!Qg2JJu,]ɬt%ӣБV޼:{K$gO7Q 0<ɒK,c=|$OБb\bw5zHUgy]3!P;jbv&$]2g9.x޿٧yr7%Axqe%%촢Ϫ];dL 0:qNu+Niu-Ab ^kSiyv4N0"IqJ]f+M _ ҜzD^3S v42nEct>Bb}P˘0L9J.(d-{wn*\PצnBNBwc|쒁*^nh6Ӣ.aUb6gJQc)/Joc{ bd#&2,,FnØči¿Jճvd2x^GFgeLQ;岢 O.TOeŖ?xL61#j‚8Si Zu..7$2T;v8 A61+E湱oHѐ(Ű[q3Dݦ;*~ R=htܝ"ywci 'D'D] fK(7!$M+_QJ.B7D9cLu.fe&(i)xaz-)TN8g[zWS`|03EcΐE2ms4$Ċ!]lݘz%J.8,إ'U~^y>;C.9Id9]*U\@J=Az[N"`D9C}>ہrsHU 'DJ蘒&?͵ w}KNyc{cU>{97d71$1cgBbuM Wo|mlcYy `rE c鋁*{.cyCƷw}c7L~h7ЃuM> z\"cay*;wHJwmof@%h M_h]7ݘ4> :*CdItŞ+P[ \ϒhNA_AM85Q&jZKk]a+g olߴP_LjGi0onɴ 4e-1&%uTpNg\x Ck+vE?O;LCoN[~2q800"sT"EaCǺ!ȦdAOG從Q< `S)*!prrtH*]RqM?=@9џ]\Ý@0!*Q1:ݛb,2 Y =|u/'&E>Nk]#-}Cw;w!Yzp=MAy8O1S4'Oy8)L߭urG]flnLaFZ>| =-v` 1&T`A@2bM[0Y 7/CAz:cLN$@Sۥ=6S}"bh7/c1~%W+ <+I "z~cX k1/z^ neq+3^(6Tŋ޼3_cCPޓbpDƊ`}|zy0ч17t~-ٖC E_^rn}}1[OhW='3ۚyBo19w_ 4rPGZ=?>Rws֭Դ>z_ OY+D3V8t`}}u:kzc \=`bm_YQ{AgJ_D2vTDq)#i&%[RWǸ )Y ;#YM X<Ê|2"_OB*v +B"ءs{"G3ۂڲHc*l<;̡dls=AۋYK{cJD$$pˌ@Յ\ U؞C[/#0_cSYp pͪuMԾ'KlHS6*̇y_¦T Ɓ2rv4oBЛҶ<|3ɍ}g1;V5S}"1)iл/f߁jW`,WO뙽yRw6rHo ,1$ p.Re߁Vj @;9l;dyǰ.ެ7r@5QȧJy!@yEy\cr(^'pk#k}ʆLJrC?pnVG%dqLYwlzԷZqs@coLYwl[L }حԋm Łs2r̕_\Ø hSi!Sx;Dq_s:mL!ɃaZ)yw ڜ.߁r)˄K}?h=P;ɪ6\ >nstqk^Ϩ?^ÝO!$ﳟ$R@Yyrk?>?Քd K}?S4_ne[@;-FZs~g.4QspIg*;,ͮq@0p* odA@8SΖB>NpwֳY߁e)+#җ]^A६DDNfjyN㝂> 
!s=XHC\;CFZ)_kgvzmGeB5^S.s@13tu;9M959.p @ܗd` \͡\\Oy>gپ3%)NEt$ֳv@LxE%3L(H{}޼]P[@>od2S-LqoF \Z:@-M):?.7Qn|Vpp#>3=AV*:T%(:[tY@l_g2-GĖy!w[PM30Uq 0))&d[@Q |+w2g"] ^;!H) \( b7י,R}gDR;sdLѻqnK4!""A.dJZ;i":R80w".Kp*S9 Id&91QXƔ0J|,AR@O(,sݻH[.oo#zp>0^2@}o; y{H];Or)d΂|AO@މr@[<~20SLY v(ٴ\;Au[9@c:oȧhSD/{.p"h=ɩn8Aꀩsiw(=яU) /)Kۮ|?0iBBtVW}yNeܱBw{6v5f AuJR'TcKam+QlȻOf98vM|g+~ZNƟ )rt{j1EqWLӺcKymS^Νͪud{7v/?k9rcJIK@xlV$ܻ۬dZq)r}VN{e]pϡe$PN}m+Ӏscf% Xw\)r_DI7KɌk}g2xd@Rc_Qs N#B|lZ7kw|hdwŸ|g1xs_;Y[e2-R&WFo.W^_9ޭvoP63ƔE!o+]av&Y-3crGJG[VʲwkKW)ƔLė/E^!诀c]};Oe_ >! )vil~A|rLxyQY;H!(u=QOۚv4b-Czk2imT+czJ{1\B+9&jjLڮiY;H!rBj:t0wc>^ppZPٴL7.ieY|Y1{4gXwF=DA Ř2+GiyL7{g+fwcʉB$idrYMB=kS.Jd+ޱ{d6 `<Ɣ]|Ra1G'}ifbLy( yl<;Hi~Cˊ!wcJXܳd5 ;1E5z FYHCh?i'[gs 3Z4Y)pwXrR1ǘ +|g.ľÔ"+~Y) H>lLs=O~ A?_Oo})VyC;IY=jLإ*:b\X{̸Z} ]؟)iꕾ+R}f(:Y.}TOF=S&)7&M['>L/^YTB>%9aK$?gNa{Y!~8wc' I;1[|1r/PVm}s-cd^S;9gv+m89{[1[BywO<۫^~y4vq>>̮reoh3q{6s +2邼O@<&o^+zs#m 4z >]]r們w@TUsKo D7~1ʝ{jv8wu nZִˡjWtSw.$qoX

Vf>?+}piܱ++G0YS 4^~әnD r{gC8F05M`U~k'yZ&>00<)Ky#bڴkKup\!S9 Hq;<,VfV14wC8y +Vp{.fzUU ޠ*X|X5j9W/ׯyAEy qK鱽oA )9 3"2p_}x}qOq$sCuO+o}LJNDÖ+w'F CdFg98ɶ)fAy6K|Yk++G 7*=`GPy7/z\lerj5*0MHLf|gˣx^yOͦe 9pQEeˍ#N ԝae4`F,\|Y[jd (0O1.`+dUׯW%z<{5wzΗs6lS]os$7^%LI4!@&&1F(:BHō t3f 3c}f]yJX9Q4Gn@~NERCI;D8'WݴsByK)j0~D%P! d޹EQpm,Fi*RڂX U`$j2G1l?Πɡ"^@p_zNƙa$FF:u5ȏ2IENDB`brms/man/figures/README-plot-1.png0000644000176200001440000033350114366502665016233 0ustar liggesusersPNG  IHDRX=ciCCPkCGColorSpaceGenericRGB8U]hU>+$΃Ԧ5lRфem,lAݝi&3i)>A['!j-P(G 3k~s ,[%,-:t} }-+*&¿ gPG݅ج8"eŲ]A b ;l õWϙ2_E,(ۈ#Zsێ<5)"E6N#ӽEkۃO0}*rUt.iei #]r >cU{t7+ԙg߃xuWB_-%=^ t0uvW9 %/VBW'_tMۓP\>@y0`D i|[` hh)Tj0B#ЪhU# ~yhu fp#1I/I"0! 'Sdd:J5ǖ"sdy#R7wAgdJ7kʕn^:}nWFVst$gj-tԝr_װ_7Z ~V54V }o[G=Nd>-UlaY5V}xg[?k&>srq߀].r_r_qsGjy4k iQܟBZ-<(d=dKO a/zv7]ǰod}sn?TF'|3Nn#I?"mzv~K=گsl<b|_|4>?pߋQrib 2* (Ѧh{28oIyes8';Z9h6g>xRx'b8ՃWOϫ[xn%|^z}%x c8eXIfMM*iXw@IDATx}űv]Χtw$$@,Y` 06< coo3ј`cL&$A P))\%]P랚흛ݽj[73,TW'V  @F [['   F@|A@A@@@{ $1QA@A@.A@A@A 7ILA@A@g@A@A@MA@A@!A@A@b!1pDA@A@A@|A@A@@@{ $1QA@A@.A@A@A 7ILA@A@g@A@A@MA@A@!A@A@b!1pDA@A@A@|A@A@@@{ $1QA@A@.A@A@A 7ILA@A@g@A@A@MA@A@!yV\sNto\  ޽{iǎm\@#E ]x 6las=+*}SN#GF'{G{// &zA@O~)ytuu*x8ӣo}޵k=TZZJ=\(1->g?ܹ'{n#;4o<[HSS͟?>czK.㣏>'֬Yn}@z>?я!o~^x$w_VV/]G<@}\^^N_Ϧz+E=Gueggw;--M'&&cӇnvZlYT?.7|3qGgy~gɅ ~1Ԯ:|UDIOO߅7oiӦns˼Mr-=q@l`tLQ:3"Vw饗FBH3<㫻@˷]*A@C3Ow[8GɓiPjjjqǔH .@cc#-ZHݷo_Qjjj'.WWW… 4}tnJ']D:|̜9lfcƌo1xxxGׯ_~-]櫯K#lĠr!H/,Rk_:?UsYd +oЅ^v!ޥnV^Mr]w-aZB#q2Q򗿤_|ϧ{ s:餶f^ӎ?\;,,X@x***sΥ={谘_WdzB{ـ?: ^Ղ<_yza)bỢ *z~iٳgk/]IA#N\s^<o;`(+ Blﯿ1 sadꪫth+/&!]qWdt7t.xEd`A!i# SMmm7]Z#3PpD\w9AFi" Ay /vhϱӽޫwժU:.[w}޳xKs cO xu>CoN 9] u=ZA HPDl(cņ#A6RA@H@hС.QH كrt~#T;+ BH ;RqA &@ PFRK'NDTdA t] u{"1Z"(L&LR"$t-qZe4A@A@AMHV6&A@A@E@{-   mB@{`N  @" ĽkA@A@6! 
ĽMI'A@A@AkhW:Hz]Ұ'lA[:tjjjNZd4A@A@A hqonnn,XN1##~駟օ:IM%%%A">`е\  AMyxˣ.魷ޢW^y@_z饰9sܹsOvCJJ zzfX9A@A@=uuutakn+Wիiҥtg:7x~߷{w˗W_MW]u:48*kY+hފmT^zeCcQvFjLHw> zcw8d gDM4Qm ^b&7n$ _r]~kninRGӕOge%%:l'9bX{߾}:[6#5q>|`A=zo;W.YRSSiƍo7H,&GA $+_tRSiJTT4P45߀>sN/HyY!J  {>a:CkF{싋i…s{GMMM:|&TSٳ+**t}P\hwI{VF2M_HiIgC5y߱ʪigey~kvA@@UU3`;v,%&F6KWA@h!vҋM}VƤ$홇wk_~4k,ccBBBI/n}#QVCI 4؂ H$%&ЀL UG[J4ٿO轅_F rA Xtw봽W^y%-[L_#Y&lba!A@"n/GyD:thF'#/|( 6g%){=<2=\7h )^ҍ_Mi]I# BA@!j dff^7|3АIc뮻&M^ˉ @D3'|N9v}N<^[[bшt<{рI/,N=IINZ:hwUWTA@yy9M2;!5e]4ݻuǠ0ؤĉHJs"zSd0j+٩t|}w) 1Ųc* 466T`gw믧{/ +6V^1JAB |%̧d֯;x ֋ͭC\$1;b!L2xa͎w??b$}EF맧 [A%o &:&?NZA 2^B My=<éD+C* v>Ķ6[Z# !vchMh'kŃTLguGwlv[yg %zDKN< 욽w^p"=J6H""B~lt##P?3SԶg>:A nbPp&Ԏƃ.r-uV[h̙zm@وz,XBA ^zc`ޯ~Z[Y'̂uGE*mEwI$Qgb.c s=W/B褓NI{m^IֽkA v;*,U(;ŽK U5>Z6A 8s"S P:A@ڏ@1ohS]{`)T%uU3EޱP]e`LZJk[' &} kHL1W2qGhԨQoҋ/HI ?4w\;htWɓo. =xcВbۻs LAG+Z t<B;S(8Q:k}Ґ!C覛nqitҐO2;< dKi˖-o+ =C`&6]j A漵ӽ5MڄgmR#A"5q/++g}=T={&eРAGӧO弇#̈́SF1&÷.k5%Z $eUt/**c~1Z'&Ç~1(GnX>`*.ViY\=xQ)%9C"ksV1;d`A@:k7ѳzVD t"mZgիi„ tG5J}do "~QQݤ׮]K[n _nq8x[^?/J{*tO;\ @!P益PJ7N=KG(DaRA{#!lc׮]z飏>lyiZYg ˦&={'{S9sUF:v%xA,.sɡ4yʵ hZ[nN܏ >B܏ 2  O2Cmu|ݎtނńEIx< ܹӫJcwo;2&ŀ&"\& ᐟ` /rW;wO#D'SN9&MRci!B, bmr-YN/rBC|.A@0ccnYAϼQұ'|ł@ .>m4Bbcǎr->lذRbNf/C^͘@F JNfEtOQM]K+tN^h 6xXZAϗ};ёڮF}}eټ{n ҹ/^1{3<3^.z&:RsR(3CYRS͘ޞo CdPA@ h AeoM"KiCIYTsoMX?:[wW&E+ H Ѕ?:' wPnjN;^~BB_=͘1C_6^7^JGɰ:g2xXb{<  쁳H!$յ]C:>Сt %A}Ww˯25kaJLL%$H/c#ofK /M]]xAnذJJJJ?su:tItRrr_*7;j{MƋ.>V諾uR4fWDA h./j,³&JĿ>cwz'JoAo~BNK}kkOWPvVwM @ƏMj@d9Q3#Gjyy睚]9?PVV&'=,DIKM^)1? ꓫB~jBcz00c 毠'->t%(OpG~@(}XNYTf}ۃuA⠲Q pC^tbn8ĽwHB l9vH"c @#z>F:;Ρ2󗬥/onbPFӉ{ץ4] ,7W rԀNcl-W :iQ+EWd:@l~W%pRmvӺMZ@ɺlOG-R{&T >qE:Fk!yQԵZsS)#l"%^^Fڄ@BɄ2bh[ujƐvSNH]65/Og?O mFXO&'/卥zY= c@aiʴFe7V[8qx6яG#nj:{A1̝1@bO(QIa2 ,\*YgQ@# (m!~50nuf gGWY'AL֍h+T>tojENJL5w"Ճ*k\pCEB܏q6nqi%m޹WϺwwMb"$~H?XlPڮMz @g )!ͤP L\Cɴݫ?Z춶LJ?'ԤxSSqWobO8j϶ua8̽E@!Q%mG`tt S{ZILL dA8vwe>= hmye Zb hDzIEl㚍;hJ<^SSU73JII֛UTUSMmmVGLܝ\u$:{:EDh%Gw&$B#pS?//Fr-QK.[͎tqꗫɤ?ȣ&!u 9T'U_^;iR]MmCcc\ ! 
=:D:J(?`H;{OL[(!qA@@@P G;(L$g_Gz[y 51bvigu0]{*^yYRʐ kxnl>64M.qEMޤ(O E#?1HxG?[^*Kuj.Nn)0+}Rڠ$9C9 " =RDM|t @Wi!QJku0e mXi`n(N-[KBIں)j25lV{naُ&X>P2LV[z_ǜ%q_SZxuZ~Vs9?4(o2@aǂ,yKY>F_26\# gɺ]jh8 =gckvKzB{}춳xfm[<ķMLOlԛrA ):k_U+5ycv賓>Ρg-*}dȄ7~ შo+ns>;rۥP~:x'.E{G_nGJ ~`yc1^0`/|"uxƓ:A5 e5AyP!k@ :w]-炀 p`bp@2))I[bc "vags$#Z!Y%nKCexn|tu8' l?dq<%9E8W7aE B1L1D7T-D닷 N50v}IX}a7n{e^װ1-%y~n[EU_R Ľ-IlKQs:=@mw5}.2=b V,qי}dHUPz.q 2™l:I8TœgQ8>JQ~\/;M@x#:&lz {*7bʋ(S5h7E 3s^4zmeU>GxJvGEa>fZ<PV 8>8LJޕiYp?Y;@@`x -\l$&K 4X0m;oD&Lt_ 9HBe)jd?lmoKDU*L+:9 > pj{@Z+UQJX}m6Q hpܪMu>Opn;cuefzy:FU-xyGUm‚~Χl8,v:tw>;om1m~a̽cQȾU"$29˶M/9\Oִぐ (h֬Yj*D:t m+-VTժ=D 4:U[w;猪yڳ8HaEt;h™?%B!.{>\;y>2lVĔgaіI1 7.5=(!5~*KXq=nU{qKY;bF&%a%U7BW{ikR6Hv* 6jڸ<td\< Zrm1AnХ_}J o.CPm/ %ٷw|8ЉsGխkA!s]wџ'*,,e˖ѕW^Ij(7AQP54G^ծݓ/@2w(WОvNV͇i˶ۻC7oe:Cj-n{m {j_1?G206rlӲEjv i Ӓ(+# ]&/ddxe* <4w\;htWɓo)I'}*!E\uֱ!zg>=ۺ?P.LXw[hORk{LIps9 Q_VL,0NO agěOcl16<<7o1cBjF@8tq?ư(ўd?|؎qgҌūmgQ|F<豜>~mFЗm#;6}Byvlc.w+t߶K(o?ĚSոsz0@[@:^a2B]2""4e:(3|1K/lBomO_$uLES/ {>AhYd '_|/.O˳BBo),ѕHܢZ Ȱ.&* q (t-k&14TT#l<\{i ` ?t=.>/Um&zbHGGVU;!;JJ׈#n,=z2N?{.@N@h9myB_=n}{QːDx*P*mTY8o_cUa;Sq~&:wG-n}W,8%wym>SO=Ek֬?0b ׿5}S90ߞL ^ˤFzwzU?9uA 4 [kYYy=}oFڻwo΍7--!䥪谉t&R7ǵiZ;4szvL:Afs334At9xLdR_`/NjWRRVlJHfsXЧCeq*f|7,,dP^{cBL8Uyܭw%~9-B'H,ױLڏ9ѹl/ q@}r;o;L%ʩWKawӡdݛPw EzqҤqNuMXdR~9{[nޢ8xޱ:~h3q_t)Ҏ_v@N{,]wuxڜ`ҤI.-ݬvfʤjPiߘ$GA &ŧ6:5Ԧ&ny{>3ڵzk&{ 6Y{&IchwcҦ9!LUa)(Lla[!J i{YW _k[D1}ff[ȀTy!۞~_}k\&|Pd;&+OsIif ^k\d hw+>#]5Tq(H ERl{ }ΐalYd/;2Ӥ~hs_?0 x~Y?>[ni;7񄣵]<.ם kGPp RSȧ ,mTc[L/9K1ps~UU[wOXwr5{&  @,YBx8E{i:5}8/BAw4e31ό~1(IƎl Æ i^{Tֆd*bw6{1u"_UYTj5ACQo>b;&H!J VT]ϙTS.Фtbꕓ5T յ&Ӌ뗪^b1}.x.*[Ld_O`db~kXByTȺ:m<㒤bQS3hÖn1;˃+xVL[BqY_Gc}Q飢c0&g2$qCNzH)RiܰFkR6&PzP+4.@r'ۂ%3 (@IDATQ+(4!Z*jqA@wFMW_}5=4x`5k}^ /P '?iq ']:r=nķCKTJ2 oGg̘Aw#n53.)/ײ+wL mȺ9dQiLɦ=5hkp N9K\xBJC@=fV?lڻHrjJs7HahՎ2$Pq\<}o1H3p* qt8q5ɒz{%mDC♨s zUv@ҳ28?_!!/=0'NO vIw:<,iN &;55ĸ^9hvVmr"xhT6_5@ovu1e!srr5|d(99A klNd'*5,'N-@{vmtI'4t?%6l~t{4}[5"Ʋ QX0-1pCN7Emt3i"iҵAdG&k ny؁?ߜCͦ; LPqc9Lf%U?L~ 
aBtĴ,J7g}1tTըE8@h\BSiW-%%Y}{ԙũ)H}zcY.ޅt6mf3dԡ淨ph7iӎ]?-|Kw+&5!qUhߡ 3xa砾nmBERmM&OCf^RJ"0sI6i _;ΰ|Ȁ,Mt_쭧<>cNA8s"CYY;SBvDڷ#l^Hs@-%+*& k6Rp< ^NxAA.!1<&ھ\_U;ܚn)G8uނ1>٤Kkoq@u&Ay}2 p!vm97#XPu[\qaMh:IJ[\\~2yX6`\ɘ}H?/(Tǽk7PnƪDL(owBRaLkq'_~~kmשd5S/H jÛs'tXd. -VS8UNts#*E=33SoﻔD?N>-csVoua K?/XFA ڢPovۤp}m6urofrn5@ً6 D2,ҏVbjIGhJ] &f҈1#Ʊ 2&SzMX1h>H!?MdxĴ4t.ĚQ)@@h3JNO5F*f=L1s][ب*Ԉ\ubszHaf,y#xЩHщ^O0vJ;߸-&ܫTEKЭ-T'v.ey&QH zE`bo nAܷl٢ĉ[Z(51 TQӠm-6 #A;|Q:Ș@OEɞbN e pLykaW,6gТě= R_OO"}{]{D&ub|FAhj݊q@9KF^uڣn:;GACbTaiPj3`]& 6xK"mFW#;|s7j+B?Lҵ6gǏcmA;޾lUxk}M 4%+7iq18{ \dl˹ nej<{֭[[n3g4faFCC=ct뭷RvvGr&L&'+EUZXP']4|7|/҉#jA@QIcEFMvgemqۦ!wmcy>ëb5q9bHziL܉ҚY֛^xv8 Ȥu:h\%_ōřεL2v'DȄ_{.:NcPKsT9[)h31&yDU񼠂I<[6p%:o2Dx=c"w|45Q=iB~p8FS6Ǹc$塇N;kc/R«UVѼySO _Rg>r=}Q](a2ao^VZF{3' GA r~czE7 =| vvzk{\M "<+T5p,Mц:#~ɣWLjChoʥd'T]p]YZc׺Ne] I%qח-pW}FdtaPӾB']3ZxÉ π<> v7N{ q/+i #Q(o~|.XN) 1099E:lKnbZQPVO;șӞn^8Ʈe[1dB5ۄvO oG.;L¹MJٱm0iҪ; OwKJ9D8~>笓{VG8g(:f.3Ggr1F)8+֋~fk= AH6F\)t?}ނ4(6^\/[i~_ڤ.>fOm{Z_?E9E*; vRY#yoP0BxKy1|jj SEzI}-4Tlj<!~rq 'puLvB?6(TT3~K\/x~,?aR <[&7l4~})Yzz``vNJݐ>tYW,0~GpLW=Zzƃ21Z &;T 1Fe9a6&WlzG^pn_8i3Pq6qn 0b/ޭAẅ=GKFZ2agwE=@hϙDz8 G  )*@.Wb+xAG0[d}_8aER 0Ɋ0 %#W00MO32 S4ZbH(Bm- t,H>~&rO=~py&4堍{\7v N٘b8Os6?VsMY!p ǂZ{[/;Mr.D#!A[Ͷ /H\B`rڼjJ)Eڏi%6j Z D!.ԩ l#6%v],}U+I;PR&F NVS7 L MKvn97\ Z1;<-/;"qsz{%,Giq1]dG"0MTI1I|{ېŗ7{9cmڤ &G F{!d2}ziaS2 4 U#KSarlO]mzK܇B:TnuSsxW.t3mve,NuBa'6!DARMU| { Tc#& WPav}b.~X'HyM;p1#.lL[%;Kp5{чM@$q%E9u}~^G*R_D*oLQDDB bȚMZ#U>n/;LOCA]r l=E'Ncۈ~L:o[F)W1s%;XF^4RSޢ if/qʀ67yPҡ}"x)c`QG!#Zc=3RgۊQ&̓P#?ǵ)R f#q&Xiq< )VV;SO88&~yy4dpE[rת)ۤ:yb wL*ߨ_W1i?FP}uİllg9c:Q٣ qkn1I_jrk+j1b&V+'Unk5}%=|no7d.XBMu;vSǼ YPׁV9hqL?2=b-<08mdD^H "D!D=vk !M;~tZxN/Kdˏpq;:p>xxk~ 2~ y$6, ,~ES}:<#ӊ{%yvIX ch: !98 G}|vx[&amUY#T><`=ʪZ‹RY]r4B5^Sc^'6jE!A259ip ȅ Dag{z‹HZ&S*9Ua*3J^?Jf<ϐw*Ԫҧv.ʽ쿦0z uj}AF@G*m!@(|"(B"E^`eJ CΞeQ&]a ^,:C2c 6XlX|\*hD*3Z̃Z;| i1 /3ڋJ!q^dŔU :a >ѯ]cg9<ádcز TC,6_sP|^vtD2&c| `jc2@`醝xTJXgӮPQVҢ?tHM0QygwdeZp9"49UgjB}+.wF'$Qw;-5]XL$3ƌ)ߢS3f/E vmy0s M=J&TA<> v}d<~m~ ͳҊ GAzʨʾkUSd?:tLGЏ>N3% ] TӒtL(bݥ@ԅ H&t|G)Q d%q$ 
abb548^=wH;/6 up;<ޡ!>Gx>Ey|Jxs1svC䐉NA>q笇PG<~v;<{gg@ z>Kougaך-33k J{ Kx2ɋL!hj[,y#yfe /.^"E_āFJs׿&бk-k.nĝ/#<y!xn v(s ՗0qK\0wx !T6vx cܵ~vhI4u TQcvc,|wQ~We5,s?]rq_xݎ2ķK*Jqi-^s@Fލ4܃;1$1GuoqAv*dvly|d]*ޯ("K"{x<ǎ^ ~',H;L.nK6cs)c{(-XTfUߗۥ,~y,^-,?!o:CMRtD]߷[;Y^xHY;ʪw/br]k=Wnޣ1:Pd:06g5w61E SabN?2\V]xuYUZSAwmI*ƽ*Ą=~7_vKۮ wl| αvle]~Gw_ƍԭٽ663CSE=@pT8y.c?HbZk@ oyc8x;('KF][ddKFw>@<"/m(}pg_R/ݸ@MB,b߶_a'TcqlFy>~zBՁp{'WP+c: ufx𲉻 b߅ '&c)>7*N ºkQ#PkxQ{jz{:c 6"{Aoտ) 9JV!:̴VU;t=Ṫ 2ou Oc(Oǝ73_"6ٟK?9`,Wyk xpdܚmy޾]~k).[#Gm_T{~2It ^ zQpTEXc)t16 %mˤ7TP& q6"+K4ewU 5{b.Ti/^6A6னT_ȣx _\ʶCj/Ƒ|V-yu\Ϻy| 51bE QVUUѬYhժU+H\!PSDSD: {HE&,.\s'*3:xU-~!PG**;\ÏٺlC[΂khT &іsp:@.َphBY;)Nfb>S>9oLHٳeРAGӧO"/ׇ^]-TEi-^:o~_W9ݥ@/Z6p1Ĉ_`m"YX4{g<.kj jN< t Lk; J9##:JO79ʨe|[ocYGS/xՅZiat l-#pp*po qzQTJڶG E[=^BؤB&24xX`҇l&͊cv?;aUIǶ{o7vG&x۩ crPF ꧯ쮠dX|'~؍vJ[Ec:ƝGE;{z w9@߿]q+D Хw^|rsTgW&Gss^ Pz)Q xZr1(xjDWɚ*j]T܋I(&6"o[j_GF[vLabHDZ- yӫN_;g0 qP&5boCg{-1m ـ/Zɽ5nLf_=\q2AKmy[}IX;/mPcy|#Х|ZnL? W?{_XEu+@UVR[WJ]k[VmOkVUi..( ";5H!!ݙ3ɼd^K޹23w=o͙s}Ӭ ˭~.DE \fRh.]U*E\h`{t,di杊yLjVL]yV@B$hLDfqM*2HTqqjJ{,\CyX ɋ5PNf]1f5O7ǧ?*Nu1*gzbCDia'i; I}.s nǤq_?~/U*O:Ol?Q¤z|UZj5AܽGUw;o{tC{ \F˄&( s9EM+bWCa{'y4嘈uu3o8;*9;rQ*+wkY#P507ہ"L /,8*9&‡K]//wkju"H(s<ӏ~q恪jc:bLy U"^E#(bJAZl ݺu2bL.D_,ȲMďګn1ɪieJqg \.W {ɯisM%L-N ͛1^a }egYq˳@H17qǵC:C ycHZ|Z騱6aUXyu Xy\f*LG `"S>fjݺ5͟?ߴ i߾}4zz9-{.՝ -2F0Kƌ!!|$e RڴS}9!v@ie^ fu\D,JA^ݧpz RqZ<^@19(2B{ܿ멄56nBzks莼ͅcې\5I'l|`7^[ž٧wXj\P{ܑ)KTRǍG;vPet'|,ovXUiĈ~0aa[gs;Ҳ#ѭJ9I*Y5ie,ɳWD 4pS O % TLx4 CM43Cy51 D Y|=ٱLЀ7?8/=0ypxJ:1?*ehx*2vRAtb{k)q2 㻵vN^;e)|A&fLpe1X6pf?L❛nq0E j-o+,Q7|6o O=OBϴI3yS/ޗw\/LV+8rPJ9Hh̔9k5Cir޶m[G""MAىDW6.;1\:ˀOf=!\eC &  fSI51\dO~6#{Il ibR[qT|"%ꡁ typD9ʪ}TnL?wLSۨc=F jdY rJzĦ#*~ucb +wzDtduSU.ԷΠyzvaѰin()+=R\L~QB9`L𢛤Tڮ];!& /gRީ]&%'Em@#m2G*TvA@^U}2Zbi&k1c4Jlό$ޭnWj5.L6wdB:<* |$Y5TGτ v;~Zc˔BVv۹۹QFREmPOv0!3}'_+Y%ol3יf#qHŧÔa]כ1{sG_-l.3x (`kS5[b: xˠLŘ岈q\՛hٙq^$04?wG}Srm役G{ԩgmԉCG&I ?ķ@!QiAR*|rgyʬ,-+)SxtHMT^if̽òJy+bن&7mB{'36`c4h9HO֬HO)O~{|SH෩US~;hHnfSmP$<B-@(`c/)'2{ԾHL|B̋NtVy9A5R\ka l$"ӻζ[kJi$s[m>V-U(rٞlBx\u۬ 
fhc#xYک•0tNFEsֻn6Qs纶Qn_f[oqo6l/Ol/^zT>-*'F=qx(0r!n,vJb =s|3u=^o|Cdf$WPP_rqO|@22.%#v7g.BA  %=DT <݅}3%Ъev5iʛ$Ӭ7</$x' U2qf~ nDXxbݩUX &͘3ITeLp*}lt#;oncsqE լ)Ƕ9GL< i 87/BJOf)-J~5uE@{^{:_dFKLH`89\f֒M}Oߍj b sʪϯ;_fBʲm-=6GTbanon1Qp$вQdJG)3-9艁ngYuYr +aFxڇ,6`qt;va =' 4NxN1dY ޞjC*e.ު}b},kn).wnE* 'SjU_Du;8T%~𤋮${dz $%gczCn҆)>[ޘL[#;[ٕ =lj-v"{1~!#LRnO u<VǸHET#2ӕW|" ΍wCr&_&nL,e[yZJTu%k϶m+X]8<"EZݜkβZq=fJQ !E䦜7fI["L%+COk;vaUS FO@H\^h<.R徾1Cƫ۾d*D+VVr7& GKE7ZK h&iU$_F!/a t9\qOtsJ Ki2\B$X!VIw1Vl;I fKR+=g-,0fSan5ti s*ll2Í?bIJ#ˁBoy`u7^cg( {ϲϺ)6?j xo(M|n=.ԙ}۳_۩c_ r> C R?[Is{7w@u[V~ca8H9*ӦMe˖9DЯ۶m%K8EѬY6-0/U+zMrY  I߭<]CkYL3mҫȚ-axL!9: 59AV5qwBؖd qij2|ΐcG9uV qOSgV*OJCevJiy^S+JHGzGz W\q) wqt^7|3'UL"o)c&ܖc nƧ:jSqp/9oP$s1']mUTޚy'U%RjD^^Y6Sך(7,ijQSZ:d~ZkCw0p챼h3/SL;TGC)/< uY-)hB6A8^uhBӘ}Ri,(du@;n JK6@k1 FM|&m246ڸ^{Η~!xWh;PFF N`$}*V1(YSiɢ~| ˤM2uuH]NvRC~Kܽ/4md} 2 tS OyZ@_R9ⶃ v59sj ݯ'-޼Oqt+3 ȳeG:z@IDAT +iag=Pz@Nkޤ)ڗaοL2[76aW>܃ 4;.A8q"}ٚ]tmܸ{= ᇷ}̘1SOufȆu8䕉4&vjFkHgeRzZ n'C< ,X%eO37:I]tS@/VadJqDcu\io.XAQU8!Ibq؎LM9%nhx*l=;WG3~\}BJWiu m]ޒQV1C<ѢmG" STChhsdWZғa'D3l;ljTOއ:~uRy w,zzB?0s>$ ڊWTU KL@_6U{fxa_pcB\P|s'{aў՛R떁/I '?o5( d\hR4(ҡ?mXӾ}{Zn"ʫ,XRRRhڵ;ӣ>J~=H/3^8DwNavêU-{R 2!2H,ظ16܌iPd-My96K^'LZl} QhȜcN [S9ljOn~\:eźqX4TLt4;wh<P;jl@0 0[ BdP$?EONx**}f~Hj^6㐧'x"d=z4 2$5uTʪYcWfْLx dN3%6?O@3n*b 9BCw"G+?L*WyQMVdžˏcđ2b(U@V( X&Ψr'/4zu:$\gXK*:W^bMVde+pcig,;?mSALdOL67lhkp&nx:/Wq `Y0tgD-v %XG q1bߤTQxWB? \Y&n +<㿳iM)[A@$=K Z+oܷ5ˏiӶzj<~e @I5rHztZ . 
a5X,/uaUHIvL>v@P*TX vZES?0Uݛ7SJUb7}npbbTf\IGD-]9Z|)uLmm:OovLԪj(J3L[u>oXS&{izӽm.Mi=HQxlYa`)lavfP^hZc5)Yc@-j0f'msEOW,`?B@{ ްd_ 4&?-׬%hZMU c4iR{*BO[Bl{nJ">ij2f9Q;@ϛ'U{EO,kթn :q@X|p9ۣ>:c\`OIߺg-Y]u^ۭCOv"UlFX29T01^q &a\'>;#![F 6I.9K7ILJe Ͳ{?9HUQ/zDA֭[/,,}IA'| ;&p >.rɃ?@;YyܙqAP΃I^ڛۃ>"u\YJׇwEqsୗX$[{T;#hlς\.zMR]Ȳm9}Z`#։-Odwha T 6ڠ-7ӻ ci屃kIOf9V+?>^} =QOE@y!tk _og9_MyM"NXc.jBms;EWגmt̃:0I U yO<'5dgIMiNT*-)Msޞ6kQzpOhǎNXx x{`U@vӦMtp!X3(,UTO͙n51 AaCwLdDv:ŤV1pȤ>M7;d L>q #`^ I$`mtm~;E<6s)-)4*kkt:]R-LXYs m7'B*|CxRZZp  r*LMZ4ClK(9VxNoiMACg ; 1P8qOLkA_L 2& 2T;~cnŒ[}ƎKwu/?N7o &(fQ?,A<&iŒ =]r%O2vmtFpWi2D)o1 |w>xLLd#=fRry9i٦m(g"lh"!NIǢhklU*|H>.j}Kg}]g6 D>p>sU̾6|'H8=qW,,PÖd_ Mo8\ڔa}=c }OD-՞b^_ԇx;Md?fJ'p7l/WomVKt~."zub/StG-#gul̴#;nP&OL;ww,&<,P(d=958o:$DS+ZYw1 ݅=OAm3aՐej75fP"X9ǿV;Ŷ&o?dc9^M-¸Ͷ!sO$5sS3܂VJQw?n =|=6Tt0ݑ:?;;iksnk\E3kt;s挻& [,[+,o-ڞRTiAvJ7m!P/;)ӦM#xS}saעEh֬Ywb0?-T.  EǶ*t :ߟ7-yD]v!I;F bΤAػtҠ06ude@av\p~MDԳ.ċ7c(Lq{5@(5Ly,Qvz f$`Uy`Xgp8euОb6!nB0QɹCiYXƴȯb嵯&$ @T\\LW\qtNv# wGV3ee?o\,^]r,'鎋O^[5!@"c1fR5AtrRjdD2IR'5,'T@E(6^D 1>7 d]47pSD cܞ'lVwE1otXA(_g[K_Nʨh}b>0}~z?hcs".׳.\äfG˾ϧJs|$4u"x4{lzw)Lw僚ロ=1@mH^{>&? 69 b.ZqVA-#%1TI7CWfk]Q  $qjyYa.DZXd$qrfoM4o:6 ;c -ENmtڢ#@rX(iCyl RF uhg" skkq/3 u ^5X7 (ֵPv8$L{Yv(\D:e?fuw2MA'NH^x& E]Do6!e]՞[wXtSP*3,o{fzdr[(k߷{.}|J| ?S\, @ )wASkLnݤ y㇫Euw@9TI%v)Wigqgچ~mGcnY>g_/x! c}=B X{"[nmjY΍3c {Ӭ k;sm,;L_=:}}JPV ơzxW"U4r1/5{1nPT_< a$d>sL,;v$ji_p|˗[""pLG<v*P^uv S1/ @ I0%T dMy-:a]g-t<]-¡"HS)Sś{! rʅ15R%F`KunV6:J]e9|};Lut ry'ӳ7^@7}W9c6]zH}Ǭ,Kbb9lqd捝kRի[ma q_\;xXuZO0+^[3*,Xlڵkw}Q:x[T˻(%j6z}@F,`[U-Qx1"}`1HYd$k~6umJhIT則-yBpqz[V|]s\ @ KFTSc7d9&{ԻGgǁ=ckT۔6Y[j5>탟mϩ_.ALxQ1Zfұۨl:ԧ 8O=゛8L7]9D 6awBRUPv2~x2e Kt5蘴>j ?cƌ&ŦfLJ3Q=[=)ʫvTT֤2;:X?)(=ܤ'EɶٮMPdL(+Z#Clg:.v ctUxwULQz(3&dmvU-CMܭd7I&׳]z4՘R>=|ĝq~j XZƲuZ`iBs - o|l=V 6+))ѭ3<(LoO>5rHztZ . 
u]MZIj#i yI]G_c!*{,p}pP l&$ˬ S-M~ z̡,S{GYiDJ;߀i{вC47z(m~.iŵay&,kdN;I{_nMWa,オAS'snY75qslUoV~B?:{Nj9mm<\\M_uy}Ͳ'P=??VZrÖW!2xuܹڹP{뮻Nw"XWUyg|X&W+^$!5K++iP#Gp>'T^T59 <[Ρ\onҬ%Kպ O~!k0e'qԭ ѡڀKꘈ`ߩWz 'tjEG*+ٲ><=tiy}"pA\?՘x =qT󦼻>9&hkulDGX?sܢq^xomVúB!Bq{C6Jx}Aǭ IB[,L?*7#3A'$ fnt0b:sqǓsB3ON6;zyvV{uX]ޭ[7wuyxߥ-iZQv2)6~ ۊrVzÅP2A \ BThTa0\߽32t`26Ce&ssLJQs0TXosvu^ */}n~yLWN޲IVhߌMl7oq.rB]C16[ &cƌ֭[,,,}ѣlܸQ{O8?"S ,^&Ϛ0\K9R_MTE̻A@{B?ӱM0ɜ L41 \\:X"g-ׇڲoM9;_WȨbb?#,# ✦V-)Ahoh Ķq:4;\Jmsظ^/Qg7CK}!] '|B;vp f*ƍӲ6m{wNJZСC3}GYYM=#G7n"WMTMInNKӯ$Q-7LROEO<{r(c?ۚ~ 6:Tn8鏽ߦ,> ^dߺP$ 17gflGDĽlCS,KYr^}Xlo{ٕX %CyrYCDPe#_Snv&[\Is| ~f~Z2gulMʽ?CطmV\fRN]OCe[|J* {kCXil7ot@*.<'b, 9Y}6!DF/~ }w)iA au8+᭭ۛ ,Ls/6Iqw)!f{/1V?i5~4q?ly=~6сtMCnqa]Ѡn=(ޮo(χS_/[.Ue(}䥾nzM[[}?j㧉$8mg) }k)^1 GIovL{a6ϭ[?&0!uj!)p=^95qgʮjFW+w]ALo<0,~^vǡdDvgŋ =zЩUk<:;!@.Y>G7x#]q;契'oMvZGC ͢3f.{_ÂK-U?)@mdpV^D8&r^HX8:jyF2%E-w= Mĝ"N+GVݐu;q_\t3ĚxzSn>Ӡuqob\z/N[1ۗ~ӟ uF>[σK׿x Wc**ӘvWINKE?TCTL Ȗ]tB&:˹/Lw_ ]B]"߭[WZXw\s*H;d[4n@ Vf\*vQUqRpCeB:r]֞Aد!=7b; ٙ\UN_]sl8 Fќ9s=Ts=2Ǐ&<3n8ꪫO>7Wd[t>t|6lpbջw^oq7|>|n /cI'o~C'\p|jϹ5.R򘸊,8 ϧw}WﳾXn:U~壅4uL{K~qB눩4FWfyg"oҍJ!`1ٯk{ q];S\-|deǼo0Mŵ7\ X=`uBcǬm-[ ⌉]vQH8vD)((*}tBW&ڠ:twIMM%xf "MZZ;8 qos7__CeGhLNZ,Gc€RD}"LǪ9(oo}61A@@d/y331ZȔjV'*LPxGW]d8T&+ 7?#62\0xܰŖx[-H.rޭ0d)++-[8z^>s:uЃ[ΩNrrr1Ǝo4ssUܠAM*[a~1g;ү MiԹ}&K)z9²#6ilm;g%;S $hZ86bt˝q,nxr{~JcO;1Nk}}ٯ}01H B \@G*bב͆{, w˺(Tmq 8Ut#4:gK{}g&J @YTbl*W.#~sId@DH%-2#=~I<{Ih)OHwʱAZ؆^xX2n>Z><Ո _fMLm7otM4rHz(33SǤԈ(v&2Q7\*?q*=%Y8QM@." 
z'<=;gkU ?dyC 3lQw1&uPsAuMB Ѷe1GhP:+&eVLjfxG\8 +Ƣ`TĠ?ӚǢX!=hw_zyiFߎڷHrJ3}]Ž|%ӞvuV, A)KPtK}5xQ@I&<^hѢEA -s)&B V?/&yZg}yo߾{Gx[ʕ+ &ֱ`G;#-{S'QIqVe"+m[}De* }wȞHv] d۫kh<{CGr<}aijO"o/@=X=q $ 2D r / 7ܠc%^}矧o]g뮻&OzK'Svv6$t=h'oD_]?-[iĉ lM#zP#ܹsO&4Z-]7}rYcZ6j_OBL\b۷ʢoKo=M pbٗ_~IÆ kbXqe&SV|S;8i&phʹE- H9<Kѣ׏Zxw"9?{ICđ)73|IG*1OBcbx CfuӉ\r_O/XNE $S %)qZTBeް:u"Y̕Sl E{ѣUŲKKPB iEyFTQ/@ǂLj2U{w-YEnD(738-ZHA@Hh$V(v& }eqn  hz톝Oߚ,s&+A.MM㥬#4BR脁mpw)X3^|' ?mcuA_m/SvMt]:$tyY,騊nB HrBKXg>K[jE/jnGtP)P&)] DZ7iDܑb+99q.:tQq=O®cmmz<URTdJL{?]HaѦiG9s2g;Э ؊R691?eI(Y"d삀 ԉa+<edduZD͚f֧XyHM]z8-4_XtS-HH S[b/.;^JiCc!D{^=H|yԻckԽh>2ۍ  $awD /Pv颋.~{=˸:h[A<()+a.X}BZm{1zׂtV8A:k9ê^'X2;@WZibѹxUӶ6y鴫P}*]䎽 /+*)ݳ^|mӦM~QNܞ[p38msռ&zMbbD#41'֙]5!C_N\pAݕH˸@g>~鸰G; | 뮻U@{&x;賈 1n&:餓 m?τxոqK.i4۷oիWS׮]}h/&ٳg… zp~?w~p͛3~8@IDAT79ByyyS:\0W_}E ,,ul;frbΪU/П$ч~W{W㋴'{i}5x#dYb'?~ /iӦ/ˠפIB~V,O[_ƉPX GzGtEW\bc tx7MJ=/cQW_\{饗҃>HO=r-tSYYY ;rACs=T"~+g/--gy~j|P.]Qc+(6v{%1ӷo_=쳆1c_~.b;w.Wm/rAFȁ/pt#Gӧj5!fصkW_DٳOմs 7TE/ƭڴiSMM\O/j\KU?JJJ3V)^~읺P;3gάR,3gU)BIBW? m޼ꫯZx~Nu5jT_Gϝ_9Gq ;Mյ^myw$bW.H@=ѿ7w_"ϮUU~WZv?_HsUee#_k|p_9U^O?駟_EZ1@v|#i ڵkUq}[Q]:x`}'(s+zkG?1îbW&(aaFO1 eB9G~wzw~,}N9;HU3D*jE㎣s=W*m;:=A@yHysb @CVM_'`׿7|St$/"@}_}FnD@yy?tQxAFWP{LUJ280:u$>l0}0Wί\Pg!f͚Ix a~GJzkЍ);frk;dROH1\psC*c+ּ:l/r _7ZhQl%=`ƣ*LCe˖U;Xߠw8XFl@}_}ji gϔk߾=[Z)I~)))vZ흿 d \ꋫ_FQM?&+WCWSL/u:_9NT%O_ zw~BقzF6*df̘A&< /c+W=9㋴i7^7kro<7LKKӻ;wj[Լ$ s1TXXHo4ɨ@*"@}_}FtAW&&zC=C>y lOxF 6n7W9DG]8j+Heu'Ȁ %էmtGV?}A~3C{O~m5R \?_-|=Mw_9{ӚP=W.T?Lܑed„ QQ6Nӿ[HE2h{ ml?r>?[,)ȑX3f%Ŭ'(o۶{DH.,$SL?Pq|8|u\ Ri\H'-{뮻N{yD v"HCmpT.~?w~X{p)VgM/6~BJ>x8sDTyP7 =z9!H'H݉_9_ }A?h_~w~ )HOqom6Xn?(pl{ݧj="N iz `^8$G, cĶcIq%ք%XD~)70fL42VLEA(#_9Ä]^O~x'233u;ȥ1 2u MȜOosD0YʹLyw| iPOtځn(8%ƽ\kEhb/VB1Ni!~'uv2HsW!|! # @=%Ư-CL;n"9֝΃g<B;fr܏{[\\L6lM~0auVˁEZt#@@{#H5. C/.Z[d-زe 7ձK:ى~ᴍD3#ɉ#Vԡ\eA0+BƍӲjb! w8&&%j ׺7?: /idN ϝ_ڋv~+ĨIJs7G;fr܏{ :njMV4%t;frn8 /r 8hXbu8/q3uYq~ȑ#+mGux|wyCY/6~X{;}t4iaa*X ' s;fr׽}W-~i$YvwyA~W‡oM\QF9w|  *.YL{#0F;cҀ'd!''ЈGPӖee{r/=~Qp'p^m6\cϝ_rSZ܈zo1)g]v!yUw|EjF@{YA@A@A .<$LnnHĸ[AA@A@W0y< PEBe yWA@A ^{0c̙XPlȐ!zX qw.  
񋀄A@A@!#  /Bڈe  w A@A@!{m2A@A@AA@  6b   ݁BvA@A@E@{^LA@A@p@!;  @" =~X&  8qwA@A@A ~F,A@A@;PȎ   qk#   B(dGA@A@_4LA@A@hx9BW-ZP޽!{C.}   =w[oiԿʢoJKKcn*Ubޫt(  1#FЪU4aOJJ˗Syyz4gΜzo1MA@Aaꫩsδ}voi…~z3f6hSO8nLA@A ضmy晴hѢj}i|aa!5fϞ=fPKG  7|?T#e˖tEa\2Ñ21[:A@A@w;<ٳgH3{!&ES兄Wʆ >g4xhuWx"  E`Öo&'E[1a5)|%ҥKi޼yUdĄGG"  i_P=b>Z!1\:A@A@hlX8 zwiذa bdiإSA@A@Ƃ޽{u'6iVqo,SA@A 9C7x#]xa&x$2   @EE7ꪐ}Μ9|`#,  4&+Au]W={ŋgӧSfͪDBGQ'  [_wr<<ӷ~KsWa駟֯_xmegquYȉ @`ɒ%{ɓ/_ Qi_о}tߌ j֬^x_,Q!+‰bNzꩧjgݯ?2 U!ĽFa|[owظq#zrԨQtu)M~P=A@ +Vc9 $?믿N;vs{aavnvms-Э](4Y*++999sС֭[)//W!*eqjC*:c@ff& :4&meo۶V,*g / 6M7Ta /!WתU+MqѺuk}7nn{衇oGh"W&w}q%S{SVxD=T93uF%+ Dߟ#Q  O>$-TKh6oKdYU5k#eMfz:eGƦ3gүkf{ qz7n`3x:B1cg? K?4UUUꫯj=;Vp; T?pcǎuBk`/^6mڸjeńRǍ᪫;Õ)))ѱIaߧ~tr"IPbӦMz~ =viu/3¡u` d}eʔ)n}@ʗ.]e`^K5oKp 6?u?=\M}Q}\'Ћ/gϞM'p ,"B`ʍ_բN8/VgJ|[F*D{@X"Xrlvk֬:uU?Twup{+"AH ;,ZHۀ,>lٲ~ܯwnI'@nF]0 ?Otl)@qϋ(Mwq"ydii6m!>ڂpX ;8̀ɼNxq_=Di0N XklN 8& wlyj#UVi {OF81yG6[K<޾~~Z2k~=sѩ Ã#{{!2rC8F0vYY^s J<ɁrxlSxs)ĝNcǂtΈı.rK]8pJށ=Z`w%Gh#.C ѩAzkc[.9rÏ#, AiG7*DXH0\mۦ#bSA41G~H~s5BeP^Ȉɱ1 RȋW/e#;V=A@?W1-R}X . x 1E U8Yp^wx~a=ڰ ,HCk\XA 7Ds/HWt9r/ag9,xEA #^SA+ׯpڤN5x 8Ym+f(#9 .o ?م](XT+Qxa9և0ĕru١4.~B is9~Wx9A'ȶoby9 A9 3^$}%=DL ^"~S8mc!b:sM/`] o R"${Cd.L0^~ܿ85$xa=u>X 2(_~~)}q:." ;qυ+<@A Z⨣ҿn7GM3}\DUru"B 'b8~d Jh)/BM?xO |dG!lj_GgC3b9<'|2!]@&3i$o]x?c9F`+d!*![ n\R^ E5) >HH#&3Ψuq#5"oI:,7CG`n#bnM@ oOb1qoŽhu,eO(kd" aF['<ܶjJo͏ո^BҞ0"7i֦R%7V6sb́=Wc >[ޞ+6vBJ4 asA хg?p&|/GoፗqĚ["(8 ?oqoO@Ə FS'EdCsxDj ĸ˷ )+_DRD{$H 45v'҂=,G xs+A@A@H:ȰH   qOAA@A@" =,<(  $Bs+A@A@q 4    A@A@A ,B#  @b =1>BA@A@@& ,?|ɰFA vUة/LK6qػ3U[#Q!2@ ?y>q_dIǗ 4U2᰻Gݔ!72ṫQeސ5+wgCH ̝;^xZx1M<9֧/j*}/))*9  24i2Lp=];}ZXF7VYތ**Ҷ]kw\.M膋l0D ̙3@7l`A$E@{~2's'>SzZ3߫쒯}*|[BO|.e7h3{r.Ԋõ;C .UO_[  qOO?>it_iZgaQ9̦cw+w>c&ͧ= yOQNYfuz}yP(@ $ e#ޱm.sH5A[@GyoͿсXD  `Ͷm>.Ƹ"  }Gb<]:4Ch=}{Ԇ.]^ֽzs^3Hb޺ɂ&х}Jő\4m֭[Gwy'K~c_$=`7/v<5ȋ(|p>? 
$R 7%&Pu {cQk&MR۴֤lO\s =ԩS qob_17rNwGxŘC`ԨQ4` bmqٳiѢEK{,uҥSn@MG=!} j ŸcVKOrb<ܳgΜIk֬F ˏ~#}Z|9mܸ cǎ)ҾXVVSnV gKӋӍ X5Qt0q w VҎüW^퍹DQ8?-c ZZ=65k3F:oϟO-ZիWO?M<1pq L^ow r c W;~=g+-߰DzO&2TF`Ȑ!?p=<:tp"vh]Գ} \)2ƙC lQf&eT0}* (=YKQY-UkYɧnm=dYY]{:=/Gch܉)SO$wl'?O qِ]~knj?7b臨4 V+){e' i4Ɉ6io9C 'd ˄}]1ܸα.Й(7;C?<pZf=M7D?Oƕ˙_2:J@>:ꖊD.uVX=שeGt.ԧYZuEbc刑zI2Ǵfcםh8*PQYuncvA_D9١=5LQ}'am[NfzWcǎK3c:묳ǢшU矧iӦ;+VC=DJΧY h4ګ={j m2nEKVg?MN$ Ba M'|1~>98kJGh!D7H4!.F>-_;2b{7y b$5|lcmMDbcicBC,UƏ iJ煷dRb\qO)|]wuVM܏;r!ԣ!^=EO)؅'?JJJO> o߾: 2aÆQ @B*CyfW69ԱmnBmyNUyv6 1Ɍ?k#X@D4Am>vwn*zvޝ%x Hv$вTmA!ĸ[֊@w W_}ܼy3WQԂ*Gl7 *2p]tݨ1`"(!I,&w~DpOm%E W`n͢'R!˷Ѯʐ]@sekm2#>hbaN҄&A@#VDAA[>A **Zݭ5W&biE4ɷ+WG@HB#S92~Cdx _ƪmgCJ~ٹ YX&&|d.R;\"vĄԾ6U"$q_z)@4WTZQMYi=[thN}]zs:h  e7g2sڻ1#Y6Ev񗀝zxKLrdv{aQ jvH1AƈR;p蠶4}V*K6M{::?C>A `"VeyMJ4` Gr+sVnn픃UtӬVҵ?9)hHoP'^/N j oڡڑ0w&|d;/U*;vd]Գk첃w)@mĕgdd' ӪUvA/欦fm4X׊oT?]tUl;a:#䬠"^m8x 8ns9<Y} >|]}Qiz탩jwxDI([<1Qp T- 9oke@-jǝx^ӵM2N\=Opk#9tO"% 77'ѷ(Sޝ/~Gm:ڷ6'q6^fAfς 7Э~V/}YB# Rd(@*1>|qk"|^障qO5igU&S&=׾[FiMz>b ĝ=FnyUWq/& L\7^ymja3m7[&y,7ZbΩ;[=ym=p 3aʹxw"qO:Yyz#xBI&ʰm(7']ޯkdA ӽCk3|Bήa=c{8⼾qbY&H1]/=sCսzq@Z8ܣnG=nW" = ?dRU^T=(/A7[,V2Gb SU! P *3fD>n&b\oj6} DZyZ\#[ $}~Ļ8'RԖibehΑǫ1c笛e:ԑuofhdX_#  !n! sg,,H֯GrxԿGK}gwK7qADT)dh&|L|Az{]jq߼|S>V ,ZqVm٥Imus\G@i E[lj_}}깣^{;ˋQmzõ#'2!U&Rqg$VO9 mo]뫈{L$d. 
JA (͚;x_|aI֌|}pwdgY=^ӫ\m0R'櫊m_-1Qt!jb܃Qr&j/Vj6|*+nYs:Zkb 2NG_dS޽R?qxhrJW=Zi*!3 lΌ2 xRLx]Y^L:[K3nj&Ͻ0IwWu?Eva ucyI:lB l4啊oK6xsqb,`;{gݧmQs'辑P96U*A!~H]B!P\VE^ZPVfyyYGsA JԟW\ %bȞXoƪT^MeaM91xn{͞NJKcR=l^_Íus]mGwnJ& tڧA3CKF:Bi0lK(nhԔc菮^0}"G[BqOiO 'X}R|f.ڐS  ar_L0A֥c) ~u!䃪E8I&,~ i/Q>{!SRQMd̰+Z3TQpS9FH:pi yDSȉWsc-Yx(Rc,\D:X`bTv6 l=Kqor㻂R\Zsq}ƒɁ@^1wE@wjM{uMvKni2 $?(ػ/@t(*#gɍE߹kIg c|0O1Rfw#&G)ݣ<$3=4qk<,+۲eVXj j;KںޜDGaYWP| G`Ji~AG0 Gș3w0PLXvU1ĝBBSCnS^TFO0Kߧ{ HoY}v @0CMHlTi27tN:1Kv&oJeO,,q% no]AwJ@*c?ӭp-qQNjSqW>'vUUJq"" %ܸ7Gw*FSn'ĻK: "\eĎ $ Q8ߢ1:xmOMn%^k'Id"&ngr6N9i_3N;[?F/] d- +larsНt`'dǞ#˘q^jgDy[-GWLyR 捁m[h Ғj&JO ᷦ+o^JjåT-3KzTC-D1kS #TʄUXq T5kaq{Q$}{h/Q۳Y^YXonfz o*lm\Wz ;xeцE+,@m!G:K JŠ0,Ǒ~B>._zD+ЉcD e2uә(rYԈt)KfWv_-Le(d@TN0u%i(f}A4l?uw|DŽLX'+-6[/ہ#awN,A=Yjž6>v'?<f sӱ&Ya`G}YLq„'8a<:4:vUҼ%kalEW=:Uύ).-96y>0ڤ)JvOd\;5̼Lx݁M†@4 M<3Lp῱;J_ w9I$ߦ[1xyH[J*zXQ}dbUl]39NK3ĝmf}6E7=VdNBzb2' z5 2޶-Y!5:%y|͘1JJDjW_K+Wo2]R-IaI%v}̕ZO5*p1馇 I1V2re89`qXsbl3kd"Pv4e*ne8GCKWsT2X8,a<'Uqt>U[c"pPҲr' {K x(Reʛiv,@_蓙 r&{wFI:tsUW]E698 eeetwW_}EÇݻwӕW^I}]^uoO?F+ľ"X<"bWDyULJJ̎ Q̤~Wbp_}T2~y;pGu_9[YwG{mVtG٠Bz.{dR96nN+-O3vF}Yr(l?Mw5YmOv8:\6>6/6:{~1Jay; à@cM#OAS1oȅ ] )w9aKCZeQfQo `"=NJ?hذaxuQtmySOі-[wу77![%\Bk֬?0$۷oqG$;B^}ՠX^,\Ub9 3u0Ƞzݏلڶ(;U+gDоɢ-?l{S!,~rߵ{sMzYw{e8x;VxYZmBmԱ΃y`_NɃh"}nH=cz!w==w٠gcQ}FFwr4V5gN?'tdw5WN}b҄gҘw'jQӝոrAP;RqOO?Aê&S r711@?sVwKk/N(Ř&tҥ;w+WRezώvڠt]۷/ٳn xƃqtQj͡]:h۲m'QPsaO΢jI)m&r6l:&p ;{m6y;s9&UOg/UgQD X|7N,\=6q\y^mu&sCBʤtʟlwtSESf ]t@-?eq9sE.j>umc3 5l/<6e Dw|wz~̈ë_+?) 
4>6U2kƷ,1-h[C-q=1?$j׮]zF5߆u+ݻwՋooʌ .nqG 駟R~~ݔAv"!E޽@d4H.Jh ׹ahcja_hN4} aE0v ?M9%Z!{Thۃk5[d3jǨb sצ3 q\̼a{۵vj1n܀ZZ;)aLVwUMğK#ޮl+?Naȉ @ ]3 'g@ oӗj݈m>{h2Zf !"%^|oR!)(UUk < B-Yfш#tƍiݺuG:"?kE=o'xF9 vlc22W&?sI*%hBAtho׾~vjwPC8ݟX#%dpwt衃ƁLٻ: [vűc2AŔ\Q57/ڲ;ڬƋqGχqs$ıM fK֧+!'W}(,G&<20X\eZ.YOiE5 Oa9s6Go螳}Pa,Lz6sX }_v3/l?<aN@^p6mÃIZ"]τٙypqNs̓{Fr5j vk=T&b[qO;f O_;mA;u66Z"/D@ǎ&ܤ;q~xC?O6l;wj?sLz?1c-2Twf|WkΛ1]ARnҔYT]lSv=5%0W$;܆>ȬrjH>oĄȑZ@htIir{􎮎Asn[@{ A_Rc\cqC,MFJ-6*|@r[Y$Pkz܆74(h[m70Gx`rٟ!=Oj,s SB&Vk B@<)q'dS 1'O, d5Tj!o%ĶK0r# dffN /y[@d]>4'9(qU1Za+N bw4=B۴P]Ǘ(I+d2{<V !OӴL BeX<9t\j__u6}ļ9TCfڔ ٺ"O« (6ZY ygCQEqvE" 8ܱLϒ:Z̷i0F8龎zTLvh2 mrwQXqcQjb~* [_,&6 1 pꩧR۶m ^r`S"BHK4'/tMpgɾhlqwI`3#S@?ֵ&Nxr&ޱm;Tlw\2t*;^bj#4Wb[rK2ڷoK2HIæOxuQB6>h<)6y>ZPMO7&}ɤ  Z\fuSw`>Qtq=.MttLxn6ӎgA?[gP֍v&dŶ9ަUڡG16 0徺Bڡ 5Wo&.Xpm.ڪ5 *lʧ;vӂe܌A]I3E`̣1ũ h?&rՍixW+mZS̢Gf}I_A $gy&Bd*R9{CtԳgOzꩧDίc@loM!xy\Q{g]t鹺)!=-e&YLYLpH{.*ˊ*;nP@z#\ZJAJAP ,=f®탰*۫CLN?c LhdӖO z#_>h^*0:ʪYŞooNg7@},ET2_~0iϣ*Lu]3@݆C-n! <)/-X5;~Hjܸ:{Vf %c(`I;Oy#<1 ­&gS)K6xc-yg qwOvlj73qgš2&J_98qB\\*V?**Gհ&>X'';~y&>1_ڤ۷o4Ι8fbk3cG$#&kٕ( 9ŻXQfD1R6VYTcG4EyU 8́ÒK+ҏQ۸ K6o/U[JxҾ5<F xrJ@|:eڽu PDk.hA&6mq9)u(KU |*bo6O2ŒaNp>=Я>G[Й :wѓf=wGAh"t~4wWS~z`Aپ(hA\/GA @C#B9PwW?(M%cZMY_̡.*u8KboFr4d%N:35K7$*\x!hSQzPoԂדȦQ&qȠ^n] 3j={'DI!!3ly,Qoc >@y$ PY[@#u8HOqZ*W8+*^ޅ p}\pP/l,z>rͬLYp?(eif C8ԛ۫VTvP'UW?mZa Y#:2Grz^]kjAKGkOhR\Dо'ad¦ɢï@,R2Y6D㤩!O?[Cױj揎0Uݨdg`BgٙN;~ѩ }eigRP#jdAfMҞnp` >nG U*䈍>.RХjc,ݕA*zj/=G HwuvUlz~iʚL<`A%{d17t樝/۴S!V=uy98FSiYmXԭY욦o7=갸=nVK]T*E4Y1tx vS}1)):s6@Wzm.N*~zydr!لn`o>w;mpƃA&mEl"į~vA50a/MuޭumڨQ!Ϥ.*{ؐuWcݽ-yz>b*?xy.%Ī#x#tZK7nQ M"q7lFKVz|'-Fɩrf~ Nrm=m bQ{۬Y`rxXu_.T!Qli؉ǣJNy|xb;@8|c9\{#G+@73ѕ*J B`7?7[ǻe~`bw@ R W]qjc M/5 G{͟];G6Mo{B[23A5jת BdƊCG1s"Vj`MA􃊺S nڸ?Z9u۠׾v$l>anՎgߞ<(KZ%=8vݾ4窟csEx fp~ŧ 61508j뮻hjWozw=RNjALdْ6l-ͻJ k n `@Wr N{ _ԘEN\*i%|M5Җ]69Ԗqrcda]AmJQN;b(-HHC! !W"lĘl+NZ}).[tAslzI.W836 _ [c6*-~dv=Ss21LEuUȥ8. I[aj8sx-zxlrX)g=ߟ}3f ]veTw}yGQ^x[q5 J^]N+EH-ĐKk̑TB chnnS;]z {Ab*OMPYGjMoa.vڃ{cq? 
0:8 Ꙉm6z*mHػG'=@n( cy(:_P[CSysUcuW;xA 8 J8[3PmuiG=|ٺVGwF gT&1R}#/Im۶=zЍ7HC }Μ9l"KLzR>Zf|Z+dDA >LA;rT*g0D=1畽*LdͪcLK5lMjrm7^jZ|5t(հA7FÃJCe++vfιxm`L blY_] K,W?82sΝAys Vm!]8g,5s~睟Yf'穋@\k駟y< b 32Q''I̮i$Ker;ccg2H}zRCCeH) l %G 'kJP= xk:UJ8Αa+޽L=tAAYAr}qҋ-; ׬/^Lk֬ j_l|$$[*JNƛ(Ƞ*6z+|m1=޷tEG@$$Hh?+*!vd0ĪvtHMmĄ ; /̇I%3Kuk ׇ"k4f*-$B[ ((GG@`[ o& #܆(۴4q:U 8(^IWe#Ǫpg E[ϠUwg$k>]q{lӭ:i{ hGlwP'PXEZH~9XGtJzQhJ꧇2rL-"O?r9Xkl^`d:k ~~ă_ N@ *<Ni[(#COb@T1^oe;Ə1aSosN4;*<0zlYl%zZQmFDvfpa5mvE.,G#gXs݌ߌڨ| (xp+9 ; |ׄԐ^zi[n%IذZM&ǩ!Wo.ԛ)@2"Pv&7W:Ѐ42Hq0ġ=^:]okyh|'GsBfOV6a 5g7[1΅br ]l -G=cXXL8u?obo1jcu!=cŗp{ڗǷǨ +[V@X1Mz*=ŴpBzꩧ:9iz5tNv:+=\Mo.hq >6oy/>ap2NSIġfa ץh6U3m^dr#m\˦Mݴ: *svq2;{OX3[wD vz>k&|#ؽ97M]\c˲;o>ʛQ 9q%+WnMk?|jܳkX )HTLLfKD5BjHooolT?*5tOe|dM?ŁqBIC,x] W|MD^g\EM}ɟC+]ؘs0W5c>~<ڧ lPl91[ͽWN%؄]h ^;!w>B8; zXC,m#?/ԷnќTơ*]hjva %'@\{߾}iĉntu w)@Al*hEE%b PGW=,ɏ B?Hߖ4cㅶqq;v K -֋*hhK-Vkb9j˯@1 ˋSqnۇo9|@OS]"z_o׼(7g`n.nXW8G?,dG{%!<ʰ'ĺ ֍:vcC͞ttOD Mlj[f*m`uGyڴG@|TMaLi_qdsO8p|2tg8 ;+jGS۳oC8PK\vnO?wEڳt &oH]cmkB{4#k 3elMqpjc-^e~猙fCs)*"TFJ齩KxBR2Weya a! 
=X U3H!&hNg5^آynP75W9IFƹ _V~:x;Şy۞bNB8Vy1\_㘥vDxЊ11 Qn>vh_}l[0.,s؎vjGx"c|5 ud9g ϿM;w17[=8U+]zQm+~b%' WݞY8/)w 93/OviZRC682@8XʄoPv1ak:8Ow~}l"mO1D Hr4Gp}Q7~C7<7n[?pQe^>FyIOa1!stO'UB?M-3ۄk~hr+'˪TG@{b0wLEr8L&Y(ύQeA4f+P^995uL}ܪpgcZ,ۣ} {lgqnA7 S:gTsV DT+c' wㇼ PɣQ}YNw^W^/,sheQ`mFn Y4q iԹsgAIlxAZXa)<^ڭ@I-뱸wL[!A:=*HGD_8!fDzh-zY6\xHreՉ5L!^llˇ:Xk$J,Bepco4%GɍB'CFms 1R];ђջR\LsMobq#wUW]Es?0u53gzuϞ=o'm_+ Q\ :CۉJ#-kH0w@祄F+Z(w\\>D@n~crm|8%W]m ڎyϙ(X*ۂ塇7obFZm1>pTamz@BSsnY߶&}J08.J33"\ڴĄ&7ѣiԩ{艹8Ӱahǎtm3ڶmK .:t$ےCz#gb2%bA ,T"Tt؞VPl;GHL,2Tycޙ;4#=Be^VShl!>Z@@6m'mHM`;1CIRFdD@{2~qiRحSm r:Q)dO8XX{{QF-.Ķ5-*o;.Q "RƇ&Zđ-ٛe:Č8c$(ൈgqG kpp9q!s`O9oJfWDZ'3XBoNÆ 4^CC͜9뮄_QMM͂ (++EqIsΥ.8'3x,B^dH 4 fb{,\edchP~{D 2ogG0vNq,W0 vNh5wCKh_L'!M-o,U"]z|ijgnlKe=nm]G+Ý|h(+@TbM}e^7l)sfy3f̠B0aB>X082*+/ lrIdbF?adW `p0gfpNe%dZȊi±ØkLCUXnރ@i;U{$1m 1L;Q1S# FUh6n9Y2&# 9FrA<0_Y5s7"2LXzěsZ:HEV4}"`U=)aVYxYJv^NHEڴiS6m9rdLyv#kߑx~B"0KւU _&sM܃t6ȹ0v*>Ѻ6l eN&Uݩ AGL&Id50 y񈸉ܯsۗ`ԃarw;g KtׇJ\>~|_޼ޞ7hufPA u$K={3^UHϾl*̳{=\e%馛hʔ)/TJVXA7n]vCGvtɦF>lL~ݠAvU=HQ^RRBvikk hnZmůg f2ǔY7'J>F**,5 ۞ͳxl~Ȩعb+VpSolP ['b%04 y\% 3D?co1ŶL knK!ky#/UbF-@ǵ{ }|-Zg[ځl ZkC@%M:MkDr+aܪ"}5&rHQp.jˢ\aU8p]qpҬZ9Vi̛yW$r*K[YZU7#8έì7ت|,TM=^&-%i%i۞<~}،#6jӢOREz:S3IAoTR)oVܚefx[@0.]I[TM=XL$oH⊡n r0+ฏ|{f-)fȏ+fKG .˥lmdffd0sBvi'NT4ɑI.Qd0'G޴&̮:QMB[l 4WtXnP_w,\@1Ly'N<BR"W $ZQ׸#ҸF>|S i yfupf$ky!uA*#fr:D/raͼaVR_-rTr C!&%&6Cfbc"JkmF7oYCk|r;A`{M>U:hq66M[ghfWV@ivW!EWgжUHKxHq:EwXOڙc*!mekf\Y|MKSN䬠, ]}0z>96^*ӆת IFxYavԙڽFd᧙CSb\yxv)!ܤzxRmy `{<PJh@=pD[uiV7Peȃ\Umф\ JMԦI$]ej:=wjAQʅeI;!h2+f0I'IVJP~빐hifN^_Ho~~*/ ܱ0?Q>\k17XÇ1AzmXcz^1lAZ"`{Z>'S 28 rmK-RʊEy3c*po21kmQr8Ya(-[:V_Rg7 ME@|Mlyx!MmHDρ,NEzhsKӯ[]w=/d.n?$6Je,qOιm]GalTXnVټE 't%GnrhT+R+ a.UFHby0fn"vNwZ;|ޚD$ K2fɖExalInVdy:Mfyjw1>ț)Ӊw"%ĤY}y"%o!y`'j'u|_vi괙|Θ9˸2`EDm]z!`{z=럳8 B3ecؠfž2]&U9Ig˴< FH 1& Arh6RMجYY,>Nlr)4k[9 8ȇvS oՈ2]IߜzcxKе?Wt:Z~ܼחI%*A8)Jԧr_ѧĈ9cZ)W:Z` 6=sdVXK_b3dvk+oɺf$sږ[XϹYSO_5.b6H;;28IuņEPH;섻731S !zf.O(s'NRqb}j`gnjݱ B)WS/t ˸S-r铮!hɨvE[RZb+@$"d2hmj^>dMqq2^歮+N_[~X~%L5j=k;j}_6ߗV/&muT _d7N=^?HU ?p3-g:R!&8baT%Y2rSvWѣ0ǾQRlU>b-&<447Xha 
uZdTtr'"U(:;K\k<21 (/#}lC3Qb%rO g-6ۚa1R#/~vVe4FՃ@ mԏWپJFձ\$cx_u+- KyCljgaoI2 2H[~UdS(Bv3eRӕMfنD8;]I444xVw!bqQ]:Nn4ȊgOɬ}<7$i=M$,W#˕.V`)#CoH˥ҹ++ y2,m]S7nEs7aA"- ¦*NetTCjïjm522je1?Am.1>w\kO/{lo}muJ*H=J 0j{ Z) $)DXA,&i1a+*2 {e⦱o?ծш"E?!e 6RчiU4+X~BRҨ_[=XМu S΄fL&tez\aw8*~LvGEn_ Asp"2F^8 /kW&vB{vitCTYsȢ⎼$5xQb\#xk1nΉzxђ6Wqk wzč|E7<9g0Q[Y9 )s|LϾWfޘ~fh&u DJЫhܼgK i@#|xZ'G/B16Y:A#;U5@G>@@D7Ȑ']ȝW!\0{!=Eݛ+K cvzܴ~胷f?"xjI:Uz a+r1|@e5Q|&̶"qC 5d dEz\d.h8"lq!#]/A`Ί[K`gd;J*"Pee?0uv*N)"S3¬>O6,v'HvF??Ggl_O[o1w6w )GC!B(]#h2-B\TājK:+Ǖ&ƢbwX Bq7q8B%ֹ,POͰzQԟ@7&&Kâb*xG[6B @CK`WeHhSUpKM`XsWjypIst>_5w+LXpyqGnf KHOJ{jhΜETSSA t-x.U#pODApN(blF,_}>h$Ej~2xnԗς7~ uWBګzSSe q?!jn?Sn|/"yy4}=C;´D@oOO:}]۶mMb^pIeZ@DFͧjh;+sOU"v,>aU̮!AK)fk;[rዽjV8fK\uHٸq %%5a9 Sf} ]_+E@Eo!~bibq:n* b ZAW\C&_G rɧ m6nJ KH[q|AHEI jL]+oL"lx"眗F!K|7wTw9~!3y#;ʦ2vV!}p0=TWߛjxh"_+D9{ԜZ4;+xqr,jШb^8ݡh$2#^NDgefVǑcC i,]tL4l>6~_2(AK[eh?+ hZG_a  #|8eHZ|)Z^0, bo|. *Sȵ\]N0(¦>.FADwmlGͲ&Yg[0 ]Z3ҋr!rU?2T븴9XcYq!ic}%_B<\ Ӕɇʭ3}h`WFkQmRDq[JF byƢJz]':V7ܢHecnh<>7m'*t7Jcyfx$1_{ $Rwަ1r?AHm?2]MQcE3"%1,@"mЄ{JRrlكEXVZcFOwO Sg&'^IFn u1>}48,_QVfO*h:NJއJ"iw~rm$X%|UHU}/&|y?}M H$BL H;sO?\mt?LMU7/&f)]٪> ܥdmO>f^Oc:}A#쾀GJC`Bڰ>޼u>}ku)vEgϦu7|6mڤ=H@xNM`e@>?PB:T*zd&H&{ڒmzVUX$䌭oFG!pBlJU;7Աfz?0xnW\i5H=bɣ?[Q.df72 FQtڿW)&@W kXXݏ/K[p%\B Q 7@\ssuQvEN6wZqS)&"y*/BNyn&^LvbZ `h]yL9U_!wBbFϖX(ㄡ0o*~9an!r#+VAuFS Ȗd:-ZxN{K|vatWg1wVF(l5;EF v "ߊuJN^J(F(Vl%۩7/ C0ꐇo^pȒLrANSBZʚ䲯\@/ԛ%1ښ:QqqT"B8"i9e`.p7j.r*@GN@T< =Hv񻆪1T:ְW^ُy.'V2wt3$EګEG,/&Xf"o Mm|>\xmÖ䤽Jhrs\w@OKl)@DGy;FQ&a}AGRNr zi"w \ yu; 9 ;]#}Ql(v0 RJMBK\1_sc֛G`.=➞PwBzF51E!_ݯ1=@\ ztϿ>owjfNRe-RS2dH̔Iuɒ%w޸̚I+WT?Ѓ_{oI7o5ULp Zoz3$=IWn #2op@E^a5S,$zՙ4g2L|x#Ӽ7}߿*ԢG] 99dVrO&a:pr"uJ3d?jku^0B/RUe\W;=LMB+ YH? Ҋ^wp/*ȥC˨#!}TC]I6V (CF62nv0ǐ{$i)egпqLC7>}_:Sv5|sЊ\**{Ra҉Oy9٪{+TKV@rrbcx9ute/L> ]~#^|E;W_}5}_>wE(!~郅t*p6%>EZ."k $V.U }rݿ. 
7BXWNuW"`Y\7e TRY4 5< YlpS!t09q}E[Ȫlؔ~,*G[i,}DD%H`KB0׏xH;Ŝ^ښ<|gU5#x }ϞJZ=p:O h7=H}Я{Vpr5RXXґd Q7n}k_￟*++^ /O?#h}e&9BKik7ϱ+m2(%GKȭ)/qS8c 3~Q|"\o!w5{j2ϑxZ8@ ya^ Ӯ0Jb >([n(WNZYo cHv5 v;BS] qL3T?aN#SX5(˔싶KP0@3l<5Gc?&1sA:#ϾZMܹ<JmQlF|/a0h NN"v쥭fBUJqk4Eì&zq)N-kd ya (t!X ` wQV&$$|MxqQ>:>(S=i>8 )-6"ydFQIw)bDV~PB #_krE QUDsi/Mn_G &n $%+dBCYgA_ӑv ! gt R(pueM[n<;oOF[F58R`v I@![ׯqO}c\S/xVs\KSs^NJr#0b&)}iW^[ub=Q`J6mmݺ՝4:bo&wk׮ꫯRロ~' iH>H|$Lڃx;t дaK#\cʔpZ>J.`>8^lݛOr]~>XKww! ]e|z E=\m' h`@Ȇ8?7pMuQA-Hc^M=qu!ġU|\({m}v eqw&dsuѴ"ϱ.^ [L6Ozh!5)Z) .l7W}2PH{ltM4eBj 2+Vƍi[yPy{OVr2ҧ+AB&D c qR)e aW}|JaӜU^2bIDATv!ZIB/u'J[k?zۑ UN3< 6htBAI2XqAa 0feF!#2̼w^H8"K60(͜)qę~}4VlX\u%Me}+0crz%h- ϤK/ЯXͪݵJue({ `{ $WPQ\H%EY4lݐ|O1=4.MFkUI+OOYs9(R6oYrgm۶);q!a}X)7T*ewA:YK!җvgZIQdY)m.WZ&ՙ=|<0;f2H>Mz&r5# hw%G u4$rH"I9gF#/RA8| @æc]sٴn./>27G*,A,q"?}MZe9lf +=hˎCD(SћU4hP b.5;u߫d/̟G*Ao{}ʋ|0Q(zxBN8tsxm5;uI;CLLdoEv줷f.CJeKW?Ϝ{2] x0ǴڣFdE\%zܕ\cn"/IšEvR#G!_2yQr$~,vୃX ўwJ`, :,wv3ޞy_E3tW#ku6cX5xw:u<?-8&@oF&\'2fSU/mYe[ F+0?!S:N6n HQ$M(k- 1/9uT U|y?Щf`JU雏|݁\4a.t5$aTW16qۊ^@m>wL=Jb`1j ߞyV,_王 3uX;1i`}b{0'U*17_I<-z ͤelK[jeE(Јr'd 4 3crf0/B% ®S ~&4T_U{(}d9$R k'eRL{mLK>Ȫk" ?H's4O^4$j8q̈́bL7~<),z%o͘OUALW"Mͼ,OE&)׻K鑗*]sG"*@ ?7C|{x`Dࠑ_;mREdIBiTHWZH;:xy"f7 X柗ӱAL1W~q94^9/i]_,vge*K9&e\XQ\,nrJ!1v6cp= ? 8WVG)/ΦCl$|Vet$CO|1Zv") ߸4c2(W(|E&&B& 1L'FC )rߑ+ }kyRJܲdd.k"Ylu"O@~/G$z ,t:\cʑ6Pq=yxkJICNbWeJI{f,}5]=ɞtK~mϩClp 6dHv 28&Zz3̀X>٧eI*)&%$wK;!:J/i#ON+MۼzwpvY:k6K6pȊ GWQi1"s\1{Jk"YW  L$DcOlfWDgߠY:ģf_˿? 
;=|eCs" X.H$gNjBl23tā$Qߪi&QN|PNW=OYDA@k.~rkz#Ķe.EBCp ;(EKf~莑oXt'* v E*0NR 3)5L+ph62&Dw>AlRyݜhcxFV%D7B>8y#Q&KJ^Vef, ;0>Sg,;>Ci\jn}F7]z/\[*Υkц@B,΍<>AH|=y䅄sXm_rWMFͱp踡 kRe*cX]2'x|%M.inUMl zTn6OnM*c+.wm"ī9%.;bN畒|:A# {V;@OS>wNھk/}Jx!mFϤ3C!x.i/Rf%h-m3c7,7jjpGg>/8IN"Idi!Qu;5C I)f\.OG_;Rq"lڴMn+~vK5M8d[iĪׅ!,A Q\}ԽE U&2n/}g=*m^x*]|'VJ+ Wg)W3oMfE2Y'N'4H{XM)%h.miKQL~#WȖ[əATVZD 5,\3CRkH^67Aqڳx$W+;UY %ל>jC2%3ͨgi9[ ֯?3^ ?npӣ-Ug6Q~k!sR!GWji2.5dP)7R6~az$i﯋~.;zoƌ-^C.:|<|OuG;\!0( U[<^+"GZ!(|=E@nۻDZOazFK;E*^N< }w4ۻ#,Q,X*cw~}c7o kG4먌ׇmF[鈬!%4fpڸ+kGkq獉HC钗iXܥTZyWmD &aCq|qģ%{@'ΫDO4GcGtЋ"ЩۘaTQCU}tϿ>f,?~|ѣ;u<+,Kj|Z柿Lu¦"~^şe&j\ԁ.fVrg% &]`#Dctg-f1"quI6t'bՔkT*e}1bĸ_21-t^n|Gl܍D'vЂwsTE1]itiG0Fj-PL࠲؈40zDwpN t\} EA^Q ݋[ת6!"`G }ߏTN7Ғջi:'/O?i>-Mɵ9ɆFDQwf%'/`bj^4 SmIYoʽW>_s 1ۆo5ašepa9r-QL gҕ_H1,9@B בg-ҝAcale"^BviGc*dS|A;^PnG-A9^Kp>`M}UFd jH#_DG Q;ؙےdF~z;wL?Jt%=TɈ<3=U%kLo8cub xS4,CpK睠o .6ZܻT.T, a1n0Vj! tީ2"|#hxd*a% Q]6ilЊ\:X/TmHJ ťsO}7"MlKΦKHM'}H(-94D!b!,eR.d8xn:[R]I4F{`}< ,=2]\>5p5jq#K%w-)@IQ6t **@xܣc,U>\[nj,qjsoYtQV -E"p dgS6 sxm}uW큈}-N@@\;A!{1Gy9fr,%`;_NLPQ4 ?SYqM@#?ӡ ~']Y- :hXM꧍1ofC*،~%[+ud6SV}_vV|q+nPaH;FrwђMי} ;a@o@ /7[2D1.N8r27߸Sad{KܻwyGC[ eM@!PN<>\=G.D\~eV{v$Q^~ohw`Ԗl$dE!`{ ko_M4vu.eRP@y֏f,tуh]i[rm;OM{щT: jmU$?rROwۖ{lgXށ#Aا\w~+W.18zEn.A3GRBCsiѪTWsHA%="akO@ݎyX=ukOLؗoC^F ɧa92iQRXYGŽ\lXRAǡMnh״s $:4it" L9ه~~mzg)77&MD;vo>ˋax뭷SO%|@7pE-+ƏIk>z&yMє#"c@^vtm^ͦ84 0CVAnKXRYƶRu>ڱv]oiiU'nQCSg,w'я?nub3t k2nxAG"2/| O?O_wݿ?eff)>aڷo[f3Dk;;cY: n'fBdB7#p.>Pc[y@ ".q(I|7)2&jf2Z3,͢C*?dX)p]oLӧOWLC"zWwO2tYgŔE"`@ykpHG+6ӴWѿgPy780ihyedX7X<@ 9+~̴ |37Q-:R-b]E,+j3iL1v.8{DE)U6b%Kh޽R=\gƎKgBmE"`#Э@^$k?,D3oP=p$XׇVRa-@[dg'ĶQ^Mrd !J5I;}Κ;ѭeUwdլmkTi䍄hwS wTƺm梁O 6(ΥU~ joQ7eB~esSVAf%,_f 8eќ0֓N:)u*zaڶ#Q}p[{P3j\ձ?3M3LQaKKkW4Z3ö9j~hZ4\M u8fx型R6mC=4hH3jԨvO|YY=^Dq߉'3ᓏ7 |oC‚pgw>w93_e)@y:ydO0vi`ܹ.]J|&){7rH,F*9gS֝gy:di֬Yt駻Co>wC{NOa3GqWДgώ|ʂ#nn_ŭwHOkת GydOƟ9s^B֭[ 7$8p 1":3W9[5p,nKwyg+[[Hƌye%Z+GoheӁ J^xa+(M>LL#nedO>m+Ze6oXm#[Xe+"`X9\d 7߬1< 7 טz@0mE"`H8X%l+_|$! 
6Y,@FH;4^H-i4"`t nCnؔl/tDXC9hRһ,jo^| bIoH0Aޒ ZnxF eED\̷͂;ucY,E"`Xz3=J{30V7E"`X,@oB[}{ĭ.E"`X,dBdzZVWE"`X,E}vE"`X,Ʉ@qJ&| -^ϟO L .Ta9?")ᐘw}Wř.--g۶mj*~cB:Y|!QRuȿjSSSCo6͙3Tjܞߡ /-XN;4g6꿋|FMr%*6Q۹d刺 `STT䶎:\F}?&]<iϞ=t=}=Xe9Gm'kkk'FX4iTרv6Ѧ$C-[业ܹ[Xq,Q檫j?OxrСKW^;mq+ E*b 5bd{"_D3s1;-ު8꿋̱aSTl 7_+Wo>:Dn'~Cܙ+D R9Gm'\@IQLl>y=ɞ6>dȐͥlѢE1uRPUUE7oV2`y׻įz$?U`f Ͱ66͆v,VWW6Ioܸ,|G&@sظSL)ׯeff .0Pk/L_W56*6Qۅ…*؈x_Dsv"7ng?w|7&@_Q]aeQteɇ%I@LBvv"IzJ:-[CEVKr1h<G-H^"!$FQ=ஔ.I0yG,b?' cJZ" p@壏>J^x~!"jp[::^8J4q&j;ƍwD[\`@xK@57߹|JuQۉܶ"p*-l}r `{r<'WK1"!U0 l&SX7JuaIG7Q,Q7x>Qd$[DF,wqjcB]z* ʕ+''MYȿ3Fu3\nD.s2w>$J1.XpDW7}(o"( `Ǿ%^lXųLl3uENgA$ &) oo_nHfh޾-a P _~a)[x[ŋ/XEQF)kݠAKY:>U3.s8t/~{[@>]DmODw/K b#⋊b#}[6Q۹2OFLy$h@Ϋ6cF}GO:nSq*kqOg簡V`v&DC>H; a8·<&vHUF2"^c#><uᔌY;E3IfЇڢ.>3ޘ(ˏpMv1b.//O߃\Fsv2F(oAꫯV )R9Gm;{a(g g#!Z--ҜAKDA TlA6% &Qfqʋoj!]̫{}ud.kޱ6 E]YNXHG"}(kniʶwvNyfS JӼaNQ!`KyF1>(ܐ.1Ŗy{mn G9*)͟,,X%E\l͏h[+ ^A%U$hLmɉU,:==`IlBMy~O2͉QaZG)~1>K%iyx2eXK`y GC }N0?}Q'e(bڈvrbp7hr:-Sa~J㽍hڴ]WT2G:do/yk?ZJ^'_|SykhE &نrnEWIF3ێ'^G ! ҩs?~zzJd-~vv]KKڅbF0W7;F)Ң[H`SyfyTQl"*-\.WrcZ "P;h9Eo.eU<:2G(7Rszl/Z:Gu4ܫ,dc7sߞ K:Fⷶ҂2:vc%F0N9\IC%oӶH%>(se9̑rYNMTne~OhrCwǞ-sTnHhżO@בJ@$  Hq@$  H@pTQ$  H@ H@$  T@@ýJRE H@$  h$  H@$P *I%  H@$n$  H@@4+$U$  H@m@$  H@pTQ$  H@ H@$  T@@ýJRE H@$  h$  H@$P *I%  H@$1&IENDB`brms/man/figures/brms.png0000644000176200001440000012477314371144564015134 0ustar liggesusersPNG  IHDR?+``iTXtXML:com.adobe.xmp logos_brms_vector ƤiCCPsRGB IEC61966-2.1(u+DQ?f(/aƨFiI3Y3 *3sJ3eVS-cxr2LqGco\BYjY1jpx[YMOL)P.8ta3O,x)SӮWzi)mZ"8AtGNO,k,$E.QW$&6!&3͚}}~7{]*l* C.2x=Wa. Z|7+˓H B5̻=UW߰ )g4< pHYs1^1^d. 
IDATxwT7 .;4i&bic($$M)&T{z;*vDǙq3ggg'lKy Qsc7O88ZvYVVOϟ<33E(N2%X,TAu0n8̞="oeuBC I$sMs~loo|M60rH$''#<<8qСC/_|E 7u1oe;v5kpM3Hӧ#)) ...\?T*Enn񂂂֭;58!(2-ZtOXXgaaac=z8y߾~i 066:Yvvg233۹'\%tBۮtjkkrJOd7HHH̙3 ʺo=8!dJ脨D"1tss8::q+++'޺'`Æ 踥XHIIAHH)9r6??+W=8!%tBT*%%%qo _b TWWs)1c ,Z۴(**zڴ%tBT !!!*44p7ޱ Ç})ϟgy]pAQަkpBt%tB8s5w"ljjիsNHs /ӹ...طou܃(D"wvv^}^lڴ ׯG[[Ѓp}ǏSVBn%tBnҥKO8qػvBFFxib?0`mͷ2mWW̬A !7jԨo&M;vqq1Ұw^ޡo^~e</;;*#K͡NMH$_GGGO71{x.6oތ^yFRR"##>sLb-[{pB%tBI""""x5Jo{ŋcԦ룄N ۚn:ujﶣ]n\All,fϞ m`{rrr2A\:! okytt,U5]~=>mM۴^8t ˗/=8!:!Wxވ|͍keL;w^D۱dr-ozajJC vۧcƌ[@~~>RSSq)ޡ՞͛_|* ^QQi%:y[#""ƪ/h>P988 !!fky A ,DbILLcVVV\alذT9rwhejj{>,6/^ڵkSuuԦJD',\{kTTdމWƷ~;lmM+-Z{⎽{5B h5y[櫢ƍTۄ#99#GǏlժU{'D PB'Zkҥ/N4i/_mMX,C=xi9w#ԦhJDۚn4i7EEEHKKþ}x&WPeֆuuu ,Љ֐5&::U5]n6oތ>ɍyyyaɒ%*iZXXغe˖}=8!Ì:x"##㜜5믑qmMԩStRx{s_x޽{N8i%:hIIIsǏ~PP{Ezz:x&Csb[ަ̤WoDPB')...800pԩSy5-++CFFt\Fll,yUihhhx׉&N4D"1"::zmmmX~= mkms} yyy/-_| %t$Sf3L&Î;f466 MH$´iӰdq-ozٳ^kpB8Nŋ;v'ۚ9r8}4d'K/ĽMkkk,++kGyy~)5r!!!HIIAhh(uOQVN(4eʔ75駟rJMԔH$Œ3GGG{{{sٳ׭[W58!C@ /ƍǽS|LLL0|<3022ŋҬ/6DHЉmMEGGO20zxښ`*iZRRұwWSSSWsN PB'5͌zw[Ӟlܸ~!5%4n8$''#00{V?~UVrN PB'nҥ/O4)CmMO,_C-#">>666\cwww#+++ܹsM+.ɰĉx>wҰ~ޡ377ܦU_ko444eggP[[58!WNTN"lwf[[lxzzbɒ%-[{pB(5͈ZȽW_}L477 Mtܔ)StRpwޢSN=z#܃G DRRǏOUmMP\\;4!X9sW^QEVdggVRR2ڴ(BL:u*ښ#''ʵaammW^y>(6555999+RM+:B˘LMM5}_ghBɘ0aاNRFFfNNnD";"""EmMoߎkR[S"8E֤$1klTݻw:s5kp Nt%t2d/5v vc>|((mDb޼yxajj5KdvY^^$i%7:i#G1w[j,_foox<cwٳͷz܃E '111s,--ִghBTjԨQHII1cϯzŊ?sN%t2()Sӓ{[~VBmm-Є H{prr[ަٳg6F(JLL6f̘o6ޱO<4jkJ {9<۴677KOmZɵPB'״pBmQQQۚ644`ժU舘)J...XhnmZ_OMM]=8hDgmmm)!nCJJJڴU;vUVesN4%trYrrdҤI|‵5@EEЄ5Xx񰵵[ަuwii5k!G >>~bHH'NٳHKKÁx&D㥗^“O>6.6H$>>>c^x"֭[[R[SB%K=ٳg/߿?!==cڣ$qyddBGGGaR)KdffghBɓ6+>yիW-J:&))iބ ZgS[SBI__>(^yXZ@VV%%%s333'j :eʔU5MKKCnn.k%d`̙3G%mZsssW'SVF ]I$KOO/bbbf{gښ1qDmZ%_sN%t-%HvvvHvuut۶mXv-h%D"bbbwwwR)sajӪ}(kE=0nܸFͽCFmM Q1CCC9!qqqxKeeewNN[o&$#>~ښFdq}Ç?r'Æ{WLOOOښ\uuuCB@$ŋUҦ577Haau֕p N%t xGXXئc:}IѣCB8066s={NmZevYjӪY(k n+W?@mM ...HLL==vIII}^wVpNT5] ﶦGghB0;v,RRR=v^^^ǟXre+J 99I&˗S[SB4X,ߏisM&|=aޱ M^z%̛7{Fivv555ԦUPBWCwKTTT*ښY۶m}rB=ٳgߟ!d(y[UQQQ V×_~ښCTܦɓRV@ 
]MݻZJ.Ü9sT٦9ԦUXe:"==999\B45$ Ν˽MkmmmoNNΚ%ԦU"H,w[K.!33ښBK.ɓ>}t_6Ï05}3""b*ښnݺk׮Ņ x&hHh$%%ÃELݻw^f)uQBF/~pܸq$ //iii8s Є-fhh'x/̸nkkڵ뻲'MQB[#""F޷BFFjkJ2Ui}?:UH"5w[ӎ|GϨ)!`MkC~~˗/{pB ]U^}צN=VZEmM !*!p]wapvб999SV(sxgXXƱcmV ĉxwpqޡ !*xg1|iNu%tNmMEFFNPE[+VraD{c?sϞ=LMM\PBEe}ttS666ۚ~㏩)!DpaaaHIIApp0؇:vioAJJJI}||Lx߱|rTVVM!Ch;;;{zzkVs (ABB¤ѣG=~x5MMME^^Є^x=7,Ȳ ֌9҂w윜,[ ϟByʊk쎎dggY\\(i6Jױp°QFm2eϟGzz:rssy&YYYa*iZWWכ>D _EGGߣ+9BvArr*۴.{p E ]NI...\lJRlݺ֭"KM뙂M+%tŋ7n܇!!!6cS[SB O_VI֬KKK6:FDDeR[SB… CMkݻnll.dBH$#F󐅅|6mDmMoٛL@*AAAHIIرc>vX#G[|w܃1K诽SNۚ~wXjy^$nV{.ʁ'ZJhH;/ 6G g[kp53 }ѢEw}ƽcǐ'N,|v_( Oˆk쌄̘1{={;S{p5 ]􃘘yۚ~'ذa5IN@+s ҿ2@ ~&jknBtOXX1j(>\}'W\ʄ7yTU5_b TUUqis$g['z.UG4 TBX,̙3{{{{zzXڴjUBOHH"ok:w3g --ښU'[bTH ]g h: 'PG~h5M놚6iՊ.HDEEE>PqY۷o}ޏ8^e@CPPt_`K"KFvYl:8T4l3B0rwwŋqs-oӚ>HD";::~ImM?stIg !jvDvH0LXb\'ګlJ:50qD$''Ϗ{,߿ĉsW^{a }ɒ%M8qu@@9։ Xou~/:ٌZ%0ѯnw^>@ohmٳg#66\cwvv"++9 \%'O;vII ӱ{nޡup3';끂w/KR`>N+ -OG ND"?6}999kkJYILMMnmmŻヒoښr`Dno:85ـ[f& pUBzҥK1e Z8{p>K$[I...7{kR)lقuŋ`fVVV-[cƌu$H~~~S[jz3g:99q-SYY7xWFc#ϓ@0U 8 81;ɮ bh IDATp&6,QwBT*ѣG}vXXX (([VOOOQFeOJ[ȳWnGG}]s%D d)`L}][Z-MzۀssۏWA+Ӫ!D}ttt ++ v킏\]]Ŷ 266^yf߾}gj_{N>LJq߹s'{n Mi*p>.8bXfǽ"Y^Fx蠊蔆|(**ѣaaa-ѣ y緃hrpO-g{O>ʊx=ŋo&**"c / uO Q X`E;td !ڣ7oFww7BCC猵X,op@@yyy,z>̚5k7Dzz:x&W /۫Vy6c$c{۱|?_l*ePCáCwbF9qgwnn4kk'\§~?ÝIt0q5=%|ZtxF m8`Tb~`ppc#1c %%E%mZ9R?oʕp~:COII6m&MgllįTfg7WV#Mն?YBQQQP,ZbtPPн?8p[zBBBDhhۚ -- ܀mLvYg=p+j1>K ݭ ^駟ަIiuuKlӪz\\Ӝ9s~pww… HKK?TUdhjݩKN{?l+gS6-}ف=U_#h߿?#sCdbb" kkkӳiy܂D"wrrZ"ﶦϱ~zjk*>v7o+.$𙫬-/Yg&aGQ?ǏGJJ >p@ى'ZjϸgK.+""b)CoYYYHHH?n 8lƫ( (]1DzQ *++FzV^BUUU[066*((腑#GN>//˺&?vg̘񬝝rxױ~z477 MnHpr2t}r9Y#nm؍^M[B&p)l۶ 5j6LVrBH$֏>3gLVX׿PZZ34SW *H8P-ܸC[d"1{3ګٖ5p!\KWWك_\'ZXX,ػwﱡijddd37oƻKmMՄHu{Gse x_op paF]Kӓk\L{=u#k֬HC'%%=:eʔܘ͹8pعs'5U#VAWw=Jj2/gM\JCm6K' E֖mZ%nMMN2%>BEE222_A&Sntl|\*Y_pt5T;Yg8ேylmmٳgskӪPUUՓ񟙙;gH$777:v|شi\WG"ykpy&{>,lo@SN^#GDrr2ù>~xÇ_|.7}GrM;v@bb"5Uc6Ckj'W{5SDGfh>p{%hF|w8w*ڴ۴׼s݄8W^}w̴z_=??ښj:x0ed}@_@@W2)Q~"ۑ!(..͛ե6 B~БD9o޼f͚Ļimm-|M,[ښjc{ 0p͙F`nX=z;Yo<aoUҦ5 iWO6%я 'P[S !wSƎL T~NQU 
.7dzNUHIIAHHGjʕ%KN46mM$$$௿OP>mhҢvuX@,_3v:t'KEرc1fUi}*((h<ùV/((ҥKgQ d?oeUweK bbBnL&Caa!l izzzv{@SSÇy'@lCnz;#d%Tͅ|t-6,:!ގ5k56ۋO>3g!j4NǸL@|4lH*wz~EsõߟBnEee%ϣKL. }׮]x衇b Igfu5V 3}8Y0ծ'0w\% (++`u ρv[5PEWhaC R[l̙3q!ZRBoii;#Gy{'&3R[W^d6}_V"1:BT˖-ٳs f—_~IZ˝m(.ǖIʟ Gְj:T4vaC=%%%Xp!֯_Sw zk!'`=h+*zZpJ,cx> _!J} l;\T4[8E!%tss6`d|}Zvn,pZ 7.BJgUg QFUNZOBN y'ӁN$&\p"롄lЯPhlHAT S>ss/FE!E ]DYM8 銞V؛_zc{ (Q1BPBa׺_]#|﯋?{ <l!\%t%6x(ߩ[N_pcEB %te8F s/x!Z( =cu:`|T t\tUw#PqCp"( so6ü܄E4x0~7+р(AF!QB1ba \okV*ܸ][u+Ke !1<d*We;iv~#n.{BPB9n2Rh-fРָE/ܸ!SDh#l YPT|f`X7.B(ɀKg%_- G#E>s8Of<@ ]G ؾB{%PCAod z&+2(ۮEʀ IcE_6 iN%tgx vQ*Ɩ!d8QBAs$L ek;z's "JZNp0sW>n:TU*Z^}ǟB %t-gʼ^V%.r-O,]lm ::BDlvYo MG-P;դ|f%o*ذ!:3F (uT P\8ecI!QBbW_xRhJ{m1p% 'JZJ$|zpy6I()gx=Y 7&B<U \<&xYG5pWv!DPBB"=VԨyO+pzpcv6b= "} p!S'@ ] Ov͊FWճtiN>еHz0|Ee^Us_F;(kcGc֍#TU:WG8jеH Xx)u Kg\*g c!%t-b`+5vƥ3`krpj؀I%!: &*,MItT5,i|! %t-o8Eg]M@N@-ܸtQ@YMYž!7JZĉ쯹KK%NRNSP6BPB.μh\EVχ%(k2Z Y"KE@L݄!DQBAHs!ɤU`%pc"h7JN 583?Mǁ}UMݴ["1`= p,!څM(./5\C_[9i+S>3qo !DPB@F]AG P{DMȀ<8(6`-!D{PB@K T,Ԉnfz۔,Xw=#E55a1w(D=՚ Q3fU hٝ%t c*ov^#p`C"Y|1XB8a|5mN횢tpf߳;! 
1AL\g1`v^4jDfuTEWҝ"paC5Ȉ}@kWhTxH%#DC>p h@&Ȑ]**n:XB%t ،R hNGvHNxh`\B55z`h|vxh4)3El5u00UX5SŽ ]{ k" ,(:NѡLYVAq[#jw- FS4;/A!73q\MϚ XٚO]ljq~(!\:Jbv{&ګᤷ yW0#}EP՘z0P>kʶ 7&W^A0vnLD ]Y.>; V*x@ʶ`3$A ]M􀀗CK峎z}DT[VlFAl>Vh!d()so_OEOWpl@:DG W>λ/%_ 7&Z%_ SWg"=bzY>7Ok// .(=d@].Ld4+&lI]l@eBؿ#@,fg(~՗?W$.)w+7V:kYَZY%vZ(p0( jn\Āؘ%n=#y6LGp~Nր#g00pϩs^ @%~ErzIV(#;VHƠr@A@#ܸȍ3oS  _j$`~xQ蛲;^W{s,ݣ`}I}B𣄮NDD&9I(Q"Kমa+d桬?,dP8`*$$P5z;3y2(sv]in388L,q Uqx7>gLkp"}@8.{(-g&n`؍eoXo.`w {|VQ=Jjp26-tHyhĚ8MaCvy/ $z^-|I8Ukz:دn=b^8@g~olVL]SX{3yW$|ycP^Ϙ] br|!6.WГf%t5!c3=Qg @)ܸt%1;t;>plp]u5-gK@G%K}wNcl9͊{[olh `XGVr뛱&( @G|[ia/h!PBWz&SQ VEXh#^`I-Zϳi1@W2R):n>o.on6Ć'`v؄6!Tbm7|do@OmlAڅ_7BQBWȐ##bgF?8N o2n<d#C)+u>lp%vg[VC1[e+.1l6gA-ϛ~[{-P8h<(?LGJj"Ձnb32"}n*#&(xop!ͦk%sA[#.6K[(rkq@Q=6cwciB:/+e?eCBp8NC&lI[l0=aT T=پli'pjs$_Dq^^l`_Oso99Dv׉8/}W@QS[u&=JB)uPsYclRl P#P|RY U .JL؟Ez@H2j?gܚO瀢zlUbq`e;ρ!DQBm0eؒ&:+0bvo*/rٸBj=+.]fOaĽHVU>x+P;~-V>^vXZDd/  mjb!QBx Vg˺C=PDzlahf7J>8P=[No9'].lAi/z?^n< P(5GՍ>P8}S1h-b_oؕ?BC ]@TVD1c`|lF{&a"j`Ke;ufVMmزumph+ttC#zvȑ0q{AwewԮ1kg/~} 0Q-y( D$fKVtzZlf#bWuLy+^)&NkQ'K D=PB#26ٌg=`.PdERŏ@@k! Ӭ+!a?;g7=3f%h˿c @&e/h{Zء3@&1mK9Nի-"2cen,fş JBJ⤵ռn)vhL "XR#VvRM IDATm@(4xI `3UnQWu@k P3{~?KڊSKZD^+bHmPQ1J3gYB_'p+{~=֣'EKuկ? d+Xu\Rv<bK`_4dL t7Kْ⍀S otX֖lq2{Aw~ ; N%t;=wϫwMP0D2+1uyG=P#P3V=כͧ ]ψm71`NpG۱Wlȗ⭭vxVy7@[!d( s6A  ݸ.鱻֞Y=s "Zd}@k1;^rV}+=-lB&TBլ[]֥EWD3d_' v| :,6}Z:("v~6-ozuiK߱Aʓ.4mB_W>sd 7.A}l `? 
gdǾfC8P=;g,Q> /r '{؞+e}@@n隢3@ p ^q,_|8T"ذJO3;vTҩ[=n,KN1ҵ5 `4dKκN&ʶxDƎ aǦnۦqƚX< F>}3-B-fz{%\wv긽b؇Q:YT~zP*2v nz'n`D0 ʾ~amQWUL؍Q>~ڵu]Eu}rEb0n.=j>|Yd_[`7 "c]J^vnn}b1[U d?;콗9!E ]YB{;Mus` 8EC[Wz: Em;|p@}mMgÊ4s/*#fNvV~ΉCߧ!N@r8kDb!Dei_*C6+w׾#AK\y+_)%!a7 ]qڶY8  v}DbvXz`κUYJ*$~O Y ܘxpηs zQtVfоaQ;SqxV렽&ѭ05p^2o^ylx+Ik=˪ =|6b&p< h+lXZAκ4>Om b6naim/,Ũ6h>)xQ|aµ;ْo:]R3Rb~ 8*츴EG5pz5;< o= %:~f봭D%t鱥TzZYQ b` mx?_>"vǬ8Q#@`=g~g?ԍEAÚ5峽uYE9#;\> gW}OX: O%t0qf_7h:–4m_SlW8A 6Qm^@a+pf›ݭ.$츴MopcO`5_eOf :Q9J蜉(F+ziC+:m}uC; (NB\8ɮS)x?"(p<{;VF,`?\dOʶ:Q%JZ2[ϳ=5M"+~N֝Bl`[)شU_6 87qx}(X 4a]Q߿ 4L.e}D!AG68">OW\eRVn7?8H\zm ٰ.lbum o)>iŀu=#ֶuʇϓr(sg8FglCq ؀}pձO͒wA&Y֬'Y3E2!0+,@svGG܁ ++JS!PBQ~UM|\Ȋ|3}3f,A3?W`I2a^`˯Yft?{wU{f}MPq MQ6+[ls̲qC_ₚTZ[ieʢ o3e<0s}g)`9y켹DX@\ygY\^|Z!:]<}J[p*#Pzh-g%5@=cXn]-v(Pk+IcD5$@щ\պW Y#K6 ЯO@E+K8h[|6f[<ވ2NLnXjmR џFԫ"8=8]ojw:J]m=.2|^ch~SӞp潀ۀQ;YS^tUpuǛ\ 4(LN ܑp'㧟iJޛktj1 ~JDM pn;+jX c]]43+* ˀďZKf-0b0;!ʠӀǪr\55M^F˕m!:C=###]iw6'-VSp}{؏mm+,=GgKs}5+:==ضuQ@ |PzSؑ7n$efCZ`ɩ\_Bcn[=!PB Fl=V_݋̜ \'9Z]w\W9'>/ilJDiP \8ya []3wOrN zv'5\UD \%tsֻkJjo1t\R2PxcBcާm"X]@\QS0z-;JeWRݽ@eܗ#zn75J3ɧnmvSOfQ%=u@{it|F8pgVlZ?.pk.Wfӽr}Vr;l0BOc9Xn_x G\F\'^>B 6)noQO}C u-[{k SqTq M P}wmW#VzM]9Bo'AKhf Qx.̜[K3~Jn CPn,wv8p -% >ʒ>&;ݍl[hIJ^C|;ݨٮʳB۳}{DZƷ[ܓn{N{UlL`;wƦEn77@UG"Sa,͜h>%v8o^Ptyi>}VݞpC W9%7>!i;ѶK5oRYhĕ0q KF~/Sg>}!kp@qm~!Szȉx#cXj=+xո'2Kj;Wme/( ;Docʀg+13gne e;.0; p~"KH*$,=gˍ._h x5po^W?;3Ql=P_Fs M25阺Ro@MʣͻU4VZV#'f=}-6qunw.{2H>@Вy\\p[/$#lsoG4=;Q*m PSViZˬoo"u*:k(9vW`tpݾXKVn.Wf>#Q}9 N,vfNs=H'@UW#EGÆր .;vKI]wPBoL*Q}WI@gݞ7UWm\J 6&%W󆊃BcaK/@-Mώ<,@:XSjsNʟn3>cyg# ex x׬թkA*p=C}øN_twrISШ\-rpD5zŏv5tV#[U$ %tִ u!pχXCZگ@B~+XCloؾy;/ýTY9X\i 7M<&J2q7/Km(Z\ F5s.`7ySi-P|Е(!(doX/Y.N\;VfIXlݞh?EM;V|Y"3WpW-W_77pєm͏9s҉nNٺVN[p|cQHۉ._ y! 
/\p%Tq,BMZR*kR[d8-ݖw 8 9٢J > 庶ъބWC ^c؏|6}\jK@ wN)MbaD\`kO΋ _nfn}kѲ70r6C@ שUG_Kkf]S1v͇Y)w*s'u {U(:ݢ KM erkb} ԕru?AKӹyc{n(]/Z6n#oI]hzb-kQ0@KGA,ks1CROw9J.ysyyD@/m=,\Ogcn` t=z,;!pX J+*[_\8v5pzJ3s!ڡ28=HQF֞U(Nf@S僛@`x8W>uV13kR7srpbK56 X"H\nQ${~=^ &tFtl `lЪŋX?O4ԾdNtP]Y v#ᚙ/lm>H>[>b( z@9*޳nO6԰nre^W \[ *Ҩ+mfUΣ ́knND;RY+uCk`JjnzfpKDMv{pVWWe D\Fki᮹i-r n}ՀoZ2ϥ@ -ZeB7\bD+>R2':ȍVYs b g @G7._ X=4꧗{(~H Ľ i݁]JcJw=fhwoZ m'WX*}Fow p{K񆍪կo[@GI!.ز=Ұq8jȆJD8`]3SzлMh"w]DIj?/9s҅&>u@Y ,d<>JFv@eCY. @ɜgk/;"wȏGP+e0 ?*L)Z{\_\X3lz5B+Rw4z`ﭙ&/SMZsLUAzMp-V< ps#Lkvh?G_3BDI8zcV@M!hTD:WgzEg{ ` rShzVDIeOA+Nj.rUYJӫppg& sBӳ"Dp >Ik~3VHLm IDAT(BWK+Km?) ܈*2[w"D.kG}m5턯t&tBHt@%,*QB'r*n^r'Bt%tB!DPB'Bt%tB!DPB'Bt%tB!Dб5BQsF;SJN3":!A8P)pgDt -B!::!(B!::!(BZhɖ5= fu)h%J脐v{v XMOCFne8Q: !J3"w t#ĺ.#4=-F|g]7&Lmۆ 5̈EhK}W٠ !8OG^ V3$L`.ŀf90`QΟv>amyЉ^#OHdHp\Rk 50;6)VCeV?YiZr'($'bh#9g*"X" C!jNQ[7eC4KY j'?_3`׻iYiJBƻ6Wԍg)a⻢CrigiJOF,-*jGV⛂XIZh=AdiJ脐VE%*L؎"IgF,fEt4vS~e !0B$ D8 oo5όγ HT+/VNiFl냮,H-5ΈX &FA`#nalJ脐;bM)r2A=LBZM%wRhx ZzN1"<BFBOlD+QseCLagY !vf,s⁤Jͳ"DzV~d`}`+4S4:!]&1)0N3"D9ET4:!w}6BSc5l=I3"Dy;$"F]A_=(8C ] UUHt#co+::NW=+Ũ;!UY>i~J3lllc񰰰 P[[*TVV~5Za>eYuuur/ D"B!jkkQ]]Rlذ?U񯠔׭Ec q2M3jm4 dI/|_Eɣ 7@@@`jjZXt)***~?' 
PSStZ I7Ǐ/ D`RuuuF~~>q)9r%%%j{N}+20U]#,.UgyfC ] -(&M5 &&o4qy,[ gΜQT;8;ހuuu044l1ob ?w}իWcXd y{{3sl9.*i<3^(w<2sʂuaɒ%WWW8p^^^ꊡCbҤI/nݺVٶmMUz\x3f늻z"333`䓇tXcv"3|`#HgG ] 8qc%pA2-Ə{* akk:?Gܫs:gӜ /___v&M7.\qƵu/N8=*B3mFq6E ,ȑ#?s5ѣ&aטN/˭ W/vMg􈥥%-[֡׾ 矎o[bE_wcǎYm-Wiq7"QBo0*ݻ$.XjJ.^,Fxp V%^~iX[[w赯*,,:ݺu뵕@ @\\^]rA`n.w b}&Nh"0a\zU%;WC4e%8A3P(|3 >} ;;iw}t²,N8?())P(!lmmxyyC3o|\+3@x15J(?x=z<@n0eS׿ ŋU7ITYYm۶!)) ׯ>)ٳg#""Bų^#Lo4{ B+&H$,]7oƘ1ck>|CU$Jٱc ܼyS`3<'|]%&tgh;Jt5 BpppvN8$ ~7,]{yf9rÆ k3'ܐj6#] +ϴ,P_p1޽[3ϟ!8q"ݻ𴊮_srrR1sv/**B\\~zFb"4Rj 8#գ{ooVɼQXX'!ʐZhk V[[ŋcԩ͒y"kJ4h4,FxLjMےZmd|Y~̖{fE۬8QӚmi8W[yr!jzPBo7n`ܸq L-[_){;vlÀA7 ^⁡(q;Ne1c lܸQRSSqYbkBeh+w鵎2;7Ų,BBBZ=\CJ _gii$;EU[B +p:W$K.\!CP__?scw(5Vu {W}bw{h>{Utrrz)uZΆ]׫<"8rHǕ̕2e!* ²T[jO1mw z*1`0 xb@^ CMcKd=^[h(X{?q|^nҷRuv)(VF-v(s jq7g;Zkkz.Q}`)H\!G}F]e `ժUG}5g%P|ERgD ]UQ*TKٕ*8T*~ptY{uw3`0ʴ'{~5&Uxj&z<_r-ћ s`YӧONJ+<ϑo'+:g!0Y8TJVS7!V;;؏ޞbo9k;&rvԲ:])CtT*Ehh(F[%cȐ!ƨQT_Y"֞Xa"H餮h iJ%k9OqEi(^uŊ샰PLL1n߃$) ;:x Uu"… xOO?Ř1cڼؽ{7 ⶇ}?Mհ׷ (®N.+*QxJ7B %kf3$ïd$7/KƷ:yUp覄"wzfʔ)Xb++ǎ₭[bɏ#o`Q=ΆU쩧R HYnU};+pNUu +0vuӭP("Gt {{{;vhWFY:r/HanM:Uݽ{Wxf] }(sLv]$DbiO:vJnGy,ޏB:ie/ꨂsaYuuuHOO̙3! `ܹ>fnnѣG#>>^E3R`$wC/ խ tBJw.U8.rT7ited@L+J?qwᝌQcI"fPQQ+W@, AAAjRp5ai/c,ˢRN =ZW!s^|]&p2eܯ Hi'H_^d6)77WƁp *:MՁ;v:Jr ])+[[[(Y%U^T\B^}9%sZ[(w̘1h3W5@ZZz-ƀ}+YQBWXkQL3"^[+ n)v7TȂh5H(3<Ӯ_ gCHsݐ;>άww tgϪx&7*P"R5F=诖gSNŁʷ/;oՅJM8wEZSk֬r"4qKEK+jPBW„ `oo9^^^?k.)_mk*L"IUYr !XVl'`ذa%+:۷q riWg.w%aҥHIIAZZ QTTzt pOBT%EJȩ ۫[ZVns1~x|(((@RRҐT`VVVիF^zIv{uy:u$@xx?gϞ{ァttBTJ6>s(s^dކ^uSY57D>H%nܱ,ܫLmyU%w {.|}})P:W |8~v)⃹ov+a$5F(YJJ ]iHQ'|;_؉阄L6MwX eղDTԈmڴ  X+ABbbJyfCtɯ |9-M? 
m2'_32kwSH-ۜȫGmb?#jk_'}Ž^' |}}1a3]GaY'ODdd$~w_(n 6bU7-<+§*HR{KWNN֭[M6i*\OQt28^__sܹsH$!C0vX9ppp@׮]akk+ƕ+WpI9rj76R4:!4e޿Ȩ_w{aY"ibbZX #J脐H1!HX;w /j!9g,*|5H(BYʽ2ThkVC_t]} !|+w561B^d.3娔R{#k$sg (Xh_\d]'D,FH 4_w ]A!]F "S-+oP,CYqYvHsee78+B "Wjm!^OA&9 ޶FtFGH~o b(^vD ѲxN0X e'q|Ν;1l0B!p|8ﱯ\R8g͚5+z^Ewϙgff"""'NB:,\SLz캸}O}UV!==ЄBH3B*,Xkkk^cWWW#::djjkQQQ=X,988D͵`-z/o>}0ރAc QCCu;˗W]pBQu[ӓ'ON˶hʕ|{փ"22|&T4..ҭ[^ݸqc Ik:iѣO-,,x[UU{رڴBׯBBB0l0c_|ŋW^}U Q@@@cǎ^VV"""pqN!: .ɓU>nV>ieBo8e{#,, o;4! 100iִ3==(+U ڴ~iT}aÆ ԦB:1a˗Ņ,ԩSn޼9#mMEz6&MM{njJ!1zhc߸qEDD=8:MBop¡{ƌӛiiiǩSM!gVVVx7Uִ>&&fC~~~6'W%F˖-O>|ǎŪUp]CByLBSNł `eek솶ښ\:mB]v7n6aֲ2>CB ooowoiqٻW^}ﶦҩz#XomZKJJ~z߿ڴB8;;#00P%mMܹSq̙oyF:-^x̠Av1™طnBXXM!DU5-,,3OmMEzF WE#G`;4!/T4::LZZڔ 6LmLצ;??]tucmm6|&7tPibbb˗]6 … {&??k֬Ç:!<&GGG,Y?<ӫO:Qhhh'FK,yfС?yyyu;+W+WBtö|ol~;++kښ$F+W`ڵԦB0 }YёIoߞq4^k)Kצy߫hӺyfOԦB0`{˗/\x]mik.z-X_~Ǝ;ﲁوcǨM+!4ŢE0i$ۚfee}YTTYg)'Nڴ~?p@c'$$ ,, n;4!t6mΝ'O<1]ۚ % mZLmZߏ 6Єql2U5yWׯ_%beϞ=w=ojjrDEEa׮]34!hwww,_\ۚ %t9-Z4x{njww"<<qqq|&¼yo477>66v]~~~>'Wz-[6sȑ{ָ8Z iizqB!^{5̟?UUU>zgkk.Е ƍ7ޞ6;wDTTi%tZFBpp0y}̙k׮y:z;4iϏڴB₠ y(wŋG 8pȑ#ݲ  6fff3g΄H$5vC[999 tPB !!! M}Q^YYY|&x[[[^c"&&ݻw_>zcjh7KmZGlٲڴB4nС x}lk.yp}}}Gݦk׮şI !j%K=vZZZ3g>\PBYCmի ˗M!c٘={Jښ"""pQjJ0;;;,Z*ښ~W_Jj8iذa[ DmZ !ZӧO{[r6::`FF }nk.լMgcǎ ֭Yž}M+!M AAApvv5vC[ mMoE ]Cb//PVB:˗/ǨQx}qDDnރ(kXC=G}t#66׸믿{[Ӝص!T3(k#Gد_?Kc:u VBjj*ߡ !H$zҒ߷GޠE ]bZ??hӺk.DEEЄ-6zh͍gΜIv둑'F ] b;77=~~~Ƽ.)) o>*#KSe[䊳g. {paеX@@w{{{ M 333̝;3fPI[sssR[SC ?jԨpUi=vV^LCBH W^A@@ll-uQ[[SR[SE "''oǍ mZ'lٲ|&BBBпc'$$d]|yںubxNxE Ypaw77ƍ6֭A tRL0iii+CCC%Njɒ%O 2dǰaxoz5ҥK|&g;Cғ'O='**D(wr+V3fg*iӺn:-3B0xtRtڕ}:mM $111.:yh5*wf|>~8"""M+;&M¢ET4.==}*5%I3bX5nܸٶnO?͛7SV ׏ϟϼr[֭=8(.\ԫW~~~*iCQVg}iiiU+BCC#yN:5JD%Kwuuw;< k׮6b/,Y^c75MsΔ 658)Љbqݷaee{~۷oGuu5 Q)OOO`Ȑ!NJJKJJf͚yNt%tn ,pӧ>___jJ+޽{N΢N:,00%//M 7!.FFF>}Jښѿݻwo5%E <X,geyozlذ|&ۚGƖJ<%226ޠNx!]]]{̌6~-~jJ49r$ﱯ]V0w{yN %t«E ׯ>~hӺj*uX`N{[E|HmM ( zkĈowxQV2oނ0HU,SpjB E tZw#C} CHu^]-#VWzvvLB9:HZR54e/vz! 
q7~'MիWӎ;Ϗ#kzʕ+)ȚK8GUȴ2Q<|9B۷o .!!j5D"ȚpЁwMDU bɋ/tTRRBGEKhh(jd̷/^x1M`>N7o޼E޶l騡4x8___ھ};]l.߉Ɂo8tjzۢE |dZϞ=Kݍ7doRv,K˗W3 80.r|32lh(22s[[[S[|gørdZ) LFOi"" Kִ^}}F1x8T*"**X,fimoo'VL}tӦM4e۷nzh2*zzz!k \ʻロ!H> ci=y$Roo/ipQ|M^###TSSsf@\: ֙VNtAdZ=IPtt4mgZ7 3:TQDD1T'%@Okd=)==/_|^[[<'tpy,Xp0:::j%VKɼiݺum6b;wL\vm=p-p'6mRtWHHo5Quu57|rˋ^uʢێiۥKV k  qJebbr>2eeeta[|YDDDPNN1nkkܼ8qࠃ[rdZ?IHH%Lkww7z2|EPjj*XHMMMVp'8ֲVҹsޮ={fc= hҤIB;v ___cf#k:t p92:LLJKִ VCpc82G ůXyhhJJJ V' lwzq'AȴfmhϞ=T__zC^`}ƍ555}}}i<:x,Z5>>0<uuu%''R$_d6-6mip92rV֙Q2dZŔWִu paBP*aaarbֵA2T]]L3 IIII̷;;;544Z>.&tyttaXbD_VLJ6oL7nd5yCqOO;ȚDFQI$\H>>Lϴ=zFGGn&(;;R)NSSS^8AxFiii Ν{T"vgg't:̴qf^fƍ*|NtpZ7<ИlڵkJKK3PyIDATA'wcBB{3DDd4i򢤤$̤3f0+5p.tqqϬY*Wv;}5h4GQkk`ssfP|/tT*DFFKHH`i~:X,db믿tudffe gL&?^YSbj[[j6pܐ#\.z{{3F=tiTT;:88b^ZZ:yp<@NNR"hgϞԙ֑X, +5p_8‘iBn|/tsYqT*+$΃Ԧ5lRфem,lAݝi&3i)>A['!j-P(G 3k~s ,[%,-:t} }-+*&¿ gPG݅ج8"eŲ]A b ;l õWϙ2_E,(ۈ#Zsێ<5)"E6N#ӽEkۃO0}*rUt.iei #]r >cU{t7+ԙg߃xuWB_-%=^ t0uvW9 %/VBW'_tMۓP\>@y0`D i|[` hh)Tj0B#ЪhU# ~yhu fp#1I/I"0! 'Sdd:J5ǖ"sdy#R7wAgdJ7kʕn^:}nWFVst$gj-tԝr_װ_7Z ~V54V }o[G=Nd>-UlaY5V}xg[?k&>srq߀].r_r_qsGjy4k iQܟBZ-<(d=dKO a/zv7]ǰod}sn?TF'|3Nn#I?"mzv~K=گsl<b|_|4>?pߋQrib 2* (Ѧh{28oIyes8';Z9h6g>xRx'b8ՃWOϫ[xn%|^z}%x c8eXIfMM*iXw@IDATx %eyggY}!mE0\"Ch\> Q&r׏nD"jȾ08,={>Ot}SuZ~t:oU﷎鷟z*W7aC@@P Twt@@<w  D@=7."  @w@@G&E@@   $  ;@@" @D@@ p;  @#p"  E|3o|E8@@@ @[؛@MM^Z֯_!C@@3h  @e+U@@%@  TF2\@@_h  @e+U@@%@  TF2\@@_h  @e+U@@%@  TF2\@@_h  @e+U@@%@  TF2\@@_h  @e+U@@%@  TF2\@@_h  @e+U@@%@  nߐ pES@@ /?~h !ഷъ& pEk@@`\}64@_1# qh p{zDs}~ @@@ S4LG8@HP*Rr^@H3e4{Y $B]LPdo@@ nWTLGǰ*-@^;@@ 6. 
/^q a+ @")vtDt:ѸO@" I"{o]G@ hnѕ 2@۩d % p/,E@d `jw%F]~_d5sGn*O>fmc;m| T l~˷ ۷7,۶mO?]瞂e\  @RYTUL ͞=[֬Y#> t˷9@KtX [|JK0=sd`}vH.n:ɷy %@~X:|V,H|ߗ2k֬=zԛm_jUg .+Vȫ_qfٷo_@@x bjoHF^Ա^KYPm۴i2>2e^mq+wߝт 2@#@Lru%G\y{sZuuXlҤIm&N|eDwX/ϙ3'c?;@@ nOt(#@$x@>dT78ݶKH?Kܹs  dQV\ -[W_G/!lm&f-;03P-_\ϟW@"വe./݂?_f͇3gKYT7mLͷ]9x dp ܳ>lҗ1/~Ln&ر+rJ<˸(;@@v:G%TR}VE楗^KO 7v'  MC47'CUroЌOQ(r}U7W_-W]uUv񦦦FV^-ׯт  GAIݓuh}2Y?c'A"7X=:4Ӟ|e;vh" ,mh:h,,@ 9Afsu|:  `K?U{и  %=V C\@b'S=v75"p {  RdBzc-[F %`rYY؉ !մ]Ά@ˈͥ@@ ^8 {dnE@&;~'~3Z@(@ۋ9 5  .eT@\@+ Gq@i2a"| @@<E p/*''C@ ;3@-DLJbC[@@@PK$*4@ @l Ipے8l pC[F Epq/(#@G  \^J Cp _Gq!{0C@`jQ9Iq( $H=Aea p]O  .]xSuK$uƌ Nu ({E9 N M&v75"pM  P^ /hMaC\  Kec=+}> [@^r}.@:|iD{GG@@<~gۭWWI{n8E@~WL7*y D#@H&.Ys%@Stt=:" @:}]|qاO0# @2> /`j2%{. Zmk?^M>}`@#ඵ[; p/,E@H qoH·_=" @vWuq Y+3!M&߉0= w>  @;&!pMagcxS &7Yg $G#0D4@@D h+ID}C*6Xsa@!XK[Y2c2{`:D@8 M1э1^\@"$TLnW ܣ{9 @{=e Kp8e6l*C@D ={=h\=G" @ζY15f_=7! @ywk:o/MJ|: p[o'}J+@^Z_Ύ vq'm  p/@@ A\ 5CQTJR\@B%7O{/&Tå3 pM  P|3pƱS@H@*%nvj2cѲ(Ea$  eU)3Q;{4F@" 8-;6[Oc gSa $Jmi5w|Bc  p/  @5>j| pn@HoT)ߞ/GFIBw@@~{2;_[F y8>5Mơd4){E9 DF_@q܋ɩ@@ bVVOHb4/{$9 DNo~{JGQN2@-෢)};s@q܋Y@@ bnw}}z]P*@^TNN Q;n:F~2(@Oo~{Sc S# @|V9Vx Um*S{Ų gwa/ P+gcmK(@^B\N !hWMzO{'3t@&45SU% 2qKg˙@@ d Z}f0dc; pOg $Jט//V@B"ේu!yt p狀 >*TIw#*$pʝ  \ [#PRrr@ce AS?QKE2K  @eWqtt# @f_cUch@K-@@noݾp2h\8s@@4[\v,t @"/47=z# @ܶ6q}$jD*p?~[κU\u{p#tuuy4iRƸ-ƶeDwuYrwf|\ϟ  P i2GR +sǸj> /^,cu]'0?cr˜9syfmK.ͻ]9dӷn-}@@ɿJTi@E"nR6m 0Yn 3 xwwwg@[|y2N@Nع]@O9o}hPx D6ppyɌ3Y3gz"rm||ۥ   `e 16zlH6mjtMrAg/v=@rJoF7   w?* ^|;rȑa" m^s5bf}vVmƎ뮻pm7|^  @4r\ІCzѮrUn|~W~vy[BKG4\}L$sƍꫯ53omm7tޱիWjg  PbE_{Uv:lt}p͕Ó:DcrD"p{cc̚5+#X|俽ʹ|ug= @(@jquԺc 4C R J>1. mj3n@*.6oJ2şl@$rIϨ@@nO< c絠=E~{\{oG@ Ot;)@'@@ > w;:~= p=F@b t@_1E p/:)'D@r M:2 ..qB x@@4 Gݏm@@ |Ru >g7pz4qƋ @6w(里! pM   \ Pdsd+Fυ@@`MwqjBYt@N)(&ی%@Ao@@`<>q;d| ǹ1 _ Hvaɔf]G^I& q{8\wu;/w^9i662@%dFM~{ſݨ}NUWW{g_Ա=v! 
qMp}CZJ _gK/4cqǥ!{\$@@ &=oVc>Oʴi|J>/?яJOU[n>۱c<3/26@"($MIN@Fn?W2so 'n3V@B.Cd|ŋf3@@T}ɔ6B筩;| @zM &-&00@@ ZD~a @%t{<"Z{5I2Iی@ 8ǃɄt@t+Cק+㎵ ƭ4dNvLv(#p] \m_MY'[Yt)_ wuۛ۶m~=PYGyD6o,7W^$ cy[ p4nX'‒ X|FwȡC봶z~SGWUۆu]O~R:ҨleƍwK_<|\\_T#T;iV^-[sJ@b-'}烎tHxØR#}c<`ppPv%+W'yTMv= D] uL:A{I?aYjUZ>k`(9 p;LOä6CODH=B7" 7 -# z @ycL#N{Bn4D@ lnkcZ&sѰ?E H8e~s2<@W9v,ߦt0H@Q{Wܛ|䥗^݈O@@ o?V`2%E 3ΚeGKKرC~, @$Xn{Z]Jׄ nX 444e]?v-d+ck[݊ @- `E#Y5*߇r_e˖-W;000X*g}_XM @b.ꄞݬfʰ!X}+V>yG3 i._\,Xsv" @rcgMg?ۓ%aY ]w?5kȜ9s^  {56&$ԁ2d M7ݔ^GG̘1#4D@8k A[x @ j~'d a̸<(eb[wwN5O!)JOʍ7(/ p|tx(5:7T(=x;|gևTm7MI@@ cG}wVI=HvnOrڵkTG?*wlÇ  ?;PCP=~_FH(5\# .l! @@ VNvG5X}LaE*\,\O~z  -Sѥ.=8ѺEq?s;O}(k? Z $Ei8V]d>E Ν;=}kcrكl  h4ZqgC %Un>cMMdWa$^  m841wެkZf[ZZT\O[[N pꏋ`c&!Ћ" mz"-sǎӇ_{?rHG%{_'&6K?쳇0^{>G@h 8G2{, ܧM7SN>\sD'?5P͛eO\2^@'v}zawnP|kN81`XɵYN[n%W3# @ζjPI&w?UVsҤJG-d[d===ï_ ŶsMd nٟjkkw!k9i ] &-Tjo5=]L[sOr?uhѢ\WE 7o~򶷽-Cgo?SK<N p%nz@+e}Jͺ[z|˵L {׻ޕQ%p@:ɉ@@ Adb(۷o:rGKZlT!hzOى @b&q1,a>Oe_^l!͂vK93˚^1k׮~C\p\ryl`[[e+f NV^-fLm`v" @~^}4?d-bͷ^e뮻nU-޵sLj,JT{@>/9j`~c\/PˌFx饗~jjAeC_nz!Yj|#joQxc7bT! 4Wׯ RzБƘ\s,-Oj Xa0ʉ_~Y{t~c-ʌ }{+^7y3gqc׿.MMM>| -77w]>杳foFo߲e˼ ޞk}Q@@ Ͷڛ n3f&m/r/}|,[Oi16uV;j/g P oołf[[[Ԝ ̞=~媫fm7tE@ػw>6V\:葉mY$^Ey8׿|_}{A]X[ۅ^8{EyCmX>r?g؟;֭[=`O@ 8Ǐ;01i (ɺc\(Ųzrʊ6aNG,7I9}f fg8 ;@@ C9|(c_>;jОin{> A*c),6{ fy}n:k;mڴ^8?꓾{@({1V5fhߥ)6l O(=_J暜W߻w=[߼'}7{Zfӷ_*F.];^s{+Cm۶+;@Mɽ|̐E(Jn^}|FOTe(>L0A>я};3g:ی-]Tm.oi\g?= qpij 4<ȍ(J~饗zd/XR>4 n6ã?~{j/CHҷ}˗/|ۥ  ;:~L@@QNZnUclV$[`i&ֻ. [9ȑ̙3S=jXBm7ܼF@ .  -pGVa3<# ^u+Yf.Xuj7tU]zP%K>v\F^  pBxSg~k:V~lRG>bwvɓ?Vݖ=s}p}nCYY[鳟/@@JߺtIN]%p'׿xeFV7˽Y|#?*^iiiF>זo\~t @ N6d~7+Iqh}Xg?+˧>#Œ~k׮K]Z`ۮ} DI2AvdQ5BE -_nzs_p^{,C@ 8aζwq#[E{(umǯX @B(vl;Xp) !Pr8qVU@@ =ၢ*|@2=od:tHf; DG!Pwjw8tq2Ї-oy\ve^]unS- rw{e;{@s0_~,mmm2s̊(f7y+~;5b;G  \@K:;:Y3Ȏ'`Xlm;OoVE ܭV_{֭[Û}_jL>"  \ eMڑ"=ԡR:UK@!@H))gD@ 9h z![)*3>ZQQΞ~JYYK  m۵K;]n6jSЈ+TWU6p'Ufč% Ip϶)^KLҿCt DN9tH$`n.MR@4ʔƕ" = PSq52X&Vw^=o@@A}:G.ǜ;cTnѸW)ƃ @)4={(55lJ2l PRrr@!PlAihize(= @1 mocws 0D4@@ l.d;㘈 V[*0 ܋ɹ@#-pl{@9 ۷ *usa@*/Һnzi6*P[[+{\yro~o|C:tA! 
"-n̶XB/x?a43a @ 4hw7D巳!@g͕@Zl"@0@)>(p6i?lHN^#P0s@B$=tB%t@Aq0 HkRwpGX62@ҏނKy6Oo5[ Pr(s @B"_xK2;I7(@ĻΘ@)Sw4-a="P 0* DD9_Dnuj}T Bcn]$qc.׀q/4A@nO8`hv{9  p/ܐ3  zVsΘ[%}䶇 3B@ UkmǁMh@p jͶK|MD]؋ @,6qeKJ+ɐ؏({159 !H >mg=d$X=7#[f({I9! ! 2}{wM{r )@>&" Mvu̶@J%@^*Y΋ @|m5hn5^@ D"a p$u6 6Ά p 8VR_~[⫄@#w0 @ܖHm'+#,@G@&3僻?jZ.aq p˝d H

,SSqF L7  :[in ܯF=r/+ss8KYg9%{^jq@IDATL4B@BZ+ҾAgJ/44S$g(WH{n7E@j^n4|y綵iK3ӟS4D`<Res@*$භsH[L@[R۷#oӜ~yuyASdFnzzemS]F 8@ =Z}TUy+>+_3R;4h{uegf˖6%@^,I΃ @k"SyWsٝ]!'#מ҇RGng/]F`߲O @@Oq\?) |#nsWAg+]NybLH G5A܃q  %p{{%AamP\O|yguA,m؊rl)nӠ}+!Plbr>@X˙;ضES8:H QުG{-xpC-TC&[\zr6@ 8"Ӥ6fss\gmUTϝ//8 UrN A{ ;&@^4JN @0CRGYuČ :ilkm3忖,^?j~ª&.߰rF@H$U)EZp!TgN2U̕ ߐhZmgC !`Z[;4n[8'Wm8Q^yLu34^`~T# P$"Ar@ 8 =)ap;4}i>,5{jj䡕Kۤɣ.0C6Z&rMKg˙@-/BK[ɫTREf\& ܎NImӠ]SlZ^?uڨLwA,Q.)Z@N x{BKH @/hߺEWG~`[6izb>rMKo@%`QplcAᓶ?_Bv̚q部(Ž ޘ+  0,vjƾxESd19viɅekOO /c: $@{  n4J?jz~RdeJixβ!hpaGg͕@.E|~Ԁ#A{QE^z̈f.e 5VFeCTRk#$FmjHҏV}'F= cv:K~lJ1g4M^[`} >F@`^ܱlљY*p޳#f2]'-h_d&y.^#P.rIs@d X^7D (hf)VKRV=fЖ95py2X=:,A_Rq9KQFCrJN C6nA7[h.A Gvuk}@d0y|y;atUn]̴MeFK+ @m/hLkS)x3M'Y%]':jA;bMCp @ܦ&I+h`;5p# xv%}J-&yA{;rw3UQG:$!t@ >|ܮ9ݍz&k'E/*){8X>qM<كKG&<@ Nè's _tI9fpvԠDQA{涏j5h?KYbC a3 @ a3lk y8֩g.zu/6Ӿ|i2uɭR-C>ʅ7 p=G p[hm嵟.WjF9V %ݷ3C 23f͒+B.천r=c6{lYf<2F-vC_@q2jl%@NCm:u<%Ӄz9sPx"Uٶּ}dɒ%b .ˋ_nnF@`1te>=}us$P琖{ܵkxa%:kZx̙3'c?;@%mm j> 2 1۷&d :~&hmM>8gh[nҥKeDwXJ̼y2>y6@x ׊SwAՀU{ _RjǎQjJ,?K6Μ8gAδ64F  ϟvwwg0[|.$@HQ9i>btǞ/W>}Bݧdҷ{ix3g{RM,6{05v= @2w4^ :ՠQvP]uiׇ>OKyZLWHОNAtMrA?5+rJo8G(PR۵^rwM=6M&f WkmLh D.p 32kVJ}Gw[^zI.>Y5v' @Nnd (hO9MyܮnIVSg{21_jfva4\ݢo\v=$z-ȻQ}'VIϗ7W_-W]uըanA9իE!@@3&qO7:Wt}{F8I=RjΗZ$ @\G5 B:Si/< Α#ۯvy85]ga|ۥ _)0hOОEԣ|fc;syr fа# c~ @4|]s5MNgi Wt;NikiVj}5}[pLixd 0 UiFymw՞hKqȨsD  pOf YtͯёwKLd-`ݻҷfg-UCsAT*Ǥ>IIی@`A{ʂ҃S4hg\cOie8IKGxZTPfjΐV{|@q܋@"`)-[7-TfOkCVx=|XC3Rc!}13xc+1f% p/!.FF"}lzӃtdtuv=KkgF)ȹ!ˢJV9jȉ pO-g @}MMA hJSPkau!;g浧o;5ǚޛ%}Kii3=I pOg @4hOm&ncCA 6δ7RVGWW>uX]綨SϔfgƗhZ̟hN{ A{; p; ixA<YݮnIPJ<>vOSdҷa%5qߓ=&:fe~5C@ L{тocHI#384~7.-;[:ſ%@@"%3xOδ7NsˉYPe? *==j45UM>(S {>JAҙm[N{cA}ՠ)}5kYЩ#{؝9g["qR[=xΰL2 Y:@/R M@Dr6!N[C~ѥq\i#hfSڻW$,5z}23/sβ_Hj̰'/"@Dc@B'I,o:δ{n\4I~~9j 1}'9!! 
E@w HooAcӢ1/h>@9:ѿddv/\(O,X"Yf٭;uHF> b @2m .^i_?舛Yj({9 @IYGM)da+p>X?;/_Pee?Kcu[Pi9 bCs E*ƤlYvӔuSG`}=~Lr>?uU푹t4ѽwHP/r^ѡ Ѡ#k(YmTtZ,.;S=WsٳoKu@ ف؋@ K)@(Sé}ԙzR.ͫni19g&sٲ&k Pg/YjfٳR J",go5%vkоQgړW-8ZL*}rtxͲ[.\fٳR Z#p;-t}{L45"Js/N>/9>\tRf ba pݠ/ sv*>=@JV=Z9KѴr%LsÄj9Y|@%u@4;4;wiOrEƼ1= mSAqaޜd|JŴnmFgFŘ@E@ nomżA9IzzXGsWީ%Ksϐ1qVOaQ pʝ @{pRWBus<8K*2OՉs>̛k7OY\^jung/:ՇOՇTZw#IXBF=wJ =؎v-J1} \O g#O"J?x4l !@A@rZ5Rc c}ݮSȯOF-u{zsߪ)e c?e4[uv&V jB"@A7@hi)yO, -`y^jLCv/`Ҏt窴 &Hݲ>A@b״ P+@^^o$[Gom/tUP-=Ucw냧o!S9SV>~9  "jRBHnѴͺRWI Ow[Ԡ} i1Y؉@~ u~;]\M(tk *r7/`o}tҘ|k R#O@QEuaL  pO g @9-SYvSW#ڍ:^ IwO䰏̑W,g&XU/7א^ƙ({ -O譀3Džn1Gl*:7ޤUbX=5wl:aqW Æ pϽd$ @(Ŕ4ݠcb:aohGW;}hw-5wVO띮'v{00Ҷhx;>FIvzΰB>! dgF]HѴ P \[5h߮Al?Rt~j$w=w#mblS/#ش6xӖ;0qtaSu뮖w}ckdn ߪ?5Hm5{Dg`{P}w@܋Fɉ@ jV9xEaMgz2Z횻fu 3gKdS86e]>Zί&`W!kEU?MiRz,w%=!P|rF@ :ZW~[ͮr{i悮Rm1)/w^7 Z,WΗViiG[Tgh٢#pޮ/-^ՌStU`ԙ{;ݞGs@ ɀ9p@ܞB,`Yv7JiS8h}|A vSfġh3K,z,ݫ3^Dφ@y@ Xn5%)/G|i[${mnASSz]yjqgEYu/vVZzϖ/Wvfxxi"> p?WG#yt]Z3ko6hEIClyb٤GM6?6[nI/%{tԩ2OΪWMӻD '3hHGH:-p$3kfk GQSaGw5`ۯa~;kl؎J9MuY}IeRcZ;a p @AqV-X֪f g)nW׉`]sץ'Ͽ.H 5K0ItV" @v@@?mL`ݛUJ .Y{J@g-XYvn6þE yvjn 5}l9cA\>0i%-X ~~ͨDӿ̟:Yj4UZss WK"Y!} *BUf]<> uKkåNOƓ?U芦O?Uvk8ôlg]_?䮇1Q-b5OW*Z>R_,PE{(AD xͰ[ZfaM١Qq:u ֽj0>RO=Zfd3JNcή0 [UNSb Nbڃ˱\n_w$!eR`“uz4 )S Q_9ghEol D= Ep:ũ8-J7J=C9h&u\XA/~; ٩k&;Xhxά_:NGg[MOJ/jz.r4UϘ!O.u߬I>Ei@N4|PV6՚ۡAm=yPs- Y+k^=u{5G4PAY,i@h2 6{Ne`V_ʦo^n5X-wYٽ˞3V~v-7"N+4X&X{ r;(fa`yCTib hK% JFHZIeZfkajFjBDM[VL͵,"iK * ~?{{9w9:}g}>{;K uRPejEG^G!/(&HH @HwX q舼RZ-X_EڸS#ٛ̈:FFzZ7J+)wT%iUGO/՜t®.G bU=DI({J @ C^0XG4UPv?n/ZA/U.P|_CW6L0;٤v_3UEi*֋hMa}X+Qus?<[LXhIO1"4pO3`vO$m>;++ ^f}?2.z纈>uI/&4%*և{sHS *OU>Aˌ+sG]=K9~(RtyA^ɭ'Hb4lJ$" wa+ 0k4,*q*5ˑ4Ym +l'љ9 ThWt{gCRorX՝EpD8|z..K'7h?$@'D$&xa-0XoiqAPuIXQK'갦5&> ϸ_c$LV##`G@藣,í%hIW9\B[ VttsuCp_.YG$Yr$@$. T#G)'" P~Pp8zl"!)~hQ~uwH߯Q_]*03^Uu}XeDL06+ s*EZ]įe%%$Џ P嫑 @VuJ\`R&sAiw` E fQPd)%F5#Gk^Sq{A'H1A+W= s!fѬy8jEZ!΋sU=~g6L$g~}; @ww"Tc+Q`LFEA}J)h0%݄:DzSd *8?0PEr@E{kt<q^T㘨B}`MvlIX؃/K9 @'@oH'`~Sj=R բ^T t]50e`?~*6jM?/ ). 
pˋ#4uuk1+yHwz~ q+9y\,._:#D~X&H{x @Rp{AnkFۦVJq(ЩB]7B %̨ ]m&CXՂ{X9:rK \XǪ} stXH/Cnsٶ4tnu>D PC/ԼXA {k !(VNR|;r TtzީShŢn]sKiְ*SI;ǢÃ0CjE%؅yXLu[#y8Wfj619r^cHȂ0eH(}<|u$A>ꠢ4AVW N+ݒo苉rsuO1%2BW@%Ju%v(Q>VXءknEŴYUL#7X~簞@Pϣ8H CPgDp$@ a\!UC)$56J&ԏ`\LjՕ% Q ᛞbjVQX~谢8W+:Uy t Q^>&$Α̎ojq`H#99Dq*!  {a50%& /pq$O kI]]P# .X5Y5УT\ hߺ5zB]#*8BJ݂t?rK..=Q\'hϤ VE\WLZ<_7N \V ĝsG#" GqVN$T8,84a$v,pyZ+U׵H9,-CypmR1~T-8L8vѽ%^sC4ﻄo,7xHk9';rX!ʱ,--$@~#@@/y}t8g yh 9{0רOyT56Ksj)Xs A#|QBH=Dnz q A!<A#FBpQer+\S;Ę @j(SƻHԊ 9v`@WDR]}-y~l:QXjZۥF+;~V&RFE9r`%;tH䝎צH/Wp0UN^e:6 r*-XO\R`!d9yU"=Osx yH|N> %q_pDQq"m!)[ې/zA}n^&G^G[IÁ-T,}νC}r-757},C!U tT#mfW簒g9*z^ԩK$a*Ķ#QG֡L1Q! #@{$/R y',MEEu(79TBɬ5B4!!ϮC!sFh%0tDF3JS+{]}:N8oSro'b=(8wD:Q;pVG|h,`MrM2* ,1rHGy$@$@ pO+ENA 7˹ uX;%E7!#aMyHǾ} N&sLUPoF(5nPukT*Uhe*ԇ/Uq AW2-#Mw rb8\XPN77Ty5ZSg @׵' "!,*UU֣F89B+{Y&<Ѝ*Up7MzTʳuZnT:V6a #?RR%z^YRz{ֲw8wH!&! ʎ!a)ղ] a2 @7ݐzH pX;\w mYCA%r,O!9,M*LTtWꮝ96Br=QoB\ZX;U\fZ@.q/V1 T\_>J}~ȂKJv"9##"sk׷ dQT1U>n,(B*P\"(|   p>lqMhiVlXQ!\ۙ6Ĵ]!QMHt; FoǟrXT*MekhnXٖ>tr*F۱P "!+YKG\)U25L #O(7WD+>@W$  W ZwuϞ=R^^.f͒ęN2jDA?Ceϩ5=geֺеrt#:D4}XNo> viУC7ҝ&5϶#vu "ٹ֪m(V]Zekl܇{uL='*v 7gm 4Zᎋ 3 C`֭R]]mQVV&͓M6_,j̤ԠӶ,Xw>2sϨ/KN^ Zi[f!  p km&ƍ4c yeǎrD^rn#-}>tYYꎃqqo#6Zl{#r x4zhڽ{w 1#F?;dW-. cg`(>݆=>:".yav PYb[.!Ag2 ';~Q#ߍ X5UTTt_~Y֬YNgL, 8n[Z.5ذ`ki׶pt'   wN744/69~-S<^4i\.ZJFTxiC ? N6fc#Ǐs%x3\sMk˗/V     pxccc7Nĉ]c % b"T,RMo;\bHHHHH Ned߾}R^^5{0rJT=OHHHHH R/\P㏇#[d3,a $@$@$@$@A +)3һkӦM]vmdG2@75kܹ3[ٞHHHH"[inn* 瘪ݡɜHHHH ]|2$6]5jTl5IHHHH =f!    P'MHHHHH P $@$@$@$@$  5 >HHHH @$6!    
&@3 @(&$@$@$@$@$(z|    H{؄HHHH{_O$@$@$@$@ I @  ټy Z2 C`ѲpBw:c/ 3f ]avvJMM';O ++IIDATKʤV7bk*uuuZ(>|%~ֺFٵ>Q|͛'/q ?"o\ILD>rUWɘ1c\铝'Qs=W\!&M߈Xn,^XNZ(> 6A'?k]#qF9eٮɎ1w |X }}e Cg>y'3̰˧>)yA/Lif_,X O#:,Yz,] z?|ke4?.!rJz$@$@$@$@^!@8IHHH|Mϗ'     uaaa gQ!O ʆ rI'D@`Μ92v^x5I&L 5.NIHHHBwI$@$@$@$k~< WP{e8N    _{E|ee%wty:;;eϞ=R^^.f͒b Ks0޹sɓe̙?=*&G؈);t]VXSؕCeDz۷ڵkIRzwʠA*.x뭷o˻+2{lY~=Ż |źuߦp?$^(3pz7ސ;2p~V/obcƌEy2q,ά|GC]we_F!yٵkl޼Y@|vyWѣMok&|ӟyg}V^u$^LBq ~zer֭[bIeee2o<ٴi9] ϲ 矗nM8 ϗ~r 7؝LB#׌+pcJ?Oe#+++=a$_{1}LNhq+gG= ߡCF]Æ ?69ݘHaǔcǎ +Keʕ)1}gazE,C!@sOoF=γPnQa,{駣}#OzLBv2W (oЫqO:vRhwg+iӺ/:Bԝ >+C1G.[L^)//QFݻDr):HK d6l`KKK8 ,㉻c/vS]n5,uk (3xR((G3C[p\ٸ?viI~6-\G?%v.طoviN Kletm͚5=읷 PO\E\`lSO_x[:w\Z{8#k?)ccTCqng94T}+9;;[nƸX<|BG}TfΜ)pnk ;H#b+?Im!B0 @';R'JH @EA$@$@$@$@i&@3c{    p {{    p {{    p {{    p {{    p@ ^׿}휜>|1BfΜ)ӧOێ$@$@$ dn`[  Gw]m^x,]T?Y~m9宻  L=etHDo.s5[l˗tttȍ7(w 8Py?~: $@$@$ 'CmIH @III&;;O{QFIkk<, @In'  ,+p]"Y&  pqjRؘH?U78+bɓ&beeUxQ~;#rYgE]d`alMMΟqoacVTT?9|ٳcHHChqdq$@%w}OO:N9sxדر+ꫯʔ)Sdru>9[^?r饗L8ydN[ꫯX?gm}HZZZ7a}!nٴi|+_/r,=s  #@ys A|u}n.7nԭ?1v~ _&ᚃv)'Md_ w޽{(6ܹSn*~v_~d"  woGK$FǏNmnCGiXˑ_߾ |ke]4|QسgŦMH!Z?ԩS_򗒗gql=.5?M7षzK(IH;;ső ">py'>~ƌv w1\m_ys8Qf6:$/ljaa!qaʕ}ƌc_կ:-6ml۶->GVL$@$\IIz=c=b~H7oj:_p9va7o^T~/`!׾5pE~?nܸEC9 x拣%pUYf=) i&Nh_lӼKƹvQg U#@/FjpTp6[9',p]X.c"#bQի4X Q?vjꪫ΢Ԝz?I!!'!ʝK+ ]GTRRb;Adx$@$@O=#$-~;@F&la/B \9"'O_)oi!Xu$@$@"@hIHHH|J>>x6 P{k8Z    pIHHHE[ђ O'M$@$@$@$-ޚ/HHHH(}:|m    op|q$@$@$@$@>%@Ӊk x拣%   ) wN<_HHHH[(ܽ5_- O Pt$@$@$@$@"@hIHHH|Jݧ&    IENDB`brms/man/loo_moment_match.brmsfit.Rd0000644000176200001440000000556314361545260017267 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo_moment_match.R \name{loo_moment_match.brmsfit} \alias{loo_moment_match.brmsfit} \alias{loo_moment_match} \title{Moment matching for efficient approximate leave-one-out cross-validation} \usage{ \method{loo_moment_match}{brmsfit}( x, loo, k_threshold = 0.7, newdata = NULL, resp = NULL, check = TRUE, recompile = FALSE, ... 
) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{loo}{An object of class \code{loo} originally created from \code{x}.} \item{k_threshold}{The threshold at which Pareto \eqn{k} estimates are treated as problematic. Defaults to \code{0.7}. See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}} for more details.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{check}{Logical; If \code{TRUE} (the default), some checks check are performed if the \code{loo} object was generated from the \code{brmsfit} object passed to argument \code{fit}.} \item{recompile}{Logical, indicating whether the Stan model should be recompiled. This may be necessary if you are running moment matching on another machine than the one used to fit the model. No recompilation is done by default.} \item{...}{Further arguments passed to the underlying methods. Additional arguments initially passed to \code{\link{loo}}, for example, \code{newdata} or \code{resp} need to be passed again to \code{loo_moment_match} in order for the latter to work correctly.} } \value{ An updated object of class \code{loo}. } \description{ Moment matching for efficient approximate leave-one-out cross-validation (LOO-CV). See \code{\link[loo:loo_moment_match]{loo_moment_match}} for more details. } \details{ The moment matching algorithm requires draws of all variables defined in Stan's \code{parameters} block to be saved. Otherwise \code{loo_moment_match} cannot be computed. 
Thus, please set \code{save_pars = save_pars(all = TRUE)} in the call to \code{\link{brm}}, if you are planning to apply \code{loo_moment_match} to your models. } \examples{ \dontrun{ fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), save_pars = save_pars(all = TRUE)) # throws warning about some pareto k estimates being too high (loo1 <- loo(fit1)) (mmloo1 <- loo_moment_match(fit1, loo = loo1)) } } \references{ Paananen, T., Piironen, J., Buerkner, P.-C., Vehtari, A. (2021). Implicitly Adaptive Importance Sampling. Statistics and Computing. } brms/man/brmsformula-helpers.Rd0000644000176200001440000001114114213413565016252 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{brmsformula-helpers} \alias{brmsformula-helpers} \alias{bf-helpers} \alias{nlf} \alias{lf} \alias{set_nl} \alias{set_rescor} \alias{acformula} \alias{set_mecor} \title{Linear and Non-linear formulas in \pkg{brms}} \usage{ nlf(formula, ..., flist = NULL, dpar = NULL, resp = NULL, loop = NULL) lf( ..., flist = NULL, dpar = NULL, resp = NULL, center = NULL, cmc = NULL, sparse = NULL, decomp = NULL ) acformula(autocor, resp = NULL) set_nl(nl = TRUE, dpar = NULL, resp = NULL) set_rescor(rescor = TRUE) set_mecor(mecor = TRUE) } \arguments{ \item{formula}{Non-linear formula for a distributional parameter. The name of the distributional parameter can either be specified on the left-hand side of \code{formula} or via argument \code{dpar}.} \item{...}{Additional \code{formula} objects to specify predictors of non-linear and distributional parameters. Formulas can either be named directly or contain names on their left-hand side. Alternatively, it is possible to fix parameters to certain values by passing numbers or character strings in which case arguments have to be named to provide the parameter names. 
See 'Details' for more information.} \item{flist}{Optional list of formulas, which are treated in the same way as formulas passed via the \code{...} argument.} \item{dpar}{Optional character string specifying the distributional parameter to which the formulas passed via \code{...} and \code{flist} belong.} \item{resp}{Optional character string specifying the response variable to which the formulas passed via \code{...} and \code{flist} belong. Only relevant in multivariate models.} \item{loop}{Logical; Only used in non-linear models. Indicates if the computation of the non-linear formula should be done inside (\code{TRUE}) or outside (\code{FALSE}) a loop over observations. Defaults to \code{TRUE}.} \item{center}{Logical; Indicates if the population-level design matrix should be centered, which usually increases sampling efficiency. See the 'Details' section for more information. Defaults to \code{TRUE} for distributional parameters and to \code{FALSE} for non-linear parameters.} \item{cmc}{Logical; Indicates whether automatic cell-mean coding should be enabled when removing the intercept by adding \code{0} to the right-hand of model formulas. Defaults to \code{TRUE} to mirror the behavior of standard \R formula parsing.} \item{sparse}{Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. Sampling speed is currently not improved or even slightly decreased.} \item{decomp}{Optional name of the decomposition used for the population-level design matrix. Defaults to \code{NULL} that is no decomposition. Other options currently available are \code{"QR"} for the QR decomposition that helps in fitting models with highly correlated predictors.} \item{autocor}{A one sided formula containing autocorrelation terms. 
All none autocorrelation terms in \code{autocor} will be silently ignored.} \item{nl}{Logical; Indicates whether \code{formula} should be treated as specifying a non-linear model. By default, \code{formula} is treated as an ordinary linear model formula.} \item{rescor}{Logical; Indicates if residual correlation between the response variables should be modeled. Currently this is only possible in multivariate \code{gaussian} and \code{student} models. Only relevant in multivariate models.} \item{mecor}{Logical; Indicates if correlations between latent variables defined by \code{\link{me}} terms should be modeled. Defaults to \code{TRUE}.} } \value{ For \code{lf} and \code{nlf} a \code{list} that can be passed to \code{\link[brms:brmsformula]{brmsformula}} or added to an existing \code{brmsformula} or \code{mvbrmsformula} object. For \code{set_nl} and \code{set_rescor} a logical value that can be added to an existing \code{brmsformula} or \code{mvbrmsformula} object. } \description{ Helper functions to specify linear and non-linear formulas for use with \code{\link[brms:brmsformula]{brmsformula}}. } \examples{ # add more formulas to the model bf(y ~ 1) + nlf(sigma ~ a * exp(b * x)) + lf(a ~ x, b ~ z + (1|g)) + gaussian() # specify 'nl' later on bf(y ~ a * inv_logit(x * b)) + lf(a + b ~ z) + set_nl(TRUE) # specify a multivariate model bf(y1 ~ x + (1|g)) + bf(y2 ~ z) + set_rescor(TRUE) # add autocorrelation terms bf(y ~ x) + acformula(~ arma(p = 1, q = 1) + car(W)) } \seealso{ \code{\link{brmsformula}}, \code{\link{mvbrmsformula}} } brms/man/autocor.brmsfit.Rd0000644000176200001440000000132714213413565015407 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{autocor.brmsfit} \alias{autocor.brmsfit} \alias{autocor} \title{(Deprecated) Extract Autocorrelation Objects} \usage{ \method{autocor}{brmsfit}(object, resp = NULL, ...) autocor(object, ...) 
} \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{...}{Currently unused.} } \value{ A \code{cor_brms} object or a list of such objects for multivariate models. Not supported for models fitted with brms 2.11.1 or higher. } \description{ (Deprecated) Extract Autocorrelation Objects } brms/man/Wiener.Rd0000644000176200001440000000371614275473342013532 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{Wiener} \alias{Wiener} \alias{dwiener} \alias{rwiener} \title{The Wiener Diffusion Model Distribution} \usage{ dwiener( x, alpha, tau, beta, delta, resp = 1, log = FALSE, backend = getOption("wiener_backend", "Rwiener") ) rwiener( n, alpha, tau, beta, delta, types = c("q", "resp"), backend = getOption("wiener_backend", "Rwiener") ) } \arguments{ \item{x}{Vector of quantiles.} \item{alpha}{Boundary separation parameter.} \item{tau}{Non-decision time parameter.} \item{beta}{Bias parameter.} \item{delta}{Drift rate parameter.} \item{resp}{Response: \code{"upper"} or \code{"lower"}. If no character vector, it is coerced to logical where \code{TRUE} indicates \code{"upper"} and \code{FALSE} indicates \code{"lower"}.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{backend}{Name of the package to use as backend for the computations. Either \code{"Rwiener"} (the default) or \code{"rtdists"}. Can be set globally for the current \R session via the \code{"wiener_backend"} option (see \code{\link{options}}).} \item{n}{Number of draws to sample from the distribution.} \item{types}{Which types of responses to return? By default, return both the response times \code{"q"} and the dichotomous responses \code{"resp"}. 
If either \code{"q"} or \code{"resp"}, return only one of the two types.} } \description{ Density function and random generation for the Wiener diffusion model distribution with boundary separation \code{alpha}, non-decision time \code{tau}, bias \code{beta} and drift rate \code{delta}. } \details{ These are wrappers around functions of the \pkg{RWiener} or \pkg{rtdists} package (depending on the chosen \code{backend}). See \code{vignette("brms_families")} for details on the parameterization. } \seealso{ \code{\link[RWiener:wienerdist]{wienerdist}}, \code{\link[rtdists:Diffusion]{Diffusion}} } brms/man/add_ic.Rd0000644000176200001440000000217614213413565013474 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{add_loo} \alias{add_loo} \alias{add_waic} \alias{add_ic} \alias{add_ic.brmsfit} \alias{add_ic<-} \title{Add model fit criteria to model objects} \usage{ add_loo(x, model_name = NULL, ...) add_waic(x, model_name = NULL, ...) add_ic(x, ...) \method{add_ic}{brmsfit}(x, ic = "loo", model_name = NULL, ...) add_ic(x, ...) <- value } \arguments{ \item{x}{An \R object typically of class \code{brmsfit}.} \item{model_name}{Optional name of the model. If \code{NULL} (the default) the name is taken from the call to \code{x}.} \item{...}{Further arguments passed to the underlying functions computing the model fit criteria.} \item{ic, value}{Names of model fit criteria to compute. Currently supported are \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"R2"} (R-squared), and \code{"marglik"} (log marginal likelihood).} } \value{ An object of the same class as \code{x}, but with model fit criteria added for later usage. Previously computed criterion objects will be overwritten. } \description{ Deprecated aliases of \code{\link{add_criterion}}. 
} brms/man/compare_ic.Rd0000644000176200001440000000303314213413565014363 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{compare_ic} \alias{compare_ic} \title{Compare Information Criteria of Different Models} \usage{ compare_ic(..., x = NULL, ic = c("loo", "waic", "kfold")) } \arguments{ \item{...}{At least two objects returned by \code{\link{waic}} or \code{\link{loo}}. Alternatively, \code{brmsfit} objects with information criteria precomputed via \code{\link{add_ic}} may be passed, as well.} \item{x}{A \code{list} containing the same types of objects as can be passed via \code{...}.} \item{ic}{The name of the information criterion to be extracted from \code{brmsfit} objects. Ignored if information criterion objects are only passed directly.} } \value{ An object of class \code{iclist}. } \description{ Compare information criteria of different models fitted with \code{\link{waic}} or \code{\link{loo}}. Deprecated and will be removed in the future. Please use \code{\link{loo_compare}} instead. } \details{ See \code{\link{loo_compare}} for the recommended way of comparing models with the \pkg{loo} package. 
} \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler) waic1 <- waic(fit1) # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) waic2 <- waic(fit2) # compare both models compare_ic(waic1, waic2) } } \seealso{ \code{\link{loo}}, \code{\link{loo_compare}} \code{\link{add_criterion}} } brms/man/loo.brmsfit.Rd0000644000176200001440000001004514213413565014521 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{loo.brmsfit} \alias{loo.brmsfit} \alias{loo} \alias{LOO} \alias{LOO.brmsfit} \title{Efficient approximate leave-one-out cross-validation (LOO)} \usage{ \method{loo}{brmsfit}( x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, moment_match = FALSE, reloo = FALSE, k_threshold = 0.7, save_psis = FALSE, moment_match_args = list(), reloo_args = list(), model_names = NULL ) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{compare}{A flag indicating if the information criteria of the models should be compared to each other via \code{\link{loo_compare}}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{pointwise}{A flag indicating whether to compute the full log-likelihood matrix at once or separately for each observation. The latter approach is usually considerably slower but requires much less working memory. Accordingly, if one runs into memory issues, \code{pointwise = TRUE} is the way to go.} \item{moment_match}{Logical; Indicate whether \code{\link{loo_moment_match}} should be applied on problematic observations. Defaults to \code{FALSE}. 
For most models, moment matching will only work if you have set \code{save_pars = save_pars(all = TRUE)} when fitting the model with \code{\link{brm}}. See \code{\link{loo_moment_match.brmsfit}} for more details.} \item{reloo}{Logical; Indicate whether \code{\link{reloo}} should be applied on problematic observations. Defaults to \code{FALSE}.} \item{k_threshold}{The threshold at which pareto \eqn{k} estimates are treated as problematic. Defaults to \code{0.7}. Only used if argument \code{reloo} is \code{TRUE}. See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}} for more details.} \item{save_psis}{Should the \code{"psis"} object created internally be saved in the returned object? For more details see \code{\link[loo:loo]{loo}}.} \item{moment_match_args}{Optional \code{list} of additional arguments passed to \code{\link{loo_moment_match}}.} \item{reloo_args}{Optional \code{list} of additional arguments passed to \code{\link{reloo}}.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \value{ If just one object is provided, an object of class \code{loo}. If multiple objects are provided, an object of class \code{loolist}. } \description{ Perform approximate leave-one-out cross-validation based on the posterior likelihood using the \pkg{loo} package. For more details see \code{\link[loo:loo]{loo}}. } \details{ See \code{\link{loo_compare}} for details on model comparisons. For \code{brmsfit} objects, \code{LOO} is an alias of \code{loo}. Use method \code{\link{add_criterion}} to store information criteria in the fitted model object for later usage. 
} \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler) (loo1 <- loo(fit1)) # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) (loo2 <- loo(fit2)) # compare both models loo_compare(loo1, loo2) } } \references{ Vehtari, A., Gelman, A., & Gabry J. (2016). Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC. In Statistics and Computing, doi:10.1007/s11222-016-9696-4. arXiv preprint arXiv:1507.04544. Gelman, A., Hwang, J., & Vehtari, A. (2014). Understanding predictive information criteria for Bayesian models. Statistics and Computing, 24, 997-1016. Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation and widely applicable information criterion in singular learning theory. The Journal of Machine Learning Research, 11, 3571-3594. } brms/man/car.Rd0000644000176200001440000000456214213413565013037 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{car} \alias{car} \title{Spatial conditional autoregressive (CAR) structures} \usage{ car(M, gr = NA, type = "escar") } \arguments{ \item{M}{Adjacency matrix of locations. All non-zero entries are treated as if the two locations are adjacent. If \code{gr} is specified, the row names of \code{M} have to match the levels of the grouping factor.} \item{gr}{An optional grouping factor mapping observations to spatial locations. If not specified, each observation is treated as a separate location. It is recommended to always specify a grouping factor to allow for handling of new data in post-processing methods.} \item{type}{Type of the CAR structure. Currently implemented are \code{"escar"} (exact sparse CAR), \code{"esicar"} (exact sparse intrinsic CAR), \code{"icar"} (intrinsic CAR), and \code{"bym2"}. 
More information is provided in the 'Details' section.} } \value{ An object of class \code{'car_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up an spatial conditional autoregressive (CAR) term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with CAR terms. } \details{ The \code{escar} and \code{esicar} types are implemented based on the case study of Max Joseph (\url{https://github.com/mbjoseph/CARstan}). The \code{icar} and \code{bym2} type is implemented based on the case study of Mitzi Morris (\url{https://mc-stan.org/users/documentation/case-studies/icar_stan.html}). } \examples{ \dontrun{ # generate some spatial data east <- north <- 1:10 Grid <- expand.grid(east, north) K <- nrow(Grid) # set up distance and neighbourhood matrices distance <- as.matrix(dist(Grid)) W <- array(0, c(K, K)) W[distance == 1] <- 1 # generate the covariates and response data x1 <- rnorm(K) x2 <- rnorm(K) theta <- rnorm(K, sd = 0.05) phi <- rmulti_normal( 1, mu = rep(0, K), Sigma = 0.4 * exp(-0.1 * distance) ) eta <- x1 + x2 + phi prob <- exp(eta) / (1 + exp(eta)) size <- rep(50, K) y <- rbinom(n = K, size = size, prob = prob) dat <- data.frame(y, size, x1, x2) # fit a CAR model fit <- brm(y | trials(size) ~ x1 + x2 + car(W), data = dat, data2 = list(W = W), family = binomial()) summary(fit) } } \seealso{ \code{\link{autocor-terms}} } brms/man/BetaBinomial.Rd0000644000176200001440000000245414275436221014620 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{BetaBinomial} \alias{BetaBinomial} \alias{dbeta_binomial} \alias{pbeta_binomial} \alias{rbeta_binomial} \title{The Beta-binomial Distribution} \usage{ dbeta_binomial(x, size, mu, phi, log = FALSE) pbeta_binomial(q, size, mu, phi, lower.tail = TRUE, log.p = FALSE) rbeta_binomial(n, size, mu, phi) } \arguments{ \item{x, q}{Vector of 
quantiles.} \item{size}{Vector of number of trials (zero or more).} \item{mu}{Vector of means.} \item{phi}{Vector of precisions.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{n}{Number of draws to sample from the distribution.} } \description{ Cumulative density & mass functions, and random number generation for the Beta-binomial distribution using the following re-parameterisation of the \href{https://mc-stan.org/docs/2_29/functions-reference/beta-binomial-distribution.html}{Stan Beta-binomial definition}: \itemize{ \item{\code{mu = alpha * beta}} mean probability of trial success. \item{\code{phi = (1 - mu) * beta}} precision or over-dispersion, component. } } brms/man/StudentT.Rd0000644000176200001440000000251114403575116014036 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{StudentT} \alias{StudentT} \alias{dstudent_t} \alias{pstudent_t} \alias{qstudent_t} \alias{rstudent_t} \title{The Student-t Distribution} \usage{ dstudent_t(x, df, mu = 0, sigma = 1, log = FALSE) pstudent_t(q, df, mu = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE) qstudent_t(p, df, mu = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE) rstudent_t(n, df, mu = 0, sigma = 1) } \arguments{ \item{x}{Vector of quantiles.} \item{df}{Vector of degrees of freedom.} \item{mu}{Vector of location values.} \item{sigma}{Vector of scale values.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{q}{Vector of quantiles.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). 
Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{p}{Vector of probabilities.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, quantile function and random generation for the Student-t distribution with location \code{mu}, scale \code{sigma}, and degrees of freedom \code{df}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } \seealso{ \code{\link[stats:TDist]{TDist}} } brms/man/loo_compare.brmsfit.Rd0000644000176200001440000000250514213413565016231 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{loo_compare.brmsfit} \alias{loo_compare.brmsfit} \alias{loo_compare} \title{Model comparison with the \pkg{loo} package} \usage{ \method{loo_compare}{brmsfit}(x, ..., criterion = c("loo", "waic", "kfold"), model_names = NULL) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects.} \item{criterion}{The name of the criterion to be extracted from \code{brmsfit} objects.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \value{ An object of class "\code{compare.loo}". } \description{ For more details see \code{\link[loo:loo_compare]{loo_compare}}. } \details{ All \code{brmsfit} objects should contain precomputed criterion objects. See \code{\link{add_criterion}} for more help. 
} \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler) fit1 <- add_criterion(fit1, "waic") # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) fit2 <- add_criterion(fit2, "waic") # compare both models loo_compare(fit1, fit2, criterion = "waic") } } brms/man/theme_default.Rd0000644000176200001440000000076614160105076015076 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{theme_default} \alias{theme_default} \title{Default \pkg{bayesplot} Theme for \pkg{ggplot2} Graphics} \arguments{ \item{base_size}{base font size} \item{base_family}{base font family} } \value{ A \code{theme} object used in \pkg{ggplot2} graphics. } \description{ This theme is imported from the \pkg{bayesplot} package. See \code{\link[bayesplot:theme_default]{theme_default}} for a complete documentation. } brms/man/as.data.frame.brmsfit.Rd0000644000176200001440000000333414160105076016333 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior.R \name{as.data.frame.brmsfit} \alias{as.data.frame.brmsfit} \alias{as.matrix.brmsfit} \alias{as.array.brmsfit} \title{Extract Posterior Draws} \usage{ \method{as.data.frame}{brmsfit}( x, row.names = NULL, optional = TRUE, pars = NA, variable = NULL, draw = NULL, subset = NULL, ... ) \method{as.matrix}{brmsfit}(x, pars = NA, variable = NULL, draw = NULL, subset = NULL, ...) \method{as.array}{brmsfit}(x, pars = NA, variable = NULL, draw = NULL, subset = NULL, ...) } \arguments{ \item{x}{A \code{brmsfit} object or another \R object for which the methods are defined.} \item{row.names, optional}{Unused and only added for consistency with the \code{\link[base:as.data.frame]{as.data.frame}} generic.} \item{pars}{Deprecated alias of \code{variable}. 
For reasons of backwards compatibility, \code{pars} is interpreted as a vector of regular expressions by default unless \code{fixed = TRUE} is specified.} \item{variable}{A character vector providing the variables to extract. By default, all variables are extracted.} \item{draw}{The draw indices to be select. Subsetting draw indices will lead to an automatic merging of chains.} \item{subset}{Deprecated alias of \code{draw}.} \item{...}{Further arguments to be passed to the corresponding \code{\link[brms:draws-brms]{as_draws_*}} methods as well as to \code{\link[posterior:subset_draws]{subset_draws}}.} } \value{ A data.frame, matrix, or array containing the posterior draws. } \description{ Extract posterior draws in conventional formats as data.frames, matrices, or arrays. } \seealso{ \code{\link[brms:draws-brms]{as_draws}}, \code{\link[posterior:subset_draws]{subset_draws}} } brms/man/posterior_average.brmsfit.Rd0000644000176200001440000000626414213413565017460 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_weights.R \name{posterior_average.brmsfit} \alias{posterior_average.brmsfit} \alias{posterior_average} \title{Posterior draws of parameters averaged across models} \usage{ \method{posterior_average}{brmsfit}( x, ..., variable = NULL, pars = NULL, weights = "stacking", ndraws = NULL, nsamples = NULL, missing = NULL, model_names = NULL, control = list(), seed = NULL ) posterior_average(x, ...) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{variable}{Names of variables (parameters) for which to average across models. Only those variables can be averaged that appear in every model. 
Defaults to all overlapping variables.} \item{pars}{Deprecated alias of \code{variable}.} \item{weights}{Name of the criterion to compute weights from. Should be one of \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"stacking"} (current default), or \code{"bma"}, \code{"pseudobma"}, For the former three options, Akaike weights will be computed based on the information criterion values returned by the respective methods. For \code{"stacking"} and \code{"pseudobma"}, method \code{\link{loo_model_weights}} will be used to obtain weights. For \code{"bma"}, method \code{\link{post_prob}} will be used to compute Bayesian model averaging weights based on log marginal likelihood values (make sure to specify reasonable priors in this case). For some methods, \code{weights} may also be a numeric vector of pre-specified weights.} \item{ndraws}{Total number of posterior draws to use.} \item{nsamples}{Deprecated alias of \code{ndraws}.} \item{missing}{An optional numeric value or a named list of numeric values to use if a model does not contain a variable for which posterior draws should be averaged. Defaults to \code{NULL}, in which case only those variables can be averaged that are present in all of the models.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} \item{control}{Optional \code{list} of further arguments passed to the function specified in \code{weights}.} \item{seed}{A single numeric value passed to \code{\link{set.seed}} to make results reproducible.} } \value{ A \code{data.frame} of posterior draws. } \description{ Extract posterior draws of parameters averaged across models. Weighting can be done in various ways, for instance using Akaike weights based on information criteria or marginal likelihoods. } \details{ Weights are computed with the \code{\link{model_weights}} method. 
} \examples{ \dontrun{ # model with 'treat' as predictor fit1 <- brm(rating ~ treat + period + carry, data = inhaler) summary(fit1) # model without 'treat' as predictor fit2 <- brm(rating ~ period + carry, data = inhaler) summary(fit2) # compute model-averaged posteriors of overlapping parameters posterior_average(fit1, fit2, weights = "waic") } } \seealso{ \code{\link{model_weights}}, \code{\link{pp_average}} } brms/man/logit_scaled.Rd0000644000176200001440000000070514160105076014712 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/numeric-helpers.R \name{logit_scaled} \alias{logit_scaled} \title{Scaled logit-link} \usage{ logit_scaled(x, lb = 0, ub = 1) } \arguments{ \item{x}{A numeric or complex vector.} \item{lb}{Lower bound defaulting to \code{0}.} \item{ub}{Upper bound defaulting to \code{1}.} } \value{ A numeric or complex vector. } \description{ Computes \code{logit((x - lb) / (ub - lb))} } brms/man/residuals.brmsfit.Rd0000644000176200001440000000726114417771074015741 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictive_error.R \name{residuals.brmsfit} \alias{residuals.brmsfit} \title{Posterior Draws of Residuals/Predictive Errors} \usage{ \method{residuals}{brmsfit}( object, newdata = NULL, re_formula = NULL, method = "posterior_predict", type = c("ordinary", "pearson"), resp = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. 
This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{method}{Method used to obtain predictions. Can be set to \code{"posterior_predict"} (the default), \code{"posterior_epred"}, or \code{"posterior_linpred"}. For more details, see the respective function documentations.} \item{type}{The type of the residuals, either \code{"ordinary"} or \code{"pearson"}. More information is provided under 'Details'.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. 
Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An \code{array} of predictive error/residual draws. If \code{summary = FALSE} the output resembles those of \code{\link{predictive_error.brmsfit}}. If \code{summary = TRUE} the output is an N x E matrix, where N is the number of observations and E denotes the summary statistics computed from the draws. } \description{ This method is an alias of \code{\link{predictive_error.brmsfit}} with additional arguments for obtaining summaries of the computed draws. } \details{ Residuals of type \code{'ordinary'} are of the form \eqn{R = Y - Yrep}, where \eqn{Y} is the observed and \eqn{Yrep} is the predicted response. Residuals of type \code{pearson} are of the form \eqn{R = (Y - Yrep) / SD(Yrep)}, where \eqn{SD(Yrep)} is an estimate of the standard deviation of \eqn{Yrep}. } \examples{ \dontrun{ ## fit a model fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, cores = 2) ## extract residuals/predictive errors res <- residuals(fit) head(res) } } brms/man/InvGaussian.Rd0000644000176200001440000000206414275436221014516 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{InvGaussian} \alias{InvGaussian} \alias{dinv_gaussian} \alias{pinv_gaussian} \alias{rinv_gaussian} \title{The Inverse Gaussian Distribution} \usage{ dinv_gaussian(x, mu = 1, shape = 1, log = FALSE) pinv_gaussian(q, mu = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) rinv_gaussian(n, mu = 1, shape = 1) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of locations.} \item{shape}{Vector of shapes.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). 
Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, and random generation for the inverse Gaussian distribution with location \code{mu}, and shape \code{shape}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/pp_average.brmsfit.Rd0000644000176200001440000000710314213413565016042 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_weights.R \name{pp_average.brmsfit} \alias{pp_average.brmsfit} \alias{pp_average} \title{Posterior predictive draws averaged across models} \usage{ \method{pp_average}{brmsfit}( x, ..., weights = "stacking", method = "posterior_predict", ndraws = NULL, nsamples = NULL, summary = TRUE, probs = c(0.025, 0.975), robust = FALSE, model_names = NULL, control = list(), seed = NULL ) pp_average(x, ...) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{weights}{Name of the criterion to compute weights from. Should be one of \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"stacking"} (current default), or \code{"bma"}, \code{"pseudobma"}, For the former three options, Akaike weights will be computed based on the information criterion values returned by the respective methods. For \code{"stacking"} and \code{"pseudobma"}, method \code{\link{loo_model_weights}} will be used to obtain weights. For \code{"bma"}, method \code{\link{post_prob}} will be used to compute Bayesian model averaging weights based on log marginal likelihood values (make sure to specify reasonable priors in this case). 
For some methods, \code{weights} may also be a numeric vector of pre-specified weights.} \item{method}{Method used to obtain predictions to average over. Should be one of \code{"posterior_predict"} (default), \code{"posterior_epred"}, \code{"posterior_linpred"} or \code{"predictive_error"}.} \item{ndraws}{Total number of posterior draws to use.} \item{nsamples}{Deprecated alias of \code{ndraws}.} \item{summary}{Should summary statistics (i.e. means, sds, and 95\% intervals) be returned instead of the raw values? Default is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} \item{control}{Optional \code{list} of further arguments passed to the function specified in \code{weights}.} \item{seed}{A single numeric value passed to \code{\link{set.seed}} to make results reproducible.} } \value{ Same as the output of the method specified in argument \code{method}. } \description{ Compute posterior predictive draws averaged across models. Weighting can be done in various ways, for instance using Akaike weights based on information criteria or marginal likelihoods. } \details{ Weights are computed with the \code{\link{model_weights}} method. 
} \examples{ \dontrun{ # model with 'treat' as predictor fit1 <- brm(rating ~ treat + period + carry, data = inhaler) summary(fit1) # model without 'treat' as predictor fit2 <- brm(rating ~ period + carry, data = inhaler) summary(fit2) # compute model-averaged predicted values (df <- unique(inhaler[, c("treat", "period", "carry")])) pp_average(fit1, fit2, newdata = df) # compute model-averaged fitted values pp_average(fit1, fit2, method = "fitted", newdata = df) } } \seealso{ \code{\link{model_weights}}, \code{\link{posterior_average}} } brms/man/cor_bsts.Rd0000644000176200001440000000157614160105076014106 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_bsts} \alias{cor_bsts} \title{(Defunct) Basic Bayesian Structural Time Series} \usage{ cor_bsts(formula = ~1) } \arguments{ \item{formula}{A one sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer valued. When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} } \description{ The BSTS correlation structure is no longer supported. } \keyword{internal} brms/man/VarCorr.brmsfit.Rd0000644000176200001440000000326614213413565015315 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{VarCorr.brmsfit} \alias{VarCorr.brmsfit} \alias{VarCorr} \title{Extract Variance and Correlation Components} \usage{ \method{VarCorr}{brmsfit}( x, sigma = 1, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... 
) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{sigma}{Ignored (included for compatibility with \code{\link[nlme:VarCorr]{VarCorr}}).} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Currently ignored.} } \value{ A list of lists (one per grouping factor), each with three elements: a matrix containing the standard deviations, an array containing the correlation matrix, and an array containing the covariance matrix with variances on the diagonal. } \description{ This function calculates the estimated standard deviations, correlations and covariances of the group-level terms in a multilevel model of class \code{brmsfit}. For linear models, the residual standard deviations, correlations and covariances are also returned. } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), data = epilepsy, family = gaussian(), chains = 2) VarCorr(fit) } } brms/man/arma.Rd0000644000176200001440000000352614361545260013213 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{arma} \alias{arma} \title{Set up ARMA(p,q) correlation structures} \usage{ arma(time = NA, gr = NA, p = 1, q = 1, cov = FALSE) } \arguments{ \item{time}{An optional time variable specifying the time ordering of the observations. By default, the existing order of the observations in the data is used.} \item{gr}{An optional grouping variable. 
If specified, the correlation structure is assumed to apply only to observations within the same grouping level.} \item{p}{A non-negative integer specifying the autoregressive (AR) order of the ARMA structure. Default is \code{1}.} \item{q}{A non-negative integer specifying the moving average (MA) order of the ARMA structure. Default is \code{1}.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. If the model family does not have natural residuals, latent residuals are added automatically. If \code{FALSE} (the default), a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{'arma_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up an autoregressive moving average (ARMA) term of order (p, q) in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with ARMA terms. } \examples{ \dontrun{ data("LakeHuron") LakeHuron <- as.data.frame(LakeHuron) fit <- brm(x ~ arma(p = 2, q = 1), data = LakeHuron) summary(fit) } } \seealso{ \code{\link{autocor-terms}}, \code{\link{ar}}, \code{\link{ma}}, } brms/man/mm.Rd0000644000176200001440000000553314213413565012702 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-re.R \name{mm} \alias{mm} \title{Set up multi-membership grouping terms in \pkg{brms}} \usage{ mm( ..., weights = NULL, scale = TRUE, by = NULL, cor = TRUE, id = NA, cov = NULL, dist = "gaussian" ) } \arguments{ \item{...}{One or more terms containing grouping factors.} \item{weights}{A matrix specifying the weights of each member. 
It should have as many columns as grouping terms specified in \code{...}. If \code{NULL} (the default), equal weights are used.} \item{scale}{Logical; if \code{TRUE} (the default), weights are standardized in order to sum to one per row. If negative weights are specified, \code{scale} needs to be set to \code{FALSE}.} \item{by}{An optional factor matrix, specifying sub-populations of the groups. It should have as many columns as grouping terms specified in \code{...}. For each level of the \code{by} variable, a separate variance-covariance matrix will be fitted. Levels of the grouping factor must be nested in levels of the \code{by} variable matrix.} \item{cor}{Logical. If \code{TRUE} (the default), group-level terms will be modelled as correlated.} \item{id}{Optional character string. All group-level terms across the model with the same \code{id} will be modeled as correlated (if \code{cor} is \code{TRUE}). See \code{\link{brmsformula}} for more details.} \item{cov}{An optional matrix which is proportional to the within-group covariance matrix of the group-level effects. All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among others, to model pedigrees and phylogenetic effects. See \code{vignette("brms_phylogenetics")} for more details. By default, levels of the same grouping factor are modeled as independent of each other.} \item{dist}{Name of the distribution of the group-level effects. Currently \code{"gaussian"} is the only option.} } \description{ Function to set up a multi-membership grouping term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with grouping terms. 
} \examples{ \dontrun{ # simulate some data dat <- data.frame( y = rnorm(100), x1 = rnorm(100), x2 = rnorm(100), g1 = sample(1:10, 100, TRUE), g2 = sample(1:10, 100, TRUE) ) # multi-membership model with two members per group and equal weights fit1 <- brm(y ~ x1 + (1|mm(g1, g2)), data = dat) summary(fit1) # weight the first member two times for than the second member dat$w1 <- rep(2, 100) dat$w2 <- rep(1, 100) fit2 <- brm(y ~ x1 + (1|mm(g1, g2, weights = cbind(w1, w2))), data = dat) summary(fit2) # multi-membership model with level specific covariate values dat$xc <- (dat$x1 + dat$x2) / 2 fit3 <- brm(y ~ xc + (1 + mmc(x1, x2) | mm(g1, g2)), data = dat) summary(fit3) } } \seealso{ \code{\link{brmsformula}}, \code{\link{mmc}} } brms/man/posterior_smooths.brmsfit.Rd0000644000176200001440000000400714213413565017533 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_smooths.R \name{posterior_smooths.brmsfit} \alias{posterior_smooths.brmsfit} \alias{posterior_smooths} \title{Posterior Predictions of Smooth Terms} \usage{ \method{posterior_smooths}{brmsfit}( object, smooth, newdata = NULL, resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, ... ) posterior_smooths(object, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{smooth}{Name of a single smooth term for which predictions should be computed.} \item{newdata}{An optional \code{data.frame} for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. Only those variables appearing in the chosen \code{smooth} term are required.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{dpar}{Optional name of a predicted distributional parameter. If specified, expected predictions of this parameters are returned.} \item{nlpar}{Optional name of a predicted non-linear parameter. 
If specified, expected predictions of this parameter are returned.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{...}{Currently ignored.} } \value{ An S x N matrix, where S is the number of posterior draws and N is the number of observations. } \description{ Compute posterior predictions of smooth \code{s} and \code{t2} terms of models fitted with \pkg{brms}. } \examples{ \dontrun{ set.seed(0) dat <- mgcv::gamSim(1, n = 200, scale = 2) fit <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) summary(fit) newdata <- data.frame(x2 = seq(0, 1, 10)) str(posterior_smooths(fit, smooth = "s(x2)", newdata = newdata)) } } brms/man/set_prior.Rd0000644000176200001440000004600114430733376014300 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{set_prior} \alias{set_prior} \alias{brmsprior} \alias{brmsprior-class} \alias{prior} \alias{prior_} \alias{prior_string} \alias{empty_prior} \title{Prior Definitions for \pkg{brms} Models} \usage{ set_prior( prior, class = "b", coef = "", group = "", resp = "", dpar = "", nlpar = "", lb = NA, ub = NA, check = TRUE ) prior(prior, ...) prior_(prior, ...) prior_string(prior, ...) empty_prior() } \arguments{ \item{prior}{A character string defining a distribution in \pkg{Stan} language} \item{class}{The parameter class. Defaults to \code{"b"} (i.e. population-level effects). See 'Details' for other valid parameter classes.} \item{coef}{Name of the coefficient within the parameter class.} \item{group}{Grouping factor for group-level parameters.} \item{resp}{Name of the response variable. Only used in multivariate models.} \item{dpar}{Name of a distributional parameter. 
Only used in distributional models.} \item{nlpar}{Name of a non-linear parameter. Only used in non-linear models.} \item{lb}{Lower bound for parameter restriction. Currently only allowed for classes \code{"b"}. Defaults to \code{NULL}, that is no restriction.} \item{ub}{Upper bound for parameter restriction. Currently only allowed for classes \code{"b"}. Defaults to \code{NULL}, that is no restriction.} \item{check}{Logical; Indicates whether priors should be checked for validity (as far as possible). Defaults to \code{TRUE}. If \code{FALSE}, \code{prior} is passed to the Stan code as is, and all other arguments are ignored.} \item{...}{Arguments passed to \code{set_prior}.} } \value{ An object of class \code{brmsprior} to be used in the \code{prior} argument of \code{\link{brm}}. } \description{ Define priors for specific parameters or classes of parameters. } \details{ \code{set_prior} is used to define prior distributions for parameters in \pkg{brms} models. The functions \code{prior}, \code{prior_}, and \code{prior_string} are aliases of \code{set_prior} each allowing for a different kind of argument specification. \code{prior} allows specifying arguments as expression without quotation marks using non-standard evaluation. \code{prior_} allows specifying arguments as one-sided formulas or wrapped in \code{quote}. \code{prior_string} allows specifying arguments as strings just as \code{set_prior} itself. Below, we explain its usage and list some common prior distributions for parameters. A complete overview on possible prior distributions is given in the Stan Reference Manual available at \url{https://mc-stan.org/}. To combine multiple priors, use \code{c(...)} or the \code{+} operator (see 'Examples'). \pkg{brms} does not check if the priors are written in correct \pkg{Stan} language. Instead, \pkg{Stan} will check their syntactical correctness when the model is parsed to \code{C++} and returns an error if they are not. 
This, however, does not imply that priors are always meaningful if they are accepted by \pkg{Stan}. Although \pkg{brms} trys to find common problems (e.g., setting bounded priors on unbounded parameters), there is no guarantee that the defined priors are reasonable for the model. Below, we list the types of parameters in \pkg{brms} models, for which the user can specify prior distributions. Below, we provide details for the individual parameter classes that you can set priors on. Often, it may not be immediately clear, which parameters are present in the model. To get a full list of parameters and parameter classes for which priors can be specified (depending on the model) use function \code{\link{get_prior}}. 1. Population-level ('fixed') effects Every Population-level effect has its own regression parameter represents the name of the corresponding population-level effect. Suppose, for instance, that \code{y} is predicted by \code{x1} and \code{x2} (i.e., \code{y ~ x1 + x2} in formula syntax). Then, \code{x1} and \code{x2} have regression parameters \code{b_x1} and \code{b_x2} respectively. The default prior for population-level effects (including monotonic and category specific effects) is an improper flat prior over the reals. Other common options are normal priors or student-t priors. If we want to have a normal prior with mean 0 and standard deviation 5 for \code{x1}, and a unit student-t prior with 10 degrees of freedom for \code{x2}, we can specify this via \code{set_prior("normal(0,5)", class = "b", coef = "x1")} and \cr \code{set_prior("student_t(10, 0, 1)", class = "b", coef = "x2")}. To put the same prior on all population-level effects at once, we may write as a shortcut \code{set_prior("", class = "b")}. This also leads to faster sampling, because priors can be vectorized in this case. 
Both ways of defining priors can be combined using for instance \code{set_prior("normal(0, 2)", class = "b")} and \cr \code{set_prior("normal(0, 10)", class = "b", coef = "x1")} at the same time. This will set a \code{normal(0, 10)} prior on the effect of \code{x1} and a \code{normal(0, 2)} prior on all other population-level effects. However, this will break vectorization and may slow down the sampling procedure a bit. In case of the default intercept parameterization (discussed in the 'Details' section of \code{\link{brmsformula}}), general priors on class \code{"b"} will \emph{not} affect the intercept. Instead, the intercept has its own parameter class named \code{"Intercept"} and priors can thus be specified via \code{set_prior("", class = "Intercept")}. Setting a prior on the intercept will not break vectorization of the other population-level effects. Note that technically, this prior is set on an intercept that results when internally centering all population-level predictors around zero to improve sampling efficiency. On this centered intercept, specifying a prior is actually much easier and intuitive than on the original intercept, since the former represents the expected response value when all predictors are at their means. To treat the intercept as an ordinary population-level effect and avoid the centering parameterization, use \code{0 + Intercept} on the right-hand side of the model formula. In non-linear models, population-level effects are defined separately for each non-linear parameter. Accordingly, it is necessary to specify the non-linear parameter in \code{set_prior} so that priors we can be assigned correctly. If, for instance, \code{alpha} is the parameter and \code{x} the predictor for which we want to define the prior, we can write \code{set_prior("", coef = "x", nlpar = "alpha")}. As a shortcut we can use \code{set_prior("", nlpar = "alpha")} to set the same prior on all population-level effects of \code{alpha} at once. 
The same goes for specifying priors for specific distributional parameters in the context of distributional regression, for example, \code{set_prior("", coef = "x", dpar = "sigma")}. For most other parameter classes (see below), you need to indicate non-linear and distributional parameters in the same way as shown here. If desired, population-level effects can be restricted to fall only within a certain interval using the \code{lb} and \code{ub} arguments of \code{set_prior}. This is often required when defining priors that are not defined everywhere on the real line, such as uniform or gamma priors. When defining a \code{uniform(2,4)} prior, you should write \code{set_prior("uniform(2,4)", lb = 2, ub = 4)}. When using a prior that is defined on the positive reals only (such as a gamma prior) set \code{lb = 0}. In most situations, it is not useful to restrict population-level parameters through bounded priors (non-linear models are an important exception), but if you really want to this is the way to go. 2. Group-level ('random') effects Each group-level effect of each grouping factor has a standard deviation named \code{sd__}. Consider, for instance, the formula \code{y ~ x1 + x2 + (1 + x1 | g)}. We see that the intercept as well as \code{x1} are group-level effects nested in the grouping factor \code{g}. The corresponding standard deviation parameters are named as \code{sd_g_Intercept} and \code{sd_g_x1} respectively. These parameters are restricted to be non-negative and, by default, have a half student-t prior with 3 degrees of freedom and a scale parameter that depends on the standard deviation of the response after applying the link function. Minimally, the scale parameter is 2.5. This prior is used (a) to be only weakly informative in order to influence results as few as possible, while (b) providing at least some regularization to considerably improve convergence and sampling efficiency. 
To define a prior distribution only for standard deviations of a specific grouping factor, use \cr \code{set_prior("", class = "sd", group = "")}. To define a prior distribution only for a specific standard deviation of a specific grouping factor, you may write \cr \code{set_prior("", class = "sd", group = "", coef = "")}. If there is more than one group-level effect per grouping factor, the correlations between those effects have to be estimated. The prior \code{lkj_corr_cholesky(eta)} or in short \code{lkj(eta)} with \code{eta > 0} is essentially the only prior for (Cholesky factors) of correlation matrices. If \code{eta = 1} (the default) all correlations matrices are equally likely a priori. If \code{eta > 1}, extreme correlations become less likely, whereas \code{0 < eta < 1} results in higher probabilities for extreme correlations. Correlation matrix parameters in \code{brms} models are named as \code{cor_}, (e.g., \code{cor_g} if \code{g} is the grouping factor). To set the same prior on every correlation matrix, use for instance \code{set_prior("lkj(2)", class = "cor")}. Internally, the priors are transformed to be put on the Cholesky factors of the correlation matrices to improve efficiency and numerical stability. The corresponding parameter class of the Cholesky factors is \code{L}, but it is not recommended to specify priors for this parameter class directly. 4. Smoothing Splines Smoothing splines are implemented in \pkg{brms} using the 'random effects' formulation as explained in \code{\link[mgcv:gamm]{gamm}}). Thus, each spline has its corresponding standard deviations modeling the variability within this term. In \pkg{brms}, this parameter class is called \code{sds} and priors can be specified via \code{set_prior("", class = "sds", coef = "")}. The default prior is the same as for standard deviations of group-level effects. 5. 
Gaussian processes Gaussian processes as currently implemented in \pkg{brms} have two parameters, the standard deviation parameter \code{sdgp}, and characteristic length-scale parameter \code{lscale} (see \code{\link{gp}} for more details). The default prior of \code{sdgp} is the same as for standard deviations of group-level effects. The default prior of \code{lscale} is an informative inverse-gamma prior specifically tuned to the covariates of the Gaussian process (for more details see \url{https://betanalpha.github.io/assets/case_studies/gp_part3/part3.html}). This tuned prior may be overly informative in some cases, so please consider other priors as well to make sure inference is robust to the prior specification. If tuning fails, a half-normal prior is used instead. 6. Autocorrelation parameters The autocorrelation parameters currently implemented are named \code{ar} (autoregression), \code{ma} (moving average), \code{sderr} (standard deviation of latent residuals in latent ARMA models), \code{cosy} (compound symmetry correlation), \code{car} (spatial conditional autoregression), as well as \code{lagsar} and \code{errorsar} (spatial simultaneous autoregression). Priors can be defined by \code{set_prior("", class = "ar")} for \code{ar} and similar for other autocorrelation parameters. By default, \code{ar} and \code{ma} are bounded between \code{-1} and \code{1}; \code{cosy}, \code{car}, \code{lagsar}, and \code{errorsar} are bounded between \code{0} and \code{1}. The default priors are flat over the respective definition areas. 7. Parameters of measurement error terms Latent variables induced via measurement error \code{\link{me}} terms require both mean and standard deviation parameters, whose prior classes are named \code{"meanme"} and \code{"sdme"}, respectively. If multiple latent variables are induced this way, their correlation matrix will be modeled as well and corresponding priors can be specified via the \code{"corme"} class. 
All of the above parameters have flat priors over their respective definition spaces by default. 8. Distance parameters of monotonic effects As explained in the details section of \code{\link{brm}}, monotonic effects make use of a special parameter vector to estimate the 'normalized distances' between consecutive predictor categories. This is realized in \pkg{Stan} using the \code{simplex} parameter type. This class is named \code{"simo"} (short for simplex monotonic) in \pkg{brms}. The only valid prior for simplex parameters is the dirichlet prior, which accepts a vector of length \code{K - 1} (K = number of predictor categories) as input defining the 'concentration' of the distribution. Explaining the dirichlet prior is beyond the scope of this documentation, but we want to describe how to define this prior syntactically correct. If a predictor \code{x} with \code{K} categories is modeled as monotonic, we can define a prior on its corresponding simplex via \cr \code{prior(dirichlet(), class = simo, coef = mox1)}. The \code{1} in the end of \code{coef} indicates that this is the first simplex in this term. If interactions between multiple monotonic variables are modeled, multiple simplexes per term are required. For \code{}, we can put in any \code{R} expression defining a vector of length \code{K - 1}. The default is a uniform prior (i.e. \code{ = rep(1, K-1)}) over all simplexes of the respective dimension. 9. Parameters for specific families Some families need additional parameters to be estimated. Families \code{gaussian}, \code{student}, \code{skew_normal}, \code{lognormal}, and \code{gen_extreme_value} need the parameter \code{sigma} to account for the residual standard deviation. By default, \code{sigma} has a half student-t prior that scales in the same way as the group-level standard deviations. Further, family \code{student} needs the parameter \code{nu} representing the degrees of freedom of students-t distribution. 
By default, \code{nu} has prior \code{gamma(2, 0.1)} and a fixed lower bound of \code{1}. Families \code{gamma}, \code{weibull}, \code{inverse.gaussian}, and \code{negbinomial} need a \code{shape} parameter that has a \code{gamma(0.01, 0.01)} prior by default. For families \code{cumulative}, \code{cratio}, \code{sratio}, and \code{acat}, and only if \code{threshold = "equidistant"}, the parameter \code{delta} is used to model the distance between two adjacent thresholds. By default, \code{delta} has an improper flat prior over the reals. The \code{von_mises} family needs the parameter \code{kappa}, representing the concentration parameter. By default, \code{kappa} has prior \code{gamma(2, 0.01)}. Every family specific parameter has its own prior class, so that \code{set_prior("", class = "")} is the right way to go. All of these priors are chosen to be weakly informative, having only minimal influence on the estimations, while improving convergence and sampling efficiency. 10. Shrinkage priors To reduce the danger of overfitting in models with many predictor terms fit on comparably sparse data, brms supports special shrinkage priors, namely the (regularized) \code{\link{horseshoe}} and the \code{\link{R2D2}} prior. These priors can be applied on many parameter classes, either directly on the coefficient classes (e.g., class \code{b}), if directly setting priors on them is supported, or on the corresponding standard deviation hyperparameters (e.g., class \code{sd}) otherwise. Currently, the following classes support shrinkage priors: \code{b} (overall regression coefficients), \code{sds} (SDs of smoothing splines), \code{sdgp} (SDs of Gaussian processes), \code{ar} (autoregressive coefficients), \code{ma} (moving average coefficients), \code{sderr} (SD of latent residuals), \code{sdcar} (SD of spatial CAR structures), \code{sd} (SD of varying coefficients). 11. 
Fixing parameters to constants Fixing parameters to constants is possible by using the \code{constant} function, for example, \code{constant(1)} to fix a parameter to 1. Broadcasting to vectors and matrices is done automatically. } \section{Functions}{ \itemize{ \item \code{prior()}: Alias of \code{set_prior} allowing to specify arguments as expressions without quotation marks. \item \code{prior_()}: Alias of \code{set_prior} allowing to specify arguments as as one-sided formulas or wrapped in \code{quote}. \item \code{prior_string()}: Alias of \code{set_prior} allowing to specify arguments as strings. \item \code{empty_prior()}: Create an empty \code{brmsprior} object. }} \examples{ ## use alias functions (prior1 <- prior(cauchy(0, 1), class = sd)) (prior2 <- prior_(~cauchy(0, 1), class = ~sd)) (prior3 <- prior_string("cauchy(0, 1)", class = "sd")) identical(prior1, prior2) identical(prior1, prior3) # check which parameters can have priors get_prior(rating ~ treat + period + carry + (1|subject), data = inhaler, family = cumulative()) # define some priors bprior <- c(prior_string("normal(0,10)", class = "b"), prior(normal(1,2), class = b, coef = treat), prior_(~cauchy(0,2), class = ~sd, group = ~subject, coef = ~Intercept)) # verify that the priors indeed found their way into Stan's model code make_stancode(rating ~ treat + period + carry + (1|subject), data = inhaler, family = cumulative(), prior = bprior) # use the horseshoe prior to model sparsity in regression coefficients make_stancode(count ~ zAge + zBase * Trt, data = epilepsy, family = poisson(), prior = set_prior("horseshoe(3)")) # fix certain priors to constants bprior <- prior(constant(1), class = "b") + prior(constant(2), class = "b", coef = "zBase") + prior(constant(0.5), class = "sd") make_stancode(count ~ zAge + zBase + (1 | patient), data = epilepsy, prior = bprior) # pass priors to Stan without checking prior <- prior_string("target += normal_lpdf(b[1] | 0, 1)", check = FALSE) make_stancode(count ~ 
Trt, data = epilepsy, prior = prior) # define priors in a vectorized manner # useful in particular for categorical or multivariate models set_prior("normal(0, 2)", dpar = c("muX", "muY", "muZ")) } \seealso{ \code{\link{get_prior}} } brms/man/is.brmsprior.Rd0000644000176200001440000000046714160105076014717 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{is.brmsprior} \alias{is.brmsprior} \title{Checks if argument is a \code{brmsprior} object} \usage{ is.brmsprior(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{brmsprior} object } brms/man/diagnostic-quantities.Rd0000644000176200001440000000274614361545260016606 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diagnostics.R \name{diagnostic-quantities} \alias{diagnostic-quantities} \alias{log_posterior} \alias{nuts_params} \alias{rhat} \alias{neff_ratio} \alias{log_posterior.brmsfit} \alias{nuts_params.brmsfit} \alias{rhat.brmsfit} \alias{neff_ratio.brmsfit} \title{Extract Diagnostic Quantities of \pkg{brms} Models} \usage{ \method{log_posterior}{brmsfit}(object, ...) \method{nuts_params}{brmsfit}(object, pars = NULL, ...) \method{rhat}{brmsfit}(x, pars = NULL, ...) \method{neff_ratio}{brmsfit}(object, pars = NULL, ...) } \arguments{ \item{object, x}{A \code{brmsfit} object.} \item{...}{Arguments passed to individual methods.} \item{pars}{An optional character vector of parameter names. For \code{nuts_params} these will be NUTS sampler parameter names rather than model parameters. If pars is omitted all parameters are included.} } \value{ The exact form of the output depends on the method. } \description{ Extract quantities that can be used to diagnose sampling behavior of the algorithms applied by \pkg{Stan} at the back-end of \pkg{brms}. } \details{ For more details see \code{\link[bayesplot:bayesplot-extractors]{bayesplot-extractors}}. 
} \examples{ \dontrun{ fit <- brm(time ~ age * sex, data = kidney) lp <- log_posterior(fit) head(lp) np <- nuts_params(fit) str(np) # extract the number of divergence transitions sum(subset(np, Parameter == "divergent__")$Value) head(rhat(fit)) head(neff_ratio(fit)) } } brms/man/add_criterion.Rd0000644000176200001440000000447614213413565015104 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{add_criterion} \alias{add_criterion} \alias{add_criterion.brmsfit} \title{Add model fit criteria to model objects} \usage{ add_criterion(x, ...) \method{add_criterion}{brmsfit}( x, criterion, model_name = NULL, overwrite = FALSE, file = NULL, force_save = FALSE, ... ) } \arguments{ \item{x}{An \R object typically of class \code{brmsfit}.} \item{...}{Further arguments passed to the underlying functions computing the model fit criteria.} \item{criterion}{Names of model fit criteria to compute. Currently supported are \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"loo_subsample"}, \code{"bayes_R2"} (Bayesian R-squared), \code{"loo_R2"} (LOO-adjusted R-squared), and \code{"marglik"} (log marginal likelihood).} \item{model_name}{Optional name of the model. If \code{NULL} (the default) the name is taken from the call to \code{x}.} \item{overwrite}{Logical; Indicates if already stored fit indices should be overwritten. Defaults to \code{FALSE}.} \item{file}{Either \code{NULL} or a character string. In the latter case, the fitted model object including the newly added criterion values is saved via \code{\link{saveRDS}} in a file named after the string supplied in \code{file}. The \code{.rds} extension is added automatically. If \code{x} was already stored in a file before, the file name will be reused automatically (with a message) unless overwritten by \code{file}. 
In any case, \code{file} only applies if new criteria were actually added via \code{add_criterion} or if \code{force_save} was set to \code{TRUE}.} \item{force_save}{Logical; only relevant if \code{file} is specified and ignored otherwise. If \code{TRUE}, the fitted model object will be saved regardless of whether new criteria were added via \code{add_criterion}.} } \value{ An object of the same class as \code{x}, but with model fit criteria added for later usage. } \description{ Add model fit criteria to model objects } \details{ Functions \code{add_loo} and \code{add_waic} are aliases of \code{add_criterion} with fixed values for the \code{criterion} argument. } \examples{ \dontrun{ fit <- brm(count ~ Trt, data = epilepsy) # add both LOO and WAIC at once fit <- add_criterion(fit, c("loo", "waic")) print(fit$criteria$loo) print(fit$criteria$waic) } } brms/man/vcov.brmsfit.Rd0000644000176200001440000000207414213413565014710 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{vcov.brmsfit} \alias{vcov.brmsfit} \title{Covariance and Correlation Matrix of Population-Level Effects} \usage{ \method{vcov}{brmsfit}(object, correlation = FALSE, pars = NULL, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{correlation}{Logical; if \code{FALSE} (the default), compute the covariance matrix, if \code{TRUE}, compute the correlation matrix.} \item{pars}{Optional names of coefficients to extract. By default, all coefficients are extracted.} \item{...}{Currently ignored.} } \value{ covariance or correlation matrix of population-level parameters } \description{ Get a point estimate of the covariance or correlation matrix of population-level parameters } \details{ Estimates are obtained by calculating the maximum likelihood covariances (correlations) of the posterior draws. 
} \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), data = epilepsy, family = gaussian(), chains = 2) vcov(fit) } } brms/man/mvbrmsformula.Rd0000644000176200001440000000274014213413565015162 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{mvbrmsformula} \alias{mvbrmsformula} \alias{mvbf} \title{Set up a multivariate model formula for use in \pkg{brms}} \usage{ mvbrmsformula(..., flist = NULL, rescor = NULL) } \arguments{ \item{...}{Objects of class \code{formula} or \code{brmsformula}, each specifying a univariate model. See \code{\link{brmsformula}} for details on how to specify univariate models.} \item{flist}{Optional list of formulas, which are treated in the same way as formulas passed via the \code{...} argument.} \item{rescor}{Logical; Indicates if residual correlation between the response variables should be modeled. Currently, this is only possible in multivariate \code{gaussian} and \code{student} models. If \code{NULL} (the default), \code{rescor} is internally set to \code{TRUE} when possible.} } \value{ An object of class \code{mvbrmsformula}, which is essentially a \code{list} containing all model formulas as well as some additional information for multivariate models. } \description{ Set up a multivariate model formula for use in the \pkg{brms} package allowing to define (potentially non-linear) additive multilevel models for all parameters of the assumed response distributions. } \details{ See \code{vignette("brms_multivariate")} for a case study. 
} \examples{ bf1 <- bf(y1 ~ x + (1|g)) bf2 <- bf(y2 ~ s(z)) mvbf(bf1, bf2) } \seealso{ \code{\link{brmsformula}}, \code{\link{brmsformula-helpers}} } brms/man/make_standata.Rd0000644000176200001440000001272414275447604015076 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make_standata.R \name{make_standata} \alias{make_standata} \title{Data for \pkg{brms} Models} \usage{ make_standata( formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sample_prior = "no", stanvars = NULL, threads = getOption("brms.threads", NULL), knots = NULL, drop_unused_levels = TRUE, ... ) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to that classes): A symbolic description of the model to be fitted. The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{An object of class \code{data.frame} (or one that can be coerced to that class) containing data of all variables used in the model.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family. Every family function has a \code{link} argument allowing to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{prior}{One or more \code{brmsprior} objects created by \code{\link{set_prior}} or related functions and combined using the \code{c} method or the \code{+} operator. 
See also \code{\link{get_prior}} for more help.} \item{autocor}{(Deprecated) An optional \code{\link{cor_brms}} object describing the correlation structure within the response variable (i.e., the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for a description of the available correlation structures. Defaults to \code{NULL}, corresponding to no correlations. In multivariate models, \code{autocor} might also be a list of autocorrelation structures. It is now recommend to specify autocorrelation terms directly within \code{formula}. See \code{\link{brmsformula}} for more details.} \item{data2}{A named \code{list} of objects containing data, which cannot be passed via argument \code{data}. Required for some objects used in autocorrelation structures to specify dependency structures as well as for within-group covariance matrices.} \item{cov_ranef}{(Deprecated) A list of matrices that are proportional to the (within) covariance structure of the group-level effects. The names of the matrices should correspond to columns in \code{data} that are used as grouping factors. All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among others to model pedigrees and phylogenetic effects. It is now recommended to specify those matrices in the formula interface using the \code{\link{gr}} and related functions. See \code{vignette("brms_phylogenetics")} for more details.} \item{sample_prior}{Indicate if draws from priors should be drawn additionally to the posterior draws. Options are \code{"no"} (the default), \code{"yes"}, and \code{"only"}. Among others, these draws can be used to calculate Bayes factors for point hypotheses via \code{\link{hypothesis}}. Please note that improper priors are not sampled, including the default improper priors used by \code{brm}. See \code{\link{set_prior}} on how to set (proper) priors. 
Please also note that prior draws for the overall intercept are not obtained by default for technical reasons. See \code{\link{brmsformula}} how to obtain prior draws for the intercept. If \code{sample_prior} is set to \code{"only"}, draws are drawn solely from the priors ignoring the likelihood, which allows among others to generate draws from the prior predictive distribution. In this case, all parameters must have proper priors.} \item{stanvars}{An optional \code{stanvars} object generated by function \code{\link{stanvar}} to define additional variables for use in \pkg{Stan}'s program blocks.} \item{threads}{Number of threads to use in within-chain parallelization. For more control over the threading process, \code{threads} may also be a \code{brmsthreads} object created by \code{\link{threading}}. Within-chain parallelization is experimental! We recommend its use only if you are experienced with Stan's \code{reduce_sum} function and have a slow running model that cannot be sped up by any other means. Can be set globally for the current \R session via the \code{"brms.threads"} option (see \code{\link{options}}).} \item{knots}{Optional list containing user specified knot values to be used for basis construction of smoothing terms. See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{drop_unused_levels}{Should unused factors levels in the data be dropped? Defaults to \code{TRUE}.} \item{...}{Other arguments for internal use.} } \value{ A named list of objects containing the required data to fit a \pkg{brms} model with \pkg{Stan}. 
} \description{ Generate data for \pkg{brms} models to be passed to \pkg{Stan} } \examples{ sdata1 <- make_standata(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "cumulative") str(sdata1) sdata2 <- make_standata(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = "poisson") str(sdata2) } \author{ Paul-Christian Buerkner \email{paul.buerkner@gmail.com} } brms/man/prior_summary.brmsfit.Rd0000644000176200001440000000210114213413565016632 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{prior_summary.brmsfit} \alias{prior_summary.brmsfit} \alias{prior_summary} \title{Extract Priors of a Bayesian Model Fitted with \pkg{brms}} \usage{ \method{prior_summary}{brmsfit}(object, all = TRUE, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{all}{Logical; Show all parameters in the model which may have priors (\code{TRUE}) or only those with proper priors (\code{FALSE})?} \item{...}{Further arguments passed to or from other methods.} } \value{ For \code{brmsfit} objects, an object of class \code{brmsprior}. } \description{ Extract Priors of a Bayesian Model Fitted with \pkg{brms} } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson(), prior = c(prior(student_t(5,0,10), class = b), prior(cauchy(0,2), class = sd))) prior_summary(fit) prior_summary(fit, all = FALSE) print(prior_summary(fit, all = FALSE), show_df = FALSE) } } brms/man/nsamples.brmsfit.Rd0000644000176200001440000000134514160105076015551 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{nsamples.brmsfit} \alias{nsamples.brmsfit} \alias{nsamples} \title{(Deprecated) Number of Posterior Samples} \usage{ \method{nsamples}{brmsfit}(object, subset = NULL, incl_warmup = FALSE, ...) 
} \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{subset}{An optional integer vector defining a subset of samples to be considered.} \item{incl_warmup}{A flag indicating whether to also count warmup / burn-in samples.} \item{...}{Currently ignored.} } \description{ Extract the number of posterior samples (draws) stored in a fitted Bayesian model. Method \code{nsamples} is deprecated. Please use \code{ndraws} instead. } brms/man/fitted.brmsfit.Rd0000644000176200001440000001101714213413565015207 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_epred.R \name{fitted.brmsfit} \alias{fitted.brmsfit} \title{Expected Values of the Posterior Predictive Distribution} \usage{ \method{fitted}{brmsfit}( object, newdata = NULL, re_formula = NULL, scale = c("response", "linear"), resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{scale}{Either \code{"response"} or \code{"linear"}. If \code{"response"}, results are returned on the scale of the response variable. If \code{"linear"}, results are returned on the scale of the linear predictor term, that is without applying the inverse link function or other transformations.} \item{resp}{Optional names of response variables. 
If specified, predictions are performed only for the specified response variables.} \item{dpar}{Optional name of a predicted distributional parameter. If specified, expected predictions of this parameters are returned.} \item{nlpar}{Optional name of a predicted non-linear parameter. If specified, expected predictions of this parameters are returned.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}..} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An \code{array} of predicted \emph{mean} response values. If \code{summary = FALSE} the output resembles those of \code{\link{posterior_epred.brmsfit}}. If \code{summary = TRUE} the output depends on the family: For categorical and ordinal families, the output is an N x E x C array, where N is the number of observations, E is the number of summary statistics, and C is the number of categories. For all other families, the output is an N x E matrix. 
The number of summary statistics E is equal to \code{2 + length(probs)}: The \code{Estimate} column contains point estimates (either mean or median depending on argument \code{robust}), while the \code{Est.Error} column contains uncertainty estimates (either standard deviation or median absolute deviation depending on argument \code{robust}). The remaining columns starting with \code{Q} contain quantile estimates as specified via argument \code{probs}. In multivariate models, an additional dimension is added to the output which indexes along the different response variables. } \description{ This method is an alias of \code{\link{posterior_epred.brmsfit}} with additional arguments for obtaining summaries of the computed draws. } \examples{ \dontrun{ ## fit a model fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) ## compute expected predictions fitted_values <- fitted(fit) head(fitted_values) ## plot expected predictions against actual response dat <- as.data.frame(cbind(Y = standata(fit)$Y, fitted_values)) ggplot(dat) + geom_point(aes(x = Estimate, y = Y)) } } \seealso{ \code{\link{posterior_epred.brmsfit}} } brms/man/mo.Rd0000644000176200001440000000407214224021465012675 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-sp.R \name{mo} \alias{mo} \title{Monotonic Predictors in \pkg{brms} Models} \usage{ mo(x, id = NA) } \arguments{ \item{x}{An integer variable or an ordered factor to be modeled as monotonic.} \item{id}{Optional character string. All monotonic terms with the same \code{id} within one formula will be modeled as having the same simplex (shape) parameter vector. If all monotonic terms of the same predictor have the same \code{id}, the resulting predictions will be conditionally monotonic for all values of interacting covariates (Bürkner & Charpentier, 2020).} } \description{ Specify a monotonic predictor term in \pkg{brms}. 
The function does not evaluate its arguments -- it exists purely to help set up a model. } \details{ See Bürkner and Charpentier (2020) for the underlying theory. For detailed documentation of the formula syntax used for monotonic terms, see \code{help(brmsformula)} as well as \code{vignette("brms_monotonic")}. } \examples{ \dontrun{ # generate some data income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100") income <- factor(sample(income_options, 100, TRUE), levels = income_options, ordered = TRUE) mean_ls <- c(30, 60, 70, 75) ls <- mean_ls[income] + rnorm(100, sd = 7) dat <- data.frame(income, ls) # fit a simple monotonic model fit1 <- brm(ls ~ mo(income), data = dat) summary(fit1) plot(fit1, N = 6) plot(conditional_effects(fit1), points = TRUE) # model interaction with other variables dat$x <- sample(c("a", "b", "c"), 100, TRUE) fit2 <- brm(ls ~ mo(income)*x, data = dat) summary(fit2) plot(conditional_effects(fit2), points = TRUE) # ensure conditional monotonicity fit3 <- brm(ls ~ mo(income, id = "i")*x, data = dat) summary(fit3) plot(conditional_effects(fit3), points = TRUE) } } \references{ Bürkner P. C. & Charpentier E. (2020). Modeling Monotonic Effects of Ordinal Predictors in Regression Models. British Journal of Mathematical and Statistical Psychology. doi:10.1111/bmsp.12195 } \seealso{ \code{\link{brmsformula}} } brms/man/is.cor_brms.Rd0000644000176200001440000000100014160105076014466 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{is.cor_brms} \alias{is.cor_brms} \alias{is.cor_arma} \alias{is.cor_cosy} \alias{is.cor_sar} \alias{is.cor_car} \alias{is.cor_fixed} \title{Check if argument is a correlation structure} \usage{ is.cor_brms(x) is.cor_arma(x) is.cor_cosy(x) is.cor_sar(x) is.cor_car(x) is.cor_fixed(x) } \arguments{ \item{x}{An \R object.} } \description{ Check if argument is one of the correlation structures used in \pkg{brms}. 
} brms/man/cor_car.Rd0000644000176200001440000000454014213413565013676 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_car} \alias{cor_car} \alias{cor_icar} \title{(Deprecated) Spatial conditional autoregressive (CAR) structures} \usage{ cor_car(W, formula = ~1, type = "escar") cor_icar(W, formula = ~1) } \arguments{ \item{W}{Adjacency matrix of locations. All non-zero entries are treated as if the two locations are adjacent. If \code{formula} contains a grouping factor, the row names of \code{W} have to match the levels of the grouping factor.} \item{formula}{An optional one-sided formula of the form \code{~ 1 | g}, where \code{g} is a grouping factor mapping observations to spatial locations. If not specified, each observation is treated as a separate location. It is recommended to always specify a grouping factor to allow for handling of new data in post-processing methods.} \item{type}{Type of the CAR structure. Currently implemented are \code{"escar"} (exact sparse CAR), \code{"esicar"} (exact sparse intrinsic CAR), \code{"icar"} (intrinsic CAR), and \code{"bym2"}. More information is provided in the 'Details' section.} } \description{ These function are deprecated. Please see \code{\link{car}} for the new syntax. These functions are constructors for the \code{cor_car} class implementing spatial conditional autoregressive structures. } \details{ The \code{escar} and \code{esicar} types are implemented based on the case study of Max Joseph (\url{https://github.com/mbjoseph/CARstan}). The \code{icar} and \code{bym2} type is implemented based on the case study of Mitzi Morris (\url{https://mc-stan.org/users/documentation/case-studies/icar_stan.html}). 
} \examples{ \dontrun{ # generate some spatial data east <- north <- 1:10 Grid <- expand.grid(east, north) K <- nrow(Grid) # set up distance and neighbourhood matrices distance <- as.matrix(dist(Grid)) W <- array(0, c(K, K)) W[distance == 1] <- 1 # generate the covariates and response data x1 <- rnorm(K) x2 <- rnorm(K) theta <- rnorm(K, sd = 0.05) phi <- rmulti_normal( 1, mu = rep(0, K), Sigma = 0.4 * exp(-0.1 * distance) ) eta <- x1 + x2 + phi prob <- exp(eta) / (1 + exp(eta)) size <- rep(50, K) y <- rbinom(n = K, size = size, prob = prob) dat <- data.frame(y, size, x1, x2) # fit a CAR model fit <- brm(y | trials(size) ~ x1 + x2, data = dat, family = binomial(), autocor = cor_car(W)) summary(fit) } } brms/man/posterior_summary.Rd0000644000176200001440000000362414213413565016073 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{posterior_summary} \alias{posterior_summary} \alias{posterior_summary.default} \alias{posterior_summary.brmsfit} \title{Summarize Posterior draws} \usage{ posterior_summary(x, ...) \method{posterior_summary}{default}(x, probs = c(0.025, 0.975), robust = FALSE, ...) \method{posterior_summary}{brmsfit}( x, pars = NA, variable = NULL, probs = c(0.025, 0.975), robust = FALSE, ... ) } \arguments{ \item{x}{An \R object.} \item{...}{More arguments passed to or from other methods.} \item{probs}{The percentiles to be computed by the \code{\link[stats:quantile]{quantile}} function.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead.} \item{pars}{Deprecated alias of \code{variable}. For reasons of backwards compatibility, \code{pars} is interpreted as a vector of regular expressions by default unless \code{fixed = TRUE} is specified.} \item{variable}{A character vector providing the variables to extract. 
By default, all variables are extracted.} } \value{ A matrix where rows indicate variables and columns indicate the summary estimates. } \description{ Summarizes posterior draws based on point estimates (mean or median), estimation errors (SD or MAD) and quantiles. This function mainly exists to retain backwards compatibility. It will eventually be replaced by functions of the \pkg{posterior} package (see examples below). } \examples{ \dontrun{ fit <- brm(time ~ age * sex, data = kidney) posterior_summary(fit) # recommended workflow using posterior library(posterior) draws <- as_draws_array(fit) summarise_draws(draws, default_summary_measures()) } } \seealso{ \code{\link[posterior:summarize_draws]{summarize_draws}} } brms/man/cosy.Rd0000644000176200001440000000174514213413565013247 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{cosy} \alias{cosy} \title{Set up COSY correlation structures} \usage{ cosy(time = NA, gr = NA) } \arguments{ \item{time}{An optional time variable specifying the time ordering of the observations. By default, the existing order of the observations in the data is used.} \item{gr}{An optional grouping variable. If specified, the correlation structure is assumed to apply only to observations within the same grouping level.} } \value{ An object of class \code{'cosy_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up a compounds symmetry (COSY) term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with COSY terms. 
} \examples{ \dontrun{ data("lh") lh <- as.data.frame(lh) fit <- brm(x ~ cosy(), data = lh) summary(fit) } } \seealso{ \code{\link{autocor-terms}} } brms/man/cor_brms.Rd0000644000176200001440000000211514213413565014070 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_brms} \alias{cor_brms} \alias{cor_brms-class} \title{(Deprecated) Correlation structure classes for the \pkg{brms} package} \description{ Classes of correlation structures available in the \pkg{brms} package. \code{cor_brms} is not a correlation structure itself, but the class common to all correlation structures implemented in \pkg{brms}. } \section{Available correlation structures}{ \describe{ \item{cor_arma}{autoregressive-moving average (ARMA) structure, with arbitrary orders for the autoregressive and moving average components} \item{cor_ar}{autoregressive (AR) structure of arbitrary order} \item{cor_ma}{moving average (MA) structure of arbitrary order} \item{cor_car}{Spatial conditional autoregressive (CAR) structure} \item{cor_sar}{Spatial simultaneous autoregressive (SAR) structure} \item{cor_fixed}{fixed user-defined covariance structure} } } \seealso{ \code{\link{cor_arma}, \link{cor_ar}, \link{cor_ma}, \link{cor_car}, \link{cor_sar}, \link{cor_fixed}} } brms/man/is.mvbrmsterms.Rd0000644000176200001440000000057014160105076015254 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsterms.R \name{is.mvbrmsterms} \alias{is.mvbrmsterms} \title{Checks if argument is a \code{mvbrmsterms} object} \usage{ is.mvbrmsterms(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{mvbrmsterms} object } \seealso{ \code{\link[brms:brmsterms]{brmsterms}} } brms/man/hypothesis.brmsfit.Rd0000644000176200001440000001522214213413565016131 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hypothesis.R \name{hypothesis.brmsfit} 
\alias{hypothesis.brmsfit} \alias{hypothesis} \alias{hypothesis.default} \title{Non-Linear Hypothesis Testing} \usage{ \method{hypothesis}{brmsfit}( x, hypothesis, class = "b", group = "", scope = c("standard", "ranef", "coef"), alpha = 0.05, robust = FALSE, seed = NULL, ... ) hypothesis(x, ...) \method{hypothesis}{default}(x, hypothesis, alpha = 0.05, robust = FALSE, ...) } \arguments{ \item{x}{An \code{R} object. If it is not a \code{brmsfit} object, it must be coercible to a \code{data.frame}. In the latter case, the variables used in the \code{hypothesis} argument need to correspond to column names of \code{x}, while the rows are treated as representing posterior draws of the variables.} \item{hypothesis}{A character vector specifying one or more non-linear hypotheses concerning parameters of the model.} \item{class}{A string specifying the class of parameters being tested. Default is "b" for population-level effects. Other typical options are "sd" or "cor". If \code{class = NULL}, all parameters can be tested against each other, but have to be specified with their full name (see also \code{\link[brms:draws-index-brms]{variables}})} \item{group}{Name of a grouping factor to evaluate only group-level effects parameters related to this grouping factor.} \item{scope}{Indicates where to look for the variables specified in \code{hypothesis}. If \code{"standard"}, use the full parameter names (subject to the restriction given by \code{class} and \code{group}). If \code{"coef"} or \code{"ranef"}, compute the hypothesis for all levels of the grouping factor given in \code{"group"}, based on the output of \code{\link{coef.brmsfit}} and \code{\link{ranef.brmsfit}}, respectively.} \item{alpha}{The alpha-level of the tests (default is 0.05; see 'Details' for more information).} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. 
If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead.} \item{seed}{A single numeric value passed to \code{\link{set.seed}} to make results reproducible.} \item{...}{Currently ignored.} } \value{ A \code{\link{brmshypothesis}} object. } \description{ Perform non-linear hypothesis testing for all model parameters. } \details{ Among others, \code{hypothesis} computes an evidence ratio (\code{Evid.Ratio}) for each hypothesis. For a one-sided hypothesis, this is just the posterior probability (\code{Post.Prob}) under the hypothesis against its alternative. That is, when the hypothesis is of the form \code{a > b}, the evidence ratio is the ratio of the posterior probability of \code{a > b} and the posterior probability of \code{a < b}. In this example, values greater than one indicate that the evidence in favor of \code{a > b} is larger than evidence in favor of \code{a < b}. For a two-sided (point) hypothesis, the evidence ratio is a Bayes factor between the hypothesis and its alternative computed via the Savage-Dickey density ratio method. That is the posterior density at the point of interest divided by the prior density at that point. Values greater than one indicate that evidence in favor of the point hypothesis has increased after seeing the data. In order to calculate this Bayes factor, all parameters related to the hypothesis must have proper priors and argument \code{sample_prior} of function \code{brm} must be set to \code{"yes"}. Otherwise \code{Evid.Ratio} (and \code{Post.Prob}) will be \code{NA}. Please note that, for technical reasons, we cannot sample from priors of certain parameter classes. Most notably, these include overall intercept parameters (prior class \code{"Intercept"}) as well as group-level coefficients. When interpreting Bayes factors, make sure that your priors are reasonable and carefully chosen, as the result will depend heavily on the priors. In particular, avoid using default priors. 
The \code{Evid.Ratio} may sometimes be \code{0} or \code{Inf} implying very small or large evidence, respectively, in favor of the tested hypothesis. For one-sided hypothesis pairs, this basically means that all posterior draws are on the same side of the value dividing the two hypotheses. In that sense, instead of \code{0} or \code{Inf,} you may rather read it as \code{Evid.Ratio} smaller \code{1 / S} or greater \code{S}, respectively, where \code{S} denotes the number of posterior draws used in the computations. The argument \code{alpha} specifies the size of the credible interval (i.e., Bayesian confidence interval). For instance, if we tested a two-sided hypothesis and set \code{alpha = 0.05} (5\%), the credible interval will contain \code{1 - alpha = 0.95} (95\%) of the posterior values. Hence, \code{alpha * 100}\% of the posterior values will lie outside of the credible interval. Although this allows testing of hypotheses in a similar manner as in the frequentist null-hypothesis testing framework, we strongly argue against using arbitrary cutoffs (e.g., \code{p < .05}) to determine the 'existence' of an effect. 
} \examples{ \dontrun{ ## define priors prior <- c(set_prior("normal(0,2)", class = "b"), set_prior("student_t(10,0,1)", class = "sigma"), set_prior("student_t(10,0,1)", class = "sd")) ## fit a linear mixed effects models fit <- brm(time ~ age + sex + disease + (1 + age|patient), data = kidney, family = lognormal(), prior = prior, sample_prior = "yes", control = list(adapt_delta = 0.95)) ## perform two-sided hypothesis testing (hyp1 <- hypothesis(fit, "sexfemale = age + diseasePKD")) plot(hyp1) hypothesis(fit, "exp(age) - 3 = 0", alpha = 0.01) ## perform one-sided hypothesis testing hypothesis(fit, "diseasePKD + diseaseGN - 3 < 0") hypothesis(fit, "age < Intercept", class = "sd", group = "patient") ## test the amount of random intercept variance on all variance h <- paste("sd_patient__Intercept^2 / (sd_patient__Intercept^2 +", "sd_patient__age^2 + sigma^2) = 0") (hyp2 <- hypothesis(fit, h, class = NULL)) plot(hyp2) ## test more than one hypothesis at once h <- c("diseaseGN = diseaseAN", "2 * diseaseGN - diseasePKD = 0") (hyp3 <- hypothesis(fit, h)) plot(hyp3, ignore_prior = TRUE) ## compute hypotheses for all levels of a grouping factor hypothesis(fit, "age = 0", scope = "coef", group = "patient") ## use the default method dat <- as.data.frame(fit) str(dat) hypothesis(dat, "b_age > 0") } } \seealso{ \code{\link{brmshypothesis}} } \author{ Paul-Christian Buerkner \email{paul.buerkner@gmail.com} } brms/man/family.brmsfit.Rd0000644000176200001440000000113214160105076015202 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{family.brmsfit} \alias{family.brmsfit} \title{Extract Model Family Objects} \usage{ \method{family}{brmsfit}(object, resp = NULL, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{resp}{Optional names of response variables. 
If specified, predictions are performed only for the specified response variables.} \item{...}{Currently unused.} } \value{ A \code{brmsfamily} object or a list of such objects for multivariate models. } \description{ Extract Model Family Objects } brms/man/expp1.Rd0000644000176200001440000000042614160105076013316 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/numeric-helpers.R \name{expp1} \alias{expp1} \title{Exponential function plus one.} \usage{ expp1(x) } \arguments{ \item{x}{A numeric or complex vector.} } \description{ Computes \code{exp(x) + 1}. } brms/man/kfold_predict.Rd0000644000176200001440000000330514213413565015075 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/kfold.R \name{kfold_predict} \alias{kfold_predict} \title{Predictions from K-Fold Cross-Validation} \usage{ kfold_predict(x, method = c("predict", "fitted"), resp = NULL, ...) } \arguments{ \item{x}{Object of class \code{'kfold'} computed by \code{\link{kfold}}. For \code{kfold_predict} to work, the fitted model objects need to have been stored via argument \code{save_fits} of \code{\link{kfold}}.} \item{method}{The method used to make predictions. Either \code{"predict"} or \code{"fitted"}. See \code{\link{predict.brmsfit}} for details.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ A \code{list} with two slots named \code{'y'} and \code{'yrep'}. Slot \code{y} contains the vector of observed responses. Slot \code{yrep} contains the matrix of predicted responses, with rows being posterior draws and columns being observations. } \description{ Compute and evaluate predictions after performing K-fold cross-validation via \code{\link{kfold}}. 
} \examples{ \dontrun{ fit <- brm(count ~ zBase * Trt + (1|patient), data = epilepsy, family = poisson()) # perform k-fold cross validation (kf <- kfold(fit, save_fits = TRUE, chains = 1)) # define a loss function rmse <- function(y, yrep) { yrep_mean <- colMeans(yrep) sqrt(mean((yrep_mean - y)^2)) } # predict responses and evaluate the loss kfp <- kfold_predict(kf) rmse(y = kfp$y, yrep = kfp$yrep) } } \seealso{ \code{\link{kfold}} } brms/man/update.brmsfit_multiple.Rd0000644000176200001440000000214314213413565017125 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/update.R \name{update.brmsfit_multiple} \alias{update.brmsfit_multiple} \title{Update \pkg{brms} models based on multiple data sets} \usage{ \method{update}{brmsfit_multiple}(object, formula., newdata = NULL, ...) } \arguments{ \item{object}{An object of class \code{brmsfit_multiple}.} \item{formula.}{Changes to the formula; for details see \code{\link{update.formula}} and \code{\link{brmsformula}}.} \item{newdata}{List of \code{data.frames} to update the model with new data. Currently required even if the original data should be used.} \item{...}{Other arguments passed to \code{\link{update.brmsfit}} and \code{\link{brm_multiple}}.} } \description{ This method allows to update an existing \code{brmsfit_multiple} object. } \examples{ \dontrun{ library(mice) imp <- mice(nhanes2) # initially fit the model fit_imp1 <- brm_multiple(bmi ~ age + hyp + chl, data = imp, chains = 1) summary(fit_imp1) # update the model using fewer predictors fit_imp2 <- update(fit_imp1, formula. = . 
~ hyp + chl, newdata = imp) summary(fit_imp2) } } brms/man/stancode.brmsfit.Rd0000644000176200001440000000223414224021465015525 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make_stancode.R \name{stancode.brmsfit} \alias{stancode.brmsfit} \alias{stancode} \title{Extract Stan model code} \usage{ \method{stancode}{brmsfit}( object, version = TRUE, regenerate = NULL, threads = NULL, backend = NULL, ... ) stancode(object, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{version}{Logical; indicates if the first line containing the \pkg{brms} version number should be included. Defaults to \code{TRUE}.} \item{regenerate}{Logical; indicates if the Stan code should be regenerated with the current \pkg{brms} version. By default, \code{regenerate} will be \code{FALSE} unless required to be \code{TRUE} by other arguments.} \item{threads}{Controls whether the Stan code should be threaded. See \code{\link{threading}} for details.} \item{backend}{Controls the Stan backend. See \code{\link{brm}} for details.} \item{...}{Further arguments passed to \code{\link{make_stancode}} if the Stan code is regenerated.} } \value{ Stan model code for further processing. } \description{ Extract Stan code that was used to specify the model. } brms/man/Dirichlet.Rd0000644000176200001440000000146214275436221014177 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{Dirichlet} \alias{Dirichlet} \alias{ddirichlet} \alias{rdirichlet} \title{The Dirichlet Distribution} \usage{ ddirichlet(x, alpha, log = FALSE) rdirichlet(n, alpha) } \arguments{ \item{x}{Matrix of quantiles. Each row corresponds to one probability vector.} \item{alpha}{Matrix of positive shape parameters. 
Each row corresponds to one probability vector.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density function and random number generation for the Dirichlet distribution with shape parameter vector \code{alpha}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/print.brmsprior.Rd0000644000176200001440000000102614213413565015434 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{print.brmsprior} \alias{print.brmsprior} \title{Print method for \code{brmsprior} objects} \usage{ \method{print}{brmsprior}(x, show_df = NULL, ...) } \arguments{ \item{x}{An object of class \code{brmsprior}.} \item{show_df}{Logical; Print priors as a single \code{data.frame} (\code{TRUE}) or as a sequence of sampling statements (\code{FALSE})?} \item{...}{Currently ignored.} } \description{ Print method for \code{brmsprior} objects } 
If \code{NULL} all draws are used. If not specified, the number of posterior draws is chosen automatically. Ignored if \code{draw_ids} is not \code{NULL}.} \item{prefix}{The prefix of the \pkg{bayesplot} function to be applied. Either `"ppc"` (posterior predictive check; the default) or `"ppd"` (posterior predictive distribution), the latter being the same as the former except that the observed data is not shown for `"ppd"`.} \item{group}{Optional name of a factor variable in the model by which to stratify the ppc plot. This argument is required for ppc \code{*_grouped} types and ignored otherwise.} \item{x}{Optional name of a variable in the model. Only used for ppc types having an \code{x} argument and ignored otherwise.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{nsamples}{Deprecated alias of \code{ndraws}.} \item{subset}{Deprecated alias of \code{draw_ids}.} \item{...}{Further arguments passed to \code{\link{predict.brmsfit}} as well as to the PPC function specified in \code{type}.} } \value{ A ggplot object that can be further customized using the \pkg{ggplot2} package. } \description{ Perform posterior predictive checks with the help of the \pkg{bayesplot} package. } \details{ For a detailed explanation of each of the ppc functions, see the \code{\link[bayesplot:PPC-overview]{PPC}} documentation of the \pkg{\link[bayesplot:bayesplot-package]{bayesplot}} package. 
} \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson()) pp_check(fit) # shows dens_overlay plot by default pp_check(fit, type = "error_hist", ndraws = 11) pp_check(fit, type = "scatter_avg", ndraws = 100) pp_check(fit, type = "stat_2d") pp_check(fit, type = "rootogram") pp_check(fit, type = "loo_pit") ## get an overview of all valid types pp_check(fit, type = "xyz") ## get a plot without the observed data pp_check(fit, prefix = "ppd") } } brms/man/conditional_smooths.brmsfit.Rd0000644000176200001440000000775214213413565020022 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conditional_smooths.R \name{conditional_smooths.brmsfit} \alias{conditional_smooths.brmsfit} \alias{marginal_smooths} \alias{marginal_smooths.brmsfit} \alias{conditional_smooths} \title{Display Smooth Terms} \usage{ \method{conditional_smooths}{brmsfit}( x, smooths = NULL, int_conditions = NULL, prob = 0.95, spaghetti = FALSE, resolution = 100, too_far = 0, ndraws = NULL, draw_ids = NULL, nsamples = NULL, subset = NULL, probs = NULL, ... ) conditional_smooths(x, ...) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{smooths}{Optional character vector of smooth terms to display. If \code{NULL} (the default) all smooth terms are shown.} \item{int_conditions}{An optional named \code{list} whose elements are vectors of values of the variables specified in \code{effects}. At these values, predictions are evaluated. The names of \code{int_conditions} have to match the variable names exactly. Additionally, the elements of the vectors may be named themselves, in which case their names appear as labels for the conditions in the plots. Instead of vectors, functions returning vectors may be passed and are applied on the original values of the corresponding variable. 
If \code{NULL} (the default), predictions are evaluated at the \eqn{mean} and at \eqn{mean +/- sd} for numeric predictors and at all categories for factor-like predictors.} \item{prob}{A value between 0 and 1 indicating the desired probability to be covered by the uncertainty intervals. The default is 0.95.} \item{spaghetti}{Logical. Indicates if predictions should be visualized via spaghetti plots. Only applied for numeric predictors. If \code{TRUE}, it is recommended to set argument \code{ndraws} to a relatively small value (e.g., \code{100}) in order to reduce computation time.} \item{resolution}{Number of support points used to generate the plots. Higher resolution leads to smoother plots. Defaults to \code{100}. If \code{surface} is \code{TRUE}, this implies \code{10000} support points for interaction terms, so it might be necessary to reduce \code{resolution} when only few RAM is available.} \item{too_far}{Positive number. For surface plots only: Grid points that are too far away from the actual data points can be excluded from the plot. \code{too_far} determines what is too far. The grid is scaled into the unit square and then grid points more than \code{too_far} from the predictor variables are excluded. By default, all grid points are used. Ignored for non-surface plots.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{nsamples}{Deprecated alias of \code{ndraws}.} \item{subset}{Deprecated alias of \code{draw_ids}.} \item{probs}{(Deprecated) The quantiles to be used in the computation of uncertainty intervals. Please use argument \code{prob} instead.} \item{...}{Currently ignored.} } \value{ For the \code{brmsfit} method, an object of class \code{brms_conditional_effects}. 
See \code{\link{conditional_effects}} for more details and documentation of the related plotting function. } \description{ Display smooth \code{s} and \code{t2} terms of models fitted with \pkg{brms}. } \details{ Two-dimensional smooth terms will be visualized using either contour or raster plots. } \examples{ \dontrun{ set.seed(0) dat <- mgcv::gamSim(1, n = 200, scale = 2) fit <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) # show all smooth terms plot(conditional_smooths(fit), rug = TRUE, ask = FALSE) # show only the smooth term s(x2) plot(conditional_smooths(fit, smooths = "s(x2)"), ask = FALSE) # fit and plot a two-dimensional smooth term fit2 <- brm(y ~ t2(x0, x2), data = dat) ms <- conditional_smooths(fit2) plot(ms, stype = "contour") plot(ms, stype = "raster") } } brms/man/loo_predict.brmsfit.Rd0000644000176200001440000000616214213413565016240 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo_predict.R \name{loo_predict.brmsfit} \alias{loo_predict.brmsfit} \alias{loo_predict} \alias{loo_linpred} \alias{loo_predictive_interval} \alias{loo_linpred.brmsfit} \alias{loo_predictive_interval.brmsfit} \title{Compute Weighted Expectations Using LOO} \usage{ \method{loo_predict}{brmsfit}( object, type = c("mean", "var", "quantile"), probs = 0.5, psis_object = NULL, resp = NULL, ... ) \method{loo_linpred}{brmsfit}( object, type = c("mean", "var", "quantile"), probs = 0.5, psis_object = NULL, resp = NULL, ... ) \method{loo_predictive_interval}{brmsfit}(object, prob = 0.9, psis_object = NULL, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{type}{The statistic to be computed on the results. Can by either \code{"mean"} (default), \code{"var"}, or \code{"quantile"}.} \item{probs}{A vector of quantiles to compute. Only used if \code{type = quantile}.} \item{psis_object}{An optional object returned by \code{\link[loo]{psis}}. 
If \code{psis_object} is missing then \code{\link[loo]{psis}} is executed internally, which may be time consuming for models fit to very large datasets.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{...}{Optional arguments passed to the underlying methods that is \code{\link[brms:log_lik.brmsfit]{log_lik}}, as well as \code{\link[brms:posterior_predict.brmsfit]{posterior_predict}} or \code{\link[brms:posterior_linpred.brmsfit]{posterior_linpred}}.} \item{prob}{For \code{loo_predictive_interval}, a scalar in \eqn{(0,1)} indicating the desired probability mass to include in the intervals. The default is \code{prob = 0.9} (\eqn{90}\% intervals).} } \value{ \code{loo_predict} and \code{loo_linpred} return a vector with one element per observation. The only exception is if \code{type = "quantile"} and \code{length(probs) >= 2}, in which case a separate vector for each element of \code{probs} is computed and they are returned in a matrix with \code{length(probs)} rows and one column per observation. \code{loo_predictive_interval} returns a matrix with one row per observation and two columns. \code{loo_predictive_interval(..., prob = p)} is equivalent to \code{loo_predict(..., type = "quantile", probs = c(a, 1-a))} with \code{a = (1 - p)/2}, except it transposes the result and adds informative column names. } \description{ These functions are wrappers around the \code{\link[loo]{E_loo}} function of the \pkg{loo} package. 
} \examples{ \dontrun{ ## data from help("lm") ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14) trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69) d <- data.frame( weight = c(ctl, trt), group = gl(2, 10, 20, labels = c("Ctl", "Trt")) ) fit <- brm(weight ~ group, data = d) loo_predictive_interval(fit, prob = 0.8) ## optionally log-weights can be pre-computed and reused psis <- loo::psis(-log_lik(fit), cores = 2) loo_predictive_interval(fit, prob = 0.8, psis_object = psis) loo_predict(fit, type = "var", psis_object = psis) } } brms/man/fcor.Rd0000644000176200001440000000216414213413565013217 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{fcor} \alias{fcor} \title{Fixed residual correlation (FCOR) structures} \usage{ fcor(M) } \arguments{ \item{M}{Known correlation/covariance matrix of the response variable. If a vector is passed, it will be used as diagonal entries (variances) and correlations/covariances will be set to zero. The actual covariance matrix used in the likelihood is obtained by multiplying \code{M} by the square of the residual standard deviation parameter \code{sigma} estimated as part of the model.} } \value{ An object of class \code{'fcor_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up a fixed residual correlation (FCOR) term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with FCOR terms. 
} \examples{ \dontrun{ dat <- data.frame(y = rnorm(3)) V <- cbind(c(0.5, 0.3, 0.2), c(0.3, 1, 0.1), c(0.2, 0.1, 0.2)) fit <- brm(y ~ 1 + fcor(V), data = dat, data2 = list(V = V)) } } \seealso{ \code{\link{autocor-terms}} } brms/man/Shifted_Lognormal.Rd0000644000176200001440000000262614275436221015673 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{Shifted_Lognormal} \alias{Shifted_Lognormal} \alias{dshifted_lnorm} \alias{pshifted_lnorm} \alias{qshifted_lnorm} \alias{rshifted_lnorm} \title{The Shifted Log Normal Distribution} \usage{ dshifted_lnorm(x, meanlog = 0, sdlog = 1, shift = 0, log = FALSE) pshifted_lnorm( q, meanlog = 0, sdlog = 1, shift = 0, lower.tail = TRUE, log.p = FALSE ) qshifted_lnorm( p, meanlog = 0, sdlog = 1, shift = 0, lower.tail = TRUE, log.p = FALSE ) rshifted_lnorm(n, meanlog = 0, sdlog = 1, shift = 0) } \arguments{ \item{x, q}{Vector of quantiles.} \item{meanlog}{Vector of means.} \item{sdlog}{Vector of standard deviations.} \item{shift}{Vector of shifts.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{p}{Vector of probabilities.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, quantile function and random generation for the shifted log normal distribution with mean \code{meanlog}, standard deviation \code{sdlog}, and shift parameter \code{shift}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. 
} brms/man/parnames.Rd0000644000176200001440000000070414160105076014066 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_samples.R \name{parnames} \alias{parnames} \alias{parnames.brmsfit} \title{Extract Parameter Names} \usage{ parnames(x, ...) } \arguments{ \item{x}{An \R object} \item{...}{Further arguments passed to or from other methods.} } \value{ A character vector containing the parameter names of the model. } \description{ Extract all parameter names of a given model. } brms/man/ar.Rd0000644000176200001440000000325614361545260012675 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{ar} \alias{ar} \title{Set up AR(p) correlation structures} \usage{ ar(time = NA, gr = NA, p = 1, cov = FALSE) } \arguments{ \item{time}{An optional time variable specifying the time ordering of the observations. By default, the existing order of the observations in the data is used.} \item{gr}{An optional grouping variable. If specified, the correlation structure is assumed to apply only to observations within the same grouping level.} \item{p}{A non-negative integer specifying the autoregressive (AR) order of the ARMA structure. Default is \code{1}.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. If the model family does not have natural residuals, latent residuals are added automatically. If \code{FALSE} (the default), a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{'arma_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. 
} \description{ Set up an autoregressive (AR) term of order p in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with AR terms. } \examples{ \dontrun{ data("LakeHuron") LakeHuron <- as.data.frame(LakeHuron) fit <- brm(x ~ ar(p = 2), data = LakeHuron) summary(fit) } } \seealso{ \code{\link{autocor-terms}}, \code{\link{arma}}, \code{\link{ma}} } brms/man/ngrps.brmsfit.Rd0000644000176200001440000000075414160105076015063 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{ngrps.brmsfit} \alias{ngrps.brmsfit} \alias{ngrps} \title{Number of Grouping Factor Levels} \usage{ \method{ngrps}{brmsfit}(object, ...) ngrps(object, ...) } \arguments{ \item{object}{An \R object.} \item{...}{Currently ignored.} } \value{ A named list containing the number of levels per grouping factor. } \description{ Extract the number of levels of one or more grouping factors. } brms/man/is.brmsfit_multiple.Rd0000644000176200001440000000054114160105076016252 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-class.R \name{is.brmsfit_multiple} \alias{is.brmsfit_multiple} \title{Checks if argument is a \code{brmsfit_multiple} object} \usage{ is.brmsfit_multiple(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{brmsfit_multiple} object } brms/man/prior_draws.brmsfit.Rd0000644000176200001440000000354514213413565016272 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/prior_draws.R \name{prior_draws.brmsfit} \alias{prior_draws.brmsfit} \alias{prior_samples} \alias{prior_draws} \title{Extract Prior Draws} \usage{ \method{prior_draws}{brmsfit}(x, variable = NULL, pars = NULL, ...) prior_draws(x, ...) prior_samples(x, ...) 
} \arguments{ \item{x}{An \code{R} object typically of class \code{brmsfit}.} \item{variable}{A character vector providing the variables to extract. By default, all variables are extracted.} \item{pars}{Deprecated alias of \code{variable}. For reasons of backwards compatibility, \code{pars} is interpreted as a vector of regular expressions by default unless \code{fixed = TRUE} is specified.} \item{...}{Arguments passed to individual methods (if applicable).} } \value{ A \code{data.frame} containing the prior draws. } \description{ Extract prior draws of specified parameters } \details{ To make use of this function, the model must contain draws of prior distributions. This can be ensured by setting \code{sample_prior = TRUE} in function \code{brm}. Priors of certain parameters cannot be saved for technical reasons. For instance, this is the case for the population-level intercept, which is only computed after fitting the model by default. If you want to treat the intercept as part of all the other regression coefficients, so that sampling from its prior becomes possible, use \code{... ~ 0 + Intercept + ...} in the formulas. } \examples{ \dontrun{ fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "cumulative", prior = set_prior("normal(0,2)", class = "b"), sample_prior = TRUE) # extract all prior draws draws1 <- prior_draws(fit) head(draws1) # extract prior draws for the coefficient of 'treat' draws2 <- prior_draws(fit, "b_treat") head(draws2) } } brms/man/autocor-terms.Rd0000644000176200001440000000260114361545260015070 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{autocor-terms} \alias{autocor-terms} \title{Autocorrelation structures} \description{ Specify autocorrelation terms in \pkg{brms} models. 
Currently supported terms are \code{\link{arma}}, \code{\link{ar}}, \code{\link{ma}}, \code{\link{cosy}}, \code{\link{unstr}}, \code{\link{sar}}, \code{\link{car}}, and \code{\link{fcor}}. Terms can be directly specified within the formula, or passed to the \code{autocor} argument of \code{\link{brmsformula}} in the form of a one-sided formula. For deprecated ways of specifying autocorrelation terms, see \code{\link{cor_brms}}. } \details{ The autocor term functions are almost solely useful when called in formulas passed to the \pkg{brms} package. They do not evaluate their arguments -- but exist purely to help set up a model with autocorrelation terms. } \examples{ # specify autocor terms within the formula y ~ x + arma(p = 1, q = 1) + car(M) # specify autocor terms in the 'autocor' argument bf(y ~ x, autocor = ~ arma(p = 1, q = 1) + car(M)) # specify autocor terms via 'acformula' bf(y ~ x) + acformula(~ arma(p = 1, q = 1) + car(M)) } \seealso{ \code{\link{brmsformula}}, \code{\link{acformula}}, \code{\link{arma}}, \code{\link{ar}}, \code{\link{ma}}, \code{\link{cosy}}, \code{\link{unstr}}, \code{\link{sar}}, \code{\link{car}}, \code{\link{fcor}} } brms/man/ZeroInflated.Rd0000644000176200001440000000410014275473342014653 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{ZeroInflated} \alias{ZeroInflated} \alias{dzero_inflated_poisson} \alias{pzero_inflated_poisson} \alias{dzero_inflated_negbinomial} \alias{pzero_inflated_negbinomial} \alias{dzero_inflated_binomial} \alias{pzero_inflated_binomial} \alias{dzero_inflated_beta_binomial} \alias{pzero_inflated_beta_binomial} \alias{dzero_inflated_beta} \alias{pzero_inflated_beta} \title{Zero-Inflated Distributions} \usage{ dzero_inflated_poisson(x, lambda, zi, log = FALSE) pzero_inflated_poisson(q, lambda, zi, lower.tail = TRUE, log.p = FALSE) dzero_inflated_negbinomial(x, mu, shape, zi, log = FALSE) pzero_inflated_negbinomial(q, mu, shape, zi, lower.tail
= TRUE, log.p = FALSE) dzero_inflated_binomial(x, size, prob, zi, log = FALSE) pzero_inflated_binomial(q, size, prob, zi, lower.tail = TRUE, log.p = FALSE) dzero_inflated_beta_binomial(x, size, mu, phi, zi, log = FALSE) pzero_inflated_beta_binomial( q, size, mu, phi, zi, lower.tail = TRUE, log.p = FALSE ) dzero_inflated_beta(x, shape1, shape2, zi, log = FALSE) pzero_inflated_beta(q, shape1, shape2, zi, lower.tail = TRUE, log.p = FALSE) } \arguments{ \item{x}{Vector of quantiles.} \item{zi}{zero-inflation probability} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{q}{Vector of quantiles.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{mu, lambda}{location parameter} \item{shape, shape1, shape2}{shape parameter} \item{size}{number of trials} \item{prob}{probability of success on each trial} \item{phi}{precision parameter} } \description{ Density and distribution functions for zero-inflated distributions. } \details{ The density of a zero-inflated distribution can be specified as follows. If \eqn{x = 0} set \eqn{f(x) = \theta + (1 - \theta) * g(0)}. Else set \eqn{f(x) = (1 - \theta) * g(x)}, where \eqn{g(x)} is the density of the non-zero-inflated part. } brms/man/predict.brmsfit.Rd0000644000176200001440000001130014213413565015355 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_predict.R \name{predict.brmsfit} \alias{predict.brmsfit} \title{Draws from the Posterior Predictive Distribution} \usage{ \method{predict}{brmsfit}( object, newdata = NULL, re_formula = NULL, transform = NULL, resp = NULL, negative_rt = FALSE, ndraws = NULL, draw_ids = NULL, sort = FALSE, ntrys = 5, cores = NULL, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... 
) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{transform}{(Deprecated) A function or a character string naming a function to be applied on the predicted responses before summary statistics are computed.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{negative_rt}{Only relevant for Wiener diffusion models. A flag indicating whether response times of responses on the lower boundary should be returned as negative values. This allows to distinguish responses on the upper and lower boundary. Defaults to \code{FALSE}.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{ntrys}{Parameter used in rejection sampling for truncated discrete models only (defaults to \code{5}). See Details for more information.} \item{cores}{Number of cores (defaults to \code{1}). 
On non-Windows systems, this argument can be set globally via the \code{mc.cores} option.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An \code{array} of predicted response values. If \code{summary = FALSE} the output resembles those of \code{\link{posterior_predict.brmsfit}}. If \code{summary = TRUE} the output depends on the family: For categorical and ordinal families, the output is an N x C matrix, where N is the number of observations, C is the number of categories, and the values are predicted category probabilities. For all other families, the output is a N x E matrix where E = \code{2 + length(probs)} is the number of summary statistics: The \code{Estimate} column contains point estimates (either mean or median depending on argument \code{robust}), while the \code{Est.Error} column contains uncertainty estimates (either standard deviation or median absolute deviation depending on argument \code{robust}). The remaining columns starting with \code{Q} contain quantile estimates as specified via argument \code{probs}. } \description{ This method is an alias of \code{\link{posterior_predict.brmsfit}} with additional arguments for obtaining summaries of the computed draws. 
} \examples{ \dontrun{ ## fit a model fit <- brm(time | cens(censored) ~ age + sex + (1 + age || patient), data = kidney, family = "exponential", init = "0") ## predicted responses pp <- predict(fit) head(pp) ## predicted responses excluding the group-level effect of age pp <- predict(fit, re_formula = ~ (1 | patient)) head(pp) ## predicted responses of patient 1 for new data newdata <- data.frame( sex = factor(c("male", "female")), age = c(20, 50), patient = c(1, 1) ) predict(fit, newdata = newdata) } } \seealso{ \code{\link{posterior_predict.brmsfit}} } brms/man/bridge_sampler.brmsfit.Rd0000644000176200001440000000500614361545260016712 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bridgesampling.R \name{bridge_sampler.brmsfit} \alias{bridge_sampler.brmsfit} \alias{bridge_sampler} \title{Log Marginal Likelihood via Bridge Sampling} \usage{ \method{bridge_sampler}{brmsfit}(samples, recompile = FALSE, ...) } \arguments{ \item{samples}{A \code{brmsfit} object.} \item{recompile}{Logical, indicating whether the Stan model should be recompiled. This may be necessary if you are running bridge sampling on another machine than the one used to fit the model. No recompilation is done by default.} \item{...}{Additional arguments passed to \code{\link[bridgesampling:bridge_sampler]{bridge_sampler.stanfit}}.} } \description{ Computes log marginal likelihood via bridge sampling, which can be used in the computation of bayes factors and posterior model probabilities. The \code{brmsfit} method is just a thin wrapper around the corresponding method for \code{stanfit} objects. } \details{ Computing the marginal likelihood requires samples of all variables defined in Stan's \code{parameters} block to be saved. Otherwise \code{bridge_sampler} cannot be computed. Thus, please set \code{save_pars = save_pars(all = TRUE)} in the call to \code{brm}, if you are planning to apply \code{bridge_sampler} to your models. 
The computation of marginal likelihoods based on bridge sampling requires a lot more posterior draws than usual. A good conservative rule of thumb is perhaps 10-fold more draws (read: the default of 4000 draws may not be enough in many cases). If not enough posterior draws are provided, the bridge sampling algorithm tends to be unstable leading to considerably different results each time it is run. We thus recommend running \code{bridge_sampler} multiple times to check the stability of the results. More details are provided under \code{\link[bridgesampling:bridge_sampler]{bridgesampling::bridge_sampler}}. } \examples{ \dontrun{ # model with the treatment effect fit1 <- brm( count ~ zAge + zBase + Trt, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_pars = save_pars(all = TRUE) ) summary(fit1) bridge_sampler(fit1) # model without the treatment effect fit2 <- brm( count ~ zAge + zBase, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_pars = save_pars(all = TRUE) ) summary(fit2) bridge_sampler(fit2) } } \seealso{ \code{ \link[brms:bayes_factor.brmsfit]{bayes_factor}, \link[brms:post_prob.brmsfit]{post_prob} } } brms/man/loo_subsample.brmsfit.Rd0000644000176200001440000000312214213413565016572 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo_subsample.R \name{loo_subsample.brmsfit} \alias{loo_subsample.brmsfit} \alias{loo_subsample} \title{Efficient approximate leave-one-out cross-validation (LOO) using subsampling} \usage{ \method{loo_subsample}{brmsfit}(x, ..., compare = TRUE, resp = NULL, model_names = NULL) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions.
In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{compare}{A flag indicating if the information criteria of the models should be compared to each other via \code{\link{loo_compare}}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \description{ Efficient approximate leave-one-out cross-validation (LOO) using subsampling } \details{ More details can be found on \code{\link[loo:loo_subsample]{loo_subsample}}. } \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler) (loo1 <- loo_subsample(fit1)) # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) (loo2 <- loo_subsample(fit2)) # compare both models loo_compare(loo1, loo2) } } brms/man/posterior_predict.brmsfit.Rd0000644000176200001440000001243114277205214017471 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_predict.R \name{posterior_predict.brmsfit} \alias{posterior_predict.brmsfit} \alias{posterior_predict} \title{Draws from the Posterior Predictive Distribution} \usage{ \method{posterior_predict}{brmsfit}( object, newdata = NULL, re_formula = NULL, re.form = NULL, transform = NULL, resp = NULL, negative_rt = FALSE, ndraws = NULL, draw_ids = NULL, sort = FALSE, ntrys = 5, cores = NULL, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. 
This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{re.form}{Alias of \code{re_formula}.} \item{transform}{(Deprecated) A function or a character string naming a function to be applied on the predicted responses before summary statistics are computed.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{negative_rt}{Only relevant for Wiener diffusion models. A flag indicating whether response times of responses on the lower boundary should be returned as negative values. This allows to distinguish responses on the upper and lower boundary. Defaults to \code{FALSE}.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{ntrys}{Parameter used in rejection sampling for truncated discrete models only (defaults to \code{5}). See Details for more information.} \item{cores}{Number of cores (defaults to \code{1}). On non-Windows systems, this argument can be set globally via the \code{mc.cores} option.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An \code{array} of draws. 
In univariate models, the output is an S x N matrix, where S is the number of posterior draws and N is the number of observations. In multivariate models, an additional dimension is added to the output which indexes along the different response variables. } \description{ Compute posterior draws of the posterior predictive distribution. Can be performed for the data used to fit the model (posterior predictive checks) or for new data. By definition, these draws have higher variance than draws of the expected value of the posterior predictive distribution computed by \code{\link{posterior_epred.brmsfit}}. This is because the residual error is incorporated in \code{posterior_predict}. However, the estimated means of both methods averaged across draws should be very similar. } \details{ \code{NA} values within factors in \code{newdata} are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding. In multilevel models, it is possible to allow new levels of grouping factors to be used in the predictions. This can be controlled via argument \code{allow_new_levels}. New levels can be sampled in multiple ways, which can be controlled via argument \code{sample_new_levels}. Both of these arguments are documented in \code{\link{prepare_predictions}} along with several other useful arguments to control specific aspects of the predictions. For truncated discrete models only: In the absence of any general algorithm to sample from truncated discrete distributions, rejection sampling is applied in this special case. This means that values are sampled until a value lies within the defined truncation boundaries. In practice, this procedure may be rather slow (especially in \R). Thus, we try to do approximate rejection sampling by sampling each value \code{ntrys} times and then selecting a valid value. If all values are invalid, the closest boundary is used, instead.
If there are more than a few of these pathological cases, a warning will occur suggesting to increase argument \code{ntrys}. } \examples{ \dontrun{ ## fit a model fit <- brm(time | cens(censored) ~ age + sex + (1 + age || patient), data = kidney, family = "exponential", init = "0") ## predicted responses pp <- posterior_predict(fit) str(pp) ## predicted responses excluding the group-level effect of age pp <- posterior_predict(fit, re_formula = ~ (1 | patient)) str(pp) ## predicted responses of patient 1 for new data newdata <- data.frame( sex = factor(c("male", "female")), age = c(20, 50), patient = c(1, 1) ) pp <- posterior_predict(fit, newdata = newdata) str(pp) } } brms/man/as.mcmc.brmsfit.Rd0000644000176200001440000000246314213413565015256 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_samples.R \name{as.mcmc.brmsfit} \alias{as.mcmc.brmsfit} \alias{as.mcmc} \title{Extract posterior samples for use with the \pkg{coda} package} \usage{ \method{as.mcmc}{brmsfit}( x, pars = NA, fixed = FALSE, combine_chains = FALSE, inc_warmup = FALSE, ... ) } \arguments{ \item{x}{An \code{R} object typically of class \code{brmsfit}} \item{pars}{Names of parameters for which posterior samples should be returned, as given by a character vector or regular expressions. By default, all posterior samples of all parameters are extracted.} \item{fixed}{Indicates whether parameter names should be matched exactly (\code{TRUE}) or treated as regular expressions (\code{FALSE}). Default is \code{FALSE}.} \item{combine_chains}{Indicates whether chains should be combined.} \item{inc_warmup}{Indicates if the warmup samples should be included. Default is \code{FALSE}. Warmup samples are used to tune the parameters of the sampling algorithm and should not be analyzed.} \item{...}{currently unused} } \value{ If \code{combine_chains = TRUE} an \code{mcmc} object is returned. 
If \code{combine_chains = FALSE} an \code{mcmc.list} object is returned. } \description{ Extract posterior samples for use with the \pkg{coda} package } brms/man/kfold.brmsfit.Rd0000644000176200001440000001667214361545260015045 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/kfold.R \name{kfold.brmsfit} \alias{kfold.brmsfit} \alias{kfold} \title{K-Fold Cross-Validation} \usage{ \method{kfold}{brmsfit}( x, ..., K = 10, Ksub = NULL, folds = NULL, group = NULL, exact_loo = NULL, compare = TRUE, resp = NULL, model_names = NULL, save_fits = FALSE, recompile = NULL, future_args = list() ) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{Further arguments passed to \code{\link{brm}}.} \item{K}{The number of subsets of equal (if possible) size into which the data will be partitioned for performing \eqn{K}-fold cross-validation. The model is refit \code{K} times, each time leaving out one of the \code{K} subsets. If \code{K} is equal to the total number of observations in the data then \eqn{K}-fold cross-validation is equivalent to exact leave-one-out cross-validation.} \item{Ksub}{Optional number of subsets (of those subsets defined by \code{K}) to be evaluated. If \code{NULL} (the default), \eqn{K}-fold cross-validation will be performed on all subsets. If \code{Ksub} is a single integer, \code{Ksub} subsets (out of all \code{K}) subsets will be randomly chosen. If \code{Ksub} consists of multiple integers or a one-dimensional array (created via \code{as.array}) potentially of length one, the corresponding subsets will be used. This argument is primarily useful, if evaluation of all subsets is infeasible for some reason.} \item{folds}{Determines how the subsets are being constructed. Possible values are \code{NULL} (the default), \code{"stratified"}, \code{"grouped"}, or \code{"loo"}. May also be a vector of length equal to the number of observations in the data. Alters the way \code{group} is handled. 
More information is provided in the 'Details' section.} \item{group}{Optional name of a grouping variable or factor in the model. What exactly is done with this variable depends on argument \code{folds}. More information is provided in the 'Details' section.} \item{exact_loo}{Deprecated! Please use \code{folds = "loo"} instead.} \item{compare}{A flag indicating if the information criteria of the models should be compared to each other via \code{\link{loo_compare}}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} \item{save_fits}{If \code{TRUE}, a component \code{fits} is added to the returned object to store the cross-validated \code{brmsfit} objects and the indices of the omitted observations for each fold. Defaults to \code{FALSE}.} \item{recompile}{Logical, indicating whether the Stan model should be recompiled. This may be necessary if you are running \code{reloo} on another machine than the one used to fit the model.} \item{future_args}{A list of further arguments passed to \code{\link[future:future]{future}} for additional control over parallel execution if activated.} } \value{ \code{kfold} returns an object that has a similar structure as the objects returned by the \code{loo} and \code{waic} methods and can be used with the same post-processing functions. } \description{ Perform exact K-fold cross-validation by refitting the model \eqn{K} times each leaving out one-\eqn{K}th of the original data. Folds can be run in parallel using the \pkg{future} package. } \details{ The \code{kfold} function performs exact \eqn{K}-fold cross-validation. First the data are partitioned into \eqn{K} folds (i.e. subsets) of equal (or as close to equal as possible) size by default. 
Then the model is refit \eqn{K} times, each time leaving out one of the \code{K} subsets. If \eqn{K} is equal to the total number of observations in the data then \eqn{K}-fold cross-validation is equivalent to exact leave-one-out cross-validation (to which \code{loo} is an efficient approximation). The \code{compare_ic} function is also compatible with the objects returned by \code{kfold}. The subsets can be constructed in multiple different ways: \itemize{ \item If both \code{folds} and \code{group} are \code{NULL}, the subsets are randomly chosen so that they have equal (or as close to equal as possible) size. \item If \code{folds} is \code{NULL} but \code{group} is specified, the data is split up into subsets, each time omitting all observations of one of the factor levels, while ignoring argument \code{K}. \item If \code{folds = "stratified"} the subsets are stratified after \code{group} using \code{\link[loo:kfold-helpers]{loo::kfold_split_stratified}}. \item If \code{folds = "grouped"} the subsets are split by \code{group} using \code{\link[loo:kfold-helpers]{loo::kfold_split_grouped}}. \item If \code{folds = "loo"} exact leave-one-out cross-validation will be performed and \code{K} will be ignored. Further, if \code{group} is specified, all observations corresponding to the factor level of the currently predicted single value are omitted. Thus, in this case, the predicted values are only a subset of the omitted ones. \item If \code{folds} is a numeric vector, it must contain one element per observation in the data. Each element of the vector is an integer in \code{1:K} indicating to which of the \code{K} folds the corresponding observation belongs. There are some convenience functions available in the \pkg{loo} package that create integer vectors to use for this purpose (see the Examples section below and also the \link[loo:kfold-helpers]{kfold-helpers} page). 
} When running \code{kfold} on a \code{brmsfit} created with the \pkg{cmdstanr} backend in a different \R session, several recompilations will be triggered because by default, \pkg{cmdstanr} writes the model executable to a temporary directory. To avoid that, set option \code{"cmdstanr_write_stan_file_dir"} to a nontemporary path of your choice before creating the original \code{brmsfit} (see section 'Examples' below). } \examples{ \dontrun{ fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson()) # throws warning about some pareto k estimates being too high (loo1 <- loo(fit1)) # perform 10-fold cross validation (kfold1 <- kfold(fit1, chains = 1)) # use the future package for parallelization library(future) plan(multiprocess) kfold(fit1, chains = 1) ## to avoid recompilations when running kfold() on a 'cmdstanr'-backend fit ## in a fresh R session, set option 'cmdstanr_write_stan_file_dir' before ## creating the initial 'brmsfit' ## CAUTION: the following code creates some files in the current working ## directory: two 'model_.stan' files, one 'model_(.exe)' ## executable, and one 'fit_cmdstanr_.rds' file set.seed(7) fname <- paste0("fit_cmdstanr_", sample.int(.Machine$integer.max, 1)) options(cmdstanr_write_stan_file_dir = getwd()) fit_cmdstanr <- brm(rate ~ conc + state, data = Puromycin, backend = "cmdstanr", file = fname) # now restart the R session and run the following (after attaching 'brms') set.seed(7) fname <- paste0("fit_cmdstanr_", sample.int(.Machine$integer.max, 1)) fit_cmdstanr <- brm(rate ~ conc + state, data = Puromycin, backend = "cmdstanr", file = fname) kfold_cmdstanr <- kfold(fit_cmdstanr, K = 2) } } \seealso{ \code{\link{loo}}, \code{\link{reloo}} } brms/man/waic.brmsfit.Rd0000644000176200001440000000555214213413565014662 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{waic.brmsfit} \alias{waic.brmsfit} \alias{waic} \alias{WAIC} 
\alias{WAIC.brmsfit} \title{Widely Applicable Information Criterion (WAIC)} \usage{ \method{waic}{brmsfit}( x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, model_names = NULL ) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{compare}{A flag indicating if the information criteria of the models should be compared to each other via \code{\link{loo_compare}}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{pointwise}{A flag indicating whether to compute the full log-likelihood matrix at once or separately for each observation. The latter approach is usually considerably slower but requires much less working memory. Accordingly, if one runs into memory issues, \code{pointwise = TRUE} is the way to go.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \value{ If just one object is provided, an object of class \code{loo}. If multiple objects are provided, an object of class \code{loolist}. } \description{ Compute the widely applicable information criterion (WAIC) based on the posterior likelihood using the \pkg{loo} package. For more details see \code{\link[loo:waic]{waic}}. } \details{ See \code{\link{loo_compare}} for details on model comparisons. For \code{brmsfit} objects, \code{WAIC} is an alias of \code{waic}. Use method \code{\link[brms:add_criterion]{add_criterion}} to store information criteria in the fitted model object for later usage. 
} \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler) (waic1 <- waic(fit1)) # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) (waic2 <- waic(fit2)) # compare both models loo_compare(waic1, waic2) } } \references{ Vehtari, A., Gelman, A., & Gabry, J. (2016). Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC. In Statistics and Computing, doi:10.1007/s11222-016-9696-4. arXiv preprint arXiv:1507.04544. Gelman, A., Hwang, J., & Vehtari, A. (2014). Understanding predictive information criteria for Bayesian models. Statistics and Computing, 24, 997-1016. Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation and widely applicable information criterion in singular learning theory. The Journal of Machine Learning Research, 11, 3571-3594. } brms/man/cor_sar.Rd0000644000176200001440000000335114213413565013715 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_sar} \alias{cor_sar} \alias{cor_lagsar} \alias{cor_errorsar} \title{(Deprecated) Spatial simultaneous autoregressive (SAR) structures} \usage{ cor_sar(W, type = c("lag", "error")) cor_lagsar(W) cor_errorsar(W) } \arguments{ \item{W}{An object specifying the spatial weighting matrix. Can be either the spatial weight matrix itself or an object of class \code{listw} or \code{nb}, from which the spatial weighting matrix can be computed.} \item{type}{Type of the SAR structure. Either \code{"lag"} (for SAR of the response values) or \code{"error"} (for SAR of the residuals).} } \value{ An object of class \code{cor_sar} to be used in calls to \code{\link{brm}}. } \description{ These functions are deprecated. Please see \code{\link{sar}} for the new syntax.
These functions are constructors for the \code{cor_sar} class implementing spatial simultaneous autoregressive structures. The \code{lagsar} structure implements SAR of the response values: \deqn{y = \rho W y + \eta + e} The \code{errorsar} structure implements SAR of the residuals: \deqn{y = \eta + u, u = \rho W u + e} In the above equations, \eqn{\eta} is the predictor term and \eqn{e} are independent normally or t-distributed residuals. } \details{ Currently, only families \code{gaussian} and \code{student} support SAR structures. } \examples{ \dontrun{ data(oldcol, package = "spdep") fit1 <- brm(CRIME ~ INC + HOVAL, data = COL.OLD, autocor = cor_lagsar(COL.nb), chains = 2, cores = 2) summary(fit1) plot(fit1) fit2 <- brm(CRIME ~ INC + HOVAL, data = COL.OLD, autocor = cor_errorsar(COL.nb), chains = 2, cores = 2) summary(fit2) plot(fit2) } } brms/man/rename_pars.Rd0000644000176200001440000000177414424476256014602 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rename_pars.R \name{rename_pars} \alias{rename_pars} \title{Rename parameters in brmsfit objects} \usage{ rename_pars(x) } \arguments{ \item{x}{A \code{brmsfit} object.} } \value{ A \code{brmsfit} object with adjusted parameter names. } \description{ Rename parameters within the \code{stanfit} object after model fitting to ensure reasonable parameter names. This function is usually called automatically by \code{\link{brm}} and users will rarely be required to call it themselves. } \details{ Function \code{rename_pars} is a deprecated alias of \code{rename_pars}. 
} \examples{ \dontrun{ # fit a model manually via rstan scode <- make_stancode(count ~ Trt, data = epilepsy) sdata <- make_standata(count ~ Trt, data = epilepsy) stanfit <- rstan::stan(model_code = scode, data = sdata) # feed the Stan model back into brms fit <- brm(count ~ Trt, data = epilepsy, empty = TRUE) fit$fit <- stanfit fit <- rename_pars(fit) summary(fit) } } brms/man/SkewNormal.Rd0000644000176200001440000000337314275436221014355 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{SkewNormal} \alias{SkewNormal} \alias{dskew_normal} \alias{pskew_normal} \alias{qskew_normal} \alias{rskew_normal} \title{The Skew-Normal Distribution} \usage{ dskew_normal( x, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, log = FALSE ) pskew_normal( q, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, lower.tail = TRUE, log.p = FALSE ) qskew_normal( p, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, lower.tail = TRUE, log.p = FALSE, tol = 1e-08 ) rskew_normal(n, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of mean values.} \item{sigma}{Vector of standard deviation values.} \item{alpha}{Vector of skewness values.} \item{xi}{Optional vector of location values. If \code{NULL} (the default), will be computed internally.} \item{omega}{Optional vector of scale values. If \code{NULL} (the default), will be computed internally.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). 
Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{p}{Vector of probabilities.} \item{tol}{Tolerance of the approximation used in the computation of quantiles.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, and random generation for the skew-normal distribution with mean \code{mu}, standard deviation \code{sigma}, and skewness \code{alpha}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/DESCRIPTION0000644000176200001440000000667414504354271012745 0ustar liggesusersPackage: brms Encoding: UTF-8 Type: Package Title: Bayesian Regression Models using 'Stan' Version: 2.20.4 Date: 2023-09-25 Authors@R: c(person("Paul-Christian", "Bürkner", email = "paul.buerkner@gmail.com", role = c("aut", "cre")), person("Jonah", "Gabry", role = c("ctb")), person("Sebastian", "Weber", role = c("ctb")), person("Andrew", "Johnson", role = c("ctb")), person("Martin", "Modrak", role = c("ctb")), person("Hamada S.", "Badr", role = c("ctb")), person("Frank", "Weber", role = c("ctb")), person("Mattan S.", "Ben-Shachar", role = c("ctb")), person("Hayden", "Rabel", role = c("ctb")), person("Simon C.", "Mills", role = c("ctb")), person("Stephen", "Wild", role = c("ctb"))) Depends: R (>= 3.6.0), Rcpp (>= 0.12.0), methods Imports: rstan (>= 2.26.0), ggplot2 (>= 2.0.0), loo (>= 2.3.1), posterior (>= 1.0.0), Matrix (>= 1.1.1), mgcv (>= 1.8-13), rstantools (>= 2.1.1), bayesplot (>= 1.5.0), shinystan (>= 2.4.0), bridgesampling (>= 0.3-0), glue (>= 1.3.0), rlang (>= 1.0.0), future (>= 1.19.0), matrixStats, nleqslv, nlme, coda, abind, stats, utils, parallel, grDevices, backports Suggests: testthat (>= 0.9.1), emmeans (>= 1.4.2), cmdstanr (>= 0.5.0), projpred (>= 2.0.0), RWiener, rtdists, extraDistr, processx, mice, spdep, mnormt, lme4, MCMCglmm, splines2, ape, arm, statmod, digest, diffobj, R.rsp, gtable, shiny, knitr, rmarkdown Description: 
Fit Bayesian generalized (non-)linear multivariate multilevel models using 'Stan' for full Bayesian inference. A wide range of distributions and link functions are supported, allowing users to fit -- among others -- linear, robust linear, count data, survival, response times, ordinal, zero-inflated, hurdle, and even self-defined mixture models all in a multilevel context. Further modeling options include both theory-driven and data-driven non-linear terms, auto-correlation structures, censoring and truncation, meta-analytic standard errors, and quite a few more. In addition, all parameters of the response distribution can be predicted in order to perform distributional regression. Prior specifications are flexible and explicitly encourage users to apply prior distributions that actually reflect their prior knowledge. Models can easily be evaluated and compared using several methods assessing posterior or prior predictions. References: Bürkner (2017) ; Bürkner (2018) ; Bürkner (2021) ; Carpenter et al. (2017) . LazyData: true NeedsCompilation: no License: GPL-2 URL: https://github.com/paul-buerkner/brms, https://discourse.mc-stan.org/, https://paul-buerkner.github.io/brms/ BugReports: https://github.com/paul-buerkner/brms/issues Additional_repositories: https://mc-stan.org/r-packages/ VignetteBuilder: knitr, R.rsp RoxygenNote: 7.2.3 Packaged: 2023-09-25 11:35:40 UTC; paul.buerkner Author: Paul-Christian Bürkner [aut, cre], Jonah Gabry [ctb], Sebastian Weber [ctb], Andrew Johnson [ctb], Martin Modrak [ctb], Hamada S. Badr [ctb], Frank Weber [ctb], Mattan S. Ben-Shachar [ctb], Hayden Rabel [ctb], Simon C. 
Mills [ctb], Stephen Wild [ctb] Maintainer: Paul-Christian Bürkner Repository: CRAN Date/Publication: 2023-09-25 19:00:09 UTC brms/build/0000755000176200001440000000000014504270212012311 5ustar liggesusersbrms/build/vignette.rds0000644000176200001440000000110514504270212014645 0ustar liggesusersTQo020V!BZt !ސ\ ǎle9UAFs@~zeJƐk?л&^hLZEK dחR+퇧) hȸbKP /lTwY#exYFM x4r.Rjr 3) { stop("At most 3 dimensions are allowed here.") } else { need_drop <- FALSE } ndraws <- dim(x)[1] nobsv <- dim(x)[2] ncat <- dim(x)[3] out <- aperm( array( sapply(seq_len(nobsv), function(i) { out_i <- log_softmax(slice(x, 2, i)) if (!log) { out_i <- exp(out_i) } out_i }, simplify = "array"), dim = c(ndraws, ncat, nobsv) ), perm = c(1, 3, 2) ) # Quick-and-dirty solution to drop the margin for a single observation (but # only if the input object was not a 3-dimensional array): if (need_drop) { return(slice(out, 2, 1)) } out } environment(inv_link_categorical_ch) <- as.environment(asNamespace("brms")) brms/tests/testthat/helpers/simopts_catlike.R0000644000176200001440000000011614160105076021174 0ustar liggesusersset.seed(1234) ndraws_vec <- c(1, 5) nobsv_vec <- c(1, 4) ncat_vec <- c(2, 3) brms/tests/testthat/helpers/link_ordinal_ch.R0000644000176200001440000000465314160105076021133 0ustar liggesuserslink_ch <- function(x, link) { # switch() would be more straightforward, but for testing purposes, use if () # here: if (link == "logit") { return(qlogis(x)) } else if (link == "probit") { return(qnorm(x)) } else if (link == "cauchit") { return(qcauchy(x)) } else if (link == "cloglog") { return(log(-log(1 - x))) } else { stop("Unknown link.") } } # Very similar to link_cumulative(), but iterates over the observations: link_cumulative_ch <- function(x, link) { # For testing purposes, only allow 3-dimensional arrays here: stopifnot(length(dim(x)) == 3) ndraws <- dim(x)[1] nobsv <- dim(x)[2] ncat <- dim(x)[3] x_cumsum <- aperm( array( sapply(seq_len(nobsv), function(i) { apply(x[, i, -ncat, drop = FALSE], 1, 
cumsum) }, simplify = "array"), dim = c(ncat - 1, ndraws, nobsv) ), perm = c(2, 3, 1) ) link_ch(x_cumsum, link = link) } # The same as link_sratio(), but dropping margins: link_sratio_ch <- function(x, link) { ndim <- length(dim(x)) .F_k <- function(k) { if (k == 1) { prev_res <- list(F_k = NULL, S_km1_prod = 1) } else { prev_res <- .F_k(k - 1) } F_k <- slice(x, ndim, k) / prev_res$S_km1_prod return(list(F_k = abind::abind(prev_res$F_k, F_k, along = ndim), S_km1_prod = prev_res$S_km1_prod * (1 - F_k))) } x <- .F_k(dim(x)[ndim] - 1)$F_k link_ch(x, link) } environment(link_sratio_ch) <- as.environment(asNamespace("brms")) # The same as link_cratio(), but dropping margins: link_cratio_ch <- function(x, link) { ndim <- length(dim(x)) .F_k <- function(k) { if (k == 1) { prev_res <- list(F_k = NULL, F_km1_prod = 1) } else { prev_res <- .F_k(k - 1) } F_k <- 1 - slice(x, ndim, k) / prev_res$F_km1_prod return(list(F_k = abind::abind(prev_res$F_k, F_k, along = ndim), F_km1_prod = prev_res$F_km1_prod * F_k)) } x <- .F_k(dim(x)[ndim] - 1)$F_k link_ch(x, link) } environment(link_cratio_ch) <- as.environment(asNamespace("brms")) # The same as link_acat(), but possibly dropping margins and not treating the # logit link as a special case: link_acat_ch <- function(x, link) { ndim <- length(dim(x)) ncat <- dim(x)[ndim] dim_noncat <- dim(x)[-ndim] x <- slice(x, ndim, -1) / slice(x, ndim, -ncat) x <- inv_odds(x) array(link_ch(x, link), dim = c(dim_noncat, ncat - 1)) } environment(link_acat_ch) <- as.environment(asNamespace("brms")) brms/tests/testthat/helpers/insert_refcat_ch.R0000644000176200001440000000262714160105076021315 0ustar liggesusers# Very similar to insert_refcat(), but iterates over the observations (if # necessary): insert_refcat_ch <- function(eta, family) { ndim <- length(dim(eta)) if (ndim == 2) { return(insert_refcat_ch_i(eta, family = family)) } else if (ndim == 3) { out <- abind::abind(lapply(seq_cols(eta), function(i) { insert_refcat_ch_i(slice_col(eta, i), family 
= family) }), along = 3) return(aperm(out, perm = c(1, 3, 2))) } else { stop2("eta has wrong dimensions.") } } environment(insert_refcat_ch) <- as.environment(asNamespace("brms")) # A matrix-only variant of insert_refcat() (used to be insert_refcat() before it # was extended to arrays): insert_refcat_ch_i <- function(eta, family) { stopifnot(is.matrix(eta), is.brmsfamily(family)) if (!conv_cats_dpars(family) || isNA(family$refcat)) { return(eta) } # need to add zeros for the reference category zeros <- as.matrix(rep(0, nrow(eta))) if (is.null(family$refcat) || is.null(family$cats)) { # no information on the categories provided: # use the first category as the reference return(cbind(zeros, eta)) } colnames(zeros) <- paste0("mu", family$refcat) iref <- match(family$refcat, family$cats) before <- seq_len(iref - 1) after <- setdiff(seq_cols(eta), before) cbind(eta[, before, drop = FALSE], zeros, eta[, after, drop = FALSE]) } environment(insert_refcat_ch_i) <- as.environment(asNamespace("brms")) brms/tests/testthat/helpers/simopts_catlike_oneobs.R0000644000176200001440000000015214160105076022541 0ustar liggesusers# This test corresponds to a single observation. 
set.seed(1234) ndraws_vec <- c(1, 5) ncat_vec <- c(2, 3) brms/tests/testthat/helpers/link_categorical_ch.R0000644000176200001440000000132514176602337021762 0ustar liggesusers# Very similar to link_categorical(), but iterates over the observations: link_categorical_ch <- function(x, refcat = 1, return_refcat = FALSE) { # For testing purposes, only allow 3-dimensional arrays here: stopifnot(length(dim(x)) == 3) x_tosweep <- if (return_refcat) { x } else { slice(x, 3, -refcat, drop = FALSE) } ndraws <- dim(x)[1] nobsv <- dim(x)[2] ncat <- dim(x)[3] log(aperm( array( sapply(seq_len(nobsv), function(i) { slice(x_tosweep, 2, i) / slice(slice(x, 2, i), 2, refcat) }, simplify = "array"), dim = c(ndraws, ncat - !return_refcat, nobsv) ), perm = c(1, 3, 2) )) } environment(link_categorical_ch) <- as.environment(asNamespace("brms")) brms/tests/testthat/helpers/inv_link_ordinal_ch.R0000644000176200001440000000557714213413565022021 0ustar liggesusersinv_link_cumulative_ch <- function(x, link) { x <- inv_link(x, link) ndim <- length(dim(x)) ncat <- dim(x)[ndim] + 1 out <- vector("list", ncat) out[[1]] <- slice(x, ndim, 1) if (ncat > 2) { .diff <- function(k) { slice(x, ndim, k) - slice(x, ndim, k - 1) } mid_cats <- 2:(ncat - 1) out[mid_cats] <- lapply(mid_cats, .diff) } out[[ncat]] <- 1 - slice(x, ndim, ncat - 1) abind::abind(out, along = ndim) } environment(inv_link_cumulative_ch) <- as.environment(asNamespace("brms")) inv_link_sratio_ch <- function(x, link) { x <- inv_link(x, link) ndim <- length(dim(x)) ncat <- dim(x)[ndim] + 1 marg_noncat <- seq_along(dim(x))[-ndim] out <- vector("list", ncat) out[[1]] <- slice(x, ndim, 1) if (ncat > 2) { .condprod <- function(k) { slice(x, ndim, k) * apply(1 - slice(x, ndim, 1:(k - 1), drop = FALSE), marg_noncat, prod) } mid_cats <- 2:(ncat - 1) out[mid_cats] <- lapply(mid_cats, .condprod) } out[[ncat]] <- apply(1 - x, marg_noncat, prod) abind::abind(out, along = ndim) } environment(inv_link_sratio_ch) <- as.environment(asNamespace("brms")) 
inv_link_cratio_ch <- function(x, link) { x <- inv_link(x, link) ndim <- length(dim(x)) ncat <- dim(x)[ndim] + 1 marg_noncat <- seq_along(dim(x))[-ndim] out <- vector("list", ncat) out[[1]] <- 1 - slice(x, ndim, 1) if (ncat > 2) { .condprod <- function(k) { (1 - slice(x, ndim, k)) * apply(slice(x, ndim, 1:(k - 1), drop = FALSE), marg_noncat, prod) } mid_cats <- 2:(ncat - 1) out[mid_cats] <- lapply(mid_cats, .condprod) } out[[ncat]] <- apply(x, marg_noncat, prod) abind::abind(out, along = ndim) } environment(inv_link_cratio_ch) <- as.environment(asNamespace("brms")) inv_link_acat_ch <- function(x, link) { ndim <- length(dim(x)) ncat <- dim(x)[ndim] + 1 marg_noncat <- seq_along(dim(x))[-ndim] out <- vector("list", ncat) if (link == "logit") { # faster evaluation in this case out[[1]] <- array(1, dim = dim(x)[-ndim]) out[[2]] <- exp(slice(x, ndim, 1)) if (ncat > 2) { .catsum <- function(k) { exp(apply(slice(x, ndim, 1:(k - 1), drop = FALSE), marg_noncat, sum)) } remaincats <- 3:ncat out[remaincats] <- lapply(remaincats, .catsum) } } else { x <- inv_link(x, link) out[[1]] <- apply(1 - x, marg_noncat, prod) if (ncat > 2) { .othercatprod <- function(k) { apply(slice(x, ndim, 1:(k - 1), drop = FALSE), marg_noncat, prod) * apply(slice(1 - x, ndim, k:(ncat - 1), drop = FALSE), marg_noncat, prod) } mid_cats <- 2:(ncat - 1) out[mid_cats] <- lapply(mid_cats, .othercatprod) } out[[ncat]] <- apply(x, marg_noncat, prod) } out <- abind::abind(out, along = ndim) catsum <- apply(out, marg_noncat, sum) sweep(out, marg_noncat, catsum, "/") } environment(inv_link_acat_ch) <- as.environment(asNamespace("brms")) brms/tests/testthat/tests.exclude_pars.R0000644000176200001440000000317014424475714020176 0ustar liggesuserscontext("Tests for exclude_pars helper functions") test_that("exclude_pars returns expected parameter names", { dat <- data.frame(y = 1:10, x1 = rnorm(10), x2 = rnorm(10), g = rep(1:5, 2), h = factor(rep(1:5, each = 2))) fit <- brm(y ~ x1*x2 + (x1*x2|g) + (1 | h), dat, 
empty = TRUE) ep <- brms:::exclude_pars(fit) expect_true(all(c("r_1", "r_2") %in% ep)) fit <- brm(y ~ x1*x2 + (x1*x2|g) + (1 | h), dat, empty = TRUE, save_pars = save_pars(all = TRUE)) ep <- brms:::exclude_pars(fit) expect_true(!any(c("z_1", "z_2") %in% ep)) fit <- brm(y ~ x1*x2 + (x1*x2|g) + (1 | h), dat, empty = TRUE, save_pars = save_pars(group = FALSE)) ep <- brms:::exclude_pars(fit) expect_true("r_1_1" %in% ep) fit <- brm(y ~ x1*x2 + (x1 | g) + (1 | h), dat, empty = TRUE, save_pars = save_pars(group = "h")) ep <- brms:::exclude_pars(fit) expect_true(!"r_1_3" %in% ep) fit <- brm(y ~ s(x1) + x2, dat, empty = TRUE) ep <- brms:::exclude_pars(fit) expect_true("zs_1_1" %in% ep) fit <- brm(bf(y ~ eta, eta ~ x1 + s(x2), nl = TRUE), dat, empty = TRUE) ep <- brms:::exclude_pars(fit) expect_true("zs_eta_1_1" %in% ep) fit <- brm(y ~ me(x1, g), dat, empty = TRUE) ep <- brms:::exclude_pars(fit) expect_true("Xme_1" %in% ep) fit <- brm(y ~ me(x1, g), dat, empty = TRUE, save_pars = save_pars(latent = "x1")) ep <- brms:::exclude_pars(fit) expect_true(!"Xme_1" %in% ep) fit <- brm(y ~ me(x1, g), dat, empty = TRUE, save_pars = save_pars(manual = "Lme_1")) ep <- brms:::exclude_pars(fit) expect_true(!"Lme_1" %in% ep) }) brms/tests/testthat/tests.make_stancode.R0000644000176200001440000033441614500152173020311 0ustar liggesuserscontext("Tests for make_stancode") # simplifies manual calling of tests expect_match2 <- brms:::expect_match2 SW <- brms:::SW # parsing the Stan code ensures syntactical correctness of models # setting this option to FALSE speeds up testing not_cran <- identical(Sys.getenv("NOT_CRAN"), "true") options(brms.parse_stancode = not_cran, brms.backend = "rstan") test_that("specified priors appear in the Stan code", { dat <- data.frame(y = 1:10, x1 = rnorm(10), x2 = rnorm(10), g = rep(1:5, 2), h = factor(rep(1:5, each = 2))) prior <- c(prior(std_normal(), coef = x1), prior(normal(0,2), coef = x2), prior(normal(0,5), Intercept, lb = 0), prior(cauchy(0,1), sd, group = 
g, lb = "", ub = 5), prior(cauchy(0,2), sd, group = g, coef = x1), prior(gamma(1, 1), sd, group = h, ub = 10)) scode <- make_stancode(y ~ x1*x2 + (x1*x2|g) + (1 | h), dat, prior = prior, sample_prior = "yes") expect_match2(scode, "vector[M_1] sd_1;") expect_match2(scode, "vector[M_2] sd_2;") expect_match2(scode, "target += lprior;") expect_match2(scode, "lprior += std_normal_lpdf(b[1])") expect_match2(scode, "lprior += normal_lpdf(b[2] | 0, 2)") expect_match2(scode, "lprior += normal_lpdf(Intercept | 0, 5)") expect_match2(scode, "lprior += cauchy_lpdf(sd_1[1] | 0, 1)") expect_match2(scode, "- 1 * cauchy_lcdf(5 | 0, 1)") expect_match2(scode, "lprior += cauchy_lpdf(sd_1[2] | 0, 2)") expect_match2(scode, "lprior += student_t_lpdf(sigma | 3, 0, 3.7)") expect_match2(scode, "- 1 * student_t_lccdf(0 | 3, 0, 3.7)") expect_match2(scode, "lprior += gamma_lpdf(sd_2 | 1, 1)") expect_match2(scode, "prior_b__1 = normal_rng(0,1);") expect_match2(scode, "prior_sd_1__1 = cauchy_rng(0,1)") expect_match2(scode, "while (prior_sd_1__1 > 5)") expect_match2(scode, "prior_sd_2 = gamma_rng(1,1)") expect_match2(scode, "while (prior_sd_2 < 0 || prior_sd_2 > 10)") prior <- c(prior(lkj(0.5), class = cor, group = g), prior(normal(0, 1), class = b), prior(normal(0, 5), class = Intercept), prior(cauchy(0, 5), class = sd)) scode <- make_stancode(y ~ x1 + cs(x2) + (0 + x1 + x2 | g), data = dat, family = acat(), prior = prior, sample_prior = TRUE) expect_match2(scode, "lprior += normal_lpdf(b | 0, 1)") expect_match2(scode, "lprior += normal_lpdf(Intercept | 0, 5)") expect_match2(scode, "lprior += cauchy_lpdf(sd_1 | 0, 5)") expect_match2(scode, "lprior += lkj_corr_cholesky_lpdf(L_1 | 0.5)") expect_match2(scode, "lprior += normal_lpdf(to_vector(bcs) | 0, 1)") expect_match2(scode, "prior_bcs = normal_rng(0,1)") prior <- c(prior(normal(0,5), nlpar = a), prior(normal(0,10), nlpar = b), prior(cauchy(0,1), class = sd, nlpar = a), prior(lkj(2), class = cor, group = g)) scode <- make_stancode( bf(y ~ a * 
exp(-b * x1), a + b ~ (1|ID|g), nl = TRUE), data = dat, prior = prior, sample_prior = TRUE ) expect_match2(scode, "lprior += normal_lpdf(b_a | 0, 5)") expect_match2(scode, "lprior += normal_lpdf(b_b | 0, 10)") expect_match2(scode, "lprior += cauchy_lpdf(sd_1[1] | 0, 1)") expect_match2(scode, "lprior += lkj_corr_cholesky_lpdf(L_1 | 2)") expect_match2(scode, "prior_b_a = normal_rng(0,5)") expect_match2(scode, "prior_sd_1__2 = student_t_rng(3,0,3.7)") expect_match2(scode, "prior_cor_1 = lkj_corr_rng(M_1,2)[1, 2]") prior <- c(prior(lkj(2), rescor), prior(cauchy(0, 5), sigma, resp = y), prior(cauchy(0, 1), sigma, resp = x1)) form <- bf(mvbind(y, x1) ~ x2) + set_rescor(TRUE) scode <- make_stancode(form, dat, prior = prior, sample_prior = TRUE) expect_match2(scode, "lprior += lkj_corr_cholesky_lpdf(Lrescor | 2)") expect_match2(scode, "prior_sigma_y = cauchy_rng(0,5)") expect_match2(scode, "prior_rescor = lkj_corr_rng(nresp,2)[1, 2]") prior <- c(prior(uniform(-1, 1), ar), prior(normal(0, 0.5), ma), prior(normal(0, 5))) scode <- make_stancode(y ~ mo(g) + arma(cov = TRUE), dat, prior = prior, sample_prior = TRUE) expect_match2(scode, "vector[Kar] ar;") expect_match2(scode, "vector[Kma] ma;") expect_match2(scode, "lprior += uniform_lpdf(ar | -1, 1)") expect_match2(scode, "lprior += normal_lpdf(ma | 0, 0.5)") expect_match2(scode, "- 1 * log_diff_exp(normal_lcdf(1 | 0, 0.5), normal_lcdf(-1 | 0, 0.5))" ) expect_match2(scode, "lprior += normal_lpdf(bsp | 0, 5)") expect_match2(scode, "lprior += dirichlet_lpdf(simo_1 | con_simo_1)") expect_match2(scode, "prior_simo_1 = dirichlet_rng(con_simo_1)") expect_match2(scode, "prior_ar = uniform_rng(-1,1)") expect_match2(scode, "while (prior_ar < -1 || prior_ar > 1)") # test for problem described in #213 prior <- c(prior(normal(0, 1), coef = x1), prior(normal(0, 2), coef = x1, dpar = sigma)) scode <- make_stancode(bf(y ~ x1, sigma ~ x1), dat, prior = prior) expect_match2(scode, "lprior += normal_lpdf(b[1] | 0, 1);") expect_match2(scode, 
"lprior += normal_lpdf(b_sigma[1] | 0, 2);") prior <- c(set_prior("target += normal_lpdf(b[1] | 0, 1)", check = FALSE), set_prior("", class = "sigma")) scode <- make_stancode(y ~ x1, dat, prior = prior, sample_prior = TRUE) expect_match2(scode, "target += normal_lpdf(b[1] | 0, 1)") expect_true(!grepl("sigma \\|", scode)) # commented out until fixes implemented in 'check_prior_content' prior <- prior(gamma(0, 1), coef = x1) expect_warning(make_stancode(y ~ x1, dat, prior = prior), "no natural lower bound") prior <- prior(uniform(0,5), class = sd) expect_warning(make_stancode(y ~ x1 + (1|g), dat, prior = prior), "no natural upper bound") prior <- prior(uniform(-1, 1), class = cor) expect_error( make_stancode(y ~ x1 + (x1|g), dat, prior = prior), "prior for correlation matrices is the 'lkj' prior" ) }) test_that("special shrinkage priors appear in the Stan code", { dat <- data.frame(y = 1:10, x1 = rnorm(10), x2 = rnorm(10), g = rep(1:2, each = 5), x3 = sample(1:5, 10, TRUE)) # horseshoe prior hs <- horseshoe(7, scale_global = 2, df_global = 3, df_slab = 6, scale_slab = 3) scode <- make_stancode(y ~ x1*x2, data = dat, prior = set_prior(hs), sample_prior = TRUE) expect_match2(scode, "vector[Kscales] hs_local;") expect_match2(scode, "real hs_global;") expect_match2(scode, "target += student_t_lpdf(hs_local | hs_df, 0, 1)" ) expect_match2(scode, "lprior += student_t_lpdf(hs_global | hs_df_global, 0, hs_scale_global * sigma)" ) expect_match2(scode, "lprior += inv_gamma_lpdf(hs_slab | 0.5 * hs_df_slab, 0.5 * hs_df_slab)" ) expect_match2(scode, "scales = scales_horseshoe(hs_local, hs_global, hs_scale_slab^2 * hs_slab);" ) scode <- make_stancode(y ~ x1*x2, data = dat, poisson(), prior = prior(horseshoe(scale_global = 3))) expect_match2(scode, "scales = scales_horseshoe(hs_local, hs_global, hs_scale_slab^2 * hs_slab);" ) scode <- make_stancode(x1 ~ mo(y), dat, prior = prior(horseshoe())) expect_match2(scode, "target += std_normal_lpdf(zbsp);") expect_match2(scode, "target += 
student_t_lpdf(hs_local | hs_df, 0, 1)" ) expect_match2(scode, "scales = scales_horseshoe(hs_local, hs_global, hs_scale_slab^2 * hs_slab);" ) # R2D2 prior scode <- make_stancode(y ~ x1*x2, data = dat, prior = prior(R2D2(0.5, 10)), sample_prior = TRUE) expect_match2(scode, "scales = scales_R2D2(R2D2_phi, R2D2_tau2);") expect_match2(scode, "target += dirichlet_lpdf(R2D2_phi | R2D2_cons_D2);") expect_match2(scode, "lprior += beta_lpdf(R2D2_R2 | R2D2_mean_R2 * R2D2_prec_R2, (1 - R2D2_mean_R2) * R2D2_prec_R2);") expect_match2(scode, "R2D2_tau2 = sigma^2 * R2D2_R2 / (1 - R2D2_R2);") # shrinkage priors applied in a non-linear model hs_a1 <- horseshoe(7, scale_global = 2, df_global = 3) R2D2_a2 <- R2D2(0.5, 10) scode <- SW(make_stancode( bf(y ~ a1 + a2, a1 ~ x1, a2 ~ 0 + x2, nl = TRUE), data = dat, sample_prior = TRUE, prior = c(set_prior(hs_a1, nlpar = "a1"), set_prior(R2D2_a2, nlpar = "a2")) )) expect_match2(scode, "vector[Kscales_a1] hs_local_a1;") expect_match2(scode, "real hs_global_a1;") expect_match2(scode, "target += student_t_lpdf(hs_local_a1 | hs_df_a1, 0, 1)" ) expect_match2(scode, "lprior += student_t_lpdf(hs_global_a1 | hs_df_global_a1, 0, hs_scale_global_a1 * sigma)" ) expect_match2(scode, "lprior += inv_gamma_lpdf(hs_slab_a1 | 0.5 * hs_df_slab_a1, 0.5 * hs_df_slab_a1)" ) expect_match2(scode, "scales_a1 = scales_horseshoe(hs_local_a1, hs_global_a1, hs_scale_slab_a1^2 * hs_slab_a1);" ) expect_match2(scode, "scales_a2 = scales_R2D2(R2D2_phi_a2, R2D2_tau2_a2);") # shrinkage priors can be applied globally bform <- bf(y ~ x1*mo(x3) + (1|g) + (1|x1) + gp(x3) + s(x2) + arma(p = 2, q = 2, gr = g)) bprior <- prior(R2D2(main = TRUE), class = b) + prior(R2D2(), class = sd) + prior(R2D2(), class = sds) + prior(R2D2(), class = sdgp) + prior(R2D2(), class = ar) + prior(R2D2(), class = ma) scode <- make_stancode(bform, data = dat, prior = bprior) expect_match2(scode, "sdb = scales[(1):(Kc)];") expect_match2(scode, "sdbsp = scales[(1+Kc):(Kc+Ksp)];") expect_match2(scode, 
"sdbs = scales[(1+Kc+Ksp):(Kc+Ksp+Ks)];") expect_match2(scode, "sds_1 = scales[(1+Kc+Ksp+Ks):(Kc+Ksp+Ks+nb_1)];") expect_match2(scode, "sdgp_1 = scales[(1+Kc+Ksp+Ks+nb_1):(Kc+Ksp+Ks+nb_1+Kgp_1)];") expect_match2(scode, "sdar = scales[(1+Kc+Ksp+Ks+nb_1+Kgp_1):(Kc+Ksp+Ks+nb_1+Kgp_1+Kar)];") expect_match2(scode, "sdma = scales[(1+Kc+Ksp+Ks+nb_1+Kgp_1+Kar):(Kc+Ksp+Ks+nb_1+Kgp_1+Kar+Kma)];") expect_match2(scode, "sd_1 = scales[(1+Kc+Ksp+Ks+nb_1+Kgp_1+Kar+Kma):(Kc+Ksp+Ks+nb_1+Kgp_1+Kar+Kma+M_1)];") expect_match2(scode, "sd_2 = scales[(1+Kc+Ksp+Ks+nb_1+Kgp_1+Kar+Kma+M_1):(Kc+Ksp+Ks+nb_1+Kgp_1+Kar+Kma+M_1+M_2)];") expect_match2(scode, "bsp = zbsp .* sdbsp; // scale coefficients") expect_match2(scode, "ar = zar .* sdar; // scale coefficients") # check error messages expect_error(make_stancode(y ~ x1*x2, data = dat, prior = prior(horseshoe(-1))), "Degrees of freedom of the local priors") expect_error(make_stancode(y ~ x1*x2, data = dat, prior = prior(horseshoe(1, -1))), "Scale of the global prior") expect_error(make_stancode(y ~ cs(x1), dat, acat(), prior = prior(R2D2())), "Special priors are not yet allowed") bprior <- prior(horseshoe()) + prior(normal(0, 1), coef = "y") expect_error(make_stancode(x1 ~ y, dat, prior = bprior), "Defining separate priors for single coefficients") expect_error(make_stancode(x1 ~ y, dat, prior = prior(horseshoe(), lb = 0)), "Setting boundaries on coefficients is not allowed") expect_error( make_stancode(y ~ x1*x2, data = dat, prior = prior(lasso(2, scale = 10))), "The lasso prior is no longer supported" ) }) test_that("priors can be fixed to constants", { dat <- data.frame(y = 1:12, x1 = rnorm(12), x2 = rnorm(12), g = rep(1:6, each = 2), h = factor(rep(1:2, each = 6))) prior <- prior(normal(0, 1), b) + prior(constant(3), b, coef = x1) + prior(constant(-1), b, coef = x2) + prior(constant(10), Intercept) + prior(normal(0, 5), sd) + prior(constant(1), sd, group = g, coef = x2) + prior(constant(2), sd, group = g, coef = x1) + prior(constant(0.3), 
sigma) scode <- make_stancode(y ~ x1*x2 + (x1*x2 | g), dat, prior = prior) expect_match2(scode, "b[1] = 3;") expect_match2(scode, "b[2] = -1;") expect_match2(scode, "b[3] = par_b_3;") expect_match2(scode, "lprior += normal_lpdf(b[3] | 0, 1);") expect_match2(scode, "Intercept = 1") expect_match2(scode, "sd_1[3] = 1;") expect_match2(scode, "sd_1[2] = 2;") expect_match2(scode, "sd_1[4] = par_sd_1_4;") expect_match2(scode, "lprior += normal_lpdf(sd_1[4] | 0, 5)") expect_match2(scode, "sigma = 0.3;") prior <- prior(constant(3)) scode <- make_stancode(y ~ x2 + x1 + cs(g), dat, family = sratio(), prior = prior) expect_match2(scode, "b = rep_vector(3, rows(b));") expect_match2(scode, "bcs = rep_matrix(3, rows(bcs), cols(bcs));") prior <- prior(normal(0, 3)) + prior(constant(3), coef = x1) + prior(constant(-1), coef = g) scode <- make_stancode(y ~ x1 + cs(x2) + cs(g), dat, family = sratio(), prior = prior) expect_match2(scode, "b[1] = 3;") expect_match2(scode, "bcs[1] = par_bcs_1;") expect_match2(scode, "lprior += normal_lpdf(bcs[1] | 0, 3);") expect_match2(scode, "bcs[2] = rep_row_vector(-1, cols(bcs[2]));") prior <- prior(constant(3), class = "sd", group = "g") + prior("constant([[1, 0], [0, 1]])", class = "cor") scode <- make_stancode(y ~ x1 + (x1 | gr(g, by = h)), dat, prior = prior) expect_match2(scode, "sd_1 = rep_matrix(3, rows(sd_1), cols(sd_1));") expect_match2(scode, "L_1[2] = [[1, 0], [0, 1]];") prior <- prior(constant(0.5), class = lscale, coef = gpx1h1) + prior(normal(0, 10), class = lscale, coef = gpx1h2) scode <- make_stancode(y ~ gp(x1, by = h), dat, prior = prior) expect_match2(scode, "lscale_1[1][1] = 0.5;") expect_match2(scode, "lscale_1[2][1] = par_lscale_1_2_1;") expect_match2(scode, "lprior += normal_lpdf(lscale_1[2][1] | 0, 10)") # test that improper base priors are correctly recognized (#919) prior <- prior(constant(-1), b, coef = x2) scode <- make_stancode(y ~ x1*x2, dat, prior = prior) expect_match2(scode, "real par_b_1;") expect_match2(scode, 
"b[3] = par_b_3;") # test error messages prior <- prior(normal(0, 1), Intercept) + prior(constant(3), Intercept, coef = 2) expect_error( make_stancode(y ~ x1, data = dat, family = cumulative(), prior = prior), "Can either estimate or fix all values" ) }) test_that("link functions appear in the Stan code", { dat <- data.frame(y = 1:10, x = rnorm(10)) expect_match2(make_stancode(y ~ s(x), dat, family = poisson()), "target += poisson_log_lpmf(Y | mu);") expect_match2(make_stancode(mvbind(y, y + 1) ~ x, dat, family = skew_normal("log")), "mu_y = exp(mu_y);") expect_match2(make_stancode(y ~ x, dat, family = von_mises(tan_half)), "mu = inv_tan_half_vector(mu);") expect_match2(make_stancode(y ~ x, dat, family = weibull()), "mu = exp(mu);") expect_match2(make_stancode(y ~ x, dat, family = poisson("sqrt")), "mu = square(mu);") expect_match2(make_stancode(y ~ s(x), dat, family = bernoulli()), "target += bernoulli_logit_lpmf(Y | mu);") scode <- make_stancode(y ~ x, dat, family = beta_binomial('logit')) expect_match2(scode, "mu = inv_logit(mu);") scode <- make_stancode(y ~ x, dat, family = beta_binomial('cloglog')) expect_match2(scode, "mu = inv_cloglog(mu);") scode <- make_stancode(y ~ x, dat, family = beta_binomial('cauchit')) expect_match2(scode, "mu = inv_cauchit_vector(mu);") scode <- make_stancode(y ~ x, dat, family = cumulative('cauchit')) expect_match2(scode, "p = inv_cauchit(disc * (thres[1] - mu));") }) test_that("Stan GLM primitives are applied correctly", { dat <- data.frame(x = rnorm(10), y = 1:10) scode <- make_stancode(y ~ x, dat, family = gaussian) expect_match2(scode, "normal_id_glm_lpdf(Y | Xc, Intercept, b, sigma)") scode <- make_stancode(y ~ x, dat, family = bernoulli) expect_match2(scode, "bernoulli_logit_glm_lpmf(Y | Xc, Intercept, b)") scode <- make_stancode(y ~ x, dat, family = poisson) expect_match2(scode, "poisson_log_glm_lpmf(Y | Xc, Intercept, b)") scode <- make_stancode(y ~ x, dat, family = negbinomial) expect_match2(scode, 
"neg_binomial_2_log_glm_lpmf(Y | Xc, Intercept, b, shape)" ) scode <- make_stancode(y ~ x, dat, family = brmsfamily("negbinomial2")) expect_match2(scode, "neg_binomial_2_log_glm_lpmf(Y | Xc, Intercept, b, inv(sigma))" ) scode <- make_stancode(y ~ 0 + x, dat, family = gaussian) expect_match2(scode, "normal_id_glm_lpdf(Y | X, 0, b, sigma)") bform <- bf(y ~ x) + bf(x ~ 1, family = negbinomial()) + set_rescor(FALSE) scode <- make_stancode(bform, dat, family = gaussian) expect_match2(scode, "normal_id_glm_lpdf(Y_y | Xc_y, Intercept_y, b_y, sigma_y)" ) scode <- make_stancode(bf(y ~ x, decomp = "QR"), dat, family = gaussian) expect_match2(scode, "normal_id_glm_lpdf(Y | XQ, Intercept, bQ, sigma);") }) test_that("customized covariances appear in the Stan code", { M <- diag(1, nrow = length(unique(inhaler$subject))) rownames(M) <- unique(inhaler$subject) dat2 <- list(M = M) scode <- make_stancode(rating ~ treat + (1 | gr(subject, cov = M)), data = inhaler, data2 = dat2) expect_match2(scode, "r_1_1 = (sd_1[1] * (Lcov_1 * z_1[1]))") scode <- make_stancode(rating ~ treat + (1 + treat | gr(subject, cov = M)), data = inhaler, data2 = dat2) expect_match2(scode, "r_1 = scale_r_cor_cov(z_1, sd_1, L_1, Lcov_1);") expect_match2(scode, "cor_1[choose(k - 1, 2) + j] = Cor_1[j, k];") scode <- make_stancode(rating ~ (1 + treat | gr(subject, cor = FALSE, cov = M)), data = inhaler, data2 = dat2) expect_match2(scode, "r_1_1 = (sd_1[1] * (Lcov_1 * z_1[1]));") expect_match2(scode, "r_1_2 = (sd_1[2] * (Lcov_1 * z_1[2]));") inhaler$by <- inhaler$subject %% 2 scode <- make_stancode(rating ~ (1 + treat | gr(subject, by = by, cov = M)), data = inhaler, data2 = dat2) expect_match2(scode, "r_1 = scale_r_cor_by_cov(z_1, sd_1, L_1, Jby_1, Lcov_1);") expect_warning( scode <- make_stancode(rating ~ treat + period + carry + (1|subject), data = inhaler, cov_ranef = list(subject = 1)), "Argument 'cov_ranef' is deprecated" ) expect_match2(scode, "r_1_1 = (sd_1[1] * (Lcov_1 * z_1[1]))") }) 
test_that("truncation appears in the Stan code", { scode <- make_stancode(time | trunc(0) ~ age + sex + disease, data = kidney, family = "gamma") expect_match2(scode, "target += gamma_lpdf(Y[n] | shape, shape / mu[n]) -") expect_match2(scode, "gamma_lccdf(lb[n] | shape, shape / mu[n]);") scode <- make_stancode(time | trunc(ub = 100) ~ age + sex + disease, data = kidney, family = student("log")) expect_match2(scode, "target += student_t_lpdf(Y[n] | nu, mu[n], sigma) -") expect_match2(scode, "student_t_lcdf(ub[n] | nu, mu[n], sigma);") scode <- make_stancode(count | trunc(0, 150) ~ Trt, data = epilepsy, family = "poisson") expect_match2(scode, "target += poisson_lpmf(Y[n] | mu[n]) -") expect_match2(scode, "log_diff_exp(poisson_lcdf(ub[n] | mu[n]), poisson_lcdf(lb[n] - 1 | mu[n]));" ) }) test_that("make_stancode handles models without fixed effects", { expect_match2(make_stancode(count ~ 0 + (1|patient) + (1+Trt|visit), data = epilepsy, family = "poisson"), "mu = rep_vector(0.0, N);") }) test_that("make_stancode correctly restricts FE parameters", { data <- data.frame(y = rep(0:1, each = 5), x = rnorm(10)) scode <- make_stancode(y ~ x, data, prior = set_prior("", lb = 2)) expect_match2(scode, "vector[Kc] b") scode <- make_stancode( y ~ x, data, prior = set_prior("normal (0, 2)", ub = "4") ) expect_match2(scode, "vector[Kc] b") expect_match2(scode, "- 1 * normal_lcdf(4 | 0, 2)") prior <- set_prior("normal(0,5)", lb = "-3", ub = 5) scode <- make_stancode(y ~ 0 + x, data, prior = prior) expect_match2(scode, "vector[K] b") }) test_that("self-defined functions appear in the Stan code", { # cauchit link scode <- make_stancode(rating ~ treat, data = inhaler, family = bernoulli("cauchit")) expect_match2(scode, "real inv_cauchit(real y)") # softplus link scode <- make_stancode(rating ~ treat, data = inhaler, family = brmsfamily("poisson", "softplus")) expect_match2(scode, "vector log_expm1_vector(vector x)") # squareplus link scode <- make_stancode(rating ~ treat, data = 
inhaler, family = brmsfamily("poisson", "squareplus")) expect_match2(scode, "real squareplus(real x)") # tan_half link expect_match2(make_stancode(rating ~ treat, data = inhaler, family = von_mises("tan_half")), "vector inv_tan_half_vector(vector y)") # logm1 link expect_match2(make_stancode(rating ~ treat, data = inhaler, family = frechet()), "real expp1(real y)") # inverse gaussian models scode <- make_stancode(time | cens(censored) ~ age, data = kidney, family = inverse.gaussian) expect_match2(scode, "real inv_gaussian_lpdf(real y") expect_match2(scode, "real inv_gaussian_lcdf(real y") expect_match2(scode, "real inv_gaussian_lccdf(real y") expect_match2(scode, "real inv_gaussian_vector_lpdf(vector y") # von Mises models scode <- make_stancode(time ~ age, data = kidney, family = von_mises) expect_match2(scode, "real von_mises_real_lpdf(real y") expect_match2(scode, "real von_mises_vector_lpdf(vector y") # zero-inflated and hurdle models expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = "zero_inflated_poisson"), "real zero_inflated_poisson_lpmf(int y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = "zero_inflated_negbinomial"), "real zero_inflated_neg_binomial_lpmf(int y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = "zero_inflated_binomial"), "real zero_inflated_binomial_lpmf(int y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = "zero_inflated_beta_binomial"), "real zero_inflated_beta_binomial_lpmf(int y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = "zero_inflated_beta"), "real zero_inflated_beta_lpdf(real y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = "zero_one_inflated_beta"), "real zero_one_inflated_beta_lpdf(real y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = hurdle_poisson()), "real hurdle_poisson_lpmf(int y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = hurdle_negbinomial), "real 
hurdle_neg_binomial_lpmf(int y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = hurdle_gamma("log")), "real hurdle_gamma_lpdf(real y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = hurdle_lognormal("identity")), "real hurdle_lognormal_lpdf(real y") # linear models with special covariance structures expect_match2( make_stancode(rating ~ treat + ar(cov = TRUE), data = inhaler), "real normal_time_hom_lpdf(vector y" ) expect_match2( make_stancode(time ~ age + ar(cov = TRUE), data = kidney, family = "student"), "real student_t_time_hom_lpdf(vector y" ) # ARMA covariance matrices expect_match2( make_stancode(rating ~ treat + ar(cov = TRUE), data = inhaler), "matrix cholesky_cor_ar1(real ar" ) expect_match2( make_stancode(time ~ age + ma(cov = TRUE), data = kidney), "matrix cholesky_cor_ma1(real ma" ) expect_match2( make_stancode(time ~ age + arma(cov = TRUE), data = kidney), "matrix cholesky_cor_arma1(real ar, real ma" ) }) test_that("invalid combinations of modeling options are detected", { data <- data.frame(y1 = rnorm(10), y2 = rnorm(10), wi = 1:10, ci = sample(-1:1, 10, TRUE)) expect_error( make_stancode(y1 | cens(ci) ~ y2 + ar(cov = TRUE), data = data), "Invalid addition arguments for this model" ) form <- bf(mvbind(y1, y2) ~ 1 + ar(cov = TRUE)) + set_rescor(TRUE) expect_error( make_stancode(form, data = data), "Explicit covariance terms cannot be modeled when 'rescor'" ) expect_error( make_stancode(y1 | resp_se(wi) ~ y2 + ma(), data = data), "Please set cov = TRUE in ARMA structures" ) }) test_that("Stan code for multivariate models is correct", { dat <- data.frame( y1 = rnorm(10), y2 = rnorm(10), x = 1:10, g = rep(1:2, each = 5), censi = sample(0:1, 10, TRUE) ) # models with residual correlations form <- bf(mvbind(y1, y2) ~ x) + set_rescor(TRUE) prior <- prior(horseshoe(2), resp = "y1") + prior(horseshoe(2), resp = "y2") scode <- make_stancode(form, dat, prior = prior) expect_match2(scode, "target += 
multi_normal_cholesky_lpdf(Y | Mu, LSigma);") expect_match2(scode, "LSigma = diag_pre_multiply(sigma, Lrescor);") expect_match2(scode, "target += student_t_lpdf(hs_local_y1 | hs_df_y1, 0, 1)") expect_match2(scode, "target += student_t_lpdf(hs_local_y2 | hs_df_y2, 0, 1)") expect_match2(scode, "rescor[choose(k - 1, 2) + j] = Rescor[j, k];") form <- bf(mvbind(y1, y2) ~ x) + set_rescor(TRUE) prior <- prior(R2D2(0.2, 10), resp = "y1") + prior(R2D2(0.5, 10), resp = "y2") scode <- SW(make_stancode(form, dat, student(), prior = prior)) expect_match2(scode, "target += multi_student_t_lpdf(Y | nu, Mu, Sigma);") expect_match2(scode, "matrix[nresp, nresp] Sigma = multiply_lower") expect_match2(scode, "lprior += gamma_lpdf(nu | 2, 0.1)") expect_match2(scode, "target += dirichlet_lpdf(R2D2_phi_y2 | R2D2_cons_D2_y2);") form <- bf(mvbind(y1, y2) | weights(x) ~ 1) + set_rescor(TRUE) scode <- make_stancode(form, dat) expect_match2(scode, "target += weights[n] * (multi_normal_cholesky_lpdf(Y[n] | Mu[n], LSigma));" ) # models without residual correlations expect_warning( bform <- bf(y1 | cens(censi) ~ x + y2 + (x|2|g)) + gaussian() + cor_ar() + (bf(x ~ 1) + mixture(poisson, nmix = 2)) + (bf(y2 ~ s(y2) + (1|2|g)) + skew_normal()), "Using 'cor_brms' objects for 'autocor' is deprecated" ) bprior <- prior(normal(0, 5), resp = y1) + prior(normal(0, 10), resp = y2) scode <- make_stancode(bform, dat, prior = bprior) expect_match2(scode, "r_1_y2_3 = r_1[, 3]") expect_match2(scode, "err_y1[n] = Y_y1[n] - mu_y1[n]") expect_match2(scode, "target += normal_lccdf(Y_y1[n] | mu_y1[n], sigma_y1)") expect_match2(scode, "target += skew_normal_lpdf(Y_y2 | mu_y2, omega_y2, alpha_y2)") expect_match2(scode, "ps[1] = log(theta1_x) + poisson_log_lpmf(Y_x[n] | mu1_x[n])") expect_match2(scode, "lprior += normal_lpdf(b_y1 | 0, 5)") expect_match2(scode, "lprior += normal_lpdf(bs_y2 | 0, 10)") # multivariate binomial models bform <- bf(x ~ 1) + bf(g ~ 1) + binomial() scode <- make_stancode(bform, dat) 
expect_match2(scode, "binomial_logit_lpmf(Y_x | trials_x, mu_x)") expect_match2(scode, "binomial_logit_lpmf(Y_g | trials_g, mu_g)") # multivariate weibull models bform <- bform + weibull() scode <- make_stancode(bform, dat) expect_match2(scode, "weibull_lpdf(Y_g | shape_g, mu_g / tgamma(1 + 1 / shape_g));") }) test_that("Stan code for categorical models is correct", { dat <- data.frame(y = rep(c(1, 2, 3, "a_b"), 2), x = 1:8, .g = 1:8) prior <- prior(normal(0, 5), "b", dpar = muab) + prior(normal(0, 10), "b", dpar = mu2) + prior(cauchy(0, 1), "Intercept", dpar = mu2) + prior(normal(0, 2), "Intercept", dpar = mu3) scode <- make_stancode(y ~ x + (1 | gr(.g, id = "ID")), data = dat, family = categorical(), prior = prior) expect_match2(scode, "target += categorical_logit_lpmf(Y[n] | mu[n]);") expect_match2(scode, "mu[n] = transpose([0, mu2[n], mu3[n], muab[n]]);") expect_match2(scode, "mu2 += Intercept_mu2 + Xc_mu2 * b_mu2;") expect_match2(scode, "muab[n] += r_1_muab_3[J_1[n]] * Z_1_muab_3[n];") expect_match2(scode, "lprior += normal_lpdf(b_mu2 | 0, 10);") expect_match2(scode, "lprior += normal_lpdf(b_muab | 0, 5);") expect_match2(scode, "lprior += cauchy_lpdf(Intercept_mu2 | 0, 1);") expect_match2(scode, "lprior += normal_lpdf(Intercept_mu3 | 0, 2);") expect_match2(scode, "r_1 = scale_r_cor(z_1, sd_1, L_1);") scode <- make_stancode(y ~ x + (1 |ID| .g), data = dat, family = categorical(refcat = NA)) expect_match2(scode, "mu[n] = transpose([mu1[n], mu2[n], mu3[n], muab[n]]);") }) test_that("Stan code for multinomial models is correct", { N <- 15 dat <- data.frame( y1 = rbinom(N, 10, 0.3), y2 = rbinom(N, 10, 0.5), y3 = rbinom(N, 10, 0.7), x = rnorm(N) ) dat$size <- with(dat, y1 + y2 + y3) dat$y <- with(dat, cbind(y1, y2, y3)) prior <- prior(normal(0, 10), "b", dpar = muy2) + prior(cauchy(0, 1), "Intercept", dpar = muy2) + prior(normal(0, 2), "Intercept", dpar = muy3) scode <- make_stancode(bf(y | trials(size) ~ 1, muy2 ~ x), data = dat, family = multinomial(), prior = 
prior) expect_match2(scode, "array[N, ncat] int Y;") expect_match2(scode, "target += multinomial_logit2_lpmf(Y[n] | mu[n]);") expect_match2(scode, "muy2 += Intercept_muy2 + Xc_muy2 * b_muy2;") expect_match2(scode, "lprior += normal_lpdf(b_muy2 | 0, 10);") expect_match2(scode, "lprior += cauchy_lpdf(Intercept_muy2 | 0, 1);") expect_match2(scode, "lprior += normal_lpdf(Intercept_muy3 | 0, 2);") }) test_that("Stan code for dirichlet models is correct", { N <- 15 dat <- as.data.frame(rdirichlet(N, c(3, 2, 1))) names(dat) <- c("y1", "y2", "y3") dat$x <- rnorm(N) dat$y <- with(dat, cbind(y1, y2, y3)) # dirichlet in probability-sum(alpha) concentration prior <- prior(normal(0, 5), class = "b", dpar = "muy3") + prior(exponential(10), "phi") scode <- make_stancode(bf(y ~ 1, muy3 ~ x), data = dat, family = dirichlet(), prior = prior) expect_match2(scode, "array[N] vector[ncat] Y;") expect_match2(scode, "target += dirichlet_logit_lpdf(Y[n] | mu[n], phi);") expect_match2(scode, "muy3 += Intercept_muy3 + Xc_muy3 * b_muy3;") expect_match2(scode, "lprior += normal_lpdf(b_muy3 | 0, 5);") expect_match2(scode, "lprior += exponential_lpdf(phi | 10);") scode <- make_stancode(bf(y ~ x, phi ~ x), data = dat, family = dirichlet()) expect_match2(scode, "target += dirichlet_logit_lpdf(Y[n] | mu[n], phi[n]);") expect_match2(scode, "phi += Intercept_phi + Xc_phi * b_phi;") expect_match2(scode, "phi = exp(phi);") # dirichlet2 in alpha parameterization prior <- prior(normal(0, 5), class = "b", dpar = "muy3") scode <- make_stancode(bf(y ~ 1, muy3 ~ x), data = dat, family = brmsfamily("dirichlet2"), prior = prior) expect_match2(scode, "array[N] vector[ncat] Y;") expect_match2(scode, "muy3 = exp(muy3);") expect_match2(scode, "target += dirichlet_lpdf(Y[n] | mu[n]);") expect_match2(scode, "muy3 += Intercept_muy3 + Xc_muy3 * b_muy3;") expect_match2(scode, "mu[n] = transpose([muy1[n], muy2[n], muy3[n]]);") expect_match2(scode, "lprior += normal_lpdf(b_muy3 | 0, 5);") expect_match2(scode, "lprior += 
student_t_lpdf(Intercept_muy1 | 3, 0, 2.5);") }) test_that("Stan code for logistic_normal models is correct", { N <- 15 dat <- as.data.frame(rdirichlet(N, c(3, 2, 1))) names(dat) <- c("y1", "y2", "y3") dat$x <- rnorm(N) dat$y <- with(dat, cbind(y1, y2, y3)) prior <- prior(normal(0, 5), class = "b", dpar = "muy3") + prior(exponential(10), "sigmay1") + prior(lkj(3), "lncor") scode <- make_stancode(bf(y ~ x), data = dat, family = logistic_normal(refcat = "y2"), prior = prior) expect_match2(scode, "array[N] vector[ncat] Y;") expect_match2(scode, "mu[n] = transpose([muy1[n], muy3[n]]);") expect_match2(scode, "vector[ncat-1] sigma = transpose([sigmay1, sigmay3]);") expect_match2(scode, "target += logistic_normal_cholesky_cor_lpdf(Y[n] | mu[n], sigma, Llncor, 2);") expect_match2(scode, "muy3 += Intercept_muy3 + Xc_muy3 * b_muy3;") expect_match2(scode, "lprior += normal_lpdf(b_muy3 | 0, 5);") expect_match2(scode, "lprior += exponential_lpdf(sigmay1 | 10);") expect_match2(scode, "lprior += lkj_corr_cholesky_lpdf(Llncor | 3);") prior <- prior(normal(0, 5), class = "b", dpar = "muy3") + prior(normal(0, 3), class = "b", dpar = "sigmay2") scode <- make_stancode(bf(y ~ 1, muy3 ~ x, sigmay2 ~ x), data = dat, family = logistic_normal(), prior = prior) expect_match2(scode, "array[N] vector[ncat] Y;") expect_match2(scode, "mu[n] = transpose([muy2[n], muy3[n]]);") expect_match2(scode, "sigma[n] = transpose([sigmay2[n], sigmay3]);") expect_match2(scode, "target += logistic_normal_cholesky_cor_lpdf(Y[n] | mu[n], sigma[n], Llncor, 1);") expect_match2(scode, "muy3 += Intercept_muy3 + Xc_muy3 * b_muy3;") expect_match2(scode, "lprior += normal_lpdf(b_muy3 | 0, 5);") expect_match2(scode, "lprior += normal_lpdf(b_sigmay2 | 0, 3);") expect_match2(scode, "lprior += lkj_corr_cholesky_lpdf(Llncor | 1);") }) test_that("Stan code for ARMA models is correct", { dat <- data.frame(y = rep(1:4, 2), x = 1:8, time = 1:8) scode <- make_stancode(y ~ x + ar(time), dat, student()) expect_match2(scode, 
"vector[Kar] ar") expect_match2(scode, "err[n] = Y[n] - mu[n];") expect_match2(scode, "mu[n] += Err[n, 1:Kar] * ar;") scode <- make_stancode(y ~ x + ma(time, q = 2), dat, student()) expect_match2(scode, "mu[n] += Err[n, 1:Kma] * ma;") expect_warning( scode <- make_stancode(mvbind(y, x) ~ 1, dat, gaussian(), autocor = cor_ar()), "Argument 'autocor' should be specified within the 'formula' argument" ) expect_match2(scode, "err_y[n] = Y_y[n] - mu_y[n];") bform <- bf(y ~ x, sigma ~ x) + acformula(~arma(time, cov = TRUE)) scode <- make_stancode(bform, dat, family = student) expect_match2(scode, "student_t_time_het_lpdf(Y | nu, mu, sigma, Lcortime") bform <- bf(y ~ exp(eta) - 1, eta ~ x, autocor = ~ar(time), nl = TRUE) scode <- make_stancode(bform, dat, family = student, prior = prior(normal(0, 1), nlpar = eta)) expect_match2(scode, "mu[n] += Err[n, 1:Kar] * ar;") # correlations of latent residuals scode <- make_stancode( y ~ x + ar(time, cov = TRUE), dat, family = poisson, prior = prior(cauchy(0, 10), class = sderr) ) expect_match2(scode, "Lcortime = cholesky_cor_ar1(ar[1], max_nobs_tg);") expect_match2(scode, "err = scale_time_err(zerr, sderr, Lcortime, nobs_tg, begin_tg, end_tg);" ) expect_match2(scode, "mu += Intercept + Xc * b + err;") expect_match2(scode, "lprior += cauchy_lpdf(sderr | 0, 10)") scode <- make_stancode( y ~ x + ar(time), dat, family = poisson, prior = prior(cauchy(0, 10), class = sderr) ) expect_match2(scode, "vector[Kar] ar") expect_match2(scode, "mu[n] += Err[n, 1:Kar] * ar;") expect_match2(scode, "err = sderr * zerr;") expect_match2(scode, "mu += Intercept + Xc * b + err;") expect_match2(scode, "lprior += cauchy_lpdf(sderr | 0, 10)") # apply shrinkage priors on sderr scode <- make_stancode( y ~ x + ar(time), dat, family = poisson, prior = prior(horseshoe(main = TRUE), class = b) + prior(horseshoe(), class = sderr) ) expect_match2(scode, "sderr = scales[(1+Kc):(Kc+1)][1];") }) test_that("Stan code for compound symmetry models is correct", { dat <- 
data.frame(y = rep(1:4, 2), x = 1:8, time = 1:8) scode <- make_stancode( y ~ x + cosy(time), dat, prior = prior(normal(0, 2), cosy) ) expect_match2(scode, "real cosy;") expect_match2(scode, "Lcortime = cholesky_cor_cosy(cosy, max_nobs_tg);") expect_match2(scode, "lprior += normal_lpdf(cosy | 0, 2)") scode <- make_stancode(bf(y ~ x + cosy(time), sigma ~ x), dat) expect_match2(scode, "normal_time_het_lpdf(Y | mu, sigma, Lcortime") scode <- make_stancode(y ~ x + cosy(time), dat, family = poisson) expect_match2(scode, "Lcortime = cholesky_cor_cosy(cosy, max_nobs_tg);") }) test_that("Stan code for UNSTR covariance terms is correct", { dat <- data.frame(y = 1:12, x = rnorm(12), tim = c(5:1, 1:5, c(0, 4)), g = c(rep(3:4, 5), rep(2, 2))) scode <- make_stancode(y ~ x + unstr(tim, g), data = dat) expect_match2(scode, "normal_time_hom_flex_lpdf(Y | mu, sigma, Lcortime, nobs_tg, begin_tg, end_tg, Jtime_tg);") expect_match2(scode, "cortime[choose(k - 1, 2) + j] = Cortime[j, k];") expect_match2(scode, "lprior += lkj_corr_cholesky_lpdf(Lcortime | 1);") scode <- make_stancode( y ~ x + unstr(tim, g), data = dat, family = student(), prior = prior(lkj(4), cortime) ) expect_match2(scode, "student_t_time_hom_flex_lpdf(Y | nu, mu, sigma, Lcortime, nobs_tg, begin_tg, end_tg, Jtime_tg);") expect_match2(scode, "lprior += lkj_corr_cholesky_lpdf(Lcortime | 4);") # test standard error scode <- make_stancode( y | se(1, sigma = TRUE) ~ x + unstr(tim, g), data = dat, family = gaussian(), ) expect_match2(scode, "normal_time_hom_se_flex_lpdf(Y | mu, sigma, se2, Lcortime, nobs_tg, begin_tg, end_tg, Jtime_tg);") # test latent representation scode <- make_stancode( y ~ x + unstr(tim, g), data = dat, family = poisson() ) expect_match2(scode, "err = scale_time_err_flex(zerr, sderr, Lcortime, nobs_tg, begin_tg, end_tg,") expect_match2(scode, "mu += Intercept + Xc * b + err;") # non-linear model scode <- make_stancode( bf(y ~ a, a ~ x, autocor = ~ unstr(tim, g), nl = TRUE), data = dat, family = 
student(), prior = prior(normal(0,1), nlpar = a) ) expect_match2(scode, "student_t_time_hom_flex_lpdf(Y | nu, mu, sigma, Lcortime, nobs_tg, begin_tg, end_tg, Jtime_tg);") }) test_that("Stan code for intercept only models is correct", { expect_match2(make_stancode(rating ~ 1, data = inhaler), "b_Intercept = Intercept;") expect_match2(make_stancode(rating ~ 1, data = inhaler, family = cratio()), "b_Intercept = Intercept;") expect_match2(make_stancode(rating ~ 1, data = inhaler, family = categorical()), "b_mu3_Intercept = Intercept_mu3;") }) test_that("Stan code of ordinal models is correct", { dat <- data.frame(y = c(rep(1:4, 2), 1, 1), x1 = rnorm(10), x2 = rnorm(10), g = factor(rep(1:2, 5))) scode <- make_stancode( y ~ x1, dat, family = cumulative(), prior = prior(normal(0, 2), Intercept, coef = 2) ) expect_match2(scode, "target += ordered_logistic_lpmf(Y[n] | mu[n], Intercept);" ) expect_match2(scode, "lprior += student_t_lpdf(Intercept[1] | 3, 0, 2.5);") expect_match2(scode, "lprior += normal_lpdf(Intercept[2] | 0, 2);") scode <- make_stancode( y ~ x1, dat, cumulative("probit", threshold = "equidistant"), prior = prior(normal(0, 2), Intercept) ) expect_match2(scode, "real cumulative_probit_lpmf(int y") expect_match2(scode, "p = Phi(disc * (thres[1] - mu));") expect_match2(scode, "real delta;") expect_match2(scode, "Intercept[k] = first_Intercept + (k - 1.0) * delta;") expect_match2(scode, "b_Intercept = Intercept + dot_product(means_X, b);") expect_match2(scode, "lprior += normal_lpdf(first_Intercept | 0, 2);") scode <- make_stancode(y ~ x1, dat, family = cratio("probit")) expect_match2(scode, "real cratio_probit_lpmf(int y") expect_match2(scode, "q[k] = normal_lcdf(disc * (mu - thres[k])|0,1);") scode <- make_stancode(y ~ x1 + cs(x2) + cs(g), dat, family = sratio()) expect_match2(scode, "real sratio_logit_lpmf(int y") expect_match2(scode, "matrix[N, Kcs] Xcs;") expect_match2(scode, "matrix[Kcs, nthres] bcs;") expect_match2(scode, "mucs = Xcs * bcs;") 
expect_match2(scode, "target += sratio_logit_lpmf(Y[n] | mu[n], disc, Intercept - transpose(mucs[n]));" ) scode <- make_stancode(y ~ x1 + cse(x2) + (cse(1)|g), dat, family = acat()) expect_match2(scode, "real acat_logit_lpmf(int y") expect_match2(scode, "mucs[n, 1] = mucs[n, 1] + r_1_1[J_1[n]] * Z_1_1[n];") expect_match2(scode, "b_Intercept = Intercept + dot_product(means_X, b);") scode <- make_stancode(y ~ x1 + (cse(x2)||g), dat, family = acat("probit_approx")) expect_match2(scode, paste("mucs[n, 3] = mucs[n, 3] + r_1_3[J_1[n]] * Z_1_3[n]", "+ r_1_6[J_1[n]] * Z_1_6[n];")) expect_match2(scode, "target += acat_probit_approx_lpmf(Y[n] | mu[n], disc, Intercept - transpose(mucs[n]));" ) # sum-to-zero thresholds scode <- make_stancode( y ~ x1, dat, cumulative("probit", threshold = "sum_to_zero"), prior = prior(normal(0, 2), Intercept) ) expect_match2(scode, "Intercept_stz = Intercept - mean(Intercept);") expect_match2(scode, "cumulative_probit_lpmf(Y[n] | mu[n], disc, Intercept_stz);") expect_match2(scode, "vector[nthres] b_Intercept = Intercept_stz;") # non-linear ordinal models scode <- make_stancode( bf(y ~ eta, eta ~ x1, nl = TRUE), dat, family = cumulative(), prior = prior(normal(0, 2), nlpar = eta) ) expect_match2(scode, "ordered[nthres] Intercept;") expect_match2(scode, "target += ordered_logistic_lpmf(Y[n] | mu[n], Intercept);" ) # ordinal mixture models with fixed intercepts scode <- make_stancode( bf(y ~ 1, mu1 ~ x1, mu2 ~ 1), data = dat, family = mixture(cumulative(), nmix = 2, order = "mu") ) expect_match2(scode, "Intercept_mu2 = fixed_Intercept;") expect_match2(scode, "lprior += student_t_lpdf(fixed_Intercept | 3, 0, 2.5);") }) test_that("ordinal disc parameters appear in the Stan code", { scode <- make_stancode( bf(rating ~ period + carry + treat, disc ~ period), data = inhaler, family = cumulative(), prior = prior(normal(0,5), dpar = disc) ) expect_match2(scode, "target += cumulative_logit_lpmf(Y[n] | mu[n], disc[n], Intercept)" ) expect_match2(scode, 
"lprior += normal_lpdf(b_disc | 0, 5)") expect_match2(scode, "disc = exp(disc)") }) test_that("grouped ordinal thresholds appear in the Stan code", { dat <- data.frame( y = sample(1:6, 10, TRUE), y2 = sample(1:6, 10, TRUE), gr = rep(c("a", "b"), each = 5), th = rep(5:6, each = 5), x = rnorm(10) ) prior <- prior(normal(0,1), class = "Intercept", group = "b") scode <- make_stancode( y | thres(th, gr) ~ x, data = dat, family = sratio(), prior = prior ) expect_match2(scode, "array[ngrthres] int nthres;") expect_match2(scode, "merged_Intercept[Kthres_start[1]:Kthres_end[1]] = Intercept_1;") expect_match2(scode, "target += sratio_logit_merged_lpmf(Y[n]") expect_match2(scode, "lprior += normal_lpdf(Intercept_2 | 0, 1);") # centering needs to be deactivated automatically expect_match2(scode, "vector[nthres[1]] b_Intercept_1 = Intercept_1;") # model with equidistant thresholds scode <- make_stancode( y | thres(th, gr) ~ x, data = dat, family = cumulative(threshold = "equidistant"), prior = prior ) expect_match2(scode, "target += ordered_logistic_merged_lpmf(Y[n]") expect_match2(scode, "real first_Intercept_1;") expect_match2(scode, "lprior += normal_lpdf(first_Intercept_2 | 0, 1);") expect_match2(scode, "Intercept_2[k] = first_Intercept_2 + (k - 1.0) * delta_2;") # sum-to-zero constraints scode <- make_stancode( y | thres(gr = gr) ~ x, data = dat, cumulative(threshold = "sum_to_zero"), prior = prior(normal(0, 2), Intercept) ) expect_match2(scode, "merged_Intercept_stz[Kthres_start[2]:Kthres_end[2]] = Intercept_stz_2;") expect_match2(scode, "ordered_logistic_merged_lpmf(Y[n] | mu[n], merged_Intercept_stz, Jthres[n]);") # ordinal mixture model scode <- make_stancode( y | thres(th, gr) ~ x, data = dat, family = mixture(cratio, acat, order = "mu"), prior = prior ) expect_match2(scode, "ps[1] = log(theta1) + cratio_logit_merged_lpmf(Y[n]") expect_match2(scode, "ps[2] = log(theta2) + acat_logit_merged_lpmf(Y[n]") expect_match2(scode, "vector[nmthres] merged_Intercept_mu1;") 
expect_match2(scode, "merged_Intercept_mu2[Kthres_start[1]:Kthres_end[1]] = Intercept_mu2_1;") expect_match2(scode, "vector[nthres[1]] b_mu1_Intercept_1 = Intercept_mu1_1;") # multivariate ordinal model bform <- bf(y | thres(th, gr) ~ x, family = sratio) + bf(y2 | thres(th, gr) ~ x, family = cumulative) scode <- make_stancode(bform, data = dat) expect_match2(scode, "lprior += student_t_lpdf(Intercept_y2_1 | 3, 0, 2.5);") expect_match2(scode, "merged_Intercept_y[Kthres_start_y[2]:Kthres_end_y[2]] = Intercept_y_2;") }) test_that("Stan code of hurdle cumulative model is correct", { dat <- data.frame(y = rep(0:4, 2), x1 = rnorm(10), x2 = rnorm(10), g = factor(rep(1:2, 5))) scode <- make_stancode( y ~ x1, dat, family = hurdle_cumulative(), prior = prior(normal(0, 2), Intercept, coef = 2) ) expect_match2(scode, "target += hurdle_cumulative_ordered_logistic_lpmf(Y[n] | mu[n], hu, disc, Intercept);" ) scode <- make_stancode( bf(y ~ x1, hu ~ x2), dat, hurdle_cumulative("probit", threshold = "equidistant"), prior = prior(normal(0, 2), Intercept) ) expect_match2(scode, "real hurdle_cumulative_probit_lpmf(int y") expect_match2(scode, "p = Phi(disc * (thres[1] - mu)) * (1 - hu);") expect_match2(scode, "Intercept[k] = first_Intercept + (k - 1.0) * delta;") # sum-to-zero thresholds scode <- make_stancode( bf(y ~ x1, hu ~ x2, disc ~ g), dat, hurdle_cumulative("cloglog", threshold = "sum_to_zero"), prior = prior(normal(0, 2), Intercept) ) expect_match2(scode, "Intercept_stz = Intercept - mean(Intercept);") expect_match2(scode, "hurdle_cumulative_cloglog_lpmf(Y[n] | mu[n], hu[n], disc[n], Intercept_stz);") expect_match2(scode, "vector[nthres] b_Intercept = Intercept_stz;") # non-linear ordinal models scode <- make_stancode( bf(y ~ eta, eta ~ x1, nl = TRUE), dat, family = hurdle_cumulative(), prior = prior(normal(0, 2), nlpar = eta) ) expect_match2(scode, "target += hurdle_cumulative_ordered_logistic_lpmf(Y[n] | mu[n], hu, disc, Intercept);" ) }) test_that("monotonic effects appear 
in the Stan code", { dat <- data.frame(y = rpois(120, 10), x1 = rep(1:4, 30), x2 = factor(rep(c("a", "b", "c"), 40), ordered = TRUE), g = rep(1:10, each = 12)) prior <- c(prior(normal(0,1), class = b, coef = mox1), prior(dirichlet(c(1,0.5,2)), simo, coef = mox11), prior(dirichlet(c(1,0.5,2)), simo, coef = mox21)) scode <- make_stancode(y ~ y*mo(x1)*mo(x2), dat, prior = prior) expect_match2(scode, "array[N] int Xmo_3;") expect_match2(scode, "simplex[Jmo[1]] simo_1;") expect_match2(scode, "(bsp[2]) * mo(simo_2, Xmo_2[n])") expect_match2(scode, "(bsp[6]) * mo(simo_7, Xmo_7[n]) * mo(simo_8, Xmo_8[n]) * Csp_3[n]" ) expect_match2(scode, "lprior += normal_lpdf(bsp[1] | 0, 1)") expect_match2(scode, "lprior += dirichlet_lpdf(simo_1 | con_simo_1);") expect_match2(scode, "lprior += dirichlet_lpdf(simo_8 | con_simo_8);") scode <- make_stancode(y ~ mo(x1) + (mo(x1) | x2), dat) expect_match2(scode, "(bsp[1] + r_1_2[J_1[n]]) * mo(simo_1, Xmo_1[n])") expect_true(!grepl("Z_1_w", scode)) # test issue reported in discourse post #12978 scode <- make_stancode(y ~ mo(x1) + (mo(x1) | x2) + (mo(x1) | g), dat) expect_match2(scode, "(bsp[1] + r_1_2[J_1[n]] + r_2_2[J_2[n]]) * mo(simo_1, Xmo_1[n])") # test issue #813 scode <- make_stancode(y ~ mo(x1):y, dat) expect_match2(scode, "mu[n] += (bsp[1]) * mo(simo_1, Xmo_1[n]) * Csp_1[n];") # test issue #924 (conditional monotonicity) prior <- c(prior(dirichlet(c(1,0.5,2)), simo, coef = "v"), prior(dirichlet(c(1,0.5,2)), simo, coef = "w")) scode <- make_stancode(y ~ y*mo(x1, id = "v")*mo(x2, id = "w"), dat, prior = prior) expect_match2(scode, "lprior += dirichlet_lpdf(simo_1 | con_simo_1);") expect_match2(scode, "lprior += dirichlet_lpdf(simo_2 | con_simo_2);") expect_match2(scode, "simplex[Jmo[6]] simo_6 = simo_2;") expect_match2(scode, "simplex[Jmo[7]] simo_7 = simo_1;") expect_error( make_stancode(y ~ mo(x1) + (mo(x2) | x2), dat), "Special group-level terms require" ) prior <- prior(beta(1, 1), simo, coef = mox11) expect_error( make_stancode(y ~ 
mo(x1), dat, prior = prior), "'dirichlet' is the only valid prior for simplex parameters" ) }) test_that("Stan code for non-linear models is correct", { flist <- list(a ~ x, b ~ z + (1|g)) data <- data.frame( y = rgamma(9, 1, 1), x = rnorm(9), z = rnorm(9), v = 1L:9L, g = rep(1:3, 3) ) prior <- c(set_prior("normal(0,5)", nlpar = "a"), set_prior("normal(0,1)", nlpar = "b")) # syntactic validity is already checked within make_stancode scode <- make_stancode( bf(y ~ a - exp(b^z) * (z <= a) * v, flist = flist, nl = TRUE), data = data, prior = prior ) expect_match2(scode, "mu[n] = (nlp_a[n] - exp(nlp_b[n] ^ C_1[n]) * (C_1[n] <= nlp_a[n]) * C_2[n]);" ) expect_match2(scode, "vector[N] C_1;") expect_match2(scode, "array[N] int C_2;") # non-linear predictor can be computed outside a loop scode <- make_stancode(bf(y ~ a - exp(b + z), flist = flist, nl = TRUE, loop = FALSE), data = data, prior = prior) expect_match2(scode, "mu = (nlp_a - exp(nlp_b + C_1));") # check if that also works with threading scode <- make_stancode(bf(y ~ a - exp(b + z), flist = flist, nl = TRUE, loop = FALSE), data = data, prior = prior, threads = threading(2), parse = FALSE) expect_match2(scode, "mu = (nlp_a - exp(nlp_b + C_1[start:end]));") flist <- list(a1 ~ 1, a2 ~ z + (x|g)) prior <- c(set_prior("beta(1,1)", nlpar = "a1", lb = 0, ub = 1), set_prior("normal(0,1)", nlpar = "a2")) scode <- make_stancode( bf(y ~ a1 * exp(-x/(a2 + z)), flist = flist, nl = TRUE), data = data, family = Gamma("log"), prior = prior ) expect_match2(scode, "mu[n] = exp(nlp_a1[n] * exp( - C_1[n] / (nlp_a2[n] + C_2[n])));") bform <- bf(y ~ x) + nlf(sigma ~ a1 * exp(-x/(a2 + z))) + lf(a1 ~ 1, a2 ~ z + (x|g)) + lf(alpha ~ x) scode <- make_stancode( bform, data, family = skew_normal(), prior = c( prior(normal(0, 1), nlpar = a1), prior(normal(0, 5), nlpar = a2) ) ) expect_match2(scode, "nlp_a1 += X_a1 * b_a1") expect_match2(scode, "sigma[n] = exp(nlp_a1[n] * exp( - C_sigma_1[n] / (nlp_a2[n] + C_sigma_2[n])))" ) 
expect_match2(scode, "lprior += normal_lpdf(b_a2 | 0, 5)") }) test_that("Stan code for nested non-linear parameters is correct", { dat <- data.frame(y = rnorm(10), x = rnorm(10), z = 1:5) bform <- bf( y ~ lb + (1 - lb) * inv_logit(b * x), b + a ~ 1 + (1 | z), nlf(lb ~ inv_logit(a / x)), nl = TRUE ) bprior <- prior(normal(0, 1), nlpar = "a") + prior(normal(0, 1), nlpar = "b") scode <- make_stancode(bform, dat, prior = bprior) expect_match2(scode, "nlp_lb[n] = (inv_logit(nlp_a[n] / C_lb_1[n]));") expect_match2(scode, "mu[n] = (nlp_lb[n] + (1 - nlp_lb[n]) * inv_logit(nlp_b[n] * C_1[n]));" ) }) test_that("make_stancode is correct for non-linear matrix covariates", { N <- 10 dat <- data.frame(y=rnorm(N)) dat$X <- matrix(rnorm(N*2), N, 2) dat$X2 <- matrix(1L:4L, N, 2) # numeric matrix nlfun_stan <- " real nlfun(real a, real b, real c, row_vector X) { return a + b * X[1] + c * X[2]; } " nlstanvar <- stanvar(scode = nlfun_stan, block = "functions") bform <- bf(y~nlfun(a, b, c, X), a~1, b~1, c~1, nl = TRUE) scode <- make_stancode(bform, dat, stanvars = nlstanvar) expect_match2(scode, "matrix[N, 2] C_1;") # integer matrix nlfun_stan_int <- " real nlfun(real a, real b, real c, int[] X) { return a + b * X[1] + c * X[2]; } " nlstanvar <- stanvar(scode = nlfun_stan_int, block = "functions") bform <- bf(y~nlfun(a, b, c, X2), a~1, b~1, c~1, nl = TRUE) scode <- make_stancode(bform, dat, stanvars = nlstanvar) expect_match2(scode, "array[N, 2] int C_1;") }) test_that("make_stancode accepts very long non-linear formulas", { data <- data.frame(y = rnorm(10), this_is_a_very_long_predictor = rnorm(10)) expect_silent(make_stancode(bf(y ~ b0 + this_is_a_very_long_predictor + this_is_a_very_long_predictor + this_is_a_very_long_predictor, b0 ~ 1, nl = TRUE), data = data, prior = prior(normal(0,1), nlpar = "b0"))) }) test_that("no loop in trans-par is defined for simple 'identity' models", { expect_true(!grepl(make_stancode(time ~ age, data = kidney), "mu[n] = (mu[n]);", fixed = TRUE)) 
expect_true(!grepl(make_stancode(time ~ age, data = kidney, family = poisson("identity")), "mu[n] = (mu[n]);", fixed = TRUE)) }) test_that("known standard errors appear in the Stan code", { scode <- make_stancode(time | se(age) ~ sex, data = kidney) expect_match2(scode, "target += normal_lpdf(Y | mu, se)") scode <- make_stancode(time | se(age) + weights(age) ~ sex, data = kidney) expect_match2(scode, "target += weights[n] * (normal_lpdf(Y[n] | mu[n], se[n]))") scode <- make_stancode(time | se(age, sigma = TRUE) ~ sex, data = kidney) expect_match2(scode, "target += normal_lpdf(Y | mu, sqrt(square(sigma) + se2))") scode <- make_stancode(bf(time | se(age, sigma = TRUE) ~ sex, sigma ~ sex), data = kidney) expect_match2(scode, "target += normal_lpdf(Y | mu, sqrt(square(sigma) + se2))") }) test_that("functions defined in 'stan_funs' appear in the functions block", { test_fun <- paste0(" real test_fun(real a, real b) {\n", " return a + b;\n", " }\n") scode <- SW(make_stancode(time ~ age, data = kidney, stan_funs = test_fun)) expect_match2(scode, test_fun) }) test_that("FCOR matrices appear in the Stan code", { data <- data.frame(y = 1:5) V <- diag(5) expect_match2(make_stancode(y ~ fcor(V), data = data, family = gaussian(), data2 = list(V = V)), "target += normal_fcor_hom_lpdf(Y | mu, sigma, Lfcor);") expect_match2(make_stancode(y ~ fcor(V), data = data, family = student(), data2 = list(V = V)), "target += student_t_fcor_hom_lpdf(Y | nu, mu, sigma, Lfcor);") }) test_that("Stan code for GAMMs is correct", { dat <- data.frame(y = rnorm(10), x = rnorm(10), g = factor(rep(1:2, 5))) scode <- make_stancode(y ~ s(x) + (1|g), data = dat, prior = set_prior("normal(0,2)", "sds")) expect_match2(scode, "Zs_1_1 * s_1_1") expect_match2(scode, "matrix[N, knots_1[1]] Zs_1_1") expect_match2(scode, "target += std_normal_lpdf(zs_1_1)") expect_match2(scode, "lprior += normal_lpdf(sds_1 | 0,2)") prior <- c(set_prior("normal(0,5)", nlpar = "lp"), set_prior("normal(0,2)", "sds", nlpar = "lp")) 
scode <- make_stancode(bf(y ~ lp, lp ~ s(x) + (1|g), nl = TRUE), data = dat, prior = prior) expect_match2(scode, "Zs_lp_1_1 * s_lp_1_1") expect_match2(scode, "matrix[N, knots_lp_1[1]] Zs_lp_1_1") expect_match2(scode, "target += std_normal_lpdf(zs_lp_1_1)") expect_match2(scode, "lprior += normal_lpdf(sds_lp_1 | 0,2)") scode <- make_stancode( y ~ s(x) + t2(x,y), data = dat, prior = set_prior("normal(0,1)", "sds") + set_prior("normal(0,2)", "sds", coef = "t2(x, y)") ) expect_match2(scode, "Zs_2_2 * s_2_2") expect_match2(scode, "matrix[N, knots_2[2]] Zs_2_2") expect_match2(scode, "target += std_normal_lpdf(zs_2_2)") expect_match2(scode, "lprior += normal_lpdf(sds_1 | 0,1)") expect_match2(scode, "lprior += normal_lpdf(sds_2 | 0,2)") scode <- make_stancode(y ~ g + s(x, by = g), data = dat) expect_match2(scode, "vector[knots_2[1]] zs_2_1") expect_match2(scode, "s_2_1 = sds_2[1] * zs_2_1") }) test_that("Stan code of response times models is correct", { dat <- epilepsy dat$cens <- sample(-1:1, nrow(dat), TRUE) scode <- make_stancode(count ~ Trt + (1|patient), data = dat, family = exgaussian("log"), prior = prior(gamma(1,1), class = beta)) expect_match2(scode, "target += exp_mod_normal_lpdf(Y | mu - beta, sigma, inv(beta))" ) expect_match2(scode, "mu = exp(mu)") expect_match2(scode, "lprior += gamma_lpdf(beta | 1, 1)") scode <- make_stancode(bf(count ~ Trt + (1|patient), sigma ~ Trt, beta ~ Trt), data = dat, family = exgaussian()) expect_match2(scode, "target += exp_mod_normal_lpdf(Y | mu - beta, sigma, inv(beta))" ) expect_match2(scode, "beta = exp(beta)") scode <- make_stancode(count | cens(cens) ~ Trt + (1|patient), data = dat, family = exgaussian("inverse")) expect_match2(scode, "exp_mod_normal_lccdf(Y[n] | mu[n] - beta, sigma, inv(beta))") scode <- make_stancode(count ~ Trt, dat, family = shifted_lognormal()) expect_match2(scode, "target += lognormal_lpdf(Y - ndt | mu, sigma)") scode <- make_stancode(count | cens(cens) ~ Trt, dat, family = shifted_lognormal()) 
expect_match2(scode, "target += lognormal_lcdf(Y[n] - ndt | mu[n], sigma)") # test issue #837 scode <- make_stancode(mvbind(count, zBase) ~ Trt, data = dat, family = shifted_lognormal()) expect_match2(scode, "lprior += uniform_lpdf(ndt_count | 0, min_Y_count)") expect_match2(scode, "lprior += uniform_lpdf(ndt_zBase | 0, min_Y_zBase)") }) test_that("Stan code of wiener diffusion models is correct", { dat <- data.frame(q = 1:10, resp = sample(0:1, 10, TRUE), x = rnorm(10)) scode <- make_stancode(q | dec(resp) ~ x, data = dat, family = wiener()) expect_match2(scode, "target += wiener_diffusion_lpdf(Y[n] | dec[n], bs, ndt, bias, mu[n])" ) scode <- make_stancode(bf(q | dec(resp) ~ x, bs ~ x, ndt ~ x, bias ~ x), data = dat, family = wiener()) expect_match2(scode, "target += wiener_diffusion_lpdf(Y[n] | dec[n], bs[n], ndt[n], bias[n], mu[n])" ) expect_match2(scode, "bias = inv_logit(bias);") scode <- make_stancode(bf(q | dec(resp) ~ x, ndt = 0.5), data = dat, family = wiener()) expect_match2(scode, "real ndt = 0.5;") expect_error(make_stancode(q ~ x, data = dat, family = wiener()), "Addition argument 'dec' is required for family 'wiener'") }) test_that("Group IDs appear in the Stan code", { form <- bf(count ~ Trt + (1+Trt|3|visit) + (1|patient), shape ~ (1|3|visit) + (Trt||patient)) scode <- make_stancode(form, data = epilepsy, family = negbinomial()) expect_match2(scode, "r_2_1 = r_2[, 1]") expect_match2(scode, "r_2_shape_3 = r_2[, 3]") form <- bf(count ~ a, sigma ~ (1|3|visit) + (Trt||patient), a ~ Trt + (1+Trt|3|visit) + (1|patient), nl = TRUE) scode <- make_stancode(form, data = epilepsy, family = student(), prior = set_prior("normal(0,5)", nlpar = "a")) expect_match2(scode, "r_2_a_2 = r_2[, 2];") expect_match2(scode, "r_1_sigma_2 = (sd_1[2] * (z_1[2]));") }) test_that("weighted, censored, and truncated likelihoods are correct", { dat <- data.frame(y = 1:9, x = rep(-1:1, 3), y2 = 10:18) scode <- make_stancode(y | weights(y2) ~ 1, dat, poisson()) expect_match2(scode, 
"target += weights[n] * (poisson_log_lpmf(Y[n] | mu[n]));") scode <- make_stancode(y | trials(y2) + weights(y2) ~ 1, dat, binomial()) expect_match2(scode, "target += weights[n] * (binomial_logit_lpmf(Y[n] | trials[n], mu[n]));" ) scode <- make_stancode(y | cens(x, y2) ~ 1, dat, poisson()) expect_match2(scode, "target += poisson_lpmf(Y[n] | mu[n]);") scode <- make_stancode(y | cens(x) ~ 1, dat, exponential()) expect_match2(scode, "target += exponential_lccdf(Y[n] | inv(mu[n]));") dat$x[1] <- 2 scode <- make_stancode(y | cens(x, y2) ~ 1, dat, gaussian()) expect_match2(scode, paste0( "target += log_diff_exp(\n", " normal_lcdf(rcens[n] | mu[n], sigma)," )) dat$x <- 1 expect_match2(make_stancode(y | cens(x) + weights(x) ~ 1, dat, exponential()), "target += weights[n] * exponential_lccdf(Y[n] | inv(mu[n]));") scode <- make_stancode(y | cens(x) + trunc(0.1) ~ 1, dat, exponential()) expect_match2(scode, "target += exponential_lccdf(Y[n] | inv(mu[n])) -") expect_match2(scode, " exponential_lccdf(lb[n] | inv(mu[n]));") scode <- make_stancode(y | cens(x) + trunc(ub = 30) ~ 1, dat) expect_match2(scode, "target += normal_lccdf(Y[n] | mu[n], sigma) -") expect_match2(scode, " normal_lcdf(ub[n] | mu[n], sigma);") scode <- make_stancode(y | weights(x) + trunc(0, 30) ~ 1, dat) expect_match2(scode, "target += weights[n] * (normal_lpdf(Y[n] | mu[n], sigma) -") expect_match2(scode, " log_diff_exp(normal_lcdf(ub[n] | mu[n], sigma),") expect_match2( make_stancode(y | trials(y2) + weights(y2) ~ 1, dat, beta_binomial()), "target += weights[n] * (beta_binomial_lpmf(Y[n] | trials[n], mu[n] * phi," ) expect_match2( make_stancode(y | trials(y2) + trunc(0, 30) ~ 1, dat, beta_binomial()), "log_diff_exp(beta_binomial_lcdf(ub[n] | trials[n], mu[n] * phi," ) expect_match2( make_stancode(y | trials(y2) + cens(x, y2) ~ 1, dat, beta_binomial()), "beta_binomial_lcdf(rcens[n] | trials[n], mu[n] * phi," ) }) test_that("noise-free terms appear in the Stan code", { N <- 30 dat <- data.frame( y = rnorm(N), 
x = rnorm(N), z = rnorm(N), xsd = abs(rnorm(N, 1)), zsd = abs(rnorm(N, 1)), ID = rep(1:5, each = N / 5) ) me_prior <- prior(normal(0,5)) + prior(normal(0, 10), "meanme") + prior(cauchy(0, 5), "sdme", coef = "mez") + prior(lkj(2), "corme") scode <- make_stancode( y ~ me(x, xsd)*me(z, zsd)*x, data = dat, prior = me_prior, sample_prior = "yes" ) expect_match2(scode, "(bsp[1]) * Xme_1[n] + (bsp[2]) * Xme_2[n] + (bsp[3]) * Xme_1[n] * Xme_2[n]" ) expect_match2(scode, "(bsp[6]) * Xme_1[n] * Xme_2[n] * Csp_3[n]") expect_match2(scode, "target += normal_lpdf(Xn_2 | Xme_2, noise_2)") expect_match2(scode, "lprior += normal_lpdf(bsp | 0, 5)") expect_match2(scode, "target += std_normal_lpdf(to_vector(zme_1))") expect_match2(scode, "lprior += normal_lpdf(meanme_1 | 0, 10)") expect_match2(scode, "lprior += cauchy_lpdf(sdme_1[2] | 0, 5)") expect_match2(scode, "lprior += lkj_corr_cholesky_lpdf(Lme_1 | 2)") expect_match2(scode, "+ transpose(diag_pre_multiply(sdme_1, Lme_1) * zme_1)") expect_match2(scode, "corme_1[choose(k - 1, 2) + j] = Corme_1[j, k];") scode <- make_stancode( y ~ me(x, xsd)*z + (me(x, xsd)*z | ID), data = dat ) expect_match2(scode, "(bsp[1] + r_1_3[J_1[n]]) * Xme_1[n]") expect_match2(scode, "(bsp[2] + r_1_4[J_1[n]]) * Xme_1[n] * Csp_1[n]") expect_match2(make_stancode(y ~ I(me(x, xsd)^2), data = dat), "(bsp[1]) * (Xme_1[n]^2)") # test that noise-free variables are unique across model parts scode <- make_stancode( bf(y ~ me(x, xsd)*me(z, zsd)*x, sigma ~ me(x, xsd)), data = dat, prior = prior(normal(0,5)) ) expect_match2(scode, "mu[n] += (bsp[1]) * Xme_1[n]") expect_match2(scode, "sigma[n] += (bsp_sigma[1]) * Xme_1[n]") scode <- make_stancode( bf(y ~ a * b, a + b ~ me(x, xsd), nl = TRUE), data = dat, prior = prior(normal(0,5), nlpar = a) + prior(normal(0, 5), nlpar = b) ) expect_match2(scode, "nlp_a[n] += (bsp_a[1]) * Xme_1[n]") expect_match2(scode, "nlp_b[n] += (bsp_b[1]) * Xme_1[n]") bform <- bf(mvbind(y, z) ~ me(x, xsd)) + set_rescor(TRUE) + set_mecor(FALSE) scode 
<- make_stancode(bform, dat) expect_match2(scode, "mu_y[n] += (bsp_y[1]) * Xme_1[n]") expect_match2(scode, "mu_z[n] += (bsp_z[1]) * Xme_1[n]") expect_match2(scode, "Xme_1 = meanme_1[1] + sdme_1[1] * zme_1;") # noise-free terms with grouping factors bform <- bf(y ~ me(x, xsd, ID) + me(z, xsd) + (me(x, xsd, ID) | ID)) scode <- make_stancode(bform, dat) expect_match2(scode, "vector[Nme_1] Xn_1;") expect_match2(scode, "Xme_1 = meanme_1[1] + sdme_1[1] * zme_1;") expect_match2(scode, "Xme_2 = meanme_2[1] + sdme_2[1] * zme_2;") expect_match2(scode, "(bsp[1] + r_1_2[J_1[n]]) * Xme_1[Jme_1[n]]") bform <- bform + set_mecor(FALSE) scode <- make_stancode(bform, dat) expect_match2(scode, "Xme_1 = meanme_1[1] + sdme_1[1] * zme_1;") }) test_that("Stan code of multi-membership models is correct", { dat <- data.frame(y = rnorm(10), g1 = sample(1:10, 10, TRUE), g2 = sample(1:10, 10, TRUE), w1 = rep(1, 10), w2 = rep(abs(rnorm(10)))) expect_match2(make_stancode(y ~ (1|mm(g1, g2)), data = dat), paste0(" W_1_1[n] * r_1_1[J_1_1[n]] * Z_1_1_1[n]", " + W_1_2[n] * r_1_1[J_1_2[n]] * Z_1_1_2[n]") ) expect_match2(make_stancode(y ~ (1+w1|mm(g1,g2)), data = dat), paste0(" W_1_1[n] * r_1_2[J_1_1[n]] * Z_1_2_1[n]", " + W_1_2[n] * r_1_2[J_1_2[n]] * Z_1_2_2[n]") ) expect_match2(make_stancode(y ~ (1+mmc(w1, w2)|mm(g1,g2)), data = dat), " W_1_2[n] * r_1_2[J_1_2[n]] * Z_1_2_2[n];" ) }) test_that("by variables in grouping terms are handled correctly", { dat <- data.frame( y = rnorm(100), x = rnorm(100), g = rep(1:10, each = 10), z = factor(rep(c(0, 4.5, 3, 2, 5), each = 20)) ) scode <- make_stancode(y ~ x + (1 | gr(g, by = z)), dat) expect_match2(scode, "r_1_1 = (transpose(sd_1[1, Jby_1]) .* (z_1[1]));") scode <- make_stancode(y ~ x + (x | gr(g, by = z)), dat) expect_match2(scode, "r_1 = scale_r_cor_by(z_1, sd_1, L_1, Jby_1);") expect_match2(scode, "lprior += student_t_lpdf(to_vector(sd_1) | 3, 0, 2.5)") expect_match2(scode, "lprior += lkj_corr_cholesky_lpdf(L_1[5] | 1);") }) test_that("Group syntax | 
and || is handled correctly,", { data <- data.frame(y = rnorm(10), x = rnorm(10), g1 = rep(1:5, each = 2), g2 = rep(1:2, 5)) scode <- make_stancode(y ~ x + (1+x||g1) + (I(x/4)|g2), data) expect_match2(scode, "r_1_2 = (sd_1[2] * (z_1[2]));") expect_match2(scode, "r_2_1 = r_2[, 1];") expect_match2(scode, "r_2 = scale_r_cor(z_2, sd_2, L_2);") }) test_that("predicting zi and hu works correctly", { scode <- make_stancode(bf(count ~ Trt, zi ~ Trt), epilepsy, family = "zero_inflated_poisson") expect_match2(scode, "target += zero_inflated_poisson_log_logit_lpmf(Y[n] | mu[n], zi[n])" ) expect_true(!grepl("inv_logit\\(", scode)) expect_true(!grepl("exp(mu[n])", scode, fixed = TRUE)) scode <- make_stancode(bf(count ~ Trt, zi ~ Trt), epilepsy, family = zero_inflated_poisson(identity)) expect_match2(scode, "target += zero_inflated_poisson_logit_lpmf(Y[n] | mu[n], zi[n])" ) scode <- make_stancode(bf(count ~ Trt, zi ~ Trt), epilepsy, family = "zero_inflated_binomial") expect_match2(scode, "target += zero_inflated_binomial_blogit_logit_lpmf(Y[n] | trials[n], mu[n], zi[n])" ) expect_true(!grepl("inv_logit\\(", scode)) fam <- zero_inflated_binomial("probit", link_zi = "identity") scode <- make_stancode( bf(count ~ Trt, zi ~ Trt), epilepsy, family = fam, prior = prior("", class = Intercept, dpar = zi, lb = 0, ub = 1) ) expect_match2(scode, "target += zero_inflated_binomial_lpmf(Y[n] | trials[n], mu[n], zi[n])" ) expect_match2(scode, "mu = Phi(mu);") scode <- make_stancode(bf(count ~ Trt, zi ~ Trt), epilepsy, family = "zero_inflated_beta_binomial") expect_match2(scode, paste("target += zero_inflated_beta_binomial_logit_lpmf(Y[n]", "| trials[n], mu[n], phi, zi[n])")) expect_match2(scode, "mu = inv_logit(mu);") scode <- make_stancode( bf(count ~ Trt, zi ~ Trt), epilepsy, zero_inflated_beta_binomial("probit", link_zi = "identity"), prior = prior("", class = Intercept, dpar = zi, lb = 0, ub = 1) ) expect_match2(scode, paste("target += zero_inflated_beta_binomial_lpmf(Y[n]", "| trials[n], 
mu[n], phi, zi[n])")) expect_match2(scode, "mu = Phi(mu);") scode <- make_stancode( bf(count ~ Trt, zi ~ Trt), epilepsy, family = zero_inflated_beta() ) expect_match2(scode, "target += zero_inflated_beta_logit_lpdf(Y[n] | mu[n], phi, zi[n])" ) scode <- make_stancode(bf(count ~ Trt, hu ~ Trt), epilepsy, family = "hurdle_negbinomial") expect_match2(scode, "target += hurdle_neg_binomial_log_logit_lpmf(Y[n] | mu[n], shape, hu[n])" ) expect_true(!grepl("inv_logit\\(", scode)) expect_true(!grepl("exp(mu)", scode, fixed = TRUE)) scode <- make_stancode(bf(count ~ Trt, hu ~ Trt), epilepsy, family = "hurdle_gamma") expect_match2(scode, "hurdle_gamma_logit_lpdf(Y[n] | shape, shape / mu[n], hu[n])" ) expect_true(!grepl("inv_logit\\(", scode)) scode <- make_stancode( bf(count ~ Trt, hu ~ Trt), epilepsy, family = hurdle_gamma(link_hu = "identity"), prior = prior("", class = Intercept, dpar = hu, lb = 0, ub = 1) ) expect_match2(scode, "hurdle_gamma_lpdf(Y[n] | shape, shape / mu[n], hu[n])") expect_true(!grepl("inv_logit\\(", scode)) }) test_that("fixing auxiliary parameters is possible", { scode <- make_stancode(bf(y ~ 1, sigma = 0.5), data = list(y = rnorm(10))) expect_match2(scode, "real sigma = 0.5;") }) test_that("Stan code of quantile regression models is correct", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) scode <- make_stancode(y ~ x, data, family = asym_laplace()) expect_match2(scode, "target += asym_laplace_lpdf(Y[n] | mu[n], sigma, quantile)") scode <- make_stancode(bf(y ~ x, quantile = 0.75), data, family = asym_laplace()) expect_match2(scode, "real quantile = 0.75;") scode <- make_stancode(y | cens(c) ~ x, data, family = asym_laplace()) expect_match2(scode, "target += asym_laplace_lccdf(Y[n] | mu[n], sigma, quantile)") scode <- make_stancode(bf(y ~ x, sigma ~ x), data, family = asym_laplace()) expect_match2(scode, "target += asym_laplace_lpdf(Y[n] | mu[n], sigma[n], quantile)") scode <- make_stancode(bf(y ~ x, quantile = 0.75), data, family = 
brmsfamily("zero_inflated_asym_laplace")) expect_match2(scode, "target += zero_inflated_asym_laplace_lpdf(Y[n] | mu[n], sigma, quantile, zi)" ) }) test_that("Stan code of addition term 'rate' is correct", { data <- data.frame(y = rpois(10, 1), x = rnorm(10), time = 1:10) scode <- make_stancode(y | rate(time) ~ x, data, poisson()) expect_match2(scode, "target += poisson_log_lpmf(Y | mu + log_denom);") scode <- make_stancode(y | rate(time) ~ x, data, poisson("identity")) expect_match2(scode, "target += poisson_lpmf(Y | mu .* denom);") scode <- make_stancode(y | rate(time) ~ x, data, negbinomial()) expect_match2(scode, "target += neg_binomial_2_log_lpmf(Y | mu + log_denom, shape * denom);") bform <- bf(y | rate(time) ~ mi(x), shape ~ mi(x), family = negbinomial()) + bf(x | mi() ~ 1, family = gaussian()) scode <- make_stancode(bform, data) expect_match2(scode, "target += neg_binomial_2_log_lpmf(Y_y | mu_y + log_denom_y, shape_y .* denom_y);") scode <- make_stancode(y | rate(time) ~ x, data, brmsfamily("negbinomial2")) expect_match2(scode, "target += neg_binomial_2_log_lpmf(Y | mu + log_denom, inv(sigma) * denom);") scode <- make_stancode(y | rate(time) + cens(1) ~ x, data, geometric()) expect_match2(scode, "target += neg_binomial_2_lpmf(Y[n] | mu[n] * denom[n], 1 * denom[n]);") }) test_that("Stan code of GEV models is correct", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) scode <- make_stancode(y ~ x, data, gen_extreme_value()) expect_match2(scode, "target += gen_extreme_value_lpdf(Y[n] | mu[n], sigma, xi)") expect_match2(scode, "xi = scale_xi(tmp_xi, Y, mu, sigma)") scode <- make_stancode(bf(y ~ x, sigma ~ x), data, gen_extreme_value()) expect_match2(scode, "xi = scale_xi_vector(tmp_xi, Y, mu, sigma)") scode <- make_stancode(bf(y ~ x, xi ~ x), data, gen_extreme_value()) expect_match2(scode, "xi = expm1(xi)") scode <- make_stancode(bf(y ~ x, xi = 0), data, gen_extreme_value()) expect_match2(scode, "real xi = 0; // shape parameter") scode <- make_stancode(y 
| cens(c) ~ x, data, gen_extreme_value()) expect_match2(scode, "target += gen_extreme_value_lccdf(Y[n] | mu[n], sigma, xi)") }) test_that("Stan code of Cox models is correct", { data <- data.frame(y = rexp(100), ce = sample(0:1, 100, TRUE), x = rnorm(100)) bform <- bf(y | cens(ce) ~ x) scode <- make_stancode(bform, data, brmsfamily("cox")) expect_match2(scode, "target += cox_log_lpdf(Y[n] | mu[n], bhaz[n], cbhaz[n]);") expect_match2(scode, "vector[N] cbhaz = Zcbhaz * sbhaz;") expect_match2(scode, "lprior += dirichlet_lpdf(sbhaz | con_sbhaz);") expect_match2(scode, "simplex[Kbhaz] sbhaz;") scode <- make_stancode(bform, data, brmsfamily("cox", "identity")) expect_match2(scode, "target += cox_lccdf(Y[n] | mu[n], bhaz[n], cbhaz[n]);") }) test_that("offsets appear in the Stan code", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) scode <- make_stancode(y ~ x + offset(c), data) expect_match2(scode, "+ offsets;") scode <- make_stancode(bf(y ~ a, a ~ offset(log(c + 1)), nl = TRUE), data, prior = prior(normal(0,1), nlpar = a)) expect_match2(scode, "+ offsets_a;") }) test_that("prior only models are correctly checked", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) prior <- prior(normal(0, 5), b) + prior("", Intercept) expect_error(make_stancode(y ~ x, data, prior = prior, sample_prior = "only"), "Sampling from priors is not possible") prior <- prior(normal(0, 5), b) + prior(normal(0, 10), Intercept) scode <- make_stancode(y ~ x, data, prior = prior, sample_prior = "only") expect_match2(scode, "lprior += normal_lpdf(Intercept | 0, 10)") }) test_that("Stan code of mixture model is correct", { data <- data.frame(y = 1:10, x = rnorm(10), c = 1) data$z <- abs(data$y) scode <- make_stancode( bf(y ~ x, sigma2 ~ x), data, family = mixture(gaussian, gaussian), sample_prior = TRUE ) expect_match2(scode, "ordered[2] ordered_Intercept;") expect_match2(scode, "Intercept_mu2 = ordered_Intercept[2];") expect_match2(scode, "lprior += dirichlet_lpdf(theta | 
con_theta);") expect_match2(scode, "ps[1] = log(theta1) + normal_lpdf(Y[n] | mu1[n], sigma1);") expect_match2(scode, "ps[2] = log(theta2) + normal_lpdf(Y[n] | mu2[n], sigma2[n]);") expect_match2(scode, "target += log_sum_exp(ps);") expect_match2(scode, "simplex[2] prior_theta = dirichlet_rng(con_theta);") scode <- make_stancode(bf(z | weights(c) ~ x, shape1 ~ x, theta1 = 1, theta2 = 2), data = data, mixture(Gamma("log"), weibull)) expect_match(scode, "data \\{[^\\}]*real theta1;") expect_match(scode, "data \\{[^\\}]*real theta2;") expect_match2(scode, "ps[1] = log(theta1) + gamma_lpdf(Y[n] | shape1[n], shape1[n] / mu1[n]);") expect_match2(scode, "target += weights[n] * log_sum_exp(ps);") scode <- make_stancode(bf(abs(y) | se(c) ~ x), data = data, mixture(gaussian, student)) expect_match2(scode, "ps[1] = log(theta1) + normal_lpdf(Y[n] | mu1[n], se[n]);") expect_match2(scode, "ps[2] = log(theta2) + student_t_lpdf(Y[n] | nu2, mu2[n], se[n]);") fam <- mixture(gaussian, student, exgaussian) scode <- make_stancode(bf(y ~ x), data = data, family = fam) expect_match(scode, "parameters \\{[^\\}]*real Intercept_mu3;") expect_match2(scode, "ps[2] = log(theta2) + student_t_lpdf(Y[n] | nu2, mu2[n], sigma2);" ) expect_match2(scode, "ps[3] = log(theta3) + exp_mod_normal_lpdf(Y[n] | mu3[n] - beta3, sigma3, inv(beta3));" ) scode <- make_stancode(bf(y ~ x, theta1 ~ x, theta3 ~ x), data = data, family = fam) expect_match2(scode, "log_sum_exp_theta = log(exp(theta1[n]) + exp(theta2[n]) + exp(theta3[n]));") expect_match2(scode, "theta2 = rep_vector(0.0, N);") expect_match2(scode, "theta3[n] = theta3[n] - log_sum_exp_theta;") expect_match2(scode, "ps[1] = theta1[n] + normal_lpdf(Y[n] | mu1[n], sigma1);") fam <- mixture(cumulative, sratio) scode <- make_stancode(y ~ x, data, family = fam) expect_match2(scode, "ordered_logistic_lpmf(Y[n] | mu1[n], Intercept_mu1);") expect_match2(scode, "sratio_logit_lpmf(Y[n] | mu2[n], disc2, Intercept_mu2);") # censored mixture model fam <- 
mixture(gaussian, gaussian) scode <- make_stancode(y | cens(2, y2 = 2) ~ x, data, fam) expect_match2(scode, "ps[2] = log(theta2) + normal_lccdf(Y[n] | mu2[n], sigma2);" ) expect_match2(scode, paste0( "ps[2] = log(theta2) + log_diff_exp(\n", " normal_lcdf(rcens[n] | mu2[n], sigma2)," )) # truncated mixture model scode <- make_stancode(y | trunc(3) ~ x, data, fam) expect_match2(scode, paste0( "ps[1] = log(theta1) + normal_lpdf(Y[n] | mu1[n], sigma1) -\n", " normal_lccdf(lb[n] | mu1[n], sigma1);" )) # non-linear mixture model bform <- bf(y ~ 1) + nlf(mu1 ~ eta^2) + nlf(mu2 ~ log(eta) + a) + lf(eta + a ~ x) + mixture(gaussian, nmix = 2) bprior <- prior(normal(0, 1), nlpar = "eta") + prior(normal(0, 1), nlpar = "a") scode <- make_stancode(bform, data = data, prior = bprior) expect_match2(scode, "mu1[n] = (nlp_eta[n] ^ 2);") expect_match2(scode, "mu2[n] = (log(nlp_eta[n]) + nlp_a[n]);") }) test_that("sparse matrix multiplication is applied correctly", { data <- data.frame(y = rnorm(10), x = rnorm(10)) # linear model scode <- make_stancode( bf(y ~ x, sparse = TRUE) + lf(sigma ~ x, sparse = TRUE), data, prior = prior(normal(0, 5), coef = "Intercept") ) expect_match2(scode, "wX = csr_extract_w(X);") expect_match2(scode, "mu += csr_matrix_times_vector(rows(X), cols(X), wX, vX, uX, b);" ) expect_match2(scode, "uX_sigma[size(csr_extract_u(X_sigma))] = csr_extract_u(X_sigma);" ) expect_match2(scode, paste0( "sigma += csr_matrix_times_vector(rows(X_sigma), cols(X_sigma), ", "wX_sigma, vX_sigma, uX_sigma, b_sigma);" ) ) expect_match2(scode, "lprior += normal_lpdf(b[1] | 0, 5);") expect_match2(scode, "target += normal_lpdf(Y | mu, sigma);") # non-linear model scode <- make_stancode( bf(y ~ a, lf(a ~ x, sparse = TRUE), nl = TRUE), data, prior = prior(normal(0, 1), nlpar = a) ) expect_match2(scode, "vX_a[size(csr_extract_v(X_a))] = csr_extract_v(X_a);" ) expect_match2(scode, "nlp_a += csr_matrix_times_vector(rows(X_a), cols(X_a), wX_a, vX_a, uX_a, b_a);" ) }) test_that("QR 
decomposition is included in the Stan code", { data <- data.frame(y = rnorm(10), x1 = rnorm(10), x2 = rnorm(10)) bform <- bf(y ~ x1 + x2, decomp = "QR") + lf(sigma ~ 0 + x1 + x2, decomp = "QR") # simple priors scode <- make_stancode(bform, data, prior = prior(normal(0, 2))) expect_match2(scode, "XQ = qr_thin_Q(Xc) * sqrt(N - 1);") expect_match2(scode, "b = XR_inv * bQ;") expect_match2(scode, "lprior += normal_lpdf(bQ | 0, 2);") expect_match2(scode, "XQ * bQ") expect_match2(scode, "XR_sigma = qr_thin_R(X_sigma) / sqrt(N - 1);") # horseshoe prior scode <- make_stancode(bform, data, prior = prior(horseshoe(1))) expect_match2(scode, "target += std_normal_lpdf(zb);") expect_match2(scode, "scales = scales_horseshoe(") expect_match2(scode, "sdb = scales[(1):(Kc)];") expect_match2(scode, "bQ = zb .* sdb;") }) test_that("Stan code for Gaussian processes is correct", { set.seed(1234) dat <- data.frame(y = rnorm(40), x1 = rnorm(40), x2 = rnorm(40), z = factor(rep(3:6, each = 10))) prior <- prior(gamma(0.1, 0.1), sdgp) + prior(gamma(4, 2), sdgp, coef = gpx2x1) scode <- make_stancode(y ~ gp(x1) + gp(x2, by = x1, gr = FALSE), dat, prior = prior) expect_match2(scode, "lprior += inv_gamma_lpdf(lscale_1[1]") expect_match2(scode, "lprior += gamma_lpdf(sdgp_1 | 0.1, 0.1)") expect_match2(scode, "lprior += gamma_lpdf(sdgp_2 | 4, 2)") expect_match2(scode, "gp_pred_2 = gp(Xgp_2, sdgp_2[1], lscale_2[1], zgp_2);") expect_match2(scode, "Cgp_2 .* gp_pred_2;") prior <- prior + prior(normal(0, 1), lscale, coef = gpx1) scode <- make_stancode(y ~ gp(x1) + gp(x2, by = x1, gr = TRUE), data = dat, prior = prior) expect_match2(scode, "lprior += normal_lpdf(lscale_1[1][1] | 0, 1)") expect_match2(scode, "gp_pred_2 = gp(Xgp_2, sdgp_2[1], lscale_2[1], zgp_2);") expect_match2(scode, "+ Cgp_2 .* gp_pred_2[Jgp_2]") # non-isotropic GP scode <- make_stancode(y ~ gp(x1, x2, by = z, iso = FALSE), data = dat) expect_match2(scode, "lprior += inv_gamma_lpdf(lscale_1[1][2]") expect_match2(scode, "lprior += 
inv_gamma_lpdf(lscale_1[4][2]") # Suppress Stan parser warnings that can currently not be avoided scode <- make_stancode(y ~ gp(x1, x2) + gp(x1, by = z, gr = FALSE), dat, silent = TRUE) expect_match2(scode, "gp(Xgp_1, sdgp_1[1], lscale_1[1], zgp_1)") expect_match2(scode, "mu[Igp_2_2] += Cgp_2_2 .* gp_pred_2_2;") # approximate GPS scode <- make_stancode( y ~ gp(x1, k = 10, c = 5/4) + gp(x2, by = x1, k = 10, c = 5/4), data = dat ) expect_match2(scode, "lprior += inv_gamma_lpdf(lscale_1") expect_match2(scode, "rgp_1 = sqrt(spd_cov_exp_quad(slambda_1, sdgp_1[1], lscale_1[1])) .* zgp_1;" ) expect_match2(scode, "Cgp_2 .* gp_pred_2[Jgp_2]") prior <- c(prior(normal(0, 10), lscale, coef = gpx1, nlpar = a), prior(gamma(0.1, 0.1), sdgp, nlpar = a), prior(normal(0, 1), b, nlpar = a)) scode <- make_stancode(bf(y ~ a, a ~ gp(x1), nl = TRUE), data = dat, prior = prior) expect_match2(scode, "lprior += normal_lpdf(lscale_a_1[1][1] | 0, 10)") expect_match2(scode, "lprior += gamma_lpdf(sdgp_a_1 | 0.1, 0.1)") expect_match2(scode, "gp(Xgp_a_1, sdgp_a_1[1], lscale_a_1[1], zgp_a_1)") prior <- prior(gamma(2, 2), lscale, coef = gpx1z5, nlpar = "a") scode <- make_stancode(bf(y ~ a, a ~ gp(x1, by = z, gr = TRUE), nl = TRUE), data = dat, prior = prior, silent = TRUE) expect_match2(scode, "nlp_a[Igp_a_1_1] += Cgp_a_1_1 .* gp_pred_a_1_1[Jgp_a_1_1];" ) expect_match2(scode, "gp(Xgp_a_1_3, sdgp_a_1[3], lscale_a_1[3], zgp_a_1_3)") expect_match2(scode, "lprior += gamma_lpdf(lscale_a_1[3][1] | 2, 2);") expect_match2(scode, "target += std_normal_lpdf(zgp_a_1_3);") # test warnings prior <- prior(normal(0, 1), lscale) expect_warning( make_stancode(y ~ gp(x1), data = dat, prior = prior), "The global prior 'normal(0, 1)' of class 'lscale' will not be used", fixed = TRUE ) }) test_that("Stan code for SAR models is correct", { dat <- data.frame(y = rnorm(10), x = rnorm(10)) W <- matrix(0, nrow = 10, ncol = 10) dat2 <- list(W = W) scode <- make_stancode( y ~ x + sar(W), data = dat, prior = prior(normal(0.5, 
1), lagsar), data2 = dat2 ) expect_match2(scode, "target += normal_lagsar_lpdf(Y | mu, sigma, lagsar, Msar, eigenMsar)" ) expect_match2(scode, "lprior += normal_lpdf(lagsar | 0.5, 1)") scode <- make_stancode( y ~ x + sar(W, type = "lag"), data = dat, family = student(), data2 = dat2 ) expect_match2(scode, "target += student_t_lagsar_lpdf(Y | nu, mu, sigma, lagsar, Msar, eigenMsar)" ) scode <- make_stancode(y ~ x + sar(W, type = "error"), data = dat, data2 = dat2) expect_match2(scode, "target += normal_errorsar_lpdf(Y | mu, sigma, errorsar, Msar, eigenMsar)" ) scode <- make_stancode( y ~ x + sar(W, "error"), data = dat, family = student(), prior = prior(beta(2, 3), errorsar), data2 = dat2 ) expect_match2(scode, "target += student_t_errorsar_lpdf(Y | nu, mu, sigma, errorsar, Msar, eigenMsar)" ) expect_match2(scode, "lprior += beta_lpdf(errorsar | 2, 3)") expect_error( make_stancode(bf(y ~ sar(W), sigma ~ x), data = dat), "SAR models are not implemented when predicting 'sigma'" ) }) test_that("Stan code for CAR models is correct", { dat <- data.frame(y = rnorm(10), x = rnorm(10)) edges <- cbind(1:10, 10:1) W <- matrix(0, nrow = 10, ncol = 10) for (i in seq_len(nrow(edges))) { W[edges[i, 1], edges[i, 2]] <- 1 } rownames(W) <- seq_len(nrow(W)) dat2 <- list(W = W) scode <- make_stancode(y ~ x + car(W), dat, data2 = dat2) expect_match2(scode, "real car;") expect_match2(scode, "real sparse_car_lpdf(vector phi") expect_match2(scode, "target += sparse_car_lpdf(") expect_match2(scode, "mu[n] += rcar[Jloc[n]]") scode <- make_stancode(y ~ x + car(W, type = "esicar"), dat, data2 = dat2) expect_match2(scode, "real sparse_icar_lpdf(vector phi") expect_match2(scode, "target += sparse_icar_lpdf(") expect_match2(scode, "mu[n] += rcar[Jloc[n]]") expect_match2(scode, "rcar[Nloc] = - sum(zcar)") scode <- make_stancode(y ~ x + car(W, type = "icar"), dat, data2 = dat2) expect_match2(scode, "target += -0.5 * dot_self(zcar[edges1] - zcar[edges2])") expect_match2(scode, "target += 
normal_lpdf(sum(zcar) | 0, 0.001 * Nloc)") expect_match2(scode, "mu[n] += rcar[Jloc[n]]") expect_match2(scode, "rcar = zcar * sdcar") scode <- make_stancode(y ~ x + car(W, type = "bym2"), dat, data2 = dat2) expect_match2(scode, "target += -0.5 * dot_self(zcar[edges1] - zcar[edges2])") expect_match2(scode, "target += normal_lpdf(sum(zcar) | 0, 0.001 * Nloc)") expect_match2(scode, "mu[n] += rcar[Jloc[n]]") expect_match2(scode, "lprior += beta_lpdf(rhocar | 1, 1)") expect_match2(scode, paste0( "rcar = (sqrt(1 - rhocar) * nszcar + ", "sqrt(rhocar * inv(car_scale)) * zcar) * sdcar" )) # apply a CAR term on a distributional parameter other than 'mu' scode <- make_stancode(bf(y ~ x, sigma ~ car(W)), dat, data2 = dat2) expect_match2(scode, "real sparse_car_lpdf(vector phi") expect_match2(scode, "target += sparse_car_lpdf(") expect_match2(scode, "sigma[n] += rcar_sigma[Jloc_sigma[n]]") # apply shrinkage priors on a CAR term scode <- make_stancode(bf(y ~ x + car(W)), dat, data2 = dat2, prior = prior(horseshoe(main = TRUE), class = b) + prior(horseshoe(), class = sdcar)) expect_match2(scode, "sdcar = scales[(1+Kc):(Kc+1)][1];") }) test_that("Stan code for skew_normal models is correct", { dat = data.frame(y = rnorm(10), x = rnorm(10)) scode <- make_stancode(y ~ x, dat, skew_normal()) expect_match2(scode, "delta = alpha / sqrt(1 + alpha^2);") expect_match2(scode, "omega = sigma / sqrt(1 - sqrt(2 / pi())^2 * delta^2);") expect_match2(scode, "mu[n] = mu[n] - omega * delta * sqrt(2 / pi());") scode <- make_stancode(bf(y ~ x, sigma ~ x), dat, skew_normal()) expect_match2(scode, "omega[n] = sigma[n] / sqrt(1 - sqrt(2 / pi())^2 * delta^2);") expect_match2(scode, "mu[n] = mu[n] - omega[n] * delta * sqrt(2 / pi());") scode <- make_stancode(bf(y | se(x) ~ x, alpha ~ x), dat, skew_normal()) expect_match2(scode, "delta[n] = alpha[n] / sqrt(1 + alpha[n]^2);") expect_match2(scode, "omega[n] = se[n] / sqrt(1 - sqrt(2 / pi())^2 * delta[n]^2);") expect_match2(scode, "mu[n] = mu[n] - omega[n] 
* delta[n] * sqrt(2 / pi());") scode <- make_stancode(y ~ x, dat, mixture(skew_normal, nmix = 2)) expect_match2(scode, "omega1 = sigma1 / sqrt(1 - sqrt(2 / pi())^2 * delta1^2);") expect_match2(scode, "mu2[n] = mu2[n] - omega2 * delta2 * sqrt(2 / pi());") }) test_that("Stan code for missing value terms works correctly", { dat = data.frame(y = rnorm(10), x = rnorm(10), g = 1:10, z = 1) dat$x[c(1, 3, 9)] <- NA bform <- bf(y ~ mi(x)*g) + bf(x | mi() ~ g) + set_rescor(FALSE) scode <- make_stancode(bform, dat) expect_match2(scode, "Yl_x[Jmi_x] = Ymi_x;") expect_match2(scode, "(bsp_y[1]) * Yl_x[n] + (bsp_y[2]) * Yl_x[n] * Csp_y_1[n];") expect_match2(scode, "target += normal_id_glm_lpdf(Yl_x | Xc_x, Intercept_x, b_x, sigma_x);") bform <- bf(y ~ mi(x) + (mi(x) | g)) + bf(x | mi() ~ 1) + set_rescor(FALSE) scode <- make_stancode(bform, dat) expect_match2(scode, "(bsp_y[1] + r_1_y_2[J_1_y[n]]) * Yl_x[n] + r_1_y_1[J_1_y[n]] * Z_1_y_1[n];" ) bform <- bf(y ~ a, a ~ mi(x), nl = TRUE) + bf(x | mi() ~ 1) + set_rescor(FALSE) bprior <- prior(normal(0, 1), nlpar = "a", resp = "y") scode <- make_stancode(bform, dat, prior = bprior) expect_match2(scode, "nlp_y_a[n] += (bsp_y_a[1]) * Yl_x[n];") expect_match2(scode, "lprior += normal_lpdf(bsp_y_a | 0, 1);") bform <- bf(y ~ mi(x)*mo(g)) + bf(x | mi() ~ 1) + set_rescor(FALSE) scode <- make_stancode(bform, dat) expect_match2(scode, "(bsp_y[3]) * Yl_x[n] * mo(simo_y_2, Xmo_y_2[n]);") bform <- bf(y ~ 1, sigma ~ 1) + bf(x | mi() ~ 1) + set_rescor(TRUE) scode <- make_stancode(bform, dat) expect_match2(scode, "Yl[n][2] = Yl_x[n];") expect_match2(scode, "sigma[n] = transpose([sigma_y[n], sigma_x]);") expect_match2(scode, "LSigma[n] = diag_pre_multiply(sigma[n], Lrescor);") bform <- bf(x | mi() ~ y, family = "lognormal") scode <- make_stancode(bform, dat) expect_match2(scode, "vector[Nmi] Ymi;") bform <- bf(y ~ I(log(mi(x))) * g) + bf(x | mi() + trunc(lb = 1) ~ y, family = "lognormal") scode <- make_stancode(bform, dat) expect_match2(scode, 
"vector[Nmi_x] Ymi_x;") expect_match2(scode, "(bsp_y[1]) * (log(Yl_x[n])) + (bsp_y[2]) * (log(Yl_x[n])) * Csp_y_1[n]" ) bform <- bf(y ~ mi(x)*g) + bf(x | mi() + cens(z) ~ y, family = "beta") scode <- make_stancode(bform, dat) expect_match2(scode, "vector[Nmi_x] Ymi_x;") expect_match2(scode, "target += beta_lpdf(Yl_x[n] | mu_x[n] * phi_x, (1 - mu_x[n]) * phi_x);" ) bform <- bf(y | mi() ~ mi(x), shape ~ mi(x), family=weibull()) + bf(x| mi() ~ z, family=gaussian()) + set_rescor(FALSE) scode <- make_stancode(bform, data = dat) expect_match2(scode, "weibull_lpdf(Yl_y | shape_y, mu_y ./ tgamma(1 + 1 ./ shape_y));") expect_match2(scode, "shape_y[n] += (bsp_shape_y[1]) * Yl_x[n];") }) test_that("Stan code for overimputation works correctly", { dat = data.frame(y = rnorm(10), x_x = rnorm(10), g = 1:10, z = 1) dat$x[c(1, 3, 9)] <- NA bform <- bf(y ~ mi(x_x)*g) + bf(x_x | mi(g) ~ 1) + set_rescor(FALSE) scode <- make_stancode(bform, dat, sample_prior = "yes") expect_match2(scode, "target += normal_lpdf(Yl_xx | mu_xx, sigma_xx)") expect_match2(scode, "target += normal_lpdf(Y_xx[Jme_xx] | Yl_xx[Jme_xx], noise_xx[Jme_xx])" ) expect_match2(scode, "vector[N_xx] Yl_xx;") }) test_that("Missing value terms can be combined with 'subset'", { dat <- data.frame( y = rnorm(10), x = c(rnorm(9), NA), z = rnorm(10), g2 = 10:1, g1 = sample(1:5, 10, TRUE), s = c(FALSE, rep(TRUE, 9)) ) bform <- bf(y ~ mi(x, idx = g1)*mi(z)) + bf(x | mi() + index(g2) + subset(s) ~ 1) + bf(z | mi() ~ s) + set_rescor(FALSE) scode <- make_stancode(bform, dat) expect_match2(scode, "(bsp_y[1]) * Yl_x[idxl_y_x_1[n]]") expect_match2(scode, "(bsp_y[2]) * Yl_z[n]") expect_match2(scode, "(bsp_y[3]) * Yl_x[idxl_y_x_1[n]] * Yl_z[n]") expect_match2(scode, "array[N_y] int idxl_y_x_1;") }) test_that("Stan code for advanced count data distribution is correct", { scode <- make_stancode( count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = brmsfamily("discrete_weibull") ) expect_match2(scode, "mu = inv_logit(mu);") 
expect_match2(scode, "target += discrete_weibull_lpmf(Y[n] | mu[n], shape);") scode <- make_stancode( count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = brmsfamily("com_poisson") ) expect_match2(scode, "target += com_poisson_log_lpmf(Y[n] | mu[n], shape);") }) test_that("argument 'stanvars' is handled correctly", { bprior <- prior(normal(mean_intercept, 10), class = "Intercept") mean_intercept <- 5 stanvars <- stanvar(mean_intercept) scode <- make_stancode(count ~ Trt, data = epilepsy, prior = bprior, stanvars = stanvars) expect_match2(scode, "real mean_intercept;") # define a multi_normal prior with known covariance matrix bprior <- prior(multi_normal(M, V), class = "b") stanvars <- stanvar(rep(0, 2), "M", scode = "vector[K] M;") + stanvar(diag(2), "V", scode = "matrix[K, K] V;") scode <- make_stancode(count ~ Trt + zBase, epilepsy, prior = bprior, stanvars = stanvars) expect_match2(scode, "vector[K] M;") expect_match2(scode, "matrix[K, K] V;") # define a hierarchical prior on the regression coefficients bprior <- set_prior("normal(0, tau)", class = "b") + set_prior("target += normal_lpdf(tau | 0, 10)", check = FALSE) stanvars <- stanvar(scode = "real tau;", block = "parameters") scode <- make_stancode(count ~ Trt + zBase, epilepsy, prior = bprior, stanvars = stanvars) expect_match2(scode, "real tau;") expect_match2(scode, "lprior += normal_lpdf(b | 0, tau);") # ensure that variables are passed to the likelihood of a threaded model foo <- 0.5 stanvars <- stanvar(foo) + stanvar(scode = "real tau;", block = "parameters", pll_args = "real tau") scode <- make_stancode(count ~ 1, data = epilepsy, family = poisson(), stanvars = stanvars, threads = threading(2), parse = FALSE) expect_match2(scode, "partial_log_lik_lpmf(int[] seq, int start, int end, data int[] Y, real Intercept, data real foo, real tau)" ) expect_match2(scode, "reduce_sum(partial_log_lik_lpmf, seq, grainsize, Y, Intercept, foo, tau)" ) # specify Stan code in the likelihood part of the 
model block stanvars <- stanvar(scode = "mu += 1.0;", block = "likelihood", position = "start") scode <- make_stancode(count ~ Trt + (1|patient), data = epilepsy, stanvars = stanvars) expect_match2(scode, "mu += 1.0;") stanvars <- stanvar(scode = "mu += 1.0;", block = "likelihood", position = "start") scode <- make_stancode(count ~ Trt + (1|patient), data = epilepsy, stanvars = stanvars, threads = 2, parse = FALSE) expect_match2(scode, "mu += 1.0;") # add transformation at the end of a block stanvars <- stanvar(scode = "r_1_1 = r_1_1 * 2;", block = "tparameters", position = "end") scode <- make_stancode(count ~ Trt + (1 | patient), epilepsy, stanvars = stanvars) expect_match2(scode, "r_1_1 = r_1_1 * 2;\n}") # use the non-centered parameterization for 'b' # unofficial feature not supported anymore for the time being # bprior <- set_prior("target += normal_lpdf(zb | 0, 1)", check = FALSE) + # set_prior("target += normal_lpdf(tau | 0, 10)", check = FALSE) # stanvars <- stanvar(scode = "vector[Kc] zb;", block = "parameters") + # stanvar(scode = "real tau;", block = "parameters") + # stanvar(scode = "vector[Kc] b = zb * tau;", # block="tparameters", name = "b") # scode <- make_stancode(count ~ Trt, epilepsy, # prior = bprior, stanvars = stanvars) # expect_match2(scode, "vector[Kc] b = zb * tau;") # stanvars <- stanvar(scode = "vector[Ksp] zbsp;", block = "parameters") + # stanvar(scode = "real tau;", block = "parameters") + # stanvar(scode = "vector[Ksp] bsp = zbsp * tau;", # block = "tparameters", name = "bsp") # scode <- make_stancode(count ~ mo(Base), epilepsy, stanvars = stanvars) # expect_match2(scode, "vector[Ksp] bsp = zbsp * tau;") }) test_that("custom families are handled correctly", { dat <- data.frame(size = 10, y = sample(0:10, 20, TRUE), x = rnorm(20)) # define a custom beta-binomial family log_lik_beta_binomial2 <- function(i, prep) { mu <- prep$dpars$mu[, i] tau <- prep$dpars$tau trials <- prep$data$vint1[i] y <- prep$data$Y[i] beta_binomial2_lpmf(y, mu, 
tau, trials) } posterior_predict_beta_binomial2 <- function(i, prep, ...) { mu <- prep$dpars$mu[, i] tau <- prep$dpars$tau trials <- prep$data$vint1[i] beta_binomial2_rng(mu, tau, trials) } posterior_epred_beta_binomial2 <- function(prep) { mu <- prep$dpars$mu trials <- prep$data$vint1 trials <- matrix(trials, nrow = nrow(mu), ncol = ncol(mu), byrow = TRUE) mu * trials } beta_binomial2 <- custom_family( "beta_binomial2", dpars = c("mu", "tau"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = c("vint1[n]", "vreal1[n]"), log_lik = log_lik_beta_binomial2, posterior_epred = posterior_epred_beta_binomial2, posterior_predict = posterior_predict_beta_binomial2 ) # define custom stan functions # real R is just to also test the vreal addition argument stan_funs <- " real beta_binomial2_lpmf(int y, real mu, real phi, int N, real R) { return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi); } int beta_binomial2_rng(real mu, real phi, int N, real R) { return beta_binomial_rng(N, mu * phi, (1 - mu) * phi); } " stanvars <- stanvar(scode = stan_funs, block = "functions") scode <- make_stancode( y | vint(size) + vreal(size) ~ x, data = dat, family = beta_binomial2, prior = prior(gamma(0.1, 0.1), class = "tau"), stanvars = stanvars ) expect_match2(scode, "array[N] int vint1;") expect_match2(scode, "real tau;") expect_match2(scode, "mu = inv_logit(mu);") expect_match2(scode, "lprior += gamma_lpdf(tau | 0.1, 0.1);") expect_match2(scode, "target += beta_binomial2_lpmf(Y[n] | mu[n], tau, vint1[n], vreal1[n]);" ) scode <- make_stancode( bf(y | vint(size) + vreal(size) ~ x, tau ~ x), data = dat, family = beta_binomial2, stanvars = stanvars ) expect_match2(scode, "tau = exp(tau);") expect_match2(scode, "target += beta_binomial2_lpmf(Y[n] | mu[n], tau[n], vint1[n], vreal1[n]);" ) # check custom families in mixture models scode <- make_stancode( y | vint(size) + vreal(size) + trials(size) ~ x, data = dat, family = mixture(binomial, beta_binomial2), stanvars = stanvars ) 
expect_match2(scode, "log(theta2) + beta_binomial2_lpmf(Y[n] | mu2[n], tau2, vint1[n], vreal1[n]);" ) # check custom families in multivariate models bform <- bf( y | vint(size) + vreal(size) + trials(size) ~ x, family = beta_binomial2 ) + bf(x ~ 1, family = gaussian()) scode <- make_stancode(bform, data = dat, stanvars = stanvars) expect_match2(scode, "target += beta_binomial2_lpmf(Y_y[n] | mu_y[n], tau_y, vint1_y[n], vreal1_y[n]);" ) # check vectorized custom families beta_binomial2_vec <- custom_family( "beta_binomial2_vec", dpars = c("mu", "tau"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = c("vint1", "vreal1"), loop = FALSE ) stan_funs_vec <- " real beta_binomial2_vec_lpmf(int[] y, vector mu, real phi, int[] N, real[] R) { return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi); } int beta_binomial2_rng(real mu, real phi, int N, real R) { return beta_binomial_rng(N, mu * phi, (1 - mu) * phi); } " stanvars <- stanvar(scode = stan_funs_vec, block = "functions") scode <- make_stancode( y | vint(size) + vreal(size) ~ x, data = dat, family = beta_binomial2_vec, prior = prior(gamma(0.1, 0.1), class = "tau"), stanvars = stanvars ) expect_match2(scode, "target += beta_binomial2_vec_lpmf(Y | mu, tau, vint1, vreal1);" ) }) test_that("likelihood of distributional beta models is correct", { # test issue #404 dat <- data.frame(prop = rbeta(100, shape1 = 2, shape2 = 2)) scode <- make_stancode( bf(prop ~ 1, phi ~ 1), data = dat, family = Beta() ) expect_match2(scode, "beta_lpdf(Y[n] | mu[n] * phi[n], (1 - mu[n]) * phi[n])") }) test_that("student-t group-level effects work without errors", { scode <- make_stancode(count ~ Trt + (1|gr(patient, dist = "st")), epilepsy) expect_match2(scode, "dfm_1 = sqrt(df_1 * udf_1);") expect_match2(scode, "dfm_1 .* (sd_1[1] * (z_1[1]));") expect_match2(scode, "lprior += gamma_lpdf(df_1 | 2, 0.1)") expect_match2(scode, "target += inv_chi_square_lpdf(udf_1 | df_1);") bprior <- prior(normal(20, 5), class = df, group = 
patient) scode <- make_stancode( count ~ Trt + (Trt|gr(patient, dist = "st")), epilepsy, prior = bprior ) expect_match2(scode, "r_1 = rep_matrix(dfm_1, M_1) .* scale_r_cor(z_1, sd_1, L_1);" ) expect_match2(scode, "lprior += normal_lpdf(df_1 | 20, 5)") }) test_that("centering design matrices can be changed correctly", { dat <- data.frame(y = 1:10, x = 1:10) scode <- make_stancode( bf(y ~ x, center = FALSE), data = dat, family = weibull(), prior = prior(normal(0,1), coef = Intercept) ) expect_match2(scode, "mu += X * b;") expect_match2(scode, "lprior += normal_lpdf(b[1] | 0, 1);") bform <- bf(y ~ eta, nl = TRUE) + lf(eta ~ x, center = TRUE) scode <- make_stancode(bform, data = dat) expect_match2(scode, "nlp_eta += Intercept_eta + Xc_eta * b_eta;") }) test_that("to_vector() is correctly removed from prior of SD parameters", { # see https://discourse.mc-stan.org/t/prior-for-sd-generate-parsing-text-error/12292/5 dat <- data.frame( y = rnorm(100), ID = 1:10, group = rep(1:2, each = 5) ) bform <- bf( y ~ 1 + (1 | p | gr(ID, by=group)), sigma ~ 1 + (1 | p | gr(ID, by=group)) ) bprior <- c( prior(normal(0, 0.1), class = sd) , prior(normal(0, 0.01), class = sd, dpar = sigma) ) scode <- make_stancode( bform, data = dat, prior = bprior, sample_prior = TRUE ) expect_match2(scode, "prior_sd_1__1 = normal_rng(0,0.1);") expect_match2(scode, "prior_sd_1__2 = normal_rng(0,0.01);") }) test_that("Dirichlet priors can be flexibly included", { # tests issue #1165 dat <- data.frame(y = rnorm(10), x1 = rnorm(10), x2 = rnorm(10)) bprior <- prior("dirichlet([1,2]')", class = "b") scode <- make_stancode(y ~ x1 + x2, dat, prior = bprior) expect_match2(scode, "simplex[Kc] b;") }) test_that("threaded Stan code is correct", { # tests require cmdstanr which is not yet on CRAN skip_on_cran() # only run if cmdstan >= 2.29 can be found on the system # otherwise the canonicalized code will cause test failures cmdstan_version <- try(cmdstanr::cmdstan_version(), silent = TRUE) found_cmdstan <- 
!brms:::is_try_error(cmdstan_version) skip_if_not(found_cmdstan && cmdstan_version >= "2.29.0") options(brms.backend = "cmdstanr") dat <- data.frame( count = rpois(236, lambda = 20), visit = rep(1:4, each = 59), patient = factor(rep(1:59, 4)), Age = rnorm(236), Trt = factor(sample(0:1, 236, TRUE)), AgeSD = abs(rnorm(236, 1)), Exp = sample(1:5, 236, TRUE), volume = rnorm(236), gender = factor(c(rep("m", 30), rep("f", 29))) ) threads <- threading(2, grainsize = 20) bform <- bf( count ~ Trt*Age + mo(Exp) + s(Age) + offset(Age) + (1+Trt|visit), sigma ~ Trt + gp(Age) + gp(volume, by = Trt) ) scode <- make_stancode(bform, dat, family = student(), threads = threads) expect_match2(scode, "real partial_log_lik_lpmf(array[] int seq, int start,") expect_match2(scode, "mu[n] += bsp[1] * mo(simo_1, Xmo_1[nn])") expect_match2(scode, "ptarget += student_t_lpdf(Y[start : end] | nu, mu, sigma);") expect_match2(scode, "+ gp_pred_sigma_1[Jgp_sigma_1[start : end]]") expect_match2(scode, ".* gp_pred_sigma_2_1[Jgp_sigma_2_1[which_gp_sigma_2_1]];") expect_match2(scode, "sigma[start_at_one(Igp_sigma_2_2[which_gp_sigma_2_2], start)] +=") expect_match2(scode, "target += reduce_sum(partial_log_lik_lpmf, seq, grainsize, Y,") scode <- make_stancode( visit ~ cs(Trt) + Age, dat, family = sratio(), threads = threads, ) expect_match2(scode, "matrix[N, nthres] mucs = Xcs[start : end] * bcs;") expect_match2(scode, "ptarget += sratio_logit_lpmf(Y[nn] | mu[n], disc, Intercept") expect_match2(scode, " - transpose(mucs[n]));") scode <- make_stancode( bf(visit ~ a * Trt ^ b, a ~ mo(Exp), b ~ s(Age), nl = TRUE), data = dat, family = Gamma("log"), prior = set_prior("normal(0, 1)", nlpar = c("a", "b")), threads = threads ) expect_match2(scode, "mu[n] = exp(nlp_a[n] * C_1[nn] ^ nlp_b[n]);") expect_match2(scode, "ptarget += gamma_lpdf(Y[start : end] | shape, shape ./ mu);") bform <- bf(mvbind(count, Exp) ~ Trt) + set_rescor(TRUE) scode <- make_stancode(bform, dat, gaussian(), threads = threads) 
expect_match2(scode, "ptarget += multi_normal_cholesky_lpdf(Y[start : end] | Mu, LSigma);") bform <- bf(brms::mvbind(count, Exp) ~ Trt) + set_rescor(FALSE) scode <- make_stancode(bform, dat, gaussian(), threads = threads) expect_match2(scode, "target += reduce_sum(partial_log_lik_count_lpmf, seq_count,") expect_match2(scode, "target += reduce_sum(partial_log_lik_Exp_lpmf, seq_Exp,") expect_match2(scode, "ptarget += normal_id_glm_lpdf(Y_Exp[start : end] | Xc_Exp[start : end], Intercept_Exp, b_Exp, sigma_Exp);" ) scode <- make_stancode( visit ~ Trt, dat, family = mixture(poisson(), nmix = 2), threads = threading(4, grainsize = 10, static = TRUE) ) expect_match2(scode, "ps[1] = log(theta1) + poisson_log_lpmf(Y[nn] | mu1[n]);") expect_match2(scode, "ptarget += log_sum_exp(ps);") expect_match2(scode, "target += reduce_sum_static(partial_log_lik_lpmf,") }) test_that("Un-normalized Stan code is correct", { # tests require cmdstanr which is not yet on CRAN skip_on_cran() # only run if cmdstan >= 2.29 can be found on the system # otherwise the canonicalized code will cause test failures cmdstan_version <- try(cmdstanr::cmdstan_version(), silent = TRUE) found_cmdstan <- !brms:::is_try_error(cmdstan_version) skip_if_not(found_cmdstan && cmdstan_version >= "2.29.0") options(brms.backend = "cmdstanr") scode <- make_stancode( count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson(), prior = prior(student_t(5,0,10), class = b) + prior(cauchy(0,2), class = sd), normalize = FALSE ) expect_match2(scode, "target += poisson_log_glm_lupmf(Y | Xc, mu, b);") expect_match2(scode, "lprior += student_t_lupdf(b | 5, 0, 10);") expect_match2(scode, "lprior += student_t_lupdf(Intercept | 3, 1.4, 2.5);") expect_match2(scode, "lprior += cauchy_lupdf(sd_1 | 0, 2);") expect_match2(scode, "target += std_normal_lupdf(z_1[1]);") scode <- make_stancode( count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson(), prior = 
prior(student_t(5,0,10), class = b) + prior(cauchy(0,2), class = sd), normalize = FALSE, threads = threading(2) ) expect_match2(scode, "target += reduce_sum(partial_log_lik_lpmf, seq, grainsize, Y, Xc, b,") expect_match2(scode, " Intercept, J_1, Z_1_1, r_1_1, J_2, Z_2_1, r_2_1);") expect_match2(scode, "ptarget += poisson_log_glm_lupmf(Y[start : end] | Xc[start : end], mu, b);") expect_match2(scode, "lprior += student_t_lupdf(b | 5, 0, 10);") expect_match2(scode, "lprior += student_t_lupdf(Intercept | 3, 1.4, 2.5);") expect_match2(scode, "lprior += cauchy_lupdf(sd_1 | 0, 2);") expect_match2(scode, "target += std_normal_lupdf(z_1[1]);") # Check that brms custom distributions stay normalized scode <- make_stancode( rating ~ period + carry + cs(treat), data = inhaler, family = sratio("cloglog"), normalize = FALSE ) expect_match2(scode, "target += sratio_cloglog_lpmf(Y[n] | mu[n], disc, Intercept") expect_match2(scode, " - transpose(mucs[n]));") # Check that user-specified custom distributions stay normalized dat <- data.frame(size = 10, y = sample(0:10, 20, TRUE), x = rnorm(20)) beta_binomial2 <- custom_family( "beta_binomial2", dpars = c("mu", "tau"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = c("vint1[n]", "vreal1[n]"), ) stan_funs <- " real beta_binomial2_lpmf(int y, real mu, real phi, int N, real R) { return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi); } " stanvars <- stanvar(scode = stan_funs, block = "functions") scode <- make_stancode( y | vint(size) + vreal(size) ~ x, data = dat, family = beta_binomial2, prior = prior(gamma(0.1, 0.1), class = "tau"), stanvars = stanvars, normalize = FALSE, backend = "cmdstanr" ) expect_match2(scode, "target += beta_binomial2_lpmf(Y[n] | mu[n], tau, vint1[n], vreal1[n]);") expect_match2(scode, "gamma_lupdf(tau | 0.1, 0.1);") }) # the new array syntax is now used throughout brms # test_that("Canonicalizing Stan code is correct", { # # tests require cmdstanr which is not yet on CRAN # skip_on_cran() # # 
# only run if cmdstan >= 2.29 can be found on the system # # otherwise the canonicalized code will cause test failures # cmdstan_version <- try(cmdstanr::cmdstan_version(), silent = TRUE) # found_cmdstan <- !is_try_error(cmdstan_version) # skip_if_not(found_cmdstan && cmdstan_version >= "2.29.0") # options(brms.backend = "cmdstanr") # # scode <- make_stancode( # count ~ zAge + zBase * Trt + (1|patient) + (1|obs), # data = epilepsy, family = poisson(), # prior = prior(student_t(5,0,10), class = b) + # prior(cauchy(0,2), class = sd), # normalize = FALSE # ) # expect_match2(scode, "array[M_1] vector[N_1] z_1;") # expect_match2(scode, "array[M_2] vector[N_2] z_2;") # # model <- " # data { # int a[5]; # real b[5]; # vector[5] c[4]; # } # parameters { # real d[5]; # vector[5] e[4]; # } # " # stan_file <- cmdstanr::write_stan_file(model) # canonicalized_code <- .canonicalize_stan_model(stan_file, overwrite_file = FALSE) # expect_match2(canonicalized_code, "array[5] int a;") # expect_match2(canonicalized_code, "array[5] real b;") # expect_match2(canonicalized_code, "array[4] vector[5] c;") # expect_match2(canonicalized_code, "array[5] real d;") # expect_match2(canonicalized_code, "array[4] vector[5] e;") # }) test_that("Normalizing Stan code works correctly", { normalize_stancode <- brms:::normalize_stancode expect_equal( normalize_stancode("// a\nb;\n b + c = 4; // kde\ndata"), normalize_stancode("// dasflkjldl\n // adsfadsfa\n b;\n\n \n \t\rb + c = 4;\ndata") ) expect_equal( normalize_stancode("data /* adfa */ {\nint a;\n /* asdddede \n asdfas \n asf */}\n"), normalize_stancode("data {\nint a;\n} /* aa \n adfasdf \n asdfadsf ddd */\n") ) expect_equal( normalize_stancode("data \n {\nint a;\n\n } \t\n"), normalize_stancode("data {\nint a;\n} \n") ) expect_equal( normalize_stancode("/* \n\n */\na*/"), normalize_stancode("a*/") ) expect_equal( normalize_stancode("//adsfadf \ra // asdfasdf\r\n"), normalize_stancode("a") ) expect_equal( normalize_stancode("/* * \n * \n * fg / 
*/hhh"), normalize_stancode("hhh") ) expect_equal( normalize_stancode("a //b"), normalize_stancode("a") ) expect_false(normalize_stancode("// a\ndata {\nint a;\n}\n") == normalize_stancode("// a\ndata {\nint b;\n}\n")) # should not remove single whitespace expect_false(normalize_stancode("da ta") == normalize_stancode("data")) # should handle wrong nested comments expect_false(normalize_stancode("/* \n\n */\na*/") == normalize_stancode("b*/")) }) brms/tests/testthat/tests.brmsfit-helpers.R0000644000176200001440000001666614213413565020634 0ustar liggesuserscontext("Tests for brmsfit helper functions") test_that("first_greater returns expected results", { A <- cbind(1:10, 11:20, 21:30) x <- c(5, 25, 7, 15, 7, 10, 15, 19, 3, 11) expect_equal(first_greater(A, x), c(2, 3, 2, 3, 2, 2, 2, 3, 1, 2)) expect_equal(first_greater(A, x, i = 2), c(2, 3, 2, 3, 2, 2, 2, 3, 2, 2)) }) test_that("array2list performs correct conversion", { A <- array(1:27, dim = c(3,3,3)) B <- list(matrix(1:9,3,3), matrix(10:18,3,3), matrix(19:27,3,3)) expect_equal(brms:::array2list(A), B) }) test_that("probit and probit_approx produce similar results", { expect_equal(brms:::inv_link(-10:10, "probit"), brms:::inv_link(-10:10, "probit_approx"), tolerance = 1e-3) }) test_that("autocorrelation matrices are computed correctly", { ar <- 0.5 ma <- 0.3 ar_mat <- brms:::get_cor_matrix_ar1(ar = matrix(ar), nobs = 4) expected_ar_mat <- 1 / (1 - ar^2) * cbind(c(1, ar, ar^2, ar^3), c(ar, 1, ar, ar^2), c(ar^2, ar, 1, ar), c(ar^3, ar^2, ar, 1)) expect_equal(ar_mat[1, , ], expected_ar_mat) ma_mat <- brms:::get_cor_matrix_ma1(ma = matrix(ma), nobs = 4) expected_ma_mat <- cbind(c(1+ma^2, ma, 0, 0), c(ma, 1+ma^2, ma, 0), c(0, ma, 1+ma^2, ma), c(0, 0, ma, 1+ma^2)) expect_equal(ma_mat[1, , ], expected_ma_mat) arma_mat <- brms:::get_cor_matrix_arma1( ar = matrix(ar), ma = matrix(ma), nobs = 4 ) g0 <- 1 + ma^2 + 2 * ar * ma g1 <- (1 + ar * ma) * (ar + ma) expected_arma_mat <- 1 / (1 - ar^2) * cbind(c(g0, g1, g1 * ar, g1 * 
ar^2), c(g1, g0, g1, g1 * ar), c(g1 * ar, g1, g0, g1), c(g1 * ar^2, g1 * ar, g1, g0)) expect_equal(arma_mat[1, , ], expected_arma_mat) cosy <- 0.6 cosy_mat <- brms:::get_cor_matrix_cosy(cosy = as.matrix(cosy), nobs = 4) expected_cosy_mat <- matrix(cosy, 4, 4) diag(expected_cosy_mat) <- 1 expect_equal(cosy_mat[1, , ], expected_cosy_mat) ident_mat <- brms:::get_cor_matrix_ident(ndraws = 10, nobs = 4) expected_ident_mat <- diag(1, 4) expect_equal(ident_mat[1, , ], expected_ident_mat) }) test_that("evidence_ratio returns expected results", { ps <- -4:10 prs <- -2:12 expect_true(evidence_ratio(ps, prior_samples = prs) > 1) expect_true(is.na(evidence_ratio(ps))) expect_equal(evidence_ratio(ps, cut = 0.5, wsign = "greater"), 10/5) expect_equal(evidence_ratio(ps, cut = 0.5, wsign = "less"), 5/10) }) test_that("find_vars finds all valid variable names in a string", { string <- "x + b.x - .5 + abc(a__3) : 1/2 - 0.2" expect_equal(find_vars(string), c("x", "b.x", "a__3")) }) test_that(".predictor_arma runs without errors", { ns <- 20 nobs <- 30 Y = rnorm(nobs) J_lag = c(1:3, 3, 3, rep(c(0:3, 3), 4), 0:3, 0) ar <- matrix(rnorm(ns * 3), nrow = ns, ncol = 3) ma <- matrix(rnorm(ns * 1), nrow = ns, ncol = 1) eta <- matrix(rnorm(ns * nobs), nrow = ns, ncol = nobs) expect_equal(.predictor_arma(eta, Y = Y, J_lag = J_lag), eta) expect_silent(.predictor_arma(eta, Y = Y, J_lag = J_lag, ar = ar)) expect_silent(.predictor_arma(eta, Y = Y, J_lag = J_lag, ma = ma)) expect_silent(.predictor_arma(eta, Y = Y, J_lag = J_lag, ar = ar, ma = ma)) }) test_that("make_conditions works correctly", { conds <- make_conditions(epilepsy, c("zBase", "zAge")) expect_equal(dim(conds), c(9, 3)) expect_equal(conds$cond__[3], "zBase = -1 & zAge = 1") }) test_that("brmsfit_needs_refit works correctly", { cache_tmp <- tempfile(fileext = ".rds") expect_null(read_brmsfit(cache_tmp)) saveRDS(list(a = 1), file = cache_tmp) expect_error(read_brmsfit(cache_tmp)) data_model1 <- data.frame(y = rnorm(10), x = rnorm(10)) 
fake_fit <- brm(y ~ x, data = data_model1, empty = TRUE) fake_fit_file <- fake_fit fake_fit_file$file <- cache_tmp scode_model1 <- make_stancode(y ~ x, data = data_model1) sdata_model1 <- make_standata(y ~ x, data = data_model1) data_model2 <- data_model1 data_model2$x[1] <- data_model2$x[1] + 1 scode_model2 <- make_stancode(y ~ 0 + x, data = data_model2) sdata_model2 <- make_standata(y ~ 0 + x, data = data_model2) write_brmsfit(fake_fit, file = cache_tmp) cache_res <- read_brmsfit(file = cache_tmp) expect_equal(cache_res, fake_fit_file) expect_false(brmsfit_needs_refit( cache_res, sdata = sdata_model1, scode = scode_model1, algorithm = "sampling", silent = TRUE)) expect_false(brmsfit_needs_refit( cache_res, sdata = sdata_model1, scode = scode_model1, algorithm = NULL, silent = TRUE)) expect_false(brmsfit_needs_refit( cache_res, sdata = sdata_model1, scode = NULL, algorithm = "sampling", silent = TRUE)) expect_false(brmsfit_needs_refit( cache_res, sdata = NULL, scode = scode_model1, algorithm = "sampling", silent = TRUE)) expect_true(brmsfit_needs_refit( cache_res, sdata = sdata_model2, scode = scode_model1, algorithm = "sampling", silent = TRUE)) expect_true(brmsfit_needs_refit( cache_res, sdata = sdata_model1, scode = scode_model2, algorithm = "sampling", silent = TRUE)) expect_true(brmsfit_needs_refit( cache_res, sdata = sdata_model2, scode = scode_model2, algorithm = "sampling", silent = TRUE)) expect_true(brmsfit_needs_refit( cache_res, sdata = sdata_model1, scode = scode_model1, algorithm = "optimize", silent = TRUE)) expect_true(brmsfit_needs_refit( cache_res, sdata = make_standata(y ~ x, data = data_model1, sample_prior = "only"), scode = scode_model1, algorithm = NULL, silent = TRUE)) }) test_that("insert_refcat() works correctly", { source(testthat::test_path(file.path("helpers", "insert_refcat_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { 
cats <- paste0("cat", 1:ncat) ref_list <- list( ref1 = 1, reflast = ncat ) fam_list <- list( fam_ref1 = categorical(refcat = cats[1]), fam_reflast = categorical(refcat = cats[ncat]) ) if (ncat > 2) { ref_list <- c(ref_list, list(ref2 = 2)) fam_list <- c(fam_list, list(fam_ref2 = categorical(refcat = cats[2]))) } eta_test_list <- list(array(rnorm(ndraws * nobsv * (ncat - 1)), dim = c(ndraws, nobsv, ncat - 1))) if (nobsv == 1) { eta_test_list <- c( eta_test_list, list(matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws)) ) } for (eta_test in eta_test_list) { for (i in seq_along(fam_list)) { # Emulate content of `fam` after fit: fam <- fam_list[[i]] if (is.null(fam$refcat)) { fam$refcat <- cats[1] } fam$cats <- cats ref <- ref_list[[i]] # Perform the check: eta_ref <- insert_refcat(eta_test, ref) eta_ref_ch <- insert_refcat_ch(eta_test, fam) expect_equivalent(eta_ref, eta_ref_ch) if (length(dim(eta_test)) == 3) { expect_equal(dim(eta_ref), c(ndraws, nobsv, ncat)) } else if (length(dim(eta_test)) == 2) { expect_equal(dim(eta_ref), c(ndraws, ncat)) } } } } } } }) brms/tests/testthat/tests.priors.R0000644000176200001440000001327714430676476017054 0ustar liggesusers# most tests of prior related stuff can be found in tests.make_stancode.R context("Tests for prior generating functions") test_that("get_prior finds all classes for which priors can be specified", { expect_equal( sort( get_prior( count ~ zBase * Trt + (1|patient) + (1+Trt|visit), data = epilepsy, family = "poisson" )$class ), sort(c(rep("b", 4), c("cor", "cor"), "Intercept", rep("sd", 6))) ) expect_equal( sort( get_prior( rating ~ treat + period + cse(carry), data = inhaler, family = sratio(threshold = "equidistant") )$class ), sort(c(rep("b", 4), "delta", rep("Intercept", 1))) ) }) test_that("set_prior allows arguments to be vectors", { bprior <- set_prior("normal(0, 2)", class = c("b", "sd")) expect_is(bprior, "brmsprior") expect_equal(bprior$prior, rep("normal(0, 2)", 2)) expect_equal(bprior$class, c("b", 
"sd")) }) test_that("print for class brmsprior works correctly", { expect_output(print(set_prior("normal(0,1)")), fixed = TRUE, "b ~ normal(0,1)") expect_output(print(set_prior("normal(0,1)", coef = "x")), "b_x ~ normal(0,1)", fixed = TRUE) expect_output(print(set_prior("cauchy(0,1)", class = "sd", group = "x")), "sd_x ~ cauchy(0,1)", fixed = TRUE) expect_output(print(set_prior("target += normal_lpdf(x | 0,1))", check = FALSE)), "target += normal_lpdf(x | 0,1))", fixed = TRUE) }) test_that("get_prior returns correct nlpar names for random effects pars", { # reported in issue #47 data <- data.frame(y = rnorm(10), x = rnorm(10), g = rep(1:2, 5)) gp <- get_prior(bf(y ~ a - b^x, a + b ~ (1+x|g), nl = TRUE), data = data) expect_equal(sort(unique(gp$nlpar)), c("", "a", "b")) }) test_that("get_prior returns correct fixed effect names for GAMMs", { dat <- data.frame(y = rnorm(10), x = rnorm(10), z = rnorm(10), g = rep(1:2, 5)) prior <- get_prior(y ~ z + s(x) + (1|g), data = dat) expect_equal(prior[prior$class == "b", ]$coef, c("", "sx_1", "z")) prior <- get_prior(bf(y ~ lp, lp ~ z + s(x) + (1|g), nl = TRUE), data = dat) expect_equal(prior[prior$class == "b", ]$coef, c("", "Intercept", "sx_1", "z")) }) test_that("get_prior returns correct prior names for auxiliary parameters", { dat <- data.frame(y = rnorm(10), x = rnorm(10), z = rnorm(10), g = rep(1:2, 5)) prior <- get_prior(bf(y ~ 1, phi ~ z + (1|g)), data = dat, family = Beta()) prior <- prior[prior$dpar == "phi", ] pdata <- data.frame(class = c("b", "b", "Intercept", rep("sd", 3)), coef = c("", "z", "", "", "", "Intercept"), group = c(rep("", 4), "g", "g"), stringsAsFactors = FALSE) pdata <- pdata[with(pdata, order(class, group, coef)), ] expect_equivalent(prior[, c("class", "coef", "group")], pdata) }) test_that("get_prior returns correct priors for multivariate models", { dat <- data.frame(y1 = rnorm(10), y2 = c(1, rep(1:3, 3)), x = rnorm(10), g = rep(1:2, 5)) bform <- bf(mvbind(y1, y2) ~ x + (x|ID1|g)) + 
set_rescor(TRUE) # check global priors prior <- get_prior(bform, dat, family = gaussian()) expect_equal(prior[prior$resp == "y1" & prior$class == "b", "coef"], c("", "x")) expect_equal(prior[prior$class == "rescor", "prior"], "lkj(1)") # check family and autocor specific priors family <- list(gaussian, Beta()) bform <- bf(y1 ~ x + (x|ID1|g) + ar()) + bf(y2 ~ 1) prior <- get_prior(bform, dat, family = family) expect_true(any(with(prior, class == "sigma" & resp == "y1"))) expect_true(any(with(prior, class == "ar" & resp == "y1"))) expect_true(any(with(prior, class == "phi" & resp == "y2"))) expect_true(!any(with(prior, class == "ar" & resp == "y2"))) }) test_that("get_prior returns correct priors for categorical models", { # check global priors dat <- data.frame(y2 = c(1, rep(1:3, 3)), x = rnorm(10), g = rep(1:2, 5)) prior <- get_prior(y2 ~ x + (x|ID1|g), data = dat, family = categorical()) expect_equal(prior[prior$dpar == "mu2" & prior$class == "b", "coef"], c("", "x")) }) test_that("set_prior alias functions produce equivalent results", { expect_equal(set_prior("normal(0, 1)", class = "sd"), prior(normal(0, 1), class = sd)) expect_equal(set_prior("normal(0, 1)", class = "sd", nlpar = "a"), prior(normal(0, 1), class = "sd", nlpar = a)) expect_equal(set_prior("normal(0, 1)", class = "sd", nlpar = "a"), prior_(~normal(0, 1), class = ~sd, nlpar = quote(a))) expect_equal(set_prior("normal(0, 1)", class = "sd"), prior_string("normal(0, 1)", class = "sd")) }) test_that("external interface of validate_prior works correctly", { prior1 <- prior(normal(0,10), class = b) + prior(cauchy(0,2), class = sd) prior1 <- validate_prior( prior1, count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson() ) expect_true(all(c("b", "Intercept", "sd") %in% prior1$class)) expect_equal(nrow(prior1), 9) }) test_that("overall intercept priors are adjusted for the intercept", { dat <- data.frame(y = rep(c(1, 3), each = 5), off = 10) prior1 <- get_prior(y ~ 1 + offset(off), 
dat) int_prior <- prior1$prior[prior1$class == "Intercept"] expect_equal(int_prior, "student_t(3, -8, 2.5)") }) test_that("as.brmsprior works correctly", { dat <- data.frame(prior = "normal(0,1)", x = "test", coef = c("a", "b")) bprior <- as.brmsprior(dat) expect_equal(bprior$prior, rep("normal(0,1)", 2)) expect_equal(bprior$class, rep("b", 2)) expect_equal(bprior$coef, c("a", "b")) expect_equal(bprior$x, NULL) expect_equal(bprior$lb, rep(NA_character_, 2)) }) brms/tests/testthat/tests.brmsformula.R0000644000176200001440000000366514213413565020052 0ustar liggesuserscontext("Tests for brmsformula") test_that("brmsformula validates formulas of non-linear parameters", { expect_error(bf(y ~ a, ~ 1, a ~ 1), "Additional formulas must be named") expect_error(bf(y ~ a^x, a.b ~ 1), "not contain dots or underscores") expect_error(bf(y ~ a^(x+b), a_b ~ 1), "not contain dots or underscores") }) test_that("brmsformula validates formulas of auxiliary parameters", { expect_error(bf(y ~ a, ~ 1, sigma ~ 1), "Additional formulas must be named") }) test_that("brmsformula detects use if '~~'", { # checks fix of issue #749 expect_error(bf(y~~x), "~~") }) test_that("brmsformula does not change a 'brmsformula' object", { form <- bf(y ~ a, sigma ~ 1) expect_identical(form, bf(form)) form <- bf(y ~ a, sigma ~ 1, a ~ x, nl = TRUE) expect_identical(form, bf(form)) }) test_that("brmsformula detects auxiliary parameter equations", { expect_error(bf(y~x, sigma1 = "sigmaa2"), "Can only equate parameters of the same class") expect_error(bf(y~x, mu3 = "mu2"), "Equating parameters of class 'mu' is not allowed") expect_error(bf(y~x, sigma1 = "sigma1"), "Equating 'sigma1' with itself is not meaningful") expect_error(bf(y~x, shape1 ~ x, shape2 = "shape1"), "Cannot use predicted parameters on the right-hand side") expect_error(bf(y~x, shape1 = "shape3", shape2 = "shape1"), "Cannot use fixed parameters on the right-hand side") }) test_that("update_adterms works correctly", { form <- y | trials(size) ~ x 
expect_equal( update_adterms(form, ~ trials(10)), y | trials(10) ~ x ) expect_equal( update_adterms(form, ~ weights(w)), y | trials(size) + weights(w) ~ x ) expect_equal( update_adterms(form, ~ weights(w), action = "replace"), y | weights(w) ~ x ) expect_equal( update_adterms(y ~ x, ~ trials(10)), y | trials(10) ~ x ) }) brms/tests/testthat/tests.make_standata.R0000644000176200001440000013072114453525534020315 0ustar liggesuserscontext("Tests for make_standata") test_that(paste("make_standata returns correct data names ", "for fixed and random effects"), { expect_equal(sort(names(make_standata(rating ~ treat + period + carry + (1|subject), data = inhaler))), sort(c("N", "Y", "K", "Kc", "X", "Z_1_1", "J_1", "N_1", "M_1", "NC_1", "prior_only"))) expect_equal(sort(names(make_standata(rating ~ treat + period + carry + (1+treat|id|subject), data = inhaler, family = "categorical"))), sort(c("N", "Y", "ncat", "K_mu2", "Kc_mu2", "X_mu2", "Z_1_mu2_1", "Z_1_mu2_2", "K_mu3", "Kc_mu3", "X_mu3", "Z_1_mu3_3", "Z_1_mu3_4", "K_mu4", "Kc_mu4", "X_mu4", "Z_1_mu4_5", "Z_1_mu4_6", "J_1", "N_1", "M_1", "NC_1", "prior_only"))) expect_equal(sort(names(make_standata(rating ~ treat + period + carry + (1+treat|subject), data = inhaler))), sort(c("N", "Y", "K", "Kc", "X", "Z_1_1", "Z_1_2", "J_1", "N_1", "M_1", "NC_1", "prior_only"))) dat <- data.frame(y = 1:10, g = 1:10, h = 11:10, x = rep(0,10)) expect_equal(sort(names(make_standata(y ~ 0 + Intercept + x + (1|g) + (1|h), dat, "poisson"))), sort(c("N", "Y", "K", "X", "Z_1_1", "Z_2_1", "J_1", "J_2", "N_1", "M_1", "NC_1", "N_2", "M_2", "NC_2", "prior_only"))) expect_true(all(c("Z_1_1", "Z_1_2", "Z_2_1", "Z_2_2") %in% names(make_standata(y ~ x + (1+x|g/h), dat)))) expect_equal(make_standata(y ~ x + (1+x|g+h), dat), make_standata(y ~ x + (1+x|g) + (1+x|h), dat)) }) test_that(paste("make_standata handles variables used as fixed effects", "and grouping factors at the same time"), { data <- data.frame(y = 1:9, x = factor(rep(c("a","b","c"), 3))) 
standata <- make_standata(y ~ x + (1|x), data = data) expect_equal(colnames(standata$X), c("Intercept", "xb", "xc")) expect_equal(standata$J_1, as.array(rep(1:3, 3))) standata2 <- make_standata(y ~ x + (1|x), data = data, control = list(not4stan = TRUE)) expect_equal(colnames(standata2$X), c("Intercept", "xb", "xc")) }) test_that("make_standata returns correct data names for addition terms", { dat <- data.frame(y = 1:10, w = 1:10, t = 1:10, x = rep(0,10), c = sample(-1:1,10,TRUE)) expect_equal(names(make_standata(y | se(w) ~ x, dat, gaussian())), c("N", "Y", "se", "K", "Kc", "X", "sigma", "prior_only")) expect_equal(names(make_standata(y | weights(w) ~ x, dat, "gaussian")), c("N", "Y", "weights", "K", "Kc", "X", "prior_only")) expect_equal(names(make_standata(y | cens(c) ~ x, dat, "student")), c("N", "Y", "cens", "K", "Kc", "X", "prior_only")) expect_equal(names(make_standata(y | trials(t) ~ x, dat, "binomial")), c("N", "Y", "trials", "K", "Kc", "X", "prior_only")) expect_equal(names(make_standata(y | trials(10) ~ x, dat, "binomial")), c("N", "Y", "trials", "K", "Kc", "X", "prior_only")) expect_equal(names(make_standata(y | thres(11) ~ x, dat, "acat")), c("N", "Y", "nthres", "K", "Kc", "X", "disc", "prior_only")) expect_equal(names(make_standata(y | thres(10) ~ x, dat, cumulative())), c("N", "Y", "nthres", "K", "Kc", "X", "disc", "prior_only")) sdata <- make_standata(y | trunc(0,20) ~ x, dat, "gaussian") expect_true(all(sdata$lb == 0) && all(sdata$ub == 20)) sdata <- make_standata(y | trunc(ub = 21:30) ~ x, dat) expect_true(all(all(sdata$ub == 21:30))) }) test_that(paste("make_standata accepts correct response variables", "depending on the family"), { expect_equal(make_standata(y ~ 1, data = data.frame(y = seq(-9.9,0,0.1)), family = "student")$Y, as.array(seq(-9.9,0,0.1))) expect_equal(make_standata(y | trials(10) ~ 1, data = data.frame(y = 1:10), family = "binomial")$Y, as.array(1:10)) expect_equal(make_standata(y ~ 1, data = data.frame(y = 10:20), family = 
"poisson")$Y, as.array(10:20)) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(-c(1:2),5)), family = "bernoulli")$Y, as.array(rep(1:0,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(c(TRUE, FALSE),5)), family = "bernoulli")$Y, as.array(rep(1:0,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(1,5)), family = "bernoulli")$Y, as.array(rep(1, 5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(0,5)), family = "bernoulli")$Y, as.array(rep(0, 5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(1:10,5)), family = "categorical")$Y, as.array(rep(1:10,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(11:20,5)), family = "categorical")$Y, as.array(rep(1:10,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = factor(rep(11:20,5))), family = "categorical")$Y, as.array(rep(1:10,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(1:10,5)), family = "cumulative")$Y, as.array(rep(1:10,5))) dat <- data.frame(y = factor(rep(-4:5,5), order = TRUE)) expect_equal(make_standata(y ~ 1, data = dat, family = "acat")$Y, as.array(rep(1:10,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = seq(1,10,0.1)), family = "exponential")$Y, as.array(seq(1,10,0.1))) dat <- data.frame(y1 = 1:10, y2 = 11:20, x = rep(0,10)) form <- bf(mvbind(y1, y2) ~ x) + set_rescor(TRUE) sdata <- make_standata(form, data = dat) expect_equal(sdata$Y_y1, as.array(1:10)) expect_equal(sdata$Y_y2, as.array(11:20)) }) test_that(paste("make_standata rejects incorrect response variables", "depending on the family"), { expect_error(make_standata(y ~ 1, data = data.frame(y = factor(1:10)), family = "student"), "Family 'student' requires numeric responses") expect_error(make_standata(y ~ 1, data = data.frame(y = -5:5), family = "geometric"), "Family 'geometric' requires response greater than or equal to 0") expect_error(make_standata(y ~ 1, data = data.frame(y = -1:1), family = "bernoulli"), "contain only two 
different values") expect_error(make_standata(y ~ 1, data = data.frame(y = factor(-1:1)), family = "cratio"), "Family 'cratio' requires either positive integers or ordered factors") expect_error(make_standata(y ~ 1, data = data.frame(y = rep(0.5:7.5), 2), family = "sratio"), "Family 'sratio' requires either positive integers or ordered factors") expect_error(make_standata(y ~ 1, data = data.frame(y = rep(-7.5:7.5), 2), family = "gamma"), "Family 'gamma' requires response greater than 0") expect_error(make_standata(y ~ 1, data = data.frame(y = c(0.1, 0.5, 1)), family = Beta()), "Family 'beta' requires response smaller than 1") expect_error(make_standata(y ~ 1, data = data.frame(y = c(0, 0.5, 4)), family = von_mises()), "Family 'von_mises' requires response smaller than or equal to 3.14") expect_error(make_standata(y ~ 1, data = data.frame(y = c(-1, 2, 5)), family = hurdle_gamma()), "Family 'hurdle_gamma' requires response greater than or equal to 0") }) test_that("make_standata suggests using family bernoulli if appropriate", { expect_message(make_standata(y | trials(1) ~ 1, data = list(y = rep(0:1,5)), family = "binomial"), "family 'bernoulli' might be a more efficient choice.") expect_message(make_standata(y ~ 1, data = data.frame(y = rep(1:2, 5)), family = "acat"), "family 'bernoulli' might be a more efficient choice.") expect_message(make_standata(y ~ 1, data = data.frame(y = rep(0:1,5)), family = "categorical"), "family 'bernoulli' might be a more efficient choice.") }) test_that("make_standata returns correct values for addition terms", { dat <- data.frame(y = rnorm(9), s = 1:9, w = 1:9, c1 = rep(-1:1, 3), c2 = rep(c("left","none","right"), 3), c3 = c(rep(c(TRUE, FALSE), 4), FALSE), c4 = c(sample(-1:1, 5, TRUE), rep(2, 4)), t = 11:19) expect_equivalent(make_standata(y | se(s) ~ 1, data = dat)$se, as.array(1:9)) expect_equal(make_standata(y | weights(w) ~ 1, data = dat)$weights, as.array(1:9)) expect_equal(make_standata(y | cens(c1) ~ 1, data = dat)$cens, 
as.array(rep(-1:1, 3))) expect_equal(make_standata(y | cens(c2) ~ 1, data = dat)$cens, as.array(rep(-1:1, 3))) expect_equal(make_standata(y | cens(c3) ~ 1, data = dat)$cens, as.array(c(rep(1:0, 4), 0))) expect_equal(make_standata(y | cens(c4, y + 2) ~ 1, data = dat)$rcens, as.array(c(rep(0, 5), dat$y[6:9] + 2))) expect_equal(make_standata(s | trials(10) ~ 1, dat, family = "binomial")$trials, as.array(rep(10, 9))) expect_equal(make_standata(s | trials(t) ~ 1, data = dat, family = "binomial")$trials, as.array(11:19)) expect_equal(SW(make_standata(s | cat(19) ~ 1, data = dat, family = "cumulative"))$nthres, 18) }) test_that("make_standata rejects incorrect addition terms", { dat <- data.frame(y = rnorm(9), s = -(1:9), w = -(1:9), c = rep(-2:0, 3), t = 9:1, z = 1:9) expect_error(make_standata(y | se(s) ~ 1, data = dat), "Standard errors must be non-negative") expect_error(make_standata(y | weights(w) ~ 1, data = dat), "Weights must be non-negative") expect_error(make_standata(y | cens(c) ~ 1, data = dat)) expect_error(make_standata(z | trials(t) ~ 1, data = dat, family = "binomial"), "Number of trials is smaller than the number of events") }) test_that("make_standata handles multivariate models", { dat <- data.frame( y1 = 1:10, y2 = 11:20, x = rep(0, 10), g = rep(1:2, 5), censi = sample(0:1, 10, TRUE), tim = 10:1, w = 1:10 ) form <- bf(mvbind(y1, y2) | weights(w) ~ x) + set_rescor(TRUE) sdata <- make_standata(form, data = dat) expect_equal(sdata$Y_y1, as.array(dat$y1)) expect_equal(sdata$Y_y2, as.array(dat$y2)) expect_equal(sdata$weights_y1, as.array(1:10)) expect_error(make_standata(bf(mvbind(y1, y2, y2) ~ x) + set_resor(FALSE), data = dat), "Cannot use the same response variable twice") form <- bf(mvbind(y1 / y2, y2, y1 * 3) ~ x) + set_rescor(FALSE) sdata <- make_standata(form, data = dat) expect_equal(sdata$Y_y1y2, as.array(dat$y1 / dat$y2)) sdata <- suppressWarnings( make_standata(mvbind(y1, y2) ~ x, dat, autocor = cor_ar(~ tim | g)) ) target1 <- c(seq(9, 1, -2), 
seq(10, 2, -2)) expect_equal(sdata$Y_y1, as.array(target1)) target2 <- c(seq(19, 11, -2), seq(20, 12, -2)) expect_equal(sdata$Y_y2, as.array(target2)) # models without residual correlations expect_warning( bform <- bf(y1 | cens(censi) ~ x + y2 + (x|2|g)) + gaussian() + cor_ar() + (bf(x ~ 1) + mixture(poisson, nmix = 2)) + (bf(y2 ~ s(y2) + (1|2|g)) + skew_normal()), "Using 'cor_brms' objects for 'autocor' is deprecated" ) bprior <- prior(normal(0, 5), resp = y1) + prior(normal(0, 10), resp = y2) + prior(dirichlet(2, 1), theta, resp = x) sdata <- make_standata(bform, dat, prior = bprior) sdata_names <- c( "N", "J_1_y1", "cens_y1", "Kma_y1", "Z_1_y2_3", "Zs_y2_1_1", "Y_y2", "con_theta_x", "X_mu2_x" ) expect_true(all(sdata_names %in% names(sdata))) expect_equal(sdata$con_theta_x, as.array(c(2, 1))) }) test_that("make_standata removes NAs correctly", { dat <- data.frame(y = c(rnorm(9), NA)) sdata <- suppressWarnings(make_standata(y ~ 1, dat)) expect_equal(as.numeric(sdata$Y), dat$y[1:9]) }) test_that("make_standata handles the 'subset' addition argument correctly", { dat1 <- data.frame( y1 = rnorm(15), y2 = NA, x1 = rnorm(15), x2 = NA, x3 = rnorm(15), sub1 = 1, sub2 = 0 ) dat2 <- data.frame( y1 = NA, y2 = rnorm(10), x1 = NA, x2 = rnorm(10), x3 = NA, sub1 = 0, sub2 = 1 ) dat <- rbind(dat1, dat2) bform <- bf(y1 | subset(sub1) ~ x1*x3 + sin(x1), family = gaussian()) + bf(y2 | subset(sub2) ~ x2, family = gaussian()) + set_rescor(FALSE) sdata <- make_standata(bform, dat) nsub1 <- sum(dat$sub1) nsub2 <- sum(dat$sub2) expect_equal(sdata$N_y1, nsub1) expect_equal(sdata$N_y2, nsub2) expect_equal(length(sdata$Y_y1), nsub1) expect_equal(nrow(sdata$X_y2), nsub2) }) test_that("make_standata returns correct data for ARMA terms", { dat <- data.frame(y = 1:10, x = rep(0, 10), tim = 10:1, g = rep(3:4, 5)) sdata <- make_standata(y ~ x + ma(tim, g), data = dat) expect_equal(sdata$J_lag, as.array(c(1, 1, 1, 1, 0, 1, 1, 1, 1, 0))) sdata <- make_standata(y ~ x + ar(tim, g, p = 2), data = 
dat) expect_equal(sdata$J_lag, as.array(c(1, 2, 2, 2, 0, 1, 2, 2, 2, 0))) sdata <- make_standata(y ~ x + ar(tim, g, cov = TRUE), data = dat) expect_equal(sdata$begin_tg, as.array(c(1, 6))) expect_equal(sdata$nobs_tg, as.array(c(5, 5))) sdata <- make_standata(y ~ x + ar(tim), data = dat, family = poisson(), prior = prior(horseshoe(), class = sderr)) expect_equal(sdata$Kscales, 1) bform <- bf(y ~ exp(b * x), b ~ 1, nl = TRUE, autocor = ~arma()) sdata <- make_standata(bform, dat) }) test_that("make_standata returns correct data for UNSTR covariance terms", { dat <- data.frame(y = 1:12, x = rnorm(12), tim = c(5:1, 1:5, c(0, 4)), g = c(rep(3:4, 5), rep(2, 2))) sdata <- make_standata(y ~ x + unstr(tim, g), data = dat) expect_equal(sdata$n_unique_t, 6) expect_equal(sdata$n_unique_cortime, 15) Jtime <- rbind(c(1, 5, 0, 0, 0), 2:6, 2:6) expect_equal(sdata$Jtime_tg, Jtime) }) test_that("make_standata allows to retrieve the initial data order", { dat <- data.frame(y1 = rnorm(100), y2 = rnorm(100), id = sample(1:10, 100, TRUE), time = sample(1:100, 100)) # univariate model sdata1 <- make_standata(y1 ~ ar(time, id), data = dat, internal = TRUE) expect_equal(dat$y1, as.numeric(sdata1$Y[attr(sdata1, "old_order")])) # multivariate model form <- bf(mvbind(y1, y2) ~ ma(time, id)) + set_rescor(FALSE) sdata2 <- make_standata(form, data = dat, internal = TRUE) expect_equal(sdata2$Y_y1[attr(sdata2, "old_order")], as.array(dat$y1)) expect_equal(sdata2$Y_y2[attr(sdata2, "old_order")], as.array(dat$y2)) }) test_that("make_standata handles covariance matrices correctly", { A <- structure(diag(1, 4), dimnames = list(1:4, NULL)) sdata <- make_standata(count ~ Trt + (1|gr(visit, cov = A)), data = epilepsy, data2 = list(A = A)) expect_equivalent(sdata$Lcov_1, t(chol(A))) B <- structure(diag(1:5), dimnames = list(c(1,5,2,4,3), NULL)) sdata <- make_standata(count ~ Trt + (1|gr(visit, cov = B)), data = epilepsy, data2 = list(B = B)) expect_equivalent(sdata$Lcov_1, t(chol(B[c(1,3,5,4), 
c(1,3,5,4)]))) B <- diag(1, 4) expect_error(make_standata(count ~ Trt + (1|gr(visit, cov = B)), data = epilepsy, data2 = list(B = B)), "Row or column names are required") B <- structure(diag(1, 4), dimnames = list(2:5, NULL)) expect_error(make_standata(count ~ Trt + (1|gr(visit, cov = B)), data = epilepsy, data2 = list(B = B)), "Levels of .* do not match") B <- A B[1,2] <- 0.5 expect_error(make_standata(count ~ Trt + (1|gr(visit, cov = B)), data = epilepsy, data2 = list(B = B)), "must be symmetric") expect_warning( sdata <- make_standata(count ~ Trt + (1|visit), data = epilepsy, cov_ranef = list(visit = A)), "Argument 'cov_ranef' is deprecated" ) expect_equivalent(sdata$Lcov_1, t(chol(A))) }) test_that("make_standata correctly prepares data for non-linear models", { flist <- list(a ~ x + (1|1|g), b ~ mo(z) + (1|1|g)) dat <- data.frame( y = rnorm(9), x = rnorm(9), z = sample(1:9, 9), g = rep(1:3, 3) ) bform <- bf(y ~ a - b^z, flist = flist, nl = TRUE) sdata <- make_standata(bform, data = dat) expect_equal(names(sdata), c("N", "Y", "C_1", "K_a", "X_a", "Z_1_a_1", "K_b", "X_b", "Ksp_b", "Imo_b", "Xmo_b_1", "Jmo_b", "con_simo_b_1", "Z_1_b_2", "J_1", "N_1", "M_1", "NC_1", "prior_only") ) expect_equal(colnames(sdata$X_a), c("Intercept", "x")) expect_equal(sdata$J_1, as.array(dat$g)) bform <- bf(y ~ x) + nlf(sigma ~ a1 * exp(-x/(a2 + z))) + lf(a1 ~ 1, a2 ~ z + (x|g)) + lf(alpha ~ x) sdata <- make_standata(bform, dat, family = skew_normal()) sdata_names <- c("C_sigma_1", "X_a2", "Z_1_a2_1") expect_true(all(sdata_names %in% names(sdata))) }) test_that("make_standata correctly prepares data for monotonic effects", { data <- data.frame( y = rpois(120, 10), x1 = rep(1:4, 30), z = rnorm(10), x2 = factor(rep(c("a", "b", "c"), 40), ordered = TRUE) ) sdata <- make_standata(y ~ mo(x1)*mo(x2)*y, data = data) sdata_names <- c("Xmo_1", "Imo", "Jmo", "con_simo_8", "con_simo_5") expect_true(all(sdata_names %in% names(sdata))) expect_equivalent(sdata$Xmo_1, as.array(data$x1 - 1)) 
expect_equivalent(sdata$Xmo_2, as.array(as.numeric(data$x2) - 1)) expect_equal( as.vector(unname(sdata$Jmo)), rep(c(max(data$x1) - 1, length(unique(data$x2)) - 1), 4) ) expect_equal(sdata$con_simo_1, as.array(rep(1, 3))) prior <- set_prior("dirichlet(1:3)", coef = "mox11", class = "simo", dpar = "sigma") sdata <- make_standata(bf(y ~ 1, sigma ~ mo(x1)), data = data, prior = prior) expect_equal(sdata$con_simo_sigma_1, as.array(1:3)) prior <- c( set_prior("normal(0,1)", class = "b", coef = "mox1"), set_prior("dirichlet(c(1, 0.5, 2))", class = "simo", coef = "mox11"), prior_(~dirichlet(c(1, 0.5, 2)), class = "simo", coef = "mox1:mox21") ) sdata <- make_standata(y ~ mo(x1)*mo(x2), data = data, prior = prior) expect_equal(sdata$con_simo_1, as.array(c(1, 0.5, 2))) expect_equal(sdata$con_simo_3, as.array(c(1, 0.5, 2))) # test issue #924 (conditional monotonicity) prior <- c(prior(dirichlet(c(1, 0.5, 2)), simo, coef = "v"), prior(dirichlet(c(1,3)), simo, coef = "w")) sdata <- make_standata(y ~ y*mo(x1, id = "v")*mo(x2, id = "w"), data, prior = prior) expect_equal(sdata$con_simo_1, as.array(c(1, 0.5, 2))) expect_equal(sdata$con_simo_2, as.array(c(1, 3))) expect_true(!"sdata$con_simo_3" %in% names(sdata)) expect_error( make_standata(y ~ mo(z), data = data), "Monotonic predictors must be integers or ordered factors" ) prior <- c(set_prior("dirichlet(c(1,0.5,2))", class = "simo", coef = "mox21")) expect_error( make_standata(y ~ mo(x2), data = data, prior = prior), "Invalid Dirichlet prior" ) }) test_that("make_standata returns FCOR covariance matrices", { data <- data.frame(y = 1:5) data2 <- list(V = diag(5)) expect_equal(make_standata(y ~ fcor(V), data, data2 = data2)$Mfcor, data2$V, check.attributes = FALSE) expect_warning( expect_error( make_standata(y~1, data, autocor = cor_fixed(diag(2))), "Dimensions of 'M' for FCOR terms must be equal" ), "Using 'cor_brms' objects for 'autocor' is deprecated" ) }) test_that("make_standata returns data for GAMMs", { dat <- data.frame(y = 
rnorm(10), x1 = rnorm(10), x2 = rnorm(10), x3 = rnorm(10), z = rnorm(10), g = factor(rep(1:2, 5))) sdata <- make_standata(y ~ s(x1) + z + s(x2, by = x3), data = dat) expect_equal(sdata$nb_1, 1) expect_equal(as.vector(sdata$knots_2), 8) expect_equal(dim(sdata$Zs_1_1), c(10, 8)) expect_equal(dim(sdata$Zs_2_1), c(10, 8)) bform <- bf(y ~ lp, lp ~ s(x1) + z + s(x2, by = x3), nl = TRUE) sdata <- make_standata(bform, dat) expect_equal(sdata$nb_lp_1, 1) expect_equal(as.vector(sdata$knots_lp_2), 8) expect_equal(dim(sdata$Zs_lp_1_1), c(10, 8)) expect_equal(dim(sdata$Zs_lp_2_1), c(10, 8)) sdata <- make_standata(y ~ g + s(x2, by = g), data = dat) expect_true(all(c("knots_1", "knots_2") %in% names(sdata))) # test issue #562 dat$g <- as.character(dat$g) sdata <- make_standata(y ~ g + s(x2, by = g), data = dat) expect_true(all(c("knots_1", "knots_2") %in% names(sdata))) sdata <- make_standata(y ~ t2(x1, x2), data = dat) expect_equal(sdata$nb_1, 3) expect_equal(as.vector(sdata$knots_1), c(9, 6, 6)) expect_equal(dim(sdata$Zs_1_1), c(10, 9)) expect_equal(dim(sdata$Zs_1_3), c(10, 6)) expect_error(make_standata(y ~ te(x1, x2), data = dat), "smooths 'te' and 'ti' are not yet implemented") }) test_that("make_standata returns correct group ID data", { form <- bf(count ~ Trt + (1+Trt|3|visit) + (1|patient), shape ~ (1|3|visit) + (Trt||patient)) sdata <- make_standata(form, data = epilepsy, family = negbinomial()) expect_true(all(c("Z_1_1", "Z_2_2", "Z_3_shape_1", "Z_2_shape_3") %in% names(sdata))) form <- bf(count ~ a, sigma ~ (1|3|visit) + (Trt||patient), a ~ Trt + (1+Trt|3|visit) + (1|patient), nl = TRUE) sdata <- make_standata(form, data = epilepsy, family = student()) expect_true(all(c("Z_1_sigma_1", "Z_2_a_3", "Z_2_sigma_1", "Z_3_a_1") %in% names(sdata))) }) test_that("make_standata handles population-level intercepts", { dat <- data.frame(y = 10:1, x = 1:10) sdata <- make_standata(y ~ 0 + x, data = dat) expect_equal(unname(sdata$X[, 1]), dat$x) sdata <- make_standata(y ~ x, dat, 
cumulative(), control = list(not4stan = TRUE)) expect_equal(unname(sdata$X[, 1]), dat$x) sdata <- make_standata(y ~ 0 + Intercept + x, data = dat) expect_equal(unname(sdata$X), cbind(1, dat$x)) }) test_that("make_standata handles category specific effects", { sdata <- make_standata(rating ~ period + carry + cse(treat), data = inhaler, family = sratio()) expect_equivalent(sdata$Xcs, matrix(inhaler$treat)) sdata <- make_standata(rating ~ period + carry + cs(treat) + (cs(1)|subject), data = inhaler, family = acat()) expect_equivalent(sdata$Z_1_3, as.array(rep(1, nrow(inhaler)))) sdata <- make_standata(rating ~ period + carry + (cs(treat)|subject), data = inhaler, family = cratio()) expect_equivalent(sdata$Z_1_4, as.array(inhaler$treat)) expect_warning( make_standata(rating ~ 1 + cs(treat), data = inhaler, family = "cumulative"), "Category specific effects for this family should be considered experimental" ) expect_error(make_standata(rating ~ 1 + (treat + cs(1)|subject), data = inhaler, family = "cratio"), "category specific effects in separate group-level terms") }) test_that("make_standata handles wiener diffusion models", { dat <- data.frame(q = 1:10, resp = sample(0:1, 10, TRUE), x = rnorm(10)) dat$dec <- ifelse(dat$resp == 0, "lower", "upper") dat$test <- "a" sdata <- make_standata(q | dec(resp) ~ x, data = dat, family = wiener()) expect_equal(sdata$dec, as.array(dat$resp)) sdata <- make_standata(q | dec(dec) ~ x, data = dat, family = wiener()) expect_equal(sdata$dec, as.array(dat$resp)) expect_error(make_standata(q | dec(test) ~ x, data = dat, family = wiener()), "Decisions should be 'lower' or 'upper'") }) test_that("make_standata handles noise-free terms", { N <- 30 dat <- data.frame( y = rnorm(N), x = rnorm(N), z = rnorm(N), xsd = abs(rnorm(N, 1)), zsd = abs(rnorm(N, 1)), ID = rep(1:5, each = N / 5) ) sdata <- make_standata( bf(y ~ me(x, xsd)*me(z, zsd)*x, sigma ~ me(x, xsd)), data = dat ) expect_equal(sdata$Xn_1, as.array(dat$x)) expect_equal(sdata$noise_2, 
as.array(dat$zsd)) expect_equal(unname(sdata$Csp_3), as.array(dat$x)) expect_equal(sdata$Ksp, 6) expect_equal(sdata$NCme_1, 1) }) test_that("make_standata handles noise-free terms with grouping factors", { dat <- data.frame( y = rnorm(10), x1 = rep(1:5, each = 2), sdx = rep(1:5, each = 2), g = rep(c("b", "c", "a", "d", 1), each = 2) ) sdata <- make_standata(y ~ me(x1, sdx, gr = g), dat) expect_equal(unname(sdata$Xn_1), as.array(c(5, 3, 1, 2, 4))) expect_equal(unname(sdata$noise_1), as.array(c(5, 3, 1, 2, 4))) dat$sdx[2] <- 10 expect_error( make_standata(y ~ me(x1, sdx, gr = g), dat), "Measured values and measurement error should be unique" ) }) test_that("make_standata handles missing value terms", { dat = data.frame(y = rnorm(10), x = rnorm(10), g = 1:10) miss <- c(1, 3, 9) dat$x[miss] <- NA bform <- bf(y ~ mi(x)*g) + bf(x | mi() ~ g) + set_rescor(FALSE) sdata <- make_standata(bform, dat) expect_equal(sdata$Jmi_x, as.array(miss)) expect_true(all(is.infinite(sdata$Y_x[miss]))) # dots in variable names are correctly handled #452 dat$x.2 <- dat$x bform <- bf(y ~ mi(x.2)*g) + bf(x.2 | mi() ~ g) + set_rescor(FALSE) sdata <- make_standata(bform, dat) expect_equal(sdata$Jmi_x, as.array(miss)) dat$z <- rbeta(10, 1, 1) dat$z[miss] <- NA bform <- bf(exp(y) ~ mi(z)*g) + bf(z | mi() ~ g, family = Beta()) + set_rescor(FALSE) sdata <- make_standata(bform, dat) expect_equal(sdata$Jmi_z, as.array(miss)) }) test_that("make_standata handles overimputation", { dat = data.frame(y = rnorm(10), x = rnorm(10), g = 1:10, sdy = 1) miss <- c(1, 3, 9) dat$x[miss] <- dat$sdy[miss] <- NA bform <- bf(y ~ mi(x)*g) + bf(x | mi(sdy) ~ g) + set_rescor(FALSE) sdata <- make_standata(bform, dat) expect_equal(sdata$Jme_x, as.array(setdiff(1:10, miss))) expect_true(all(is.infinite(sdata$Y_x[miss]))) expect_true(all(is.infinite(sdata$noise_x[miss]))) }) test_that("make_standata handles 'mi' terms with 'subset'", { dat <- data.frame( y = rnorm(10), x = c(rnorm(9), NA), z = rnorm(10), g1 = sample(1:5, 10, 
TRUE), g2 = 10:1, g3 = 1:10, s = c(FALSE, rep(TRUE, 9)) ) bform <- bf(y ~ mi(x, idx = g1)) + bf(x | mi() + index(g2) + subset(s) ~ 1) + set_rescor(FALSE) sdata <- make_standata(bform, dat) expect_true(all(sdata$idxl_y_x_1 %in% 9:5)) # test a bunch of errors # fails on CRAN for some reason # bform <- bf(y ~ mi(x, idx = g1)) + # bf(x | mi() + index(g3) + subset(s) ~ 1) + # set_rescor(FALSE) # expect_error(make_standata(bform, dat), # "Could not match all indices in response 'x'" # ) bform <- bf(y ~ mi(x, idx = g1)) + bf(x | mi() + subset(s) ~ 1) + set_rescor(FALSE) expect_error(make_standata(bform, dat), "Response 'x' needs to have an 'index' addition term" ) bform <- bf(y ~ mi(x)) + bf(x | mi() + subset(s) + index(g2) ~ 1) + set_rescor(FALSE) expect_error(make_standata(bform, dat), "mi() terms of subsetted variables require the 'idx' argument", fixed = TRUE ) bform <- bf(y | mi() ~ mi(x, idx = g1)) + bf(x | mi() + subset(s) + index(g2) ~ mi(y)) + set_rescor(FALSE) expect_error(make_standata(bform, dat), "mi() terms in subsetted formulas require the 'idx' argument", fixed = TRUE ) }) test_that("make_standata handles multi-membership models", { dat <- data.frame(y = rnorm(10), g1 = c(7:2, rep(10, 4)), g2 = 1:10, w1 = rep(1, 10), w2 = rep(abs(rnorm(10)))) sdata <- make_standata(y ~ (1|mm(g1,g2,g1,g2)), data = dat) expect_true(all(paste0(c("W_1_", "J_1_"), 1:4) %in% names(sdata))) expect_equal(sdata$W_1_4, as.array(rep(0.25, 10))) expect_equal(unname(sdata$Z_1_1_1), as.array(rep(1, 10))) expect_equal(unname(sdata$Z_1_1_2), as.array(rep(1, 10))) # this checks whether combintation of factor levels works as intended expect_equal(sdata$J_1_1, as.array(c(6, 5, 4, 3, 2, 1, 7, 7, 7, 7))) expect_equal(sdata$J_1_2, as.array(c(8, 1, 2, 3, 4, 5, 6, 9, 10, 7))) sdata <- make_standata(y ~ (1|mm(g1,g2, weights = cbind(w1, w2))), dat) expect_equal(sdata$W_1_1, as.array(dat$w1 / (dat$w1 + dat$w2))) # tests mmc terms sdata <- make_standata(y ~ (1+mmc(w1, w2)|mm(g1,g2)), data = dat) 
expect_equal(unname(sdata$Z_1_2_1), as.array(dat$w1)) expect_equal(unname(sdata$Z_1_2_2), as.array(dat$w2)) expect_error( make_standata(y ~ (mmc(w1, w2, y)|mm(g1,g2)), data = dat), "Invalid term 'mmc(w1, w2, y)':", fixed = TRUE ) expect_error( make_standata(y ~ (mmc(w1, w2)*y|mm(g1,g2)), data = dat), "The term 'mmc(w1, w2):y' is invalid", fixed = TRUE ) # tests if ":" works in multi-membership models sdata <- make_standata(y ~ (1|mm(w1:g1,w1:g2)), dat) expect_true(all(c("J_1_1", "J_1_2") %in% names(sdata))) }) test_that("by variables in grouping terms are handled correctly", { gvar <- c("1A", "1B", "2A", "2B", "3A", "3B", "10", "100", "2", "3") gvar <- rep(gvar, each = 10) g_order <- order(gvar) byvar <- c(0, 4.5, 3, 2, "x 1") byvar <- factor(rep(byvar, each = 20)) dat <- data.frame( y = rnorm(100), x = rnorm(100), g = gvar, g2 = gvar[g_order], z = byvar, z2 = byvar[g_order], z3 = factor(1:2) ) sdata <- make_standata(y ~ x + (x | gr(g, by = z)), dat) expect_equal(sdata$Nby_1, 5) expect_equal(sdata$Jby_1, as.array(c(2, 2, 1, 1, 5, 4, 4, 5, 3, 3))) sdata <- make_standata(y ~ x + (x | mm(g, g2, by = cbind(z, z2))), dat) expect_equal(sdata$Nby_1, 5) expect_equal(sdata$Jby_1, as.array(c(2, 2, 1, 1, 5, 4, 4, 5, 3, 3))) expect_error(make_standata(y ~ x + (1|gr(g, by = z3)), dat), "Some levels of 'g' correspond to multiple levels of 'z3'") }) test_that("make_standata handles calls to the 'poly' function", { dat <- data.frame(y = rnorm(10), x = rnorm(10)) expect_equal(colnames(make_standata(y ~ 1 + poly(x, 3), dat)$X), c("Intercept", "polyx31", "polyx32", "polyx33")) }) test_that("make_standata allows fixed distributional parameters", { dat <- list(y = 1:10) expect_equal(make_standata(bf(y ~ 1, nu = 3), dat, student())$nu, 3) expect_equal(make_standata(y ~ 1, dat, acat())$disc, 1) expect_error(make_standata(bf(y ~ 1, bias = 0.5), dat), "Invalid fixed parameters: 'bias'") }) test_that("Cell-mean coding can be disabled", { df <- data.frame(y = 1:10, g = rep(c("a", "b"), 5)) 
bform <- bf(y ~ g) + lf(disc ~ 0 + g + (0 + g | y), cmc = FALSE) + cumulative() sdata <- make_standata(bform, df) target <- matrix(rep(0:1, 5), dimnames = list(1:10, "gb")) expect_equal(sdata$X_disc, target) expect_equal(unname(sdata$Z_1_disc_1), as.array(rep(0:1, 5))) expect_true(!"Z_1_disc_2" %in% names(sdata)) bform <- bf(y ~ 0 + g + (1 | y), cmc = FALSE) sdata <- make_standata(bform, df) expect_equal(sdata$X, target) expect_equal(unname(sdata$Z_1_1), as.array(rep(1, 10))) }) test_that("make_standata correctly includes offsets", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) sdata <- make_standata(bf(y ~ x + offset(c), sigma ~ offset(c + 1)), data) expect_equal(sdata$offsets, as.array(data$c)) expect_equal(sdata$offsets_sigma, as.array(data$c + 1)) sdata <- make_standata(y ~ x + offset(c) + offset(x), data) expect_equal(sdata$offsets, as.array(data$c + data$x)) }) test_that("make_standata includes data for mixture models", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) form <- bf(y ~ x, mu1 ~ 1, family = mixture(gaussian, gaussian)) sdata <- make_standata(form, data) expect_equal(sdata$con_theta, as.array(c(1, 1))) expect_equal(dim(sdata$X_mu1), c(10, 1)) expect_equal(dim(sdata$X_mu2), c(10, 2)) form <- bf(y ~ x, family = mixture(gaussian, gaussian)) sdata <- make_standata(form, data, prior = prior(dirichlet(10, 2), theta)) expect_equal(sdata$con_theta, as.array(c(10, 2))) form <- bf(y ~ x, theta1 = 1, theta2 = 3, family = mixture(gaussian, gaussian)) sdata <- make_standata(form, data) expect_equal(sdata$theta1, 1/4) expect_equal(sdata$theta2, 3/4) }) test_that("make_standata includes data for Gaussian processes", { dat <- data.frame(y = rnorm(10), x1 = rnorm(10), z = factor(c(2, 2, 2, 3, 4, rep(5, 5)))) sdata <- make_standata(y ~ gp(x1), dat) expect_equal(max(sdata$Xgp_1) - min(sdata$Xgp_1), 1) sdata <- make_standata(y ~ gp(x1, scale = FALSE), dat) expect_equal(max(sdata$Xgp_1) - min(sdata$Xgp_1), max(dat$x1) - min(dat$x1)) sdata <- 
SW(make_standata(y ~ gp(x1, by = z, gr = TRUE, scale = FALSE), dat)) expect_equal(sdata$Igp_1_2, as.array(4)) expect_equal(sdata$Jgp_1_4, as.array(1:5)) expect_equal(sdata$Igp_1_4, as.array(6:10)) sdata <- SW(make_standata(y ~ gp(x1, by = y, gr = TRUE), dat)) expect_equal(sdata$Cgp_1, as.array(dat$y)) }) test_that("make_standata includes data for approximate Gaussian processes", { dat <- data.frame(y = rnorm(10), x1 = sample(1:10, 10), z = factor(c(2, 2, 2, 3, 4, rep(5, 5)))) sdata <- make_standata(y ~ gp(x1, k = 5, c = 5/4), dat) expect_equal(sdata$NBgp_1, 5) expect_equal(dim(sdata$Xgp_1), c(10, 5)) expect_equal(dim(sdata$slambda_1), c(5, 1)) sdata <- SW(make_standata(y ~ gp(x1, by = z, k = 5, c = 5/4, scale = FALSE), dat)) expect_equal(sdata$Igp_1_2, as.array(4)) expect_equal(sdata$Cgp_1_2, as.array(1)) expect_equal(sdata$Igp_1_4, as.array(6:10)) }) test_that("make_standata includes data for SAR models", { dat <- data.frame(y = rnorm(10), x = rnorm(10)) W <- matrix(0, nrow = 10, ncol = 10) dat2 <- list(W = W) sdata <- make_standata(y ~ x + sar(W), data = dat, data2 = dat2) expect_equal(dim(sdata$M), rep(nrow(W), 2)) dat2 <- list(W = matrix(0, 2, 2)) expect_error( make_standata(y ~ x + sar(W), data = dat, data2 = dat2), "Dimensions of 'M' for SAR terms must be equal" ) }) test_that("make_standata includes data for CAR models", { dat = data.frame(y = rnorm(10), x = rnorm(10), obs = 1:10) edges <- cbind(1:10, 10:1) W <- matrix(0, nrow = 10, ncol = 10) for (i in seq_len(nrow(edges))) { W[edges[i, 1], edges[i, 2]] <- 1 } rownames(W) <- 1:nrow(W) dat2 <- list(W = W) sdata <- make_standata(y ~ x + car(W, gr = obs), dat, data2 = dat2) expect_equal(sdata$Nloc, 10) expect_equal(unname(sdata$Nneigh), rep(1, 10)) expect_equal(unname(sdata$edges1), as.array(10:6)) expect_equal(unname(sdata$edges2), as.array(1:5)) sdata_old <- SW(make_standata(y ~ x, dat, autocor = cor_car(W))) expect_equal(sdata, sdata_old) rownames(dat2$W) <- c("a", 2:9, "b") dat$group <- rep(c("a", "b"), 
each = 5) sdata <- make_standata(y ~ x + car(W, gr = group), dat, data2 = dat2, prior = prior(horseshoe(), class = sdcar)) expect_equal(sdata$Nloc, 2) expect_equal(sdata$edges1, as.array(2)) expect_equal(sdata$edges2, as.array(1)) expect_equal(sdata$Kscales, 1) sdata <- make_standata(y ~ x + car(W, group, type = "bym2"), data = dat, data2 = dat2) expect_equal(length(sdata$car_scale), 1L) dat2$W[1, 10] <- 4 dat2$W[10, 1] <- 4 expect_message(make_standata(y ~ car(W, gr = group), dat, data2 = dat2), "Converting all non-zero values in 'M' to 1") # test error messages rownames(dat2$W) <- c(1:9, "a") expect_error(make_standata(y ~ car(W, gr = group), dat, data2 = dat2), "Row names of 'M' for CAR terms do not match") rownames(dat2$W) <- NULL expect_error(make_standata(y ~ car(W, gr = group), dat, data2 = dat2), "Row names are required for 'M'") dat2$W[1, 10] <- 0 expect_error(make_standata(y ~ car(W), dat, data2 = dat2), "'M' for CAR terms must be symmetric") dat2$W[10, 1] <- 0 expect_error(SW(make_standata(y ~ x + car(W), dat, data2 = dat2)), "all locations should have at least one neighbor") }) test_that("make_standata includes data of special priors", { dat <- data.frame(y = 1:10, x1 = rnorm(10), x2 = rnorm(10), g = rep(1:2, each = 5), x3 = sample(1:5, 10, TRUE)) # horseshoe prior hs <- horseshoe(7, scale_global = 2, df_global = 3, df_slab = 6, scale_slab = 3) sdata <- make_standata(y ~ x1*x2, data = dat, prior = set_prior(hs)) expect_equal(sdata$hs_df, 7) expect_equal(sdata$hs_df_global, 3) expect_equal(sdata$hs_df_slab, 6) expect_equal(sdata$hs_scale_global, 2) expect_equal(sdata$hs_scale_slab, 3) hs <- horseshoe(par_ratio = 0.1) sdata <- make_standata(y ~ x1*x2, data = dat, prior = set_prior(hs)) expect_equal(sdata$hs_scale_global, 0.1 / sqrt(nrow(dat))) # R2D2 prior sdata <- make_standata(y ~ x1*x2, data = dat, prior = prior(R2D2(0.5, 10))) expect_equal(sdata$R2D2_mean_R2, 0.5) expect_equal(sdata$R2D2_prec_R2, 10) expect_equal(sdata$R2D2_cons_D2, as.array(rep(0.5, 
3))) # horseshoe and R2D2 prior applied in a non-linear model hs_a1 <- horseshoe(7, scale_global = 2, df_global = 3) R2D2_a2 <- R2D2(0.5, 10) sdata <- make_standata( bf(y ~ a1 + a2, a1 ~ x1, a2 ~ 0 + x2, nl = TRUE), data = dat, sample_prior = TRUE, prior = c(set_prior(hs_a1, nlpar = "a1"), set_prior(R2D2_a2, nlpar = "a2")) ) expect_equal(sdata$hs_df_a1, 7) expect_equal(sdata$R2D2_mean_R2_a2, 0.5) bform <- bf(y ~ x1*mo(x3) + (1|g) + gp(x3) + s(x2) + arma(p = 2, q = 2, gr = g)) bprior <- prior(R2D2(cons_D2 = 11:1, main = TRUE), class = b) + prior(R2D2(), class = sd) + prior(R2D2(), class = sds) + prior(R2D2(), class = sdgp) + prior(R2D2(), class = ar) + prior(R2D2(), class = ma) sdata <- make_standata(bform, data = dat, prior = bprior) expect_equal(sdata$Kscales, 11) expect_equal(sdata$R2D2_cons_D2, as.array(11:1)) }) test_that("dots in formula are correctly expanded", { dat <- data.frame(y = 1:10, x1 = 1:10, x2 = 1:10) sdata <- make_standata(y ~ ., dat) expect_equal(colnames(sdata$X), c("Intercept", "x1", "x2")) }) test_that("argument 'stanvars' is handled correctly", { bprior <- prior(normal(mean_intercept, 10), class = "Intercept") mean_intercept <- 5 stanvars <- stanvar(mean_intercept) sdata <- make_standata(count ~ Trt, data = epilepsy, prior = bprior, stanvars = stanvars) expect_equal(sdata$mean_intercept, 5) # define a multi_normal prior with known covariance matrix bprior <- prior(multi_normal(M, V), class = "b") stanvars <- stanvar(rep(0, 2), "M", scode = " vector[K] M;") + stanvar(diag(2), "V", scode = " matrix[K, K] V;") sdata <- make_standata(count ~ Trt + zBase, epilepsy, prior = bprior, stanvars = stanvars) expect_equal(sdata$M, rep(0, 2)) expect_equal(sdata$V, diag(2)) }) test_that("addition arguments 'vint' and 'vreal' work correctly", { dat <- data.frame(size = 10, y = sample(0:10, 20, TRUE), x = rnorm(20)) beta_binomial2 <- custom_family( "beta_binomial2", dpars = c("mu", "tau"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = 
c("vint1[n]", "vreal1[n]") ) sdata <- make_standata( y | vint(size) + vreal(x, size) ~ 1, data = dat, family = beta_binomial2, ) expect_equal(sdata$vint1, as.array(rep(10, 20))) expect_equal(sdata$vreal1, as.array(dat$x)) expect_equal(sdata$vreal2, as.array(rep(10, 20))) }) test_that("reserved variables 'Intercept' is handled correctly", { dat <- data.frame(y = 1:10) expect_warning( sdata <- make_standata(y ~ 0 + intercept, dat), "Reserved variable name 'intercept' is deprecated." ) expect_true(all(sdata$X[, "intercept"] == 1)) sdata <- make_standata(y ~ 0 + Intercept, dat) expect_true(all(sdata$X[, "Intercept"] == 1)) }) test_that("data for multinomial and dirichlet models is correct", { N <- 15 dat <- as.data.frame(rdirichlet(N, c(3, 2, 1))) names(dat) <- c("y1", "y2", "y3") dat$t1 <- round(dat$y1 * rpois(N, 10)) dat$t2 <- round(dat$y2 * rpois(N, 10)) dat$t3 <- round(dat$y3 * rpois(N, 10)) dat$x <- rnorm(N) dat$y <- with(dat, cbind(y1, y2, y3)) dat$t <- with(dat, cbind(t1, t2, t3)) dat$size <- rowSums(dat$t) sdata <- make_standata(t | trials(size) ~ x, dat, multinomial()) expect_equal(sdata$trials, as.array(dat$size)) expect_equal(sdata$ncat, 3) expect_equal(sdata$Y, unname(dat$t)) sdata <- make_standata(y ~ x, data = dat, family = dirichlet()) expect_equal(sdata$ncat, 3) expect_equal(sdata$Y, unname(dat$y)) expect_error( make_standata(t | trials(10) ~ x, data = dat, family = multinomial()), "Number of trials does not match the number of events" ) expect_error(make_standata(t ~ x, data = dat, family = dirichlet()), "Response values in simplex models must sum to 1") }) test_that("make_standata handles cox models correctly", { data <- data.frame(y = rexp(100), x = rnorm(100)) bform <- bf(y ~ x) bprior <- prior(dirichlet(3), sbhaz) sdata <- make_standata(bform, data, brmsfamily("cox"), prior = bprior) expect_equal(dim(sdata$Zbhaz), c(100, 5)) expect_equal(dim(sdata$Zcbhaz), c(100, 5)) expect_equal(sdata$con_sbhaz, as.array(rep(3, 5))) sdata <- make_standata(bform, 
data, brmsfamily("cox", bhaz = list(df = 6))) expect_equal(dim(sdata$Zbhaz), c(100, 6)) expect_equal(dim(sdata$Zcbhaz), c(100, 6)) }) test_that("make_standata handles addition term 'rate' is correctly", { data <- data.frame(y = rpois(10, 1), x = rnorm(10), time = 1:10) sdata <- make_standata(y | rate(time) ~ x, data, poisson()) expect_equal(sdata$denom, as.array(data$time)) }) test_that("make_standata handles grouped ordinal thresholds correctly", { dat <- data.frame( y = c(1:5, 1:4, 4), gr = rep(c("a", "b"), each = 5), th = rep(5:6, each = 5), x = rnorm(10) ) # thresholds without a grouping factor sdata <- make_standata(y ~ x, dat, cumulative()) expect_equal(sdata$nthres, 4) sdata <- make_standata(y | thres(5) ~ x, dat, cumulative()) expect_equal(sdata$nthres, 5) expect_error( make_standata(y | thres(th) ~ x, dat, cumulative()), "Number of thresholds needs to be a single value" ) # thresholds with a grouping factor sdata <- make_standata(y | thres(th, gr) ~ x, dat, cumulative()) expect_equal(sdata$nthres, as.array(c(5, 6))) expect_equal(sdata$ngrthres, 2) expect_equal(unname(sdata$Jthres[1, ]), c(1, 5)) expect_equal(unname(sdata$Jthres[10, ]), c(6, 11)) sdata <- make_standata(y | thres(gr = gr) ~ x, dat, cumulative()) expect_equal(sdata$nthres, as.array(c(4, 3))) expect_equal(sdata$ngrthres, 2) sdata <- make_standata(y | thres(6, gr = gr) ~ x, dat, cumulative()) expect_equal(sdata$nthres, as.array(c(6, 6))) expect_equal(sdata$ngrthres, 2) }) test_that("information for threading is handled correctly", { dat <- data.frame(y = 1:10) sdata <- make_standata(y ~ 1, dat, threads = threading(2, grainsize = 3)) expect_equal(sdata$grainsize, 3) }) test_that("variables in data2 can be used in population-level effects", { dat <- data.frame(y = 1:10, x1 = rnorm(10), x2 = rnorm(10), x3 = rnorm(10)) foo <- function(..., idx = NULL) { out <- cbind(...) 
if (!is.null(idx)) { out <- out[, idx, drop = FALSE] } out } sdata <- make_standata(y ~ foo(x1, x2, x3, idx = id), data = dat, data2 = list(id = c(3, 1))) target <- c("Intercept", "foox1x2x3idxEQidx3", "foox1x2x3idxEQidx1") expect_equal(colnames(sdata$X), target) expect_equivalent(sdata$X[, 2], dat$x3) expect_equivalent(sdata$X[, 3], dat$x1) }) test_that("NAs are allowed in unused interval censoring variables", { dat <- data.frame(y = rnorm(10), ce = c(1, rep(2, 9))) dat$y2 <- dat$y + 2 dat$y2[1] <- NA sdata <- make_standata(y | cens(ce, y2 = y2) ~ 1, data = dat) expect_equal(sdata$N, 10L) expect_equal(sdata$rcens[1], 0) dat$ce[1] <- 2 expect_error( make_standata(y | cens(ce, y2 = y2) ~ 1, data = dat), "'y2' should not be NA for interval censored observations" ) }) test_that("drop_unused_factor levels works correctly", { dat <- data.frame(y = rnorm(10), x = factor(c("a", "b"), levels = c("a", "b", "c"))) # should drop level "c" sdata <- make_standata(y ~ x, data = dat) expect_equal(colnames(sdata$X), c("Intercept", "xb")) # should not drop level "c" sdata <- make_standata(y ~ x, data = dat, drop_unused_levels = FALSE) expect_equal(colnames(sdata$X), c("Intercept", "xb", "xc")) }) brms/tests/testthat/tests.distributions.R0000644000176200001440000005427214403600113020407 0ustar liggesuserscontext("Tests for distribution functions") test_that("student distribution works correctly", { expect_equal(integrate(dstudent_t, -100, 100, df = 15, mu = 10, sigma = 5)$value, 1) expect_equal(dstudent_t(1, df = 10, mu = 0, sigma = 5), dt(1/5, df = 10)/5) expect_equal(pstudent_t(2, df = 20, mu = 2, sigma = 0.4), pt(0, df = 20)) expect_equal(qstudent_t(0.7, df = 5, mu = 2, sigma = 3), 2 + 3*qt(0.7, df = 5)) expect_equal(length(rstudent_t(10, df = 10, mu = rnorm(10), sigma = 1:10)), 10) }) test_that("multivariate normal and student distributions work correctly", { mu <- rnorm(3) Sigma <- cov(matrix(rnorm(300), ncol = 3)) expect_equal(dmulti_normal(1:3, mu = mu, Sigma = Sigma), 
mnormt::dmnorm(1:3, mu, Sigma)) expect_equal(dmulti_student_t(1:3, mu = mu, Sigma = Sigma, df = 10, log = TRUE), mnormt::dmt(1:3, df = 10, mean = mu, S = Sigma, log = TRUE)) expect_equal(dim(rmulti_normal(7, mu = mu, Sigma = Sigma)), c(7, 3)) expect_equal(dim(rmulti_student_t(7, mu = mu, Sigma = Sigma, df = 10)), c(7, 3)) # test errors expect_error(dmulti_normal(1:3, mu = rnorm(2), Sigma = Sigma, check = TRUE), "Dimension of mu is incorrect") expect_error(dmulti_normal(1:3, mu = mu, Sigma = Sigma[1:2, 1:2], check = TRUE), "Dimension of Sigma is incorrect") expect_error(dmulti_normal(1:3, mu = mu, Sigma = Sigma[1:3, 3:1], check = TRUE), "Sigma must be a symmetric matrix") expect_error(rmulti_normal(1.5, mu = mu, Sigma = Sigma, check = TRUE), "n must be a positive integer") expect_error(rmulti_normal(10, mu = mu, Sigma = Sigma[1:3, 3:1], check = TRUE), "Sigma must be a symmetric matrix") expect_error(dmulti_student_t(rnorm(3), mu = mu, Sigma = Sigma, df = -1, check = TRUE), "df must be greater than 0") expect_error(dmulti_student_t(rnorm(3), mu = mu, Sigma = Sigma[1:3, 3:1], df = 30, check = TRUE), "Sigma must be a symmetric matrix") expect_error(rmulti_student_t(10, mu = mu, Sigma = Sigma, df = -1, check = TRUE), "df must be greater than 0") }) test_that("von_mises distribution functions run without errors", { n <- 10 res <- dvon_mises(runif(n, -pi, pi), mu = 1, kappa = 1:n) expect_true(length(res) == n) res <- pvon_mises(runif(n, -pi, pi), mu = rnorm(n), kappa = 0:(n-1)) expect_true(length(res) == n) res <- rvon_mises(n, mu = rnorm(n), kappa = 0:(n-1)) expect_true(length(res) == n) }) test_that("skew_normal distribution functions run without errors", { n <- 10 x <- rnorm(n, 10, 3) res <- dskew_normal(x, mu = 1, sigma = 2, alpha = 1) expect_true(length(res) == n) res <- pskew_normal(x, mu = rnorm(n), sigma = 1:n, alpha = 3, log.p = TRUE) expect_true(length(res) == n) p <- log(runif(n, 0, 1)) res <- qskew_normal(p, mu = rnorm(n), sigma = 1:n, alpha = 3, log.p = TRUE) 
expect_true(length(res) == n) res <- rskew_normal(n, mu = rnorm(n), sigma = 10, alpha = -4:5) expect_true(length(res) == n) }) test_that("exgaussian distribution functions run without errors", { n <- 10 x <- rnorm(n, 10, 3) res <- dexgaussian(x, mu = 1, sigma = 2, beta = 1) expect_true(length(res) == n) res <- pexgaussian(x, mu = rnorm(n), sigma = 1:n, beta = 3, log.p = TRUE) expect_true(length(res) == n) res <- rexgaussian(n, mu = rnorm(n), sigma = 10, beta = 1:10) expect_true(length(res) == n) }) test_that("frechet distribution functions run without errors", { n <- 10 x <- 21:30 res <- dfrechet(x, loc = 1, scale = 2, shape = 1, log = TRUE) expect_true(length(res) == n) loc <- 1:10 res <- pfrechet(x, loc = loc, scale = 1:n, shape = 3) expect_true(length(res) == n) q <- qfrechet(res, loc = loc, scale = 1:n, shape = 3) expect_equal(x, q) res <- rfrechet(n, loc = loc, scale = 10, shape = 1:10) expect_true(length(res) == n) }) test_that("inv_gaussian distribution functions run without errors", { n <- 10 x <- rgamma(n, 10, 3) res <- dinv_gaussian(x, mu = 1, shape = 1) expect_true(length(res) == n) res <- pinv_gaussian(x, mu = abs(rnorm(n)), shape = 3) expect_true(length(res) == n) res <- rinv_gaussian(n, mu = abs(rnorm(n)), shape = 1:10) expect_true(length(res) == n) }) test_that("beta_binomial distribution functions run without errors", { n <- 10 x <- rpois(n, lambda = 1) res <- dbeta_binomial(x, c(2, 10), mu = 0.4, phi = 1) expect_true(length(res) == n) res <- pbeta_binomial(x, c(2, 10), mu = 0.4, phi = 1) expect_true(length(res) == n) res <- rbeta_binomial(n, c(2, 10), mu = 0.4, phi = 1) expect_true(length(res) == n) }) test_that("gen_extreme_value distribution functions run without errors", { n <- 10 x <- rgamma(n, 10, 3) res <- dgen_extreme_value(x, mu = 1, sigma = 2, xi = 1) expect_true(length(res) == n) res <- pgen_extreme_value(x, mu = rnorm(n), sigma = 1:n, xi = 3) expect_true(length(res) == n) res <- rgen_extreme_value(n, mu = rnorm(n), sigma = 10, xi = 1:10) 
expect_true(length(res) == n) }) test_that("asym_laplace distribution functions run without errors", { n <- 10 x <- rnorm(n, 10, 3) res <- dasym_laplace(x, mu = 1, sigma = 2, quantile = 0.5) expect_true(length(res) == n) res <- pasym_laplace(x, mu = rnorm(n), sigma = 1:n, quantile = 0.3) expect_true(length(res) == n) res <- rasym_laplace(n, mu = rnorm(n), sigma = 10, quantile = runif(n, 0, 1)) expect_true(length(res) == n) }) test_that("zero-inflated distribution functions run without errors", { n <- 10 x <- rpois(n, lambda = 1) res <- dzero_inflated_poisson(x, lambda = 1, zi = 0.1) expect_true(length(res) == n) res <- pzero_inflated_poisson(x, lambda = 1, zi = 0.1) expect_true(length(res) == n) res <- dzero_inflated_negbinomial(x, mu = 2, shape = 5, zi = 0.1) expect_true(length(res) == n) res <- pzero_inflated_negbinomial(x, mu = 2, shape = 5, zi = 0.1) expect_true(length(res) == n) res <- dzero_inflated_binomial(x, size = c(2, 10), prob = 0.4, zi = 0.1) expect_true(length(res) == n) res <- pzero_inflated_binomial(x, size = c(2, 10), prob = 0.4, zi = 0.1) expect_true(length(res) == n) res <- dzero_inflated_beta_binomial(x, c(2, 10), mu = 0.4, phi = 1, zi = 0.1) expect_true(length(res) == n) res <- pzero_inflated_beta_binomial(x, c(2, 10), mu = 0.4, phi = 1, zi = 0.1) expect_true(length(res) == n) x <- c(rbeta(n - 2, shape1 = 2, shape2 = 3), 0, 0) res <- dzero_inflated_beta(x, shape1 = 2, shape2 = 3, zi = 0.1) expect_true(length(res) == n) res <- pzero_inflated_beta(x, shape1 = 2, shape2 = 3, zi = 0.1) expect_true(length(res) == n) }) test_that("hurdle distribution functions run without errors", { n <- 10 x <- rpois(n, lambda = 1) res <- dhurdle_poisson(x, lambda = 1, hu = 0.1) expect_true(length(res) == n) res <- phurdle_poisson(x, lambda = 1, hu = 0.1) expect_true(length(res) == n) res <- dhurdle_negbinomial(x, mu = 2, shape = 5, hu = 0.1) expect_true(length(res) == n) res <- phurdle_negbinomial(x, mu = 2, shape = 5, hu = 0.1) expect_true(length(res) == n) res <- 
dhurdle_gamma(x, shape = 1, scale = 3, hu = 0.1) expect_true(length(res) == n) res <- phurdle_gamma(x, shape = 1, scale = 3, hu = 0.1) expect_true(length(res) == n) res <- dhurdle_lognormal(x, mu = 2, sigma = 5, hu = 0.1) expect_true(length(res) == n) res <- phurdle_lognormal(x, mu = 2, sigma = 5, hu = 0.1) expect_true(length(res) == n) }) test_that("wiener distribution functions run without errors", { set.seed(1234) n <- 10 x <- seq(0.1, 1, 0.1) alpha <- rexp(n) tau <- 0.05 beta <- 0.5 delta <- rnorm(n) resp <- sample(c(0, 1), n, TRUE) d1 <- dwiener(x, alpha, tau, beta, delta, resp, backend = "Rwiener") d2 <- dwiener(x, alpha, tau, beta, delta, resp, backend = "rtdists") expect_equal(d1, d2) r1 <- rwiener(n, alpha, tau, beta, delta, backend = "Rwiener") r2 <- rwiener(n, alpha, tau, beta, delta, backend = "rtdists") expect_equal(names(r1), names(r2)) expect_equal(dim(r1), dim(r2)) }) test_that("d() works correctly", { source(testthat::test_path(file.path("helpers", "inv_link_ordinal_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike_oneobs.R"))) for (ndraws in ndraws_vec) { for (ncat in ncat_vec) { thres_test <- matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws) # Emulate no category-specific effects (i.e., only a single vector of # linear predictors) as well as category-specific effects (i.e., a matrix # of linear predictors): eta_test_list <- list( rnorm(ndraws), matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws) ) for (eta_test in eta_test_list) { thres_eta <- if (is.matrix(eta_test)) { stopifnot(identical(dim(eta_test), dim(thres_test))) thres_test - eta_test } else { # Just to try something different: sweep(thres_test, 1, as.array(eta_test)) } eta_thres <- if (is.matrix(eta_test)) { stopifnot(identical(dim(eta_test), dim(thres_test))) eta_test - thres_test } else { # Just to try something different: sweep(-thres_test, 1, as.array(eta_test), FUN = "+") } for (link in c("logit", "probit", "cauchit", "cloglog")) { # cumulative(): d_cumul <- 
dcumulative(seq_len(ncat), eta_test, thres_test, link = link) d_cumul_ch <- inv_link_cumulative_ch(thres_eta, link = link) expect_equivalent(d_cumul, d_cumul_ch) expect_equal(dim(d_cumul), c(ndraws, ncat)) # sratio(): d_sratio <- dsratio(seq_len(ncat), eta_test, thres_test, link = link) d_sratio_ch <- inv_link_sratio_ch(thres_eta, link = link) expect_equivalent(d_sratio, d_sratio_ch) expect_equal(dim(d_sratio), c(ndraws, ncat)) # cratio(): d_cratio <- dcratio(seq_len(ncat), eta_test, thres_test, link = link) d_cratio_ch <- inv_link_cratio_ch(eta_thres, link = link) expect_equivalent(d_cratio, d_cratio_ch) expect_equal(dim(d_cratio), c(ndraws, ncat)) # acat(): d_acat <- dacat(seq_len(ncat), eta_test, thres_test, link = link) d_acat_ch <- inv_link_acat_ch(eta_thres, link = link) expect_equivalent(d_acat, d_acat_ch) expect_equal(dim(d_acat), c(ndraws, ncat)) } } } } }) test_that("inv_link_() works correctly for arrays", { source(testthat::test_path(file.path("helpers", "inv_link_ordinal_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rnorm(ndraws * nobsv * (ncat - 1)), dim = c(ndraws, nobsv, ncat - 1)) nx_test <- -x_test for (link in c("logit", "probit", "cauchit", "cloglog")) { # cumulative(): il_cumul <- inv_link_cumulative(x_test, link = link) il_cumul_ch <- inv_link_cumulative_ch(x_test, link = link) expect_equivalent(il_cumul, il_cumul_ch) expect_equal(dim(il_cumul), c(ndraws, nobsv, ncat)) # sratio(): il_sratio <- inv_link_sratio(x_test, link = link) il_sratio_ch <- inv_link_sratio_ch(x_test, link = link) expect_equivalent(il_sratio, il_sratio_ch) expect_equal(dim(il_sratio), c(ndraws, nobsv, ncat)) # cratio(): il_cratio <- inv_link_cratio(nx_test, link = link) il_cratio_ch <- inv_link_cratio_ch(nx_test, link = link) expect_equivalent(il_cratio, il_cratio_ch) expect_equal(dim(il_cratio), c(ndraws, nobsv, ncat)) # acat(): il_acat <- 
inv_link_acat(nx_test, link = link) il_acat_ch <- inv_link_acat_ch(nx_test, link = link) expect_equivalent(il_acat, il_acat_ch) expect_equal(dim(il_acat), c(ndraws, nobsv, ncat)) } } } } }) test_that("link_() works correctly for arrays", { source(testthat::test_path(file.path("helpers", "link_ordinal_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rdirichlet(ndraws * nobsv, alpha = rep(1, ncat)), dim = c(ndraws, nobsv, ncat)) for (link in c("logit", "probit", "cauchit", "cloglog")) { # cumulative(): l_cumul <- link_cumulative(x_test, link = link) l_cumul_ch <- link_cumulative_ch(x_test, link = link) expect_equivalent(l_cumul, l_cumul_ch) expect_equal(dim(l_cumul), c(ndraws, nobsv, ncat - 1)) # sratio(): l_sratio <- link_sratio(x_test, link = link) l_sratio_ch <- link_sratio_ch(x_test, link = link) expect_equivalent(l_sratio, l_sratio_ch) expect_equal(dim(l_sratio), c(ndraws, nobsv, ncat - 1)) # cratio(): l_cratio <- link_cratio(x_test, link = link) l_cratio_ch <- link_cratio_ch(x_test, link = link) expect_equivalent(l_cratio, l_cratio_ch) expect_equal(dim(l_cratio), c(ndraws, nobsv, ncat - 1)) # acat(): l_acat <- link_acat(x_test, link = link) l_acat_ch <- link_acat_ch(x_test, link = link) expect_equivalent(l_acat, l_acat_ch) expect_equal(dim(l_acat), c(ndraws, nobsv, ncat - 1)) } } } } }) test_that("inv_link_() inverts link_()", { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rdirichlet(ndraws * nobsv, alpha = rep(1, ncat)), dim = c(ndraws, nobsv, ncat)) for (link in c("logit", "probit", "cauchit", "cloglog")) { # cumulative(): l_cumul <- link_cumulative(x_test, link = link) il_cumul <- inv_link_cumulative(l_cumul, link = link) expect_equivalent(il_cumul, x_test) # sratio(): l_sratio <- link_sratio(x_test, link = 
link) il_sratio <- inv_link_sratio(l_sratio, link = link) expect_equivalent(il_sratio, x_test) # cratio(): l_cratio <- link_cratio(x_test, link = link) il_cratio <- inv_link_cratio(l_cratio, link = link) expect_equivalent(il_cratio, x_test) # acat(): l_acat <- link_acat(x_test, link = link) il_acat <- inv_link_acat(l_acat, link = link) expect_equivalent(il_acat, x_test) } } } } }) test_that("link_() inverts inv_link_()", { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rnorm(ndraws * nobsv * (ncat - 1)), dim = c(ndraws, nobsv, ncat - 1)) nx_test <- -x_test for (link in c("logit", "probit", "cauchit", "cloglog")) { # cumulative(): il_cumul <- inv_link_cumulative(x_test, link = link) l_cumul <- link_cumulative(il_cumul, link = link) expect_equivalent(l_cumul, x_test) # sratio(): il_sratio <- inv_link_sratio(x_test, link = link) l_sratio <- link_sratio(il_sratio, link = link) expect_equivalent(l_sratio, x_test) # cratio(): il_cratio <- inv_link_cratio(x_test, link = link) l_cratio <- link_cratio(il_cratio, link = link) expect_equivalent(l_cratio, x_test) # acat(): il_acat <- inv_link_acat(x_test, link = link) l_acat <- link_acat(il_acat, link = link) expect_equivalent(l_acat, x_test) } } } } }) test_that(paste( "dsratio() and dcratio() give the same results for symmetric distribution", "functions" ), { source(testthat::test_path(file.path("helpers", "simopts_catlike_oneobs.R"))) for (ndraws in ndraws_vec) { for (ncat in ncat_vec) { thres_test <- matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws) # Emulate no category-specific effects (i.e., only a single vector of # linear predictors) as well as category-specific effects (i.e., a matrix # of linear predictors): eta_test_list <- list( rnorm(ndraws), matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws) ) for (eta_test in eta_test_list) { for (link in c("logit", "probit", "cauchit", "cloglog")) { d_sratio 
<- dsratio(seq_len(ncat), eta_test, thres_test, link = link) d_cratio <- dcratio(seq_len(ncat), eta_test, thres_test, link = link) if (link != "cloglog") { expect_equal(d_sratio, d_cratio) } else { expect_false(isTRUE(all.equal(d_sratio, d_cratio))) } } } } } }) test_that(paste( "inv_link_sratio() and inv_link_cratio() applied to arrays give the same", "results for symmetric distribution functions (when respecting the sign", "appropriately)." ), { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rnorm(ndraws * nobsv * (ncat - 1)), dim = c(ndraws, nobsv, ncat - 1)) nx_test <- -x_test for (link in c("logit", "probit", "cauchit", "cloglog")) { il_sratio <- inv_link_sratio(x_test, link = link) il_cratio <- inv_link_cratio(nx_test, link = link) if (link != "cloglog") { expect_equal(il_sratio, il_cratio) } else { expect_false(isTRUE(all.equal(il_sratio, il_cratio))) } } } } } }) test_that(paste( "link_sratio() and link_cratio() applied to arrays give the same", "results for symmetric distribution functions (when respecting the sign", "appropriately)." 
), { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rdirichlet(ndraws * nobsv, alpha = rep(1, ncat)), dim = c(ndraws, nobsv, ncat)) for (link in c("logit", "probit", "cauchit", "cloglog")) { l_sratio <- link_sratio(x_test, link = link) l_cratio <- link_cratio(x_test, link = link) if (link != "cloglog") { expect_equal(l_sratio, -l_cratio) } else { expect_false(isTRUE(all.equal(l_sratio, -l_cratio))) } } } } } }) test_that("dcategorical() works correctly", { source(testthat::test_path(file.path("helpers", "inv_link_categorical_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike_oneobs.R"))) for (ndraws in ndraws_vec) { for (ncat in ncat_vec) { eta_test_list <- list(cbind( 0, matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws) )) if (ndraws == 1) { eta_test_list <- c(eta_test_list, list(c(0, rnorm(ncat - 1)))) } for (eta_test in eta_test_list) { d_categorical <- dcategorical(seq_len(ncat), eta_test) d_categorical_ch <- inv_link_categorical_ch(eta_test, refcat_ins = FALSE) expect_equivalent(d_categorical, d_categorical_ch) expect_equal(dim(d_categorical), c(ndraws, ncat)) } } } }) test_that("inv_link_categorical() works correctly for arrays", { source(testthat::test_path(file.path("helpers", "inv_link_categorical_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rnorm(ndraws * nobsv * (ncat - 1)), dim = c(ndraws, nobsv, ncat - 1)) il_categorical <- inv_link_categorical(x_test) il_categorical_ch <- inv_link_categorical_ch(x_test) expect_equivalent(il_categorical, il_categorical_ch) expect_equal(dim(il_categorical), c(ndraws, nobsv, ncat)) } } } }) test_that("link_categorical() works correctly for arrays", { source(testthat::test_path(file.path("helpers", "link_categorical_ch.R"))) 
source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rdirichlet(ndraws * nobsv, alpha = rep(1, ncat)), dim = c(ndraws, nobsv, ncat)) l_categorical <- link_categorical(x_test) l_categorical_ch <- link_categorical_ch(x_test) expect_equivalent(l_categorical, l_categorical_ch) expect_equal(dim(l_categorical), c(ndraws, nobsv, ncat - 1)) } } } }) test_that("inv_link_categorical() inverts link_categorical()", { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rdirichlet(ndraws * nobsv, alpha = rep(1, ncat)), dim = c(ndraws, nobsv, ncat)) l_categorical <- link_categorical(x_test) il_categorical <- inv_link_categorical(l_categorical) expect_equivalent(il_categorical, x_test) } } } }) test_that("link_categorical() inverts inv_link_categorical()", { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rnorm(ndraws * nobsv * (ncat - 1)), dim = c(ndraws, nobsv, ncat - 1)) il_categorical <- inv_link_categorical(x_test) l_categorical <- link_categorical(il_categorical) expect_equivalent(l_categorical, x_test) } } } }) brms/tests/testthat/tests.posterior_epred.R0000644000176200001440000002071414453526235020725 0ustar liggesuserscontext("Tests for posterior_epred helper functions") # to reduce testing time on CRAN skip_on_cran() test_that("posterior_epred helper functions run without errors", { # actually run posterior_epred.brmsfit that call the helper functions fit <- brms:::rename_pars(brms:::brmsfit_example1) add_dummy_draws <- brms:::add_dummy_draws fit <- add_dummy_draws(fit, "shape", dist = "exp") fit <- add_dummy_draws(fit, "alpha", dist = "norm") fit <- add_dummy_draws(fit, "hu", dist = "beta", shape1 = 1, shape2 = 1) fit <- 
add_dummy_draws(fit, "phi", dist = "beta", shape1 = 1, shape2 = 1) fit <- add_dummy_draws(fit, "zi", dist = "beta", shape1 = 1, shape2 = 1) fit <- add_dummy_draws(fit, "quantile", dist = "beta", shape1 = 2, shape2 = 1) fit <- add_dummy_draws(fit, "xi", dist = "unif", min = -1, max = 0.5) fit <- add_dummy_draws(fit, "ndt", dist = "exp") fit$formula$formula <- update(fit$formula$formula, .~. - arma(visit, patient)) prep <- brms:::prepare_predictions(fit) prep$dpars$mu <- brms:::get_dpar(prep, "mu") prep$dpars$sigma <- brms:::get_dpar(prep, "sigma") prep$dpars$nu <- brms:::get_dpar(prep, "nu") ndraws <- ndraws(fit) nobs <- nobs(fit) # test preparation of truncated models prep$data$lb <- 0 prep$data$ub <- 200 mu <- brms:::posterior_epred_trunc(prep) expect_equal(dim(mu), c(ndraws, nobs)) # pseudo log-normal model fit$family <- fit$formula$family <- lognormal() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo shifted log-normal model fit$family <- fit$formula$family <- shifted_lognormal() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo skew-normal model fit$family <- fit$formula$family <- skew_normal() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo asym_laplace model fit$family <- fit$formula$family <- asym_laplace() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo zero_inflated_asym_laplace model fit$family <- fit$formula$family <- brmsfamily("zero_inflated_asym_laplace") expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo gen_extreme_value model fit$family <- fit$formula$family <- gen_extreme_value() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo weibull model fit$formula$pforms <- NULL fit$family <- fit$formula$family <- weibull() expect_equal(dim(SW(posterior_epred(fit, summary = FALSE))), c(ndraws, nobs)) # pseudo binomial model old_formula <- fit$formula$formula 
fit$formula$formula <- update(fit$formula$formula, . | trials(100) ~ .) fit$autocor <- brms:::cor_empty() fit$family <- fit$formula$family <- binomial() expect_equal(dim(SW(posterior_epred(fit, summary = FALSE))), c(ndraws, nobs)) # pseudo beta-binomial model fit$family <- fit$formula$family <- beta_binomial() expect_equal(dim(SW(posterior_epred(fit, summary = FALSE))), c(ndraws, nobs)) # pseudo zero inflated binomial model fit$family <- fit$formula$family <- zero_inflated_binomial() expect_equal(dim(SW(posterior_epred(fit, summary = FALSE))), c(ndraws, nobs)) # pseudo zero inflated beta binomial model fit$family <- fit$formula$family <- zero_inflated_beta_binomial() expect_equal(dim(SW(posterior_epred(fit, summary = FALSE))), c(ndraws, nobs)) # pseudo hurdle poisson model fit$formula$formula <- old_formula fit$family <- fit$formula$family <- hurdle_poisson() fit$formula <- bf(count ~ Trt*Age + mo(Exp) + offset(Age) + (1+Trt|visit), family = family(fit)) expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo zero-inflated poisson model fit$family <- fit$formula$family <- zero_inflated_poisson() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo custom model posterior_epred_test <- function(prep) { prep$dpars$mu } fit$family <- fit$formula$family <- custom_family( "test", dpars = "mu", links = c("logit"), type = "int", vars = "trials[n]" ) expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # truncated continuous models prep$dpars$shape <- c(as.matrix(fit, variable = "shape")) mu <- brms:::posterior_epred_trunc_gaussian(prep, lb = 0, ub = 10) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_student(prep, lb = -Inf, ub = 15) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_lognormal(prep, lb = 2, ub = 15) expect_equal(dim(mu), c(ndraws, nobs)) prep$dpars$mu <- exp(prep$dpars$mu) mu <- brms:::posterior_epred_trunc_gamma(prep, lb = 1, ub = 
7) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_exponential(prep, lb = 0, ub = Inf) expect_equal(dim(mu), c(ndraws, nobs)) mu <- SW(brms:::posterior_epred_trunc_weibull(prep, lb = -Inf, ub = Inf)) expect_equal(dim(mu), c(ndraws, nobs)) # truncated discrete models data <- list(Y = sample(100, 10), trials = 1:10, N = 10) lb <- matrix(0, nrow = ndraws, ncol = nobs) ub <- matrix(100, nrow = ndraws, ncol = nobs) mu <- brms:::posterior_epred_trunc_poisson(prep, lb = lb, ub = ub) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_negbinomial(prep, lb = lb, ub = ub) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_negbinomial2(prep, lb = lb, ub = ub) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_geometric(prep, lb = lb, ub = ub) expect_equal(dim(mu), c(ndraws, nobs)) prep$data$trials <- 120 lb <- matrix(-Inf, nrow = ndraws, ncol = nobs) prep$dpars$mu <- brms:::inv_link(prep$dpars$mu, "logit") mu <- brms:::posterior_epred_trunc_binomial(prep, lb = lb, ub = ub) expect_equal(dim(mu), c(ndraws, nobs)) }) test_that("posterior_epred_lagsar runs without errors", { prep <- list( dpars = list(mu = matrix(rnorm(30), nrow = 3)), ac = list( lagsar = matrix(c(0.3, 0.5, 0.7)), Msar = matrix(1:100, 10, 10) ), ndraws = 3, nobs = 10, family = gaussian() ) mu_new <- brms:::posterior_epred_lagsar(prep) expect_equal(dim(mu_new), dim(prep$dpars$mu)) expect_true(!identical(mu_new, prep$dpars$mu)) }) test_that("posterior_epred for advanced count data distributions runs without errors", { ns <- 15 nobs <- 5 ncat <- 3 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = array(rbeta(ns*nobs, 2, 2), dim = c(ns, nobs)), shape = array(rexp(ns*nobs, 3), dim = c(ns, nobs)) ) prep$family <- brmsfamily("discrete_weibull") pred <- suppressWarnings(brms:::posterior_epred_discrete_weibull(prep)) expect_equal(dim(pred), c(ns, nobs)) prep$family <- 
brmsfamily("com_poisson") pred <- suppressWarnings(brms:::posterior_epred_com_poisson(prep)) expect_equal(dim(pred), c(ns, nobs)) }) test_that("posterior_epred for multinomial and dirichlet models runs without errors", { ns <- 15 nobs <- 8 ncat <- 3 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu1 = array(rnorm(ns*nobs), dim = c(ns, nobs)), mu2 = array(rnorm(ns*nobs), dim = c(ns, nobs)) ) prep$data <- list(ncat = ncat, trials = sample(1:20, nobs)) prep$refcat <- 1 prep$family <- multinomial() pred <- brms:::posterior_epred_multinomial(prep = prep) expect_equal(dim(pred), c(ns, nobs, ncat)) prep$family <- dirichlet() pred <- brms:::posterior_epred_dirichlet(prep = prep) expect_equal(dim(pred), c(ns, nobs, ncat)) prep$family <- brmsfamily("dirichlet2") prep$dpars$mu1 <- array(rexp(ns*nobs, 1), dim = c(ns, nobs)) prep$dpars$mu2 <- array(rexp(ns*nobs, 1), dim = c(ns, nobs)) prep$dpars$mu3 <- array(rexp(ns*nobs, 1), dim = c(ns, nobs)) pred <- brms:::posterior_epred_dirichlet2(prep = prep) expect_equal(dim(pred), c(ns, nobs, ncat)) }) test_that("posterior_epred() can be reproduced by using d()", { fit4 <- rename_pars(brms:::brmsfit_example4) epred4 <- posterior_epred(fit4) eta4 <- posterior_linpred(fit4) bprep4 <- prepare_predictions(fit4) thres4 <- bprep4$thres$thres disc4 <- bprep4$dpars$disc$fe$b %*% t(bprep4$dpars$disc$fe$X) disc4 <- exp(disc4) epred4_ch <- aperm(sapply(seq_len(dim(eta4)[2]), function(i) { dsratio(seq_len(ncol(thres4) + 1), eta4[, i, ], thres4, disc4[, i]) }, simplify = "array"), perm = c(1, 3, 2)) expect_equivalent(epred4, epred4_ch) }) brms/tests/testthat/tests.posterior_predict.R0000644000176200001440000003344714361545260021264 0ustar liggesuserscontext("Tests for posterior_predict helper functions") test_that("posterior_predict for location shift models runs without errors", { ns <- 30 nobs <- 10 prep <- structure(list(ndraws = ns), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns * nobs), 
ncol = nobs), sigma = rchisq(ns, 3), nu = rgamma(ns, 4) ) i <- sample(nobs, 1) pred <- brms:::posterior_predict_gaussian(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_student(i, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for various skewed models runs without errors", { ns <- 50 nobs <- 2 prep <- structure(list(ndraws = ns), class = "brmsprep") prep$dpars <- list( sigma = rchisq(ns, 3), beta = rchisq(ns, 3), mu = matrix(rnorm(ns * nobs), ncol = nobs), alpha = rnorm(ns), ndt = 1 ) pred <- brms:::posterior_predict_lognormal(1, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_shifted_lognormal(1, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_exgaussian(1, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_skew_normal(1, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for aysm_laplace models runs without errors", { ns <- 50 prep <- structure(list(ndraws = ns), class = "brmsprep") prep$dpars <- list( sigma = rchisq(ns, 3), quantile = rbeta(ns, 2, 1), mu = matrix(rnorm(ns*2), ncol = 2), zi = rbeta(ns, 10, 10) ) pred <- brms:::posterior_predict_asym_laplace(1, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_zero_inflated_asym_laplace(1, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for multivariate linear models runs without errors", { ns <- 10 nvars <- 3 ncols <- 4 nobs <- nvars * ncols Sigma = array(cov(matrix(rnorm(300), ncol = 3)), dim = c(3, 3, 10)) prep <- structure(list(ndraws = ns), class = "mvbrmsprep") prep$mvpars <- list( Mu = array(rnorm(ns*nobs*nvars), dim = c(ns, nobs, nvars)), Sigma = aperm(Sigma, c(3, 1, 2)) ) prep$dpars <- list(nu = rgamma(ns, 5)) prep$data <- list(N = nobs, N_trait = ncols) pred <- brms:::posterior_predict_gaussian_mv(1, prep = prep) expect_equal(dim(pred), c(ns, nvars)) pred <- 
brms:::posterior_predict_student_mv(2, prep = prep) expect_equal(dim(pred), c(ns, nvars)) }) test_that("posterior_predict for ARMA covariance models runs without errors", { ns <- 20 nobs <- 15 prep <- structure(list(ndraws = ns), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns*nobs), ncol = nobs), sigma = rchisq(ns, 3), nu = rgamma(ns, 5) ) prep$ac <- list( ar = matrix(rbeta(ns, 0.5, 0.5), ncol = 1), ma = matrix(rnorm(ns, 0.2, 1), ncol = 1), begin_tg = c(1, 5, 12), end_tg = c(4, 11, 15) ) prep$data <- list(se = rgamma(ns, 10)) prep$family$fun <- "gaussian_time" pred <- brms:::posterior_predict_gaussian_time(1, prep = prep) expect_equal(length(pred), ns * 4) prep$family$fun <- "student_time" pred <- brms:::posterior_predict_student_time(2, prep = prep) expect_equal(length(pred), ns * 7) }) test_that("loglik for SAR models runs without errors", { ns = 3 prep <- structure(list(ndraws = ns, nobs = 10), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(30), nrow = ns), nu = rep(2, ns), sigma = rep(10, ns) ) prep$ac <- list(lagsar = matrix(c(0.3, 0.5, 0.7)), Msar = diag(10)) pred <- brms:::posterior_predict_gaussian_lagsar(1, prep = prep) expect_equal(dim(pred), c(3, 10)) pred <- brms:::posterior_predict_student_lagsar(1, prep = prep) expect_equal(dim(pred), c(3, 10)) prep$ac$errorsar <- prep$ac$lagsar prep$ac$lagsar <- NULL pred <- brms:::posterior_predict_gaussian_errorsar(1, prep = prep) expect_equal(dim(pred), c(3, 10)) pred <- brms:::posterior_predict_student_errorsar(1, prep = prep) expect_equal(dim(pred), c(3, 10)) }) test_that("posterior_predict for FCOR models runs without errors", { ns <- 3 nobs <- 10 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(nobs * ns), nrow = ns), sigma = rep(1, ns), nu = rep(2, ns) ) prep$ac <- list(Mfcor = diag(nobs)) pred <- brms:::posterior_predict_gaussian_fcor(1, prep = prep) expect_equal(dim(pred), c(ns, nobs)) pred <- 
brms:::posterior_predict_student_fcor(1, prep = prep) expect_equal(dim(pred), c(ns, nobs)) }) test_that("posterior_predict for count and survival models runs without errors", { ns <- 25 nobs <- 10 trials <- sample(10:30, nobs, replace = TRUE) prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( eta = matrix(rnorm(ns * nobs), ncol = nobs), shape = rgamma(ns, 4), xi = 0, phi = rgamma(ns, 1) ) prep$dpars$nu <- prep$dpars$sigma <- prep$dpars$shape + 1 prep$data <- list(trials = trials) i <- sample(nobs, 1) prep$dpars$mu <- brms:::inv_cloglog(prep$dpars$eta) pred <- brms:::posterior_predict_binomial(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_beta_binomial(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_discrete_weibull(i, prep = prep) expect_equal(length(pred), ns) prep$dpars$mu <- exp(prep$dpars$eta) pred <- brms:::posterior_predict_poisson(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_negbinomial(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_negbinomial2(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_geometric(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_com_poisson(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_exponential(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_gamma(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_frechet(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_inverse.gaussian(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_gen_extreme_value(i, prep = prep) expect_equal(length(pred), ns) prep$family$link <- "log" pred <- brms:::posterior_predict_weibull(i, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for bernoulli and beta 
models works correctly", { ns <- 17 nobs <- 10 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = brms:::inv_logit(matrix(rnorm(ns * nobs * 2), ncol = 2 * nobs)), phi = rgamma(ns, 4) ) i <- sample(1:nobs, 1) pred <- brms:::posterior_predict_bernoulli(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_beta(i, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for circular models runs without errors", { ns <- 15 nobs <- 10 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = 2 * atan(matrix(rnorm(ns * nobs * 2), ncol = nobs * 2)), kappa = rgamma(ns, 4) ) i <- sample(seq_len(nobs), 1) pred <- brms:::posterior_predict_von_mises(i, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for zero-inflated and hurdle models runs without erros", { ns <- 50 nobs <- 8 trials <- sample(10:30, nobs, replace = TRUE) prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( eta = matrix(rnorm(ns * nobs * 2), ncol = nobs * 2), shape = rgamma(ns, 4), phi = rgamma(ns, 1), zi = rbeta(ns, 1, 1), coi = rbeta(ns, 5, 7) ) prep$dpars$hu <- prep$dpars$zoi <- prep$dpars$zi prep$data <- list(trials = trials) prep$dpars$mu <- exp(prep$dpars$eta) pred <- brms:::posterior_predict_hurdle_poisson(1, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_hurdle_negbinomial(2, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_hurdle_gamma(5, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_zero_inflated_poisson(3, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_zero_inflated_negbinomial(6, prep = prep) expect_equal(length(pred), ns) prep$dpars$mu <- brms:::inv_logit(prep$dpars$eta) pred <- brms:::posterior_predict_zero_inflated_binomial(4, prep = prep) expect_equal(length(pred), ns) pred <- 
brms:::posterior_predict_zero_inflated_beta_binomial(6, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_zero_inflated_beta(8, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_zero_one_inflated_beta(7, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for ordinal models runs without errors", { ns <- 50 nobs <- 8 nthres <- 3 ncat <- nthres + 1 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = array(rnorm(ns * nobs), dim = c(ns, nobs)), disc = rexp(ns), hu = rbeta(ns, 1, 1) ) prep$thres$thres <- array(0, dim = c(ns, nthres)) prep$data <- list(Y = rep(1:ncat, 2), ncat = ncat) prep$family$link <- "logit" prep$family$family <- "cumulative" pred <- sapply(1:nobs, brms:::posterior_predict_cumulative, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$family$family <- "sratio" pred <- sapply(1:nobs, brms:::posterior_predict_sratio, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$family$family <- "cratio" pred <- sapply(1:nobs, brms:::posterior_predict_cratio, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$family$family <- "acat" pred <- sapply(1:nobs, brms:::posterior_predict_acat, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$family$link <- "probit" pred <- sapply(1:nobs, brms:::posterior_predict_acat, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$family$family <- "hurdle_cumulative" pred <- sapply(1:nobs, brms:::posterior_predict_hurdle_cumulative, prep = prep) expect_equal(dim(pred), c(ns, nobs)) }) test_that("posterior_predict for categorical and related models runs without erros", { set.seed(1234) ns <- 50 nobs <- 8 ncat <- 3 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu1 = array(rnorm(ns*nobs, 0, 0.1), dim = c(ns, nobs)), mu2 = array(rnorm(ns*nobs, 0, 0.1), dim = c(ns, nobs)) ) prep$data <- list(Y = rep(1:ncat, 2), ncat = ncat) prep$family <- categorical() 
prep$refcat <- 1 pred <- sapply(1:nobs, brms:::posterior_predict_categorical, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$data$trials <- sample(1:20, nobs) prep$family <- multinomial() pred <- brms:::posterior_predict_multinomial(i = sample(1:nobs, 1), prep = prep) expect_equal(dim(pred), c(ns, ncat)) prep$dpars$phi <- rexp(ns, 1) prep$family <- dirichlet() pred <- brms:::posterior_predict_dirichlet(i = sample(1:nobs, 1), prep = prep) expect_equal(dim(pred), c(ns, ncat)) expect_equal(rowSums(pred), rep(1, nrow(pred))) prep$family <- brmsfamily("dirichlet2") prep$dpars$mu1 <- rexp(ns, 10) prep$dpars$mu2 <- rexp(ns, 10) prep$dpars$mu3 <- rexp(ns, 10) pred <- brms:::posterior_predict_dirichlet2(i = sample(1:nobs, 1), prep = prep) expect_equal(dim(pred), c(ns, ncat)) expect_equal(rowSums(pred), rep(1, nrow(pred))) prep$family <- brmsfamily("logistic_normal") prep$dpars <- list( mu2 = rnorm(ns), mu3 = rnorm(ns), sigma2 = rexp(ns, 10), sigma3 = rexp(ns, 10) ) prep$lncor <- rbeta(ns, 2, 1) pred <- brms:::posterior_predict_logistic_normal(i = sample(1:nobs, 1), prep = prep) expect_equal(dim(pred), c(ns, ncat)) expect_equal(rowSums(pred), rep(1, nrow(pred))) }) test_that("truncated posterior_predict run without errors", { ns <- 30 nobs <- 15 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns * nobs), ncol = nobs), sigma = rchisq(ns, 3) ) prep$refcat <- 1 prep$data <- list(lb = sample(-(4:7), nobs, TRUE)) pred <- sapply(1:nobs, brms:::posterior_predict_gaussian, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$dpars$mu <- exp(prep$dpars$mu) prep$data <- list(ub = sample(70:80, nobs, TRUE)) pred <- sapply(1:nobs, brms:::posterior_predict_poisson, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$data <- list(lb = rep(0, nobs), ub = sample(70:75, nobs, TRUE)) pred <- sapply(1:nobs, brms:::posterior_predict_poisson, prep = prep) expect_equal(dim(pred), c(ns, nobs)) }) test_that("posterior_predict 
for the wiener diffusion model runs without errors", { skip("skip as long as RWiener fails on R-devel for 3.6.0") ns <- 5 nobs <- 3 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns * nobs), ncol = nobs), bs = rchisq(ns, 3), ndt = rep(0.5, ns), bias = rbeta(ns, 1, 1) ) prep$data <- list(Y = abs(rnorm(ns)) + 0.5, dec = c(1, 0, 1)) i <- sample(1:nobs, 1) expect_equal(nrow(brms:::posterior_predict_wiener(i, prep)), ns) }) test_that("posterior_predict_custom runs without errors", { ns <- 15 nobs <- 10 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rbeta(ns * nobs * 2, 1, 1), ncol = nobs * 2) ) prep$data <- list(trials = rep(1, nobs)) prep$family <- custom_family( "beta_binomial2", dpars = c("mu", "tau"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = "trials[n]" ) posterior_predict_beta_binomial2 <- function(i, prep) { mu <- prep$dpars$mu[, i] rbinom(prep$ndraws, size = prep$data$trials[i], prob = mu) } expect_equal(length(brms:::posterior_predict_custom(sample(1:nobs, 1), prep)), ns) }) brms/tests/testthat/tests.brm.R0000644000176200001440000001274514213413565016300 0ustar liggesusers# calling context() avoids a strange bug in testthat 2.0.0 # cannot actually run brms models in tests as it takes way too long context("Tests for brms error messages") test_that("brm works fully with mock backend", { skip_on_cran() dat <- data.frame(y = rnorm(10), x = rnorm(10), g = rep(1:5, 2)) # Positive control - forced error gets thrown and propagated expect_error(brm(y ~ x + (1|g), dat, backend = "mock", stan_model_args = list(compile_error = "Test error")), "Test error") # Positive control - bad Stan code from stanvars gets an error expect_error(suppressMessages( brm(y ~ x + (1|g), dat, backend = "mock", stanvars = stanvar(scode = "invalid;", block = "model")) )) # Testing some models mock_fit <- brm(y ~ x + (1|g), dat, mock_fit = 1, backend = "mock", 
rename = FALSE) expect_equal(mock_fit$fit, 1) }) test_that("brm(file = xx) works fully with mock backend", { skip_on_cran() dat <- data.frame(y = rnorm(10), x = rnorm(10), g = rep(1:5, 2)) file <- tempfile(fileext = ".rds") mock_fit1 <- brm(y ~ x + (1|g), dat, mock_fit = "stored", backend = "mock", rename = FALSE, file = file) expect_true(file.exists(file)) mock_fit2 <- brm(y ~ x + (1|g), dat, mock_fit = "new", backend = "mock", rename = FALSE, file = file) expect_equal(mock_fit2$fit, "stored") # In default settings, even using different data/model should result in the # model being loaded from file changed_data <- dat[1:8, ] mock_fit2 <- brm(y ~ x + 0, changed_data, mock_fit = "new", backend = "mock", rename = FALSE, file = file) expect_equal(mock_fit2$fit, "stored") # Now test using file_refit = "on_change" which should be more clever # No change mock_fit2 <- brm(y ~ x + (1|g), dat, mock_fit = "new", backend = "mock", rename = FALSE, file = file) expect_equal(mock_fit2$fit, "stored") # Change data, but not code mock_fit2 <- brm(y ~ x + (1|g), changed_data, mock_fit = "new", backend = "mock", rename = FALSE, file = file, file_refit = "on_change") expect_equal(mock_fit2$fit, "new") # Change code but not data mock_fit2 <- brm(y ~ x + (1|g), dat, mock_fit = "new", backend = "mock", rename = FALSE, file = file, file_refit = "on_change", prior = prior(normal(0,2), class = sd)) expect_equal(mock_fit2$fit, "new") # Change both mock_fit2 <- brm(y ~ x + 0, changed_data, mock_fit = "new", backend = "mock", rename = FALSE, file = file, file_refit = "on_change") expect_equal(mock_fit2$fit, "new") }) test_that("brm produces expected errors", { dat <- data.frame(y = rnorm(10), x = rnorm(10), g = rep(1:5, 2)) # formula parsing expect_error(brm(~ x + (1|g), dat, file = "test"), "Response variable is missing") expect_error(brm(bf(y ~ a, nl = TRUE)), "No non-linear parameters specified") expect_error(brm(bf(y | se(sei) ~ x, sigma ~ x), dat), "Cannot predict or fix 'sigma' in this 
model") expect_error(brm(y | se(sei) ~ x, dat, family = weibull()), "Argument 'se' is not supported for family") expect_error(brm(y | se(sei) + se(sei2) ~ x, dat, family = gaussian()), "Each addition argument may only be defined once") expect_error(brm(y | abc(sei) ~ x, family = gaussian()), "The following addition terms are invalid:\n'abc(sei)'", fixed = TRUE) expect_error(brm(y | disp(sei) ~ x, dat, family = gaussian()), "The following addition terms are invalid:") expect_error(brm(bf(y ~ x, shape ~ x), family = gaussian()), "The parameter 'shape' is not a valid distributional") expect_error(brm(y ~ x + (1|abc|g/x), dat), "Can only combine group-level terms") expect_error(brm(y ~ x + (1|g) + (x|g), dat), "Duplicated group-level effects are not allowed") expect_error(brm(y~mo(g)*t2(x), dat), fixed = TRUE, "The term 'mo(g):t2(x)' is invalid") expect_error(brm(y~x*cs(g), dat), fixed = TRUE, "The term 'x:cs(g)' is invalid") expect_error(brm(y~me(x, 2 * g)*me(x, g), dat), "Variable 'x' is used in different calls to 'me'") expect_error(brm(y ~ 1 + set_rescor(TRUE), data = dat), "Function 'set_rescor' should not be part") # autocorrelation expect_error(brm(y ~ ar(x+y, g), dat), "Cannot coerce 'x \\+ y' to a single variable name") expect_error(brm(y ~ ar(gr = g1/g2), dat), "Illegal grouping term 'g1/g2'") expect_error(brm(y ~ ma(x), dat, poisson()), "Please set cov = TRUE") expect_error(brm(bf(y ~ 1) + arma(x), dat), "Autocorrelation terms can only be specified") # ordinal models expect_error(brm(rating ~ treat + (cs(period)|subject), data = inhaler, family = categorical()), "Category specific effects are not supported") # families and links expect_error(brm(y ~ x, dat, family = poisson("inverse")), "'inverse' is not a supported link for family 'poisson'") expect_error(brm(y ~ x, dat, family = c("weibull", "sqrt")), "'sqrt' is not a supported link for family 'weibull'") expect_error(brm(y ~ x, dat, family = c("categorical", "probit")), "'probit' is not a supported link 
for family 'categorical'") expect_error(brm(y ~ x, dat, family = "ordinal"), "ordinal is not a supported family") }) brms/tests/testthat/tests.brmsfit-methods.R0000644000176200001440000010401114424715563020622 0ustar liggesuserscontext("Tests for brmsfit methods") # to reduce testing time on CRAN substantially skip_on_cran() expect_range <- function(object, lower = -Inf, upper = Inf, ...) { testthat::expect_true(all(object >= lower & object <= upper), ...) } expect_ggplot <- function(object, ...) { testthat::expect_true(is(object, "ggplot"), ...) } SM <- suppressMessages SW <- suppressWarnings fit1 <- rename_pars(brms:::brmsfit_example1) fit2 <- rename_pars(brms:::brmsfit_example2) fit3 <- rename_pars(brms:::brmsfit_example3) fit4 <- rename_pars(brms:::brmsfit_example4) fit5 <- rename_pars(brms:::brmsfit_example5) fit6 <- rename_pars(brms:::brmsfit_example6) # some high level info about the data sets nobs <- 40 npatients <- 10 nsubjects <- 8 nvisits <- 4 # test S3 methods in alphabetical order test_that("as_draws and friends have resonable outputs", { draws <- as_draws(fit1, variable = "b_Intercept") expect_s3_class(draws, "draws_list") expect_equal(variables(draws), "b_Intercept") expect_equal(ndraws(draws), ndraws(fit1)) draws <- SM(as_draws_matrix(fit1)) expect_s3_class(draws, "draws_matrix") expect_equal(ndraws(draws), ndraws(fit1)) draws <- as_draws_array(fit2) expect_s3_class(draws, "draws_array") expect_equal(niterations(draws), ndraws(fit2)) draws <- as_draws_df(fit2, variable = "^b_", regex = TRUE) expect_s3_class(draws, "draws_df") expect_true(all(grepl("^b_", variables(draws)))) draws <- as_draws_list(fit2) expect_s3_class(draws, "draws_list") expect_equal(nchains(draws), nchains(fit2)) draws <- as_draws_rvars(fit3) expect_s3_class(draws, "draws_rvars") expect_equal(ndraws(draws), ndraws(fit3)) expect_true(length(variables(draws)) > 0) }) test_that("as.data.frame has reasonable ouputs", { draws <- as.data.frame(fit1) expect_true(is(draws, "data.frame")) 
expect_equal(dim(draws), c(ndraws(fit1), length(variables(fit1)))) # deprecated 'pars' argument still works expect_warning( draws <- as.data.frame(fit1, pars = "^b_"), "'pars' is deprecated" ) expect_s3_class(draws, "data.frame") expect_true(ncol(draws) > 0) # deprecated 'subset' argument still works expect_warning( draws <- as.data.frame(fit1, subset = 10:20), "'subset' is deprecated" ) expect_s3_class(draws, "data.frame") expect_equal(nrow(draws), 11) }) test_that("as.matrix has reasonable ouputs", { draws <- as.matrix(fit1, iteration = 1:10) expect_true(is(draws, "matrix")) expect_equal(dim(draws), c(10, length(variables(fit1)))) }) test_that("as.array has reasonable ouputs", { draws <- as.array(fit1) expect_true(is.array(draws)) chains <- fit1$fit@sim$chains ps_dim <- c(niterations(fit1), chains, length(variables(fit1))) expect_equal(dim(draws), ps_dim) draws <- as.array(fit1, chain = 1) expect_true(is.array(draws)) ps_dim <- c(niterations(fit1), 1, length(variables(fit1))) expect_equal(dim(draws), ps_dim) }) test_that("as.mcmc has reasonable ouputs", { chains <- fit1$fit@sim$chains mc <- SW(as.mcmc(fit1)) expect_equal(length(mc), chains) expect_equal(dim(mc[[1]]), c(ndraws(fit1) / chains, length(variables(fit1)))) mc <- SW(as.mcmc(fit1, combine_chains = TRUE)) expect_equal(dim(mc), c(ndraws(fit1), length(variables(fit1)))) # test assumes thin = 1 expect_equal(dim(SW(as.mcmc(fit1, inc_warmup = TRUE)[[1]])), c(fit1$fit@sim$iter, length(variables(fit1)))) }) test_that("autocor has reasonable ouputs", { expect_true(is.null(SW(autocor(fit1)))) expect_true(is.null(SW(autocor(fit6, resp = "count")))) }) test_that("bayes_R2 has reasonable ouputs", { fit1 <- add_criterion(fit1, "bayes_R2") R2 <- bayes_R2(fit1, summary = FALSE) expect_equal(dim(R2), c(ndraws(fit1), 1)) R2 <- bayes_R2(fit2, newdata = model.frame(fit2)[1:5, ], re_formula = NA) expect_equal(dim(R2), c(1, 4)) R2 <- bayes_R2(fit6) expect_equal(dim(R2), c(2, 4)) }) test_that("bayes_factor has reasonable 
ouputs", { # don't test for now as it requires calling Stan's C++ code }) test_that("bridge_sampler has reasonable ouputs", { # don't test for now as it requires calling Stan's C++ code }) test_that("coef has reasonable ouputs", { coef1 <- SM(coef(fit1)) expect_equal(dim(coef1$visit), c(4, 4, 9)) coef1 <- SM(coef(fit1, summary = FALSE)) expect_equal(dim(coef1$visit), c(ndraws(fit1), 4, 9)) coef2 <- SM(coef(fit2)) expect_equal(dim(coef2$patient), c(npatients, 4, 4)) coef4 <- SM(coef(fit4)) expect_equal(dim(coef4$subject), c(nsubjects, 4, 8)) }) test_that("combine_models has reasonable ouputs", { expect_equal(ndraws(combine_models(fit1, fit1)), ndraws(fit1) * 2) }) test_that("conditional_effects has reasonable ouputs", { me <- conditional_effects(fit1, resp = "count") expect_equal(nrow(me[[2]]), 100) meplot <- plot(me, points = TRUE, rug = TRUE, ask = FALSE, plot = FALSE) expect_ggplot(meplot[[1]]) me <- conditional_effects(fit1, "Trt", select_points = 0.1) expect_lt(nrow(attr(me[[1]], "points")), nobs(fit1)) me <- conditional_effects(fit1, "volume:Age", surface = TRUE, resolution = 15, too_far = 0.2) meplot <- plot(me, plot = FALSE) expect_ggplot(meplot[[1]]) meplot <- plot(me, stype = "raster", plot = FALSE) expect_ggplot(meplot[[1]]) me <- conditional_effects(fit1, "Age", spaghetti = TRUE, ndraws = 10) expect_equal(nrow(attr(me$Age, "spaghetti")), 1000) meplot <- plot(me, plot = FALSE) expect_ggplot(meplot[[1]]) expect_error( conditional_effects(fit1, "Age", spaghetti = TRUE, surface = TRUE), "Cannot use 'spaghetti' and 'surface' at the same time" ) me <- conditional_effects(fit1, effects = c("Age", "Age:visit"), re_formula = NULL) expect_equal(nrow(me[[1]]), 100) exp_nrow <- 100 * length(unique(fit1$data$visit)) expect_equal(nrow(me[[2]]), exp_nrow) mdata = data.frame( Age = c(-0.3, 0, 0.3), count = c(10, 20, 30), Exp = c(1, 3, 5) ) exp_nrow <- nrow(mdata) * 100 me <- conditional_effects(fit1, effects = "Age", conditions = mdata) expect_equal(nrow(me[[1]]), 
exp_nrow) mdata$visit <- 1:3 me <- conditional_effects(fit1, re_formula = NULL, conditions = mdata) expect_equal(nrow(me$Age), exp_nrow) me <- conditional_effects( fit1, "Age:Trt", int_conditions = list(Age = rnorm(5)) ) expect_equal(nrow(me[[1]]), 10) me <- conditional_effects( fit1, "Age:Trt", int_conditions = list(Age = quantile) ) expect_equal(nrow(me[[1]]), 10) expect_error(conditional_effects(fit1, effects = "Trtc"), "All specified effects are invalid for this model") expect_warning(conditional_effects(fit1, effects = c("Trtc", "Trt")), "Some specified effects are invalid for this model") expect_error(conditional_effects(fit1, effects = "Trtc:a:b"), "please use the 'conditions' argument") mdata$visit <- NULL mdata$Exp <- NULL mdata$patient <- 1 expect_equal(nrow(conditional_effects(fit2)[[2]]), 100) me <- conditional_effects(fit2, re_formula = NULL, conditions = mdata) expect_equal(nrow(me$Age), exp_nrow) expect_warning( me4 <- conditional_effects(fit4), "Predictions are treated as continuous variables" ) expect_true(is(me4, "brms_conditional_effects")) me4 <- conditional_effects(fit4, "x2", categorical = TRUE) expect_true(is(me4, "brms_conditional_effects")) me5 <- conditional_effects(fit5) expect_true(is(me5, "brms_conditional_effects")) me6 <- conditional_effects(fit6, ndraws = 20) expect_true(is(me6, "brms_conditional_effects")) }) test_that("plot of conditional_effects has reasonable outputs", { SW(ggplot2::theme_set(theme_black())) N <- 90 marg_results <- data.frame( effect1__ = rpois(N, 20), effect2__ = factor(rep(1:3, each = N / 3)), estimate__ = rnorm(N, sd = 5), se__ = rt(N, df = 10), cond__ = rep(1:2, each = N / 2), cats__ = factor(rep(1:3, each = N / 3)) ) marg_results[["lower__"]] <- marg_results$estimate__ - 2 marg_results[["upper__"]] <- marg_results$estimate__ + 2 marg_results <- list(marg_results[order(marg_results$effect1__), ]) class(marg_results) <- "brms_conditional_effects" attr(marg_results[[1]], "response") <- "count" # test with 1 
numeric predictor attr(marg_results[[1]], "effects") <- "P1" marg_plot <- plot(marg_results, plot = FALSE) expect_ggplot(marg_plot[[1]]) # test with 1 categorical predictor attr(marg_results[[1]], "effects") <- "P2" marg_plot <- plot(marg_results, plot = FALSE) expect_ggplot(marg_plot[[1]]) # test with 1 numeric and 1 categorical predictor attr(marg_results[[1]], "effects") <- c("P1", "P2") marg_plot <- plot(marg_results, plot = FALSE) expect_ggplot(marg_plot[[1]]) # test ordinal raster plot attr(marg_results[[1]], "effects") <- c("P1", "cats__") attr(marg_results[[1]], "ordinal") <- TRUE marg_plot <- plot(marg_results, plot = FALSE) expect_ggplot(marg_plot[[1]]) }) test_that("conditional_smooths has reasonable ouputs", { ms <- conditional_smooths(fit1) expect_equal(nrow(ms[[1]]), 100) expect_true(is(ms, "brms_conditional_effects")) ms <- conditional_smooths(fit1, spaghetti = TRUE, ndraws = 10) expect_equal(nrow(attr(ms[[1]], "spaghetti")), 1000) expect_error(conditional_smooths(fit1, smooths = "s3"), "No valid smooth terms found in the model") expect_error(conditional_smooths(fit2), "No valid smooth terms found in the model") }) test_that("family has reasonable ouputs", { expect_is(family(fit1), "brmsfamily") expect_is(family(fit6, resp = "count"), "brmsfamily") expect_output(print(family(fit1), links = TRUE), "student.*log.*logm1") expect_output(print(family(fit5)), "Mixture.*gaussian.*exponential") }) test_that("fitted has reasonable outputs", { skip_on_cran() fi <- fitted(fit1) expect_equal(dim(fi), c(nobs(fit1), 4)) expect_equal(colnames(fi), c("Estimate", "Est.Error", "Q2.5", "Q97.5")) newdata <- data.frame( Age = c(0, -0.2), visit = c(1, 4), Trt = c(0, 1), count = c(20, 13), patient = c(1, 42), Exp = c(2, 4), volume = 0 ) fi <- fitted(fit1, newdata = newdata) expect_equal(dim(fi), c(2, 4)) newdata$visit <- c(1, 6) fi <- fitted(fit1, newdata = newdata, allow_new_levels = TRUE) expect_equal(dim(fi), c(2, 4)) # fitted values with new_levels newdata <- 
data.frame( Age = 0, visit = paste0("a", 1:100), Trt = 0, count = 20, patient = 1, Exp = 2, volume = 0 ) fi <- fitted(fit1, newdata = newdata, allow_new_levels = TRUE, sample_new_levels = "old_levels", ndraws = 10) expect_equal(dim(fi), c(100, 4)) fi <- fitted(fit1, newdata = newdata, allow_new_levels = TRUE, sample_new_levels = "gaussian", ndraws = 1) expect_equal(dim(fi), c(100, 4)) # fitted values of auxiliary parameters newdata <- data.frame( Age = 0, visit = c("a", "b"), Trt = 0, count = 20, patient = 1, Exp = 2, volume = 0 ) fi <- fitted(fit1, dpar = "sigma") expect_equal(dim(fi), c(nobs(fit1), 4)) expect_true(all(fi > 0)) fi_lin <- fitted(fit1, dpar = "sigma", scale = "linear") expect_equal(dim(fi_lin), c(nobs(fit1), 4)) expect_true(!isTRUE(all.equal(fi, fi_lin))) expect_error(fitted(fit1, dpar = "inv"), "Invalid argument 'dpar'") fi <- fitted(fit2) expect_equal(dim(fi), c(nobs(fit2), 4)) fi <- fitted(fit2, newdata = newdata, allow_new_levels = TRUE) expect_equal(dim(fi), c(2, 4)) fi <- fitted(fit2, dpar = "shape") expect_equal(dim(fi), c(nobs(fit2), 4)) expect_equal(fi[1, ], fi[2, ]) fi <- fitted(fit2, nlpar = "a") expect_equal(dim(fi), c(nobs(fit2), 4)) fi <- fitted(fit3, newdata = fit3$data[1:10, ]) expect_equal(dim(fi), c(10, 4)) fi <- fitted(fit4) expect_equal(dim(fi), c(nobs(fit4), 4, 4)) fi <- fitted(fit4, newdata = fit4$data[1, ]) expect_equal(dim(fi), c(1, 4, 4)) fi <- fitted(fit4, newdata = fit4$data[1, ], scale = "linear") expect_equal(dim(fi), c(1, 4, 3)) fi <- fitted(fit5) expect_equal(dim(fi), c(nobs(fit5), 4)) fi <- fitted(fit6) expect_equal(dim(fi), c(nobs(fit6), 4, 2)) expect_equal(dimnames(fi)[[3]], c("volume", "count")) }) test_that("fixef has reasonable ouputs", { fixef1 <- SM(fixef(fit1)) expect_equal(rownames(fixef1), c("Intercept", "sigma_Intercept", "Trt1", "Age", "volume", "Trt1:Age", "sigma_Trt1", "sAge_1", "moExp") ) fixef1 <- SM(fixef(fit1, pars = c("Age", "sAge_1"))) expect_equal(rownames(fixef1), c("Age", "sAge_1")) }) 
test_that("formula has reasonable ouputs", { expect_true(is.brmsformula(formula(fit1))) }) test_that("hypothesis has reasonable ouputs", { hyp <- hypothesis(fit1, c("Age > Trt1", "Trt1:Age = -1")) expect_equal(dim(hyp$hypothesis), c(2, 8)) expect_output(print(hyp), "(Age)-(Trt1) > 0", fixed = TRUE) expect_ggplot(plot(hyp, plot = FALSE)[[1]]) hyp <- hypothesis(fit1, "Intercept = 0", class = "sd", group = "visit") expect_true(is.numeric(hyp$hypothesis$Evid.Ratio[1])) expect_output(print(hyp), "class sd_visit:", fixed = TRUE) expect_ggplot(plot(hyp, ignore_prior = TRUE, plot = FALSE)[[1]]) hyp <- hypothesis(fit1, "0 > r_visit[4,Intercept]", class = "", alpha = 0.01) expect_equal(dim(hyp$hypothesis), c(1, 8)) expect_output(print(hyp, chars = NULL), "r_visit[4,Intercept]", fixed = TRUE) expect_output(print(hyp), "99%-CI", fixed = TRUE) hyp <- hypothesis( fit1, c("Intercept = 0", "Intercept + exp(Trt1) = 0"), group = "visit", scope = "coef" ) expect_equal(dim(hyp$hypothesis), c(8, 9)) expect_equal(hyp$hypothesis$Group[1], factor(1, levels = 1:4)) expect_error(hypothesis(fit1, "Intercept > x"), fixed = TRUE, "cannot be found in the model: \n'b_x'") expect_error(hypothesis(fit1, 1), "Argument 'hypothesis' must be a character vector") expect_error(hypothesis(fit2, "b_Age = 0", alpha = 2), "Argument 'alpha' must be a single value in [0,1]", fixed = TRUE) expect_error(hypothesis(fit3, "b_Age x 0"), "Every hypothesis must be of the form 'left (= OR < OR >) right'", fixed = TRUE) # test hypothesis.default method hyp <- hypothesis(as.data.frame(fit3), "bsp_meAgeAgeSD > sigma") expect_equal(dim(hyp$hypothesis), c(1, 8)) hyp <- hypothesis(fit3$fit, "bsp_meAgeAgeSD > sigma") expect_equal(dim(hyp$hypothesis), c(1, 8)) }) test_that("launch_shinystan has reasonable ouputs", { # requires running shiny which is not reasonable in automated tests }) test_that("log_lik has reasonable ouputs", { expect_equal(dim(log_lik(fit1)), c(ndraws(fit1), nobs(fit1))) expect_equal(dim(logLik(fit1)), 
c(ndraws(fit1), nobs(fit1))) expect_equal(dim(log_lik(fit2)), c(ndraws(fit2), nobs(fit2))) }) test_that("loo has reasonable outputs", { skip_on_cran() loo1 <- SW(LOO(fit1, cores = 1)) expect_true(is.numeric(loo1$estimates)) expect_output(print(loo1), "looic") loo_compare1 <- SW(loo(fit1, fit1, cores = 1)) expect_equal(names(loo_compare1$loos), c("fit1", "fit1")) expect_equal(dim(loo_compare1$ic_diffs__), c(1, 2)) expect_output(print(loo_compare1), "'fit1':") expect_is(loo_compare1$diffs, "compare.loo") loo2 <- SW(loo(fit2, cores = 1)) expect_true(is.numeric(loo2$estimates)) loo3 <- SW(loo(fit3, cores = 1)) expect_true(is.numeric(loo3$estimates)) loo3 <- SW(loo(fit3, pointwise = TRUE, cores = 1)) expect_true(is.numeric(loo3$estimates)) loo4 <- SW(loo(fit4, cores = 1)) expect_true(is.numeric(loo4$estimates)) # fails because of too small effective sample size # loo5 <- SW(loo(fit5, cores = 1)) # expect_true(is.numeric(loo5$estimates)) loo6_1 <- SW(loo(fit6, cores = 1)) expect_true(is.numeric(loo6_1$estimates)) loo6_2 <- SW(loo(fit6, cores = 1, newdata = fit6$data)) expect_true(is.numeric(loo6_2$estimates)) loo_compare <- loo_compare(loo6_1, loo6_2) expect_range(loo_compare[2, 1], -1, 1) }) test_that("loo_subsample has reasonable outputs", { skip_on_cran() loo2 <- SW(loo_subsample(fit2, observations = 30)) expect_true(is.numeric(loo2$estimates)) expect_equal(nrow(loo2$pointwise), 30) expect_output(print(loo2), "looic") }) test_that("loo_R2 has reasonable outputs", { skip_on_cran() R2 <- SW(loo_R2(fit1)) expect_equal(dim(R2), c(1, 4)) R2 <- SW(loo_R2(fit2, summary = FALSE)) expect_equal(dim(R2), c(ndraws(fit1), 1)) }) test_that("loo_linpred has reasonable outputs", { skip_on_cran() llp <- SW(loo_linpred(fit1)) expect_equal(length(llp), nobs(fit1)) expect_error(loo_linpred(fit4), "Method 'loo_linpred'") llp <- SW(loo_linpred(fit2, scale = "response", type = "var")) expect_equal(length(llp), nobs(fit2)) }) test_that("loo_predict has reasonable outputs", { skip_on_cran() 
llp <- SW(loo_predict(fit1)) expect_equal(length(llp), nobs(fit1)) newdata <- data.frame( Age = 0, visit = c("a", "b"), Trt = 0, count = 20, patient = 1, Exp = 2, volume = 0 ) llp <- SW(loo_predict( fit1, newdata = newdata, type = "quantile", probs = c(0.25, 0.75), allow_new_levels = TRUE )) expect_equal(dim(llp), c(2, nrow(newdata))) llp <- SW(loo_predict(fit4)) expect_equal(length(llp), nobs(fit4)) }) test_that("loo_predictive_interval has reasonable outputs", { skip_on_cran() llp <- SW(loo_predictive_interval(fit3)) expect_equal(dim(llp), c(nobs(fit3), 2)) }) test_that("loo_model_weights has reasonable outputs", { skip_on_cran() llw <- SW(loo_model_weights(fit1, fit1)) expect_is(llw[1:2], "numeric") expect_equal(names(llw), c("fit1", "fit1")) }) test_that("model.frame has reasonable ouputs", { expect_equal(model.frame(fit1), fit1$data) }) test_that("model_weights has reasonable ouputs", { mw <- model_weights(fit1, fit1, weights = "waic") expect_equal(names(mw), c("fit1", "fit1")) # fails with MKL on CRAN for unknown reasons # expect_equal(mw, setNames(c(0.5, 0.5), c("fit1", "fit1"))) }) test_that("ndraws and friends have reasonable ouputs", { expect_equal(ndraws(fit1), 25) expect_equal(nchains(fit1), 1) expect_equal(niterations(fit1), 25) }) test_that("ngrps has reasonable ouputs", { expect_equal(ngrps(fit1), list(visit = 4)) expect_equal(ngrps(fit2), list(patient = 10)) }) test_that("nobs has reasonable ouputs", { expect_equal(nobs(fit1), nobs) }) test_that("nsamples has reasonable ouputs", { expect_equal(SW(nsamples(fit1)), 25) expect_equal(SW(nsamples(fit1, subset = 10:1)), 10) expect_equal(SW(nsamples(fit1, incl_warmup = TRUE)), 75) }) test_that("pairs has reasonable outputs", { expect_s3_class(SW(pairs(fit1, variable = variables(fit1)[1:3])), "bayesplot_grid") }) test_that("plot has reasonable outputs", { expect_silent(p <- plot(fit1, plot = FALSE)) expect_silent(p <- plot(fit1, variable = "^b", regex = TRUE, plot = FALSE)) expect_silent(p <- plot(fit1, 
variable = "^sd", regex = TRUE, plot = FALSE)) expect_error(plot(fit1, variable = "123")) }) test_that("post_prob has reasonable ouputs", { # only test error messages for now expect_error(post_prob(fit1, fit2, model_names = "test1"), "Number of model names is not equal to the number of models") }) test_that("posterior_average has reasonable outputs", { pnames <- c("b_Age", "nu") draws <- posterior_average(fit1, fit1, variable = pnames, weights = c(0.3, 0.7)) expect_equal(dim(draws), c(ndraws(fit1), 2)) expect_equal(names(draws), pnames) weights <- rexp(3) draws <- brms:::SW(posterior_average( fit1, fit2, fit3, variable = "nu", weights = rexp(3), missing = 1, ndraws = 10 )) expect_equal(dim(draws), c(10, 1)) expect_equal(names(draws), "nu") }) test_that("posterior_samples has reasonable outputs", { draws <- SW(posterior_samples(fit1)) expect_equal(dim(draws), c(ndraws(fit1), length(variables(fit1)))) expect_equal(names(draws), variables(fit1)) expect_equal(names(SW(posterior_samples(fit1, pars = "^b_"))), c("b_Intercept", "b_sigma_Intercept", "b_Trt1", "b_Age", "b_volume", "b_Trt1:Age", "b_sigma_Trt1")) # test default method draws <- SW(posterior_samples(fit1$fit, "^b_Intercept$")) expect_equal(dim(draws), c(ndraws(fit1), 1)) }) test_that("posterior_summary has reasonable outputs", { draws <- posterior_summary(fit1, variable = "^b_", regex = TRUE) expect_equal(dim(draws), c(7, 4)) }) test_that("posterior_interval has reasonable outputs", { expect_equal(dim(posterior_interval(fit1)), c(length(variables(fit1)), 2)) }) test_that("posterior_predict has reasonable outputs", { expect_equal(dim(posterior_predict(fit1)), c(ndraws(fit1), nobs(fit1))) }) test_that("posterior_linpred has reasonable outputs", { expect_equal(dim(posterior_linpred(fit1)), c(ndraws(fit1), nobs(fit1))) }) test_that("pp_average has reasonable outputs", { ppa <- pp_average(fit1, fit1, weights = "waic") expect_equal(dim(ppa), c(nobs(fit1), 4)) ppa <- pp_average(fit1, fit1, weights = c(1, 4)) 
expect_equal(attr(ppa, "weights"), c(fit1 = 0.2, fit1 = 0.8)) ns <- c(fit1 = ndraws(fit1) / 5, fit1 = 4 * ndraws(fit1) / 5) expect_equal(attr(ppa, "ndraws"), ns) }) test_that("pp_check has reasonable outputs", { expect_ggplot(pp_check(fit1)) expect_ggplot(pp_check(fit1, newdata = fit1$data[1:10, ])) expect_ggplot(pp_check(fit1, "stat", ndraws = 5)) expect_ggplot(pp_check(fit1, "error_binned")) pp <- pp_check(fit1, "ribbon_grouped", group = "visit", x = "Age") expect_ggplot(pp) pp <- pp_check(fit1, type = "violin_grouped", group = "visit", newdata = fit1$data[1:10, ]) expect_ggplot(pp) pp <- SW(pp_check(fit1, type = "loo_pit", cores = 1)) expect_ggplot(pp) # ppd plots work expect_ggplot(pp_check(fit1, prefix = "ppd")) # reduce test time on CRAN skip_on_cran() expect_ggplot(pp_check(fit3)) expect_ggplot(pp_check(fit2, "ribbon", x = "Age")) expect_error(pp_check(fit2, "ribbon", x = "x"), "Variable 'x' could not be found in the data") expect_error(pp_check(fit1, "wrong_type")) expect_error(pp_check(fit2, "violin_grouped"), "group") expect_error(pp_check(fit1, "stat_grouped", group = "g"), "Variable 'g' could not be found in the data") expect_ggplot(pp_check(fit4)) expect_ggplot(pp_check(fit5)) expect_error(pp_check(fit4, "error_binned"), "Type 'error_binned' is not available") }) test_that("posterior_epred has reasonable outputs", { expect_equal(dim(posterior_epred(fit1)), c(ndraws(fit1), nobs(fit1))) # test that point_estimate produces identical draws pe <- posterior_epred(fit1, point_estimate = "median", ndraws_point_estimate = 2) expect_equal(nrow(pe), 2) expect_true(all(pe[1, ] == pe[2, ])) }) test_that("pp_mixture has reasonable outputs", { expect_equal(dim(pp_mixture(fit5)), c(nobs(fit5), 4, 2)) expect_error(pp_mixture(fit1), "Method 'pp_mixture' can only be applied to mixture models" ) }) test_that("predict has reasonable outputs", { pred <- predict(fit1) expect_equal(dim(pred), c(nobs(fit1), 4)) expect_equal(colnames(pred), c("Estimate", "Est.Error", "Q2.5", 
"Q97.5")) pred <- predict(fit1, ndraws = 10, probs = c(0.2, 0.5, 0.8)) expect_equal(dim(pred), c(nobs(fit1), 5)) newdata <- data.frame( Age = c(0, -0.2), visit = c(1, 4), Trt = c(1, 0), count = c(2, 10), patient = c(1, 42), Exp = c(1, 2), volume = 0 ) pred <- predict(fit1, newdata = newdata) expect_equal(dim(pred), c(2, 4)) newdata$visit <- c(1, 6) pred <- predict(fit1, newdata = newdata, allow_new_levels = TRUE) expect_equal(dim(pred), c(2, 4)) # predict NA responses in ARMA models df <- fit1$data[1:10, ] df$count[8:10] <- NA pred <- predict(fit1, newdata = df, ndraws = 1) expect_true(!anyNA(pred[, "Estimate"])) pred <- predict(fit2) expect_equal(dim(pred), c(nobs(fit2), 4)) pred <- predict(fit2, newdata = newdata, allow_new_levels = TRUE) expect_equal(dim(pred), c(2, 4)) # check if grouping factors with a single level are accepted newdata$patient <- factor(2) pred <- predict(fit2, newdata = newdata) expect_equal(dim(pred), c(2, 4)) pred <- predict(fit4) expect_equal(dim(pred), c(nobs(fit4), 4)) expect_equal(colnames(pred), paste0("P(Y = ", 1:4, ")")) pred <- predict(fit4, newdata = fit4$data[1, ]) expect_equal(dim(pred), c(1, 4)) pred <- predict(fit5) expect_equal(dim(pred), c(nobs(fit5), 4)) newdata <- fit5$data[1:5, ] newdata$patient <- "a" pred <- predict(fit5, newdata, allow_new_levels = TRUE, sample_new_levels = "old_levels") expect_equal(dim(pred), c(5, 4)) pred <- predict(fit5, newdata, allow_new_levels = TRUE, sample_new_levels = "gaussian") expect_equal(dim(pred), c(5, 4)) }) test_that("predictive_error has reasonable outputs", { expect_equal(dim(predictive_error(fit1)), c(ndraws(fit1), nobs(fit1))) }) test_that("print has reasonable outputs", { expect_output(SW(print(fit1)), "Group-Level Effects:") }) test_that("prior_draws has reasonable outputs", { prs1 <- prior_draws(fit1) prior_names <- c( "Intercept", "b", paste0("simo_moExp1[", 1:4, "]"), "bsp", "bs", "sds_sAge", "b_sigma", "Intercept_sigma", "nu", "sd_visit", "cor_visit" ) 
expect_equal(colnames(prs1), prior_names) prs2 <- prior_draws(fit1, variable = "b_Trt1") expect_equal(dimnames(prs2), list(as.character(1:ndraws(fit1)), "b_Trt1")) expect_equal(sort(prs1$b), sort(prs2$b_Trt)) # test default method prs <- prior_draws(fit1$fit, variable = "^sd_visit", regex = TRUE) expect_equal(names(prs), "prior_sd_visit") }) test_that("prior_summary has reasonable outputs", { expect_true(is(prior_summary(fit1), "brmsprior")) }) test_that("ranef has reasonable outputs", { ranef1 <- SM(ranef(fit1)) expect_equal(dim(ranef1$visit), c(nvisits, 4, 2)) ranef1 <- SM(ranef(fit1, pars = "Trt1")) expect_equal(dimnames(ranef1$visit)[[3]], "Trt1") ranef1 <- SM(ranef(fit1, groups = "a")) expect_equal(length(ranef1), 0L) ranef2 <- SM(ranef(fit2, summary = FALSE)) expect_equal(dim(ranef2$patient), c(ndraws(fit2), npatients, 2)) }) test_that("residuals has reasonable outputs", { res1 <- SW(residuals(fit1, type = "pearson", probs = c(0.65))) expect_equal(dim(res1), c(nobs(fit1), 3)) newdata <- cbind(epilepsy[1:10, ], Exp = rep(1:5, 2), volume = 0) res2 <- residuals(fit1, newdata = newdata) expect_equal(dim(res2), c(10, 4)) newdata$visit <- rep(1:5, 2) res3 <- residuals(fit1, newdata = newdata, allow_new_levels = TRUE) expect_equal(dim(res3), c(10, 4)) res4 <- residuals(fit2) expect_equal(dim(res4), c(nobs(fit2), 4)) expect_error(residuals(fit4), "Predictive errors are not defined") res6 <- residuals(fit6) expect_equal(dim(res6), c(nobs(fit6), 4, 2)) expect_equal(dimnames(res6)[[3]], c("volume", "count")) }) test_that("stancode has reasonable outputs", { scode <- stancode(fit1) expect_true(is.character(stancode(fit1))) expect_match(stancode(fit1), "generated quantities") expect_identical(scode, fit1$model) # test that stancode can be updated scode <- stancode(fit2, threads = threading(1)) expect_match(scode, "reduce_sum(partial_log_lik_lpmf,", fixed = TRUE) }) test_that("standata has reasonable outputs", { expect_equal(sort(names(standata(fit1))), sort(c("N", "Y", 
"Kar", "Kma", "J_lag", "K", "Kc", "X", "Ksp", "Imo", "Xmo_1", "Jmo", "con_simo_1", "Z_1_1", "Z_1_2", "nb_1", "knots_1", "Zs_1_1", "Ks", "Xs", "offsets", "K_sigma", "Kc_sigma", "X_sigma", "J_1", "N_1", "M_1", "NC_1", "prior_only")) ) expect_equal(sort(names(standata(fit2))), sort(c("N", "Y", "weights", "C_1", "K_a", "X_a", "Z_1_a_1", "K_b", "X_b", "Z_1_b_2", "J_1", "N_1", "M_1", "NC_1", "prior_only")) ) }) test_that("mcmc_plot has reasonable outputs", { expect_ggplot(mcmc_plot(fit1)) expect_ggplot(mcmc_plot(fit1, variable = "^b", regex = TRUE)) expect_ggplot(SM(mcmc_plot(fit1, type = "trace", variable = "^b_", regex = TRUE))) expect_ggplot(mcmc_plot(fit1, type = "hist", variable = "^sd_", regex = TRUE)) expect_ggplot(mcmc_plot(fit1, type = "dens")) expect_ggplot(mcmc_plot(fit1, type = "scatter", variable = variables(fit1)[2:3])) expect_ggplot(SW(mcmc_plot(fit1, type = "rhat", variable = "^b_", regex = TRUE))) expect_ggplot(SW(mcmc_plot(fit1, type = "neff"))) expect_ggplot(mcmc_plot(fit1, type = "acf")) expect_silent(p <- mcmc_plot(fit1, type = "nuts_divergence")) expect_error(mcmc_plot(fit1, type = "density"), "Invalid plot type") expect_error(mcmc_plot(fit1, type = "hex"), "Exactly 2 parameters must be selected") }) test_that("summary has reasonable outputs", { summary1 <- SW(summary(fit1, priors = TRUE)) expect_true(is.data.frame(summary1$fixed)) expect_equal(rownames(summary1$fixed), c("Intercept", "sigma_Intercept", "Trt1", "Age", "volume", "Trt1:Age", "sigma_Trt1", "sAge_1", "moExp")) expect_equal(colnames(summary1$fixed), c("Estimate", "Est.Error", "l-95% CI", "u-95% CI", "Rhat", "Bulk_ESS", "Tail_ESS")) expect_equal(rownames(summary1$random$visit), c("sd(Intercept)", "sd(Trt1)", "cor(Intercept,Trt1)")) expect_output(print(summary1), "Population-Level Effects:") expect_output(print(summary1), "Priors:") summary5 <- SW(summary(fit5, robust = TRUE)) expect_output(print(summary5), "sigma1") expect_output(print(summary5), "theta1") summary6 <- SW(summary(fit6)) 
expect_output(print(summary6), "sdgp") }) test_that("update has reasonable outputs", { # Do not actually refit the model as is causes CRAN checks to fail. # Some tests are commented out as they fail when updating Stan code # of internal example models because of Stan code mismatches. Refitting # these example models is slow especially when done repeatedly and # leads the git repo to blow up eventually due the size of the models. up <- update(fit1, testmode = TRUE) expect_true(is(up, "brmsfit")) new_data <- data.frame( Age = rnorm(18), visit = rep(c(3, 2, 4), 6), Trt = rep(0:1, 9), count = rep(c(5, 17, 28), 6), patient = 1, Exp = 4, volume = 0 ) up <- update(fit1, newdata = new_data, save_pars = save_pars(group = FALSE), testmode = TRUE) expect_true(is(up, "brmsfit")) expect_equal(attr(up$data, "data_name"), "new_data") # expect_equal(attr(up$ranef, "levels")$visit, c("2", "3", "4")) # expect_true("r_1_1" %in% up$exclude) expect_error(update(fit1, data = new_data), "use argument 'newdata'") up <- update(fit1, formula = ~ . + I(exp(Age)), testmode = TRUE, prior = set_prior("normal(0,10)")) expect_true(is(up, "brmsfit")) up <- update(fit1, ~ . - Age + factor(Age), testmode = TRUE) expect_true(is(up, "brmsfit")) up <- update(fit1, formula = ~ . + I(exp(Age)), newdata = new_data, sample_prior = FALSE, testmode = TRUE) expect_true(is(up, "brmsfit")) expect_error(update(fit1, formula. = ~ . + wrong_var), "New variables found: 'wrong_var'") up <- update(fit1, save_pars = save_pars(group = FALSE), testmode = TRUE) expect_true(is(up, "brmsfit")) # expect_true("r_1_1" %in% up$exclude) up <- update(fit3, save_pars = save_pars(latent = FALSE), testmode = TRUE) expect_true(is(up, "brmsfit")) # expect_true("Xme_1" %in% up$exclude) up <- update(fit2, algorithm = "fullrank", testmode = TRUE) expect_true(is(up, "brmsfit")) # expect_equal(up$algorithm, "fullrank") up <- update(fit2, formula. = bf(. 
~ ., a + b ~ 1, nl = TRUE), testmode = TRUE) expect_true(is(up, "brmsfit")) up <- update(fit2, formula. = bf(count ~ a + b, nl = TRUE), testmode = TRUE) expect_true(is(up, "brmsfit")) up <- update(fit3, family = acat(), testmode = TRUE) expect_true(is(up, "brmsfit")) up <- update(fit3, bf(~., family = acat()), testmode = TRUE) expect_true(is(up, "brmsfit")) }) test_that("VarCorr has reasonable outputs", { vc <- VarCorr(fit1) expect_equal(names(vc), c("visit")) Names <- c("Intercept", "Trt1") expect_equal(dimnames(vc$visit$cov)[c(1, 3)], list(Names, Names)) vc <- VarCorr(fit2) expect_equal(names(vc), c("patient")) expect_equal(dim(vc$patient$cor), c(2, 4, 2)) vc <- VarCorr(fit2, summary = FALSE) expect_equal(dim(vc$patient$cor), c(ndraws(fit2), 2, 2)) expect_equal(dim(VarCorr(fit6)$residual__$sd), c(1, 4)) vc <- VarCorr(fit5) expect_equal(dim(vc$patient$sd), c(2, 4)) }) test_that("variables has reasonable ouputs", { expect_true(all( c("b_Intercept", "bsp_moExp", "ar[1]", "cor_visit__Intercept__Trt1", "nu", "simo_moExp1[2]", "r_visit[4,Trt1]", "s_sAge_1[8]", "prior_sd_visit", "prior_cor_visit", "lp__") %in% variables(fit1) )) expect_true(all( c("b_a_Intercept", "b_b_Age", "sd_patient__b_Intercept", "cor_patient__a_Intercept__b_Intercept", "r_patient__a[1,Intercept]", "r_patient__b[4,Intercept]", "prior_b_a") %in% variables(fit2) )) expect_true(all( c("lscale_volume_gpAgeTrt0", "lscale_volume_gpAgeTrt1") %in% variables(fit6) )) expect_equal(variables(fit3), SW(parnames(fit3))) }) test_that("vcov has reasonable outputs", { expect_equal(dim(vcov(fit1)), c(9, 9)) expect_equal(dim(vcov(fit1, cor = TRUE)), c(9, 9)) }) test_that("waic has reasonable outputs", { waic1 <- SW(WAIC(fit1)) expect_true(is.numeric(waic1$estimates)) # fails on MKL for unknown reasons # expect_equal(waic1, SW(waic(fit1))) fit1 <- SW(add_criterion(fit1, "waic")) expect_true(is.numeric(fit1$criteria$waic$estimates)) # fails on MKL for unknown reasons # expect_equal(waic(fit1), fit1$criteria$waic) 
waic_compare <- SW(waic(fit1, fit1)) expect_equal(length(waic_compare$loos), 2) expect_equal(dim(waic_compare$ic_diffs__), c(1, 2)) waic2 <- SW(waic(fit2)) expect_true(is.numeric(waic2$estimates)) waic_pointwise <- SW(waic(fit2, pointwise = TRUE)) expect_equal(waic2, waic_pointwise) expect_warning(compare_ic(waic1, waic2), "Model comparisons are likely invalid") waic4 <- SW(waic(fit4)) expect_true(is.numeric(waic4$estimates)) }) test_that("diagnostic convenience functions have reasonable outputs", { expect_true(is.data.frame(log_posterior(fit1))) expect_true(is.data.frame(nuts_params(fit1))) expect_true(is.numeric(rhat(fit1))) expect_true(is.numeric(SW(neff_ratio(fit1)))) }) test_that("contrasts of grouping factors are not stored #214", { expect_true(is.null(attr(fit1$data$patient, "contrasts"))) }) brms/tests/testthat/tests.data-helpers.R0000644000176200001440000000215314424476034020065 0ustar liggesuserscontext("Tests for data helper functions") test_that("validate_newdata handles factors correctly", { fit <- brms:::rename_pars(brms:::brmsfit_example1) fit$data$fac <- factor(sample(1:3, nrow(fit$data), TRUE)) newdata <- fit$data[1:5, ] expect_silent(brms:::validate_newdata(newdata, fit)) newdata$visit <- 1:5 expect_error(brms:::validate_newdata(newdata, fit), "Levels '5' of grouping factor 'visit' cannot") newdata$fac <- 1:5 expect_error(brms:::validate_newdata(newdata, fit), "New factor levels are not allowed") }) test_that("validate_data returns correct model.frames", { dat <- data.frame(y = 1:5, x = 1:5, z = 6:10, g = 5:1) bterms <- brmsterms(y ~ as.numeric(x) + (as.factor(z) | g), family = gaussian()) mf <- brms:::validate_data(dat, bterms = bterms) expect_true(all(c("x", "z") %in% names(mf))) bterms <- brmsterms(y ~ 1 + (1|g/x/z), family = gaussian()) mf <- brms:::validate_data(dat, bterms = bterms) expect_equal(mf[["g:x"]], paste0(dat$g, "_", dat$x)) expect_equal(mf[["g:x:z"]], paste0(dat$g, "_", dat$x, "_", dat$z)) }) 
brms/tests/testthat.R0000644000176200001440000000006414160105076014342 0ustar liggesuserslibrary(testthat) library(brms) test_check("brms") brms/vignettes/0000755000176200001440000000000014504270214013224 5ustar liggesusersbrms/vignettes/me_rent2.pdf0000644000176200001440000005450313252451326015445 0ustar liggesusers%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170908165032) /ModDate (D:20170908165032) /Title (R Graphics Output) /Producer (R 3.4.1) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 18771 /Filter /FlateDecode >> stream xMnQWJTv嫗OM_q~jؾr2q5x7 k m5q[պ}nO.W%~_62'/KҴ|-o2MGeWZ)j߶=vkE{?3Zj[\_uR5~_Cq_5bʕb_&;Ugd⭆?oq__?N|+;}gk3q~_ `=e|b2.;V-UHLg;ɓMk ɾwĽfg+.bXQ/Yek+}K{-k,[u}|/lw pq;lva=2"ުl"܋fm]:{C{g$/9Vw~;BqozӢq%W2%q[lOX.*v־pwae_վުRZcope|ִ4q_n""IϣL'= X֬z|Pz.w~}t\hV&&~ՂwbkF{9`e7틤L{쁔߲oȼxK n/}r2ߢgǽĭ1qh+}P[h<$މx[4}(W]~NJѺ8T^ NBݚ v/}!Caen;1tb]MIa)vb\y(1,D:~MbYuj_^kjLLIbƃ24 ˣP\-&0mڴ/@-x,2v2ia%2e|V͆vSqYQ#ӰD+=5-_ l#GgEmq_[[Qtٷ8eCkKxL܆)v_mI[hR 4Ta\9m}_bFB_~eOfkb5WaA#CzѮC;v4bOpv.,hL'^/a@#;ct3녥~lәa蓭tm`˶dX:6mO7E>Ƕt[4+)୆m0|9l>FV ;:W8 d+"84fpL“=-"Xe+,} + VJWr_JUւ2Vi2>y #I\1LiҎ$A,M"2#fDZ}QvB?ѤO[Z,a4MvnFtZOLjRnyb3[!TivԌDl АtrLАs!xp)_а ?AЖs96d_hA͐s3o8}֥qjo{˵Wah|\CA{M` KkJsPWB dxZ|fx՛\0xM6^r{+S|7`/ ^ւwcmZ^lqs+v<4^׸;[~ 41E;dnq;ʹ[eXL b8 \!nXG[*%L~IPJ6hE4R̜Mbs)8T72ŕlҜ~oᰩV6uMztoFEQ`VI# A2ѩ_0ġmߵcDjt_}al^Ƅ̆}o}\ؑ`P2fuZ 3 " c:$P$a1$hv0$eUI 3 X14ǓNL qi, Z1> 3Rn2 3DjM0MlGc6A).Kl6lLLc6&. 
F̌NP&?NeSj.-wShۇ;2uRro;52C)ۗ o0v[܋a j쇼0vݎ8o%D*~V:n`ؾ4l!.Iv9j2l Pl%&B>R@ٺ>:u@Ú<.?,i-=g㲅4/$s٩l#lg\e2 H+A[>:8GPEVqɨ00R@+d+]O|?V+2Xh؂g' :,eE^ u<^Ejl1Y'.f{{me:GU_o Lۀi2Y.&C,2=e+\:jKTXer?oc]&YMu%^ .[~`*6'r][^X{;űy^:vAp;lRJ leb&[w(b@z -VL@BZ7[>Gz/~[c,ru`cx HaF0ɪ_@_vp@!B[D#?P@}g?ZAb R?`˰yS*A$ÿ8bb&)ʤ DB,&"3ay8J [L- 2Zx<(xGݬrxro'eP15_k2QXǢcڢFL0/BGd Yd ێA$7,m؎Hdಠh[2N/DZxA'y"+3p;Ar;D',--kkز m[ȶ.CB2$2{~$ +O'Va¢Yn2+O^V$U$QW+ǟ Yr-yN"}.b⑘⒐\2.R^".$&h!B*J"G3 -ϛ aMXԤ׃HD[D@*"`^F8͎G/"2пLJ 0*A11fC"qx:}o FH()$!F }.(2GH< ijHEm^ޙV;4 & И'\JB%+d`rܫ niãcGaNY6`cK!kt{+,b ]b玸=ii=xh3h"/ iDw l@=KWpV~&Π MQA=0`PŸY=$aY#@b)F`e7Ib"-2u]@|o) ҂Dq\P뀅4+HDt,Gv-8k_/8 gqPƞec;˸~rK1>d ;[^ G{$ܘ14͖+e7%Ԁ1nU'ܔBcL)Ɠ0Sbt)sXnۄ"R&l/LQ@0St'HaDj3` fqvˀ<Œ7%P/C?e/Bxr^+׍-`2~a /M|B2~/Oc!`E>a.oK}}/[7TeVlYA^*Cwish(;";բ3WFVjg>`R Xi -VJ, ,xn?aelL։ʖFXn'tdɤē鉡“&I5dzNd+MhCK>uʖ,[%s}Vk7 ʖ Xn+u`ŖjX1t.t.BK..LzV+n) S#dL7B9<ŶSu\WW2=o>dcΫ A0/tY?.rX TW(BvT[n+Dc'gbE @-W=7<QmÜDLϸ CrnY E $<\:K"Sy;!qHNE $- !*wMQ,A :! :! SIH8.2HH ?. !BZ<Β,c!UƱED5)zeoƏ4nҶ jE&!UIHfQ@@ &D":2ID&_Y|!ʍ""񀈪rXIDqQU =A"26QͯJ*Ђ0Y?T.{MD$7~MD5VsPeEH=HBE $5Q-"U[HD8=cnzEB]d2 *w(1$Bj{P5#$΀(3$G)þoLX5\ADK0:6J"D9$FDR}S :ʝoz;6 YGïhxMeZ@ii8T3gxBhӒN9[d뇝EKEKaΊ;yd[`'יN'jܱeX6ozq:2)o vr v '3G$:1=I'( v/vr޴"*Rd;% +ЂPd vj}N`C9$e9IMd|SwEo@L;~@ +( P@gQg%d͗pQ HK-Xs 7[W S\Dd¤5Y&:DId<诌XL`"o7h݌F+-`.1̼.j=b$1\O3hJRx&b 1tdJP5f 2#>ьinULHAd4%#jU6B6LVb&/LIL+%F#ɋ"&S҇l+E& шEzNٸ`JwZUTu$F#LFb4 B&єԵ垮/fdB1jl)fBs2[d!/MgUe|1\ۓt̯ǝG̦1Bbr22$fSZ̦|11[%n_/:sf `n fb6[-)fkm'HYn]gղ*idd1[+_bVmy(,^VJP ٮU[dTb2S"ٲ{{3fb֣>Й{I]L KۄJT lt i KFs+yBu XiɮdLG?n(V\)[ cڥPi"KwHVA NWp'dDaEQmPdWnƚHkȬh;vF,%ZYsS\RdĆHJyp2j#EofsP'hg Pẁ =jl1PgE;FB^f($z!rQy_cCfM5zO&]= `  Ms726^GбWwgp` Ltj'uyL6jzab`a"3cz*aݯ0;>7oƾg.*4z,ёu>poi>{Fb`M"gxc@Y唴ă#-C@#b hăv0Lhz|)w?%^!fB#HcL1 L?oh|P/h]u0}Wh&vk@#[ N!f hl4Ns[u"Uu AT\_uCx\4? 
4(9И<}TdD9J3% Gq QT5_O;-3̓ʀᓐCțCJ~Ot?'?O:?%9ֽm'Ο{K5woAe&?GtHdP Ah4i?Ń]:vmߕÊ^tly+v9tl`bw+:[սJ)VhNVN:t?צH(tM'=|Gb;*M}8ïg}`EŒA6.Xi)XӃol],Vt"5\Wו8ĊmsrIۤ+JY)q>hE!VLNRV(Ǫ$+B3={IA]WtljATo\$段@G_l-JeXcʁCR茪/Vv }U"SU߬ά5 yvgvfELU7߉m̥+ M~GÅ xlp X@YU(lݲLj;5YDT;C!Nusw*$*V~lV׻qȝ;$;;ΐɝ!;zqʉŝQLT+qgt%w2'SOܩ#$w&ogט qʣŝ*gwq+9SęL}g7-SĝC ɝJw58S~Tq9 .%ws;׻W`Δ8UDh]r;k\(#ř#iˑ3GVxnr֡zcGg3: S0DL ͛tAԼf0!ˆA:0ٕ&t,!i.D :1+QL}r :'{ uń & :E, k1aNbB2(cw751!s mx2񹻘`5:'VHw9h>*$]*$,h]0!D$ D!aULdbCl!aG+qE A@1VaBT˛HS&Df>EP9sqČ6'BB&p[WɈ_1g¦acj~6 hS/}%Gvercp㍭ב{Wǚ8qd}v$;Z&&umHe#U>_&ג#a]36C\]H!x?pdٌ"ȡNQ54֒ lA|qt.n$Gv6yGFCrd'7#kI ա׋#Un/켿Hq8kN*92NNGj# + qő',lB#Ȯy]<4D&He#G+*WT8XOF5u'ϕB3d^iA|n"g,ऻ+w0%;;E;u1{IF_gwJA|H׽5CһP|BI:ǃC߃~?Tg%*2y>o}($yCɫNQ`M)`|]lyYLӣ?OJz0>ca#%9u_Ro۪5/KȎ+*Շuz>|3ks*OȎ-ZdvOȈB,AU$Z;R=ّMW}yԜwE,ٱ?P {ر5ϓu.9XbŪ"48lGVrbǘ2_(!V\jw,vL2w؍8;=Ǹ؍8%‚Ȏ#|dG'=^bA;ʖ5=#E/XrMɎ>!7:aɩv䔏8"`ǙE3uNweT8UQ ';LV#;NeB2ɎdǩdG88ɎS#ɎS ɎSdǩd)"q&eN'qj^q5q˜ vj8Mv4S>dLL)2`C1~?qÊ7ؓ8$+N<ɊSMȊՆ|녕?`ǙvY<`Ǚ58aljr`YĎ0qSȊ=;ĊzΊZ idE/FΩ i^)+N꧆ͤ6S3jDyͽgu7 +xj[xvpFSʙqV .u`E%I`^g'b:c5qYdpdDp,;FFU׬;IFQ=&8؃Z`62ű0DmjG/F#N Ytdbĥ`Dk&cmAuGf1h6 B0ÿVg^kY3dƮDdos(gŌ#fSYsbơ,F2GC&31'3*/fdC0cTsP G 9>ĐEIFE?&bHgCvťɐ׊!^ŐG ĄdȦ,3noi2dʦEdŔMqk2%;S1cʟ*d˔``'ƔW-ɔ]Ydʮ3Y]4dJNd ߘrhh )Xcʲ d3f:XZcNm8v}ɘ|_-1W *oAJK֡CAz`LfRay +W0|'$h;9( #2}t8_M%e= F02uAf~Xi`0iX2S%j^z:|hNtجpd""=.epk Z?C4|yr,kL@y ɲQ0cȁpd,vA.88R4g.ɲqg #SȲ̉*_ȒExYvk-4 JvYVFp( WP#WIQk(þm$CͶM~ ˡ٧σH^_R4"=yT?Ez"hAz"KEڞ ى45;Wdg;!U.#YΤqh-dGҴa Ecɑ4gQ$d95N/MGN^9E2"M޼ K7d5!22AK~#ɯ (2\fQ$iRKF^/o$NF K "o$Hs@4-+42i;7i #Hs1H3I(R\D\Eu ϕ8K-+IʬiBs=I+AK$ϥ+'\! 4<"3?IjHooJ@ ϥY$8z \3IKI҂<{ΌF$M"iiG}@{@ }$Ŋd[H}$Py C}݇|=ʰy|Gs@<ۛ*4-/-}6qݫrX7xg!#:i.e c!b bDr'w5_fA`ս<=wXÃ9>N- AypF6]72IL7!V} dY F$n O9_OBJ9U$.Mvl]2>t.dL1 FLiRύ͢(2V 2VVH'FFܬ)irV"nޑa&cwcbjqxo0psqSl ̈́&H2и*?$q ɨE")ȾyIdId"$C$؇vL$A3A" L?hRI$sMy" @Y }Ni+I-::+S7Ks׭nK --d 0:?-:Jln5>9msI1R$#JܡdRdϯIiϲP$=N̶c R0,E=*d +D:.lX[!k0^k$+~)A(SY Ob',I: xBӒ;ԕR$-1=@ӄG%? KaI|Կ;]c= QӒtCW|#⻲QhI6 a|0MS\fjd129&&^<̅ۦtdIX+$*=6C[*4 m=' ZW m0AzLc숾.m `N`i;,o6jxSڦRkl%_m$^bD`ŪӑqZy3&M4 &BIq. 
'Tk*LzZ[([>'TkkD.dGH?~1uͩכKOj.z鶁5/ёb!%:]A\y}!:#/%@Fh~tJB'@ (:G EtLtEj:!=j3yW`h!>:$,% 壓4TT>K8O-!QlU4-I\-ry~cX[ͭ0JB{2@[-Y)&"ȱ7fWSw\:[;coB s@l LJj֔sut %pqʰ&io6272=T}^{kFzLd%@8Xi3R'c&D{ӔNLp^h3&uxPFGF6#ڌ s1 \ݴ*ҥTf[1` &2"HPGXm3<q,$\0?=4W xf >_[2>=2^Dnڢ_[LId7cKXQm? Z7=Y.gb`oyu rT|lUVe0{'H(,{}eq6Dda7\M:۹7WTYࢳBCqd]}cSt:)scSt6\x#ǦxufbܹKѾԲ(woU[~SZ>Gk.s{Sq@r 1m).<ȱVk`Uyt8 gDkte~ip9'RD  h?_kPAU8%bb!g Z}ph/NOF7}'6d3 O>kV30Z-m8!ʼn5<+/~cqV6h5r-N:&kqBDX_k{Y} ^cIؽ8E c+6܈}o辷Nwi8꾷hXu]AV!׍n추#7\سFBƺZ:\sR7!D8׍n Yĝ}^5b]rl?w6)/w>3[?mYx7y>ŏɞ[w퇋~AOŏ~w__}?rTܨ_?ooy5]gR yQxx/}t/#Oǽ-k}d"ONFr9P<΄)^wL;0\XDEqE%w]j){Ox@)^wC<+S?߽mol3ڏzqYTVۇ"Ğjgw?g|r}`v( e17_~~~IH^kIsysg =_ן?Zole _^|vo.ڟb؋qFfW_ /:;_\*w/o} ͹Lgß>9o?CL1+(^w~Fu'y3 3`;x<~<~~p>#~q>#~q>#~q~ǿ+J pendstream endobj 9 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 12 /ColorSpace 5 0 R /BitsPerComponent 8 /Length 45 /Interpolate true /Filter /FlateDecode >> stream x{DoǦK)Ҝ6VoҪLvpvSHTn2endstream endobj 3 0 obj << /Type /Pages /Kids [ 7 0 R ] /Count 1 /MediaBox [0 0 288 216] >> endobj 4 0 obj << /ProcSet [/PDF /Text /ImageC] /Font <> /XObject << /Im0 9 0 R >> /ExtGState << >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 10 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 11 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 10 0 R >> endobj 
xref 0 12 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000019378 00000 n 0000019461 00000 n 0000019609 00000 n 0000019642 00000 n 0000000212 00000 n 0000000292 00000 n 0000019136 00000 n 0000022337 00000 n 0000022432 00000 n trailer << /Size 12 /Info 1 0 R /Root 2 0 R >> startxref 22532 %%EOF brms/vignettes/brms_threading.Rmd0000644000176200001440000005611514464666420016705 0ustar liggesusers--- title: "Running brms models with within-chain parallelization" author: "Sebastian Weber & Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Running brms models with within-chain parallelization} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(ggplot2) library(brms) theme_set(theme_default()) ``` ```{r, fake-data-sim, include=FALSE, eval=TRUE} set.seed(54647) # number of observations N <- 1E4 # number of group levels G <- round(N / 10) # number of predictors P <- 3 # regression coefficients beta <- rnorm(P) # sampled covariates, group means and fake data fake <- matrix(rnorm(N * P), ncol = P) dimnames(fake) <- list(NULL, paste0("x", 1:P)) # fixed effect part and sampled group membership fake <- transform( as.data.frame(fake), theta = fake %*% beta, g = sample.int(G, N, replace=TRUE) ) # add random intercept by group fake <- merge(fake, data.frame(g = 1:G, eta = rnorm(G)), by = "g") # linear predictor fake <- transform(fake, mu = theta + eta) # sample Poisson data fake <- transform(fake, y = rpois(N, exp(mu))) # shuffle order of data rows to ensure even distribution of computational effort fake <- fake[sample.int(N, 
N),] # drop not needed row names rownames(fake) <- NULL ``` ```{r, model-poisson, include=FALSE} model_poisson <- brm( y ~ 1 + x1 + x2 + (1 | g), data = fake, family = poisson(), iter = 500, # short sampling to speedup example chains = 2, prior = prior(normal(0,1), class = b) + prior(constant(1), class = sd, group = g), backend = "cmdstanr", threads = threading(4), save_pars = save_pars(all = TRUE) ) ``` ```{r, benchmark, include=FALSE} # Benchmarks given model with cross-product of tuning parameters CPU # cores, grainsize and iterations. Models are run with either static # or non-static scheduler and initial values are set by default to 0 on the # unconstrained scale. Function returns a data-frame with the # cross-product of the tuning parameters and as result column the # respective runtime. benchmark_threading <- function(model, cores = 1, grainsize = 1, iter = 100, static = FALSE) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- list(extract_draw(sims, 1)) scaling_model <- update( model, refresh = 0, threads = threading(1, grainsize = grainsize[1], static = static), chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark <- function(cores, size, iter) { bench_fit <- update( scaling_model, warmup=0, iter = iter, chains = 1, seed = 1234, init = init, refresh = 0, save_warmup=TRUE, threads = threading(cores, grainsize = size, static = static), inv_metric=winfo$inv_metric[[1]], step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } cases <- expand.grid(cores = cores, grainsize = grainsize, iter = iter) res <- with(cases, mapply(run_benchmark, cores, grainsize, iter)) cbind(cases, as.data.frame(t(res))) } benchmark_reference <- function(model, iter=100, init=0) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- 
list(extract_draw(sims, 1)) ref_model <- update( model, refresh = 0, threads = NULL, chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark_ref <- function(iter_bench) { bench_fit <- update( ref_model, warmup=0, iter = iter_bench, chains = 1, seed = 1234, init = init, refresh = 0, inv_metric=winfo$inv_metric[[1]], step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } ref <- sapply(iter, run_benchmark_ref) ref <- cbind(as.data.frame(t(ref)), iter=iter) ref } extract_warmup_info <- function(bfit) { adapt <- lapply(rstan::get_adaptation_info(bfit$fit), strsplit, split="\\n") step_size <- lapply(adapt, function(a) as.numeric(strsplit(a[[1]][[1]], " = ")[[1]][2])) inv_metric <- lapply(adapt, function(a) as.numeric(strsplit(sub("^# ", "", a[[1]][[3]]), ", ")[[1]])) list(step_size=step_size, inv_metric=inv_metric) } extract_draw <- function(sims, draw) { lapply(sims, brms:::slice, dim = 1, i = draw, drop = TRUE) } ``` ## Introduction Full Bayesian inference is a computationally very demanding task and often we wish to run our models faster in shorter walltime. With modern computers we nowadays have multiple processors available on a given machine such that the use of running the inference in parallel will shorten the overall walltime. While between-chain parallelization is straightforward by merely launching multiple chains at the same time, the use of within-chain parallelization is more complicated in various ways. This vignette aims to introduce the user to within-chain parallelization with **brms**, since its efficient use depends on various aspects specific to the users model. 
## Quick summary Assuming you have a **brms** model which you wish to evaluate faster by using more cores per chain, for example: ```{r, eval=FALSE} fit_serial <- brm( count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), chains = 4, cores = 4, backend = "cmdstanr" ) ``` Then running this model with threading requires `cmdstanr` as backend and you can simply add threading support to an existing model with the `update` mechanism as: ```{r, eval=FALSE} fit_parallel <- update( fit_serial, chains = 2, cores = 2, backend = "cmdstanr", threads = threading(2) ) ``` The example above assumes that 4 cores are available which are best used without within-chain parallelization by running 4 chains in parallel. When using within chain parallelization it is still advisable to use just as many threads *in total* as you have CPU cores. It's thus sensible in this case to reduce the number of chains running in parallel to just 2, but allow each chain to use 2 threads. Obviously this will reduce the number of iterations in the posterior here as we assumed a fixed amount of 4 cores. - Only apply within-chain parallelization to large problems which take more than a few minutes at least to calculate. The `epilepsy` example above is actually too small to gain in speed (just a few seconds per chain on this machine). - Within-chain parallelization is less efficient than between-chain parallelization. So only use within-chain parallelism if more CPUs can be used to run the entire analysis. - Due to details of the model and data-set, speedups with more cores can be very limited. Not every model amends to within-chain parallelization and an empirical evaluation is in some cases advisable. - Enabling threading *usually* slows down any model to some extent and this slowdown must be offset by sufficient cores per chain in order to really gain in execution speed. 
- Doubling the execution speed with few cores is a lot easier than obtaining larger speedups with even more cores. - Models with computationally expensive likelihoods are easier to parallelize than less expensive likelihoods. For example, the Poisson distribution involves expensive $\log\Gamma$ functions whereas the normal likelihood is very cheap to calculate in comparison. - Models with many parameters (e.g., multilevel models) carry a large overhead when running in parallel. - With a larger overhead of the model, the likelihood must be sufficiently expensive such that the relative computational cost of likelihood to parallelization overhead is favorable. - Avoid using hyper-threading, that is, only use as many threads as you have physical cores available. - Ensure that the data is randomly sorted such that consecutive subsets of the data are roughly of the same computational effort. ## Within-chain parallelization The within-chain parallelization implemented in **brms** is based on the `reduce_sum` facility in Stan. The basic principle that `reduce_sum` uses is to split a large summation into arbitrary smaller partial sums. Due to the commutativity and associativity of the sum operation these smaller partial sums can be evaluated in any order and in parallel from one another. **brms** leverages `reduce_sum` to evaluate the log-likelihood of the model in parallel as for example $$ \begin{aligned} l(y|\theta) &= \sum_{i=1}^N l_i(y_i| \theta) \\ &= \sum_{i=1}^{S_1} l_i(y_i| \theta) + \sum_{i=S_1+1}^N l_i(y_i| \theta). \end{aligned} $$ As a consequence, the within-chain parallelization requires mutually independent log-likelihood terms which restricts its applicability to some degree. Furthermore, the within-chain parallelization is only applicable to the evaluation of the data likelihood while all other parts of the model, for example priors, will remain running serially. 
Thus, only a partial fraction of the entire Stan model will run in parallel which limits the potential speedup one may obtain. The theoretical speedup for a partially in parallel running program is described by [Amdahl‘s law](https://en.wikipedia.org/wiki/Amdahl%27s_law). For example, with 90% of the computational load running in parallel one can essentially double the execution speed with 2 cores while 8 cores may only speedup the program by at most 5x. How large the computational cost of the log-likelihood is in relation to the entire model is very dependent on the model of the user. In practice, the speedups are even smaller than the theoretical speedups. This is caused by the additional overhead implied by forming multiple smaller sums than just one large one. For example, for each partial sum formed the entire parameter vector $\theta$ has to be copied in memory for Stan to be able to calculate the gradient of the log-likelihood. Hence, with more partial sums, more copying is necessary as opposed to evaluating just one large sum. Whether the additional copying is indeed relevant depends on the computational cost of the log-likelihood of each term and the number of parameters. For a model with a computationally cheap normal log-likelihood, this effect is more important than for a model with a Poisson log-likelihood, and for multilevel models with many parameters more copying is needed than for simpler regression models. It may therefore be necessary to form sufficiently large partial sums to warrant an efficient parallel execution. The size of the partial sums is referred to as the `grainsize`, which is set to a reasonable default value. However, for some models this tuning parameter requires some attention from the user for optimal performance. Finally, it is important to note that by default the exact size and order of the partial sums is not stable as it is adjusted to the load of the system. 
As a result, exact numerical reproducibility is not guaranteed by default. In order to warrant the same size and order of the partial sums, the `static` option must be used and set to `TRUE`, which uses a deterministic scheduler for the parallel work. ## Example model As a toy demonstration, we use here a multilevel Poisson model. The model is a varying intercept model with $`r N`$ data observation which are grouped into $`r G`$ groups. Each data item has $`r P`$ continuous covariates. The simulation code for the fake data can be found in the appendix and it's first $10$ rows are: ```{r} kable(head(fake, 10), digits = 3) ``` The **brms** model fitting this data is: ```{r, eval=FALSE} <> ``` Here we have fixed the standard deviation of the between-group variation for the intercept to the true value of $1$ as used in the simulation. This is to avoid unfavorable geometry of the problem allowing us to concentrate on computational aspects alone. The Poisson likelihood is a relatively expensive likelihood due to the use of $\log\Gamma$ function as opposed to, for example, a normal likelihood which does is by far less expensive operations. Moreover, this example is chosen in order to demonstrate parallelization overhead implied by a large number of parameters. ## Managing parallelization overhead As discussed above, the key mechanism to run Stan programs with parallelization is to split the large sum over independent log likelihood terms into arbitrary smaller *partial sums*. Creating more *partial sums* allows to increase simultaneous parallel computations in a granular way, but at the same time additional overhead is introduced through the requirement to copy the entire parameter vector for each *partial sum* formed along with further overhead due to splitting up a single large task into multiple smaller ones. By default, **brms** will choose a sensible `grainsize` which defines how large a given *partial sum* will roughly be. 
The actual chunk size is automatically tuned whenever the default non-static scheduler is used, which is the recommended choice to start with. As noted before, only the static scheduler is giving fully deterministic results since the chunk size and order of partial sums will be the same during sampling. While we expect that the default `grainsize` in **brms** is reasonably good for many models, it can improve performance if one tunes the `grainsize` specifically to a given model and data-set. We suggest to increase successively the number of chunks a given data set is split into with the static scheduler and run this on a single core. This way one can control the number of *partial sum* accurately and monitor the execution time as it increases. These experiments are run with only a single chain and very short iteration numbers as we are not interested in the statistical results, but rather aim to be able to explore the tuning parameter space of the chunk size as quickly as possible. The number of iterations needed to get reliable runtime estimates for a given chunk size will depend on many details and the easiest way to determine this is to run this benchmark with multiple number of iterations. Whenever their results match approximately, then the iteration numbers are sufficient. In order to decrease the variation between runs, we also fix the random seed, initial value and the tuning parameters of the sampler (step size and mass matrix). Below is an example R code demonstrating such a benchmark. The utility function `benchmark_threading` is shown and explained in the appendix. 
```{r, chunking-scale, message=FALSE, warning=FALSE, results='hide'} chunking_bench <- transform( data.frame(chunks = 4^(0:3)), grainsize = ceiling(N / chunks) ) iter_test <- c(10, 20, 40) # very short test runs scaling_chunking <- benchmark_threading( model_poisson, cores = 1, grainsize = chunking_bench$grainsize, # test various grainsizes iter = iter_test, static = TRUE # with static partitioner ) # run as reference the model *without* reduce_sum ref <- benchmark_reference(model_poisson, iter_test) # for additional data munging please refer to the appendix ``` ```{r, munge-chunking-scaling, include=FALSE} scaling_chunking <- merge(scaling_chunking, chunking_bench, by = "grainsize") single_chunk <- transform( subset(scaling_chunking, chunks == 1), num_leapfrog_single = num_leapfrog, num_leapfrog = NULL, runtime_single = runtime, runtime = NULL, grainsize = NULL, chunks=NULL ) scaling_chunking <- transform( merge(scaling_chunking, single_chunk), slowdown = runtime/runtime_single, iter = factor(iter), runtime_single = NULL ) ref <- transform(ref, iter=factor(iter)) ``` Graphically summarizing the results shows that with more than 8 chunks the overhead is about 10% and increasing further with more chunks. For models without many parameters, no such overhead should be observed. Furthermore, one can see that 25 and 50 iterations give similar results implying that 25 iterations suffice for stable runtime estimates for these (and the following) benchmarks. The overhead of up to 20% in this example with 16 chunks may seem large due to the scaling of the plot. One must not forget that when we start to use more CPU cores, the overhead is easily offset, but it limits the maximal speedup we can get. For example, some 2 units of computation become 2.4 units due to the overhead such that on 2 cores we don't quite double the execution speed, but rather get a 1.6x increase in speed instead of a 2x speedup. 
Considering in addition the time per leapfrog step of the NUTS sampler shows on an absolute scale similar information as before. The upside of this representation is that we can visualize the slowdown in relation to the program *without* `reduce_sum`. As we can see, the additional overhead due to merely enabling `reduce_sum` is substantial in this example. This is attributed in the specific example to the large number of random effects. ```{r} ggplot(scaling_chunking) + aes(chunks, slowdown, colour = iter, shape = iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.8, 2.5, by=0.1)) + ggtitle("Slowdown with increasing number of chunks") ggplot(scaling_chunking) + aes(chunks, 1E3 * runtime/num_leapfrog, colour = iter, shape=iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.1, 2.0, by=0.1)) + geom_hline(data=ref, aes(yintercept=1E3 * runtime/num_leapfrog, colour=iter), linetype=I(2)) + ggtitle("Time per leapfrog step vs number of chunks", "Dashed line is reference model without reduce_sum") + ylab("Time per leapfrog step [ms]") ``` ## Parallelization speedup In practice, we are often interested in so-called "hard-scaling" properties of the parallelization system. That is, for a fixed problem size we would like to know how much faster we can execute the Stan program with increasing number of threads. As nowadays CPUs usually run with so-called hyper-threading, it is also of interest if this technique is beneficial for Stan programs as well (spoiler alert: it's not useful). As we have seen before, the `grainsize` can have an impact on the performance and is as such a tuning parameter. Below we demonstrate some exemplary R code which runs a benchmark with varying number of CPU cores and varying number of `grainsize`s. 
```{r, speedup-scale, message=FALSE, warning=FALSE, results='hide'} num_cpu <- parallel::detectCores(logical = FALSE) num_cpu_logical <- parallel::detectCores(logical = TRUE) grainsize_default <- ceiling(N / (2 * num_cpu)) cores <- c(2^seq(0, floor(log2(num_cpu_logical))), num_cpu, num_cpu_logical) cores <- sort(unique(cores)) grainsize <- c(grainsize_default, grainsize_default/2, grainsize_default/4) grainsize <- round(grainsize) iter_scaling <- 20 scaling_cores <- benchmark_threading( model_poisson, cores = cores, grainsize = grainsize, iter = iter_scaling, static = FALSE ) single_core <- transform( subset(scaling_cores, cores == 1), runtime_single = runtime, num_leapfrog=NULL, runtime=NULL, cores = NULL ) scaling_cores <- transform( merge(scaling_cores, single_core), speedup = runtime_single/runtime, grainsize = factor(grainsize) ) ``` It is important to consider the absolute runtime and the relative speedup vs. running on a single core. The relative speedup can be misleading if the single core runtime is very slow in which case speed gains on more CPUs may look overly good. Considering instead the absolute runtime avoids this problem. After all, we are interested in the shortest walltime we can get rather than any relative speedups. 
```{r} ggplot(scaling_cores) + aes(cores, runtime, shape = grainsize, color = grainsize) + geom_vline(xintercept = num_cpu, linetype = 3) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_cores$cores) + scale_y_log10(breaks=seq(0.1, 1.4, by=0.1)) + theme(legend.position = c(0.85, 0.8)) + geom_hline(data=subset(ref, iter==iter_scaling), aes(yintercept=runtime), linetype=I(2)) + ggtitle("Runtime with varying number of cores", "Dashed line is reference model without reduce_sum") ggplot(scaling_cores) + aes(cores, speedup, shape = grainsize, color = grainsize) + geom_abline(slope = 1, intercept = 0, linetype = 2) + geom_vline(xintercept = num_cpu, linetype = 3) + geom_line() + geom_point() + scale_x_log10(breaks=scaling_cores$cores) + scale_y_log10(breaks=scaling_cores$cores) + theme(aspect.ratio = 1) + coord_fixed(xlim = c(1, num_cpu_logical), ylim = c(1, num_cpu_logical)) + ggtitle("Relative speedup vs 1 core") ``` The vertical dotted line marks the physical number of CPU cores on the machine this was run. The horizontal dashed line in the plot with absolute runtime marks the respective runtime of the model *without* `reduce_sum` and the dashed unity line in the plot with the relative speedup marks the theoretical maximal speedup. We can see that there is no further reduction in execution time when increasing the thread count to be greater than the number of physical CPUs. Hence, the use of hyper-threading is not helpful when aiming to maximize the speed of a Stan program. Moreover, the use of threading outperforms the single core runtime only when using more than 4 cores in this example. For this example, the shown `grainsize`s matter on some machines but not on others, so your results may look quite different from what is shown here. The overall speedups may not seem impressive in this case, which is attributed in this case to the large number of parameters relative to the number of observations. 
However, we can still outperform the single core runtime when using many cores. The most important advantage of threading, though, is that with an increasing data set size the user has the option to use a brute-force approach to counter the increase in walltime.
{&'\ڍM&>Sq))ߢ)R]G>M3;\r KM1!l*TA2\ژ '| ?2)d.K_ӥfsn}_2ޏa b&+_i}^k=&k@)%'o[kߴK^a`Od&5hɹsĐ1[PFSƼ]pnh& <Ècb# M82k2h\z fM$A&&H1lph͊UhP@ǖ0 5m]C5.l gmQn,`߸zSxP#2> FsҡF\EUIk:؝OZ'YCL$ ZE  /Ыڠu _}bH{Bqd=uME#}Mv3 z tr=\%4وVƂDuiX-Hn )<6A^q&}^6$_E׿!1_ش}y:2jIY!_K{eGȱ=`-Y^i=Ҳ<؋) yΑL~J!1=CzشB#* _%Qfr"3ЄmK_)\ң9@C+!a RCiw W+iw߾>=i<@(2I!ʙ|yHX>yH<$yH0" i~Dv9KCMY^XAslgQ\`GE3^dޜ;|9>L!gaɮ}T1UC ØOEW E>ʺ\sf&_oXm -{3j5.\^ APp0a%z 8o@67^&jnLB"` P`'it37!mL7kL-@v0=.#FG3ycze LᙁbKqcz)EhQq`zؘn(c)؎ XXY(L7`z\  L$1rZ 0ܯD\ =]nrѼr0qaz4cܮ`\!σ&'ax0`ɭL7 L7@X9J&+drӣq`E wa\tsLw9L7y MW@d'0=Ā'0z]s@\u? @y,Y]qϒЌx>`4% Ѣ^5Zrh|[K4+o}2y\~@LN }]F%KdsX=^Zofڵknl2]84K{5ߴ'YM{ F/xӘ%:!4;yӸvP0Mljt`y^V1މn1ME]1j,BJdƀc٫vhi8+W~,@"ȰA22O]ROkq v@w!CafQ<ğS9šE_+`"kܛMY2O,pMv!g?iHP5$RGRi8ӭ5.ȱ:[/>NL!(z1+л΀'zvm^R}ʡvn :9}5xS8ho-@7P⅝D[88X>s &Bu_} |oxEA #Pz8TRKx|>*|^LcB{(A&KfA0`DR)}ˌ]H' 3#73n ā/pjL?>X^ PJyTPA[F?P%c1S+͸Bp7&:Zɔ3=%C?U<p'̴]'4'YNLJ\":#N%Ǔ YK%ƿn>Pb K%oŬGJR gdwL>d{KnJZf)+g|tekE jeG3~K7/O2TB?@eQ%/18%Vg!m2~>|z*8I|&sll{tȑ7 #y0^]DS82 PNg<-q%ubө\}ϭeݽ[Ml\8ܑmDaQ4<mIJ$5i7u/x wW,T4-i.*\(ʼnE(b4 {a֋Yg_͒Z , $Y@tnv/=,f87bBe\)9k,p1Җ<92*z2(k dH,~A` Cϋ,c9{ϒHWg@B s>?3"#[ktT$D1D5V 2dHw bQ!V (oxXxXY]ߟr,\e (f=+7w(:8=8Fs/5y.:lهHqK͜8n1[ˬc>'yrgwZ )AIBV0ْldg^C.*I]ȼ̜PM{JˤL8K^h2"{&g$׎,_;*tb,ݯ*Jcz=1| `}k` n~G>ܣ0li2/'| }qB?aߗES_եQ9Kңtn. خKfq9_)h&\iM#(T u 5uIz>(uQUFR*iiuQlEv%M2U}hiu!خ r]躄mA{NH5ʘ)5ɔNFg. hMLaڮM`lJ'uQZrr]2]arW]M7 7@ Vl(Qf2/%Uut]Duj AeBPnflqCvR/.Lel%yȕ:TTT֢:UԄT*&*F*հ7Q1jJ "zL l*M\.AԓKդH亰Xx.خ z$=tc}v+K*޸NT}zj opl-d @D+rj*W ȒtU,ae( )d8#|7!On O=?[+(gJKݦ\n2~p7¸t 5, M&9ppdؙ"̸ɳ^&Cgq+.irc~oeƣj9^5voKtk>jP\U^YYؠ,Ȗ>,r=ASf#hBzB{=XEMNY66ڵJ~r=Z&](\z_O{`*=8q[٣gS,k=ewCujrhO-x%(XdMe3 e|ăxTKC9 zgu U+Z˓z ȉ`MIs:2ą'×_WwY'e @Gt ib\< x"*'%E_Ěfyy" EGCAdjIMOHd j2/ 2N۹2ؔi{!B/$(O/*ǽ*;TVn0DUKV!j 1FT Jd+muF^kj$9zI[Nij&ֹo: h^Z_# e T)]!Em7(MTRP˶\6CyUGjXPY/E!9na?̃P >zBU~$dV:, VrB`-#UvkM`U%uծ냕Uy%dm #.J\N/YceXeSvKXi >Xk/!Fm܊"Vܘ}kn)*Vߟ{p{!{X}cO_o ,$yUG|W;o-dɡ_H9iNUbMW{;}ϧO!^. 
nkijOQ1޹AO ݧBgmm[)HTZ~ZϰlF$)VDh?$o5׃"`6߆+ؓnlZUgA~qW BߟzPp1y+͉Q|(D$)nx#nд G + iPoDt{PK\Ѥ3V58 h6z0F7{GBÚ>XhzEL&ozN2n%ӹՃzRΒF9qcHhbtT}%=ݯ$=k{ $=3>l%=G ;΢q}Єyg䦘 r}ՃbS _Qc}+nzPGfS&7|1/ccc Ϟ7ד+vzP7v{h3_|V6%|W>("}G }U*u}k|P+ H+BEJo^x2z-z.]σ^4w ˞MacK٣VʲWEg%^֟d/M eO@6a챩uf3sۯ/{Mxx3$Jx}£7E{»ļP]*X%m|^߲ l}EPysiޒ|vYI!~F>9)]|sl~ >_]||ˍ6ŧ2K&~F+3ژ?PUC*CoͿ &K.>Vu!mE)cPyՔAׅg4]NR=UܛrriuIr CNUMUI\mݑB葪N7ʓ[}N%0"ׅ(u V9ZS]m]´LAd?}۔lSASua'D}N%%d6%PkqzL0=rJ0===7m=K=@[M#,zf<m5-ZM@Z!@[ #dЖlo]3sjPL׼M1N/eЖ]lWlћ#j{LכmAЖ @[ڲy/wg߮nHlZ?@{rڳY/kE0h3e$0V'Pآ(VP$Oȳj&Fr^>6%XKh1h+MWË(M"ڊ"6@G @X M%ZOӅ&yVtإɘ_xڋh+B ;@[ ϡh/RMnWlъ>h&3f΋h{B@@[Cb5u^~:{6Js5GWivlYyZ8Gݱb̗pܮآۋqCmOKe V]jtZX:D롲Ke/2ʚK*{c\5*w\\eO؃rjޙ=)oڛsG{dk[KfvR^$(6N{[ثAR:sՔgn8d:^*xٔQx x?JgI*]$j2qtM6)vFr;A:C!wR`zpѭ9t66ٙd \˹ '̸.O2ba P,rDcvF%ڕ ~ {Ip C/nkn2N}"θP`xm** .oį\aY~4J/ ZM|xp?c v0Lވ4cӪ"~܏.FQEDǃjxzsፐP6a@SKT*@oD}E 'wItEpEBf ]6>tÊ]B[^zQ%4c~uEbĥgG1iH2 ^k/M h(U@@ ZJIkvzASaCU-TЦ>>m4֬muuC7Ymە"P퐢}E-ZhI %46MPHqMLN5j]KPO Q+P& i椆=sPڑZv]TԴ+J[@Զ0VԷ*wQEREA{F{&5~Q^^wVt l|ס3'עXbfvv^ulOt{?6̻QXBqN?1*7~>J$u^{ge=,3.{farEacxsGuyi1Dl3hqyL22y[UL|nl`vNV:4 NǺ4yOx?vVJnG79\׫Up"}U>VZ\͐0k=ԬbGkқe*}zjZ =jdOT *{cz٣ŽWƔeJ` ˲B㲷Dž!WI>{η^d&k}!Fx̶ ;; #21(wlG=O F Z|ʶ:_߲ *ǒkK/g!doRvh|!$CJ⫝!%ħw{"ܕ"VgiV邓7e׭'?S _~tJ`fOe7 KU|<ˮtңpv]H}*VSWm$t}4Tm{*5*nۮKkB%Tp\R%(ZBUz?T[ʶ"*[wgKTXEBCڍE5;a4^:5\HyCm4_Y ΧJ %>mʢ>f5Pݺ=r=d^rzzI߯jz,\z,z쬯=82N"=I/ڛ=J,ВJ Ҟ%KE06N2e;3eoId_dS{Cmd#C /Tx(EnEƠ/l O 1 {CxB{y.>`Bxtcl[:YEiF2Hu&z M>&QC@v;X $i#HB׻١;;YĴI'+ݕ7ଭ) .| BW̰IW&Ϯ WWѫO OU#!  ~f_gu =tDE: *cz? 
6Uܮ¸ 9;>c'L`{cuaߗ,ew-jE*NUW@gVuJ2fSoe3(cR 8Π;XYm%dce݇">^ ]tdFϝxӥ{C-q8P!&iopĐ-%3y'ĪGU?yY7L Rx2۷iր> 'lW)SrL&PHV|^Чyf{@+7Te+]o7Mh&phc5[E;9WEKa Ԇ@?ka^m<|SraO/WWCy S"z2zfBz4JkJxxnP|,R EYi|ҧro%J)*y>gyX^Irke|zlj!}ԅI^Յƒ# OPbx%ׄWi ,N]dSxoUxyxٗPxDx3 :O<|_ Ol~C_|$JfaSO[rD=W%+O>U\]|,)Mff;~>7G&?!_\xD>ًF|3->ڹ!V|@vUv8ɇw.O@|)EK>w5:Q+7 *N A|.s] (ʮua(9jr=0>Pv.Q2]TumT*ۣVa\߮ #RkRc*RA\ȥ*%#0k¥"Do.AKKCD亄Zu ܻMTpv]%ޮ _2UQ)u~ئu r]L]$SXH-SdBy^]Ԇ &ATuQ{uua,*r9Aax*sJ-P+NPP>_zA\oEP)ׅ6^"EFR]) S互 *TYT0;.U+l)-f*Wۮ צLkm*M5&Uȵlv]\.fޮ\ .G*^yȘzMr@+*5`p5Þը/tE]J{p/ޫg$lV'5+c&hVmf*@N#b60ΎK\ZZ)[cPP9ؑr(/b:89y/araJPs!F~+HK #b1^0SxFZ ki Y+02Fa`nI0ҏ#-vc؈Q<UڜEȘ!"ceE81[E;EhȸUHo-&y2u;~1R< yx02 b;9FFm!FIg`/ #B\>`[x0*aC3rW#Ud:O^WF~]GH?Z^xF\pBC၂~&D4+<8n$ݿb$F#p0za#]#wkHbGF_` #u䅑~~NV`_`F`l#M&f Awi20r$FZ+HpV`}wիi2p_z$HI|ݿx$GW /=5ޑh>>`=O+8Ѯ`>DSK SΠ>FVpJ_A}]8&Cu%{=Y{p=B{jā'+g2 \Ǟl|tGhcuUdJ3;^_.CV l=8.{ oc;iճw@84}1zƒp6"p$dR#^isژ]x7Kkr?rnpw0Lk'0g}Ajk;7Op~#5i KE1XJ1(SPFSPMoUI\>_}Gz1Y1q< ҃@PelTK1VEs}ir p@@0!$bmX )!,˃%eQB`n/V \o#OA4 8O]q&e8, `-3 U=`8a7P"JNRa#QQ%2gQ咀BIPXL[Dΰ©Z9Q@ .(b . 
bx* 'OZE*.3)neŖ( E@B۸(ve(x-/bWA8)~ Ve .Ul.D8t1*3WXðWu3*|tn#2RNOUۺkЛEݾ(}K_#tv__){#E/}e=_?b=}K_q%ox__Xo^_?G>o^ G>o^G}b=}K_@ & m_CWF >Dɦa}~?qVwҹص>t>/Iv 9IHO浱bZ7Fzfg; H~>-Ϗ Fhϥ .X*.xt~|__S7" H;M5>d3F9<^zx}3/<.[j4FXp/oj[xiÇҼjne[}CX=όS>9}KFFƨRpÇo3]NdžP>GtRo< ӻaף꿯H]1/ǚ5wHِ~2֏.}(vww~ާb>5hA9>hj(Uf~F~>Cή?x}>y|wZ6orꬿq÷pmßBJWuM6zӧ,L*s+sIپI~x{OLhb\Ûy_WwuVzD Sgч]endstream endobj 3 0 obj << /Type /Pages /Kids [ 7 0 R ] /Count 1 /MediaBox [0 0 576 432] >> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font << /F1 10 0 R /F7 11 0 R >> /ExtGState << /GS1 12 0 R /GS257 13 0 R /GS258 14 0 R >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F1 /BaseFont /ZapfDingbats >> endobj 11 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj 12 0 obj << /Type /ExtGState /CA 1.000 >> endobj 13 0 obj << /Type /ExtGState /ca 0.400 >> endobj 14 0 obj << /Type /ExtGState /ca 1.000 >> endobj xref 0 15 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000017970 00000 n 0000018053 00000 n 0000018217 00000 n 0000018250 00000 n 0000000212 00000 n 0000000292 00000 n 0000020945 00000 n 0000021039 00000 n 0000021123 00000 n 0000021222 00000 n 0000021271 00000 n 0000021320 00000 n trailer << /Size 
15 /Info 1 0 R /Root 2 0 R >> startxref 21369 %%EOF brms/vignettes/inhaler_plot.pdf0000644000176200001440000071630213202254050016402 0ustar liggesusers%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170125184259) /ModDate (D:20170125184259) /Title (R Graphics Output) /Producer (R 3.3.2) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 232956 /Filter /FlateDecode >> stream x콽.MGqe @10ḧlIc7^SUW?e?a]Ws_{ꟲ<]W?7??O߿gd?]O?Շÿ<cjMO o< ۟~'|fgOypi=]?JןG^x 5>ZKx={a9 ,oQ=8;X q\vfZO)VO Bϟq|ȂSjrTx̀Q-GeGuGeO;ʰQ5;> Uj>}0v' {^8۵O)q ~y?GwN°1nб/°1aq\61|0?LjO]v'¨`*ƨcu\Zom0@9 eWc7 "9^/?u^=@/.EP:=@u<*Fv_|::n*P/>@PyUn!. ^P^EDžQ 5(^5j107~ku1f>?Owu-rCS-v<0/姓-|=.B`=ڍ-=Ekz}x Dˮ2*M<odtusMn&mdS@/ W_i^}?|R4`2ϫ6-=󲅳q>޴xA?/[@{cG'~,Lwwy= ^v S;t˸jzكq^/@2!MOU^le<4]<UiC8ㅿ77,eq_~C1+D e챫J}}0^3\pR1c\z̸]SƮȌ"pM|kx`rgDM8C^b1UA}itNW~=|~yz#^O? כ5߿F~/U2͟B^fP379!^Fji?̸_l*}U"zn?`5!2eSR>R_o3Q}U;%cwTl}u;g/߈'r'x۳ˈ'۳|]o/]Ɍw8,,>"^p?ǻߴ;|d.7y֭Q޺\5ىCoИz )^ӄ@u\vޛE=L_!\LC}7{wࡾC}7twK+wm}!扺_][wW)yګwMoݜ, :x{Њ%}1^/wX7;[x~ջlRsV4ջ+דB߽<nVn>^< }w1{|*nB]⃱/V]."}2*r/-#q?^:]XH+xbQ%/b^ ]d{!?X|﵆ߟ:r^ 7 ),ݵ0Dx˽Ezջ=Bv#~/W!/ջk`| ;rwJ/&Eq<ݥ:|'q!31rxT28~![l_8뼐/k慄Y{`گ%jCژI}W2e?_˼>J^M}_muq_Vn~~%3&3a-% ?Ѻn1_[I~#~>OJpJ %x쒥8,g*Zgx//ɷ t>J~?R{{<9^>%y_ͼ}7~\]>C!f;?暎%yϧG<3on||9z`sIzƷwwy=5% y=fΧdswһb)IONNzesN5SS'pwm^9)ߴyx|f'|ik[.91wwrc|]wGߕ+}|᏾c>7r[GzKQo)}xx|Gwʧ))e}|T/|g}zCT+-CϤTO1cS='rMSSS(rw7EN1ީu8;g}zT;ܓRpKzK=OIzKͬWFNWw^)ީz8;kg}zoƗɇS=pwgGzKQo8-X|]wGߕ+32*#2*=Ҳ*-Rs+5;FJJɺɲɪɢɚɒMkXlFqF1 ` lj`.)q^%ܝܝܝ۝۝۝ەەL+ )+(3(##0*v—cT[Y,Vi+˴U"meDi`L2rIfaMbIFaIJ&!,DcHx(%0k%̊lfA6Yͬfc3k1KF@&τQ0cza8R:f;`Ojd5:&?`^#뮑eWϪgճ:Eб< 9vIaT?j)4-c=XR:Rǖ5VeղjY_,ZVWm~}.\9prݰaUÒ ;箼[y5b>R꣤>B꣣>2꣢>"ꫡdat͝dFAOD|O_wă%9PF/FW7SkЃ¾)8ssAǵCBWuwk^f}}DZǹ' ǵ~xL6"Ni,⅃U)0x&׈ qIݱ) \pt# GZ+Gu$D{ !b}.q@ːGK>]x9d{] ѭ.i83A,vHI'n6C:^?Tz$#-h5>@G֎OПgrG'%)i1`X*8 Cm"d=4>cTư!?K6V, 'C2Aod0ѝ쾜3ŞKX,{#4%7,P `jCٗq`bl^bQ܆8 =cnyCy P&X()~>)CH@-Cz8 AkA?ޓOHz4CCe$k3"cݱH2X$ =P j 9e%/[n@cb,z8VHV:T|y1o9*'+ߖQE} 
;tJ*ٿJ*1V|_~䘷 ZBt\QŹrqtXr]񮔶㿣b^fUQ7BWYU+c\q4C"=䩍+(GG?;R/vLT9QQv\Te ؑa&/#;:~wGG=?lzwGIf~^ّbϳu-uqIU:Xww=,; xÎâeߣmO}GGpS ,3vT"*;2KAou:YdgZ Au9Qj? G<:NM/QRfǪ:Z x/`<-'-:j :A܅˶;v]_ z*<8^ ;~ayag B_8bǴ1ٶü`5In_Q^\t$#WrpiÊzAǹ ]U%*!|ѹ~פ|ɊVpt`to<:}:kv%HǀK_P`!#a'XXp4qǃ*ח c:֗r4q)I~﻾3 J^Hz벮O8rx(te3t,.}aƥ/" G}S_r_pT:t?pO%8>wiP<1Ϝ?o8J{ʒxHe:VwV/J*t >CGq EtbEG.h_tûxFGq U/v)hޕ;:#p$fMͪW.XU]ȪY.Z׮ZW:R9;o7P-)h}'̿Gl.}=zm$뽙go~68'5ůgf|NxxKxsO|sKAJ=|yjsǟnsM_**G<ǁ[<k<%S%!lq|;Kf=Oz,p㓞 \C֏=fΧdswԻ#xG=9;)Q9q=pxx#[w\D{-SK<_J{Sz1rw\o9qzxˇKw\oGN-~䚎_!O5l|Sc>p%;c63_9;{"x|MgwWF>+r[̇nYo1vf|y |x|a+;Gߕ+}W>|]wG!8-G|r⨷Rc>;L#;Oc?pwc!qwW$n!g[$z1ޱ)ޱ^8;֛xzUX ec-pwIo%zaΧ$za+xzg+;KXo 뵁c7qOY߱8;ֳGzx⨷XOOgw9ޡ p[w3;+Gw-x7V?c%~Qo_$pOz&[[W \sCKex~)8;%Ng}~Y߱_)pKzNk[JG~)ޡ+bgw7 g}~)޵O8;O?Nz~/x%~3-+&NSCdo8;k.}W>|]wGߕVYoVYoViYoV9ޕy#xvwvl%xXdidedad]dYdUdQdMdIǦ5 ,6t來;66 g kl~8RPc,,,,ޮݮ,h2x%!Eƕd gQ4Fl)T002E3Gh,VVj+ 5P6fJ[YVh2IQ.ɬ#I,#,)T$D`S d]fM6$Y,fc3˱blf-6;&0`IȄQ0c\uLo1G Pl)@F^#+ȺkdճYtmx%t,¨]RϱZ 96M`KX<5cԱeղjYa,ZW-˫U'wK=W{.\7llXsհ}>)+/V^ͭh(訏j+Y$]s'Yu#qrЃxq!`\Āv7l)mĭ h7~# rx6 f@*MLV,;h4*7U<=0U1ySXb![L{pVlK{Cw=!-9W΀jr V#>*Vўh|kOzEaȊXdT1{ח5S/*nȱ\C^=*zwůW:>ړ~!Dǒr4)=)Ǔe/'FbOǔ>ߡ ($UQ#tl!Lݎ.Nӕ#L_cLa9ʔ0Ld9Ҕnc t~L2倇#N_cL:h~#ϸo&ǣ>}#P1L^â/~БD_2 !tDO4Qi,Gpdb!Ǧ rt!G>*QJȑZ x~hUHWUUIYգUJ[UK]= Gr#㣣zȪW.XU]f h ^j)^kW:VAqQIqFÈx{=/y撎czmwޛy?Ɲgo~68枎`sKx9Ļ-ωk߉C_8yVƷƷ5o'~#OU|/G"MGqGyGz pIk W5lni|EmkN'z*p \tPocs#j;ٜXz8pwӁSGwZOl9i=xL/)ZO.||jswZNN)ixֻkwZ/o.9i8;tp%;c63_9;{"x|MgwWF>+r[̇nYo1vf|y;|x|+;Gߕ+}W>|]wG19-#G|r䨷>RS>LN#;SpwlS!rwWDn!g[DzK1ީ)ީ^9;՛"xzUT:eSpwIow%zaΧ$zf+#xz+;KTo=굇GwFi|Yߩ|8;գg}zv䨷T鑣R=LN#;9ޱpnf}~YߩaNSEدg[9n{w|]wefUf[edUF[gUZ[eUjwx~1ޱx㝢[7^)=Y=Y=Y=Y=Yֱ9zcuOtOtlXbCw( 0+6MMc`SpwHAM,,,,ޮݮ,d2^IHɠ \IF ID!)Tq002%3d!,VVj+ u2meH[Y,d9 7\YGXXGXRI( d?^)Jɺ$̚lfI6"Yͬfc3Zlf)&$a$(LE1 c:7L#cF^#Kkd5Yu,z\Ƕ:Ga?.)X-Q&u,ԱK PZزjYbZX-뫖Ueq'wK=W{.\7llXsհ}>)+/V^ͭh(訏j+Y$]s'Yq榳;AOD|O\w 1{ 87ڈ7~,">#?Ru?p cۏF<}%q8G~OPx (/ss~6¨㠏x?=4;OD\A?5}*8>EyЧOgOBE2&fǺmdzS܂I.8r1ay'!2՜b1nw3r<{ƾf)!. 
:z# 7..gޞ)i؈d -7T (Zn!|džq.gٸd_grC$ᠡTri1`DBC:cD{#cc1ŏ?X)`x![f$s3a)hw )s [Pl԰ryP7WCڱ5t8q7QblxS ܆S`3oż_g, q@ZOH@L3.o C "aŦvEp7a|iO _$@G_|z?{hH1^vC {t :ܸ=$ܸ<4ܠbG?=>o=x2 ty/ܣBoqOqvܹ>;[؃޸=]o={ EGq z;wP]ݥA}辰rv~]p#rl zCCoM%:֤ IGj[85=觾ŽUc:佣xIo2~_lHc{ 8w=-?|@tzx;<v O}ž {7˶؁;k#;gמxbF#= VU^p"ٴ1:ƍ|KԞ<]8EG-g߇kN;}q7yBǾIpݎ= `rlIP 9EG9ac# / ?"ع:&'HǏ MO9E:<w{{2A̛|!-=lY7wp{48=2x~Ep<e _ zgރY(s{{x/i3gh3 GO1猿7ܿ'=py2"3G23p߻gVCNy8 ƝY{W:}W=转Je::_wPg !:J1;B~0>|Չ4_9Zy=k*|=;Ƈ [РlXE8vw}]w޵$:w!]xHu;:O P"|TP#'>K8OS#TQ>5W8,~˽t^ZbǷkx||[W\s7~\Mw- h>\H.i|MF- as \6SOq|ck\'s#j;ٜXz8pwӁSGwZOl9i=xL/ah=zl)i8;x֫[wZn9ixv^\x 63xQ>"pwgNN5;S6ZN))8;o:pz5-|;; _91xxxW>|]wGߕ+}W>Qo)9-#G|هgwʇ))~x||ƗS` cS"rKǯT9%-#7^9;;_)ީ^zxxzT=꽑{_w'NYߩ9-#Gzz䨷T?|]wGߕ+32*#2*=Ҳ*-Rs+5;FJJɺɲɪɢɚɒNMšFyfQ\` mjB?؅355 G jjf wgwgwgvgf1]Y]Yd AAA81B8R` `2dKf1xB\YVj+봕e*meF[Y4s0 &nF$&$P%Q1N~$RuI5̒lfE6 Y,fVc3RL&0aIHQ|v0"%ӛp%Üp%5YzF^#뮑eWϪgճ:Eб< 9vIaT?j)4-c=XR:Rǖ5VeղjY_,ZVWm~}.\9prݰaUÒ ;箼[y5b>R꣤>B꣣>2꣢>"ꫡdat͝dA?P|8G=X'd>Uǹ/Dě}}ٷ}~sxǾ DZǹ;|sx>x7D?C,A_!w<zǵ_΃:^v<8A!)㠏xqG|J@dAqՀ"O=ձ;Ԇ|q;@NS㳧Cdx"Ǿ'*DZ1dzSكI1}:Axw<{;= )\ptۻ Gec!D 2ű$b}.q7*A98~Vt'Yf8tnw7dx@,:=_P:"uj k؎ȧI:= q !vtc{CG S;a1xrW`!՗q;(Y8>?-oiAW"NAo/,]Ao8CS, /(c)8FB',# =bi} j 9g , =^alwjC}m'/hX7!<0oÂS;{/;qz[%Rc5ća;{rlG:ư2oj̸w#3W>{Џ#B2 &C~;!9eh5Ab}O{A!v؎TdԌ'3rOpЏČ1ڎSd/>ˏ"xqa zc|1 kx=%Ǽ)Du*gdGpsvc1qמǺgoIokA[r|#c=lEFۘx=̘{1cn $2p "nc.9=co<Ŧ~&TeLYTDK(aD '  c8Q="b=}سc<器%2*2pbc<V ?NĊx'YIesBҞҨH X -c!iF ٱ[8vv |(jR~p yrł$>G;qKGuL8>8/;p_#50f8Zx.ǻw#;ʆꆁ׈ss=z=?c~<{_x-/<y=//7qI;➎?@'K<'^{<-eǧx8pOc<8i|O6lq|x5r3#-5@G=z#pK^ \w8>S{X'=7S/|7t>ԫSKNNz:pwS#rẇ91rx|fk"x|O/:rcgENz--׬;\rc>o3}/<|x|]wGߕ+}W>|];s#G|p䨷OчWwg))~xx|{_wNww7DN-R=LzKQo9;s"t>5;Ջ"xzST]WwNYߩ^w'zꅑ{:ꑛYꝇT/=RSpwN=/;Փg}zTώᑣR==r[)ީxx~=;n9ͬԯp8;;lYߩ_pw귈5"tLzK G['Q"x~)ޱf3e"x~)s8;ENg}~Yߩ_pKzKNk[ꗊ"G~)ޱbgw7;g}~)޵Ow8;mf;-R Ϥԯ9;;FNScԯ|]wG߱4rwW]#'~%NzSc?oNю,힬,재,랬,Ɡ,ؔ&ņQ^<`Wl$C356o)q~cpwVpwpwowowVowoWnWn4lAaJ2憍3(#6d8 c` ^2Wj++:meJ[YVh2IQ.ɬ#I,#,)T$D`S d]fM6$Y,fc3˱blf-6 
LX0`E2HƵ1@&)@0')@l')@F^#+ȺkdճYtd[^IP_I.y0Y-F#%%{%k)eղjYa,ZW-˫U'wK=W{.\7llXsհ}>)+/V^ͭh(訏j+Y$]s'YqKރ'=Ug_C29}nK p8u7gzǺǾ)9Vy>8x/:pnxú!Ǟ=a88kq{*r<{;>{3^>LrCS =}vcr5 ާ^iDZmǵ'ukO]>C].8Oprx A8HS.|O'ie]y^48;Żhs|~t/rpP.<ב7 F#E!NAZˆ8i@:Sam8zN! AG '00&SD MmK,`Xyo|bid狅e=Xts%Ys 6C?#, qz@R:zC/#y|9̎[%hS TCr|5G[Coҷ/3!vle_v, vc‚\;da1os(0Ȁ4J#7F#00{zУqG_2{Zİ-@-MT E!OtL_>wOt318D7# CRH`2tO}ч8zC[}i/ClXI1Clv)3C8Qf܌DF{:#c/:,3A ~12pX!h-VvXOKBd<=d5S+Ao\ό1$2pH#5az)ǼK]dÁq{1~cNx"#nog̍o9oCwc쑎1cfdŏ0o7s1Ê1~(T8Q0c%1j[NT<8*"t{**Ƹ71꺛]/Ñ1/TG==]YQwח.=׭5 *VXke '*^+bazc|1{E7c~+r8?9=^3AEٟOT $xߣ"nyƃ3dw#l*~sa|"5\/Tl+^=a?azgd\·{{t. +sa|C+}!e-ޙ }`Ǥ1`[pء#4b[nArE7Qj\9H5C~aը=tji݈t:b[+G86QU_XsbM䨿w_bG/ߡl=}7PFDZƃ ˾xz_q{{f@yOuiwtTy %G/:= !ǼEGg-hHxpO`ץ=;r^rp*pCMhv{2 }7Wr{*hG8*:=top>_8bݦ(v1 c\H47.9c"+ F}W{$:<'GX Iy_ȅ1GEt~}7:>Q傾SnzĹ@@oȊ ~A} 1)1;JQ.$džc#ӱtt3t7Cp%hHꐭXLEV"ו GTbrQ%w>^SyoUwǘ$OV 1Bd1P0Y|c|x?x;U8NQ9gc&+ty΅|}7[t &X,cZw]M1pU{7 ܑӾ#1HG.ѱx"qF.t&VUwﲤW]$>*]Tk$MzV ]eCPp3 -8t؂>"_w?ɉW pD'f\U$7+7^I*XnN1.J_[t |-2c:'flxgsIAk3'y_ͼ}6t>~is;wg~78lnq|}\J枎`sKx9 x9rs;ƷƷ5o'=lq|gg\{_'s-ϙ7x>Gl.i|[OT}^[+k;KK䭧68686tF|M EWw7m9)_Io)e|qKwmf󅇯o|]w̧wύR>9r[G^))}xx|{wGi|Y)8; 6Yߩ9;+"tJzK3-S"GzLTω\T/McS*rwwNYߩv8;T;ܒR0rOSR=r3땑cSꥇWwNYߩe}zT>ّR=|]wz̬zJzJzJcc;;E;oR{{{{{{{{{cSƚFyfQ\<` mlB7qYYYYYYY]Y]YdJB+(6$dd8 c` ^2Wj++:meJ[YVh2IQ.ɬ#I,#,)T$D`S d]fM6$Y,fc3˱blf-6 LX0`E2HƵ1@&)@0')@l')@ɨ'kd5Yw,zV]=5l+) YF#~d<l–,š’Ė5VeղjY_,ZVWm~}.\9prݰaUÒ ;箼[y5b>R꣤>B꣣>2꣢>"ꫡdat͝dAbAo{l=I'=Ճ~!"`K|oA=ߛwom"GA85Q=Qc/A~9~uϧ1~ 6C4yq;9dAx>#vxaTޘ~zhvwJ@'O6}*8>E#+O{\#N™ i۱Iݱ)߱oA8 Mw]jxBI'A28;!~ȑQ=GZY3vw~eGVߟlH$Kك)iqz8{Ew!@u|~Z¹[C/} aXB7G:{EKnÇx)t4c7b޻{$u? 
AoQ3E°qy P?ZGΦ.#ibxqy PD#kC c-=cwi oe~uكeބΎ޺@ʛO-CF2st @-I=qF68!%GIá10ZC߮(ᇊ7 c8PS:ڽaLW< UJvs;VTHGx=bFٿukOcT7zkc)c:޽bn ViT"f3c:`"_d@EnTf$9)RZ Z !)@jiz{3p?]jѷhDfFzZwc6{2w0Df(o`(Q<=6{2"y0`c1y1wܤ?n+ĈM|vU{4==oD&zc=YDA`DO"yPO #7)ADzoD$"b^PQ?=R{TmqSS}PWC/H8[^d,<&q)BfTȸBtnFظoF?xI16BE"))3V.)G$%GDG})W;淜BGd\7* #`d B#Oo('P=בt02 X/ʈ {Qu"嵘uןwˌzoW7dB}?X;S=8}]pom}[}f]{o{מ_{9_d]{~-keNA0 7ă,Ȍk;fI $Ș<=jQy')yOPߌ̓'ZXF(;1zpq zwػD42R{}w z{*܌؃DP":-C:ߩgs&SL~7wƌ+$J?,=^b f, #2/|75w[,좊Eؾ36͌K?8K1ݩS~XP-d_z7Fƻ/PR{w̤S wK7Ph#c"3*03e(~k ݯ_.w(~i]!%s.ax&Ttͷza_;y׃x2Tˁ5*xA<*b:G ^(n}"z^:fq6iE&3EgES$EuV":pe)7|f RqfF*HuO.$,{,}댾P*.v ?T]܉g=旝T^uOAƃ =h(3iRy=҃~3(*/NO)c>]bߥ׿CY㿪'==PCk1*fo)/ě *iI 4;OKEb'P[셊Q!:ZnfĈS_\'DHv7b׃b(̳P\[ r=-EEŻKbqT[ԒsN,Ċ'b~KESxV(-8ˍn}xx{k>9_b)oaOb߆q#8r;R? W=p=Yi'#5L>qw{G>qwhxE{5;G.q?q 'a.z8~iU~?{wO>ܢ~`︟8;=io{p>pP qW"毇?`y8G{G);c _qwxEhozG{GgyaȷO{8GE); ‡gw7>5;<\)_`E~|#S1_o)I<|K({|c>axm<F~|#S>opwWz|s[ʗ-[y<^h/pfG~|#SޕOpw3|W[{x|G[7||Ec1_`opw4]wߥ/~]wߥVoVjoV*oVޥy=WvL%68ۑv$v;iݎnGR#ۑ1)` 6w LwГ+&;M {0kL~7؂QcHVdp+ۊmE"yH(208@4=( # `(p0 2bDpD6"SFi#IڈmDFь0Qp㠧Ḱ,Qc0SE %Dz;EK#'둒z$d=XlG2#둊=` =-z3AOpM'z@=` k0P-rW̫E"jvȺj$]5r'[=ɣ?O.)ϓZ zd%'@=y(` IK/|SHJdX%UDvUwC5Fk 7,1lc0E}a}n ݊s#F}q/*ŤQ_,D}sK^Rg\]? xpMsj{>Z?,<ܜU%]pΪbprVc21׃§?&^;W@ }rQp'@5o$%i\q+s})"1C (΅Ёѣ zْ~EuM-+i@5 pY ޗy=Oj 9m@IKsۛ ρ7O0PLbö*/auz 7[2۹f/o/b+~FƬm#Mךa z^cNkWC[ n,^^o.67ƻ nt=b p"a&<$]xb랸E?j7L7na;2\* ꏁJsDEdsc@BPDFD`:gNxpN@t ׁz^G`(B;MxFhxN99 DC"Yl ^AG;IFwDv"y~<{Imb4xG?M?#I1␶~ER#qv Njx|0"b ،DE#.g1<ș ɴd[E2+(bÞ\d))9ۓL|3>qޜeUo'"bYBF2be![L|`(M}};XLM5dkcFN.!̪P2A9KᏌLE22A8)d,}0 3"`Fӡh/Q.%S#zL^"oSF.cҐ$3R2,u^/3R =fIEd%*і*#3>x|?axd%}?Q7̸ஞC/ї3]ȔS`ba4e>ߙ~y0cV?|_b>ߝ2&.M?, 3]L~pq ; w3>4HCFhVEfwgFiVfލA efU<`Fݒ.t!wca̸[2rcO63~. 
DlȌbB2vHPị,Rl#GЃ`(812f$3U@@U=P"jY-8?M/>)i uH8}흂/\( <d_3Wyf_O$8 ئP1pplG}l'}l=L:u9u9a gjTd\3.XQq=~P'xĦz'&P\$9H_RWT\1U`FStq#(̕MEP7M1Og>c]|7(̏NV)NŮ} !P]z˩wbS[LrT] ~kC4 &~}T"AL>C~(r{ʼnN~*근P\H |b(3"Pi*&/b@'"Nb1EQABXPTT $7(R_4 {x}<(b_ #76<;<;4D3` {~bEJãx\BpS0c]AK⡨Ο':< Ex#HLż]èx' m>9_b)oaOb߆ps89䃆z7zN {'+,>pw{'>pw{wni?aF{3p~RG) OWN=~`4\~pN]9; hv8;.|| ßqCџqw{Г" J7 s-(k0P-rW̫E"jvȺj$]5r.g`@<>z#僞H)XzO ԓ`9VDU"*_HJdW9|1tXcaqÆ9F Stڧ/֗ +VЭq;7nnR_LH}/ŢH7[M5+Кo}EAn)q1I\݈TtA~2Rak.<{x}umY$ifhD+%2Ktx5Qȥ,ȅ%P|M&ɗSWW[^7FA&C7%&TҐ%KTR4 ؞iNlMÝK r2iLp}0\&)r/Kɏ ?.\M< > > > {;2aTgjAq^><)Z%-&ib R!B"2#Y""& &IMlD {རm)$u>kzIeHeUH[8Kn4 7N:n3s&@O L*>q{C@?+{O2PMi l2Pj:Ou3ܹa;mK gLT'qD$7v=ӄciυ :z^?^DsAT1iqU=8E,LQʈQ s拈aI ZnI||q9A|lQ| PL"ByPȈQFcjǴe)8`n T(z#YFT xGD-SQw?6.#vE/+(F {)"xBİ2"E/q,침dbˢ #Eb=A g?FT?3#%?.;) q.̈qˆrB"8#XfDHˈwQ FTwDƖ’O*/T)b_ieIVCE=gQPgE92 {&+cB 3^PCz3#:3&*n02.jRUDddT)zQRUU!U#U%5KaV䭖?vз*73^|̈$ʘ9=AjzbM% WF;y#*G 3*w5(ɬ*63jBKUKt'xQC3U5SQΌ|1HO)ftU?f|"]?`XaHeDX8|*1#dE`[?̈;E|Vu~l{B2*0ctwT#4GF`Qf DF|2 {+#3Kh}1#h0c|_+ˌ˒eFfQOoH{^FgIv1㳨+3Bz2c4(3JG)_F*hXce}2^32b󤂏yQem"H$~Y Wf(W_dSJuΟǪܨhdHCFsMϠ/#:7fLQ 2sefdiC-2OE#Q}.+sBP8t\)#=g'ˬ vlC^{VifgVP|6E?z,3>G236HeFVaf'w*T $ĕ my88/(ԡHP)ҢxHz?8Abxa1)7+Hp*:ζKyO{}2:*H b%IMER$)IR S1ॢ&I!KMIEDgCs=Cx E7AH1:cH;>I<́"Ըh:n(g/hPԸިYW:k>]qPu|Q%ge g55CE*+Iі* oI89ᤘ;>:?O/C~/ZR8ME=xTi.E\R!/ERʻ/EbR {U UA^ɭa zVNbL8V++ J}0zWNs)=.B1:)QT *Ⅵ+;8a0Klc8~'oyr86pzri=: s;\8\9p 8\''=Gizd.zd/~~ia~0?~a~Nʅ#a}.|}|~/ {pף_g=0? 
0?O|0O|'>p &_tt8!_u8;]aav8;q7ܣ~pN 5;G hﴟqr~G1ܯ9{{iiF{sw/Nqqwܯ{\{< _{GpSw`8;p8-.oџp|Swho|xD{]wߥ/~]wߥ/~{%Ǟoɟ[G?a=R3- ><R=-+z={|KÑYpwߥ/~]wߥ/~|URÁo)_p|Ksw)G{|^S: NĂ3غݎnGb#ۑv$u;r)s XJ~+%?ɕMIzæv̚[0jJnE"SޜV$o3rD #둍Hzb=R1S F=igzR$ڃ@I&؃`N%` E"jyHZ]-ҮYWF%٢ H=\A~$|s4K0Px `$L@IZJX"*bȰJ$X%U/>k8,1nXb0ǨaNra݊s3nF͍R_TI}/EXT$C-\&hͷ~n^>-~~kXErg[ `e ݚD,`VK b)&΂RQ ̥܆TЮM!04C,pah3YCmGP7C)V(AĊ滃ah'{A};KP HR"HWTJY Y{V3aww:A꠰; aPTn*H&VR0 Ti*邤x{AJ aJbN22HRb Ti\*] LEob y\ɳRThYi*уbA tqB2j!NhjCH["Hz"H"8QC)c3-ҾmۉkIJ"H5ٲ< IR^ viRI!!1yRo3풐2^L_9z&\8>6O Bcp:O^Msؑn̰ *~H-.*cz۲w)҆'z&aX5d@0 S ,,2i;?-c[paPlO J="uY ac^﾿0JQN(pw8C^ I(~㽅}<#69_[Av\}9N*tbsC=;R Б"3 v4|8pשк%UL}V;~?d=@}iJTSϵw/֭2nd4VtbL3ͣi,4Fp> $4op_"_  y<Ӌ#@RUi 6UnT$$/{!2NZڶߤ,,"gʚ!` jpڹmVVcZU)U*m Hyr2UxfU'> UJvW5a_@ (I7}J S bÚ}&S˦ eX誐YSfd!`VeQi$]nPXNf)Edt@> ?"UywF6a{K}67 EZy i)E[Iܢ&H- ޳;?,8첺om־*V%sZTt&*zgͥI}ԡ8z0kA4\I:}*H^ EzK{~c:xEC$iёk^$C**0S KV^d7 ^fd&pMϏC }y6_u %)qTIT CPRkvj Jkq Bxw:mP~ .t^Aq`%Eh*$\l *26ll|ؔROjڙ,c7shIkf^DLB)eq9 Y%J*6HJKfݧg887!P)'gS=JeP6{MTJZfN.$nR?%CfcP7EV6V]G.))zWKSzu<3RCT/Upn|jʗ]Z@ʕ:yuzF|5'Omde]MhԣZW[.5z]q)VV\.$uJwi՛L{UwTwXeVNgvSUb6UOb AD@!XjiC~&,~jAD _gb~pY2l6z~pT&9-^dM`QlK- [J7ŷJjÇs {?8l Pl*{ xrCC5NIx 5^`_ͩxck5>Y4Y"XܬlLnVD V7+[89V40W`'u6X23#223P3nFBMJ&`񳒹6d곾 {ܬ#bd0"ԧ`s`E]}R fw 96)'ԇ`I0ݬ`LnV.V7+ 0Yiɟ̖(`u~f Yy@W8JW0P.8Rp z? :? ? `7\0)m. 1]ڂ Nzzz~5(u,@`JS0P7P`'Gٟl`szz߂9(5L@7~? T^@ :~ `[n߂l'6j-XQo| `P=W0P-8Ro T *E"JAȠRdP)2T JMtZz?舍5~k-8fނ(k-ؼFނ(k` zx e W`PֿAg{ş<<݂k#6ֶAgi_B:v? ~(3P֫[00(- ʺt VOlG`:t?X%$Ol=7s? @Ygn  ʺr e=~VAYCn``P֌AGlX#6܂(k-؂R n p `z`P}[00(MۂAYR?{bc-'6pAGl@@6:m? l? T ʚl e-eڂ-=:bc}tƺj? 
zj `Q[~ڂAY7m@``PF0`7zh NOlg vOl{@w@s@o``P50E"JAȠRdP)2TؤMjؤMؤM*ؤ T@I@E6I 7N;iqځ7@v`M;&u&ʞɨ'!cېc1m"u6,zauoj 6Q I+p( i?Zf G~M4=cQk:_uVy׆QkCި!gmh24 F B#h,h.׆3QkC5QD-n ֆaQkk͊[@yz`<=Nt1e P -BFȱS9b-)v(ޢnZ 7-pM ̦bSjL4=0U#r4T\Bd xb1-̀(OvP ,S)”`J 0%ң8ChS bM%r40N1aVg 9#ls"[d%rHU"SD%HSXJܤ @ m/,93?|rV|>;L~OZ{\uZ׍";'ك8;;l'_I;OK?jHz>uR~8onñ:xa;ܗ5󦣜Gy'{<2/Ⱦz췟Y%z_ X) 'Y5߽>L(>ĽL>ŸXg?_mco}[/N^Br'3n;;D;CW-ʸO}!mg,̾B6?:oLt ӐPl]dmL~O9`owqd_]^dWI'N_FzƬ> ܟDxk]c' ?yd|?tzs'3ƭ ~gl5Vnݦ`k*?%r ?~ן?w o qFb~dmwwIgW0@;CW?f'PA'xޤD? 7xTO^Gc?Qu:b~$2w\Ў?nv}雄.Λu<>k3O+wSy}Sv? 'I[9nܿ9bSv;‘!Kk,}Ϭ~ggb7--m^YI ?+ j7?gmϚK57O>*dS+,z.&^(Tn,WzL~ϐg9_$~>'rdN,7-~8yX .8 7^mh<h8Um7Q{|?J(:Ϸ_篎q:s=<~5RW%8tYշF!MT:Dz{9?t7g้ ƿ;^AQxլ^舀 y$n^o0Q8~o{%x / VOW/zz;*E} KWzg%.os}#?]/xу s~2o'߁~ob*ƛ:bru7}בH8t/:!gXOG_9%p>rx&0{1^GŃ+~/x|6e~9ٰ; t]O#ω=|Rtzj!91^'0x 񖭷Ó߈xg=x8ݺ^ϭ6*K#a?=tGyBpux ~J[; xW|xb+/XC ~.E1_ޏzqJQw|L\OwE߳?[W~yoZOI΅N\ys~wJŇkk˟*lX_xo¾A|$u#_'p|im'1zߘTs}W>ssobs|·oqq:>_r}0_>WkuߏaXOEW`^-♝=Q<^8p%_^'.x-%w)<dzgvWG`7`1w]|8빆~o:q%w-ߟx~fn~*q! ?΃փxl=Lԥrj<ޏr&^,uni |]% ~ p |f_;xI;~Y;zA.|1x_'14\p~g~sxvy?rf'x~ ֽ:TE|b/ Ɨ_;^7׸^sx,uONsx/fmgtfּ?fˍ Z <8ο?D]A+w<Ճv> nJ?]e ;c+/c~2.čx_yy5ڋR| 4%O6Qlhya5S]LxJ_Ws{JnLLq}}K;0cgՃ/M:( {Z_ĭ=x0+x []g7ghƤ^6c "n<>l nװo_{vs{Se(ÞԽߋ8{]w?z~ {\c|ONN1x?kxWx|60^V횆3qS?_6Mލxdh}o Bx%n]/}Cm:5k=H鏆.Kؿ-̏.S|{w/m dL\aZ?MwX-ɿ]3/c0K-yU߆}hx6/ Ϙŏ›;oN&uv /}k|5T |_:kϵ<{Uhڐxx'z&K|u0\OSrm_)Eqp:7ٍi[۝!ܗ{gm˿'>\叞/=190s<^TZ/FB|Y|/#m?zylxx'ލ_BGxo$zzʿyVAo=b'&?8&Ɠ?[S&<^ic`{;\lv0v|x}lnwr,?6~ #x^$93axD˼3k~^Em;gXY0w G1;1sV{v}N<p_NDgOs60$mW%lT^k7q^c򗃯6wm-'Gu7F|`-GB;DmӟttIm^~]r,=w։os+;c<=ߝ1'`A|^<$boo1~ z{UwاMx6ʯi:gu}&œvxG4.'co'1ޢCȉ-v kb<}6n{|]:Kr/{9%9Ͻ0i![ͿO_~)a< :9x ||*\,,v'w^ċyq]fX >/3za*yosO0|)_[!f>sG~wG|Չ9^]/ܿ\+1;o770Mzą_ΐ}Rzj/g~.=mx8_O?t1EaL7s~<3ω_|ۅxoG!.仕N|. 
ʿ1P1^Ug'|`gx`'Wg?_$;>3ggyqE~v-(˻s<ޔ?+ϛ|>;ߗ0ƣ`ߗΟ|f.\g&x 3ߺ$; 2ϒ,w b8~u/YU?^/'_t ˱>Jļ&$L4c1/ǫGWuӿta<;0oT/uo+|xGLq.](q?*n=_{U3O 4lÝ|Os%ĕIܑ/_/{OAwly=K[q~MG7?Ϸ&} lEܩlGW|ĎjNxgy'Ooěz3"oO߻ooIh^Svq~pc=׮0G!9ޟl:>xۃG{^53|.̏} u=߅ׂ?:z׷5זgO9c|J9?<ߖ4?ܿ?.̷z` 0_3"a[y"gL9VoJ+ߦ0>m&9Ck^ޟ&9e+ZW^t֝.% Kgҙ'tӗ.=IgN]zGN݄tʋ.~eҕgdtKI1]<}a5.=Kwu>$>ӥCw#]tҥ9Hn:wxҥK@]N{4]@tK_9g 7]zN5.kPW\o x.$^.}OꢨK:]t^^_/]vV:v!tCda!t-ΩK'tItC:rw.}7ħKCoΎ.}SW#]:Kҥ+nn:ХoL.#t 71]Z/ХQtLTg`[r LgN?ҥ' tKI:KN!|V^!C>7u.]:Yҥ}tjK7{U7u4ԥOϧK.qFӥovKg^t:4]zL]\ON}Wtԥ? ]69tkAnuK_+uΜQ]ey{Խ.'ҕa:uפCo/緤ұˏ-o/ӝ3 Ott3Q.tkpR.ҷ(PtY.yҥ8~$Bӥo͇tK߅럺t~ytCUׄtřKWҹwtK#]:Kyw5t~z=.]:;}.ߠ{]҅O5җ.=8xK_ԥo_6s~CnunKW\tMtMK7ҥ=_S.8z1⨛Nk]yqԥNtSf~SnuwK.]uK߅[)t8.ު.]ҷtߖ:*y&]zt{^ҥKtLθқouJ>ÕrKW\t.h>STҹ.]yަKZt.=_I5җtԥwKG:/OtR6tUc+ooKWPBt.]yҥ.}uI&ߡ.}7oKW]0ҳtԥ鐩Kgқttҭ3ϮH~ƛۚ.]ucKO]W+N]y{C1;tM7ܻtqU>j)]:.]:u.~ӥgK=.yK/ɼEJХ9tSyԉO9yHK7>ucһHw:u%ҹqt겋3z!qҋtċ:K_[iҗS7uylC8꾛`_KW^t鬻t3tyAԥvbtӑQ<wHgN>uCyKtUy{Sxԙ+9t=tey>QTwEEHtХoKK,]zQ݂)w 9g:t˷t笓Z.}g<@a^Μ:|>)J=Kҝ3o:buau>7[uWީKґP>t&?CY'T'|M.vs<;uhONҡO 3oJgԥo).]uѤKtMI>TW*Х'~oK7tPn7ҷӥ t9ءK[wk.]Wt ]7K߅қ]KWҕ?dtߦK7zԑf76L߻t۫:K~Rt).]/寓.}Jo)KK%K>Kg wtL^ȿKgt]:{,]z%?~tꞳQg]ҵoOVKg^On]:w9^_KgN4ϧ.])ҙetGҷzR٧tғΟx| Ui8v>u߼]uMtԥ7Y'1]zn]m֩s'o\u\71[ҭs<ԥt:9?+UX{w']zNޢG]'uƧ.Dӥu.} ēv_w{]jԥ~{ХS'gt_.]u~K.=}.tnwҭt6uS#]zK]+KWwt:t|_/̥~LiK7]:uC.I+XJ>>H}.=KgNҡ_4H>/~-?:.}}.]ͧK~FCn|Vt}.]uSKH:ҥ҇tХ/#K:ҥI^txҥ3t<]:uK|܇ƫB]zes5Wԥo[o["Kn?dWTt'tsS+t̷7]t{tҩӟOuQ5өtC3?KJ&]:ҫtMyK:ҥ7› ]:IL:.ҥO>w~:tG>t?.JmuHy}J,]:LKߺS,m}8KuW0Ks .}ק3g_-y|ީKg0ӥ gҙL{ /ML]ҷGn]։|p؋%]:K|Ko-܇tTtg))]:F.HgM]:K_IߔMx 3|^Rxt=*9˩[ҹ ufKW]'tJӝ`uH~Jg>` @]H.{']{z2]zNԽWb9.gK^eoK߉t.<ҥoQt݈6]>Q|*ҥ/1]:KO {/]:aL>iϨK_(]bCmR.;ӥcJxѥ}t;tuqҥKE]ҥ˯ltE.qW3]:s/]FKr_Wg^tUԥoUt'.}tYw\J/[u50ץNQ~/J}SǩsG?uG#]kIo>tҩӯ3[L.tM͋ԑS {&,kv' $h }qg/XKjyƏxjꨫ9o/\ySϼt~GԥӤ3^#\zөӷu ȫ:uš>;a>ͥ3Ap~Zɓ:u3ǑS'..} ]~NIk/:}{M>k:sitWuIk˵[wuq}\z#}fΜ<gt]μ=K:s['/霁 Kw{{ؖk!_dLcM]#:uɋuvμF'/:qo}ҝ{`^:ut9[ץ@O^zsm>6:}vI3|q˼nv~Х\uwKQ'ѥ?/]z5=qz;o9}lqҥgԑ7ĕ߬kaY7hӝ߱St]t,'3Ƀqץ;'?}{pKwKKo*]st/u.w}x~Нqt]}ҫn 
9|\:yپ_ɟ?}{9u;.WtG>5T6ySʼtMI_懷K}^tq|>@ޟ>@l7N}ҥ?ouxҙ?.9t\@楧cƥS7Nﰺt\ҥߺme`f~_ӭyKWny5#edv9gJ߿.}ҧ..}ǥD2]:wYg^:n~8o0Gto_#>x\S`N.}x3ǛO%o'.1]z;YrUN|s[n/.GKwtW\'_| 5/ݼZ\e^2>E˥C#שYp2p*/5}qwCӐ?.=y-&kť׎K7̼t^t-Mӧh_.}Kwdt%/C}3`dL}5uMo?Yu93Ϝtq默w}onҝx k?.m\zMK|!]Mqk\zp3B7.]KoNѥ(uO9s|~ץoKwn.=>>.eӥ }u<ޟ.}̯^߭s_:uwv85sp٧¡7ǭO6|>['O&]9>~\/MN҇-\j͏ť\sGpӥ{;7O<ϼtO^zq}}\9y.}뮫mq5.}&Ͻ&&>t ;(]z;۩ku¹tsRt5y07Ig~E}9Iy:b\t}d^zK_҇]_]tof^ǥyKpYMoD >󙗞>>Μ;.ܛ{^{W:sWqQ>n]zh/>K_K~]K:s&/h^:3/=3.ytҗnZƅWdK1tts)n&.=.9X楯K.M=97Kn}ǥ7ӥ߮ |o\:Kܞ}߼t].ݼҿy~u5XS?]z /Z^yu u/}F]]=/y{vo8}ySo1ǔ|bv~ϑҥ;K 7Kw.}{WmV湻oӺf~w{k_p>;o楓?ޭw4Kew76g~qOKy-:y<3Oo^>t.=_iЙr/֭.tr\7kdnNԕ{|7~<ǥc'߼}S\틋.ݹt3;8y}ջ_.}95K\̼f <\ܺǥs{wӡt!ޞqK:Kǡ|Kw.ypۼW\zӥK:a n}I.4LO=K}q|ХK.: \?ҧ?]M]wҧN޸>ԥopz͚{<ҧ8뽷/FItӗqSW2/ۛǗ7/bM<9t_|Х{}\0ƥo$.=uХ/0qQѥo_]wKwq}|\zO\OZ~wRvNjӡqn/o|bdϹtĸtҥuC7K>K.}o.}Y/ӥKFsEp'=\v.˥ʭG߬GO:3/ut:o\?v:792Gg>:sM0uU׾b}.ov:Κ~9ǥ;']ctut O^zsԡ+FuKO;ĥ/{K_:j4.2Wq9GqwQg^zsM^֡s|8y\PNᕗ^?.gEwU_֥z9y\yt:u:09߿\zݿ.둗Kt\z8]X}̇יa{{ǥ3,]z泇KOxҗ~5Ν|#x?yE7K߻9N=t{ծC~tG/5/K7?ؼnҝ۠K/'Kwn.}毓7_ӡ\bK鮣W^:spɼtTqIt#۱K_t95Νm^K_:'>/׽~+tKױ=҇y1Xߞ~^tCNz}wy<[7\̺rw58״}ާ.}yj?xq黙w.=?tץoOKZ]߿<>xz~SqulM-_qӸw{tϗ'KoѥKϼuׯ<ҷNK'Ow;KKOKw.}˗.=,qnݹHK8y]yKzD>}ǜ/\=ҋyKgn˥Kg|oַǥ/ޛn>>Konz JK_u\K~G>.t?qI.֧ p\z8I>cϼ[N_^k_Х"K7/K䯧Kau/'/֭S'wÝե7_?Gz}W曻&%y gC.:uf>^v\z8] ХWKKǵS'rn.:7tFa>z{~8]}Eź|}å/Gq|h\y"ҥ4q^Kǝg7p8fҕ֥30]ҝpd^sp2o!/=C.<\g^zѕ?uitn؝vx|+uiҥSKn~.ҥK?󧷟QG=y8y25uw9;~ӥ=WoK=:t|柳KN_at߿]\7 |]68f ]zxuGQޞ=m"ǥ+,ɻY^'KO]gC1O̼XX/USԥxu]Yv~ߥ;mwzv\vӥ7uo?{uM[3OuG]\ut[O<_]qǗK>^tqK'Y׸t;GANW t<}q3OFS֥-u8p9tҥ<8ϹtOw3/}]MI\v΄.}z,yқs ¥ǟ.9bt~G~79uԥ3Nus|8jҙۻN[v\Sǥo~Y7H>u$q~~t:ҭҽ=uK׼{>]s856_e'׻w\OqK.ݼW^z|^t֧}y7[^kz/oקy~qgҫ~]_ߙXt/,t8ӥNzu=.輫4_E>.=k=uKnwiGKO on>/ uӱ_ݤko/nKo:spݷ}Cn>|g#x}q\z{þ8Kok/'/=4KOt烦KWnޚ.]gf^xO۾BGIwŹ&e#o:+wv}q:CNt;}tYg^:|/N^zסvGv9.׎KK7Pny>ߺKuC>}}tys?t^u|Mk\O7]yGzgz.}K:Kw..}f>}m.}ۧg^}׺t;>p:tӥ;YҝKKq]%Wt KwnnK7t8w9皾B}{u9uSn_\?yrm]ԏ3_v\n߷.=Ku̅m+Cd9N#\z3NK>۾[\{{ͫkΜzvm[~ե2?gbs;cX˹SwN_\p|c ]zw{ӹK 
եe^za.3/ҙ?]N^mt5\PK/s.y噗^ϺG=;os/\za/ӥu.ﲾM>=^]yE.=KEѥsMI>.8n>'8vw}\I˥~nSg;˼tq;_a횾tqUn[^t'һs}qk]ze}^:k]n3wfb=>}):1]z揗o~ӱCtyt5ۅS/ ]st˼</dnG\Nvu]թwϼCo/ג.=̻Kqٙ.=.6/_pKoo\Qn.z.zo?G{ @|K߹t7}ȇkq1/K~å2/}qҫn=ӥoN8^ug+:u`}o2Y[ԗ17/}Wu[/swN*;Ա;ҽ:ySy~?.}[:qh^:90ҋ/.ݹ&|ttKw~^ ^]#/8ҧsZӥg6r;S>hW\߇yz8O|oO?..}/'~Xyåҥ;H>tvM}^/䩯7;_vՏK4].~{y89 'K'G)]:sFӥ:pxpUqqߗNm#g_]o.}K_N9#96qougĥ zoSnoQގ3_{ǥKOw?ronnHӥ3`x~å|>8i̙ 'Kߗgsw}ӥ7qu}.|v]&\oqNv]9[ToҽNys;sRzv#ץKoǭלt^x}ty-s܎#ϼuo.:+[NNt2ҷ.<3z~~t듺tPwժCXGuwkԏK֣u쏹35sRs+VY?onqӥo|SKg6g~̿ooo8ru3׹֙ԣǥ3G%]v{oS/gd͚9z9G^.9㙗>uޗ]w^_ן oq陏ޜK/:]G~~9қGtz9_tf^z=y0ԩow홧N=w\ǥ[<)+ޟ~kå[q8uԥ{at~tC_NSzo|~qs.ץ\қ'M^:ﯼt/:ܳxҙC.}[ַN= /XG^t{+/]ǥ;A^twҋm.}ڇ}{{=Ν|Oo//{NݼΚz4n}C]o{Kżvҋ.y>w]]^7K6߻د}]:9/w5+?tͫ7/}/Ugz~ǹһyMG[OnNŚ̼Kߗ[o￿Z{^±+~gǿ?_~FO?_oӹ3+νT޸{?O:{сù$νtqEgs/E{1?^\Թpԟ}8>tt%u8r}{5^t~8({i܋N^>{5^2>{1^2={Y܋}8n>{OS^u8Zt܋^2=xq%]x8Bνpys/_Gs/f{Oc|A{>pEs?^8 ¹e|8s/:a{ZS^s ¹ν>q:nW(s¹b܋^.νù8pùt܋. 婿ӹX8O8*{1^{Ĺ&νs8qgcC==vEs/{i ^n]8Z}ksU7ν?sNs9`- s/9㛺.s{|ys/Ecͼ{,pcw_8R}>{?νǪs/|8Ź }:Ĺa^xspEs/8۟9^#{W}?ġ;ùޏ9>j:Bν^tesEgν8W^νTtŇs/oҹq^2>{q ν8^+O8(=??ps>νOs/~W( {}z^.y8sҹνs/Kes/tù8+o~{q νGs/ùby82w^ny8uɺy׵qu3ks/ue{濇s/X}: KGcw1{qν_s/7{w2{tKs/Sg4D^/?{YOpEs/9G!{?{Y\?Kqs^/ uw\㞣 v8s/O܋spy}s/ K^nv8ù܋s.p>{=pKw@8+>s/yXkҹtb.ν4pùKTto{9^o?Gopù%{>wqyĹ nY^rNB8|@y=¹^s kn3-p~zq}νp?t^ܽҹK=n:)pż":N^ }Gy3uW~vҷs/}\dXq^܋^O8ùys/܋8r5t%n}Q8bνpEs/m{桇s/]K~y;{1^[ŕg<8wϧ:w܋n^¹p5]tuνq~?ѹas=pz܋}hG sӭ_qy}۹3:3sXvEs~s/e܋8܋}8yXWs/7}8wt?s^ν7s/{}܋^^u8¹ys/s/ˠ^7ss/ҹW8bνw=??_Szk{aֹܽ܋s)p瞟o8rg'멋_ԃuԏqO︶ziw-{t^vH8g=qe|{o^YGߣ?uy~¹qs/楅s>νm~!νPҹ[ӹ[_ҹ'ν7{Rtzyשs|w~;b6zνԓ^u}%y{ӹXtչB8b=z^ܫ^okp5}8Z=e}{ީG/Cty~ǹ[?ѹs/9!{¹7{O:<܋8w ?Kx&źеS?n'<{νsq|ַoouu޿8w8ɚwKǢ;^sL{n=߭oνs/p[l]{q;G0 kԭo/'ks/ùO=B^_bs}cY3?ν^s/܋4x&ko47]pǛy~]܋spSm:j=:{uq5ùf~x8ZuqXS&;{8q目Ա[/'uĹ νoOWs/^йνpy=sD:ƹ[~gqsz=pEs7{qν8g8w?йsνsGnν8G ^^Ĺs8sGc~?ùνpqY¹=78s?wYS'8pY/ sLֳ?ܟϫx~溽ܟIIgMԩzof{If{8œ~ yh:Ĺiy8=܋sFpK#~Yw_/:H{ѝϬ6];{Q_8yu{\O㖋Sn= 9:܋s|pxqŹf8w/չrueù=ν8^y-ޞm]{zs\_Kѥs/ù'{~Kg>{:).ݿ{љ?ѹ[չs~s@^S-Y 
gs߿^+Oq=_ܭ܋U^tH8wueSv܋82ͳxEN_֣yWo{{/#ss/sS^3s/頋S'ѱ[/׽}qjz:+Sh{lփƚ1nzs#pOt{i6Wnos6u~[o9xqt5t@n?νs׳ܭs/w^yA:ZN^Թ8׹[Թ[ҹtŹ.8>{g;m/qWG^/[YSǝ?N4{q :}>6ܟ:;n?9Sù|s-*S:wԹ[׹^2>{)̙qԏzyѽ܀pGw߱;1{--;7>ùE:W $:N<{un:[ukpcݬO8veg K=qǹ%{|pG?t=]9]z in.K׭+Ko\^pNҳ-]Iҥ7W 6yۿut[u=(<\ץK..|^׽O>tꑿNљ_6u ۭT֫}\:t>~]L^:TKg.txkЭW56kuut].}y\:Aҧ{X' GK!]:N ]z$ǥw3`ZX>sOOmqw\zүK/qI:u8?Enޮ.I]:ӥXu|bҝӷwpK>/PgMn^.}q͓R>t>@ěuSq/uCyХ֕S'i: |Dz1_VsI;ǥ]nu:_\5>gdztwMzwoOݥfKu۾n ¥Rҟ:skȷƥ]rJ.gjuN\.Rgץע3OqN6?{=.=\q|ҝKY:utӥҫ=#ۥ9>CL.]רK9:.]K{ץKwKǥ_?suܾK䯧~7/kY>6\6.N]w;}vUg^7\CqϜq^/'\zwKo:uI,ɇ1ܺf=WI.{n`\։]#]z#_>K/:}.qrҥ_a]#>u]z_橧KǍ/ҝK<\zo_m}?wܛ­k߯GK[_<^]z¥jx)\]ۺtպA\3)0>uQym[q̻ť?SLk8x?Kҟgo~?Oåֹʟ:ĭӾnp7+ܩpcy=.yn7b=gK:nөt;kl_qm_`=c_:>ҥ_>m_ҥm`ә?:u}t].ϯt1uM%ӱե3o=]z:˾=e[qҫ}` ~\;.#Gn|Kե{w\G߃.ݼf]q_ѵ,{ vdzrM]1;}dt.6]kҏ֥Ky-.ΝqE>zy\zוGx֥/qťguKǥ_wBA__/}}g˥{E_~Nr;K Ouo8zo?z; nY_85˼i8tq!d]eԑO]zqy=v8pK/m=?묩ӥIܡKONӕ֋5[g~ץڙ7KOW'Itmst.|Pqz}_0=Ngs=ӥKv\֭no߯.ݾpC^t#n.>?]}Ϻt~åue[/׏z/׋yҟY3w]:r#>=.#ZXѱWngԥ~\ҕzN.ݹǥ u۷KE]ϧ.}o.=K:~>¥tҧWN>E\6}'kG7f9ַǥIԥK[ԥK=spӥyK/扆K{N}ҕp55saV^uz~å v}ux/zɸz8_қ8K7Cs_޿KNsӥwe_arS?Mߛ8ۧ/ҋNG^^.=~pǥp9kCzD>N^yno7KM3~8'ұO~iM}3ֱS.:}~U^tӥ;(\KO}ŭ:w_:vǷҿ3ohp>ǑJt΅Х;C^>@8pӺtҫҹv.v=_t:M]NP ^N_\יS-DZK/ .8/]yֺt.ꚯO~ߏKrgN.9tu~ӥؙ ǥ[ť:]}:}q1Dwlo~wξھz.=K.΅]䯗b7}p鏋ףR/yt3'}o]sWpS~^=<䯗cַo9Ofo/.G.=^ϙ^[~K/ۼޏK?uҝ3K|Kw˥wܞ3]:.]GKw..7]ҧصsMIGqu#4>tUG^r]ϟ.tҝ;KtX.}q[K'KF]tq>'=~#t??u^^uztCw};vz{Og}Jt?3/~{<3yѥַǥ\ǥKwq\7\zޢK'2]sp鵞#_W\u_/˼tzC^g=@z^^3Lz):+uqߗ ׳Kd焑~s7<\SХw]|w+?]zә_~RqUwNX|ҥ;wJ꜃җz>ul58r?oE=/u\o<\z:ts998m};uz`KwKޞvtүKOKtYuGK?:..뚛[:tw9:sMwm k<їOz1^y]`pXt:\z8]ytK߮e:ugǥ׫K7Uuue̋:rGĥߺνֹ͓W.=z;.}ȫg_|5[~owOΙz}\4=]ұ_ַ?.=LJ]\]:Wƥ'}ynǥ7ågN]t9C\t:5sR4ߏ_.=>>N9~eNǥ36]z|~O~>.Puҟz]ݒsMkW]ӿ=t3krKqչ~'=?_׷n?{qzſ]zK_[q8fr;7sʦ^uIMwN}.:]z[sNbݨwzZK/WǵKON=:>/Sum͚~;7қ{ۅַ۟sr}ҥuM{ƥO.KqYO^su[wNvON>w|եSM>?O3OG]; smӥg^e?xusfu kb͜x=_.ݹ33ǥ>n}}G/ԭo]ϘK޺r<}Sz<'~^iΩv:[Κ1n>\tΕХ;7Eߥs3Lg^t^.}}Q~|ҙjz~Хs{wqY:trpܮcǥ]:oooe=:*//++s.}̛saSN.˥nq\z?~?ңҙ.KYSwYnoo8utc$M5/]Kykխҷq.|\]}tKu]:ם;I^t~<.낋}{|]V^NCpttҥ/y\zסGst߸+KyMgN^r'[~[IIn.tyE 
۾3{>^z~w{Ƈu3^?^.}{{W]S'.=Эk!܂S}e~Щ׼tһK/nǑy5}vμӥO^:yE7W]NPuut.mW~wX_:Kg~Qgh4#~W37mIyv8tSO|םNuIΜF{߭^uߋ.NWKۥ9K/p|zGw~O>.>Nzsݓ.]Gvқ8{söo/=k ɿ&/ҩ҇N}n.Ky|ƺ1ٹ=?.}3Bu5on/|q\:䕗m7]z9>/$߼Ч.}ݲP<8_+K|}{5oKI]z:[g>^}K7_YJ#]zig^:y|>up|qo9nuɚ:DթSHn^˥?c_KzON.=Cƥgat来Ko\o/}~?y7׷gw̙]y pɗuIp홗ΜСWk[q37wK3]z>sq~1/ݹ WoZk=#_t2/tuk׼t~̼܂K=Wtp)[K<җn^|?q.y/ҹ~NnH_ԭs|p4]^پX[wN N8]q:]]yǥG.=u鮛y5}8r\7iw?Kp_tJ4w[3F5/ptywq链2c#nH~.ݼA]4_>ypy.}\_Y[g}Ⱥt~uWK!3/} >o]`m>M8y-8ut_5νk^vtc>xҋn濐#oμK|twҝ_q=Cw׽?}t^]:֥͗7e^:e^Vc"/=k!ݮ.},;stgqco׸Kӥ_g<r)']2~fCL/r_ht.}^./]:g^6^tuyMK?q[n:x^q8yۯ .=缥Kו~ury}Kߺsi{ַk\z8 ]z晓1/.p5.w:եgKt~ԩ/9}\N}{_]+>Sgy{ܟ0/ݾ_]yҟ9V~K7]nbMG^싋_ҋyq/gKo3ގG~kxGUe>ץg{t3/rM};O]}:N~:k[qב=>n]3ɓ!N~9un~tԏ2/.}~.>W^zޞ;v/s2/==N]9^q|wks0]ogz~Kw.C};]zg.ݾt~uw)>zoq~n}kss }y-?RX35>N^֩~Nkt᲋[w9KOKM^trqKǝp3ґ_~ȼthҧKҭҫϼ/~˫;]z:˼tJ^30ۋϗNz)..=/^Y]z:q\zKGYC^OǥKױNޑ.åe]zQץ_'[gn˥?.=U..뜯t: EcttuӥW»G7.}s|ݼǷuu6.6v)vt[]u3YR]sK>G^ᅮ^^XK=>]^;ҟҟVӥ7b9t=IZҝ#a^Kwn.}:rq楏Kt9_ƥ߫K/\O8/Cky8^]z7(қ;wu֙Jx[ⓗǥg~:}](]z oԏLϼt̬ԯqyw^s}:X_w\n}üǗn3wj^Μmt:trK7]:d/uI=׬o&<\Kw..K]st#O^zvS^z~\zSG\z=]W]Stct8uѺrWtԷz4<̧o\ [g^]s}QGKO׏KϹ{KwNy|uuԷ.=97~q%o~].2<]:4ޏ[i='N^zK.~_:r윻:Y[v;gܿyÄK?ytoKe^:ϺE:tn'W,tqk5rk=/ԷqYm6tx\}o\3o7S߾?._.}ʩoxo:s\.r@!mz?.әo/楛.}O /7ҥg>ґw:/mogNOu5Sp;ץO+7,.2~eNYaMtu;8sWu/:y.=J߿.2ϻx|KOg.9uNZ#O/]zå}ҝKϥK.y~K핗ǥo_8sOn{^>uԷ7~.}/ӟwK.|L\z5Zμt7>:y2ҋ]n85=>oOg^^ҥSGI>̃ϼpaKKA?yg7u '\Cܺ+Ϋܺ3̼t捽\7/pʚy{FpռA\z-:N^x¥?/ss>^u}qvM_a Х7]5y{|?y~2/Wu>.#:m\Z >K}\yútK7f3/֩W;ynKݎ#\A.6q|Nͧ뼘y.蒋 [NB_=.hK.^&//|ǝ_G;/nv~_g|t[!6NμtK/t 8wu楓['UnU?>/җǏK'ᕗ^tռf^;y?\[K__.9^:}{¥?y?ud"måz;}ҥY߱k'楧pt]+/K/7μ\'vqם.yoқ.N~|Ͼ=9/UnߞٟW\z)]sjt8..yRqՇh^z|Kot=:&\zͦKǩS7ϼ+'|_Oӵ//KW+:t;N~':敗.ѥOqu>ޭ3g]ӭve}:u ҥy7KNy{k?oq8>\2=]:/u鷎G']zݯM]:߫ҥOKg^[3ӥ:y2_¥?ǥ{.=벺J_+/}|\ߺtϧ̟_cn²nWKӥ/d^kƥO0yS\S.}ko&ߜ<u.k]z7?N:]z=GN>7Kƺ9o&/=OKqҧW >1ҝӶuu7O\ǥK|K<K_:xGpԝ7#tMw~Q>.8!]]뽣/;?Qμr\7CǑtܸwy~>.O~܎Ktqw֓9Pq.3//n{K>tฟ μ+sdN:'/͗ѥ SʼKD!&KSn˥:#x:t].ݼm]zgbå_:vd׷~vnבo-oOL:sp̸t¥.=畦K7_'\zA¥|]O~x./Kx߇ӥK䥛q\zu\K.uw͓Yk>5.8]z1/|k[~t)fqǥi_\ӡy'/.G{EnMvsu[>ǥ۷j^V[åתső'KouרGS~㘫}Kw.Kt 
u楛l^}ҟߍ>OnqKgzvG\K/:tz֡}ׇ|7X0uҟ7_ݮs>.ݹǥg:}v鋋ӑ/y|yKK6ߜ{|q~֥K-Kҫzԝxk~rEΜK';\z^tx8\z+Og>.zEix^?.=q΅Wt]pҧG^un,.KwΒ.ڇ񞳤K ҧǥ;GGCvt.]gK7ڼt~IC]aBt?k.=:~ӥSg^S7=oՑyuCOqyqҥyKC+\zg͜TqEd^n\tνҥ{=`^z>=7k58{\K0\s3ߵg7\Gz6EwND^ǥ{.Wt7sV;k3ɗ_y}+.s}S.=*dbq;9[N~~C'~UҥKO^uq}Koǝu΅յ[g^:s?.fs祟_|?.}\;.oեߣCХ.tҭKwN.=3/[O=z}]z\Sii̭[=_.=ҥ4O^;^~~_~vљS:u{:uKtSN^pOҝKjy;D^\:s8+7kd8pzy3qCw^߿t 9hӥ/\S싻XS/#]s'tuҝru΅ձqM?ԩ}?)]N>yҝK8t~<]zjqo}(\q[o0/oTyoUnl#xg{ӡ׸.8sҝb^zKGNw}Ι:^.yw;Νm׭ҷzTvy{[qqfM87uҥ[ҥ\otu۹ǥ9>7]:dz2ƥtC\|{k搲K'O~o޽=8_Wq]:91ҝK;ǥOK7oX.~tN|t?t]ߧK.{3/9tҟb=K_Kwn.+/ݵǧC'KOw.KO>>ӥ7Gԏ֥StZ?돓#q~v./]k_Uӥq8m};:K:u']ɓq.}˛nztS;E/.vev\l^}ܹ:uqX3'_][/q|[:t..cdw;k,OnN8+gqI^åq鵘'y镹jywgNYgMyӕ㴗9߼-KOW]uUwNuq;sWsǑ}xKWȏ ҝsK#\z^ϦKSKw3@tz4緛+ƥom5wg~]NN+/=<ҧs uU>==sHn}s}_X3t粘>|+stӥߟr.z7.}lXK9bvsMvCnt˥OK'"]:ӥ{~ѥgsRo/~ >-}g^z|k=n} [￿ZJny~P:=?/׿B_D?WoQox}\哿>ҵsvQqW{W>ù?ٟ!o~9qo [ u|U&/~k¹?~KW/#N^>.~ǹ6ߺ8s?\=pX=NqLJb8Q3/=ׯ ~_st"_ku_G]DZ?өsmp\֙Tzù~q>¹?;:}}qM]#us=cb{p pߥ_q|a~uopNܟ׳I.7ù8ǹߥZ_Aze>9>^ù??{]_7x.w>X+ָ{9'bpěÉ^v r]Ź8n@B D B npZ+볿_lӵrppers q]o羯g{g Tڻq}?/hS|_Kr\yG8F׽!ꣃ^q?y{eY};}N=spxh|W>wc^_R8u?sߏWr{,ߩNrg}/?_֗}wz?K~Qks/֛^ql_{zڏν8/ν:ν8=_8B={^/pes/sùpgbP,^/s?w8}n羯ׅvkx_zby9>\b{x?r{<^?ʧӭ=ܫs]b<o;s~ƧIx^1νq`kn1>o:ѯI8=^t}?~{{ H'b|n0[l̷scgsUgs/KߣFZ|__\¹p|;\-΃/{ܯ?~pe9tEù?t\l$}9ov~hq\ xB^.5hG(ν7sY #~=yWN}o<=|hDJv};z·G/xL~W}}8{lp5Gjf$`nS}}g/LxM;~ùg'/۹˱h.'7N}Y;>l{s7sǏw慆s+y8>#7~soB:åzd$ 쿧S7}c&~Ys=ҹWqs9]/ 3߁w^yW8}~֟y_pxh{S3Ĺ)8F޿/Oۋ#Ǫc 7[^Sx6]{n!@\ùg}N2M<:ۻ?;su_ν6rs~o7z'^N9㽯c|Kďs~Q~mn?.:`A._7>{z1ϖeZ/s/:5=olq?޲?=_#so|>{ywxJߗ8|߀sy8s= }qw^ұy_7_>}pO<_:K{}s+ƥE^Y_c&k=FW}OWO_gu_ڍkx ~xryr?q>];߷޿^=8}|=6G/׵rWpOgmfly8}~W/E6ƷW\8}|޿vǟF|"mx8>sΝtg}?/.w|ݣ߄sx}6['~\tՈW¹IN۞tt\z<ùufOߺm~}ɳֹDn|X?#[Ĺ?Ź3>?S={^¹o8rd8}=yi/s:<_܋.;y羏׏`N%-ƣu#>p{<^1L8/~?mùy%¹S8j|8=޺QF^x_s?A5>/~O%˴yx4yl:z9帞sK{G|羟l<}7g8|{u|sb|?AvOgހ^q{[q_G<úwپs8|Ҟ<,ڋpOlߣ+?i8>7SOx/~;}^>N{k@u&pos]{!~ko^R=_B8|}:鬷>pٮ{8|d8~sН;H8=յ+_=6Qs؞ۧ['GO<%ޛ,Dw8"m=px+sާp~ 羏w۳޿w8s/m/G8|s&[Kķ?O<u9=^G[~D|yڞ1ގkw{[ď >^;{ ~{8^wꩳ}S>k mY?k<6.?>#y 
ވ[_7s㕇W0nю?4¹xP|1>>u#x1{{Z:|^~xZ˫b훺vѽ1^[8߬{Qlw;t=Kg39¹9K8.qi%OEùM:xYoUwKg㺾s/:J~K^|DKùzpy¹n|k'?:/|۶R3>ux&{zu!'Du puxx=/۞=,Cs9k?;8¹=/_q~W⽯5ߚy f[W]4^{g|џ.7{Gn>}/Wʡ[/L->2u3̷=g8|?sI8}uu_qƹE8}hߴyǹx3s~\ױq|KݷؾW"ޫoԹ#~zďǻۏ7{֛џqyu3ökW7.-:vX83z֧"y¹{_z\¹g>=϶zup| GNùH3S^wWw>3>~o:8}m֫{O'O<2]h/M>8N[8{ډ/]/Qpl^6W?t~_'Nrt] .=.v .=.Z]z?oΟX?܍.]wZߙ9a]}*N]z^7.puM<ҽϥKw?CK/y _Νx5n-]]:qt.9\z:6\z:S]:tǥ/?N\z\=GzyZҽҫPu/peYNõҥrԳǥO!\Gfwt9~>{o[qsK^qzqҏ}C|:t.q'K>^[7AK.ҥźjb\G^N}Q^.w^!NyzzKx֥ۥztӥԥfqfqܸ7~/yu>GkK7K7O']7]:ytǎ^[/7s]%]zΫgԉk|:ѵ_oqƁҥt쪟'Ϯʉ|:tx^u%+qKƥW^..=ҥ;/.t3o>yq^{7n ptޫK=.ݸ.Xnxjo!d".~}/^ݞpYk\:!>.]Kt_'] ^åxn^Atkt\z=ǥå;_.ԥ#]:6]z~\+[]O7c&/n.=iå׾̋G\zK/gХtꃧK>SptJCѥ7sBPD^Nn .8z\:5;ҙ7G^:p K uǥåI^qnKN`M +\z3^dbӱc¥ҭ߁KOעKz_.u9E6.X? n9]zgާCChG}ϼȓ֥w\:,7?\z.ݼ@\z?űlSnK/.XU^uҝ'QS[եK7/OZ?<\zqg..=#.=]ytqoH^KOGKI]y\tҋp@t:.=/l8t̓J=r\yt[ǥj.̳+'ѝwcwK7D.=7ǥW:7~?\zl8t!]{/t77 \zQҽ>3ul_ߏK_spa3^Ʃ^7K/cypҽѥitpK7.}1ǥS,0n8wm]t.tҗ8*\tt.Nm\:yK׌S0.=?ƙTp*}=twp9.z tиzc'M}/x6M]yw.XY.w^=t;<:toq.qϼať{~}߫KہK'][w^X}\z.]KOKy ޗyqs{m3O]WqY<]zsh \:ytws^]zۥťҭ;.yItqhx˥OW7=u˩/ߝwzyam/\pK^x{ޡ.qߏ6k< ѥn[7?hK\|ۋK/:O<:ttģa%9שn_.qyaxytďiy!u\:N)]zӹK_pup9o .txZN˥S}^nNNUN˿{͏H<Xe]1̻n?֭ffu\ҩ3go>~ץBt9.z쯎տ~7.Ήlx͟H}ñ͇ץK7~.zút}.WCgn~Gԉ?:u򭧮x4oK/#¥W*Kg<.ϞNz:s8ccǏK:p_uvt~\z|p=a//پ+g~ŷӥ?.yҥw2^~t ҝR.Ɵ>:n>lqåWҫ?qOtOr\]=29~uҳ:pZuҋ'^N=n>ԙ+<]KWu\zg@q:CwRrJ\z-1եK/{qߺ\C/1.K7rqKoփSNyfAvgz9)#!6mȳ>;uΜxys9.=ƃitqҭ{qc.=<.n9]zܗӥ/]zҭ_KԃХ7Z/=i⩗ۓ ҭK8tKY}':t$ q^:ܼF6y8yls0t韮9\/ftC\z\NnzU޲ˢۥgD4pNpt.ޞy{:s7ki:N^ߢz׸FӝXK7.҇u3W]tgl.=JpzƺG7ϕwiu2te-K7.K^c}.텃;.K:|;^mbsS^#΀z1qS׎#osɼpHKw^]9.=_ҏ:t 8.>+s\zEftw]:I=q޷ӥO>.=ki>?/.=]:u.]KХOgyKss.Xgk]җ. ^z?֧r{t-8}_>KAsat^zәӁҽ? 
s]zawq=å0]ҽK'ҳ|)z8.륿nKgez鱿ҏcm72Zt!^Sҩߐ.guypy>ҋge{K~.yqu˥{ѥWgWwKt_﮿ʩOdz.9N+n_tq)WѥO9.z8㬟?\z^Ot֋M<ӥSﻘgzǮV:q҃WSkw> wKw}q=ӥ;ҥ:=~ƥia"~o\y4HZԩOmt߇nr.vS_7z]GYzKY/=oǾtMwN^ҩn4MwN#ҥ;_>.>tKގ.=aKgqs}_.}K|Kn=>/뽇K֛;?9yvy .=Ӂw|Yz'KzKו^)v:y{5gy.t.0lK7͋:tKO?n^yK.zK<[WwN=Y_i[<1ҭgKo֯$qNС?`x%߿\z:uo^uYO^\N+'/'.}7ysǩK//q۷hSO&Fu7z.%G.kӹ>^oC+yqY?['ώ?<1]yx<K;z:sɤ['p..ӥۥ>yl{4]fFzƥuiC>^]׌o'KϞ.=~o\z^z3pq/ẻK/8֡WiNj.]K~ns nrYOҫҥܟp_/r|K_ynKN^/^z\慺\:~ЩMn8tAc^tKu#{yR.åG讻yY/<]:uE~)Kg~~]ʫ^:Z~yW'K.pq˳:=\zҭ?kt]ХuқOt8['/GOp˥ot~s\Сg]:n]hS6PwۏKKϼS/ձߎ.[ޏS9/.Vg̋KK-s|Kw9w}ۺ}K7ҭK7~K\:yY/tY/= H ]z/|.'<^cޫ;M=/<9GK:.=tOҝ@޸>қ9j笗a??.it7tC7K_֧O<>r.'Nc{_q[/ih|`ӥ;/.]/]~דI^yq H}Oe]z~/y\K~K=.>c+7^>i+&]:9Щ?\z:b&^v^B=>.Ӷ^ե[V?ҫҫqu?z~_]O&Oy{pc:˙^sOw.'һ_tnt /]c]S/}K{yv:uw.Mzse`8xݢMKu_וnn|?]<.=WpCozԥweS/x .Sskg<8tq}^ҍKOߝg6];<.x.Y/zd8穇μ1ĥe}7x]zAS7!]zq܏.n} KzKyH3ctI9q;\zu\tsY;o.yUtΓbt/.=ǟKyn.x~׌םvx©Nmꥧ3ǥbt>uxy\zw_Ki;Νy^o?t7]:{R/}gN}3:.=/\p9+$:tg=u/Wbݺm!}uBxtO<.=ӥ.y)qOKo]t1^?ҽ?ҝz!J8 oO󸾴Kߎ;s]C'K/_7סk׹΍W5Ewugv2tKׅY/=]cuKM5qYyWΜayRõo)]a :5Z/]KzK/ēzm_.=KIeK/]K>ҝG뮗&^^ul_Љoۥk^:tGƥWU\zӶxەqEХ[g骗\o~S/]љo=tmI}hM⼫q5:u{mӡ?n:ty:vꔥ['n[KtZ/N|+\.Kӥw={to~:ƼW6e̻dz.~k0~꥓?닟zy\:tuK?ӝ9nrqK/9}/:tďN!\+]БK]X,ηҩ_K'R^BY_SNq7x/;yHq}t;?Y/.ݺtwExK|tnYnܗ7֙5\..teRK~8']Еot[^uʘ˺bvҥ/K}.=t^z~e=OǾ"~K=3N1ϒGx©C'~<\yzzq%kgvsKwଗ.:bߌ[nߠͼ8f<\;mvQ[+~\BKnyRu8rq|븾μtY?뵿g}xfY&=tGz.xTKw ]w:c'~֩o ㅶ7:v+|8's/]c=p.x|t=/.[Sg^S꣇K/#`tL^t uxҥW]k/:su:qvѭ}әgtwe}yc;OK__.}Kw~]ftt֫ѥ^:gtgW.sy8ХS^:qӥS"]7u֋^z=m8ɣ;Ǒ^zur}k:s;;utҭz\z_]vq~_]euC]'Nt,^s׸N?;𪗞տ~{:;tG7.җ0/8?\t: ]˅ηߛn_8N]?KK/>:νr'֥[ozN=tY/}t f^lOKqK^WY:s{/{,ۋK:xA^us|råj|vppѥ7'\z)'!ҙ+΢K/锫qK>Xt-ХK7!]+]zӱK+.tE7ލkۥ<tCҍK#\N^a/^ ]w3)FOR;'N ]G>]۳^zM^ԭנ}s|KzKqrz+Kg^KUg grt9?S.xq0X/ҙ8]ǯ.å?K:k;o.tsxveoSt!\r8a^sX,_ף.Vk\|0M`[ǹ>K=iK|^u?ںySK:yq,OM]<˼ҭjt,t: \z$]:qz]zGKzs ]tKWNvϠK7O+ƣMW^飍#gt}KwtvSѥKϼ9]yZ܇npy^r\r~Kwv6?yqmå;ץg.8ҩg>ͳKW{O|tt^:gϡӡS< 
u'K׉z8>;+:ux\:g7mۭu>|rMScn34ӥO9iyA=?t֟¥a算>o^K=.ybz/j_sɷts{uW~ӥ[/n|2gVKhOљ'5qy3չ?o֥g1..X.|~yaނS/]gn=z;tt^ytt..]wbt.:O..tH9/}꥿k.zqye<;^zwhq:~\w8~t8t]}oқ]zӕҫm5O{XߜqfyW5ҫ/=.b{ΜyagYϝ3^ԙߕצKYyHn..u^^å=z:>]Y/Kuӕ=Kz3Oqus\:ӥg}u._ZpvK]ϣҭG.=/?aryv]N=}^SK:q!ۥCƥ.zǥq8u6..]z.zǸ2Nt[~yaS/}fWetK73]zv1nХ7o]qdpγ^zpK%S/orwzOmwZ/]K|ץ/rNqOKO'ߜ'u_֥W{xy|_O?x{*H꽿/t/ڙwuʟ{&\uW֥{?^ۥӥt>OOt?~e>o<Ϻt/t)zu龟ӥstx^.=K^z>}8_ķG>5둳h_K5wǝ.}>Ȼn;^zƣq|pxg.=땇K/s/>g~xs<=+!.|9Oj]cr8>l%KgK_ٮ]tCӥ[OUn]]q .M~4tDszM#t ǪK_ǩ.D^GҭokGG߲>|8h\zփ^Ot4.;@79?8z|p.}龇:ҳ>:/~OڶY?K$.2C7K}atҩ=u[7!:pKU/=˥/=Y|pKҳuqp:eEN<_]uKtov鯎=\zS/='ߚzzƥgm;/.y]^.}qU׎o\:ωۥ<.}ԙw5S+қ~7qәgt].IY/sԡ}Ý=qtu拦KtK.q_K?zY>o^=ۦ^37_Mz]WỦo~3lo['njK7?Gc?tCNʙ5룷Wi}u~#k~gWt.=n=h먭~[ǹgsSG?ұozå7R/zǥrMg~on|9]륿tUg^b6V7ҥ?ǵS,O]z~{5qY_gKb^:NҋLX/yQķG:rxr7߿[WlrKK7.yupzt_םҙ76ܳՙG|_.uq鼏H>3o̳d{c륧;XƷ?l_7tg^ķKINxԵxԉۥ&S/ӱgY/Q]z}N}uK/\uKK /qY?ҩҹ>?~1@qBiꇲ?v({EO??tV>y:}x;}:/Ү<\b8}8}Mrù\=kںpāp3,vN*sOp?r?p"_8}NGloo87ڗF{{pw:phr9w.t?Co}c/N8}թn_ӵu9}9yhEѹs{svop;s׈{8ù7r{tcs÷B6{8ùpη8^ùo. ۗ;@o7Zc^-\K8}p/r?wD/C8ss߷˹Fp}ߏKK~}~>C3 }_71O@8w7t{8ThhZb8/Or}8¹s9A{_}m{I{s=@fxG\¹8¹6w`Xktp;OEb#A_שs翎lϊgTc]php{@<W8wt p(sD,gOOg9=:/I,t?խs9>ۿGh֧ ۗsS>]\NGG羏_~9~M/L{{^s5zc/ngu49OEy c6 ùr?NE:pM?}C}WVן1t+ƻgq9}?|hKיw&s?p9N8=>W=o_ _`<Ҟ]}X[_q^8t5>;t{kyꙏOG=>x='JW>=b<g¹Q^bI=~_N}x^sᲇvchם'$9¹nx;K~1~Nձ|xW{_-?z7N|;}=bt389R8wt<ѹK{Ǿb sqs?p?~{s}4;}|}[_9¹sG];ǵOۺ}=N\_'`և6ڍ jksCj}p?s=}?O,6wW^Oq{|S/ٟ8|w}}_1yȃ )_}ЭWuo|~v;|[by2~o\v?׿~s}t?{s'+_g=p"~¹ttwo^qTGsS_U8?ùՠ]?;u/ˑ}{{5+g7=8|Ѯ1:y9}˹[/M?uo8uu}_toN+';Ir=ƹ끮!">۹q9}}{cpzΝׇthW#5q~'O lTh_2ֽ?ftxt׵vcEB>b]v<#!`/w\_=ԩ?1s߯_˹o{ p_ț{~G~^?EOGǼ8w^ѹ:!yՑq= /{{c`Q [N7~i1~~u_wq+y VsIv/s&Νt݈r+m?v#ƃG'qk,~_3ފs(ؿ¹񽎝xtw87Ɵ8¹nx4ۻOG?:\xozv݆sEå_wڵx.}/_?|s?xpؿ/l1^/]{+c~?]g8v=߷x#xs:xwsĹQ=br+֧>v{<[o?}Ѷ??ѱa8}=ӱ_=cs?TޢphO>9}|џp{^Aw8}<43ƻ?Əy7~^-鏿׌o?għ:tķcs?Ϗx6y>{S'ۻ;⽝;ޞ~Bszď}txC6wF|{Z}}qs9N$y¹F8=~hO[lu6c[?y3ߗsk{x8\=wAkq{}q5s}w.pu9=ދϷ;^~p?}JG<G~?O<}3߅sQ##cȟѭfOi8}ѱ/w9Q~߰M<{1|}?O8s g}8~CO|ڟO۸'s?pT8}h/\?84ow|<9~¹U_8Wx~8|Fs}{[O<0>^b{Yx;I{˝w<>{<_s?p'}/y|.Y>u#{8&?ztƣew}?lkxupsF'8wu8{wp}i羯WἫhf˹Zs>ytz0i+_?o 
euX?w_,=ߧ/n!m?c1fewotģ}s?[8<q{<{'_f}vz5Y?F~4:9¢#߃Ϸ¹X/hA¹Wg C0:F~5ƹc'~#?OsG/?Go]uķx_m{'pyknGy?нsN|f|F{0u_oëqO7tO7tO7}¹x4K?[lo:v͗g'^W_/xrp?:ow~qN(}oGvO¹xh^^i_p|s?8oۋvvk>kW~3}\}oKoMi<:ηpUi7X7[,ƹo3羯Ǔ*{g~678wߋ$cvU߮q5:j~9 t3ܢt-xϯ+̃u}| $|!fG{DS>.'^חuy3\61sDg7O/M|gxî_/Ĺoon|']>c}E8b2Qp;=iz¹營7Z/kӥ.ՕMn=˥֍]gw~_.]m/h{:]zpFɳk\z׵Ks|e^ogpqS??I;yq>K.FҗnF\zG\".ݸ.[oҿS=..W]7yY<]:}ۧű?ӥKsp\:n{נ;}K>.qn_|?\;'nBs.}8]<t.}:#g{ť7eܠ>={˥Ga]q؏7tO\w>u呧X#O3?LtDt惺\z8,]Ņѥu֧ץ[utХ.nԥSo']5q?菶.Ns>~=ӥ3k^å+ĥoql4]Ƒ㾧q˩^#r_zٸtC|3O1K'/]Hҽ^3Jԟ>yq/\zv﬏oWlܿtWKo勺["//]'.=mtEһ.|p5~8usե<^qN^as8.SO^t:tvuӥ?.}n3ӥG.{O{t腼@z7wB^NUN\q˼ ǥ/} [\qЏyl6q~t:ӑХҥ2]:yҭ_K'n.sҽ҉pҭK'Sިw>~m8k\7]8Ĥ#/:u .cyv8O>yt\:_Mw|0w}>n>]9yv0KN;//tҥ?ǩwylq/t%KXy,*..YMn0\qޏyvK_?Mn.˥7#M~Wol8N]o__u%ǵǩťWpݼ\ze>p]z>$.\uC.=&qWKout]ҭKǎKq韟byGt3W?.]r\ҿ+.c+ǥ3Vt*.{)vK;ť̳t].]K7B{=]˥G.]GKg~t.}qyIn.\_8~ESХ;.=뻿kʼSGyǺt.=O5e\.=^KOGK7q;:AtH92rԥWv)vݿtա҇u~/]wX漫sq<N=jGw\:ξ9*[]{}KCХ_ٙהOw8/@CǑ㤇mꧏG^2]u{t_\ҫp]KuĥW?ҫxU_k^].XnL\z:0]tބe^\_ֵ7~vy8{u˿gSԉ;hK.\?\FK.}åӥ܏tUތot쏖ܶ_..n>uY<Ţ&ޫå?ǥ7KOK6\wFqӥ<ҝJމ7{uOu9ӭ/.utMn}K]An}8]us\:7.ֺtt5t;?.8Х{Х.uxҥSONҭKw,\yUKҿKA7c{:y{t鲻m [WǥKe|r\zE\zҝފåw.Zoq>]][K;.=u56Gt韎yf/{ťѥ[g\zHƣxԭOҭ:p鑞tuzü®Sgצ3'u6tqu9|ЉG@^?ttStcҽtԋtYOJ֙5Ϻtt.zϺt]ӥg'^.Kx.˥{ԥwptwn=Q\qtxtq"?EgNzy\:CqSW/Kqx.=wüeyWu ީvһTқ6Q=\/ҍO.=IJ.ݼW]gtyu?ե{ƥ¥bpUKx.ytKGxr+mo]N\zctyf{|ߥ.s5mqK.޺åty>sKx..˥=8/uCWN.]G.}Nr/>t׸tҫq߱ux .=Ǔ_yR?rtzzƥ{ӥ|K7:]ҳ|{{>:.M.y*t鞟tXr{?K.Giq<ҿʙgKw|K5.=2Onn#G}pqoSϏ.y9t.9Kw<2;u83nGs.x2.=ǫtK~K7t\:qt||ץ_xyMw~[n|.\z|<ӭɷ1.=ƥuΣ.]{tKOK'.~xX8҇҇˫᪫hq5to٩ n>Jt:e|_\Su=\zt{ueKTo ƣ۲^qխSWlԙǵuԷ󻎝x[wǥWoK҇#\ƥK/8']7rpnå;o.t#֥.zZuq{c'WKwқnީsKKx[n~.=w\z^.=wWޭO>\n2}sԋƷ}.鮇N{Ki]zױѺtۯmz1?)"<cӥW'.ѕwӥ_.]ǨK=vm؟zݸ}tq8]9upҗmymKK֝uӥ/qХS1]Эwy\qqYN=g]:ĥK#:v{u};:sSN<:5+XҧL>4]uKO}A;.y,t?ӥW/.TuY~:Nz9i]zk'?Ʒ/å;xj}b>5.ݺ$^tSnK_Non~pttn~.wt=.~7åχKwG]z'D~k}O~-NYש}q~ѥ urĥ[G4]:GJNtg>xѩҭ.Z]z;yao<һ.qU?o:uytaŵKydp|]zt 
Z^|҉OKtM7G=_ӥS+]?\z~p-χKx]s\z[?.}Kץ˥.zmtqKҙo']zП^>q鯎M}t\҉>Gut&.=K.ʝvҭwK1}oKևԥ@tԥG>u}}{a]#?濏K>~þ8a'X7x>.zz}.ݹE7Ky1/\StGԥ\OW^z5o/:եOy2kEwNLu|OL^qW9(ެpҭKn.;vt_uѥ7|tp2zqKw..yv䛓]t䙏Nyt?oKm^z5Oڷ:u+k]~НKo|o7/=?Op9btG^.]Ko7>y-Ϯ~o:4'}\n:t,^U]st{}Kߏgg~q8˾])]5_߾=\zγץߺpg]z;]z}}M.GKAgttEn_>K[nɥKo۝y=֏okqӹSwN['&] ]z]_qn'?<=]z34/=c=ӧX}qӼ_yם/KUԥls;/e\Kt@ץ91N|祻Kx}K5-K7/^t_̫եs֣_ n}"]st.}op<9.Tt].ݹ ˣOXoO|ԥLtGd^z?km^K&]fLO{9~Ec֥9+ǓuԩG㜯t5Ko:&\zq.ҽХ;p5wkAj:uؚ}}\:}ztOt+]v{3/}oz_XөϏSǥ7{k׼=8ҋ9#'~oX?nK']ԣs+qyKwj:s\ɇ}K.6<]:y9qߙ^~Xw\dPֱ8ti\:sӥI>/qק.9_;/=tn~]oޛS]zCב߬o;}>]B\tOpMe^C]stG>:t3z[3]sv^z\^͗k=I\#қ.7.};>;ǥ9h?uvۥ|Ko.z.ķKY~)5t䇓w6/w~3e͜o^:Kq~n]z7箩4u~\7.]j^.ݹO!oݼt2,]z橇KK:v׼ .rAttyK'"]ҋZ=c{3.|\zN=~y_o~ k8oҝKt陷K7Y ]]\zHnɺÙoz[wK/o:q9gPNOt_ߺGs..}ӥg~;.=G`WΜTKgnpj/w^ԡW=9"{Ko.{.tyK=޺ g.ܟKwyU'?G\z^K/~=x?m~}\st>{Kƥ; ^/q>>'/+p5Gz|s\,ҝK9`:tۏN~q]zѝqKӹK/¥\.Yn?STykzvNj<#/vN׿WKo^Oҋ/O{]#^2;\z@QުysWs;U>=]pəǥquq Gg^k7.]gKtOuagy3,:N;.%n^uIK tp0]gšv8\t:7]z:sW=_{-z¥Nu:O]']s$t>tus tK7xϏK]]z._tz[5;ezKsKo:tnqsp͹$oַ_ݺ:t֣c.=q{|ǥǥӥ_.G:K/wt@uɸV.Lå<Ν9sWK?;WWKt=c.<~K:i͏֥~g>.]N:å¥\ҋK]k~y֩C^~{{yԣܹ5|C?յ3Ǖ|ɚv nߚҿ.}et1ptEKq b0O&.]gKy{gh^E..ǾҧΜq^.1|tΜ.=\z_uxۇo^%OԥK7/=G^qܷ}lŵuGgNޭ[KKz\kwn:`=1Aҫ{O^y虗ڙucuq{Ku3g~_8H˾uFՙs|8p魚wy5K|ttvKu5<\.]K/gZ:uZy5u/u:nnΝx.QW!>Eq酾+]z+c᪋}vvN.yɣȼt 2/Kj^G\zk>>:DӝG]"]_{/.];u /]zӷK ._杇K/Ӂ?!K;_:D8Iuy+,:t7/=oZ:u'?Wi/n}Sեe_I2?>;9u m]}\zq~ sFt:\]:uӥ_nәYv|SǥsuwK;gԝ[xXS<׹)nq/sPtOʼtݴ. 
['O&yvrSinѧ>.?.=.:a̧ǥ{}K9DWԑy;]s0t{f]CWauϙkv=nN>.9$;/uͼOn~.c]uۥ]dn..}:y|3/y\z>wtӥWwtGkd:uo^~k<O6{ȫΜ}h?]z>}v7iNn]-3=_5s¥ܑ^tIrM>ͣkC>v_:u;tY0/tһξȻ.}ث8u5ܿf8]ztj;n_wt碘ޝ k]speO>g7z9CgNqԥ\t>ZY&'?Nۧ+qҝO.=/d]z5 Хo{ lnk;7tԁuQnsދ}v䕧K9ҋңp鮟:u_p?7nr7>t6{|чKם}ҩ[>~n\\nCǹ7:֝7~g{]dp{A|ɤS_4ͼy;ӥq鬷K>a]:ҧmq޸k;AZN8o:}{䏓>to{=]z5ϼ˼tustt|?IK[KҝK/ۼt]yB̓¥#2/=q\\zxҫѧX楗mtw!sOF@{ϼp/9֩Gy][ ]z:i`s=NǣK:'k/.zѥׯKiۥۻN]?]}[ť7]KgQSMܖ/nn]9yyKO7KϾb]s[t.=KݺsMkEg|ХשKPx}һK91K:p}w楿uۺѥ;Cnycn_.}xo9k8btM=:åBH^؞/楗.ӥ_K9]zuc[ۇKoyn=ݟN98K9.}:K=x縦Sq* Sҥu~֥}ütKץw3?>uN¡SιPK}7tϟ;gּtݷ}v:tT.똋;㐋]縎KO.NNsX㾫n}hb{/ҙx]Sz=Χtҝ3K#/=џ_\-}.ҽ~0/]k^K/tGwNyn~-ۋyMd^zK.`ե\]sUtcG~ԿӥHuq/?]zz7oOޥSԫ{u+䋵sR]?š?t8r\x'}6t9Qs\qtmߺu\:9\5tnӥOt]\qp麖K]z:u}tKsɃo֏ooO?8?]z桗~|~۳:տog0=᝗_M>:r^OsWoץ7w^͚~G8:sW]ud^sqt:&].̼tW9a^xf^:E[1/}9\7ut\}±ԕKp8>w鏾O..I6/̝^t怹5uy~kC\XcY桗z;]ںtݑy_:v^zCɍKNQ^kեٿ&8r/5sW/z[n^n*.܊t̥/g~{}>ǥs\zeWr9?ݺtya=_K7ԼfwwgֱSԷߏKO'>osG֣uӥ~p'Kޑ.=yS~?.ݼY]s3Kw..K?|祇KԥWt1.K7KtHK.=һN8ҋk۹twt2/}꼋C~3K[\g2/~txKo3۔.9gǥ ޜkKK=ν:9wUt鯏cxq]wnŚ1|59ex=sovpOg\XS_֏q{yۙt/_:b績ͼqҥ`qەҟ/;Uګx}1WvՕԧ]=\SyҩKcK7?Z~8u;Ngnz9t?jz<>:mׇrw;wko:/>uַo[K/4Kw}C^֥Щo7:ySgN9ޗ?tחsW޼t9?7MzXS? ^E]yKtc.}^Y׭Sr:ҙKK3d^:hh(q?_??o??~{2U ~*??vNA:w~so}\X3_'5;? 
l+ܬ|Zsۮ; ~Y?\T8]n¹tu¹~(n׿?+g^f8s}e_psmp Ǽpu50@㯋_Us_8z {¹n?~/=;G?y˹~m:7'Rȯ_?pvy~̓ oz 羞Oy<8s'o^Ǘ'/?]8~QY{`wsVq #tc_{p5/y3ws_珗5':kS|̫O}t[t9[n_ }ks_5~_8?k2_w9uq߳8sis_>z?ֿt| G}=~qE_{[s{=}}<|?s_O9z*+ѯjsùϯºs__ep}t?:՞ﱎ/z~O8u~o<pdz?fs_Ǘu홷=z=zOۗs_?]>9~yY?o<}kSgթ߱ǿ]۵gs_?Y?W>s[W{^ȭKo}is_rm.z\kΝ?:sŒ@ùvz?ߟC~nq }-/Ǐ_>q˹롛5{'ϼ%w8{pf?O3 ڏχp|w}Tt3q}?k?q8}xs_s_:^ǻνun8>XXXs_맭ףp@a]:׿ڟ νuùw8nǿ뵿߭[p˹vW{S4Wp{umon\oNO=_uaښsx¹pgwq=z<k9?/s_ù}}]/w_Ե?q3O}=?ubzw~zkqօ_˹tw=}}¹_:;ywO=8<os_z8~ 7CqѹWe73_}>-gD8UOS~<=Kx;׷¹ͺpX܏{]pG{OϾpxu/:u|v%n?tu~gǹ5}_]ucoo\=?pV~k/>z 羏/z=_s_7 oe=z;ù{^[7*{ sѹ|Yg=8U\uϧp ׫s>ù.}oc{w}9 G^Oqs_pp?:x_˱=|zysu#o}.%gS ^os_S}tz߿q;tWԓFCuσA|_Էù͚{u~ùףkõ?o¹źgqѨvb]E8u=ퟨ/ֱqʜ'Zx' P8Ab}8yuw\3ao=A8}Q-<>q=;^?Tuܳ>υ:+''>{|:;nO{tԷ9Czڼ%z..=9yqw=#7ׯ8̻o?s_;7߻zb=^o넢F;][߮g\އs_5mcĄs_O؟yw}"|{ԩSW8uùAa]nq= ^z _]ԏ%:pO\73羯ùg}=_:ùG}<:jP=Oԋo<ӹGt׽pmz6|< >F&^W8UԽs|S~3N'wd|Z?~美WO9 oԣsc}Zi}9^m-[8}~ ޺s¹g8] h{s߿qٿs_Gl/֏cND8 羿sS _k쯙]_?; t Nk/sU¹ޞu8ù?ipp }.5)z^?olu]??pkU~G? z|Ix]g.^sߟϴ 羯ùvq}>s_W]hs|;y[C>N^=^Yw{zۏx|_꽯FkԏC}>p8{7Go?et5otrù}{ùc<^#.<꽽Nzl8u3zs¹˚qŏ߿qpnoԣ~}=qU];t0SߎϻInN}}ןù˺;xs}]^w8=bM8\yxsߟgaM?}8}=} 8OSX5c7p-ڰ[sǹ#q{}uoxhq<羾Vԯv|8}/:?ߨgo5t_gxӹg羮wN?x_8?e~8w2tps_uǯkq}s=pg;T8^{t5Wwoq|M߬S _`M6Gk^z1H8|?ZQ~t7ν9 v>oܳ8zӽ8Ĺͺ6k};\W8 =pz]X>tԣܳsݿpbM=:wY>{z{'ܳs_~e]?bt歏k^s_g}νuùg0=ZO'=/8ƹg羞e޾gpZ7Ϗ܏=Ĺ!:K>kWp̣[N4_tO>^`o_q?lXS|721W+{Śz*=} =7pn^]<_wަ3Oe/[on3=}}9^jNztamN~{dǾzK~G?cw/ǙI8ut݆s~ 羞o?Ν2{s2~z&1sp߈sz=Kq}ùg.=}}xԭWMnoW?O`z|t=-_{Dp.]{Wk羾Է3=ߎg8s=Qppp[| ~ߞG${ނs =ܳ^8˛+o9Y*9',>_sz+=ZW/?J;ut?t::.=]ud]f9.1];֥9KR.M>p tjqq^צKΗ.>k¥֥s]Ko]ל.7|?S:+ ?.=O]#]:}ۺź%ںkҝ.Μ=}Iҥp]Ko0]:}htץKG{ץ>^>qmC\:tu%tK.]K<^\ǥW.7.MS;p<^۽?gҥӥ{֥g~=}̿ӥqۿQg:sƥKҥӥWy6ts޻>\zoǾq̣ť۷.WߞۥwoF݃oÕһ}Z׸N>nnP{1^S'yKĥGKuw]:nw]kܺ:{\zߟS,~F_c#_<];89ҥO[:r9ۧK7.åӥvq;\};>ҥOБg;']+]ågyh]k>=.|i\zo;_GXX?]7_XKwx[׎Ky_iq;y%oq};Nbm_\cn]Kos;tߺtK/pzԥ_:jdzSXcݬcN\$]sVt鏮[׈׳.]zΉ\ \3w:; gӺt?pqy]:3ɇKKo#ե;ht/33{M"Vӥ;K^׾u^A^{\z¥;;uW>wéCr].z7g/]:}׺֥Wpӥ7qI7otӥ\zwn۩C3.=ҝkK7K?ҭKgntһst.MKo+ԥ;_q:e}եyKo\tnޏååxK7I^[ҥg^y3եDžK> ]o_:>zŭs;}/}otWKב҇_¥{~Хg.\z-.=>ta~.]Kgny}Eb҇n}Zp-e^ө79.qݎ%u=qJS}~v: 
]zөҥg|xu鍾!]:st~K7J͛Z~ΜE]z֥? K׭mcһ}¸KϾf]_+.=q~Ln^".9]3tt qݼV]z7z{/ԏΜǥtutoע;Ǒӥ n(]zpΣKϾx]zag"Nz|8|ݹއu іʳ~\yct|v6]ytq9)һK7]z?voO}ҝ_\~\:suzuMK7nR{uw.'|eߧk8>@7}_~ǖ!zotquӥiwqxj<8ҽå7s.͇KǣWHwsԭr\?KDӥ;WS}ĸt縥KKtϰ]NǏKySt..nݹ5}l ݺs>:NKpt\IK\z^oҧnrgv@KKtޞt靹t>rygۋۇK]׋SI:3?In.8 ]znKѥԥS9\zcùlYv{g`M^|=-]FĥVtt9' \}f.=})]܀u=OӡzϺ n.ݹto/.G~+]J\\z{]o‹}yKy]9tRu鷮Z?Wq tcNmթS.޷SǑ_\oҩץKyܳO1+.=ҝSKKQ^KΩ:'cqޜKKw.]7Klv|]v5%_ǍKK'׺s#N{\?^ =ow:}Mnߞkoz~:uyK.p9wQ޷3M|.K[/=Yϸi[g^?+ĵGbΙk¥dt Kwn.>K]z~;S\zέĥ]u/Un|; ةt>w眫K|Uե;7\p]zcλ.=?tk8/nḛqy=Ki<ҥOKwN.ݹ*^7:w͇rMn1թқsqFKw-.=oҝcKG3pkoǵv9gs\#Rr]zq}o/.%n[&o<];7e=_&]svtyܸtC.nSѯK;]:sulntfv\һyǥ;sp9GEN`a|w\z^KwzBިԙtty>ƥvtBKoO~K-ץ߾côƥvtq/<n".=.ݹyttӥWs}.=.tåG~|ѭS<kӥsp^N^t[Koۙ_1G;'_.Wn~.]KO=r.zts p:1\z{ XgK1]\C:sHk\zաS?:s;tAxMpA_ַs;mqåK7VN(]z S`A94K?D ]z}]:tv߸~~N1]z޿fY_\}Ϝ2ݹwԗuԏǥo_L:ty~zҥ7ۥ_n/.t|J=KۿY֡S_ſ~L~|ti=9]ptyҫJG~f+2KNC޺N7?Y?k8'Kw.kKt9]z{u5/5mn̍ҕoz=EҥƛΜT\~k1*]Ѕwod}}ΙةoԥO5ùKۯ.~']z^k\oK/>_s_.=өo3^q~Ϝ..9|>q[қsktطsc~]sӥw{~\.8p~H^uѸt_w=.ѡ't|.\z3_Y&/ۿ׫tsҥ;H\#e\qӇK;4]s8t|c}?sv;]uy>.һrs.z.܅tgKKtԃu9w ̓ץ7t;]z̭7枦Kr䊦K/KǥťwHqåOu7\yM.pӥw/.Cg^.-p^γOKO^Mμ ]3/}ꞋkNg^z槿߫uΚ:Dq̫}.OnwIN#]]:SKo{|uݵ[wN"KQݺ}yvW]}<>_ҝz?+qoWǛKOe^:}Nuf>y yPNuknq%_GWμ/^K|[' .=]vKѭ\.=>;\Kס;oON\o94/=rd=5}6g/Kw.龫}l/^N$^tқn9ώ'ҧk$Mg=y:.=+"ާuG~:u < zJ^;_:s_.ѿ/q!_թV>f~_1Oq]zǼK|o<\Cǥ:u7t/.};tl\:ߋt\s<5u٤KowWǥ{]c{~\l^t敵NݺNa=Ng~vn߳nptt3/=^ۥ?9>쳛g~c N]I!s^_toUO=yuGN]#וuoyv?:Qn^.t5pקo/]w]:}86y~pytХK{8];U>CӥKzޘ.G]z海ͻu[μ̳<ޟ6#]/0]tܫΜ5v Kne\sWv^:s^y\sN.Mw꾋k=yK7/Un.ݼ>\zқƺt]yv 5/ǎKms.ӥOg}楷]oqt7tuno~ԡ>u{n.p5c}GN.ݼZ] 2/ݼ0]p} xoon\փL~9֥3'/ocӴ3\z{g}/t _]x/ț}{{7yw:ryv\8]z{pt,l}{t~gKn^4.ϻt|/w*ҏۛRYq=߭3/.ɓ'/ .KufyIҳspy>1/W#ϼG'_o/uK>ңo˼tfq¥5ҙ\iL7/=t麧#/fM~N<`]s>v^:./.zܬq=K>35/]a^zNݼt:y2ӿC&/å;w/]zӧh~b>Ţ3W9MN=u;5sMo]y:u\:>;}yg\n:sa_oO=G9=ۥw;NW11/]׽ǥ֙o?sǥ3)_̜ztKwn.g~e|z{/qEWn=aMu{;ҝ;Kossk~u=\\K7ms>{2/}ַ:}qK/:u^q-:s8.<ꝗ;uMCowK'|ΙN^tԷ{Ǹj+s^KvԷץgKϹ_Ϳđkԥ|ӥɻg^z:s.9}lUϼt^q4fNj|0//]:y0'sy{Xw9򘓢Kq3#2_׋KT]zKotžХWߺbv{s..95KE~ʷK׭3wr;[wN=ǓtvKϼs\>@^K:SJuя.y:ӥ 
Oۥ_ۯn8tԷ/:}/owַ_97zBi\z~_¥7u?u鯮5:Kҥ;Ƽi^vϼt\<..=u5DsH]3by7+EΜt2/ݹKzμtousΜz9yOַ8>\OE{c.B{qۣku:w]}v/kyt ԥ{Koӥ{ӥ{}K̯>š3o'|p>.iҝKO\}qx{{Kzқk7_t?K/>-u_^u-{?.=iwӥKԥ[ĥg sK]spȼo\y^?ҏ5 \zѥ.ﯺ{ǥ\yֻw^z?u˹/ssRnqpwO?tu0/ݹE6/gNԝ_l/kImlҥ{.]78s:qΑ¥KKoC^ЉKK nN{ogNj^K9 [2/tqCmIХ܁W7~7љҙ0=Wwb<~e~y:tѷNz4ݸtӺtpCU96tطsao.ҝCKK۵7S]:yxҝ9;{搒ޜktGg:^'uwCwsg@楓G=9'5s@tIӥOyCd^7.=uq>¥.n¼fku˥3U^b_`]s u|K7_to_K;sכΜz40^N եӟKypLt-Ɯ[]sҥt ]]|7[2~pۥsҥ_nֵ-եuv:sW\ޫyoSvo~\zׅ?_yg^qG' +.GgjG]zq.tK7(]zEGKW抝.t#.].}q:S\z1lq3/#w{̅ʼtv}շ^vUWN};.#t.G6qsйUg^KvC>3#O/a?csM~ؿۿyOåߺ|\:ĺt=N+O\-ҥGO^:t΅ХNzy]:stεN>{һ^?]yc]\StcINntG^:ҭK84]z/eǥ['MNq?ۭ^Yg 7. GK?up"t?3/mҭg^ө<:sS_ƚFӭs'֙_Q:t| tt2/]3/⼽qKn^\[С_8u9t;t.?ޜGz\:)5ȃy5u\S7KwG]z'ҭ3ҭy鯷1Or]Kμ3/=nO~󝎼Koü7_ GuۻtGNt8˺F]z_I;uxK.=0.wWq\p}kL:|bl\z6Ngw1O:uVwXt8q. ?g^:>3?W~!t]K7ov_kq1'1աSKXs˼y]gNy陯nY';oҭ;bMbיq 0߼^cb|\m>.Onp ӥ;FҩåK|޴o֙p2dcgnLauߗ}lg^z'u;/bnȼtze_Ŀo^z{ۛsy~m۾#Oқ%$ \zK>.=}Ց?:'Ľ҇kituKt=+k\ԝ;o0OO9ҝc^zag^ԩ?H<3/=_Kw.=?tOo]~d>z?@ܘß:r\stoKzQ[ԥs=Kϼ\uK7ټkKowre~)>3?'ttGm~>C0/]7i^:sҥg~<.]'KONԥK/En_5.=.<*WѥKO.q<]yC;/}_‘?Ņť7Huɘn.*q~In.]KOK|Ozy<]_ݹkpq×}qn.}ȣjon9Fnߛy҇yǥGKNE>u"]$;/ѩӷ7t䯓O}ڼoO]ՙ_Rxw^zgMyn3ѥdžKoЭguud\^X3\.ϨK7oW.=}uoM>wU}uԸS<ҝyKy֣q_y7k䋎q[oK/֫hk߸/:y=ҝKKouNj̣֣qݾ8?ymg_..=/pԙ{G3ga^z>_O<~}{~3/ΙeN.ݹK5߼tjzoա3'5_tҽ~ץWKzve;s_nonpQG['7]z{hx2/ѡK;\qaå;YI~?q7߇qy}KӥU]Kťv^s0Kg'/O׋Su8u8t98}S]3?ttαå]yӺt u?.z.מsG?Htxm\a_[_?.ݹttqҥ]k^ޞ;߿Oqs ҥ7]ҙ.Se|;kΏK8\;]CimK:vzӥ+&ҭ7ĥۗ.nN_a:ŭ¥g=adqu}֏ӥ鳋ׯ.}tO^?9\:Gts/KO76OO!Su.9>6]}uO/tkqp[][/}ez~to|\^u䇓M5NS1}ҝCK_pݜ.};𦛏Y]zy˛TYΜ<3[Tԏ3_~;>뽟#\z{78?%oӥ.9/ܿ.]K,]zUKϹ!ַ%皼tt陟N~6Fat9zrӥ+{;uCN}|o\sq9gļ{}S?os]9Ktۥ?^8_vԷөo~Kw.=?Kw4.yd^:/.=Yǥɼҥg{ӯ;.]N}[߾t%߇NE}.yK.:ޜKo銋ν&lίp5ۍzD9g~u]x[G?Ǽw\z :~Dҭ#/.j2/׿y~zԝwfw秿ַ_::Kw.9XK>sN۩?ncu9>e67Vݹ{9v;ޞ3/=.~t3/}ߨOӥ7uD\stѕNy~<ƚ|եSH +溕}>ѥU?fNXt-uͼx\z枍\z8Y\z>tKͭH+]+]s{~oiu/.t:&]:twTץwuu#/ڼgS|ٝy-K]KorqԸtT~k_k_Ko_t4.|u۝ =[^a]Hҹ>ҥ_qxns1v^p{?źts2/_]z3Zu8{qKםS/ss.յO;ߟ3G.92uf>.=sK\Sn.tM.=㐋k9EN=ޛysms\f^yƺt..ݹ9.?/YOʼtq.ݹt3O}{/aX/K>o^y׸s t:u xnn~yW]0ǹS7K3)]_ 
9'~eXѡy?+?.=vǭ>q7_t9~>Yq˸td^cƥ;wft/ҥS?M^?MGN>8.=]u}gq~ƥw|>̏<ԩo:tc95m\q&+M?FcM=v{WttOK,]ח;/=]'Weԡ?qӡsNja\׿'/̩G:t;ϼq!}?ùn?]po#{M؟yܫuu`c ΟqN卼>s_w{z]Fqܦco v^{ͣbԭpxߣ ds؟sI¹j[?^Oӽq.9z>u>G={?\zbE8w?:޾g^t80k=ss_/pI_57seG޶^ﺿq~X?sas_뢃-p㳜ڟs ¹}ƚ<?_G 3 ^Gįu1}t{yA>}(/ 8q`Nͺ"_@ZuU|9rz}]5q/׌ؿ|K}&bj4똄s_-ޯ,y-g2~e*r5tSb;/1qϺ6ѹ`^/ѹV=?XOs_¸s}:'1ùO8_W8u{=߯y {c&'sqkkc}k>Ȅs_cK8~ӽO|?8wn[tz8ܹѹdzѽXߺB|ӱq_k>W3'wkf'櫻o^޾uk|¹?87n ˹`ձWSכ3s_w?cOg[}__¹eqؙv\c}_}_>Y{}>8ss:y8{_ps3~X_;~Wx>O/xi|3z}?1ӹ?WFN[Oq}}m>|}ݿ/Xo3s_1[ ޏʘx}}~f-}>7%E׾ù}}GGPc>rs]~}}ub-}ngb>1s_;]o!>p8}},{#uIen۳#8cfď}Onop33:Ν:>1y%=8}|7LJs;}_cǗ|x¹?ٿ9:^n'k/*Z߲8yu<3Kxog8/[n8>s_3x~\¹?e8u}׵{8}?Wq:7 ֧::r3~c=}}#q{wL|7~E<{}?>O7A8u9:sqRw֣7籾 羾ďqOC;a?:=/3{މsߧ¹8ٿr8x?KW= 8zN]c38~}˜x4םѱ߬/+?}|֥ ww%{JB຾G_:'֯?ǹ>/+泮V$7cǷNc_C$>}?Խ?/2W.~ |ӱ?Xs_8+ߍG sx&:S3~s_볗sQw`߾u_gw[g`n͗*{^sg?:5ֽ?]_pksө?}_?ùg<K1S3ΝA}'ùKh2}ùg }ù~:X=^QOK_#~\W^c>D`fg¹8u?ൾ*pp+^2fK^#3`<2}]?g[W,G~G Wq:g|֩^s_SߎOwoY.Ƽީ~׉>۩#\G%7/Η&pk:5u¹&GWZPcx+u<|Gƣ>}|jģ]?s4sߟp{ }6οpk : ~_#e=}mď։Wu.6Wz8}pM-Ӿ?߱ޯùg}}_1:qù{8yp;7z='8u\X{pkk_8i0n1?_j5{t5竌W|U:|?^?_#㽷ۉG߼E/GغJ3^ci݂Y"u%¹=S;iitt8/ֵ??G{~?Ig7Οp:XgA~NB>Øo_=N|;g8~K;^o8 羯pχs_'[M8}38¹9UģWeӱόo;}!EeL=Ĺg|P;itzqPo%.gO}t|ùnz|ԟu}9&]uGn'= c/gNZ;its_uoùG7}c w<3>sp?:?R#~O3iǹ3&ߚgoG_:ۗ9sΝ?:^={'^~cs__3#k<4]o8uԭt[zۗg[~ 8>{s_pg^.衛_G7{~?G>:?'յ1v=?=>b7tk>h8WON5u;nwԍ;~i{s_ǫ0˾#Չo{xs%=8u=yv83Dwu>ؿ];௏}#q+^cbqqcu3׼왿.Og<?"ݷ{'ߚ{?~q Wz~382ͷ\3}?^xϿ3}3^co8\ܟѹ[+:/3~???Mn0>]\k|ͷgv8'ltk}{pi?ތ]{=q8&psx};X^:'18}瞾޿ʘx{8ؿO/9~XWp+Y>#;tď/]kG/sߟpk=|;/?nOù/xtu<{:vD|4;_?1Wwpzۿˏ==}N=t}+μ;׎Kw.]wKwNb߸.S'q؍kx=?.};a^ǥåny\zljmmp-tݰ._z~q#OѸ}wvǥW[ԥO2.}?;\z/^/.Itһ4qC]:1N2tuvI‘K^]z)fW{ǥM3ѥݏݼ8H݀p_^Ќ4׸gwzåwGYwKvv%Ӝt.}?>]ut{ҟqtԥƥ_:pt:N]zK+oڿ62n`ay<&Oϻ7r~K{.KOKo۝w;9tKwåyvDuҩ yooIbK7W{N\c{uw x?q~ӥo8CGSz޲<.qԹsn׏K?p?.=iGqågv?Kwt{ץCӥK. .S-]Uwu\v.8sA?q=tӥS^n^.ӥ7k^\=;ܫyqk.8u=pǟW^HgL^Kt]g~G8Dӱ`yqCg~~v#.:+q3gtu7['n}+cWz~Ըt듷.E8]:.=ÙKw}K}Kå{ԩO~ԡ:'QAݿÑқqO5xu|Ϗ.ݟpәo|\\zh]ytJ0]KtWt탺H+~Wbn<:7Kҍ7K['8w :?<;޿pmlstq}zө?֩_ֻ:sǸpuK.ݼw\zһlqY7މgK//..\zǫKN._'Oyn^}|{zt|\z{/]:7vp\z.4w\ҥϧi]םW/tvp0}v)Vҥ_w4;yvcΜC/;.KGK. 
..S^?]?ҫuUp-~7^7\Ř:q+]zz88^!+.ѩSw쿾5ƃx/8pқt֥ҥp:t9åB>XdƘv;|ng\tuīӥ=&}__yZF~yӥP ފ;\z^KQߙgw1&^]zm}8]3k:t{׭3_>~y\u ӥ#]~t䏮k.ٯ=\։GntcK.~\oOzk)⶛< ҫn[״u:u8¥7KϿ7zq/tO^?. u^tSס}ա3_ܟtҥ{CcKD:]zIK7K.:%tNK:w/0͘<8tU'zo9&}}ylO\zҭKK.=tKf<on.lq:H̻y]yt.ݺ6v?uэ18*#.|\ޮW|.c2v{ŵqٿ}zΩ.ƥW'\x~\xju鮋q;ᔋuR|g+\zgt鹿ۥx]~mǎ:v^;.Sb.gN_:.;ۥ5ӥg|Qzަ[wu?·n<:v\ut:օ}t#ȭ+'M:e\z/|tӺgNKno];C;.=o|SW~z:.ޏ.]'zN[~Qga\"X^>zqg]z\vӯ|oBuU]åo.v7~|aK߲/u pqݺ1uan:y\vW|8Xǥ?{niKGՉUu1t]~qηvtեgn_)]#n<.;.=AtuިKo֑Yҋ~vq> \zzK.:wۥqaťwi:GW~[N~4뾩 .=K:aɷ>73梁t:ѥS*]CҥԥOKo[iաx/׃tI:\zn'ߺ\V81<\.}K:K﷮ث['ݟ}!ҧ.5^^>.t>xo9]t׻tۥw?b{oƷ9MN?f|;+.ԥo޳ŸD~uz.N9Ot7J>/~ό)uҥOoKN2Kg.>ҭKG?v9e?uo+݃1w_ҳcv֥[n֥S7Xޣ?uMw1я6?]:N|߮:u>F|~C\zIХ_tM}\:G\3.˥/WtKpJtݤ.t:]\z_ogԕSoK\f#uںSKҩ.]K!.=ҥ{֥ԥG[_kt?~ǹ|>in..=9uHtK/<4]zp5sq>s>8r?Y3> N\c\~n>㺟.~{? q]zיKxT7ic7uZW]}Kef<_鼻inSޞޟ.q;'KХWץӥOntuttҷn};_Gpt/.Wi:t}}ǥK~tuCӥ{̋cK>kåw.]'Ky{:txtNN҇c]zeL[z>gwC7oONpL޷_K]qupĥ7ҭKN.}XGť.]zi_5sԣN>uK:8ɥ;; N6uC]:&]c?Nd8&.;yԥ#ޟԩw̉k輷Kסss\:kI cة|S^}Z:ӥw;u7|Ͽtj~q]cc\߼̭8 G˥OН/.gi%ҕ׈ .]ǐ]KCWKطy{E|:]s'qL}t8x=:yvcߏKO'K|y?KNӟ-7>~-nUu!{?_!.ݺ tD~_zҥC;{u|:tsåw;suO..}~CgN^g7áKN]/=_3nAw߿|:+W܋gS_4c\op KO.}o۫۟:X/ץXѥ_Wx3&o/VtvO .KS.QGJ7]z[׾K.~tsK'O~;ut_C'o8֥+V_8Wg?r:Wχf?k|]GM޻/>=~tpt};r/=]|ڶkg?^uXå:yqUW~uߕ{v]nCIuз_]z>O?b/=^Ku :tW]<#ޫ+W~؍kƏ߿\qiХ{3~:]uW엮ӥ{֥[gKg[~yҽ/xM0K?['1~K7o!]z+|9&p[/]KyҭtKסۯE~>.]Kq8K'^..׫K7o^Ϙ~Nfʸzu;.{gKONyl엞n1gsu\C'o}KOK/]~MwNsf1&k%_<]ytm;3.oK?m<:tۥz[g[Dcե[91_)үg1/'tet/~ǥgUOχ>>/Cn<:tN]Kwq~Nw3uzeq:U]gWuOtwnN] ߟOd=tե^u)ѥ{~u\Ƹ!;K~ԥ']zt֭Хg#|m/}:&~xy:u<ν[5_wo ']:ʇyvNt}\.JH޶[_͸t>_ҫrߣuarz)~]m|qLOwҗuR߸tuKxKt?3^KKn$.=lNtօХIԏKǥ77q~ҍK\Qn\zua?z0Na]z~ZwҭGY]N޴.].}_]-ҭËK/=y!_fKn>tYU޷SKo~q8>}}ȇN}Ptjq:t~KҭK~_p63]t8t2~q_u~s)['3_յ['5ǥ[藮S_c/K{]uYn]:]uIz\Jn?\z5K).ҥ{o/K7]zs<>ҍ7/=]r~u<åz\KK~됾:tN//tWuh|=m\e6//.VnWS#]q~ow!Su/}қQ4]zGG>uG]:ѹԥ:엞NKn5wڧ,>ߺ[w]ҩ?q*+vǥW5ҭ`isuco K'p~u^t7^.fL[7S'M⽗wKƷӡ#$] ~CN|֩xxF]WgN/:ۺqN].}8f<~nx4SX:'֫qt_:>KgKN엎J~Uҫm|5ݿ6#xo_K't>~;7vy问_tuR?:uWN>`8|n|ҭ~tt=]h]zWtgtKa> Ktsu?{t/q[K'/]zuG8_>Dtg췽۷1uM/9uRǥ{.9XxwK[\z?c<]z^t.+S\N]+ҽ>.~K6?֙K8|\uMtѥ{~ҳN.|tUgX'u.:\t҉s+/ 
?rO^W:ҋqԋLp~/yt*sץSXnGtet.ӥХ=~;T:w:鷐қ~.uen˸F:uw26헮ץ۩u#K&t:=\Kwty~ťWd?w8wx]]zk{L^[7- ..=.: af=;}3nqC'o~:s[@~鹿t.̯Tެ+K?Lɳ?y\z~^t3?M8\Ft靺ۥN*]NI^~gEwN}z]2/Ngn=1yv.eA4Ǧ_ƥ[Fut鞏ۥ_;.M7xډkЯZo/{O\zĥ'M+.}:~cץS6]zҳ:qtǻ|gtgJ=_z[Ǒs|{֟ә?^қSKOҿ_ҭ+KpΣKwp7?q}>4[/4_]:ңN.K?.~/K/uq/ҽI/y5npЉn=/K.\:ӥ~2}tN{fN]rqq}/ű~t3>}9y{t^ץ7ץw.ߝtE.}ЉLO"o\#r}Ƌ .}bܠ'k~'CǸKn\z~_:y/8ꗞ:c}3KuUGu=~gn'n\z9y{엎ӧ_o[WDų_z\Nt铺Xi'qW?{OFW#N4nKw3 v5;`toY9SN^'yUWi:twSi܋yq_1/ѕۯe|rO#qѥ۟F^uݯyvǥ(pcNnޝ.yۥ[ n9-nŸ}\x9ػdnzŹxM4:tڷK:tK~Gct3/uSys;?noGӕ3_?/=#O|KN/пrKҭK?ǵ;\zO}<>oKxǥ7^:uytϓuWu_q鷏g>я=]z_)t/n]]uv~w~_G>utxwc|CMngua~ҳz|?ͳc[wn]g|ӥ@tcCG^6.=w/=9oӯxEޱ.]it~~q?|ԥ>҇~2ХgK'$]yKOwZwuWӓuHӡﻚ7}>{̉:u_ͼ[nxyuvqխ/q?&/Kһ9\zm҇o~/ҥ{.=ֿK.[v:/c<|;?e.=Ywҳxչ_?>S/]]zqY/.ݎ'.=O\zԥ_[O>u]rA[KzP~m?ӑW\SI>ýC7PN|;_O[ÿtq~׷N{8_.YIt%=Su?uWq-5.ҭKog엞_?ױ^YWNz5ܺ:⩺tv~7K .$K']:ީ3K֥¥g\zKwХji:uw՝_=_:å~uXu>ҽҫu\q~ۉhz{usqxc{/[WQn3?:['?_z5uso>']:@_zЉⰛCo~.ݺ#uݺ1G|XҭK7?!]unԹu}Տ<:7øtut[/+\/|t>7ҳ..Ћ܇μ>K}vSn/GXoҭKN߈wO#ov^on'_֭/՝I։w^Ñ.ա3ԩSwko=n<~oN?*u_:KOwN.}[)ҋco׹o>^ψKcG_.{o%3˸PfLv:up :e߳8>b|keԙW..ݾutY4.]qKՇK7*ww1?Kptҥ#G㖟̷ȫND<֝w/w~\z˥{}ӥw]q~҉K'0[|hƣ9Kԥc+Gpe?_R^t):q~7`K.]WKPg?oW<~ӥǑƣqwӡ8Kyijӥ{ҧË]unһ;⽽yĥ{8wk~tu:tХ+7Sw7]ENO|1}Ϫn)]z:+͘|[vOvul|;\.t]M8C'^']:toutҭKtҽ_ҳ.pKϳ:ao??~g({__?u_+ѹs/Cν ]n82W[;ƹ3{k^Fms/ຂs/c|-H~sGluܰ1s=>\I8cs/}8b?"z5߽]?m8Ýs/~+_t+tg8-: ]k/?ڿo=^O82^wN~%>|Sb>9s&{?ێ;o5_х?1_u7A8uux1Bpk~cz 7Cߞu~S'`]~/dz|N4{[z<[7qQ|~=نs_ù7k8ѱ&mo[Y3o8?t%k]oy}|p:7uq]_1߳_17:7||#~W8u^^u[3\O8ߘ}wLx͗ߘ|C?7|~gVq] I^x>~ͧ{ _\J^7߼ܟuwqWc0bx[?Pu8X? 
ru }oQtW=u36|w̗c>oy=羯gW\ù?f_<~9u؟0ga]{K's_Oycԙ/wד7߼>z~2ν d8uys |st~|ù1 |xgp~}/X{|#}˹>u3#s_]o85o.O?q˹{W| 羮'WvO}|ؿ{9vr.%cw8pOur¹>W㺟}¹^z9u=ұ?u=˹޹speT/?/7qùg;<ck>qk}Sc'?{szggyPn9ףq_ø{_M ̘ٯ~|c;5c>o8:OOGpS8yҩ߱ڟXνt+>_gg韽o;|n{X鿾g:z;_ k}6āpM?'k:'羿߄s/nG^p{}p@apԽr/0^ʼn/ɷٯ|/|֥ 7۝v7˺W\Z|{̧ 美x/s_߇oe3}| 美^]{]6XWp˹k/ G}b=}8W|pŹggz7c:~^O˰^8N~~˹:)[Gq˹nޘO}}{W?xnѭX/O;֏'o֯~m1}+B|Y^u;G'?"93b}>< -q=>?ħu`<";:p+0~-Għ^u$¹7Mo9z]{{=s_ɘ{Msx} X'7??b}o8u}׽Xߺ|˴V8>b>Z8н7='8:O|sq\:pq%ԁùo帞s_ؾo}ߦ51{~ƹ1~Q!'^{߫ƷN~Ո~xXons8}}߸뺏p{_.B77"]Qoo?_E77^3^s_6ùϟ|7~?s_θ>7ugpk=cgŹK:73~sדpx7upo|s_e{ǿspC8&cztoo{.KS8ѽ^?xN}ͧ# |4]{t¹+X?:u¹kn_ ;ӌ{oc=Z>Z/܌og_Wtp7:_t'd8/{wO8ޕxoKx!}]o󸿆s?ٿƣhgLֱ3_#sx}}?ǹΟ߅sp^8s_n^YW'{{2&ޙF<gp+PcScj|8v~5Oi|!:n5z!u¹woùe83b>Ow&>s>os~u}ާ#ᰌ<~3森6}uƷuc;f>!~>c7v];c4گXV]{,߻p+^G0oߺc׈z1&ԉoz{ķùqg<ڱ}-ۭÇs|R_FM~s߿q{=q1n8>ӵMY&=P|g~㺟V8ӵƭk8 >?ng|o x=|k^osqs_/~o}={p?s?G{ٍ{'ޛn0&: ON|#3~}}~;zf8}/{Kp} CZL-^pk}.6;⽳o67ùs_گ߼dp!3{/\N8oss_>||~q}N|_#~kďuFZޙ/t}ǥc'ߚbǕw3c|;㙏|t%Cn>ses_8O?cƣ93M|ߦ_e5r?{WWsƷqw7uw8uϿ.Ʒq||s|:7cމG_s_z/W|fL_>^uFqτs_W'c^on"Ntvď-s_׻!ENx{į+~݌Nn>8nG<| :t_˧/׵khï#;pkt3=Z]v۸u8ѱ3_8pĹAG]p%^o.zOt|O;c?Ʒ/W71_=:r87S_ٿw}}&cf<֟v|kg7OG߾羮ws_͆s_*]{w8p,o=;:.ķ/wƝ 瞾 羾}O|G}oM;WqeZ {t+<<&}?o{ùQW&>¹zs8xWb\Oɷ/'s/:8%^ɷ\}Oq1|mI>>Up+^P?|~ƹg%=}}/|<ףps/[oqŭg{I:]eқ=\ҥ7\.Ct.u ҥۯ\{եӼǏ/ セ9].Y/N%]zn>ÅҟΥ?K8_W\>K?\z0.Ung]W^uң?._ .}$]zԌ.}wKpǥO\&uO30u8ۉkC~oǾ.o>gDbo۝Zؿq5kT%҇u ytJ:\(:KNj\ԙoK҇ӥot#qĸ /N\#.Oot,gĥohnǹK|#Mt߸w;u~㳿U7U7S.Gw2/au֑Х[BN.=ݪ.Kť.}6>.ƣcoa\u뭺\qx\y/;]zc'Nƥ_tנ|>Kq8̫C_qc'OQK=\z_\z:K]_\(c`ɋk:qŇK7/-]uKncռSǑz >y}u.13~͸åtyץ׿\z:C҇urt鯮>]: n]tt^0r;q\<.~׺z3槟e%/tѸtootҥ{ǥҥZtoKn:\q']t=Kn]i]up?q\:_WשNupѺtѦK׵ҽ>ҳy50ۥKn{ť;yyץ{=ťҥׯKϺ 3]:0ώ:WcԥO@鷠Kxy~ץ{a|1&rԉuۤK|1O/g I>w'Ӹx\zhq|pcS7NR?~K:Sҥ:S~_ĥ3]:}q>}#>_K>K:{\z_ktJҍKEg8.]ץKϿGy.tU]>. 
yby\z{OKt׋t_>\z폭K7WnP]K^ڕygԑ3<;O^<]LgK ӥHnY\~ w;z)]åitqBaKu{t qbt/ytSWKOyUt?t`qMҥ7_әK.}p.]wKҩk.̋ӥǪKpt}vΜ<;oXה:ü#ӥq]ǣK?ӑctOK﾿t] ޳wһp~Ku:$\p]vҍKeҋn7r8Ǽ;Ǒ1+>;/nO]u[tuO!.Ku㩓yqqҥGKOGK?._;.].ϟ.]KY/%)]ќ/5.=4^NХo|}_EWN|źq}ե8tӥw9qKn9.tIuҍ7ҭӭKYn]\.=i,\#0u_խuHqyq8h\+]:ҥXNuta[]{u\stt.[nޛ.ݼt)ѥ{ĥGgדүӥϲ}],:vޭK=6:][ij;c\zӡ3ߥCjѭSwQ ]:[w~=yo8n|v;r\׳o5q=Х[Fn]2]ztmKqt8Wg|S|þN?CI|Oٝ?\z..>i \z.t<.:6éSթ#pٟ}/t޿u_n:yq:j^t.}ǥO?y{]:KOߌGsʉӥNNҋw?v8CG>u5|>yqq!\`.x.]z..܎N`>~~?]z۩oď҇NW>~ҭKwK?m:;?1t܏Iϫ.=|i6\ۭGpYUnq~K9G_|Ցw}qK٤KMGǥ>ԥ[ >]?ҳ..ݺIt*n~k[NuOcZgWv:c^_tҧT]K\vG|ҳ,.=ۥֹg?~_tq~c']t{q;}ҥyƥ|=u\ěct:t|ƥ/>cI~'։q޿q}ӹn(\,f|;+iG\o1Ʒ#;|Xu8.uo{{uLҭ<i_q¥Hҥ?SK.9mn'>uN>~ҭ+KN1ˑ.).}~;c\GnNs\z[Oǫu\tөm~֩ZN'nҧu uWK7.}TǧKt}ԕWҍѥ/]ry>]sDuok¥ԥ:fꐦ 3?1`L~}cEgN>8ۋu:ǥ]z_ߎD>uSwϟK_͘|f^?^7ҭۧKQS6K|եeO}}حc'<_Ŀu_>>.=c\z+S'N߮-t|p?tGKK>}ҥyK7>Kҙ~<\Qq҉7.҇^>.qå].~tKoK1.sIåÕ_W뮦cğMGN?f<}NzNsnTnTGxp~q gK..}گ>GKݺn8t;[']uďK{=ֻtPKqҭK.vqn.uǥ&E>CɇnXåb|p~8޺[K/ڿ7/'./_s|uZpҥ['F}N~Cבu7u=ֿ8|n\w].|t5WGxo}~{N~tcoץS'k>on~.|tKەğӥO|ҧu]w.}oK?py=ƥ+]up K.۸>tոYTN<:XG:hO3MNxkx.l\tԱեOҥ_?]:uӥ{ҥۡ?NN|9]k0o?3.=|ҍGK}tө7\|KvCǮ#¥[.]ut['^ty?/?ߞ{Vt^k:59֥socytһ|q֛qt.Pt: ƥGtgCo1~>NiNJ^ώKׁ_t1ƥLcχ.nKuø ߫S{~]zw9rLS7nPxNҡ:tx҉qcp½ut␺tEt&unj^}>yq^t5.}=nG^#sq9]._N~ǥӥ[J>ǿڏ<:7߲ygL ;|.=].}+_uQpS']z\_u]7^wv~olwm !"-!=]Yv_7XKz}|Q1S7o5y\Kסo-Й}YHNK:+X3o/O]z9Kq$_>8utҥ_:p_7'/}6>Kf^m6.}/y_t<+_toNnpl\zOeKKs'['Y:sAwKkrO9.]z..q@楳?\satM7?3O&NSu[QXX/:8p/.}8Fեguƥ_Y[qtyK/:uu:I|һyu[q0>t]5.ϣҗn}w_85}np;u8?uQ.#/N/Ottt#ӥ?tߝKNGc_ϧyC~E߲݃:]Kd^c^xN3.I_y|StBӥ<;k$v|t[ {IwN]םyu鸤tˑ^Xӧ?.>t~ӥ7^ۥK/2/9Iۥsҝ.#ۥ:uԱu_t.|XX:q8Kwn.s8SwޙX\/\7t鏯ezѭg']"tJ^C︛-\_H5}WoK;>\G>>.}b^}W]uХ۷K7rWrޟ<ӥ?e^} |K7SۧOޞ|.<ҙ×.]Kw.=xtåO>.|O\z҇s¥.}קK{;9Cwo:s~1W)c> v:u9n>K/rǥKg~著N>z楓GK7 κt(.}q=.=3/=uG/Un֥ù߹?:뷷7Kҙ[.ݾ;]y#t7KԚ}~qzoҪS֡q܃ay7g.ùg^zաOo>@d^zH]{9ǕײϷ.}tkxt9>uQo{NKK<;/}s7kץGKҩ} ӥ/زK.ZXS?tCǥqcmW^b{uo/gt.ݾf\zb]W]圃y7u|sڣk-]zͼk^N}[ltq~år䥓.}ؗK_mto9saK7zo:t슮<3/f{o֣N1¥oq˵4y2nǥWt|5]P^pַQ>ץNLuG 
x2Of&O&u.}eWO}и1x}җsp{y2Kå?:m..}W.|pb׍Kc^zutTݹs\+k殒'^͇OWU]yE~ҝSK_G~Kw.}9_֏]~wۋǥӧy鞟t~t.;sr}sC.'>.9jp7G?sϼtpre^z_wm^q:]z#/V\zNK]z#Ϝt?K/۩3.}W]z:vpүӥ].}ru\|t陏Wutv^zuMLWu̅m:ust{=^w#ӱL+֥[/ѥz5/}=?ҝÅK_z.Ϻb>.o|\9x͗եt;KqW5ҥ2K|c^n֣3ݹ:uׯKe^z|3ǥĶKrW]o^X2?.lNys➏Kw.yyn=u/^?ou㏿g.,{W85}õ}5sM?{̙3 vp?>.K>ԥKzF]z}__ust3OG~Mv˹k.?;yS\Ӫ+gǥ/guF\W\y׋.ON^zE.=ߧ\,]z3<]:\ou]}t+N:srǢn^/qkk/Wַuԣ{\z/IkJ>9.åL\;'/ٷ^^tK7XN~y7\z^W<ퟸ>a7K#\۩x{GO^s;uv|;ۉw69LJ=ʾ;uϓsW;k~.<ӥ wU.]WK7]O;ԑw>_|ѥ;S=]sKu~>)뽟ܚsHkǏN6:ӥ;NC^y|tk湇μu;>m^to㢛|گ=En=n9p[>KwNy8-]*p.註.pe^sWv^v屿KCGG]ǥg~=yumS?ޞ9q}K^ץ?:e]z5?N.}C.=7:N[u:t.ÁԥO.}tCַu/u|Q\zɥJ~w.}y.#/fM4yc5/^qt޸k;tsK>]z:\]z.=oK'}֏?to?]zӡ?9'sSuԷ;apĸb9yݼsONyO]0Z]g'';]ԡ3wfXױSVtbM5yoEn.~]z:❗~>]:ur ߇N|\1:bǥ{.}2g]>ɚ|t>K rp8sZc:v'tt~I8Wzm3G`Y?䥛sKХ;O}П鷮rҷ ַK_nu<ɚ[gq鏾u589>;֥#쏎.~š9LK.=3/å{>ǥK3tK_>>Y K>?Qu.ktK_.2o=\zb\^zCқ|\iY_>u]ҝ;KO]|K_\ʼ.溦K_p֋ҥwtU.}ꔫx=.9N^Coܳ;]Yۿ]X?׃I=sRq޷Kwqޞݮk^攥Sߓ~̭[܎ǥSӥ_tE{=K=]/:k:uy{\st#K_e֡8ӥ3/ѝ_q~ҥ.ütϗtץ?ۡ;'u^#o:sCtҽӥ{EtҝK/ǥŜ]:K<_ϥ>pn`8$\sҥ/cdz{ͻѕS DžKssCNr=L^owty{u}F]b_թ8ѷKvttCKo1]a]Ss['y{yҽJ>}u*,c_ ұSx{KgpKױ͏֥7yNSOuU}}s.B)[ObxKwN]/\ptҥs.KG̿.Kg#׻.1<\(jzqƸt߿#s"~祧c&/w}9nίoM>yxuk{T]p.c]7\6]μ3WwԙwݟKN]w:o/\.W.ݹ:N}utK9Mg~=wt5y}t3ϼZ׈ѥ.o#t5q]:ytu:Y]s-tyΝҥ]eyܙ^u|ǝK/:ut7˼tZs..>too&t\ӷwΙ?G^{_׷MKow\z_I<өSwNtE7NnPbuչWyץ/ƫ;W^xq9wK>ooۋc]ߏp;Nt#cҫy|AoW{.=åϏt:ө/tۋ9'AESK<>3ǹw:x>}1gBμtAK~K.=^ot|;ۥvμ~;3_¢k_} ]9nGKIy)^W{W ?.]WKKG쳻uE.}KқJG~^ep~\3Хg7.>]cwҩ۷Y7tDKi?Stp<g߽.ݼ ̣ϼCOK=SA]}%zK7? 
t&tK:}wqk^zVyӡguw\?ngKuҫ>;'y{=vEwnǥo.}xǥgKoy2߼̷_Y.} _˾t>u\ώ> ]9kpչ}KOi^.}їKwtyzt|of=:u>tg2?z>y䙗t>jq>0]۵~ʚ6ngx{>@ӽ}ӥ}{Ko>S,>ѧy3/0.t.cKoۙOC:yΜt>_ߒ4{ե_:j\:ӥ[W|qMY>J\z:]y[tȷKtqKO}̭7g^tK敓p6/a}sޕy{K۱ߓo7]zyui ݺ}KO,]?.>ygn.2~G33ώggwةvƥ֥{~7/Kו3}ov9nxt陿}K{}v:uU}ǺG)]:˧K_g\t5y2]N^?uqy/]XwCK׷.wW֥wt.'/}钶K'_~ֹ<_Q]M2?<.^K7OQn^.9ۥ7.yOnߞåKwΈy̼ty\\0?Vx/Ƿ$y:uq㹭Gu;/=&u3W6үʙ]xxn83~yWå?>3tH>u[Y5y2}ȼ̧__әS̒g9SNycõWgWљN 9]:NG.}NVK3.ҋk}\_һ{vq|sMkxK7^S>t]]Kw^:O^\?\:ytӥ;GNw.]gN}GKKwn.ݾt-]ݯϜ#/}ԩ}CΜԥ;ώa^v]zqAv~:¥ԥ/Ct8r^9ҹtn[0/׿y֗u阫xt~}v_K_u;sƥ_<>.ݹrtrG?]#EKou\zש?ӥ.ݹƸtGz~f8^;ۼtrҥr;{>.}C/\w-ftqya\6nzǃK>W?.91t{tM>̫.yKW^XwָSq.:u۹Gg^>{9W}eXe'}Y?.:sqKwKK6ݝ:uq%U:̅-:ǃ.}9\Lt 3/~d&/}\һ3uoQ>.~t]ojtKukK<]z1o<]z3]:9]tuv\tE7ݬ:srk\}ttGK_ַqe?p;t䞑/N^:/K7~k\|w*gbM=_ױۥ+^>u-Mn7sK_ޟEJ/å[_ѥ9ܳ\e^zϜqrjiӍ_ЩϘJwq.lסS:uq癗3.ݹK_5]zׅWӥҥ3W6]zk\҅7}R֕qmF~;.}/~+F^7y:9\ZҝKw.NwߚUvK]z?Kץo<ԣq~q|K^|>pc积ܧsay|gdݏzzk^+i?\:ϲ~p\ӥϼtӥ]uIХ?:jvbst䞑s'O<]z:sKC'.=7楯&lΩG\G_#/{vn99Փ>~+tG/o:9[֩Gw:|.}^>~̟ٿ}>|\sBK{;C|K#.G^Сҭe^zfw~Nߓ>uvS?st;.=җs@t7߼t?ۥO?BnQt"һn:oǥ} ]st}w㾗>tԣ{y:u;yN^\;ǵ1a߮WܞgC}?:3;czM/ָ¥w79GNXө?\z5<]zu͜١C',lg珼Vw]3w5;ۥuԷ/cק.ݹ;/==O77XS~?_?__Vrk:z_o_G?OvB7"ߎ_*?_¿O^޺vp_pO8C ^n^?Ĺ }:%s/U\rms/MޟW7(M˵5Nu s/<~ν.u|pν:F^yKӍq||ν޿{,|¹ץ .>{{܋%uR/büW8tOo/s/u˼pzq<s'pE's/AKν;<ν{ w:R ν\z:w{5ʭ^_s/e'acŹWtAi:pùS^{s/Eνs/t+G>P][ν>=u=x=O{cb{ݿ}=gyk/8L5?{;ν s/̯-νLK{,܋.4{ u4{uJ8^%{yܣ|}}C^2={uK8qȯs^~ؿҭGcs/΍s>^7_]_=]M^K^t8RWw{ ^v۱޿| ?źx#/~>X_q/ﱘs/Eg}Wνsùt/{Źbu8.:rù|Kб_On/s/وcW8ﱝ kzq={8zfz4swץ{_kӹǫt8r_ׇs/]gν8'({={?~q|q¹w8w7ϸwûbC }8}/^s/M'νdr( sp.ν{1>{_q}g¹~pi8Q8ppW\.]{z:܆p58K3<{+98Wol^@_=?|ZW\:LǿνT]w^q\' ν۽cݮ/o:ю;A\WwB=5{]^2?={̏^<ףޯz>?Kzs;t5O^-}'=>Эنs.=?w:b={:pEwsν0t|pչh: 8ν<:;g~ νss/Q-[w-ùùe+^t8w  :':?[ƹn {,:pzzgq}Zu%Oӽ?끋}/WνTv*y_=KyO|?Q؟}u8?{qnν4-O?z<׹^Gsν~g]8|܋y8bT838θp>U{i>}8۽/s/]GzG{/qž^{s~sKwҽ8¹ Q8}'}F;K\={ַts/E {qK8G֣>֗߈~Xf.B8w98?svo=vKk} {o-Wo[(x.ַkWuwamkczpꬫoqE'=8s~vW\ws|>f:zYy^WyB^ >׳uSu%s{\O%s/ۅsUx܋. 
~Fمs_sK׀s/[zνd>s νt>oqys~s9 ^no2p|e=u5y<¹g8?Ks/;ν`¹O8޸-{qX_+usx>s^O8-}t9!ܫ}:S3N^u8玾Gѹg}py~羯¹Y:¹s/ν4pszU8}}s/~p{3Ozs^s7{~?ù_ܳD8\{|<Nqp܋sùgνng}qCp{='ܳ_ܾ)[??w kǹpSYS/W8}!;rmùMpu9g.~<¹Gޯ~~\/ >އstԏqW6p53u5;{ֳ¹88{{i曇s/ǹ+[?v Whg~/a=zν.q%b{qn$ν9{7w^<ߎe=:/"{^o,ν8)<w瞟8<܋Oe6:R[oqz^n=뽷quXSƥ_?m8b{q97{Úc.@8\pNz~|~N^_ͬmR}|¹Y{|ps/pnν874{ ܳP^N~E}s8~ ͚~*~f{y=)sv\ss/sۇ~8˽9k/q{7ک㢛psEnν¹?s=8ܫWs˹ߵnĿ\ӿ~ϸ]8YSx?GG~<{^js߯_{>s{CN}㟱?d܋spŹm8Wֹ_Q>O~ùs/76{^oח~|묫֩oùs>s/\`8E^.>V{z{}ܳG׹Fn?= u:֣ND=ѽ7G:~ʚmvsqM?x死?דFߥ3s`m<ߌsz@^s^s/9 {aN.=u9:wo 3O__N~Ǐsӹ[ѹ/sI{i>{۽跾t w8=q۩o_:~<> s9pK3_~98ߣ~3У9tgzk5K:'On=9{16{unq{O]Wƹƚ1s GN}{~d{5~ 'OܳR^t85ۮ/S&k AHg~ur8sIǹg]ssqKpʚzc~þ^{ls/΅ s/M'νkN~kz5ίzo?po=BNuq}еq)t].2>;}..Ka[}sMq|_/'};*׋ttg_rѥ?.[rmwN:Iܿp}Oť^җ>8\|4yѩ77;;Ntҷѥwt/z)>:uǥNdKg^]n8sS׈|MOsǢK7OWӥ[b;_;/|t:W]e[g8鳻uzy*ԩC-n6:uqץ9ۥg;Õ|_O7Kg$.^YXS'ytouoo{N ҳΒ.ݼl]Kt<~tW\҇rpvi_ν3S^sw5uܙ+0t䳇K[~.WqD] ~Ys|5{t) uON^un;kƥש==t{M7}K=WE>tM7O6}}a?uo|Ksltwåc_QAt@toex=.ye_Z_Ϲ x|\z|p3ӥW܂||қNro}\:]3ޅKϺB?voF`?L!zN9]:,]z/MO3.ݹ%yt]9}kKu9O^WGz!tݸk;^aK?~K7ߛuӥ_/.zܬ[Kϧn^.}}vs;.>*׋yٸ9#'a_/9åoJue\zֹ_//xb.ݼp9guwe\?Xύb>.8p-t:_ϾA]z/RGoٷwK'ϛtve:}v֟~nni}{v\z5.} EsAqn֥7c`͑^̏ t_Ϭ2w`u`t_ϾpPnpaMݭcǥ?ng_ָyK>O 2+d#Og:FkMHn.ݼå_]D ^֙zvdm<]Ú>8߇KϹK3Wҏ=ѕ[?6OR^{֥߭KCS/I] G]:\z#p9t3.f\46kҥqm;O ^-8w\wѹnz uK/Z&OZKҫN5:qs\oww\avk*;N9;/\oVMԷǥ?Fztޏ[o.1޹#8uo^.9So?v:j]s)_ Kz_޾ѦK7?Xx3D=z_.'sVяkyKw΅.GqzYtMgݲvv/9snݹx>_OFz:L]q3=>qeu:[>t.^cxqOc}/ݹㇵ}v5\S/oK_oOo5sWϝҶkO|wDzo{E9GzupKԷ:˳z?ҥrё㊫msuvK>NCz^ss8mu5eޛy}qyԥ틫uK‰{˙K9=@ΜM\~.5sWy<o|tnqN*z3сo;J|%:8ߒ^~tɚ©C.}_&=ϿSOLwuͻ$ݾ\z..}kOK'O~{qM7O{ӹW~Yno///]sE;tlNئN9#.}:w`Y߾ޣ>K4\z#KzkW~.=9Ny{ǸjKv֙k:\]ҹ_q|<˹k\y䯧ӥ/~]3?zQY\7:vV:/\zڤKϼs.[_MtL #tַkҶSǥ֣qwԣK:z3wqCwc~#]åةstӡS?uwmݺ7Y?q},\z]|ܳ{8f]z՝NnW9\z悤K6wkw}׳zd}q50GusVҥ3*]z̖?\:3/:;]z:Swjriם;p9*]z/e}N\Gk K'߼ZϏgg. 
s*k琺ݹ59`{98+w\K/ߎ\|oԡO<[ЙS?tԷK.~ny=&_0X#]z7>|K7?U^s0J>0W Koҧn?"<>^9wR'wk;uёKzu;ҥۍuoItᔫms\3_~59eҡ_?.趛st:s:sovCեs=KSKש_G?Ho#umao^+΅ׇ.ۯϿdNŚ<9[ף:u[K :qGԏץS7=>6֥u5輽bƥ_OgNnDza.^t/ҳ.pt])GKϼ#ONy|ӵu?_/v陯N};η3Y| /\{Щkw5.=9`<0^tm^O+s*EΜv¥_KN.MtHtrDĥߧ+k{\KOO.ݹ tp];p魐pt>tt꣸~KKw.ҟKϥt.ݣNbyҥ.k;y<]F橓o{υեsyҷԥnu?9Gy陟s'}Y'ӝߜWu1/]?͓ԥiu楷^<\z~yݖ.]f^znClkқy=kyn|һۭ|t麕p>.};q#QG.~XK.O2_8\p9Nh].Zu櫇Kϱ#/}o]qy{:t|߭C7/鞛}lv^kn~>K>'\zAye_gz]#/=\PCKU^+Oҏu OvUWnbMKågZt;um;t>+nǹw;o܋y>.=uܺΜ-vs{'B~-;}q<]sSK`^vӥ㪫n~.=]Ot5ON^ם_34/=3/=O]ytӥԩkۻ]d^s`tΡХ_>~y2kݹݵy7:t^twf"sgӎ7]z͗5/=gtwn# Б?ztso1tIuK|}mS>>tv! .ϼtͥKNN^yu5=¥7k]7C{gܑkO}EuSs_\3wK?ۛKם4G^:o)ɓ:t]͚:ſA:}й߮#_:vZnI}}=KW O>u!NCaM N]۝_ɺhp8ϹtsL]:ҥ{~ӥ?=y鏎r͓ѭoKg^|H\zϼtȇ'/Kw\z:t~ޚg.҇k]zaMɓ:u?!2u_̼Ku/ҧ?ye[}2?'yxy2.ܟɓyåζ:Kt9Iӥ/.ͧYܾ֙>uɸKG\N.C.% ]'k/;}.}+ԥg{߮~g8+'?|&O&9}{piKWn߸.=t']z׵:u楧#'s6O.}/nOܼx}~o7tԏދ+3OFg^NեO>.|p۱KG5y5}{W.}py}yү}{ܧ}N9%C׭[~Xw.}tg^qvy\pɻϼ-v>:sK˹tKoޞzҡfy:t#oQ?ŅK7]koϵyVOsK¥ןuny6}^Y%/zףۥXuӹ{#yntP;G:t'Sťg_ӑדtЩ;'w}.};Nҩ'N\Xy}쎼vq'OtԣKougm=濤co}܇2odI^9H楛.鳋ۥ?>/G}{G8񑷎Kg..pw.Ot=~K7msKϹ^3̧!>E2/zܼt?t_K7_9kv|^K߮Vt].ݹmҳ0l^u5SgN=.??ۭu W?n.oXSNN=֩oWӱr-{t= -NhhoOwK'^^>.楛ַmtߥ[_qK^yo~\yGg38xKﮩGtϷsjtt~u)>]z\_ti=:/tny5}{~4/9JX]/_x.ݹtҥ&_?R}|sHN2?!]:sn鷞g?_yU|7ҩ3/NNyK|3K<.]]sV楷K.}K|XS}\:.']h|#/.K!ݹ:s|<ɼtiGoWV[5^4f\WN}_hݹ}xrM^zѡG~̜~peۥ_:su8r|?\zg=6].Y~҇|fw޳yU9M~K:mrU1/ҥz3/9 VåN0Y/t׏K/:N3\åWvwn?xeMҩo]>.o|ՙS_:t=g^zޞya72\^ve:Ωo_s/ԣxHo#oNp돼atҧ__tם}n'g+#-ȼtϟ냋:|b5eg^#'/9 3G"]s#t]7<>f?xѝS98^u?Im~̍J>tӥC;wBV}l^zus_:+;i~X&o|:mgDx#\S|6k?{^J:r~ӥ_zYCҩӯ6]׾tOt~/ϼt_{=K/;Xo.p{Ύyu^WǜU\z?K_;u7琾_?>O3/=.#/=8.t]]2/NR=upG^ QxSgҩVu腺͚޴֩s;uvw]Yn"\zJ^p]tN3X':u_qҙ~Н[M=ң+g>^ѡ'SXknvҳ/+]:22~ ]&\[Y[XNxv^:?/3.۟㼙.tްo9䥛wh^Zc0s^cotOǨ:}wқy޸tae^:uK>yYw9qMN>{sCc{ӭs|8f~8>tqҗu 
/߼t}qkҟys\zoK<@nst/qߙ>p~2/=܇<ΏҳΙ.Q|w}Zt;Z':v.EwN]|me<@:9D>?ҙO.G^vݿn@vuݥ3בOۏõKwNyk]z:νqkw_IkoO1l_ttm"^Ow;k?^ݹY7D$]}vy|N^uOI>.}O?;ߺŚ_>}q;}u~ףkyWt>nt|sw楳tїS$wu\:uåg]1]:tηץ׏KyҳK;]u'~.m9wR]s|_]s3O=o/teF!"Y}qak>-\z5?ltq5K)\z~?ƥҭf^zunԹ:t'SS9)QO/ؖn>;qtCL^^yW>.}]u8jK8Bҗy۸ttn^NmvkK?.Kť?:[۫:t_.]'a^3/=9>\vwMy6.ݾ^?.6>E0pOJԥw]/._twt:]z>~3d}ӱ~~~4/ݾp??gKr/m)V]ޞzo3'5pYȼtO;/=9+ӥKz9.>ԩ'.=NxC7O_=Х ~?kwtuüw\:snpoN|åo7.}߿kɚvZdsIz/N?\p;.GC7O.֣μCTft*K^m|\}W~|Wɓ!~}\{ӱS/Ǎ_Q~3o\1/2|9saTn>.=w۷W{=^9Mgyчl^¥g=(]sgt㝗q|yu$ϼKN}N<^t<z;枦Kw.}~HΜ1.gw&/=^͓5/r}Kotgtѥ'{w9t5mksNuԷqWԏ./yѩϼ7]pkoM^wp٧}bιNtoS3/>p{.]1`dx}o֑9ɼ:p>E(Kt)Kt䱯tႋxKw.9o[qnvyKqQM^w9yottS>.>t闷9]N}gGe9c7O^y;/=w7uqc_wKyݚ~mn^Gn.~SXS:qKHS>u;ǥ0Ķi:tV_usםS//:u>:~Mܫg!u5_%ݹ.]wN};lx gܜkZ]_qo:C?nҥʩoKO=Q3\{Y.ā9'׽Õ3'ǧg`M y>c3{5Х;eבː.}¼tߟHscq#ǙN^sir;cGnAt_3]S']z]_sz9y%d:kӵ~+~뮫GsHz{4U7o//]ҧƥ0\ۇkבӭ_ѭS߮S$_sϷŚq"/4[?.].>\S]zKǍK/K|tsFcq|8Yqko!e~N#J~|֩㜋rsMq|Cgjѱ?tvw͚zCiԩ⠛sWKK˼tpL>}(\skǥмt;wӼyd^p{...K'4]zD=ZGK>.|)7:sR3_GzZq<9I;Qn=An}G^|~LJ_S'C~|:v'/}ϭ2/ݹzbt;.}_tq<}>..mKm3O2+5Kסo=蚗A]5/}:_=.|t#}֩=y|K|?S}Cg)?tߏ}hxsWgKgNY3kedM=+lK\rq]Xkq\;7/;O^zF#Pt銛sWùҗ߬.=]9Ux.Z_8җF.{zƜRtp?.+yt9NҭܙoN:.G[gޘ;<>CKt+ۧ¥O#/Z?.=#2C}]N=ϜK]c.96=WͿ5/]KwDzop-.6/[]z /]psR[7pqKsM:ksʼtKk^z_\:·K>ٿSgN*<ҧ>K:Ksy:uRgtgys|O2w!뽙N}{9.ѝu']fM=Ku\ȇN7/9 n'޲fMY<_楓cr䥓yy\:~Kw.yo#/]>zֵtҗspǧ7y鏮ܳS^x?ҧcy:sx yu91yCyp{vKNNY=\zΑ;åGq~ҥWo?~.}K.9$[kWt/r9n^Gwn:NKKo;o}1Iw^Õg~:<~G^:ޙ^>.yK;ѯǥSLy^GnBttK>߾K<KKK/ַɷ:tԷ3~p,’go_7kΜ~[Ϲt~ʼ9α e^zw\s¥"q<͹ҩgK~kqҧk1ļev.==E6ly{??~u]ѹW2- _e/| ?ވX/G?^7_oωk%Hަy؎+}!Aspݟo<^͛'5soUν6*qUscwܑ^u8v}ݺp毷fy8KW]ν^c ޾5*yp:j~'ν.3νQW<[s○so۫_G~>ù78q|Q{cN:7o_5pM]k;oun^bMkM뭘~a|¹Col~sp1νv^9+}:Ź̛^uo/չk]Gzw[۟p=ùTֿ:DsD8cIu{+;o.{ù>soż_k ^u}8%]|8?O¹n\pŹn{8z|{itu_{~so퓿p^k8ƺ^y5cg׹vيgpg   t VW1y}nuߗgyxܿ}sg6<}r_W`Lcq<每KgF5sO_ǹ=]\qޯ8k[}=x>w_`71?ynn]_>\pڹO6}ǹtG}u#k{?n]:7}:=0}?o}n:88}\t܇W/'}sQ<>Ǯ./g8qU%}㧏+Jsw9̟o 8>os_7sSsw>Gqu=sg: q'}>>Ź#}u p >݌73>S8Y_:qqk.a~Ge8Odvm|{|׿p[_8/Ís_׹^ 0o׹NЕƹ[qþi׭'|'c>40"ν_:qo:=O?Gכ>׵yu̓¹;7_0 >u?9_׾eo^ǾV>}!O}F܇yo8c#}s>81܇q_8aNN|Pss30_~ܽ~1}DzraC{qݟ| wU? 
Љ_G{~lV{3uþH #'mùun@>~Ź7o{8w:w8zzoSne}Տh2^eu/ptqCLJs_槑>~߻OnSǑq槧ϟ8aϤNzo鰩}vs{8a3Γ>$O{'_LJsw~mwySouԷq_ԇ;8u7܇y>q~3S7u͛ܽĹdL}9^| >Mw7q4O>}LcS=}գv3KN=Kw'뽗zе_w]/GoI}sC3Ge^ƻNc[6}sup]vקn} u>~܇N-ν1:!ϯ8"}L?NU܇WwکGzCt_yq7}z>&}ygE_>:ù[oùϏ}׹{Kn^#}t~8ΣNx^d{8^+78aԭ?:{X/oP;o}\:pc'1>کoOLxЍܧ또>SĹsGzĹo棒><܇y8>:xS>2Ow͘{/}\p~qsen}S>.Xߞ:xܾGMs_ !=:Lin qv1~֏q_햧CN=ǹ{羮Gs6}ukt{q ƹCsq8989OsnX Csk׽ezq瘿>?ޛ'}n'}}n{7gẄqUo:z͘Sp[o_~eޟOuWܻ~muu.꽮pu7'׹Isq_?l>ϐOl=b8zt|S?ǹw'}u7ܧŹKdN}t9{ܻމs~ŹG|קyq1fc}{ӵ7pO{SҹM;LJs>u'>'}qzp>8?֣?Ozqw?s75q ~ t{__s_oc|ϭo/KO?νAq81.m=zs;ָ8:2q_s_ǻ3v>s_͸8׽emk^>ߟ-o[S~W gøatN>!}q_w'<{S_Y>Yuatǹ'ԏ;oy}>~p^/_G;zy糳ͱ㩗n}??8h5Hws_Gq/ܽs_s_1SǾ}6?:5?9ܭO>G 7ێzuƹAxǹs_b<3ߍ͍4y_]}ܽ1}߭?8?s|s7߇qG{soqٟ'ey{?_wٿ}ޯǹw=8oŹ{xqҽَ?_]>:~ܻ~swsy=;_{mǹ0}r?Mwhg@p8?c ׭d `Y/ѱӿup}ܹŹ.zq}美b|Posrug_:qzue[ޛn=~>p_~N{"z!}ͧN?siԭԷqҩӿ=8ߧs_8~|}sǹ^=ùD]`.=Nҋ1.}q|]\7$䅧OyfSYw߷Ku!J#鳋^>S'_oKߴK7.tYwNЕo_tS}cKg1q##ĥO+.}.i^gമq;ǹ_n >._tuüP]:} v|ttou'.ױХtq:\u"],f҇x1t!r~{ҟ複};3iڥvgܺŘ:vq޸v}{5Ot\KuS~rP~~\z.yA>7Kg}v\wK/>Ϻ/~t]:J;uǥ_~n qNc`W #ڡ_KqͺSnc_{]歏8q}IW>qާեO.]Kt>._ّ.t||K:O]7K|~}vGGa.>ĥz|ۏ\qE2.ݾF]NeݭCqЯl>qu5ۥN:(/7K|klu {.=nws]i:.3i79yq=ťĥ/8~>wwt\=.ک˺us3ϻ+~\K~]M^n`sEAVto/ۥ_9ǥS7ӥkť"ڥ:ɦ3Nu/qy~ώK|\:/NW#/Iu/.}n?uv7y|_.)Kt{Ku:wXx|?}ıqKKw}O]..}uqo~|\yʸ.}|KJg>Et[;up[ҷ+.} Kߨ+7q.(]z!.}u ҷW\>.}_yKӧ:Ia{\z޺ ҥK/BХptݸץ.uҥ[wj#¥w".2pK;ƕutҭ\;?ucsۻu=s\:n_;.}KoKut\Oyqe{\.}KoK\o||]rnOKs]/n>.'/} XN}ɫK+qKӥzooו>XG;?.}q:N]}xt].ЁOv\4En_.?t.>A\]W֥tKӥw}\}p)o]{wȟ?_ ..}ط1ԡYנ]uΟۥ]z.uOt݇K>F]҇U^_v}Yԩϸ֩K$\,cǥߩK7]yX|v8i<7]}q͓ӥ__qss ]wF>ͫ.]K7OJ?\zǥ[>.=7{|C{Kt]>̃ӥ~9>3ukS~\zx\K~gvt]Q:4KK7S毷KХ[? ۥםOw<S[K^:tyqХϓ;..}wlj硳>\w_ҥSԥ7K7.}tq~KOD>|\] .v2a-.>]8\GOS\z;&\z;^]e~u[֍u]JFn.3p+.}:ϝ+]|:Nf|e[uM3ĥХߜ?uďK+/>y{Oҧy{َ:}|N\zãKwץCCLi3ťSѥߏKOI>K".}?O|KoKS?߮C^>|gCC\z{>Kyݸayҙ[֡?^~MǸa~8.ҧڹڟs>֥Smu:ҧyΜuWKһ^Ko'KK\8}=OwMW:K|t^sqzt]zߎK8ץn_.}nu`tҿ>uk׭玻O]:=t}xn:9<:9]u^t'^7J:tAvKWN:.|?;.ҝoyЙOKSﵞKVϧoӥpoե!K?.}vytG/9{KoKO/ץ^:uC\zKґGyGw~NСK.?Ϗ|^tts{_o5t[ƙo>[wԓ1[ѧcy^?]"]z{KѥOױԥ{=KCtץt|tХߜt|ۥuR90O\zKzq׸t?dtҝҭ/e?C?-:kǥ?ԥ{K{Z\9*ҽKבOե}եӟK7oS}Mp]1a7<ߞ:v+߾FK..~|]z;*]:5]]4uwۥkwҼҩ_KuqץCtOYA. 
.}g|ϯKCǝzv.;.}ǥzt\][>;}wحG}S6t].ҹեvu{|\: \:෎m=owv_n?LҝGq>ĥ{F>;{~1.}ӡqθެsqCtۇrp=өקKEtaѥK]*'9Y_å_;/\^1_zX|g]v_p1]ҽ_qYg>?‘O]ͺ t׽ѥ;ԥv陏҇ymݺ]~}:ug֥ҏҭ+KgKuOc$88cN:|~'ۥK暑l_>.Kǭ?!N:urLԭMtKיlS|0ƥ۩lq麆ץKvKu> nR>.wٵK_/$8ͯK/~x,\dp~9skn"yӺuK?u;t[γ>q1y2<:pL݅vN~w9w^:z{|t5K^7I..(.].|v.:{oxxyz>7_nץg}_>:4|KߗwK?#1u o\KǵKש];.tK7_>!x|z}A9}q䝷KP~qNuKg=v~_Kt:]㸨LߦC|}]z;_uwWy8o\:uK}}vN~\:F>o֥?y>ԭ8>[g=j~~^2_=Y'>{\zu·pko/\o?]7K?t9cǷKW>Erå{_]:uU]zK'nX>[K#ҧyίKS\}睗NN\zK7;¥n}>曻ơcKtq;_0나}>q6/}u~oc̯K?9~>t{~\.C^o>.\zA5/x]9}#KUwopͺ^'>u:?y=}_3֥[Gnn_/.<ߛo;tKw?oy2-EOt;}8者i^k/_Wǥ..uuե>K7?Kgqn9ݎ'ng0?R57>@Su):}ץgo:sS3:u{;yK싻ӷ;|oǥ?:i}.}8=>\q_Ky.p:8]G>uq|}l[NƏK?o~|~{~Kq靏N]K]]zpg|.vۼ~t]..}y¥;_Х{\}S~3>uuDt*qGKםҽt\{v䫔>Ko]N_\.uA.}'^:u8rӛy߭SwU89^{_]uC0.쾸1}{_{gwt#.}o.aL߻kt^yd.ut:]:}u[.ZJgчC_gt;/q;hg~K׍[jNPtuqi]s]GNKґ>?.<.]e<.uf^٭wyKc^Jκ͝'q}{MN_C6]9>:t5O=]On^.]WKc^K>t^Ǽp{G~\wԡS%{/:>~cȳ:o^z>楛ϋKrv|t.spz9N>E:/uFpa]y=p K_ԟ+pT?.} ?:d|}}ҥw:y?޼'C]zޛWt]..]WK7X1o:~?_ǣK y驗||:FNw˥g?}K7RN.}.}<]pzZ߮e_awKו]GM:MqwZ.Ga~\.9y-G:&r|;/]Ko?OiK_.>E>O{¥w=MM#1}v|>.O9.}ǥ?Go] һǥcOb^W)>:u]7K9O7y}?ڥ[ХO?]w7_^S曏Kǿ\z׾[:sN[tǬJ~ẫ0Ou]_ҥ?qct|yzzuu^c?._|ͺLy6ף~S>.y:tN';/|.}k^:!K H]zu^t?wCηu;ב;>q)u۹5/z .o߼vKo~y8[:u{uO}}z.߫ץSǥ:;?tmեyCK>xׅuoOƘuR:Ը9tߙϼ.vۥ?=o:9`ҭיt楻N/.{ 2?Kwݍץ?^g>KwݛK'.fcgtѥҝ}֥_oϯKu.t7+\z_^>uvqVy鮫K_].ҡo3]_t/_?\늾.}=owzS_t.}\v8S?:.nQoOsK;6l2v׍</:tO9LJo>g>.xv9~꽻Sn?swo]:sIeY~x|ץ4l8#tqd3tyy;ǥ'O /-]wui:򸢏KwL}'/~v:<]2/ݼJ9t^>tøtrڥK7uWuoO=q+ߌGsww.}:{twOK7_un?v;ޡcǥoo~Zү߼tlnީ.q_uޏץqtݱ.z'/=w]zq'v>o}[>7cXKv酋9ܸuۥ߯c<9ХÎK/].5ҹ.utӿGwQS|twǬkzt]ynu:l߯cgy鷮Щ+.a}ts\u#]?'Kکo:zq黮ޏ>v/iޟ=y#:G|dWǥvʛя_#Kg^8y=o=۝N>>N.}n.ݼhҷ߼9ҽyy|.=ץ?sIe۱;?oCG?:dzN[~\|˱.]:toN~]`Kw]G]g:rNS~u?og~c^qTۥov۷u\Μrf|73\Kutڥw>|sL|q)g?.}0.O;_ĥy]{Ϻ.ϙ ϥ?UqqDG׍Kw_zt>yۭkΩkāY߱]_5n^:K]vuߛ_x~~]zܠyץ?:ulm:uu1{NJKz~W_upӼ8\xn>tu~:4q˿W]ڡ~NK>u<ñy7c-Kl~N^nPХ:v6ƸC~[mХߏ˾yZKGy=·;ԥSҩv^ɓ~qn>vN}w]y͸7Eǥץyp߼~˥ѥ?:z{Ssy}tG.]m|>y:t_pһW.=?.Ϻ>E{c|۾s+ǥ/Kwtyu ;}v9x۷|y9]gK߿.}K..?]?.ϛ׆KA|ҏK/םWC~ץ]z~p|]h>_Ϻo^[>KKo7K>D祻N.|O^zr}],}ǯ=vK7>''\_?:'/G^ÎKi]Kt}w^:z>[ />@q0/^~>c]KO?_SܞO0_w^z·u\Fݥ]: 
t]Uҝtm>6y雎8.}Ko>k>{Cyr}_os|C7#/oq8ݺA·K?.uIuw^tU1/ǥB~}]zu1y靧ӷǥO_>CN_ܩSg9o!/ߟK??oǥ^ǥ[By_?]+'/tnǸvӾᘺӺA'ǥ;¥+.:.}{sKu^>ͺ<ǥ]YnqKt欷äKե}Y'1Ωĥ[Wեץn*~5/yt=uwts_uM楛Wd^}>tvny>s7b^}+6/ݾ2]u+¥wߋ.޼~/.xKތ}v) Wԥ{K:s\Й|C}sw:tS_C׹/.ؼ2ov.߭c'&}KO盗'/}]:῿3?.]7LK?u̸uҭK7Nn~.qwusԥ}t].ݾKte].?.1<}x3_qһXWKgvu;}vw]kåǥҩlwN]S3cޭs:?:wpӛӱ.d:uRqܬCZo9hN}/og|җS>9?{]:ucqյ?+\ ͷz]z~t_.|d]:yx]:y}vq鬓yɤ׼t?Oq:k\}bnyS>tS_ף]vǝN}AO]<|ps]/\ʏK9{݀|>/?޼;^.?/Ϻt>ywr:N;.[n=C>zK¼vqiѥ{K]6...]Ko.wץ|@ ..>vt.W}ο>~]:YwKץb~u~N;uӟO=ҷ7?uHw/۾SN _e\z;K\zK_KۨKw/_NM֣.wmt]9ҭ^o~qS֥|t鮓ǥb^L~Ziw7'] ҏיS?n>.ԥn.v]zSg]ءS^.ԩ]gv:ڥ:tq?.Qo75:uǝN}GY?1n|t|2^~u\ukK.9it:qe]ͼtɺus=KwKЭooߺs/n2+\SԥwR]1׭ۿw^S.:ut3cn?.]ťM<;;ļt]y?>o>uŸtۥǥ嘗~ۥץ.|c]{]X:]gKס+s?~r>y8z4.]Cw:uǹus]瓗>t|utÿKlǥҩү7]>yҧ߼z\D]vqC;\wmng1u~3~ƥGy8K.}o>}<ǥn}ѝq/:w|]K /uڥ?tڷq8ΘOїz\֣yqەo.߽ά.FKե?]+:\n_qK/йKK7y&|\t;r;?ys{K>zo緟JixKXW|8O:t'ºby?=.cu']0.}ҥpCzvqҥ{KF Kǽ KK;|v7>n/|ɼtK]:>=i~wqx\z߼toַs]:vc\8p/AM{j gW;4#ל%| l/K\{K^4cv2u|oc 맧KձԋVh}yi7v>eFu8؋aez_{q^߮ND]^^պ PDn@4|V'x;MVz^SNTidw߮-ˉK=^.^ڽXbxzlϮ8ոX:8e_mիi4ue&2<׭ OE0}޼^5^%^ZE)^}|zW^zgWůŠQ"tkRyK}^j˥#B0Q~ݢۇ.;/ ;M?AfQ25|^Fů{BVbo>_u_ԑ捉gŰŭ'ym° kc]8t#3]6\{]=kĘ v85*˭,|e&llyREˋАō(`y>bWχQET^{^::|}avPgxriG L]]nwvWKOKđ3Z*WYjC^:^Q]5Ϯ6~U*2enxO??tˍ3ln2jDAuQt&2^!]`\/(^kANu]ZXɥǭt:=/H1K!5Eb;xۯ I~lw筯"x`wS׽; 2}4G%ܓ5U0$=/=v㞇i뽛vs wB]Swk>֔~*UjF`ݽ۵IW+_)#-,˼w]Q^Zۘ2Z4d+Ķ|v^p- .S'Vv!]F4j!R $Ԟ:8i]QeO vmFS|&)/]S@$]ݯ>מ1NZ&6.#WL.|`vQeCF]tQS4k1S=xȞdί^= ƞ6y}S0:`|uȔ}i.VBaMĩVYK1eSv&j` u#ɛZ[7>Oezs٪ƺү]hެÈ{.ž.rы0[œ`Ϯ`65^.2ih y~s\ңP%K\z9g/aλ]Q{auMv0xK2Q> oym>7k WArRcD6φpW>濳ZΙ!Zyw5!͓!rL̵.!y^֓H2`5tq}F c8Z^IϠbK" s:0g#zy ^+yKORd[|$3U [Ώ?e9Y: Y'IM3/\)7GuۄU.Z}TcqVa*(*ÇSt ]7B\ @y<.<:/=ߣZ6/iC든m΁0VG^ԁs\XHoJ@r sƗG3\\׋c[D"ש^-"W6j. 
(BWKZ!L= @./.9r%QRˤ2']UqvukTj{rܵ8\|c5q%ez$dRqjSHck@c82>≮O}QVt|IY%x޼5 ^$|q_҆EWc:Tg| Rukrݘ⮜F*.} *7eJ&kS&FU&'60w0c˥Ck* ^.dH<#0⚦:||Cc=xA\ }PaÇB;z\#x6uup&Pa8<# R.U^phApJb2\sp_x okJ ^}=JTWZ W탣Qh@ OauK>_ 1%KpME[`6>kjw%ImFԁ7pvu8< C&PqcT-s ~_$ˤU h7Z@/̜pg]CK:][#PlGi3x'140E{l6&!ӢSpׂg 1DNK5N DvY`Ac*wG3F/ԯ_[0}/H@W#\&޷\-ܷ6w 滿 fE hS;<%}˅}ݏI=5U {w€|7Ʒ\b+ķoCŵmv^w{U`S|tG``x\wae\To[<)B4'*Z˓ykuovkxkzok" k;.[Jz)^ϓ ޚMc3W-{Rq3X=/rNw‚w^ݵkw4rXX[/;OoIT\9$_ɛfHZnot*pOs0=ގ-CHu-E?@HQ8]M-ߣ 2{6FJnIukxu~x[ܚZu?YJl&ɺx\ `VQ5D)?1V!uȝzkO 1ܚ]m[y!U5E_n>zzFu~­C ߴGr{<} n)H,vvw v?4>gFg(ivtxn0wEj5}.tɒ>[L/\=58fRG_-#yI.wmM)b>٢CM^HStd[dF`l# T\y9{tF^(6akMǧ&xWk_4vYB&ǵ$cKa[ܞw-css7y]vCi˕)S>ZWz1-RQh=_mj3Yw Zohg-gE%Y Y/}&L6%n.ہ))w^q&lv[ױfv޾8Ye_߂>׿hY{lvu][HHR>}SN;o?L9Q"ZW%NA,rYQ/R&=Xcc}X:t~b'hXͭ9ǭ)HfEbn@>c \Zݭ|?}g [B uq2 aE Vb-yok.Nms}%f?Һ+IevVWxW{Jju4/6^bWS:]80O?ts՜.*WUz)5\W-3Gxu^Yb2ëZozZʭ1j7y~RwzՒsHe"ӝ!}ۃ.'ehdW8ǫ^+RU1rT9xԫBPU}xƫx7jt=n= O{K'jxMxUkzU^4|6_WE _`,@Ma(R=Ͻ>ߴ<9}9Ukꜯ,ëF^~ӒuūݎW5*5«W5G{۔auDūk.gz)ë?Uw5m>fxzBϮS)StOקz~SUK{L >xK{r-)Rsռꖯë|S4BU-Uk@W / }xUk]^u-W}?SƫNx';zSG23z%l?xF,HpX0r2ǫjpfUWM?U T\մ<]xtyIb^xU~jC>iC:iԫ TWXWSoh{9?x ƫëƫ6^)~7y#^uӍ2fpaFTg >^2xUϓxU)Onj.Kz/W_$)!S{ܚK]n׫vU!P:xCT{@_WF$[:UKW}iǫ~cO{Ua֫jUU)'^:%LM/at y=ujxU}WW~[Wu}Pƫ5u3MU'e.H:˫NWWU+īNKWU&ѫ0V;ūJxxC7xZU=Qī<]xUޅ]|op73v*{S:rxWuīNW`nځϧūjnUi:z>}U]UI:aZpҾǫsU5xv?WES~uA}@5j]^aiĺc_x{W7#^u>Ky=OZNvU9Uߋ'/v;<3%OǥW7^'(ī 6:^UbW5Qj)^ū*Dq bxՍwڪtyս~47ONqk*{W/id^u*RJUY^Wj ^Uī ī:«v1F{S5$twyUXǫu#`JU]SKu^u/⓯OC_7v ~xU:^U W1ۺU4|UMë^5Eӂ+ 9>ݍj1Oav5Y!׃x:ց^] aU"ƫMW58 wz[SۜPХ^5L&2tVM W)r*U]xUsd9ur&xUra]{[ī>>U5\xU1 ^uezU۫rS,΢eAzUaګSq%|v]Og1L)neǫzf^5xUzՓ^j I2uUݴWQƫJBګnx+_}<*Nj[Loxݕ7Uwn{XHnMAwSDѦ 9|Nƫ6W5=^u+W50^Xs2,zxUsWVƫK}C_ƫeUW)HB]ij$ڠzوW/;^UzZ@'8A"5u`hU5G UU7iЫ]W~H >^{UU;L~rndh~pU1tOz|FWݧZ;L|j #ݨ^SqԬG"xUV/2%jZq+᳿ B+xUDC;)b?'G-ǫ·*^u:Wua*~ί^yUUW~ī';׎kfljI{wvn-QxUτa$^tOn}Ҷ$Pk\UmU;yሞūW }}SK!UxUS>!^^YP7Ut)5UsՆW:Z|#3F2iYB bX$*PQEhu`vSq%P7^u2=ߦ {UѾ$s=Wum jƫz1:WRqu}geW5Q*ǫ3ҫ*ǫ^FU^$)H~|%zUU]'xy52t~ ("47*?OxAͲ~E{W-U̻WxUOAUS*-*`RWS=Y!GB 
7^3j׾U][IPڃUyUUëگW}Ly=y|5勃W-#U;{׫N嫡}UjīW4Kaī>燒2)~ULx/AkG򪍵tՍ-Dqy2Ѣ-ɣ},YīzZ7UIU?W/^Y_j/ZEcjמxծUY9WdlnūfxXqfYM& y@_iīv *,q빊U7|+RjUSYlTٚtxfzfϮgʻxTëzrë)pc@V7WTgLA^xꞡ|5=\մ>UUg1zUHĩƫW53^{ʄW}񪷸5^uxUϢzՍ@5jz9pZZxbzT!^u>'[A*Gu. \!a*OisT.Wm [wi*D6*zUqM꼌#~"5G:0Y= E{*z[9ƫW=EzUĢ^5Z ZjIm^UW+Z,WmY YIUdؿ]=.68fWPН;ʿ=~3d_ΙN o ^u^דWb_WO/+w͚\W=GE?76Hy7u[S}ܚ2)~!' W ޚ4Bm*x#We&jMCgǷ īzW}$t:y[_^I*kUYDZ2aiƦhNLǫ >ꔯT\ǫzW5:EK M99*r$L5+^5:|E7ifWfMkoMŵaWw9o#^;SxU;aW9U5=mNUī%oWm9%_ו}*Pty(#ӫJJ:jv5NJdQfDY 5v׫tWeR_zUQ<^2@u^'xPq,2Wh/E֓:pk(Z^xUxUVѫv( 聗]@ԋk~.MjZ^UWW}7,ƫ^zETjx~n[8=WEwn4]@xUoUkFk^zUJ7^uWNw%neW/_wl!x iDUO~DaU}y*pzAs>^u*4OZNGīL^JVOjn*Xūzl$ӯW_͚]uj:P5XkGg{o*Obmqiƫʔf;UT_[Ͻ ӫTW=_fM#V^xnUxU&ūZUO_YFW} 9" p5k"16pixUoU훯ZntOMxSʆW"Sj _/,^RN֞"u2xy«<#U5kZ0񪔸7Y0Ttk.7q++b5eNPMOd$qzj͝M8r+rYt[Hnm6Ÿ 0 2ƍʶ]U{AYuOޛ"o" #'TU6Z[,ͫg4,y4k]W-vVXP=[F«Ejp\WUWM@Ի<])= Z̍^tdzyX%t3ـLG_gxU?W l9 )=07a/nʱXʸ<6k}"C!pRͬ+Xꃑ@sWL:@j$WԅUCAD,oQaMWnK**(NMA_|rQFټ eU]qԼ%Uq8=Pɞʆ[E{NcU;Y: ^\p5 s2U`2WKQTíj0)^"$U9NT5s*ြVW|yՋ$xUW-D̫9y~Λ@`j77:mӕUg^j6x"L=W dֆyX @ȏΠzWs3%ΫZ(xUX iVumWE!xU z+ms[uT+}Uje}RYd:L*O^yUZcY uWMv*{-w"uAAGWMūaP4xՎ) `)O;jUo/^55zZ }E5Ql0Uu6,1=UGU7^5]f!3R0J 韐yU+n *U9s&jjY+yJՀjY̫:k^)NIW\3aW  _Qz቙RWMdLHsϥzT-.:LVb@WU/R/a }M:«fK6zUMcU7SG )v[v$ = TjD0WuWj 2«W6O!m)^@j78i}l}U#:*DQ?yc HB,* j`Une^tWuW-d¤ )^*hfNuTmyP#@ @UJp른zC_0 @_9 m}UIU&Z桯|:ɀ4ڷ)16V^K_U!^xq #`}Ut7U ͨydSkejKeӬt0&9"{*^ΩxU@xUk«zjoxa3߅Iūt }UW6rK_ oGx]U"T%"Z jq"^55jXXL": ֝W^ߞīЄu@=ن(*R'Tw0Mat˞ ^n+ȴHUWUͭW%o^5 `«h3UE{2[$^5U WT@IbU5COj(&vLuTk^5Xq*TW 1֌R0zH#*EͫDWmF2g@ yH5*h ^՘jy@ʷTUS77j}[[1L V!p9VU~㨒iV-ljyPj^ii[Mp[tY^!ſWSU{WԀW%^oͫfÏ'=={+%ټ**k Ȼ('dK (^u@XVoEW][ڍZc㫪M*5*96"n~ ^6oU@2U.xՐP?l -vc8U*m^+UCqT49=ǦE+^Dx4ū8WZ|H#*GW&ڬZ| 3E;J 1뫎%*Vò'u ΫZ^ZRT-l6uTP Ӏj[U"$J܄W%o}U3xVl^5Qȼƺ*xˉo5W%ļzx=Wx1/2oA_/b.¤Ϋօn'U)e}3yը۠OE0ëIX*bU= 6s8a|Uq`(U]^i(v3JL&RsUC (z.7^5 ^BZW9!8xվTŁͼ2`ͫV cZD0̫3 ?fgl+xUPoUșW ^5źs}; Z íJLՊ^՚,ͫE-Pxc\lpUɓ1hssT->g% jfJ3kvs%O 2Լ*R4sD,U)þxռ׉W-]IΠzkTkǬSx3|H2zt ^4 z`6b-]AU~.sЃ2VW Si"\5K_5aU˼*WyUx[Xi`Tyb0Q婙H;Lێ&t W)}UW- wy]#Us^ÛAT~yUXūFj]^gxU OW-|4jgxbUjw~ڌ'7UHLpzΫLp΍WuNyՓ*^5 
k<5ūaљ u[A_gWdZ<%"^vb7^5Ϋ:/ ^ZWՇ`xUU[d Uj}U4R3+d'Cū@_58x-<誐=uA=WYTeUu3K_&@EigrRݦPr*&T)O5L%bEw(:*Jꫜ@.tuǫWu}{xk!G_ˡ:Y-u^U-+^XNW=U0J-)tNKV ƶP?PUa|5OTK_J@s.k^E8XĔTAjxU «:>U5W6qNy2z j^oſwT)6X1%zǫz z-]j@^SRȋ-glW*x*eV*ͫWՔ ^UO eEQ x CGRdfJxU6~RBAUq`XיVW ^Ռx|ͫWmVnNy\j_IM[ ^XڬHfN& m𪮼'^"«VF;U@H*̫j5YP-K_j]jqh^2c{A& H Z,{z4PXFTKDxUyW܂W=ܖb&RbW }t[깘c[!ë^5NlS8'JJU6<(׽U'j>LwP'٤'k6zͭjWrƫW8WUkz5xU3 ނf"Hf4Y¼flj'UC25m UѢj1 jV*V]@2+i8+~& =O^5C_B%RQmh3R@WkKWmQ'1U2*@m^zW|\<$UW-wj|. ^4-UhjUΫ{}U(«I6i34kK^28HL&RUlPhF)B_yUJ]-^Qlyd'=]^5O]!1Ux,Z+!RTjVSƫb 2P4kIi8Ws1\jbz諒 o^l * K_Uj7T$ͫvq5b*UO WtI@㫷«k^vI,Ie|\?* yXռp0iuږ]HQ楠-xUsZVj'  a@UpxEW ּ*m g6UU +٤h^W+j9^{^5 mUMf a@UW~~Tƫ|ƫLur&؟t| /(S\?O?SŇC?'^c&(ܟSŇEs׋")C~z/ԗc~OAA)? Kɯu~Lz_??:O}/}-Lz?wWqGx|>$s^/uZ'w/>''ttzЏ$|^Ix:}+rKV,ef7o|s7?9 7^̔wojOW;'vT49g]#P؎IIvkR_b;fl'6v-c;'~9w<~LwfNv3M,)Mߟ{%YU#_j,04DX_m63mlMØxk>=1 v4)v4 IxY!i>2/VG&jXW} gE!59֮<vxx:#| a3"FjضWǬArljHq#GHܛG#KTl[pǫD"e_I?#@g=(< <_cTگaHUkoE*ݯp&Zx;˿;wԾ-.oʷ"}dVj{ABQu=L><&y-:.J=qe}ճxhx6h遁vp1P5ʱG:zTK)Tm4qQ@{h}\iUGO}p5#鞃z<REJ53V[T}nXc/1Suq]s18tcL%!cdi(7"5|Lu]{fw@3nvY?fWGk @n/N!{||ww?Ξ\iצ"%{_t7U[I.^ڦrǻr?4;m%=gw@v'3㶻:^u7\z]o>cwmY9c$?#ӮcjN]/);ȋZ\/,6endstream endobj 3 0 obj << /Type /Pages /Kids [ 7 0 R ] /Count 1 /MediaBox [0 0 612 828] >> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font <> /ExtGState << >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type 
/Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj xref 0 11 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000233322 00000 n 0000233405 00000 n 0000233517 00000 n 0000233550 00000 n 0000000212 00000 n 0000000292 00000 n 0000236245 00000 n 0000236339 00000 n trailer << /Size 11 /Info 1 0 R /Root 2 0 R >> startxref 236438 %%EOF brms/vignettes/kidney_plot.pdf0000644000176200001440000077260313202254050016251 0ustar liggesusers%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170125183859) /ModDate (D:20170125183859) /Title (R Graphics Output) /Producer (R 3.3.2) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 255677 /Filter /FlateDecode >> stream x콽.MGq&*3\@ $0 A>ʌ(`t_־Ɋ\k+J˴ut߿[O?Ogk?ϟqS+qQ7Ͱ<>8 +8?+.n8t\gċSpV'j/?juY-X՚gZh5wNm7? guwY 'Ɵ'.+jO%<p O%<CGT@^8k$;c-64e?gqlvSpΪ`YqVypV;pV8G{T<:p`W?h.\o.K %#{q)\ןfgu>x;ߺDZoݮZvpOӖ/ޗ;*>8+FЯhgEna96C}V>ӱ;^4 aprj|2x~ԴSFxAf:z6p3 xD3‹3;}ԲAp7^@XkNgi]ma۞ak9Cϟ~9^Y궐bE1T^u#|AdX.sPn\ިE P/BZzu{zq>7laWأ3737z@-k 8ޗ_%n O14~_hʦ%*Ꝗ~ S<@׋m6:?Tⴃ =\xwGW8?;Cupa_3]؄GNxH-@=@t^_PP^\ DMق2ee˛!9jzgXc^n}hTzzejLMSt-9oc*&t:Y-+J6cs^7g,c ZR}UE~Qνj>*T^jB8FeM~d]/?}W8T0UP^X/C2V($֫<|[{ 'xx\ER{+zn2}MAB`f^.3J8~z<3d h8?['!sr08?yp|ڿ!P^|:C8o}­60q>A~0\WZUl|>={ƷzAuʦzAu*M-aҎ?^~p<+K︞mM0YZ}kkt=G%|=Uv!+g;i?_ Y1Vګo Kyұ,>L ՇEf+ջ#~\K{c\&3x]73IEI|}񬑡;ywOdw*ډW:~ފ|Ⱦ0 ފ<ޭ#,w+]{#wĻc9"DŽ|wyNw"8&/ӅaKޖO<ջ,[3FH N3#ĻNsMĻk%ŪJvﲤa=0|]d&xznwO]eCǵWߖ]pҺ53K;Tp=K|+ao;xw2ջw0f#W;衧)mf#wŻ͌wKn3g(ޑ晎_[W: |ߑ-rmi>{||zz~?G\W<3_mn3m=_|s-^ϙ7x=Gl. 
ozesw%S{<6x~Gm.dz\l9ŻwGoN)=9Ż#ǻ _9ޝxw#^!zC[\y6xw{Sz1pwZon9ix˛KwZoNN-~g`3 x=GNN))x|1bkx|O/O|;; 7o|]wGߕ+}|n਷v>8p[;8ꭝчS=;r[GzKQoxxz+;9ޱpnd}~Yߩa%g}~)ޱ_#rKǏrJzK$R?J9;lfLo9Żyg}~)ot8;+nIopMzKRRU䨷ԯ9;{9ޱ_fS9Żg}~IoJzKg[7<|]wedUF[z\YoViYop9;oSc+ƕbݓݓݓݓuݓeݓUݓEݓ5ݓ%7$0+6⊍chczqW jlߘ%ܝܝܝ۝۝۝meFƕ gQ47lID!R` `2dKf1xBYͬfj3봙e*mf6FY4s0 &nF$&$P%Q1N~$\)Jɺ$̚ldI6"YFc#Zld)&$a$(d>;EkcM8RaNx%ueueueueueueճYtd[dy<`>i [ Px k P K P[X-KVe}ղjY]IrtsauÖˆ5W KNڗO+;/[y97jnGK}GI}GG}dT*#*/J6IIoA1>~¬At.&{ 8݈w 71{8'G};,=G\|8AlT&>In#0AGObO=!\bbvH.@72eV1o(q;B<9?I+Cx]x-0!ݺH:Gsrq$ްD[;!p귒(/ PpkvtzS:HvCȃwtSX 8O voc!aͺXf8E# 0ioo,fŏiih 'CZUE:_,iu_5 ,P4Hw|~Z.9w@u?-_ }`qx7% , cxvt/ eq 8`Kv>,ioFП?$ HOȿn;RćE E5q[+d=1t7=>TZ^5$ q#dSxa~2eĭCbkגgI1CH񄌚1QȸO -TQ3zk;t u-IF!%oPJ{R7mb\H;i\ 2cQp"j (2,5[c85AoLg(DxY!El E;l1X/q~WWoc|qcXWAc]j3sÁB1>*p.8Q0 c|uK2xqЮqzŘ|kyb[ ](UEh]P^燊qd m4+8DEITx1-k1W܌`FEUT+g G(*~"*pb!+]z uec5+k\\ iu4ؠQc8DQ1]Hy-Yl+r̛Xr ko|1oziL:jQ165^q6@E*B*KywcDE|[sc|uc:ܡƭfr*mኊ1oT -=ڸuj/6n:|ё w,p{xpdQcbA/UGq zgt#"[3{G xcot8F;AEO`tr11s:j/8#ǟxx17;|㇎{(F[oX::} zc\/:}Bǔ1):,dw珎,C }_'^#_$Aǘv{ qfLB|бMo<ޘw3n:lzk C7tY|#BkW4 Ļ6 ^ #X4濇wC_@Ǥq C):26ޡn:NjܑvS}yh5}::^EGpȣwJ}7ثw ;z`[z*6wP::]Xv}ߛ[r\-wAoqMuE8O6xqNH8?wW]}JpLZU ĪXxep'ˉA쒢{s>ss=ss?~]?ϣ~Wh>\H.H nnC_:,^L= \I.kvnx=қkN)Ix'=8;)Io9i=N=;G6ﴞ \nz \h=)i8;x֫[wxpﴞk8;tpz%;63w+;Gߕ+}W>|]wG19-#G|r䨷>HNWwʧ))NYߩ^N1ީ^g[Io9-c"xzN䚮xzQToUcSpwNYߩ^w'zꅑ{ꑛYꝇWw)ީz8;kg}zo/;Փg}zTώᑣR==r[)ީ{w'8rYߩ_pww|e}~Yߩ"rw׈#-R?I䨷ԏ9;DN0/9;DNg}~Yߩ(rw=;NJ[[w:\RT䨷o9-kEN^gw;’BLBDY8%p(%0k%ȊldA6YFc#kL`’ d,`E2J7HJ9vžԕווווuוeWϪgճ EǕP< vIbT?jI'4[ P y9%V eղjY^$S 6\Ǟ+=[\6jXrҾ|rXV^yAw˹Ws3/>Z#>J#>:eQQPy WIN㠟û`%zo ީ틉^AxĈ-><;6p,{!u\9`5\67bG|z@I+q^F|qK@@%x0K@a. gqBxxBh>; gqų #'>E`$S`{O:sU_L]D|]jD\<+7ŻLx_]8F#Gx>w^O젋6'I78s̰w~Z;. 
$ ?H"8=?/#-‘q|~Jz,R*O CoE:v 1Ԧ w_$)$C`2h &7D)e#i)b8 ޓCԞQS;7~zBjqOB%#.RPLiOld,xΡ=Α5# Scy@hsd\+2ý hNd|(GFؘС1c8r[욅7dDF{.#mLG?5]17 9+?1Q'bsi9179ٱ͊1 *Ƹhs{xw)sٔC98aE zc:!ըwŵ9'=k.!2HG|'<7TD˘{{E 9dE{@f)*vsŊ1OIoqO1wh|Qoni|"i=;ci=Q4濇 [Ml_Ao &*\{vKR1sz ~c'_UT7D|c o=--{T̍xgri{ƈo_AoxEtL4VUǁ1s*ޑ0/FcME-}]l:j=cxMI-}K{[[zбa=ڎ|0FGG1;FG<:J,vf 9zt8cw7kў73C­G{C@az?ߋ|z0#ȯ_z(:G~_xб+Awxh(A xxCǕO8!4߃[t|˸O8nm?ݚ}h5Bbбvwty<7= :񾢣Θt*}t_wMn;]מxHxPeG0aJot?Ď=߱>_sC):JEbw'_lNow̺yGq {Л>wSsyw}7vRɱ[.:>+n; Xvߛ[rP$K zK{lczQvT譪ٮHm;]?ÁߦxEG17;]Qбt[`c8A뇰/p>wc;l}='/x>踟c;o,.3vz(8?W:|}Us>z O8l}G1 S_Xׇp/#חSK8t)3YSֿxplzX{8>|} G<ݘ#:N_dWȭ7/X"~ txAg /`v/dE:vv6Ff _8#h'r</ʑyqsHt$9Ñޔ&r3OǓʔ~TD~*[vji;:vV0gG)3:̺g ipW(m'$xJ:v:n'C۹R:v*iϹRΕ$-<p$|3΅5;MG΄1ߕǥs؃~}::wŸ]#t :Jw큎]#u2X= G.d'8 n:ȬʆCjCtnfR+|!qw*{\W<3_mn3m=_|ף8pKk%_S`sKWԆ \Rf=XOz.pI_c3d5]OMm'[n.9INNz:pwSpDN)1_txx|Uϊa[[̧Yo1wx|f91_xxx|]wGߕ+}W>|];s#G|p䨷Oчgwg))~JN=;#t~Y)8; 6Yߩ9;+"tLzK#-S"GzLTω\T/McS*rwwNYߩv8;T;ܒR0rOSR=r3땑cSJN3;[g}zTe}zT>ّR=|]wGߕ+#2*W[zK[[eUjwx~1ީXxhVbJɺɲɪɢɚɒNMšFyfQ\` mjB?؅#55 8/ met@AA8A81Bx&Sd8×c8TY,fi3˴U"mf6D;aLp#ru5cIX1Np(uKFd#+zld96YFb,I0ʢc>FQtk 8R:91{ PW^W^WV^W^W]W]=Eט-~LK~̔LjٳMcX9O +zn̫s-R%RQQ_ p% k$k>|_~,衃^AxĈ->?rAЇE Csq@Kr_3+~WWxu#><|8v9(Sz s<{G5Ȉk~zhr pG% FAǞ4q๧7/A'Wdz㙘 ك8~[?8vwpxO SO"8O. /ɱ$q$ƢyH:Cse b=S R7)@+@(@(@amΎGXzS!dӛ] ۻw(o7F,}kݑXH8Y2z_8rKK0ܒ~% qb3dSjP_^v,Ƃ08oXbk t%Y2_qV_8@2`FFX:>?- e!v{EKn8ǚtrQ$ D0Vh ᯟP4f#=1KBbȶԆH| 1A&H=Qp2!c8~JTsĻ;z4C"3 &C H? 
kWJN6n:oʊ{ĖH{g>H523Q3AȍAD;c8 c:sOx(!)ukRdBFrܓK=Mz7qkOkd\[p_b Z7%37?116c e[{[1#m 32pT"mLG<ဇB#Θ1(DݘB{ɴ+=M+)* tw@/swH ,:v燊[8{{X"G0*&[y iOOzc8QhxG!7paJT ?r̛*zWxV ~6RIGiT䌯7C+zt{Ϙ{CИx(~h|=荧y1c4GEta=TTǣ=Qq5lĬ4UE_g"lccT$#3*D1oA8'A?8wWԍ缥eJZBG*E/+Rɥ!͎IDtcP }gBC`5:"G=tTG>?vd=ѱQ vtŎR8_#x?Q +Ht_A?8} 5CǍq z;ĕA!߈':Cp(2߅1?ր:,ݠ@Ǔ1[bǔc$X%:4ݣ/d#7uTeǗ1:|~Pv6hOzwzg%8m>[r̛z0{ Ry[fǜzc3:|ytc|:]/1: 껛P-]x︇5;&M/qv)G:FGqtcTtt~C=3:F](u/>xBn|^AoŶrCGg8ة:R5^kJ}Cǯ}wwp`HIpV;]DGz7=XKʱ/c{ '" ;}wvGq z_3;}ʼn߇[{x%::}:~ݸ-wp'_aǽ=AKF~YpWd&>C_|#0ޓړAR zBd,@ X{?H=Ó+<#d Ht8q zCV+C_XY{7\#fgPj 5ߟJ59cg2:~v6lch(r*IG>IC_tci'hKste9cYy&l;IIa|uRt9z%1qt9ο ySf?A}pl/wct0{;wLGMft8w&(G:; NN yUpN1ο 3ߏ NguC~)]ՠԹ.X5:Zw ]a#9A 2tz CGбAtZDGpk"y xGrb0ќxpD'Vүm+ŪW.XU_dɪYG*ZQx.'W:E*A16ll.}?~m$σ撮g?o{<36x~g|sso[:W6x~Gl.^"o=;zlsw撎zH?O5]OUwÁSx7|x#[wZ{-^SK6^%NN)i8;77^rzwsN%;xzz?pMOuʐi6_zSS>dsN%;cN)1_txx|Uϊa[[̧Yo1wx|f91_xxx|]wGߕ+}W>|];s#G|p䨷Oчgwg))~JN=;#t~Y)8; 6Yߩ9;+"tLzK#-S"GzLTω\T/McS*rwwNYߩv8;T;ܒR0rOSR=r3땑cSJN3;[g}zmXMe}zrXىb=|]wedUF[z{[eUZ[xWjwہ7)ڱxJɺɲɪɢɚɒNMšFyfQ\` mjB?؅#55 8/ met@AA8A81Bx&Sd8×c8TY,fi3˴U"mf6D;aLp#ru5cIX1Np(uKFd#+zld96YFb,I0ʢc>FQtk 8R:91{ PW^W^WV^W^W]W]=EWϚ+)cyFs¨~Rϱi[ P )@{(u-k%V eղjY^$?\:rsaeÚ%''Iaݝtw^ϭy57b>R꣤,>:#>*#*/J6II|. AD|/߈w"k = /A"pVx"pVxu#>`zP8% D\Qħ@a.=DF\#׈CAzЏń>8}>g t\{tx>Ocx=mOێ}OcOk 'p=tICL8/p~")O"zj xw<zCxr~VgzCo,ɀhstAyOb=Rґp%oDC @ (B;|ɕam jl|Yaņb}GnIpVčw}M]¤)ޗ(A1>2,~F(6]cYe؏K2C&7Ptcg,&q, ك>z9{_F3>0v<{zO5Նك;N 7Ć\7~Z, oō/(n4!`8u> ;"yae2I$M )KH$\ !sɿdqL唂N&zdcEY-IT A|H0Se$^tSW/׏VɝiR1CyJCҸ(3n{6*#W eƮt ̌^ -3E{2cX)3L.UFYrqf0+#Z eLdeF ikt2#[GƶhXft Ao=.SdF,:q.r(2#]e 3hxǒt3#2X1zў̸̌p`3c2Qd_bEkTɩx~8Ԫ=/_W}GEV*x wE6:Y15|Qqy +2!ȊMT:T`WE_'<㠯ڃ:XFEn<*^6 N>oV*'UԪ\՞߬U7V[}Ta6+U_\`EцY'[{y;-U9YԈ*6,娷pfEߑR![3֞^{[kE_CŶVtG^ {Tþ*/'_7ގCG=*az\H%cww=6z~TБ;~6yhM{Db'vdT}U{cWv|T}a!U_d`=qbx0cq̎zq~_S5c쨱?Ao=:rҞyhyVVuÎ1c=:ŽJG:,:t?d ;tx~#HCߕF$;mNp#} Ú˅_LRGsx\0uDAt{tT=q/#[_HaǶx/#ܗ\4([p*;}-셬׭=/ni?Fǻ/Mt{'Uu8Fxv_: `  dBxpwdHG3@"la? 
[pL\  0 :2q=7aKuE7ў $8F.}O -)C NJg'=bt_J px:+A\ʍ9΅=#AL_ch(\$I2}a$s1_m䅜"O;JG1w.krWpt9aK_Hcysp$/0HgY"_ȝw!yy | <t}p^w̭1x];}8WTu~pDc\?΍l?/7~|]ws%u-%3#G=BG$4y=w]ꡣvu#:rw]wW==8C oa؃8|#=xxpD'f\w2Ax ۉW,X5?3o8wAqUQ?^kr'̿G糙;?3?Kc^~mWߛ<敟撮g?o/'qƣ wqwޗ-y6t~}B3t[:~Ǔ%^ωG6x='mnz/x~t~[:5_U eOU|J䩊+^-^5^@ίS\H+jC\I.6SOx~sK:Rocs#j;ٜXz8pwӁSﴞszdsN=_ôz*pף)i8;x֫[wZn9ixv^\x 63ף|D!kwʧl.9)#F&qw$N3; |"p[ȇnYo!f|\y |x|agwGߕ+}W>|]wMb>9q[G)1xx|x+;Oe}|X/8|g}zCXH3-CXOI1cc='qMSSc(qw7%1ޱ8;g}z[X ܓb/pKz=]OIzIww&1ޱ8;sg}zpXOсc=;q['zQoxxz~+;9ޡ pnd}~Y߱%g}~)]3ǟk&~Qo_$pOz&[[W \sCKex~)8;%Ng}~Y߱_)pKzNk[JG~)ޡ+bGw7 g}~)޵O8;O?Nz~W[ |]wz8ҳ*-Ҳ*5ǻRsC?oP>xhV+ź'K'+' ''˺'''k'K:4eI``Wj?ŕЦ&1]8RXSJAM,,,,Vn+K7 + )3(#(#W a2UL1|q0/A3KBmf6LY,fh3K4fF$(d$dd"Bt0)ُ+E)YY,FVd# rld56Y,d$d ;eg(:5a P)@J=+k+K+++ ++ˮUWϢgulб< 9vIaT?j)4-c=XR:Rǖ5VeղjY_,ZVWm|}.\9p T6jXrҾ|rXV^yAw˹Ws3/>Z#>B꣣>2꣢>"ꫡdat͝dA_qWhSfnčxĀv7!?6Vh7bXTASeڈWҟˡ\ zgKPUg^x rnī[>7bokAre(U o7ڋ6"0p gQҼ[4Cޅ]imfX= =N { q¨4>s:܁MSQ~d4Vi$WZk9Vv&rtIs}=\+w['f8mcIES~4xQ.OALTZ)5*1L6RT:)q*;;) `yTJdU«r92CTJ{ }xSVE(; كЛG)CEH_SV)p+֣ PW)+֕kJ5Eye1%{d[RW7s1`1.,*\Hˌ^k.B7KЙo/o,zlD\ &r.Zڋ.(KJ+7lVeԝ{/kťP\(eȬܬKʽ@ }\ÌK}\qa\ 粹r[._|h%w㞴\7n1z\̛}-8ݱL<ψIV=R V0_FU> &/76djхy MRAs'\lsC=1eTL4cil^eѻ I8>H!5Fo&COm&^>̘jlgE*LSfM{x2ִG43nZaFqODeQCFY"NƯ=t1#ش.3rNFʌc/.G%3H͗!rX3#ګΡp1cbVoQoAPv驌s=m1~[3O8cFK2c0PF9y)#nyC=$q{63#o8ґړ~sjfp G%+]{Ҳ1s*H<̺ht/U<^9+"f䒣hyT\.9Y gÙANKeV";Ug= kw钣_8{F"f/AUv]Vұ̊aTcp=V/}UXNJ]?cWÒOdrWG=ɮY{8?Tl=/D{isVGxGV/=iOsVha`żksVԻ-Agp#>o:Y>Z=1`f*8~#XaȦl;:PŽG<:&:mr|O~1]{c C{t|CGHÛ#{d{vt:^ԑEݱ2;Z%:5v^dw=荹g<$ܞq2;v,Li?Q# {Op#++ޱ#+~c [%Azb-u}SIQNMȑNQ0~ci|rSqwcwi8-}i{Xk:6hGG\1טaQG]Ao"ut<]4t8xi~wctCߵ&<˗k|mQU:Q*ee#ZcqVG`OEړV=Q:N[#Z[ӱZ9򗖍t/ȿpAוꨵe'RwU{#͎][޿[[>#͎a[|lm>8.:m~cYyu4W~Mu==ɀCǴCJh#pģc.9 }:#uqc\_4RGyrCU~Kuر^vW~OJ"#q͎ygߡaGФ:v X {;_d: HAǁ"GB{wh㟎%䈨Q4ucCII9:>O:BcDP9J DS9Rvñ/REK9^#F]9fFa9n7#Gie9v{O9Ÿazxa^HǑr$g_ Iz9ɗ#Jy9Tj:vS쀣kF: G.1I}e8̴>:tEcч]}t~c{G#?hAGGY?LǢJor4n'JrL t\#xJ=/gC}XUI˒AQa{U&74IMnfqr9x=O܃>fnf>>>>}WAx>Ipw;޿}O>~}O<t''ģW';q⥸ }ⱸ;\@\}W[+p[ʇmnYo)f|yb69)_yx|]wGߕ+}W>|];s#G|p䨷Oчgwg))~JN=;#t~Y)8; 
6Yߩ9;+"tLzK#-S"GzLTω\T/McS*rwwNYߩv8;T;ܒR0rOSR=r3땑cSJN3;[g}zTe}zT>ّR=|]wzʕVYoViYo]9ޱ7rwlxhV+ź'K'+' ''˺'''k'K:6eoI`;`Wl&1}Ha81K;+; ;;˷;;M&JBJL2JH"JBLc!` _2sKFRmf6PY,fVi35M-ܓ^GؒZ`JJ1N~$\)Jɺ$̚ldI6"YFc#Zld)&$a$(d>;E1 c:7HW Pl)@]Y{]Yz]Yy]Yx]Yw]Yvz]=kc[QϱK 9VKa>Ǧ l)@'uԱ:ZX-+Veyղj㓼saυÖ- k/'Ա, zn̫s-5(訏j+Y$]s'YqwtK'bn_K=7ٷDC ->}xbЇCqp];`xVݚ~ ~I+ x>`q_]ǹ_lǵ_.C!Xwq<8ϧ@sAx>P8wM 4.Qna{}CoÔx] L'|Swbr4f}s|h}4|qɵͤ勺Uqi۰= {S~$ 81"8 }&C]8? و yd(|mAZvn:p7AB^t?3Ao7q.LJ{]f%PCH5v7'nQ pqyP]A:톰67cQn-솓z PWٛ[2Y }`藀7ba/QEg0,o Y7©_&*CmIoK2CXr_A=vY(N-@] LCXvK:o(5wK~q^¸_ccl jClKn8-\77yCbo见D@Y >#`)C=/#鉎%/ '7uq_@$>{BZ-HAG .>1~q$k ⴃ1Ļ;DM׼Q$ SnS>ڑ2gzAwBR:{.9ojP40foAD :.32j!Œ_4Cc{HqsWdqz1g<(5yx;MG_1SDzƶEFQlg|qמ&?:18/ŮYx*ԕph\ȫY(ҁ1PX0fԍ¦a{cbl{T!d"+X[c!xp_wxwCT&}4k4 {#~oo#st 6j{sޑ`\ަ` xx#6ӡb:.q0.AZxRT#sĘ_av&eQɑ(EE=yc GrSw.GPmܞÀ{fdTɂc8Y]R:3ԇ|}.c׋k~"c#q5@EIAЭxPttT CEG.9_I zk?zkHϠ >Q}T@בz*l='M{7[ѿІ3W@E 6C x:o8Iq}7Fťe y=']#Ao\2aAoxE'X;*dEXm~"r8!ŅYQn~kEVQ ݸxŻP*dޟT1ߵ+w"7wt'+- zEljOp7! Gb'm؃ E/}w!*#}j8:n#fCz!+k7OM㣣Y~8^$1w/p~N+tވwZQ#(&G(:O5L:t<%8$:OV4#!ƃWdsz1P ]"W;G[x?+2]ztw72eȫDp,&~p<n>E(zLGgxx[>))%t~&&|̤Glo7N:<x=p&^zNYw.L"ީ=|ČwxGnw3oWsf#;g;xwΊ?x7 _-2)_[|]o~YwzyYwjmO/ݞ|O~w<+|ǣ_īxw;x?ow~wwz|zE\#.^_=%x~AωK:AՋ'MqMz9ŻwW;xwswW;xw<#;'{w84 E5L-^SKǜʫSx/pw?9isxչxsNeہSx=pKs/pf>!ף|D!5;SKwLj =SSyx|UgNzK0i5-Kw牕s9)_rSyxW]wKߕ/}W]wKߕ/}W򹁣|p਷<8-G;&rwω\4_9;7ENUc|4_vINm|.4_)Ioi>ҙc|K/4z9;^N{:4|9;G_Nّ|x䨷49-_~R|.XOpxd}zYߩyd}zYߩ"rw׈O[EkcM%Üp%5YzF^#뮑eWϪgճmQb?I?Ijy1j4-(Y<5(C%(YK-k%V eղjY^Þg{8lyްiÚg KNڗW +v<[y87hnܓTR_JKH}/DԷCM5;ɚ/}CGqПϗA-Qkc/L{#Μ)6vb7Sg->>:tn؃ l@hdp(rfqò mRxVkj94 ֔mR{xC?bB{ ^ \_]LsrD L/q8],(&52b% (6?<D3_A0,b%7n%z @{pM ܆zCVA]: ûfl wEvV;*ؽ"zްzijxiwrzjװzl{mlzn..cO:m&& LjXC?"kuD}ICnI݃: m2?eW٫-_\UB{d{ 'GbJɃ: 7H5^gf9M[(~݃ˆܭWdo z+-" Nzifd0hv09(C;9h CZ@\?Nu>?t݆d݃63zC[ ڀͷA6@=(CYջ }Z6ƆUGg=_hXP?p:/C|'@-렷~д%bH@Xjl鉃?ZO:X4ԟ"Y#ibZ̟f܃ 6ptA!YsT{؎DxnI"G)$+} lH?4in:ܷA{H{c5H1o kQ;=qB3(+7(;LEF%GdJQU(,Rr)94'GE0a@dL^38s%G%2mɁS8+ǻeqa8QB;e=O&2ևDF52⇋3q ĚcpP"#a:-o~X'zc~?hQ33qB3!1q{یa7̈XaC>mJcFұo3.wpr̟6ft15 gAS+fs5_kOx:{6efN!$یa\fϰa)o3rVHm0; 
/f}0cxx1"ݬtDbƱ <=1cy=譐S|Y! A+J`ƴQm"f\XYc mI'A{|cFظ`\3ʇq?1|H7_3և|1|ExT8n1#n,͘rԟxhOn̸7_3 O7ƞiF?GE_8?8ZQQ`/kT$1_p<*+䣣9tQ1ax=ܭbt2CGEa:X.A~q~tC؝6x'gro?#8荑޴@arO\ *+yzVg8N#8VV51?[\•ﴢ;Ao_ĻY8.?E{W;g;rSdCS#w;g;P#?wΌwqīY[w Bo[bČwx G[x7<3{9Qsf#O;7R#o;Usx8sn z+Čwx^pd&f#?wx +ǻo$ʗ+_|ʗ+_|ʗS>7p[Gh糝<8;3-q~Qo|L|>'pMSS1|S|*pw>ϗ9g}mY|sOz[[>_)Io|XcNϗ:|9;u{t~Y|sw>gzG遣x';w9ީxzݓ.g}z%.g}z)ޱ^#rK?IoHzK$R=JX9;8^&rw]Nhz o mpG'XtZ8$+\RpA*  ~(*sY48pZnk .o 8l.WAu)ܒCBG'bz)h̟tۏ;7oYib.'l:(y1R: --R_z49hO) .!cœn/͒rœn# dQ "t=9#t 6:)렯 w-)v1@&nPB0"xz՞FraH(PK$2W؃0kx5X~n-TDwxp[fJ_ו7?^j Ǻ _+JሷScUVObjyNV=Vkbg/ay[Xn-9ԡá}w^pCnqV+88}Abk6?^Qm xG:*h/w\-s6?^nW`EyÞHOq~؃nq V'od0+[~]\AQo=-xzo#X<b'Ҟ+zqa }[\񃎏p72*a{8N4rglkE6LFwxxGފw+]ϫxx(pUaMy Sb;P#?wxp\6GV╎_[_ Ybƻw:.;g;nx*93a 21y+93alw]UΙt|x71ÉKpX*93u}Gv}xGv}xGv}xGv}Gv}xƃ-xQĹx|sN5SS'pw9or~rSʙIo)ܲR>͹f|sN<};; Ww7:ʗ+_|ʗ+_|ʗS>7p[Gh糝<8;3-q~Qo|L|>'pMSS1|S|*pwgǛ9;O z[>ܒ=]OIzKΜ;/4_zyxYirwe}Yi>rwώ#GQoi>/Xpxz-ǻ';+\N#;K\NScF䖎R=I"Gz)ޱ%rwqfLXo9Ży.g}z)Tot9;+]nIorMzKRRU䨷T9;{]9ޱ^.g}zYߩ-rw-;]Nxά׻R/ϤToxIzKSccX/9;[^NKߕ/}W]wK߱4rwW]#'z˨q#xz1ޱqhRbǕbݛݛݛݛuݛeݛUݛEݛ5ݛ%kX,FqB1=` l,`w|RXcHAYvp;뷝mgv[YJBjL2jͤ`}5l{`\\l)~qŒbRmf6PY,fVi35 Q0p1%u.$`XRI1N~tq(Ŭɞ,ɞȞ,ȞǞ,Ǟƞ,ƞŞ,`X0,bE0EƵ1@vI #(.F^#Kkd5Yu,z\-^\I1%#FeĨ}`ӼRbM Ћ%(XK[X-KVe}ղjY]+yS=RDxR4M-*XtEFZMG+ B+ F+ JkW Ĩ U #4 Z+ ^֫dAlm~+^W4A*-ߡjA6ޡ ;]m>P`XfArT*X\0F͇-xL rLתGAu)ZyBdW+l*: kAߕ RO`0 ((\E|Y~SAўXa-p2Wԟ? _W狂}OزUg4П|C>zp ʫa8(\E`Bye`d}0<.'CD<& W*N}wl-4tOG*2K^8 pR )ahZp_b̠ahA jb>,h9Ȗׂ4-@ÏO3T|>\6X2hX*\Pix*\Pvh*y zK+DАU&\ae ]+UW4#؇џPV? 
gghO+ kF<] o4}ud;P4xGd+񈆿y LaPse2!>]?ȐX iC53'ca>E E- ZxWӍc~ L_M-+Cӗ^0n+MJPme8 ZV#C:]i2jYbx E -s ܳ+Rep&T{rd:TFp)л2kyq,a2,oeh.>chcߗv&ޗ0[~S}~`>A*ߗ/\'3;zf󠟵ce `tCIw3nmIO/q KX|Gܜ9<%`bS YE,L/Z+YY XYw^ψ=l/wK@`hq'bK@̋XY!b^dzpų`ƳwqB?KE`.ZqK@K@K@ij)-5TI I=S r+bXhZ.b%K01(PGԈ1@AF 9b P / /00R°"b۠$b !M.6^F[cX-b P])@axQŚƨK PF -l5lc6t8%`1( "ŚRK PHx8"1(dS"\ř29hiQ t%aŚ \K PH9"qqŕ2wg P%+T dU*YAJVP%+(C#adj l g PL_|bb)=(&#xVIA1;~1)(wRPG y-<' N9D †CtG/Ź!@q&&bPljg.&9IAqbRPأ "x % N9b.bPbg/&ʋIAq3bgR/&ʼn؋IAq7b6 #a O P8RŞ/8/1)(;Xp1)(VD [<†-IwGaJ- a\)@bI 5 cBIzRP,O["ՓbŤXwsEaê5 D †CaQPtqZO P.&*1@Xv1)(V8†e W6m8ae' E UycgPx1)(N9JVP%+J/fUBe(lP1 p^lIؠbM K P>6iVӛӛӛӛӛӛtӛdӛTӛDjT2('t @!9PD(vzbBӈQNI$vH;)Nh'y:ZIiEł2x 蝞VPz4bA~qP(hh&14I ͤfB3頙dL*h&FL`t N5XFJ-0D  <,N+_yyyyyyyyy؁yȩDQ?`X NBDI*# YN=FHf$m3Iٌ$lz5=ɚT |mN+J 8/] 3 D{N-F8j( :Q6bZ1-i$LK %Ӓ~iONqniɦZjiq阜Ic:;tV4ΙiJ+YdJ*YdR$Ď(ni}PfiV+YW 655l: `q+GGڣc{8L6iV}KqۓGom؅mѩ{ѱrh΋Mg%[5؃N؁DEoV?Q[LS4Dk* v6kj`;^]]ju)u,ܶNh":ڠ_TM(M/J fm-/f ЬEۇHBUіw8ڄdz}sp:o8ܓW4{We8khcxk1nWY8}L+lñS8p(NO(M@ w x$|=6[<#Ί1nzsέz=ZiJ8n>qw7;On*A;ۂ/$ rSpn=m;3pC8Ew{(O*;v$&;[9d{? ߹ 6[?b+0ËPGg%C1aY)aPgHUz> GC>[ !Yvun筷wü>鍶!`ܵ܆5:vnae_BpC1T0!ngoogaIR>?RwraYb[1?3GcmCΈ(x2' 0CbЇ6H_ zCcpSDCEdQqH|=R0ľ ׎hH !(Ekm)jfomBeШ,&rX' 1dZe L[ȎH/OYRY;8<6 봢?Cvg'[6 +oF&yơȑ<|pe0VA~shrd?AdNa^ocۋ|Y;\#2'^=K(fJH~wSqoZ` jHc `fy.o\jR'>lcR]GmsKt%4XXΚ>*n&v;F'ƢԴM):d4*ƍIY&_Txh WW.cu-@HړƹCNN 6ZSx7(Oj iA}Va _Ckm+5PuԚ6vKk RSkT#jfRW@UkډzP*@Y!./m\گVz2 `Q`%~nitߡ^`j&&jSkrrNN e׉;-t <ƁZ&VdAgw.0VɲWj #BsSa-`V``։` }`Am{],u8a}ygޗx^/Bj@w_ +1/htѹ+?6Ɔo^C]aڕGݗ{<Ӳ! Ǯy4үѮ ݺѬk ҇r©FYuJ#}yItnx " ,0?\eGu\-7׭b8IUڕ\yU\tnexGO\4Ґwc ;.ܸf{qwӮF+3N@#>MomZ.\p7=L˂̾Lˀ9o7Anۀ a^ۀg MA]vg#Lw8ۀo8[i%G+ [@xk3 07O_ #sX"QXʉ,GdӤ6 s88&4tqrm=2@9"@ ]n8q2@9x06q[@; 6&j_bU5PͧQI| 5_`͗_ͩ|ck5?Y-nSpV K8D <`g߱ת旮Wұǃ:;xBc a %xrl<9pV =BcgpXY)8pVމ8pw@5΋qKnѱK.ձKرijRWNTGYI$8pV.0K8+'DIec gɱă%jc=\ 9/A"1(@1@4uec P.O P.w P.ʼn=(Ė{'<S'Ͽ?ݟ??+$$Rqck oſw 7nل_rp&fEKo_\owyd6K}Y;EW}$Yӊ(Rw3]>&_9wxV·k;x?X={V_=cl?{ByLg>gğE3gf.?Y~.iѭUs(_mVg3"aqOS~JhSI9cSV3gE36x? 
ks/N7?Έ 5*v_ء_?c?ŷ#olBώCg$+tqjNDG8dGQy(WL~M @@ǶtTy)pq ο80z ҟ_7?@6 6fS[\蟟~D'S$V_\onBe؍_ ,}UV_P~|ag_#`SKQٛ1wmLqVC:'aQ|5_ٖ5i>k?nm_n8;>a~ώC ׶ѾG{/.7aVmW}KkT[l9׿m\ض,,n[YbğN1V 8#\#~|n='?J>}۲e_my׿mn[Q۶ȿuS^3BTu?GW֪'ȿާYyD-׈}y5޺Ǐ/Z??X]!G}y԰fE :Zt{a}#NGy9=g -EzwKE/һ6ie8B1[fnCzG{#_^onf*fv1%+ʻ|t3<~Ezy3e*N|?0fƙ?g] I?g]?g}3rYϰ ?g=oIXbtiOӏg-5uߏ?;c/q:>_Y'~ݬfçN;LSO+Nc_ǖ߭c-ßr>o<}nt4[{C~_7_熅7W?˾Y?s<ߖ|h?#s?#}{wߓm;0::W_~Ym9`[k> ZCX@|q~DYRܖq30ז͹ϟBn5c=4<֪̅٧}c ^x9ɻ4_h{[c{1TUuvw[Y{樥gڍ<2s)^~wX͟A>fާiɫDCdk,[\ve=M^~_YrT߃ۋx7'\ndk\E~9 %=A{䢖o\Gŗ@9b5><'X*KxVؾߟxv="fw_x}e?lŪ_a# tZ3.{x=8%[<8m5p[<lzއx7m.3$WrGaYx >Y1lI>L}N,zqüf5kg<"x=4}lkX9<[ƹa k)ةiXZ^f=y_ok|ck.2Ѿhx__}~ڣi?lZNx?|'9?lZ_Lv>韟U{VZ[+v:sg|׺aƳ]XI\vZ/lgKEJBwoUlkՇ-5ƃmmsY{6^_?wEWħԿ"#;+;D}׎SÝE'?wۖڕ;U~xfxmlgG+~܂ס=~oOzNGV]D<z^sg<K+^g܀@~vg7g){#q(}}6^>}YN]xu_/xoܯ)~qC]AکO2_U?sWoAʕ#oU<+Wnr~_E#?gR$ކu6߳w~:l+w:ΊC>YE0C<|;Wh߁U__(z,g?cm+|ٿ".'X>|׺uWDߩN_ oaZK p}Kl]g׻ѿnx[+x;t,,s@_4~y_Eׄ_?/G"U,j;־y$5=jDy_s')%] :<8_7_u p 'c|F1磑3o<=>~=y0~-OO{Co.> k0D _p3Tz+"/Aпj7Q6Ww]2ѿ6]F<_'<zpcqxa3A?H'UJ>.oOLZ-LQƯ~UƯSߔB\bkC%;W_9^xa ^+տ~ K=y!XSGbMuB 8~*W?wCʍχ7Wď+Cx6D{CpK_:p^7שϊocZ0YއEnԿaߛVBi+ǫ7|\ U_υ2Mr ׏şJ^H@"OxZG}S?-c:x_+1:1P|yQyqqnWGQ>A{4~@r{ֿY'%'xֹy9ޯdG%>υxޱߚF`2ĝٗ|0dzBܩ?:+xzzxpO}0ӋWn Ys;o7@~n.2Ï?~5w_qzxAXzqzxan\/m| 2*Q~?_QjÜoeJ~1hpç=/7gLOR~c!q_>_??o3諾5~-=K=ѿݯv|׋x_;@Q;n @>(_wǢQyUznTxzߟkr:W=4^=Ư~_?gtyePv}q8u|Wij3 sc(?x4߅6G(? 
2_ϒb??&#w>ܑoB_u^aޯ+Au[cqbW=l?zû{,2 Cs?ElS;[;lG7OUCOG:u+{s;0~Ç{>C+ޟS_xk xy4뱎%3?xL凗xD|;o~,_9=,UOSѿ6=x6oyvujd-hI^#Y4~E?+7=c1+ mofE~XS._'ho̦/}φuòTrd֓@̎Pz9F;qx@t 1aIyϟn`|ak8}͉z'#?oW|\hU߷f{Zg/\_B%wv2IU5TOg=]q[s s=h;!~GT}ާ=[_xA}v~8ߍUvmx[W|WEZ/Yc<[Wjk =7SxОwQ~z͢ϙY)Y|zMz~Xǟx5>ި/T~fOW{)?W۽ok|Ff{P&FfozyC^SzMj\mko5^k^s^k𚡿~WzM/f^SaoUx^krB&ևٿB,loW= zͮB]8ϯk"~;4~ zM?+|zMC^k6+wjoz}S#aw=6gީz!^]hl|?O=wi zͮտ}yu}[]zM`i05e~<^zqㅮ5ߥUgfdS^uSWk߇UߴɌg]]տIVKV="OSoq~C%3g,7^?0kNh\~M^/afg&s/!x} q:ȋ~I|kN|ף*y^s~^wij084~XͿFxKEsZ}׬:_kw]ɚd3_k{Ud!6 Oc&3sfn~[ꭟzMW^U}׬=5x[S>Կ"ޡ;ľ2WzMŗ6@Ѽ^KCkГOc&aο|^zML/}]$ܣlh9~E{C~cox:XYi]h0/Q/}?={F;}=ϹSD>87<}s=q_Oy^黡/hKRt橃/2n}/:}5\_zo]Nf]B{#y~79+T]7#軞'xB=ooQn]1n?'_z}sy<9;g|OK?:~"O2ŬӁoOI|]>W0_9'}TO |?/>Ñ/%_`/7t"˗u:E>':>/}ɗ+/xK]#Y/AWKoq}sʷ/_d{/}y!assҹҧ|U>}u;ҩ/?NQΔIyys$_oCsV[ga=sϛ4H9}>AYG|S><$7sVޡ[򙷠ݗ0Y> |Zt՝/]'}隗p_/uDKaWKoʗ%_:%KgSt=t}җ|Kk] ҵn|7"_:}~xI_z}~KZ7At=Oݷ;<)//yN-?K׺k{]u?ؙ^gqlOa0@`ө,;~$+ݝNخJwN\M[?G Jfߺ~+YW:d|gKåeނt]W ǻ]zMM֥_:m9w<\z\O{ǥ{u:<ոnxQl}?$z9 kӥW<.=|:Kn]ݺq>.t@n^(\zu~K7vygr{N^?ugr\z߶G-3Kou·)\zq>K/.]:N5];_>ԩӟ]WM>.L:gt].]KOgK]ҋN׏ҥw{sեnqEۥG.t}sFv8|!qYd_"]Htҗ7t啸IÈåש\=W83oq#wX| nSθ&]zgboWkg߆s^Q뮺s]E{\y^^rc0?\C7Nf3Χ.}wKw8]ν:SgK/ӿ:yteuΡ3/>.sa"Bp`6y":[ҟ:u篾<8+'Odwt}ӥg;'N'']eK]z:YN>CGD_ I\hWm]C:]zYܟt]yftm;t'׻.xԥ93^3tkZ o]y'q7v<]zWKog=}\yt/\޺tuīK7.]:NP./\yDN .#]yuw]Gf+۝8mu/ۥ㼛x֥2^~\yRtoZᶩ#t{>k8?mvmg+K7/vҋyׇK~ӥR~K7 .xtvg]>k~פMfW+-ut}\tJo..}/Jtu;v5;y$.xt~=n4]Y]ÙWX_Ofѵ6O~_qxq=K'ojKG~;|K/r񚷮x=K7t[02Z]tG̋@GK׭Sѭ?x|/hӟťsեqe3^3]ӥ3_NN]vBG!pkԬ#T?.]'ZkvkFSt+um\ҡzItzc5m8ѥۭT>yuǥwnoxkN .1/S'?GܯpyӥNh#uggz.=_]Uz|^g:B8˺hqqҍ:\:N~¥KwZ^;kxKW~1]zm:M|>.ϐ.]K?_zqWtW]zq߮׏Kե?xIKo췦KgH~.K/̗ӥߞ#xӥ"!np|Ft;\>.pJx͸t?]m5u%'.}uҥ/{]#t.]z۩;+\zottn҉ҥMuұ8]:^Y`b=P]z _ׯ3oY{Y_yX|åʯu{=D\3&ﯙЮ֭m?1:.9S'."Lp٬sN7ӥ7|q]}{=>K7NK'N^tu6Nz8|u(GsR<\zy/l<3v+zYON^77Gi8q:v%}gxҩ/~ٟQ_2N֝t뎯6n..q}\-\:v]g|ߏuN닥KǝK_s]D]z/뜍vcݩg]z^ws|Qu u&K:3QGOޘqڏq\$OcV|9k~ԥ?ҳt˙780&L3=pҥw/۸nhè}9~]z;ǥK:Gw:'xץWa6mSINDtgueC7_=:vб/z霟X'K.?9 ެwKߎ>uЗy"Bt8b9Х܁.=]sq|so~]qitҥ?ۙ[v~zcwr:'9ӥK/gj5OK8.}mgn~A< 
]UtW#FĮtgbIW[7>ܷ+ҝ.ۥYbG3l;?.}pҭoK=_ץY+"on.N~L#.L^^qKe9Z]kéǥ؝Fq|LN>tOѥw]yStl]zkO Qެ>?t}[/8V]qt'OK:ty?ѥ7wgtUKq͗סc_KO'KayN>t%tt䱊yt~=\^:[淾iҧhFatҭߪKХ[Zn\讗z6q6sΙÏg}ѝS?Y?<:K.K G:BΜ<̗nxKoM.=-.ݼt/tvؙ⎋nzٸgC7_K|?YK׵zCNqK'3[zG}h֙[p&U5|=k>ǕgXԥS;^:M^pxt]#@t2қypƩKҳ9.PKwAnW].| Enӥ9]zg/Ν+n=ᎉۭ{&^]޶3'$U:ut`}^Gft}өu7ҥ;ӥS䨗~{/6.g]ҳ}:tԥû^:wfM^p35ҥ}Kh+m5oWMNOqץq\:ډ4.҉MKͯ=ׇqyΙGK֥i^KftW]wzԥT.EC>9WWmnPes O:nϛ.YG=aqҭ3wKz.]/g;u 3'd~\];%^Z?қ/ׇogqLХ[RwK_n]]M^{YGh.Vt]ݬNc'^3U:s]ݼjӥ#]:tgԥtUeێOҥ[nt]/҇yyiOo|~bw>usSp8<\z%.d=z\z~#N.(һ/#/_μFF}qǥ+ҽ~u^5dɳ|\MեǍ8ץ7}k>:sW_S/vCg+`һӥԥg*]ÏxVwe҉O=tMG>nݼsϥKou^bu:5]gS[9ffGgkf5>.t]]9=:]cC Vo{kwיǤ6ǝ/\/[&^SGK׵bu˿nGw^瓳^n<~K~_~:;Օ qks\zM|Sܹ޼uxMHK۩>'^sz}nsK:v3rttz?]/}Ku~\:ҳzNn:2^3~Q/} >]ҩNffޗ/:t_qkҹw]r¥YnWm\Сu^9k1?\:ǧyՆΜ93kN+WYNnKo:#uK~[2oi'^kZoN~rK7.8>o~/qf|6up/;ҳ^|k^sp}\K7/vatu9Х~s1[ɏKǑIt麔^mqsK8zw]@sv?7ǥs_μNҗN~ԭn|uҋvt<DZ/.Xo=9:mKON\?z+Y/x~cxҭ7KO?2ߡΜq'm`EuúO}KNt:]J^_׎kz#?أ3gfnkm"sZXq"]Gѥ_:o.u^z]:ʣ^:;Oo>9z7ʾb}?.ݶx?;tnveK/xG< Kƥ7lgts~<,G]ytO{MmlL:7ɳ:CK.}`{h#_NtOC'}9{OZ/.p:gk#]q:c]׫.Y$sur8u"n_Ouq\zt|9͛Kϼü/|a\?^z\ۥ/?b|i\z3.ȧy6SZuKyy6M+gtgt-ҫtE\_#En}\\ftK^.}mN^52뜬K7ECr̍mNqѬs.닳9w=tt"EӕwxqK>.ݸb]p:gc}XlvڌCgGnuߺ濿7ȓ.O׈ե[I~qY/'.=.}/kusɿqGhMޗ.֟#Ptp]7O~s`q5k707 _:s9qsvw7.=:'.֙p~nצC'5޻y>5?]];y.yaѶ^ݯzxtrXUz%.}qץ̌יOY/BĝKå;Kt˸:\z8+ׇu꺹A+/ 8i\.}X;\z>.oKҽ_tƁҟԩ77=>~e׌Нc>qK:u每wǥKw>.ݸN\zw}n.~w|őKwfK~.ݸe]zХwK.}Zo|8^zש?^.K:s:s0ܛotl>mSo.93~/\z>m~\:qқ^p=]K/tċv;nX/<=^zut$:ଯN=q?]uWW\rm¥I!.]Nf~/_9ץG~[ן/^ϴ|8a.8w]zq|ԥߺzqGҳ:.Kևo|t_9y_3ӥiKOKᄒWm~\nN^u.b>̫ןvS}8_F޾[G6+y p;yc]z6]C>C謗^u_M~>t[[ѝO[^:N^o]Х۟қå׸tӥO^wtq̫6kR/]v㸹X?ҥwif&oo\^:+]:.$]:yu .ϋK/Y2oiK7o.Ou]g/:_u3^39|s!^ ]yytML&qGqzw+n^]ХЭshXzq/_KokotK.>znvy~#^3't]tԧtꌯҳ|K>uҳ:\Ц?z^:xͩ.WZt3ץ7gf8]/}yx֙3r\z}#ե#]z:;'НWm&sk?5oK_抮xͥ;oqҥ3'^S]zיڍSg}3_/Ut6]EwNc'^gtuAz6<;^Swn.뼉׼].}X)Y/}Kn8p鱍v.}n^ ]Kk^֛*m4]9#t\z/lʿ\syEz|KK?:uҩ㻢.}.ON]tN|8:Գ7ݺr!?/zg:h&m̏Kַ_}m^myҥ7pv0i#?7]e5:ӥK.Ct:ҭK'*]:ҧ$q[KwKKNZ ]/Ѝ'8qGu.yY.}ҟُJ>uөOB;ӿwf9YcC&-?Q߿bWJ_\wԹ:G}s~nkw]O踉ڼ]7w#MmpW<[;!u8 ~{~ɿ릿]S]7ѵhߺ^gkOb8ѭgN~u_?# 
>¹EuOk֯m~׹y(G[~ܗqù{'\M8{hhwzu\?{=pk6?{m<[/q>nqug{|PpY}D?:ҽ~{Ѿ^mꉿsG.`'=+x~M9i~n;Y? {5Onw=vcszY:'?q~u\uW듿릿Gg׎/gtg8w{lg]7}ù>Yuofp=x]8߿Oݲ? ^߾~2>}D8?:|xO\oqsϯNz}Ϸh?|ϸ?q{+p_yQ¹ƻ¹ߎvtp>ùѿ_wugwtgԯM~b|5/F8w﨧}EXu|=;]kگҽ8_'/¹g9p'pt 6k={Ko6k:+~Yo:uuumޣσ_1s?o{yuw+W8p+po?qGD;Ź>Oùۼ"v-w;g2o»n.۟9~sw[џùoF4{}3jpSg|:Ff@= ^_w{5ת~||:{{MGO|+W#ڸk8=sW~;{g|ףy¹/oڝv:_g7c|33tsϳpϗN{8_ùS]7}v~Oq7KgƑs=Rj\o8|¹}ҭ8_.O\o8\sso16~8p~swpzڟcs|u+㌯}tmssW^hJ8wG\_+O,띏_u~W]m8wЩ֛l?ù.~>Rhc>sù8?O{>ù s?op+w6q~~wq8s^_7m MIs_:k׵8h1¹){>_mf:lX/y|=ת_qp=G¹'hk蟘Z+¹pgJ^5'7 3us}i#@/m3]۟}믿߷Ц?8){q;w:x8'_b|z ~h37{DC1s?UG=spz4=WwS/>B};׹$޻^^h8; ;ο>i-]~թ_7ݴ^XosF{|c8y7Ipֳ1f8۟1_ֳ>r|Mb<%r「?K.tI+3]P8=~s_ùvKaׇq%W8s{==hۓ%]nSb@\¹kv瓩{ùK뎯|1~w]:vG <_ӱ#s8\z=8ݴ}$>?+:֎ 'W<{^׹9vϷ?{+Wù+`~b}<pއsG۟4gYjpy!zw8w~[4 8:H׍!c=>_mWu8yZY+]妽X֭8Ebhްpog:eFpֽ>K8z9s0tO\ӟùMYmӟV>}J}¹]8sXcg|׿xB5¹}_q5s}܏sj^p߄s8}=ss߿Ѯ1ѿ@׿rz\>s]=q^b|%/)=׫q'{Y#opem>\uw_v߇s8w|;{B{8~{x=q~_$֎%hΧUο_c7t_'Cz\v_|-i=w z6ùEscA{S$@6@wz⟘S1ףp?s?;~wԙù$v|]Y:fgM8wiG}s羟ùpW+כ}sI{3>8=ޅs/O?y3O8<}?sWIb 8O<;ړx+^8}sϯNU9]:%p߄s=pss/t쌯M8>c|<y?ܟߑW`9_0;o:x毗x];]E|a}ww2y{yoߠg|mqkg5Iygu|=txf&߇xM'swև:kvՙ.]'):vGn}3._?s?Y~x=V]$^ӼixF7yޯxMGssyxMKO5sǛF}_xf=zk}8ߋ{g}Kw_qkhUצ{g|tx{Gl2>دx~>8S}Ϗ鬻q]_>;+q=ѱ׸_7){Ʒ2^3{p{3Q>5q8k{;k='֎p{?uiפn}t]E%_M_~н#8ܯ¹y8i3ùpo1iӟ];MQO5{?8?ױ8__'i/֓:Jp&m|Sxi=i=͋xM qߋsB{S/[/\'I|:#psFN&}߃xM6$postK7:v}=q 񚷮r5/hSOxM8|ҽ1^P}=׬nTY_3~_ss=>b5[i͇vcu8o3>s'^SZzwu#'\=5קk3_9zO_n+#^n{s}=8oǹxͬߖe6tēĿ_/=Dxe}lYvnxNGA{Ij|SޕxͣSg3Sx͢opyqy+f?8\¹| )k]|6;$z5+=}psx'^3+#^3¹ϧsk83瞞 ~z<k>ŏx_#^:{G}.篏}O85y]CHбkNjx=瞾 kg}ugU>|so;s~5Ib| 87ZNf?;>xn֝]WTux{ip {s=羯6Eŏc u%櫑"{Opjkv|<k>:y5JG'o&?=Wp${pp/s`{s>s]/P:ťWUwu+^]{_߻_zևN^_^^^tYhttY?p}pۥS/=98qYK*.v uMG_suĉ:t~u}s}ñ;spXo>9Օ>.ݸt ]Ftycw~ 9ҧΧJX_IgܷKl3}q:)]zùҭ:gtuKwmq:v~8]:åS8N:smpU7wٷu#%W+pyҡWܦGKҥ?\.tһGèލ Kҥ]onf]`̯z8ɧK'qt{rsx<n}ҧk:zW#s\yX=]t[tvqһ/`oKG:gxtsً=9cѥϐ.|CKo<.}tCGxәӥե3Хo|;RuN.}X?6ku8]ҭGK\ExNљs϶K}j?-9Yѭ덋 
GuK/Sh?{]vsvٰ3}s38}_qKGts\+Kn]z:bޗ#A^\ҩ?K=LgKzѥ*]z-}.=OsYyɧ.yn:g.=wm<5:#qMgKoKo.wt+=~[OuNHou҉֥wkt(e=}ĸtz۹9ucutѥOS]z1Ιʋq}֥:cǥ:]q\<tq[wԥb=tog7]m=[g?.v:'μx~Jitޏy_Y/dt`9?#uuۥKY~_u_:g:g?:y6>vtҍӥ\'.=ܮΩ;ouoť?.qt҇0^ܴnjK7NOť6mǥOn:q]gZN~[?Yn\.tϧzsK7Nuqb8ʿ.WҭK/]:ӥw:׬yX.g^kťwoҥ늶K_Kևå.8u\ҍs.]8k\zݔ\\u`I|Riot룕\uU:sYo]>hq֋ե7:gљS ֥WߺqUӥuNޏK_:nHuzz|t_y_m|;.ݸA]:Q}Gkܿtҗ}ڟ狼 b;]=<қzztutu;̫6?.}X|n}8]tk;|P{tT9gKtꋸa]9/sJލťg!\Xkե[Vq:SNqݼ ~q}{хW"N<ҳ{n:\53pn}roktҭK'Wp~z:Nqq鎟t.]KϋK'ҋ~5뭏c}.].]Kuwy|yz.=t ]X:\7.Oޭ}_<wqKg~zfMttsѿv|8tӥ7]sqKK7o.};p9._ɫ.ݼt+tu>/ҍץ[P|utcN<܏[ǥ߾W0Zg:;\z<å0 ><~Eҳ֥O,t;'sWn&]i?^v qtn}[|:ӥO>.< ۥw3]'N.=]K^uҋ}\uv9q_\zQl>t_åKot8Jw=6KGztyo\|x>ԥO\>tP]oKťKyqq]ХgFNt麡kK֧_/-?n9q:8ݗ.  SkѝӟQޭ]p5I>..=]tkgu΍럴omݜB\n}L\m}ǥ{+Kg:/tty_ӥ .K7v99]z}wx_?yt3Oz.ݼrn!\y|ӥ;֥:d\z:jҮC'xuyKåwJ]_.=tDͳKҳm|ǥå.}޻M}>ҥg}'ItGKw|ǥ$t,qDqMN,Nt'ۼt K??>Cg _9_9SKKwof!\~WÕ[G6.YGuqsfvsOuc13_Ku_֥?.< g_m\kg|}i]ҝҧ?5}cLKuNW?wQ|d^5za Ǹtx#^3tJfٯgkxצ#g.ݼBۥ҇n2OM|S^ҍKn^7]2^~\mܿ/u._tĥmIK77<~W ]:ҧ/qe+-tM^q_~;צ;g|ԥgjpK+qopܷyw|%҇G7t.=z\;/|_.x1]ǟUkGdKNp||KE??.zߺG.O˼+6r}.YG謿bxMU_zykv_~/So<]:oN$6.ԩ#G<etSW^so?#[g}gn;k:vXc_N'[5l3|<9{Ef7o.ϗ.M|%]mtz9kԭ7iS\u~_+?tU_aRެo^2Ut}Y{z5$~;58$N??v-Ew:\ްCǥKOW;_xǥw{s] ]z:>"qփ.zNg.K~ҥo9ߙҙ[?&Nұ;~v& ~K=Y6K}3ǥҥܘ.S/=}ߤ=sFb(޷Cg5;d}6НGK>"n8W:󰬟Kz{һqOn\yKimG=]/=w]z^yBɓC΁NzҩW tsj\zw3ZGKwF>^zpvKǑ[0O]:qҭ/K/֗ebt[vzѝyp.T]볺tͻKo^q+_oڸt,b<՝љWuVݺ.ўåjzܭcץ?/å 9yK5WW\rͺҥΤzkOf=q]/]zY|?k:u+y"/MqqI>:y/׶?ߴlȣ6t뱙G>F-'ut5ҥߪKz<Ӹ~~o\^_/9zn^åuxl>z>#??.b\ze=\CK?.M['^踇ǥ?׌ҳ~z=[uy6~Kϼtҋ#]zK^tKtmMQ/fPNχ^ex͇6Mq]ץgLk8q_o-^xw|ա'^wOf~t#2^3s]tX[/qQo{=r~ze}vW]8񚗟/\zu-lyt>ѧK/'!vmևIt\_ҥS?p'.Y:tϯKo쬗./:sݴq&N^lONDn}M]љ/,3SGzͼ/z68X~2uO.xͨOKg?*]U~u_Y?=z问jlzGfѡ?^Yxp|zyӥKӥLyo9qu=>zܴM^['s+znxMt}z׵Oҥ㨗N=#ӥˑ/]z>{Z޼6.}~\Sӥ7ҧ#?~u:ҿu]tқ75K֥%^x~ ]z8<]Nzҗu]9\:8O?]9+tIuwt}]ugSg5\ҭWKw?>]yEtե?e\zxWׇq8wKT|y.}˼sIK>Nk%<tc~.:~ҽKYuK..}x[7ǥ]a0/8ypygz|=?.tGKo^x2M<C0?>e^5m}^ӥ{}z8׮3/Wu_G]'Eͳ+?rtmWׇ|xiלʩ;koxͬD|I<X/zNBѩtqu,}iӥbtK>._GX \җq|rZ҇p}Y_},]zNݺ:uWyk[taKϼI^^]EΙ/ 
:$1o_-t׽㣳^:Y/}qקK:}N+]+\}>ӥwK7]:uҥu;/xKԥ.ߪϥK.ͥS~D̿uxb8]^|3_o]hcpԏ6뜷qsUwezs~;a?8XLעKϼ=ts]n~Jqo>;8EzMw8ng'̿љ5J7m4ڬ+=N^|hxNYs"pD8҇vYO&ɭc_>Y/sE|g:3g۬s?|u:ȗ.ݼny9ӑ/I|h]HgN\{=v:m9upۥ>pY/ҭpk.K>.͵ӥKg?us6Y?yXg.utѥ~ĭSgsڦ~ЕӟEK/:o:'sG7Ktzs}t^grK߯:籎.>v3.bYs:':9u`GKsgޗӥq:nn?9G.z>up#9yz>'𨗎NޯXM~Kåt:0ys\t]/}/~^ò~:ҫuNe>C7fMaסǺQ/=#n3]e=\~usZop~\zK/۝3{t}nHt~8ήCQ~D68_Y/ҩiߪn}D_7N͛K.Wc=뉋t1HsƥzU>?\:yX,8x6=?w.yet]/~ӱ; w7ѥg}8t踹Kt⺋q6kO'&&Nfm[wN~keW .}֬^t.oSn9_ЙucK'&]zuΪ3'?~?\z'.䨗g>].>Sә}?Y?Bĺz;g|}tAgNl; '\zItt_{ӸG[WN\?3fGK=p_p˶y6tM]zƥ.=qv9".n@]ҍ3}x\uȹΙgscng‰7,st)ҳ>9.]|}oKg_4]z:naĥҍC֥KoEҭKoeKoĥ7қq'f\zӑwh\z֓ǥg=q1]:q u7"]2o2.լsþ2PNws6۬s>ro?.^?.^ۓ\>|qff|t9+!]UtהK8UXEKO=n)>uurȥ+Kp'S\urgʇ7[/pȥ[SOF}ir- .2^Qt9:t&w}g㫗^\T?|srg:}ʇKɥMʥw$\zѿK#E=b~ɡ{pɩ/6u:ppcs'gs\.7>-wNk8" \zr!r3Э;.oN~1_ʥO]/Ӈ҇\W/].=7>ۇ.3C#2==CWoNNx!\}+]˥79Ǻ~p6u:|">.\t1^'><}KO .^k_.N>[ \}09Rw+kx/+ mlk/ߞu:rwiSwgE?tG}S'^(rįɭO]O.rӺyrM7.= m.}|tH&|˥;~^z'[/ȝ+\/c,r|\n+}K'_\zU:E=k_s}\}.lp_9=g8So&~O~ʙ_g<&\P.rp˥g_>Q&u륧~:OF̯pFj/қoipwakæ0Ƨz'՗KG'.|ssMO.=9z[>ȥG.=ɥ͕[/_c5t9:t~S_å?\zå˫MpպWEX[>WɥKtwK|\\;\9 <қzʝWƟǩ;L]9tm.iW׿ʭSwxS/ն˅|vɱ̯s~S/.=9~ɥ/nOr?%?&9"lj{r^+Z^=?\\\\}7\#K?˥>ɥn 'zKt¥UCĥw9sSg+3rs~?\㭗빡~MX=rQ\zN.=ϷD|\&^xcd&ڷup$pɥ_r7 ltm9ϧUҋtInnb}S.>k#59տYO"N}ӣ=c.|\ͥqkxs5Wj~Q.=ij^3\Mt ?^Orћc\_J}fr|KOnzӷkW5͙ߑ}!ʥK8u.g>;_a'x,9rwxԩ׌qv9$8bKO" M}PҋY|cqs,wE<5LJKϘ\7qppצ3'˩#"9;"T96Y35o?lj^|nb~.Kg#[/=rƫrw˥R.}ȥ?mSY/nSqRn.^mZ9H=u\҃vf&zry¥[OzS{Щo^z[>\\Mj˥7l<>pM=zWzM=_S/=8ͥҋϿ.rk6lG*^"\zҧy郳֟WCwʥ'OzrrKgNN.}ig&לԓ$n\9߼SG(Yy#]G9zpk[?\:Vr ȥs!^\}}srU;'?z~HSǿrһz5O.}sp_K3ғkO.pɑ7WެU\߃K_5ƫ\-\kp|q_.]\P>ҋ}0y^>ܜ:z}t髟\}"҉!.~cw?G>'~zsENlu҇~Ƕ|˩_pM.}YW+\:}ŒKO=׼<\D.z?ҫϷeR9rn&!{x/n|>QY/淭^'\Kg\zi>.\\\oQ^pCnRw9sqMz͆p?tSQGpȡ/u9åi+^zUoZ꫷#_r襣_R K}uX_gW}[ :B1^K!G¥_^|M.>9rAK\:rž0rW.PS9'*ZBܫY)~  &_w{[W޴5cóuvChMn0&儢@599_rs}yaZ{_ygu}_p ^sp|7p+`m-7]+som\o4\y=>{V'8[rkh8}=uupQ56ʛ+eaۅ]:z5?9_yu{m??crw]Oᚗ]ew}\kpAq︞{^ˆc^yE|M[Wt~kz|xد~u9z^7ы=bq=5~ʛkmpk<=`_Erg>}SO0k-;r>rW[ys_\{kp{<羮=ÿ_{_Wpso=ʛc^o_y<.lkxz_z^7v ֮/1[Pÿ>xڠZ=}ZgG 둆1MoJ\ >?}}~#3ؿ_ѯrOܟ R}}3#8{ڟx_{pk>؃%~{˭?q_o:~c?/_y~¿ڗ%8ԃg':C85~+]ϿvGN?-855^?5kWxd g 
}==cWxt=>-+n[Kn_\ֶ^{~Zi?1'x+op3_yk>.}]ֿj_/cg<.q?c>}}<GZʵXqO׋Zzıpj{%|P񯱾 w򯯟cKg~֣_Ͽ9#^H~Mw<{ޟ||e=?{k)⹉=#:ɭX_Ds}}Ks_~~FMp~vW¿x j]Ip]νWN s$Z/z1E79rS8ջ νupso-mkc7v;~s:^1!?c=ȭ߬_[rܯ=#~Hn5}=:WarO[pWį\Dlp+pc?s_{p/s_| ~cU9. Z}Bs/85v%3S.F]Npkuc8E{/_z Zk }K-+8̇¹)>c>Fp~sO6f_Yo0~"8#[S'^_'\cp~hr |pc?o}?r\;k{kp }ͯ7v_zW }*3U8: W {Ox:8uW{8~kIp&?|y>+|({?x =opޟs%?oGԯ1ruipk}'3 x&^ }'g>=85_6lkqb~s=_{ w`|ֽa~|q|,|_C~~D~ҿ185_Չu981ނso8^Uܗ?O9 \m>N.w_6x2_Ϙ列}p?;9_g~ }NZMpg__k_p۟`65 3_ ~8{+6O{?FgGaUO; kcjͧ6<88kr+W~EE_Ы¿/z_唂ss}w['kUѿ?ƯM~:_n^y 9s_\{9*c~}®wrnkOqv;3 -v_asz8=)׾3Up}}w//6$vg==;¿/ } Ͽvܗ?_')r5Wrpv+?ƯMOo`%}os88 =z>8ޭ /6O]p;Dx }kįM.Q=_羮`ysapkwcs_VlWGw#丣`sp8uWn}ϘϪa }=uIR_9ܳ}e s_~8O8Bw68gso_{W/}%WxνWXO_s?ÿ}~9s[Fpk=*'?38O.^crw9|]?85 lIFp{r_ }ݿ=~59u"/~_ƿrfgp?pYˋB_zM_pY734}#z{s쿾#p=ppWk7`{g }W*6j_7=bs_xa[O"E<5[ۜ:>>5k };ܳ=7s= sK}N^pso[؅pîG<yrkaq9+8jx_KÞ}5 ;8s_]]W8j0=މv­Uuaz[{ù~5{8~j8^QًcCTwIԩ'"85<į^515^| Zlq5x=9^.;8_}Np[K9ر{לqzܳ=cܳ=3 1{x(ы֓[/4{{ùoP/yܗ?=8yYYO9_(7*G3 o_~gk{_\S~o_ыzTp^x^3x+\w5~^s֓GZ?|^_p¹'/(>~%: } ocw~'o8rW!8ewl-i{XϾY75sN8Gs(8{.l"^c|%?O/ ZyOuKooyN8ȕSwYtItvҍS \\zrܥݩ|U. s?\zw<) IyϓKu:pyrrrU_rM>CΤʵ}c!>7N,)s?yGrU}tt&N?!y}W\n&WyҭKN"K<¥Kw>K#i#*'f^ Zw\-w^O.ݼ79ttR:*x;3\}6\:y҇}ҋzݾ/¥9ܹ68LnxK|f{aQH.>Npɥ7\q+Mr#­ghvǙyN8k􂿄K+zuҍK'o\.us6lƋuM{rlϏi.\>\zKnaxNQgr} \9ϓyN6ptҳE.]BtLɥw!\urʥg]\uɥ\:vn.].#tp7^;f>krįQgnppٷI}9{~Ϭ Krօ˥i7>үȏtl`E|_I=vqns\ur ˥'\GLuKN];r9"S _x?ȩ3f_5fZpr]|>Yb*\ur].?_S]n~.8\:vtɥʥzwj~8'ov꫷\ҝҭ{K;cG/}'tfg"tߓK7NߣN3)'\zS .sg_.=& ?WǾ/Sg'~:b/qop-\_o_kKirZr~y׬ծGnl8*N>.#!7bKOO.tr鿴}ȥ pMwrKſ\zrr;fɥi_ CupV?2~}< G|[+2?ɥ\zrs(ҍһ'>U.8}^l]/o.=: x_\ \uB]LV="?'.]I.I۷å ontENzW:.w~Eu?3έ'y񯯶}"0Ӿ/]NRǤMz}tʥ>rԓߗKWo^.>rKk\:z޷}KE.\K_m_9sE\z/S=l>MܾO+8ȕ_ѷν_]ɡå^'rp&[ٚ}_SwX|5Mlt֧o)py|<rpOɥ\zvk`/tÙY1Sߞ\rk{憜98­ݣoSWտKO.'~ENp|opɩ#ԴSpה;I.&n>L.=cbÙ~~0>\__:+هN.=9å;_ʥ\zgK?׳!tm.=ӷ 59t9wqX3uFG(ͥM9s׎~lj_9~r\:ީ#4LJSzMGn. 
6޵CνC9t+tu9tS3#\zrU=tkȡ5K[}u{GǍ_ppn.p|Kg|9%;" 0$}Y+"?'\zS]*䦣^һ|6ԗ/O.~tK)k\-Gn zo.ͩc&K$۾/6\ᬳ^ysrk]c>KSGwrK"ޗ;56\:ɥ:U.}s> Sٶ;ɥN.}8޺zɥgfS'.[rNN!4w˝SG:yG\z/yK䬣^39NVrzُ3g>K}vxvrW%.dK'~Kw)~ur'&~pį:M1"W~}KÕK\9v\:\zU<<^ANpL_tߗyå9[$6Y\:kc;C͕KKoe뫫!g~KWX.z?v[3%W~G 1ҋwrå'7R<^Y7~MKKrݼ{w}xқ7_:s>rL?Og͡MS?X&H.}ȝ˥˙?'/N^6ʉ^9ZҝOһ|rS$#p93^>8y"wݬ>:rs[wq<~#Ot;p]n]o_ edM?åWǴ?mt-KW\i߁\C}D9s=!p3o!7n}>3}V.=gj"חzSn+cq3o9t3>"}KC=^zC/}ا`o3ɥ?څ:u9?Z"ȁTK./yNae,\o9y<\gˡ_cYҳFt9ͥGѕy"WξMr}^l=rp驏lj;z|9zꥫWt~or].}ȭ쿉z~:rKoC/Ŧ!Ϸ߷m 7Ito3_NY.r6\^L~'z t(K7ugp['NC/=}>twһz[G9wȥ7޷;QSOe='N+8:<\eȡ$n]ם7K_.\raޙ^-w'2Ko[>:ξ'9I.ʉ7\zʥK.=PǀKo ՛p}`gO.wN|"zҳN .=-tͥW/>rMV~wpu>ʥʥOKkotK8\Ե˥[Ǩ^unrr[/x59v?SOtKor}K[\zrp&M}һKpU9멗n]\jK KrI.}-.G^\돃KkK3OtT?>ㅓKcS/tɥO.`M^zK{p70r#b̩@;ÿn"opr艿Z:"׮^ɥ|!. >娫kꥻ\:ϳrɥW9t+}rå [$^|ݺC8ғ[P>\0~ ->:u׋^z}gK-H.=9u0Wr"R?]iåͭӯ;ǿKw<ȥ˹ɥ]ʥ?KO;UK:ʟ/~{[S߽XC/zKO>Vɡz\==c=B.]bsp[/}`c?t7\z=K:u:SN}C/=n>n\z·n_ қ}mG>ѯ|3rEr+[rKP/l~rֱ$\zrUZҝ^:}{sss}KnR.??t׾ׇKO..}xp]..=3K8c}$>'oƯɝ_Lm.m}u'6/K9!R/]!\p^:uɥoKw8L暜9zsExAK֩r pw}yԓnsl[NN>\\\zO.vI®Ưz˭Q׬;{>̏9kS'g^zå r#rE}~8MuIҶՇKtr3}ɥK\zQ{kʩp76}_K/\z/S66up-N}8tn9tM>V^Y9 uveK$tNɥ9$Yz>|t3tp\:kqёKI.],>ԣoCF.>vK,:Kɕq?^w5ٟ=һ:}Ы٨?Zjҫ4\r{2~}sBzkʝڰK/o9\\}_-5__J}; ȥ:zߕ8%rY759t5?\!sɝS?~|;jk9ܟK7 ކ#u}zܿ-/qKwU.=orå>ɥ`P\OSY]æOpS}ҫǭ'pE}r5vK/Q֓ +ގKF.}e'g nS.]=_W=u9B['?|}9v\}JR/}I}txv6x>$^>:kwr|4WC/9t׸K?\#W}ߕ9}˥xkF_SG(\}3zK!8| >䲳^s~It8׾੟Y:$KWG*Zk6lkCǿNψgMSG;6ɱ݈\.[.='åg89s#{ꥧ>zϋǩ'^s<å0q/Oxҍw/n,t^z;]r˿&^Y~y^#|G/ 7WN= {kr~1?y1^^_.懧:'nlrď?.}KY>y?\zp-rrre_'Z;[Om&+\ 6y)wN>\zK٩\xry-tFr76__m֩I~åO9s9v򜗶g|_}DmKw^O.Srÿ檜s^zѦ}=9~9:u:n 6 z6u:_m#B?._ԏr&ΗK~~Enʩ}t}^p&~ o.]>%wxK\:7S<}rmzɥs焃~SN}SoampKO\\/\zih-#é.;:~/ntu[wmn NM#\z/NyrKos_a;uKoYtu_?O3޿\g͡ͺ>tKO9ɭ˥{ܺqeɥW.׏>/63<%Wn:yѳS_]z=Kr\KW.A.ݺ{t9t뮶^pcKOrt9Jt(!޷>:}n9tqzɥ'gom3KrVO.=ʷ\ur~xkS'o+S.\zM=kbsy9KoϗKw\_.}K/k?#}ɥ?zI$nҹ79BDnCo }|pQNG/aWˡP/y7]8Щ.]-t\u~—:Xȥ''>8stcz>\z?tɥQ@.=GpY7rrY$ȩSwXΉ_3ǿV/>&pgҳY.U.e=&nͥ9uKGȡC=ݎoꥧ~k!{'wK/S/>r#tW~{u9^oٷcӰGoo-wNGҳY.37װ3Ng`'jzɥ".'%^\zݜ9ҿK/8~'^\z7rMN=07m;&? 
XwXЯ+z>\zU.r3ur9l.=~䗒KrU&^:[&׎[IG.e_p9tÙ{s1>髝\\rʥ~KO}]\wrkv%hr]oirr9rwt'n߶ͥ='޵k?6fpg\oDҧ;,ȧ'N>M.}s̩\ ȵ{ʉ_z\zG_SG|)n_vrur<&7WL6襣ލ_ݾ}"=x!zr9K.qw0?x?^6NxS>7uЛrW8K:ԉbիW/]_.=O~K\T_ѿ&w^#^.=ͥ'gN|ԩ'WDs)Zw:K;Ko \zoursȥgٚ+u[3ɥ'ښ}"W;D>(.$n]O!7azrN9"n۰3ܯsc:t~e>N{PٍMl;c|}sw9t+_9vI^KWK8?gO[/Ү\z{—y?ȭå?ǿu_t8דz7[3.N^w+tcȏv98}:6Y[WU.od~X}jKO.> Snnjjʡ_̙ϊ6$q'Kz]o.]Nt[/8?5Co:]å'G:BSک~ɡ۷G3)wn]-ٰDž\zgx˥mߗM_5Wɵc0tQp9uW85]¥Gz͇+tqp7|t rM=*.=9yPCtR/yK/b=m[_zp#tɝ^.=_pޘYukǩ')r7}:6{O.}ʝ?å|k6SGhȡ_ҫܺ:~5˫^zG/VC;yPg_6\z~Z񯯜‡K/^\z/r5E\:><å?*NC/:[N/p9ܹ҇:B[-g"g5;+9v".up6}"l.z>}ժ:ɩF.Kr.mx*gN|k/\zrW^49[ ˩<Τȕe˥'ttתSS+n$C .jiT~%6v9[$g˩/N".!.7^9zpߏz:}69vㅊ6:B.x0tjɥ[?!.|Ρ>[Z{7gfpI!8яO!~ה+'~Pg/^?^_gߗK!SO=Zza9\4?|˩Q.=g,^\:xrS|\r_r˥rǷY]rk69t_:<KC/ܿ?}zpz&w^xᾋcϮהCD?>uR?QO\:oR_S<;">ՏKO=f:>\zuʝ?K/r>6kWty/χ꥓?KoN߀i= wNN,K>tQɥ_u_wurqv9ȥ9[ zKOn5N~8{rwr/=t_sIKb=+w~16}ɥ"ǝ\z7Kkr^s\漣^{zOrWmWå$=ts-/8{ߣ^> rYO.KM9s)^^zkx_.]uҝ7z|0W}/<^F_aFKz^3|k~^xok.Kw$F\}:ҳқ6G\z`w?.?UY־*ߟ{<+Mۿ?7cC$D?______Ͽvsc#ɹסqpAu^r+o;zT&8!iJpur+oZ:Zs}_y:g w>\ʛ^ʛ!g}rY6ʛ>ǽ;_\q=r?[?oM<8:R|Xwc?,g\?~g_rO]Ͼۻpg98:k]o|oEzoʛ1cNXyӟq?WKùZ"]_ǫ|z7m .28xr8߫r3g ^b%^7?r3W[/x_olWt+o>_x꽯zq$~{П_yu?}}緲8oD{1~sܿ٠wE_5p~v.1sk+og^P}M }#3xz|o 'v 0k\])g ]lxO?r=%泾+_87]cܗ?kںQWtbx?9M+_ճ w=p%^+_yDŽsO:)?n+ֱ{<_l1s>z Pνvrkῂs_vc>{{*?b>k[o/ẃs_c_33tsc'`Mn垯k7zِٗ{Y6'?|گoSy+885uW z0M:~s_c/_Os_`wlϟO{<^c+r?:R/wտF_(x?ïzRo}vc\y5{|c=o={Gn| }]_|~|6ԣ_}Fgɩ?b>Oput9uWd?mjw~7Mƃz-k]{g{ 뻂zoWG gHp?. Z#^_:rOW :/v+_c<695m팿s_Qn }4sXp7;??\r˿#8{Gpۿ܏7¿w$8'v X's_][Oܞ?9l zlا==8\%l5צӺ . o{+Ws_.}|=>|b_<8u}W_џZre# xc2?Z? 
l3֩׎=qNqㅃs_ݎ]~1G1Yqg f0?8/{ǿ^{矛;98>ɵ+W8;gps߿78=y=Op+C¹i{yipkP<;+ܗj3W/ù)?zz{gr28n{Dʵ_wroSGgY~Y;agn ,}q{3__7_/8|{ÿZwrטOs_gܿWN =pkqcs3K587pȟ+?:Kmzc~G}}Ͽ#9^s_8"~U/,8o/g(zrW]|ַMGW>q`/s{psCp¹Jpc'~E}|&{`}3s_M8=yWߎc~+߷Uݩ^%#ۗ%8u??}'ʩ__z؟oW._9y88߂s?F}uϏC.~ ~KK-hp{<Ɵ;]'6]_Sofɭ_c'ś>8/s??6ʵ3 ǿ_1%}E~X,8=b=7}?W~W>߱{sܗ=8=~s08'8wos 8"_c<#ޏgԉ_~_Ůa'/s885߾į;~{pz&8)G^o3 gr+^p }cpk={puw&^fWn~}׾¿'8oʽOJ䕓/9x^Y\ms k>ks}pyxk }͇6M5W"'O{M(8/s+8&kGs_7+uvp?ȵ_c}s_|\rW1'}q,ާO#k«J>νs_g`x_ѫg} :<p;w}_s_rԓ_CGQp p;{S}u^¿^ `&1|$~"8̗ùnG}\ֿv7S~;{rQy]qzGs\;88n~8(_77sz"85}ܳ^=p:6:/85? g38g'%rW_ %8s_h9yWN Np[o1?^|_ Z._t`w~*8xoo ~~qpSg=s_?Eע|Vpk7\{$>8C9vq_|Y vWs18=}6]xע=y5osAfrkigE}]羞.ȧ{ pߌbvkV]5Kw=Ư7v5%rW :5xk?oחUn]Zz*>P35{_>ݾ֓M }5U~O7~}gSϩL=z5Of^7]34?L糡M~5?<>=^W$Nf[.5rkpLm}2+~Q!D~.zͱ9v3_ pkuCmkp'Bpkv9WMp~ù'o8/pY .,8\!u5#ϗz<&rpk|b/8s}gpkaGk_<$:$!z ڞ:Q }3>s_Qط{=¹!wSzkrɏ˧^39rJNUک'Ӧ^7_}r7s_a_=ܓgs_Lr3ߔk_O@3{xpį[Q _cُg|}׬=#WzcE_a= >S~_kdxùg{P|䌩rY9ԝˑsܳ }o羮cCTlzpY羟wp?z_ݎx}'9uy9v#pk9׌Pkǿ^pQi>os3އszS8u\|\_ovs$oxpυs믲5=~WSbw[}gJkswRY>ߴs__Nub&O{¹'sok&tpkMq}J=;Gx!z͘_s^08WR_Q W~:ub.#^tѷH.].ݼ\'ϩ>ҹɥ'~y|]M~V}Dͺ8{P/[RO}"w]Rɹn.\AU>0v$ș\\prE~XQ/>K.fʙå;}p.=+t|ɥ;?ȥ{r׎rp>ɥgA.].rsׇKF.ݺҧ_L.m>/rs҇\\zׁr\9Ip7.=^ɥzaG߱f.}z>pg<'.r@9%g޳ƍ>b|_p$>M0qɥo<}Xҧ< I8/kS!|w |&n'opهK.m}vm\z:ukl{'؇(n_0WbؗKOnA.]"[Kw[.]nD.| v;3g˙7 1ҳ/O.=9u7B.x}x?/ҳ/+%rgUN\o~=ۇKS3{s˥r͗uȱÙ.n.ݼ\Y\uW9Nԡtuٺ{9~Ι_.~aUK׉/]Y>ͳtsUרKN:4\k~.]Kw?ps}p\țK[N\zVK.,q:5\[;ǥ|K1usSK͏åN_C]ߏ~q=u67cř~t_uq,7_.}vr ǎK?;?jn:Ne|wکyv{\SoX׿>.ݺ_]I].=yqc }ތY/$+׺}\zy~j\73:K'uús K~izp>OK7 ׶\z׭^xϚp߇q?t&uӥ|quSǥߎBx\q Yғ=_u8]z\znNg>q駮JcO]yKOKig3\}ҭץ;ғX|uX qƥ{\z].^L~i]ĥkkr8^\S.%*.׾.ttc9twݺƥKޯғ~8}K\\:}=Oz~vu˥'\k]}t.w~y>̯/gKΩ8wK.X`<ѡSQ׺touaѥ>Tt}uOڙ_/.1}ӥ7.g.tzq8rKyO:[q}_z`_]9uu:u?\ԩS{z]8uׯ ˥OҽCť_ű߸0_>zMG?o ?ogpI8K]-o\]>g.}sӗ`W.ݾEt#t^_tϗ.ݾQuLpҽХ߆K.ݾ0t>tԥSgQ~>9Uw>;:vɞuc_ү5ݏ_ҧ\n]_\e-^qyyҭK\z9׿>=ȕKwF>͛_/a>t.On6]w]yͥ7֓L?׳ǾOt 
\$2.繺t/틴tݹqyD\}#u:;\zpqpܒtҽ?֥{KKݼ|[g2=9oi]tNnV]tq7]n_8\KO7}å4]}hK/Od#T]}!q篸kG3qqԓ:߸CWpŽ28ݏk׌;g>k}~\M]mKqݟoZԭt~嚛3țu1$uKNnG]zʫ^:z]LvwpYϋKO]A֓t?}gCғ>/XשеүKǥKcK^>tK.>f3qk^:t֯kLf.^{uͧWO.g?h2_9^OqӜEĥХ\Z5OMP79Y/ҧ8H\cg:u1]K7Qn.ё^֩Kwp˥ǵL}^>OgN=I7%7u_}}kC~u'zͩ3C'^S\֕!pȩg?89zM]r=ǯrt/tc\:yӗ=p_ҧg].0qK\k't]3.]O׬nsy[];u+%d|}%tyk:nyOpkΓ.XcX}nwݿ]ozw>^ԩ WzMOX%Ok .^ǩo[^ԩz_>NKO<K[_6tO=D^~\.եK.z{/?.\٩+.}r~ĥvrN93'w/u۷/..ݿ.}Wwupү/{{\4V>̛ng#_N2/}Oޜ.}noƬÒp_w2vSw} ?iΫ;vKV\nrt\s9ͫ?.KG~nq'/|E]˥O+M];KV's\kss=Wץ\:ct֛f_`LqypnrVK7Sn~.5Oѡ܆|}еۿ>c=tϝ}NxoV~yr.A: .}{0\i0gx爷_>gK.=˼ǥFN]ƥSG1yHq/@\;gs֩;އC}O93Г~֭;'/s{6^}κ~ҩ;ǥ'w\z楳[>J\z_/u:}k>֥k^} K?-˺PKߚ#yqSl|å}>fg[]kc?ay үKסf_]ܵ~ީ3´]l5HMޯi>n77ɷn'/}ϸtK';.}x;߾7?\uqѥ]Z77qڧ.Ͻ>yssz}|Kw_(.l%o<.c0>yá/@\zAy_pӺҽ{z/썱}6 }·1mp:XssP}$foơ܆]iѥ۷D>{?___O\Wѩzʭם~{zcWwN~|K_ӥ}S1uUK}tg]k&/|!\4_eѩ>%]ǥǝǥ~q&*n-/1q#}NƬ_qSGsG6/\z$u1cҥ/.ҵǥ?~^ ]zҭSԥϿ\u]w20ut~:K7'uô.;5?y۸tV^zgꌩcɛ| ]x|n]X>g=uAu~yuG܌Y/<:οq\=+y:5/ݾ.{rqW\:yӷ7s;O_6oXwH~c]KO>]u=\'c Owz1͡C73ԕg=߈K˥Wݷ..=5\W܏u_/:uiwpK'?o]ui~_ط㱏ynҳӥ~!^c_izgs_Qn~..>.:t .g|tƥHݿnμt@ׯuut>s]yӺtu^u:t8Ù:tԙ_o8?.eteߗSN~tG>.=yu=[ycwny}p'w˥Y.>quKYN]i\ryyǥ;K\:}O.KO]NI.]}K)5o)φμoKuGb7_n[ò>Sw|ouѥgHn?.ݼG՗zAJɘCW^Ř8~yp߮_Ϝz?ׯNKw~NS.U\z|lyKOpugo#gԩKסp۷Tݬ'5wǥѡ5n:uXᆏO=I\}K?s1fzʙ_q֥K:U]ƥO߿aߗGw~nϯ7^.r|K?O?{K'/w3қy'UKy_N{w9q_ť?y鸸ͥO>ߺtgV^zwl.Q]:qqo^}/W^zoүq3ݥ1Kn߈_ _O&y}_ѥ{ХogJsgqq̸tݓ.qKOK]:uqy?+kki.q鏿o|s>^:{>Q7׏Ko~M?ox_ׯ>Oׯӱ녋}z]NGN}HKL&~%_^9wػqSgz9yWOf}Uy1f~M~:+:yn}RO]Op\σV^:<>[y2^sucs_U:cljK...J~->Y׫>tK˝~%\eDwkG^cK?v'~U9]}mpӭKw?N~U0q[9}6p≯k~^ytͥ?srғ?zM|r>tsh5k:[str}3&Go$@g~0_܌K׭8-~_a:s֯ǩ'9?.ݼԖzWWN䡟sեputOBc]O5us~sͧ~SI^}5}޵5}s!ˑү5eK:oݥ/.>_u3~\?ד#qtߺ#KML&qĥ>uq8>]g~泻V:1q5_lyQs\K?.Y̯q_篸+}tw]}|-M:צSg~?֓ݥG0yW\zwLf/+/WA9c9tq3rzKw.֭'.ԓHP]F5OoN>VoY/\z9u֯9Ϻ?\ 2qrǥ>0??.}'/G8޸#&<.8}7/K}Ekg`L?xO;ݺsSסs67c`˭g򺓗._҇ooƭ{R?꿩3g_x9O_һ_+gK KF\^+c^O^zW\˩!773?~a[^'*R~{M*OQ}]\Nvt򤓗><<>tu.ݾ˥:WOk\C}orMlt9n~޿i\9u:NMrթS׏3Gg|sKq1||PÓ}Nqϩ\.yc"S?OǥGK9F\O^tlk'աқ-.:-/Օ>밮cg_)I_ ?.g9编CoԕΩ ~9ǥ?Y}ƥ׿g^:Bgҧ}!KtХWMMһ}NLs_G}qSSqԟZMǺ9:ץy'/=)Grq&'/n9sD]M}}\:_9ou:suأ39bwL!}Nĥtkߏ}NԆ_z K?>.}xL 
̯y].>037kť[tq9ޭup爏c#▓:f~}t>_3fo\zҭ3f_.OB~ߺ9ogթ;[g_ѕSO_Ǽu\͹n.}z}Nĥ }Nƥ{=u:8<gytGƥЯc]vjUwӲω{ƥK?ͻNͪ{0/ZΜ/ߒn].eOQwUK4?{(.=ys#ǸZ9ݥǥm:j]+/}|\n >_]{s[KN>yKr_?2v~=Sg}Sw5֩[1v+9\GK:Ɩicw;X.:{9ouK˩s=}N\yҭKեV^:K\:K:$]}pq7tؓ>3v:}k=uC~ǥ(y'u?٨sZyucc2ο҇?.>q鷯ǥwK~Ut<'oM]?>V]gL^u^>Ӻ8;7K6󙒷^ե:f\+/z{wx\z8<\0{}uS7?g@\zթy}ysodzwO^.}_CWNk޿> /N]gq;'¼tuquǥt:`]0.e_]ucNX]z_\u[^q^uz}\;>;ݥm,u/gқN}\zO\9+]}mKOKonݐ.=ymtuqfe\üt$ĥOu:c]zq}y8^g+].>[_.}ѝSwkӡs=_Kn֥{>ғ7K=K7^Kr\tĥ{?l^u׺guXSқ߼tӖK{tڇ}}u_MǬ_357c#k~SK﾿#O^ԭӷ7tݤy:].~?ίu<2.˥ͼt{߯:sp˥f>Lޜǹ_k~φ.0S_K:䥗2/Y\t_έq&qyZy<.]7K>V8:ZK1,3⢯̯K_eҽ?.߻tǥ߾+}D&:؏K70yף#sgrLK~|t3ftrOfz!~_\}6K/..g?`L_K{t8λugR˥}|ׯ[^㋺u:9u=\y釮;]rq㚇CKO-̯[g8y3)ǾK:Sgny鮯K (\:.W9u|uqdL>uסz^8tݥXy霏qy2/ϋ.ݼr]z\<os:cKo^ϚN|?5/=3\z'a] KЙt}urϕNtDnNWn`L]?C77u%o?OҝSw8^K.]\3vԥu'#K=Nq|K>tO҇~9ynݼt~\7.Mf,yuޯ:t[y}p0?;9B'w^+/|䥓~XOsc 8Sg;'OSY.=N>ձ_㴯-}>Х^_z^::>ǥߣ^qM^3 !Ǿ5?q闯g?ҝ?tIG^rw 9BKO5֥?+_zkRy֥_:kOݜ_ٟ/Wrl0$/Øz+^~_]5#D:y.ݺ-/ѡz͓1:?tեZtKO.U] KK㿜_]yc3P雥K~).=ݯ¥gUKo\p:tҳ>1/3pӭ?~.=U.!q<ץGL>/yr֝_}=Ϸ+^|t#9pȧk9K]3ԥo_O_r ;oi']y+/ԱSOR.F~OGg3:u]ǥOqW\Cw?NNf3o9u}'GԽЩ'<\\4!/jW^z%]zԥj^?>~u|~I91oO\ǥtқ#}"7ݹyNc0KoGky:tU+'K}Ww&ou21~Ӈ\懟t]!_;ϻpkK7W}SY#qkЏ:8{\׳~Kӥ'?<9BoϏKg y^]z31'p.65f~%MN_5.>t?OarNw\::yޖz?G[s=ӡ{2z6utp\ztg5ɳN^ҥSopwH~OWSy0y]}!8t~r_mե'D~Nsoǩ[+.KquKn.vx}\ԙS9ĥO?>yzһQ.]cױ3];}K+zi\Coz,.}'_>Vz_OO4t:8o)papt_ӿ}Sgw_/퟿__ÿ>ܫz9kߴMkߴ'߼Mu2::RkߴM]t6 ν7m׹<7m{[y%/M{[giUۭ}uoڹ.u:ϼkߴ%OM8n(ν/sCԹ; ;oA6t-otJ8~|{cAޛM[kߴso{}Ӧ;ŹwEq]yޯ8ο䩟uݹWm־i5Źs}y׾io:7m'ܻ.^z6'νSso'[}6Tqmhp:վi׾iK>v?üR{my훶-?W%νq:N~ν_+_|4Ot8><jߴq}Ӝ86=_j4=;ν%M8FR{>ݱ׳zkߴ?soW5?\z^<οs:L{?V0?My8f{{>?[-M{3p~ʼn׾ip>I8\pu;.8kߴW[s_߯MSƹ7]/ν}n~7νsoCg^m־io νnq},us=}ӟaW?|{3 Csms=Kj {νM7mc}f>4νm}8}ޗu;t?k'8s+{O ν7u\t3Xw~:7m+}v|uצk_{e׾iK^x;f~=׾ic_Smvr}Ĺwܛ}p-yo#й[z?jߴtХ׾i{oZo:lY7Yףkycgpν7ަ} jߴٷ nϷd|q}W۹\{ޝ{νRqν.pܻ p-pZu8R̃~ܻy8?kߴ[soy_zsYGq5㷮87mֽܻSso} jߴY׎s{g~v֯חsoܛCù{׹7pt]Wso[*ԾiNެŹ\{>oU&u8ƹwν,p-yoù{?soS]⨻qUgsotn~EކyۇqCӯ}Ӽ8v-.y7k4#:9͘uν :ν7˹:qq6_}t{Źscx֯q׳䡗so:){7҇zVGνYׅso:{ש[~98wt-n^2~{^swsw}sޛ;zk;qޏey9Pso<ֹ7 Թ?kOx9>{7o¹7yq?ܛy48~sw}soqܛ}Ipm~[ν% Usw}so 
{_/7պA{{ν\qMs{W8wkuMDžsoAXg~%y=t_~1˹?s?8wu@{>8v˹W9ԟ8޺p~%߽{νs 3:?й7q5so֙ܛν:r{kܟ][ׯ.~>u<͋νѧMνDq/C9so¹g¹ߧsoqy{QssosߺYswP>ߝ{;Ĺ7͟<_p>ӹpZo>qй~羾v[~v{:rùv~7{Ww_˹wS8wt>ֹ7νMνA9w8n/=q{q6?~m=й_sw?G˹7]ν}sr<}</8wܛ}pU^'ν]Źuޟs}O _so&{'{/B{pYƹ[Osom<8Wsu~so/+=}{w{3rOy7ܭѹ_?osr]ν鋭syj:n=νӧ_6= ={{p?zy%Sg~쫅so:W{_岇׺_¹g ίsoG{q?ù[so ^}si{~ν7.]s}z3vޞY^zS> 3~rs}soù8rmsߓwqƿso{}7׺۱\A:6/ͫŹwJuUs{{:rY_ܻ}p<ס{g?ܳsyνM$8rssso8 {﬏p_68p4o{Oq5{K}96<{cùE{oeOzq|>{soq;9:܏۳SOR_s>Jo{νaSyx_kg_P޼Ĺg{uUS?;_q>786;pYT{p_ù7s?s{#KpK:qͼh{{nuP'{pۖ~|_6tW~=8Z7];;vM8v/^{}s~Wq1S|sz죊s~};d=܏#>t5pӵ[=={Oz {[yoIjwܭչg3#{pYoܭչ؟z^8t=S~1z*: Źc5_W]S׿¡^֏ZߗKm,s|ѹ7[5}s_?OkչC%5uOpWwsg~ֽoݹ>s}¹?ίqMso߿5y8wu-}g֯Qkscg?.c5ɛ\~SI1$swZ_'ק^Ե?q~=t윏s{;:^ǹCWzOƹg~ùsy]syν=ަ7|vνsR쫁so[g~M=V{>_8ˀsw_싁soqU۩so'{:a~ǩ顇ׯM;痟~?=g5qלҹ75:w}νNNν_so)¹soN5n#:c֯q̯?ϭ>GMչǿtPHfs|^_ί:p\e:wu۵yƹ[os+9:so׹:7sLޮsyνѧW?pܽ^Տ>_ݥ*p?ҽԥ)һyo˥>yx9f֡Z]c7җK>;$ŝS׏[K|3g?u>guιE]c"ps|o:X8&\q:e]|Y.119;t͑>mw9ovy犷6q鮳kiް.>RtuntmucK?pt-_o1!ez"OK?.L<~վ SvKߏKMsuKn!.ϋ.}ڧ:fͳy0s[.}|\m_ilw|z0vSwK/[֥Ok}t/gRn9Х_"Nai2fvZnқNխOCyxЩ>uqsN]rq_s_?۱˹1.=k|Wĥc[}> .ĥե_}_μUK~l'./@sCw}N>ϩCեaZ[׋һ:등0y}3gvygg33_sD7=ǥn ]}Wt7.=.Rq<Kқ}Q63g;f~\eu?07S+ƥOo]}t+}nĉK;gp֟./.Gn]˥:uq?:}pݺ\z7?F~_t\CN>'8nNқ˥ǵS>_q؇xutk&u'7{?s:빻\/]|[qAߕ~Pw;C.7u?K~0.]ǡK7FҥB.=n=_sDqS1ίc0=<Y}9u'͓ťn7r̯|d}b]ׇ1uc9ώKqsSg3:utpKy?qθt.=Cn.ywgw#N;\zK:e\+> p~:ױqc`K.aN^C\:u~o>rΩt_.᦯_oo^KK7M~~ғ>xNt>tcťqf].n_A]z#.=u˥ϏK7X?3;Icq[tҗ. 
֥71.|G\z>EqwKKwcsNT]zs֥AǥǩyF`c֯CwtGҭcץ~K˩ǥ$qq't7멃qKirAҝt?.<]]˥K.~O׶q{9w/ѥ\9:ul$?ktֿn]+tO]nե{?Kw>.WKХtХ~[xܺ×q]_]z'#.q{[c:_us9u"jǥގKEҗǥuw.қF.OХ?˭SwX˥ǩ>:ql[s7\/ K7^̯^N9ڞ7i~r}:|]:}KJ~/K7R3.=}p>?KOxt t~ҭ+ԥ~.wNΡ;??\#\z8V]yt}n>&.縮U=?}0#<DZ~ĥ}ѝp}q燐?.]ǤKw}Kbs1_Uθ\z_˥_שwdכ1uХMn ]qsK.YNgN]qvt?/V:[.ҩ"p׸t }^Kom.=ϫs>on1> } ʥ[KnO~7}tW]z?ƥ(.6/~kcSg~n^.2޺tcp9t}ҥ_i\z1.> ݏ¥>r=˥ϏK7?UnU]}=KǑqtK?U#|tnCtt>ԥO-]}Kϛ.o:ur7_.<7c$|K׉қ}Iq鹾n.ѩSO|vrp3u:ϻk~.]|ҏn=^3Mf_VKoy#D>x\וǪ}\z>8:ة[\,nؾ/x;gԙ_y?}p=܏#=.'8qץ^ե;}"Owݯ8k}^uC'W|'}:crҗIL\zq-}K8q.?9BcݜկrL_5qҷt0sgGn_\z&GȿL髆kzMSt֝ҭψK,.},gީ錙_~T.Wtߺtt+.=.=OK.-ˏK}rй$wLK̛OfƸ'bL= yϪ%ߚVzͩ3w?Ng~nst֥曧^s}2^8oԙtz\:y\;Wcơ;gt}w˥<&.b\:qޯ}>֥{=ӥN=.~2>6~i\:q\:Oqs.7}%>.Yv9U_zͮ+gZ.֙'G曑zͦSg~ׯ"?zM+˥NK?K~Ͽ\-u7N^tݮy闎98s※y|&9u~:CN.;yӿ$y輯pGKtĹ 'T~>0WgN!|1L֍[uqۺgwKjKtGL:/t/WwY'N:wwq;myc[zpn^ŜOߤ)yq9^.2 ?ҧg~%<.V}΃1}6n:;w>'uH[^:n9;EҟY͏Kߜ9mp鿉3Yus~:W_\:NtO'Bץ˙yoq9ǟZǞt\0o8ǥL\ғ}YKw}a^:ãKngn>=Nkoҿ.|{Kn..=.~8鋀K ~ԩI>ԵZW]yaSk^z7}Nq#ť[KYnIKMi?ᶻҽK$o{W M^zׯN~Nk[˾/]w߲yqsUw\zɷ};Nӭfw>'KO^u~\ -.}cU͑7^}=k}nn^.|ɏg]O҇y3g:ҫNm㦻}6RХjV^:nHؾ/o֫u䫳~re^uBίz\gBn]yG֯n>KoY/:ԥj^z7c9b8W ҭץ_߼v?[.ޞ>{ݏutK^q"ǥϏ:cl܎ouq}ҽO^NHn.s=ӥ?Kχ.=no)yهqǧnW~܆ީKť|n-F~rҥwy:?]z\:t[Ƹt6.Kc9t;S.xn_ \ύuW]u+/˥ǥ~_\u>zNC}?ust~]zuSZ.]N~%O{=ysu"<ЙsO:y8GUW{?u&qKtq͗ե?KS]uί<׃1u:=zcOyac[~[Й~?.=?y|gڇ\/]>yl#ϻvs?r36/1]Ki_\:_y鮇K'/ D:ܼ8|KyЩzᛗysgғ~׳C_xs6]ue^z7tǥ&.қz\t'KϏK֙_~V>3'[\q|m\7Ìݹ_:}_^?]øt+t>a~'r~'I~]a\C_/'c\N!&oαϻ^S~.͕.=}tq-y8~=}_ץKtK.s\tZ.=߯LK֥ӥt\E\rgԡ[?_]f^Wm֩'ίWf?|ҭK˩Ssǥ;?GHvlf}}tt.aKO޹.e&u)sk>:.U~^ԥ=\ϿK߃˾{\qP7$kK_O\zunCts\}ukțy̯tťӷ!yytGgn=oS\:ͥsѥ?O c\:n!.U]P~.zM#z[~[2]_>"/8kƵY:^ҩ8]9ϻK!S7_aL_֟q6\zK7^x_N^:t~}txtԓġ^(Kr=}s=_~WݽukOfӭ3&zq0fx:ĥ*.ݼh\}#>C 
L+_OSץۧg^y=9BWC.'>y>̎K?Ϫ/)׉K<ť˩3:Θk9_]8tԓ$?]7>t}n\ոtsEқ?yNݼt}u̯uѥ;,N>w͑wݬ'qݥ'?Vn#9BqMwׯñ}6jzCwnw~+OzKn=ISu;>4/=nzMtl.>7Ň}ժO.6zk塳~m:ur^[cꇿy^Kw7/}snxgǎK?=طO\4.]}/>C|\ɺt].铗~TW^:K?wnP\:9qqUOrKNǥ:kƕ/ť~頻}Kt7tԷ'/~InyrpЧ$So^cg<ݥ71.Zy>թS9|=}^ݹ}_>.=C$\1W_:y?ǥ?=}nKO|5ݹ.kƳJ˸C8t0'?}^BeʟٿKg'G6=yCgK:urK~p^NX譮owspK?uk<5.zFrNxzKO:sѥWH~ʩ|t$uĥwrS6^O]͘|q?ǥ>uKn/ӡS1ts]\:үyxq#ߞt꿩3ԥSKUgs_NJCĥy]C6=׺1.]j^NC>+_wަoˁn~.~3K'_NDә.:uǥy'9"^o~]:mt>O^yW9\.=yb0U系t^^yԙ3O$?|rww9r9~訏SNݺ'/ tyyx?.Wǝ:qt:tO=KK\\N7W>ЙqKߏKO>ҹKc7kO^zK':.>tKե:|tmwt%/{qr>{ߢ[=:{s:qq&<>j~98ԕ9wynsҗqIƥN~9n}κk{r/]ema/G\:s10ޟǺC_C?n]ظt/>qMwξR]ϙҧK/OXg;GKO]i~wһݥKIҧN>MW~K9Rқևnn|uo:?$_tt9qNS.=\ϙ_Ƙcǥ'N;.:]5_:lzfԩ ƥ[~/Lu?XQu&J}w;E~~~}WgNNԭ׭[݄.]瑼8z!YYlOt:n8~swFOżs^ p~Ł_coNǸSS.>g]tRn^g\tR\}pցĥ^\z7=9ξk׍ߩ?].r'=uY/ܻKMמ}۱ݺԙ7wgu.gt%O9\ys&F~7޽]KoCg!߻g鿩d[>x\U.}g?7/}>r釿z!yO-:\z.=utݙ.0S'ܬI~:n.:Ӟ䍯nХljҽn.ȭu#9бғ~ҙLcs^'/=__tqSK+C/%OgRCt0uχ|aq_.ϣ.?ETr鯯zWN?:KiqOFny{_SmU+0yͼ9˛}_N1ߏGgNߗ1qGyһ .!]z??|Xu̸˥ۧ3.]ǤKOׯOKW.ςKo_?!/|~mzz}\.XW:H\ArKv.]zsu:cg'yC7/}0f?nҫ:aoۧmѼtOw\u8_]xs?u}=乞;S(.OEY^:tWa!n\}:Vnݺ~:|Swå{ť_ǥ_:tO.]z^wa?eK{[z.?'?77ts>.pݼd\{^Μ]JnU}t|\z3OQny y~qrusc/zA\3n~zɟnί_~u&r齯g՝9uƹۯ=\Еt<L~Y\zM}Kmu2?]os;΄WUXHv`380ىq`v۟ެJT_mbZȥWp3/knr&~KK_ҫ:טҭۚ\uҫcݗ)gNȥyty+otrM}gKz֥K.9z\?.=rɥyȥK\zU_Ru}rp&џꥯW?=pwQ6vʡ;=؃M_?\WK߽z驯>O_.}=W`9zzKS."_*Ǯ>S\ߍ{ɐ;缫Kꥻ?KFp;n`8Aѿnɫ6\Gr鮇ҳ.zuK7>^zԵM.|S/=꥓WxpUH[ns~ɝORsz0{=zU\= :u_}K7Wr5l"N>#xr#%qrUS/}ʙ~_.C.]!ntߗzw9N8{t ʥ[gR.\}^˕Ao]5һn.)>/~Kn}Ksrٯ֩chsKw^?_.ׄN.3L&N>IS'?9qpKroS;һܺKOnU.x|M׬K[::BE\.]NpSWmʭ迈ɥWIrqПs?!n]C/}asZu#*6u_כ3RwC0\zqSN}yj/pW^OftZЩc\Mr֡K['_:=^:ҿ9˥~_!{Y}L.^.{s*.7`SɝS*+u9Yr#Kp% rkXȥ[P.ݺ rP/]*n.=gpoҳ.Mҫ:V؏*wZr\kԳG/7t\uK \|mλڇKo>i^|)E~O]~~?8t&߾= |M8׼/~ǡ^rM.d>Lӻ2_nQ:̷ףȑ_'*䰛u"sK䲛59wt å'zyD~[>uy=uKgޑ89sc&.ݺ;5n'Ь\2_3sϛ9$]!!B_U.~˭?1?Ή~:zC|ҩ&n]+WwC.ʱاM.+-rԱ'ܥ\ySn|ͦzrqs>墻ɥ_S%S1>Q%tH҇N_3gZЩ 
>'qȱO2̯xS=q^\#G?+'>[gQXƇS+[ဓ;K+N.=9жΆ+8ǒ:BpɥOۉǍpɥߗz%k=6\gEkp;Kr|f;a8om_/!SGˡklG/7wI.9+G&Oyɥwꥣo?>ɥsz釾pLB[9tλܺpȝ߱~|ͬ@fS/ֵ;t$S.ݺ\zor驿\zzupyԡK.].=9ҋ˥{=ҭs~߬ҫzkr盼!z_'n.e:B?ɥW9s$gN]_b;a}oxM*N|xɡ>|#^wԛƇɥL.ÃKGo=KNѫ^..ްS.{?pꭷ7_åח;K?K.K.zKO=򝯹ŇK_gfgoʥJ.ݺBpY'efԝQ/#ON_r/-uK9%΁:~|߿j;_DBLa_׿wC {O}ɹr;=-=A!;}G/{MG~qSҋȹvN/\gIK+rcm{S?ɹ!W_0ȹoMvp;n~wp;n\_ϯ߇Kwusop~?? ~/=t-}wB-Cp;S{krWx ν~yNc[68jW;nǿo>]pN=|V広s/M}?;nӁڙwtGl_1x3a;s[}ufg:;nڊb`߂ }j;[py <[/?b+3_[p?n~M28֯xso]#{S}7/gMwO_s߿U98|ַ-<7nqa=պG}aq=㱪;/8wȹYi1_sLqlvt?sۿe J}˱_ }ώ8 ;9W_W_Ko_1Yw!8na}_}cozz}*8=vkrw\z Z?tsܷ¿~|G=!_z:=kpa?wE|듁ϊqp{=?c>M_nŽ)/ܔK}8 -Wb> };6WW0؋-q룞'>z%~57牽b}_ɳs߿g`߲}$up{[W{px\鏜 ?[W/sz[~$8 |b>kr#kFp?ʭ7#[{g0νp }}L7޿w|#8w~߃S¿w߂o{g3son#{_#38z*swr%Wc'C }k_ۇso#[rO?¿g Ikǿscv<ʭ_1һW{nxlrOWo?9<1-[`|&  >sg~ 898=tCUn2 x[/1gؿ&~5O:8>O7C~Y#nwx=\s ]b~Op?1s~!~u ,;#g<}ko1ރs:78r=<3r5ӿvpހs#{^]OoU~2U}b/o|kW~|V_x{9wpτszIE.w@}xCxvr5[g/8mw_sKkrxNuă_ùg9~{"¿뽞{oG| }ޱ 7vxcoyvk/s%c]xɹo}cO,*^/3Ϫ\-:xfgq{WrćWgog_ps3&=ד!^vshs~ =px]__~_;8ù ~ß-8wފ\DpLs߇3AԻ =pK"tϻ";8w|相'ƣu_s4;h8w~ νP矅c.[y\Mp{==♷zksyO{쿊zna_1Mؿ6KZm]ć\'8;8w={+νs%tkrkׇ38=s6Wbp_G}?/{M0uzWJ^6>,u;^s~>snB\s"^s_9܏__sϖߊ̺,TpWwɱWp= x\r5vkr?qUG%8?5kj_/:~/87^Or{ù91}vk_ܿʽOOR}WpuyOs0.|&nn~.9'lKogb_~_v_O<se_~]®o>>Epyǿz~"ypsUxvu 7]ÿ{kWk'GEn:8w~q>XD>SG 6yQ}ok'wȽsޕ zɭ񾞜 }羷qxamp{vpF~}߉aO+y<~{YɩO=ﺰrpXosc{`?r*^$_{/¹y*^OL:wp3t?^ȱkZgdo؋BuI|W_\_;:0z8}{rizU֙yk?hXL59WȭW淆c~^?K M牋|M5p|{nUn;蕓0grwės}̇xUKn<8>lk&> oӿGf_!gw6_'ʽM|[c> }xܽ~/2S/\u_S|3$7o^#_ g| =/pOs덉M|ʱE<5ù{*'y{/3] {zۿq^O/lk=8܏7pkpR3oBO~|I3 =ss߿b3't~hpO 8kb[/gf3yGfmriXM-W]oZrǹDZ~x3e'<=c|n5\>ņs냆ϳMn<_nb;$_\sb7MrO׍=cGf^=K }W'#٬\/,IR?xc[wK{qwGZuɽ߲|g_9{?VUG_jW淊M#~|~wӿQG!_wc{Z/,xx.o%O+nI.< Ω>#\7wɝ_N\+t98u6پbރK/3q&G\c't2ȥW/tKppƙKOs}taK.}\zr2O9C.]rpɵqN9or>ߗKOv9ϻyå-w;D_<>,lԟ') ѓq%"N>'~ɡw6yS}ye;g>KnGnχ8gl>s_̋r< 7ȡuE2/8\zr;;Ƀ-rWm =&}Dҗ6W4cJ.'ho_[K/r8K7R.=ktɥSO+KS.}>K}uG}G_.yҳ?gȭrwO.ۇKoe^S'y: _EaG$WMhǿqN;녍'g\zrSM3/{.\uv˒KO${ɥ_u\zG^.._9gCp΄׬rɱ#/ҋz0|9뻙q׫~M|ѣI.ݺp}Ht=ҫzrN.}/.NN+z:v`'9̯w9t꾤9"uq!\qk^.Y_͓K$t+t~K7O+yrգK7\zsϞq[_Krռ~?qNwtɥ|9Kr: 
z˥g\8zENɃ]rꍺ^7xM.2jx?ᲓK_.zr&'^Kr9uo.y*r8gr]m:K/'^8gW9tЉs#\n#VݼȃKn\gc<.z.}a?GǸsȋKn\zOKϼ;tw҇ɥSWa{wr˥Gyrr'rm˥\˽KN}I,lpzp>KT.:p<҇tɼٟrɉ'K?/$\utKOyӁ#o#6mtnrMNrUkrWq^:\=. [)ux -r- #O-# '^ԯ-3q\DKO<ҳ.ҿsi{K.=?<ɕ_˥:5\u6Sκ/ZN.xLrK?rᚋpGKw!^C:Vz{ND..& ntҗ?\:uKO.μ?gk#n]M]Ks|^\|pouЍɝ_cK/K_tEҳN \zUoM.^|rEΜx\zrqrة^\ ̫KK>XOW ksKO=;'^z_kå[wA.]A.].K.=*7r}ԉLΜ<~OK_%W>yɥOE=t8sx3uc~K\z`S缫ɵE~X99[cgٿ|[.{f6:ɥ_rpS.=2Ҧ8\7\r˥wr]kr7K/9%~sp?[Ν,9rrsrp\u]/ɥw^}_\Cǿ}-0{='+9t$~SU.=0\uҭ^cd<K~0>\>\r3+r|K˩?ҧ6:?ktrҝ/һzW:v?Kw='nEtɥ[W.m^κrցK\zrEZS[w׬ .n^79Nӛlo'7,^/͋K.:p?'r1ȥ_\G{rS'Dʕߑg^o\ꭏӟʥxYasޕ}~fʩ3gzɭ_rꜿ|r/-g.n:r䓠}Y}:s"^ԃ渿"grkG9=og~Sr}\m;WjݗX_ʥ~ߵc\zw[jSåK>׉M<._v'`O.=9E|d>Iv<~x\3'ߓ\z;ni_Hn~3\zy.`_ȥ?..(n&t|?^wk69uåW_}gt.ݼv/˥[dpJ>S.c57pYwyלrćS_B~y3̉w+7C _c~{X¥O қuᢍP`yλS0=4ȭ}kq˥/ rú|%N^%NgYyK\z?ҭ+yɥˡ.\z5+~pW.|K/.z%uʭ[ׄ'_Ӻ r\zå':Bɭ͝o .=\ rϑK/ǎAZɕ__Ua^759sdݗ-n;M7{n@k"_3¥^+\x/9ﺵG/+ə3ݓK2_3uS_)g&z߷58 zvr}.gN~\{z9rM 9s kF4qC|͉xsE+k._׿\:8ɥOutrU`*g\:ixrU.=9/t9E$^S}~9gҿrEۺ.}YG`Co}9åɥ Vr7t-gM=;N!8®/[:^#g΁޿W_\zUׇK'(҇zkWrQ@.y#\>Wg^:rGmϻ:6p#ϻ!Co_pSN~:K\:VrćKȡH.Nx:u.W1\:\~a}F:B]/M\*^gåw9k.Ƈ+_KK_Gf\:u.}ȡr!wb=x|=?uk&kSn~Ӄ5폺. 'O.=ߒiqɩCd;篩>I>\端.L*~!2+~_kv9tuqpp霷&ȍGf-p|ZݰٿK:x/~ɡ_'|a|>KQ_;5 \zU߼6'Ntrߛ1ҋukK&\eɥ[H.SgYҋ;5G.9vK\zҭ{,rraKor: _.}srUr?jUMOg>K^2hW؉K__ֱzЉw~C|Mt^K3_3~\:ҭrKb҉$Krޙy|6#0%WNfӏ|Zp_'}3)Kp&NYr{~\z}?}pIȥ?r םq҇07֭iK.͕.n=C/ɵ=.zɥ>x5\x9#‘ԏЮǾ,tK8y9>\zW\\eFN}sDf|C':t"rSKrևK_/qN9W/}|zպ襫*o'Ή\/Ϋ r%^zʥW/1ҧrSG6lԳL.]8'r9SG!t੗n]W/λn n .^zpuy9zKN9ɥ~}rUN?KSH._] ~/cGn.97$ǼK7\"\K.]ۇKg'˥9u9\:qG;g}~d\9"ݷyɥ߻=Gl#KrEnw]NÞ]w$Ao&7=[sʕ\zΧrA7aهq}+>Էə\{zU<ҋ.cSOn|X KeG'/"Տg _+v?i>^O 'q^oə|fp/_.=֎KG:"gNKN}Xꩫ!ι;/GXrAoS8^9lrrr\z~5.P.=99k;vt_K~*>K,}r9y^n)ҝr#္a}0lkpާyKU/=rVɋrbSg#{totarmt^EtNj\-|g^ĆK=&zɥ76qΘҋ6qqNFɥ^|1޿K?\#o6u_' s3M[ĉ}K\:q҉^!'w8_s?wә~}G?tiݗxҭ2̉s>u2 woW.<+o9tM^ƿAE\yr橽zw\z.G\\\yrA4O.8ÍgMf^W/]K'Ω9s[gm~SKKsKrr֑xxҧ;zw^*6G/ݼһsp鯾|v˩ '\zknKK7W.}:[N=ױ>It9Jt{å_# y%үKNrrYrr1rǽ\zS?%N|@Uƿ58?wp? 
g:&~ėK?ЯT9R1Cn)Ϭ[ڱp_y1{p̟pK.]$S?DO;tK/}zn-gԱ'jr9^ot\҇zEnȩ?GfҭC^-_8cg~utՏKW?N.5`KCx硗w7>ܴgzɝ'r..#z%&ns< S]=Õmj^\u^{=\z=>zʥ*,3*ʡ.~Dr)ʥ[Htp&^c|QN.=9G KOb^DSpgCUW/ntzv0/9s:yY:u"z8ryr嬣 xAѿ~裗8ρU\p$å_}ҭ&n]AtVȥ'9y/l0Vw}& ɥˡʥWO:χKoe'\N.˭.Nr9ɥ;_ɥxZʯӟʥ~x3xp鵪^t3.\zpR/ī.=g\zҭC!^^x^g]59s%>FіK'^''ni+Kr霏&>7t0\z3'}:x?ҭC.x\uҍ7>KO;\˥?zs ׈ȥ6\r篶3׿j_ɥV.] nO.ݺr]}yp\kg.^c,M|'99;/ɥG1tOtLkrw\=K&^r%yқg5XU/Xɥ˵\z\u^.=v߾sm37m38lrw5>5/n\:KS'$;pYH.ҧɥ'ǎ!Sξ4?\:u.ҶNɥiffS㱿:u/mq^9K\u^.>.KC/XלO;p#4s;GS ?\z)rʙ }}|r㽞Bt3^g;G=KorNf.$N&\;:B\-ގn3ЭMf6_SNjuK^:m&nXҝ?}?ҫ9}xқ:5Y7G.ݺr:O0_'n~oOKkM^?}kRW!5܇Krr=S// C^\s~5Љ_rȵzpsEN'I.]=\Cn.絩[ ]cl:}MΜpO\z9uI3\zUraѱ9zpuޣ.\cGr¥WV뫟>̓K}Ot^zׯ^z?pskʥ55=,81tާK䰓Kr Pr3spk&|_KoֱzQ.t9C[Jje>\rSߙ﫩zr^:7[x?һ|s;-n]p9tDKӅKܦ\zLǕK/׎}>\OQmkprU}״er驇>Skv޲cYKz+0K۾9HϗI.r'>z^z9u%y9t9~_ݍX*~pkvuꖢw^/\r7ppͺjN%>?/+Or9$O?oͧ^:_9rs\=\zupKW.]ғ+:hJ|:zSN:h.דOw{e9uI{K7>zC}KOstz=線|>S?3_3Gr W|Wk'_\zE#)_=k)Nf9bW/).3{ݺjp멗k7%yzCzNt>\zK\.80>#.tSz=y:å;һ/t^:ᩗ/Ao}wx9"?ttG?9GJ.]ng7CS^Y_?WE^XpWrsuK=G^\zUU.]n8g{^zp0r˭`{(>ləmy;ҧuS?.97o#}߼S/=3<\9fș7 n;ҭ czrpZ`O\r7:yrWXK%åg"^/9Gn\:uқ|99sm;'Oy=6Un}33bW }Q/sKrE3Ul`9\+J.]_.۽'9G|Щ^ww۩om7O'>&.ܹr#vo\z~:z]N=ҧ47˫ǟzp-9N\:NKw>Kå%&ιK|lȱ.ꥧ:Irpɥ9x✗q9sXwNsǞ\z+o߼}_tK/>כ\#8'sD/ɱ{߭׏^x9FM*W^zCÎ8`>o+L:å7qqNٿ½^z\:9o9jqΪ>իhS%rpd}'WN!qΪ\ܦzy榜ΚM^X7oK]=ҫ˥'W~GՓK?5?LpM.]KO=uoM^䅽z錟K2C܆\zM\:zԗzsə[S=ׯ^~sDsȫKWoU.]nA.SGsD8n)t+/~'ЭKk_ol8KBf>w3˥aGO.{b?\W>:S'lG?bə_1i>z8p5å'*.W3ަ6ß&NDrD/羬oyrqn\[KWO[2|.=FN/E\96p9~kW.]N.=ҫ%S?R.ʉL=MwKM|.=3oW.]Ld/E˥?GO9uKM.S'}9bpet2ϺٿR7aZg#?\zQ.qryp\#|˙_CgW ι`rP/]KsLϾ͋zr[D.ݺ)rr¥[1W/=m|&.nQ.^cW/cدI8v9ɥ~K_n7o9Et9dtK_'yrrr iy "br關9y5wM<:3r?^.&/"?p?T/}u_WK~O.ݺrU}Ftɥ;Cֿ/n䐓KOΜK?zg{=u_v0^?\uRҝݟ˥;ȥȥKҍ'K>\:i^zꝓ .= ʥ ^/z+@.uo9P:Or驷zS&\ɥȥCqyu6s &qL..c.}InkC"tK/>\\uZ^otXқwӿAwϻn9r_'nMt r~^.}91kt8ʥ7KG}8‡a|MҖҋW˥꣣\rKor>2>ȵ{%~^]}}\-wrEn9sz_rֱCgz˭{~KsKw.n t/뫗^"<"Nåf!Lb,N\zor䟤^:-r૗Щ5t𧻓Nܿ>؜W>?.=z76$?KGo)ҋaҋ$rROJu߾R9A.!>;SG~rzѻ|MϛKҍ¥g[tШ}_rK;kһv3>yӿ|). 
^ҳ|f{?8ҭ%zyə/c}^)?r59뷕:B].=g+\ΗK\חKKwɩkuys8x5=K竗~ˡE>ҫܺ\9\z_뛯)uk{9vt6ay<ut[\zf>C}esDs_ɥ].}M ~_?K7L.=0SW.\zp?WU'y~N> %y=:B.xEr)ʥON:5K.}kȥ?{g楽rݿU S.=W/ϫ#uKé.-}Cɡ^?"^\4r鵾.=W/hkrWoȥ7WsɥO??KM#n|Xc\:]ɥ/#_|C/=Rt9Z;Kpr^:6H.|?w^|ͨ^uo׼+v'_].ؓKy^:׿6uu&N.^rʥ!ʥO,~M.ݺ2N +.}^:3s<ʩ\zw|,n] I.i9ؙn?O/|'N[n/-ʥ[B>-\kʩ[w3wpSμ>\T~ߗ:B_.K.|Ko=~)t$ntK~_3 Λ|ҋug_n>Ko?QY;q3_I"E7ìc7: H>9s0|Xiu\rwG'_ZgaY'2دʥ^M.|&N[>Ǜ\z^.cC Ω[M.{~tg0 loτ[Ƈw7'_׫uɕ_!~yM۩RlǿRw._:\zpr$>G/\z4ux~oꥷKϺpz'\d-g^|'KK3Kғ[/y=רn]o*/n# .O.}ʽkr7U9u+zWu_] S˩_H<&Ko׼?\zUYֺ˽>M׿_Ͽ~ ~=C  O}ɹ׶swאkF ;u97װ~ajr;n>87}zr׿{ʽ׹۫\ wېS9_r{ ]@-·}cwtU{DV_O!^sq= Oywts__r-D7'^ʶ;n\sooX?<~>b|-ܟ/rw\}ߏ9Op;n_s٩cn+ձk{G.~E`supx >㱾x7[W~M6刺vtgr{>s>w=u|sss߿#8=kGp~`;W-~c> x!>>[{3=?? w}pݱ淃se\㦵9_ߟsx_/gwt].Y]ÿ/uUsuE.xr-^M_z=T% ν6 ~պ#G}|u4s٩,vpoXWؗ&K1 ~yp{q|Vg__7 \Ϭ|W=V,8MznX 7¿r 7;_8;c>s{m+3Fc~W_[g~}o'o>t?r'}|6_y`)_w>G^\pKps~/_k =׃p[GOz 7qZ ν5U48}?rWߨ c<_rXrsO굯\Ǯ1 r#E}1ɭ]̼s^Ӯ1ѿS:@._z~ߥ-rOx?kg g||˾¿G_k1 x\~?#+8+_Sr%羟o=/4uk7 8pٿ/8sO o'@e<} c˽k osKŮ;¿~EFO}rX~;ÿV9Sp;"zSn:8{> }s߿w瓃s@9Frÿ@龾ao*}_ 2p+_Rsa< x_"!hr7 ru԰"|ɵy. 1#[{Ew1~sr+j^kp{>{3c>So+8Fwh8=_l1竊EϻȽop-{Upx};axd$8W.8/U3rYE݀Cv< ~yoor=粝}^_LN:8?rհpڗ^sȹϿίϪռ{Ky\Ͽs( xƿv9v-Wp5+p{+;o7.XEp{>;8ùc ppb&io1c! }߯}nùo>gψwU9uggrW?e|e<}.pWgUr_o9yl89}? Ņ~* _+ߜN;x+ua/Tw_!8w^;W܃]Ono_[W9[p \vZX/g˩?_OsrO!?;!m6!lI'y}_7v?\Nh7&w˽Op]8=w3> s?ExsߟLN(8k>89¹_>}~|+ _:ƿ¿}g'6|8r-Ð}Mxx.Bp}{o{ƋJpyof|t ܷ+lp{>'?#]p;yp_8sg;W }l __ks5gėпc>[_sG3*~+1cgڽ^rwOg{m2c۟{U_~opo7}p{#=3 oL5o$_|<'ƣIxX3ɵs|I87~&: Sޟ'O_\+Iz:g&gk[Ю[UN~*kpo|e_wλ3_85{ˎ|n3=K"_3L׬rk^ӿ=Oqa?ϛ p |MD? 
׷羟nUl6W|k׍=}-~~c|Į,lλٕUnaI^8>ulI6[n’kǿIࢋM|v5> םOn/58~!9v_6C=s3=ܷ?>?Ά]M]kʡ 8ù=#{}?_9x {ݯ?sx%{w23Wp ^b~G=~}s9|Mq~p9ùg>_??|u8mOWsQ8\¹q~8'}k\I\;S)~=y8\Og|}WsgzP~=kW:{?O|8k'_˵3=r#z+I<%8=ٿmGfss8_yC?qz8?|z39uҿlǿ/^'n[ۉÅI]L8os|g8>>8؋{דs~6a;W>?¿ZǤy~|M8Oakf+%~a{ùg<}6$bwhk6$rg1'_I888/s-o8?j5IOrù1{#p{ȱ?ݛM17^~z+_'/ Kcå?ѯɥu9;:?"nw˝Qkɩͅ~^8e;y3;!OD.WaCܼ]y0tɥEwt9Jt/n+t/m.=կ/'ΙuK'tyҋ.]rtҍk$>$t90t9 ^9իK%.%>̉sN#鹢\z΃\zU.V?=✝?N.S,^vrvms y_.=9q✷3q|/'Ι5qp}zGO.}h' .g(RoݼÆNy <ҭ N^kr鏿oS̉s.|#˭#$wp:1?$^sD܇ɡ7qNIK/~8u_r .}\z'o0tKK\Ძy:å<қuB҉'>է8ɥWƷ\z39YE.}s~>0t93Yԉ+/Oȥ׷n.Fe;q_.] Ø/3O.]R.ݺ1pm8ޖ59tl^垉sr\zr9KW.,]'~˩Otr>O/+\7\z\T͝{=O.}`[c/89Tҋp7?yɥz#pɹʥO}җ#9\:p'>'/"ۺ3/lܺ:qμ~F$~ۏ8g?8vցK^yr8!_3Ι:uȱr}'rLκ\znROgow'[?^ErM8rya~npYw8gSƿL.ݺ"rS{+;ҭ { .~Nz9um>\u\8gmZdrwuS'i\z'/mp9+29உs \\qywɥ7~e=seLrW9CKCgNw?\u{;1tɥo 폌sppu6Rn=~!_.}}t_.k3\6\z٨rsh'YcKYɥKoQɥw#գK\9qrpsy✑GO.f|ɥ'\z}ʥS;tGt5K7/\.=s.>\,/>?\\PZW }s>rr\zOr;_R~y¥76טSW-_Wrkr˥OчN=YW-ڇx+cK\ylɥ88m_Xrb;YpryKˑK˥7mλb}$BrMK #>"\\\zCN.=G.]NL..國uO.=柗K_^3Hrݿ\zCKgK3'l,r.I=uS7\zn]|ݽ֞|>J2;1 X t^qߣ吝`-=۳b5] kƥwxutϟե7 LKo. 
\vO}9]vq9tUw~}rt/ׯş|6>..}ҡN5os|qxǟ.>Pt߯s$?/]:j_5y8[Kϼf_p5.fqoK: gCg`/xkҥ/߯i<.=럜_u5UϬg?[<<ӥԓ~.OǙ;~Wv KwVv~M3]sǥ;?ݟ.8}_ݜzt:H]}\tcK|y\ǥ{Bޖ/..n}st]zn{}9]z:ӡίE]}tW+ǺtWҭI;ĥe_uK_]vӇ`Y.=~O]zμ~nҙdy/:q8us>N]=]zOt鏿?spQߺ|U%]Ǐ..}갳^k'Yv;^uz~el!0yþ҇n;5}}>Wi8ҭ/LKқ}u9¥fL߷'5c.=tӥ.ǿ]Μ]߳]z_f~3?.tک,:l5'-uk_O.=v3¥w׺t ug\zW'oؾ1ksu|עC'w LS~ǥ.8ftׯ8׌]zx;MO= :>>rOa^X>x׻ [+cl髖Ǒ}ه8~`^Ǒ_Lt:I]c\a3stk>s\:.XOuO;3Efzԥ7yÓ.}"uaz˾Kw}.nj5o]w7ן.=\.|R]TpO0z=.=.=]c.Zӡ$`qқ~9ncҥq56~KwKSgPיs@uӥ?ҥgBn9Bُ;׳I NIեwk\yt$Y8]:;gz|=_1Kºt.={_}vq|u7\֏Kz1]Mux֥gt?Ǿ/0ubkK7Z>=ҥj>̻r36O.qs8[zcwWӥ״6]z5_|k&;59$yɺtr ҥ;g~-:vט/K]z|^3Df|KW/כ8r\n[]zҫڸ~ӥ?;>kf^c._.Y^^ҽ(#u1}Kw]֝gCnzMzxY5sNpѥ{=Kw~Ƹ]5.}{avkR.=ZgP9]z]۝!pK:p}5u7ʩ׼|?ҥӇ`s0|_u9.å'^O.}-y=+Dkv3LNtKKKWcK%]z.tQ¥ҥ{ץ/_O1w72G֝1}3q~.=:Mg@n]z7xҥ>~.%.}o}Ywu+O~:,ܠ.שKys.Nc[}NuL .yYy=K/gϼu\=;/=u|uyݹωSf3oՙKw_7u?ۥ?:zIcť[ե 9D>ߗ.=>yۥ떯KonNR]}|>.Wq|җ}2/=}NGy"˿K: Ko q>ҥy8}ԥg}X;.{G{]]YK??ؗqamt\>}!t~u:.ȳ&=9q8gsv:.=/tKyҽK~o^O0q"?:5vқuŘ}ԥ]zt|\z>g]ۙ{f0u^>åw,tҝ31þ|\zs_ϺһvMw9];Os+/[;]zեW^]z/9k>.=jo>o\יף$]:{u>?f>y3_X нXuե#ϼsӥO]nҩ=һO7.ݾK鿙[SO}ĩ+|緜_|Ko` ]:;ɗ' }N.uҽ8\dL?t3G_ǺUn>Xttꞎ۱:;3ǾSߓy鼞:v^S7/t9_Cpt9y+t\uC'_åW 'I>S7u%[gz\}tz̩{oC_ǾjK>tQ.Yg7ϵwH^4:]usWsf>[K's>;QwK}qmu;ӥWrsj>w拎tUlj]>һysZҗq_l0ұW-{+/YNMu:][ۥGv>gqrL>n]vqnN_\z1O}NbpHnϕC~UKn~yȌFw߼s>:uo6G}eޜ>Ҏ|]NAn.]ׯK7Bts1]/ӥ9}_p˹?.ݺҋc`q񙗞NpL:Sgnu1.ҩS1y[7`Kax1}#/=ݹmtO?]/cGtq}ҥ>bSwX|>u|~;1u۾:v<:tIӝ^Sq13O KyһnQgs~ӥKΩ~t-v=޿|.}nna<>2o2.g]uztϿG>usǥ83̾/>ҝ_֥ts:tut>'Xߓ.;c\ϣLǷ.}QKoy1?ӥ?:;\c\>`ҝtu0/ݾ'^+եK7Lӥg^|.qcs.}Bygd.=ݒy 5/lN]ҙGtOt^o߯~s>֥߾ks+ut݂.o>OGK//~}\N}y&|8q:f?~[wd^߼tuK'b]n.sKN.]}Ctaһιԩ~Y7֥_ۥ_>~}5.|KglŸuK_Ϻt陿>\ U;u8K'=|p:6\>.}1Ù~}]z7>.7.~ӥE^zчK֭ҽ?.=eȘq鿧݌p]ۥ?:_җnp9.κ?]bҝtC]\n.)WסS_ѥƥ}8tñ}_Uo^Lk<X_qٙ|.}ʯ|r:]M^u<1/݇K2K/%{.q8Μ5gtoIUN]N̗|0v~u<1'!_v:f?nﳏ.\z-.]p8j=Iot+֥{֥^֥ۇLK/.=W^o7oqǝ.uЩ;t-\X^upLC'p9g^:;ykts>u絝KO.. 
{1ut]nu.x]Ǘ.>Gn9sgCw^ҩ*_t?t¥[g.]ǨK.ޫq1ug;tׯ>nk3/qץ{˟4IsKf^gۥg:}_Kwg^yǿgX]/C~.'?]];}6:w߿]?/W>e>5|ե{?ü߃KwONAá3?\zi7Uǝ.}\uUs_]z[_oܟӥ^^3Kt8^Ok~.p~e_KYGfUХ;3/<5ө3]ǘzqΏOk];k棻?<֯tKߗy̭ 0f~ea]y1f:uj[COOoze?{o...o]z|>}՚nzK%g^z/Wkf>:}6S[G^rW:^OǮ֓o^u1GX>9Byof\G^C/~.}ln^bʯx3}<q?ץKtt\ۨK?]ҽҧcl<:t֯^]y1x{pB\u~\ XuW9:n|<$q{KOpә[9~cſyUN+nڷt˱^M^wsK13:ks5o\u[եw]|7k~?I5\S~yҗ3 һɗO^tݎzt75/]7K߼vkǥ_tøtqK^tg~Mt;GHu}7.tJt}]>͓O|^u>Οzͩv'֥p㨻Kxѥ?GfY[oqr}$yU9KK9]3/ѩ3Ghfg.<[]zzMkuAkb߆7OS~>8KE!oqC|{>+:yQ˼t].>}3G(Kv\O~mt%spMI99Bm!.܆å?:trmҫ.gΘ}YoJ#a]5nߎ.@u߸~ҹ?K;1t;һ}#p];+'y`][>}2G!ǥϮKϼltc~Хwpy;֧utzCCT糮c^3lsWȩ״..pDZSyЙ_c>¥ݏطKסS?]/ϯXv]M߾¸G?쫆{'/Kst9}tCtڷ8>tץׇ>M3.OO{tХ{1Ez_tO_5]9ҋzϥ?a~?r\{]p OutuS]t҇`1yVMKL.~$~گѩ:K/:z9?7g0uq|ҥ?p{w#f8:p|`8ǥn\:u4oեq߼[ťoKwp8G77u錏K>WYqs|˺æS7)\sO5KKuU'q;9 yDq>l~;&;ɭ[~ӯ9>Koy>Sw3f+n>ofN7牡+f: xԩ{>.]үӥ[g?r_3}tיkIy鎙'p҇n}$]O~}8ۥwag#}Nޯ1K_۩x?hucaKב.ǩ˾?LWN]?}uX;]n$]4}ι:0ëuH]fЩs1];k39o]9y8wՅ3/]שKx5/1}8KϼuoK֥S.zc.}tcK:Kx8~w.=ٗNv|>uXӡqK:R/0 Zn8G>Osⴋ+s::֡Ν|8s}\z1}3t:|KgQ/?oaۥYeҋZ3f:uԝ77o]tC73.q~x>:C]z_ۥaSw:,ۥu:r`q>Vq;ӥҥ_nxqA1/y'K.aY3~Х;ϼt竕7y}ydf}]z9pQNv˼qsvNg8c\3KowΜGN]ϙy.p>Cg~ŹK.}l.}z|O._Rh<9\y"sK;]:ҫ/kvgǥ['s>t>qu;uYҭ4/ts6u7ҧnq_W1uMw\>s1qtE_sǥwky陯>'ytͼ]z擳|y:W\zoەqױuyһy!n .wK?=Dq}1_vzW:8z9/Ptg`7o:tC]utO]kKg?*]4wߌ}ΦSg?.t~^{;+懢Sg3zv^zsL6T]z3O;o>:s3Vݺu1鿉fSoN߬әs\7/]KGtKKt}wG~nyNqSg~|{;ls֯t:;]: ԩ[٠r~m>nߗpSNΥS|F^m\8amq_Y⨫׻]ltwoCKY'K.|D]ǺtGK_qsq<~å#0_ܼXgcg׫G^EKѥKWut?;/q/:֡SuhSg\zM{eacc\ҥgD>t޷]:vKǕK]DRn.K]:ҥ}m^u̯pKw}Kw}KnKu_/:p;/S~uau@ B:NN'Kն~\ѝ\N!߰OB8.=:n.ɛ?}:x}u|K=?v_ХFpMK.]KO|?\yܣ+wfqN26IǾ‘tuøv\e^v1_֭sKoK?.K]^5]9u7s8}"u̯қOϾjcǥ';]ݝNݺu\zt_n~YKϼiȸեuyO7I]؏?S}~ǥ'N~.I]z≯t陇ޏ:']zaC'|w<ֵ];}x=+'ϕo?oO^z W>i}S|?ty^.cK:6|tpdۇ˼tB~.W~\ѕ1KN7]}qݾJtһ.$ίtOtb^:uˠK׭oyͺ~:N:}_n}D+ycvq?\z7tNj.d]K>O~wz8}_ǥKg<:Oo)xy\י.>}tYӥOx|ҥߎ'i<ǥK][K_]ϧ\z|kN9S?掼v̯UnǥϝJvKқgץw%Ͻy+9_եu&:i^ϺtׇWbԕ_LSُ֥.\zťwꠏG>T~.mÉ?ίqKpѕNJ<Z:t kzԥW.2.=/צSpKǁsҥ{}KO8/_OpL}_b~.}~\KwZzn3v탱җΜ/CgN\su9 n;]:ok}t֥Wpcҝ?\u̯MNߗ]z qQ=ҋOӥ7Ǻ99+ƭcoi棛CGqQ-ҋg^:y%]#t̯8 ]C W\܏{|>ُKW~}ҥuһ?̿\/[^}||WӅ빲^8ԥ4 
ǣK7?oʩ\KtLpӜtz1WLꘜGWzd΁nzYӝswùYk3|j9ᠶKO>s}gН#D^m_K3/.zH]#/}Ω5gҩ!p]Mw^c3?]zn]#k^t:<]zϟᾖ3GptӼ'1KnN}y8ߕ.}8zGwh>YK~Btӥ͟'ѭ~m:ulw sQ9תc7/}1е〻>Wt[zDZy>+Qy+K_z\h:tI}ҥЩ'>QCw g{^tm硗~4/1;s)ydP|?w^ҡӇ߽nYNp+Mzcr}\?{\;K7Dn߽KO}tgy>.:]K<f5_;/=ﳏ./wҟ?\zѡ~R&G"];G^:n~9SI}Q˼tͳ^s~\YKϿff:v\:ǥoKsz~Z^sYO Kԥߺt3o}W ǟ.=2o?׏K2/=/4>>q:sߙ>uN}tۤKw.Y>:K)]zgtrxҥ7@nv1u^Ol^t-?ǥK^tg^덝~.O]z1h#4u5zGe<^֩9ޟt陟N=IutGO~_gߎ~~oHwo?w??_G ^q?vԹQ;{Y~Mhi7~peqC/MK75{yùν#_y7n|K zpg :!7}wv8ߘW~S i+wy e\۵ߘv82} }2ӡ`{M[?!xM~=[~w:*[g1_{=NϻoùZ¹oo{nW^Խg_{}so?k?{8q{-t=޿x;^W|o?:_Ɇs/|pM+:6r\G{p-s}~}|w}\~ νw>'{>{}9~g~z>!{>msŸt<9^N{>νU; \8?[?W|_Ésp17ԯ+mэ8sq~ ^F:w=L-/>'Wskͯ>@3~^GYщ_1Not#޿+}J_}sӹx+ӵטͯ}|_b~5/8{~խ?k1Ͻ: ;;c÷_ƹ];kսr|˸Wq<:s3_=^4| w~u~ }җ~>Rxc> ^F/U=}ϷW~g̾;oq{K\s_d\^F3Y37}ߟp0~:v~3N]+>p ^/{<:|>ӱym^9=wϘ[2?_}7z#p<}{p 羿o/{squϢSz,;[|?4c~^/O̯ {|W=g?b|ͫ_1^+gϊu3C8xӽט/ߌsb%⿯_^[\oׯ85\D8ҭ?7ֳNj~W8燫~Az7_c~;t_uss/6hx82=G`ù0,{>:~_o?b3 ?*W2DZ糇s/pyùqs>yb~Asq+?|f^L8!t¹/p;ʸ|#|Vt{3];=xN"{^o)s9{:{j|cwtK,ҹ)ߌgx?G-Nùsw+|FռpzGp|7pϛ;{>G;pysחq9|Vtz'Ƨs߿ùǿg{~{܏߱5·s^/{GpFztysú;g|w~ս|qѱ>ߌ[䓷_N~u`_]>8t̯#WùùVK8w~c7O`sO8/Ĺk?U]>{'b:˰|fd8zL?Wqe=cz~tϺn.'{:pN>cu׵p=Oǥ{#6Kؙ_8P8z ^_p/{}g8?~ѾVb.~\=׫8߿qqC^c\u5]tr~8Y}t#Gx+W`wK^{tmc 9G¹z ~.ƍ%{><;Kg\\/ :} kpù?_cZt5G蹾ǹq~}=>byuZ>yùaec2w {> <|>Wew)[S?Ooskj1ņ1+[v㼟sϟqKԭ_}|>֛KܖK羏p#.^s/> usM^~~|>b{=g~]s7u~n=OϟoX_sqYc:1~s7qts3ߟ? py?x8w?@)s|d>W|+7w~WӽXo>ֵs>[8wg[8sz^ōG};z$Ÿ?_>:sq=t97:E\črc:Ky8u8wҹ>[b={8w4? ϧK? 
^,ƅ]{1>=W_$8p5v̯Y:x_z1fұw cuùŸt_[b+5'5ɯz͑N>5G:~}_qGOǿuڷ1xqc֯{^=xt>{pyG;p{Pi.-_9_szG'5<g?Xኣ^3rz'qwzMs/#׬Ox񅛦^6;5~>z!>8z<]sgXu׬'p{=}/Wke8}>[5];E>pkV?ǣ¹pP3ùg{pz=¹gݯs\c?}_sOY/^3+zM [_s?q?k8'ĹsxPk'^<^g{q~uk:~t_sz:tOL]:p5WsyŘv8¹y]߿ǹE_5K~=O8s=oxp~Wk6z=w.a^/=gqǿG&;8/qex.5?~=C8MfF~'=}ϳw=]pYs=לqP~~w|­gީ[n^ӼpŹg} =?ſ\^Q{cq~w]_=}~k櫳~w;Ï7>\Dȼi0} |{0c~:z5} _u7ݾQ|sM~s8ù?~?>/Wפ=q]p_u5G |p5yYY|Yxֱ3޺wƘ5KQu#޿g?n}_?spz#45s8:8}}=ةo"<5{טo¹Øzp ;"ڮԱ3=o]sO?s?^kzQI]o#oqK4] .}o \4~|Q'89ҳ>åk|>p/ts]z:\j]ĭ[3[wu Μ}N q9}=O7Х3ӥ]ptXҥ߾3]q扥S:J]׾xOKàK7S:e]tsr.o>>K%tt\K:tg.ѩ/}ήեcsY>o>"y|"bܭNQwS:"]ycXwXu ‘ҭ9\zו3Op|KnBޗ?.ub};$}Ko:uܧNN.=ԥ.6|ƥ7os/`ͭ3b]c8..1t:}tpqy|ҭץPY~ϙG>b8Z]e=2}쮛u KOut]zϟԥڹ+.}Fם+>R'[_:taSwKY~o:l|\7 9g~Kg|>ggӾW _Y;g~%<9K.]5xrrL\t+ңuQW/Yw|NߏjqK>}t\һ}tt]:u鿿b\҇Ow~%<]zӡ>tsr\}θХdtG8\z[{1fuT9.g?o.w>'.p8:,7ag}.}zK\H.qW9n}Z]z_K.>f>wkgsBvs>'u`K|$uc;uu:^/ cW><]z_Kusz~ץ_۵sfʩ?w]z|Ks}qͼǾjg3l۩~ՕԥۗKx<9쳁_Μ}طt׋қn}NvQq>S'KKҥK7_L~sv}D9uW:]Qu?uMϩsѥ3.<眗y>.=tο{q髖NkeL/9OsåХեs_/]uWsFݖ.1O~Z{ѥgK%`>2FwKә~v\zqL]ĥCǙD].:6]z|vtoǥ*]:xtӥ?۱Svǥq/tK|)gR7Kؙ_q:G>'993:\s~đwWq.}\~>1 qy.q\uۥ7KYׯ[ҡ[q\~n1?ֹo3u̯_ҧWn..#."]0_En^.]WK)҇vt;҇s\.}߮?ǥftyF].׍K:k\yRt]-.K7_PkѥqYKϺa]O^|}KzIХwG\/.}__qԗxqǥ>nv]VǜϪ?OҭťoХg]?.=?].]gK7Z~T=:s 8}t̯7g9]z) i]znqׯu:tLCLC7% ͧ}"<..}tvqYK>pӥOϺ.=ݪ.>:tot<>q&Cm]zol[꘾/l\zba:tߥKSKסo]va~.ݾ ZS|:zVn^.@\z[_9.}KϾ tt.ݺM]tݾ[NåW~>sK7zqOto9}tt#$,}qn]MNǟSKSw>pL>u߷_n:؇1s\?3x5֭xw\͘>V1u:8u13o\tӥ۷at}|vgq]7.}۳`NGKU̯ǥgK:z1qaߗ+ZםNtXq2~< nmWک'3Otԓҧr?.?t2ӥ{LM\z'F'.};￞.=]..=.=Ϸ$KWnߗɸُ[:uIѩsҙ_f̯8Ϲ?KpֵҽK.='KOgKw~ѥEH7iߗt>ɘ;.p帹Seqzұ~.}t_\uº\ҭեDqy}KJ7ş~.}ҙ~Et8k?t҇Ntsi\v 7̫}9nYOμ}1[~0.=83<\z~p֥G_2|x<ҷ \|CK}åwҽѥK:I]}op=.=g\zQ\z~uo{qyKn]ttu_[gҽ¥T~]9X-Kg^g\^:sᦫ}_pkz}KI~Oޜ_#qycC?Q'rgztWNtzts?H{]7\ҧIt{\z¥OίcҭN|dСNJ|9:t0L,/ts=K__^19B|>gR?.o[y3XT}zM\v5zйؙ_oԝ^xtOy޸['ݲ&}t?Gҽ?]:NzM3֓tUsl_Ky5t~VNqs0.5?.~|2^8ҧ}3qzxRxQ|I~<\z+spYuN~op]q9 y3GG5;c\zӡ_uT\4e+^S][^+.LwnŘp7̟7CvC?OR>i_SJn.|V]9:qptݝ.]wK//ʜ93>:sg~~t}]9}_N>9vx2?wfc8]:o5g֥rGg^x*c4.<5Μp;oίß_1_K}}kBnzͦzͥ+gz.}tv|9^tlaLP/t?ׯKϣ.}uþ7ץ.ޗ$˱*s?psKםnNn.mHq))Y! 
GKv:}K~`l_p}=|ߕn}#t7=e1_3k׼t^K֓3ť[.tҧ} ptmS?uї.]}gtڋ}6.fȏK:~mj^z˾/q틠K/yХЩoIgq5xYW\o.}\:^sq֥>ҥfP\?sƥ֥.g.Ofnz͢S_}_.Gf/tߺtҷAN7]ߓKO^2fld}D->gA]W[㔫>gstH~>'<99gJgN\ѡSwKω>/.]wKwKt8uoKϣ GԳfY.<'uút.:t|$:/9.`CN~7qa:.3^׾M[ԥwy鍱.1ә0xǥCuҧ.[S:]uF/R޶SVy 9uҿyKtiKǍKߟWs6;xfا˼>:|[Nk:t"t5q},/]nѥOb<}եWn.}oKϼ3wt..=ÿ7ӥ:st.ˁKw^KһΜ:qfuw9:?Xҗ|XwK+O:u1u|><;gȷK}>밮SuCty]K|+|`qܗy8Knb܎`o^:ӥs߮þ.5]y/7N8dM0{ss.=׏ϼ:;ǥO]3yGެIw=7Lps^:t^ly#.}Ltju]w>Xkra|>ҭKOK/>;]|'׏K?gsXyv9u|3w^s a^[_J>3s>gtS.=_:;׋10^OsNh=us/rqpԹKnD%qݜ/y7#țufߗt鸙K.å>';_ӥWy;Nt3KY?.=_>b}}ttRt^ߣocNQZ}{s WSwH>K3.]nkgLߗ3g+?.: ]uHaKfͨ3Х7qžjQa^z36:NC~_s:u>~1.r<>/vGם.w\:v`}8]} t陟K7̼tKϾsKnڼtmӥtc99:]Wt ..ƥ[K\zeltһyfsy}O8ppK9]ǠH_sf>ľ'}N̝W#>;o]9WޛLEnac}QKoU҇yۥ4/}mǎKu:tugץ{KߴNg9 } p n] .wqsUwNk鷝y:Kv.ݏkD\zμt\2.:h\zpK7_P^uuKwN]Щτ;Ƭ_ǥ['KIμ~O^;Ϻa]n_Ƙɏ(޼̃ǥ7sҋ{<⨻nq֯cݎן.=q֥gK|sg?.=]. uw%ҽKKo>?{=K?g_y8۱O;4/^ྯ;txKg.q}K''/]Kϼ+uaË}ctb~֥]sZo3/ԩ_oUñ7̏Ks7qӥ;A~.pu]z˼CЩ>e<t~¥KKZf~FϺt/?tc0g~:k;/w7_-cCqռ5ҽ>yQWK7}˱W;y8Na<^rq?Ϲ:oT%:ާK8w^}t3ӥ.ݲЩL>.:O_GD楷=e:#{d:?w;.kt: LW\c\:t>C1]zُܾ]UK/\z~t:>|7ҏqsM-upۥ_:tg:upۥw}!O:K|pҧy?汧K>N>o_qEN}u:qn_\z:]_t݃.]z_I|?KqK<]Kϼt~|Kx lKZ]./ͷztUgl_72/=O<^q>#ťtһy>1ҹyi9qa4.]7y麟yg^z>\0_S{d^бSNútrKץͿťk;sXe~:$8i^2~ٷ4\vϧ8y8]gnkelKwcKm;Ǚ<>>`.tU6:}p׸tKg?]>Ktsq.tKwLn+_5/}.}89]nKA^zٓyҺteuG>:n.{=:Xȧu8zCOd̗t/pz]܏եqϧ>tlt9e_p/0>3ytqP9]zGl7.?:}g&>yևKop<[J|k>Nߗ3:楛Wd&okzͼKo:לoqŘ>|tҥY.~dOȢ3'֡3>2o#K2/S~.}ryUw~~HtOKK_>N=IuqB.֝Gf+5ɷNuG>8Kw>y~XYs/'۷T.럕Wk^}Wqty陏N= n;sc wҥle'|嘾j!ίɫ<\z\[[N3f:u?cIxS9Bӟ_?{>.|5^39B__.K?K>085:.'^0f~=gҩ#tίV:sXᶫ]Nߗ n=v{!yH^q/K'<]x}8spp}t׸Za3It8W>5'ɼt.}?KǫK_:aeSKtԥ Kݺswuo}bLf$za8$tnjGwp|tE_]/̏K7wt֛שSs_KYI_#G{U|zΙ_9ȼz[תC`ҩ7ѥof:}_G4dǥ{$]S_]tǥKɼtr3/ӗt^9^uk;yoۥW]99Bjk-SmzM=ґ^t}yyטC'G^]/9Bq>ĥGj_q҇3^v׮#^u|kk~y-åg9}ҭ[0kg k;urp_ONK.םN_ҩӇy|3^u_^7sn:b>¥\~mN=Iթ stn^._uL߯Kg$]|K> mVt©W=DqǠeSg\z.}??snn}.K~oW_ӥ䥯?]zab8&\`ɼteK<c$]TN}Φ#z1ߺÕy/o޺ua1QW.t9oy WmSbLx楓>'.GS}瓇Μɧ.}.uPtVK7UoX']}Bty<>NáS_GDt>v|?.pG^zqtǥ} 7\yӥ.e>gۥ:¥[K.+ҩ3ե{BvE46;u:88gvG\>N>xt>a?50]z\ާKo_>b?\zt/<.=]usKƥйoCMM:1ťO3/}N'o9c7o3Cξo\:yߙο҇N:j:>_.餳f3.801'9zV^w:yq~եӏ/]Ko_\z\tݫ.}y7/t 
3/ݾ4W]~ҝu󛗞K]zq9;KzKå_vIqz3|]>ʥ{_-]:n!]:.YKo䅧Ko:_xNz>bӡs2_Kw_%]:u n ]:қ.[wHzw_ _tGeVa#=9μ}˼n_tt.#/|s[?o^Μ9t3K/|uw9:t|Ko?>Vq_q' q޷N7w;f~%;ovp޹ω[.Y['ҭ3/aWע;N'bS] o:Nyǩy>.]wq>ԝ32K|z]z1[utþ/w:b\80\| ĥlߏtşy=}xӷ*ҭԥ[7Kw?K'O=}ӥA.}[QcHv97]ٯ?]z潧Ku;/:]}2/=Wӥs]z̯g_N쫖.}1_^v[K.:6̯ҧ^5.ujwN'Gwtpݾ/QwK?ѱ[oXGlKvS3uq2/}W |KSҭS5/]]q#~\N\./K{uűys1qe^z:˾/˼S2o`^ӷkӱϯOpq&NQ7tѿK|Kw}.]b^N`祓ts99]zS^yy3 틂KA?r}D^p~:CKϺ}\z:g\9^]z_SyùM_KOLJKO]:yܗ}6pǗku8.q҇tK[/]r1['^̛:3?"u?tҥ[Kvqԥg^w>uCgN]D߲.0ƥ~GTG8xۼ.}^y8q&N!ǰOoN#/?s%~^NT];Nqt?~ye^zpKhy\:sgtϷ3vp3N瓗.Μxt鮟Kkч#us:㬋}>.=].=?uSsTn]/mYCqa\5.֥m^zί3.y8 :u{UwK_K]Ett]yKy}|]:Kk8f~ t7.tҥot}_pθå|Kw/]:8tb+㙗>N?.b]zO_.>߾jxt3/8.nt鹟8v^zal}bl=wvCy鏯ttt~_8l\z:\O^ut?ӕ;NƸ}|׿K?7<c3/y鷯.yKХGVyvNgNAӭ[O2yN_cY9,KqKvԓĿ]z2Lp_cỴzym~zMuS3_8⪋_3?zx<5G,K'/>]:n}ׯ#O~3/;f!_]>^S>>%t].v䯧Ko:t|dfǺtuҭCץ[yԥS_K]zϧ$k^7p]z:tyu˜eksl㺹1qtMwKo:޾ϟxK]'btҩ/ĥ-.=כt楻>1/~o\Oʮ+S70I"ځLpf'Ɓہ_NH %{^j(2o=?[G7WNޭK \zK_n]]Ÿq/}Yv9Y:u_C@u`K<}"NOK2>ϯX?<R.L:}pԙo_O> ³_zՙ.=.å7^Μ8?Gtÿ/]:n|g#;_1£3o||MkӥOm]p_KQ9(gx5~c]q鬇ҥOyq&>s>q>N|ӡ^(:t?:t.OyJnh]N{K:v wqץssեf.]7K~W{p͙Yu>tǥKo:tIn_Otl>M!vq}үO.|5?'i/ԩ$o~Kt%<ԝ#.It^K_8./M>7]듣_zwL[3cKtzk.p٠_|k_:`..醳УSgGWvǥ['EDCեԩѥK禇h'$tqK]/|KҧgZtOn_]tEGQ~հ_:}ҥKK?׸t>BUgNtcINu,tȧ1/:ҭK7$gw\g&|t˾hK.x.}.lNݗw~W|ۥktϧ.<5 鋒.=7aѩѱ[1^#d](\NӹG/ݺ1tv1?;xwS?o_G?7Z˿__w;йtuv7s_^t\ׇ ^&ù^-{UMmstu8߸3 ^Dz_7n8~xqez?'M?wk87MkU.%{p}յco#^_^^لs}^Mb';X77S' ];x/zu8.׊Щ3ù7c?W|uq?7n;28:/3pT7tz-T8|L=7|/u޸_ތ~39V7n{?E^U]U8~~Vte߇pߧpx1~{~oܴN~8>q._po=_q[ѽ1?,On<Х8_ xo{ 'b0^1N^&kGĻk8pupS_qz|8K߸_qÍ v_:*q3]b|3?8~|U=c/qb~Es_u¹Q7${mcg}_~w]c~zk4oL~u%oί} ?px|s'[~w݌k>sW{?Wqot¹?r3νM#o<^Ϻ'?co;^t̯o돺=WeWuܮ}z5Οׯ3؄s/=G>w~{\oϯ3^qNG~z\q?Z.;kѱ_-1V]vm|{-ùY>/k<s3*]+/~-۳'87?ޏ:{o]ùח_٧Ǯk:#7z}߱^5sgo~]~_¹} y{]Ysx;{]k<4sKq?~+uFXڿ'ֵ?qKߊx~:ףyO|t{ p4s=7~t#W?~f݇pO8zaLνFXoot#x?+֯p_ƹzx>uU}¹s} Lgggy8} ~u=8?>Z}MN<9&cqqsx W|_ss#p1uj¹3];_|k93Řux|W-W|ùpq}ùw8wϖ.8{.ί.Ƈpq2ztd~:u5 . 
}p-Ww8߯pW̯¹dLo3.Ͽ }|׸~¹G|#{?[:ׇ{$lw1~8nr~xL8w|8ws|#kù3ν.׿];$_q=WT~8>דz?=#8'f'Wifs?:wjpϛ1گ9epzf=;p|q y0Y 83똄s_ {>|;W׸Y' ^sv֯7h¹׃cqo~S8}8z<_w0{?_Kt~sg]6Wߨ#p{ >87=~:N)5|y^:l忟1^9]gKYR'`x=ùpίp1fk.~빟?^.5r~z|I>7=98oW;{ͯcqj5z?Փ{,|g5:+#_x2=Gqǹ^9̯79sp1^~3t7=:8}5u|/ƃqs%{{58Jk_8ǘ+01"_{ޟqscY`kƬ_yz: KX?ùKS;+.ӯ|M]n-Us~ĵ#&_s;5kXWө$}0/!_ r+_#_s|ONf׵8yp+7o}|ͩ"_8uw ЩE:u8~bϋ|M[{錉q׼>dƹo~,'/2>|z<߿_~z?2s/<_ >s]8y3~6tć:tܯs[spse7#wC'Gfx\?¹zmkz<0O?ί֙_txps? /ćsx8q1f~:uWtP_u#u5[kN;8~qϹ>>3\q1> s;x~we88zq3&_ϖ}GtXs >|Mzweuqs|U{gӓ3sSq}߅|͜o5~a8<~75/|h\z q4{9NӼ]z_zՙd?unyǍKo_;$.]wKtͼ\{9g'OIw~s^v+)ҧnGYҍACg7u}?[gWqNu鮋uuBq+up˸tEҥ_83\z.q{JNFtҋ/]rb~|Nn.}R/O>oK ҳ_x9-W p9qK!Gt餛nnUvp8˼[%yeotǥۯug<]~~¥.;֙#Ι<tc1n.t1oKoۭyҭ]åAsҥ{"uKׁ> d3UCu?צKtE7w.8.<]Ϲs|ӥ nLny<y<~9ǥs80EL:qΩ3g6?嘟pK:"X'Cc?{t;:1<7Kw_V\zuѥY^vti\:qtӥts.ɃIԥg qNangKq2ҥg.۟.}PZ.]WKOqeh\;̋Ns'N3~>q[~븉s6q:S'ii]zå[gq7ϣۥs<ӥ'9һƉscs.nһλz?=W`nu$tCGKw~Х^z n\R1.}}5.8qβXҍ ҷsts1.\:yҫNc<.=tu?\]>Y/LL;6w'i]z8'4t6x.}XJ>\ ..~|t.}fgץg?p\:tCKN&1O_>bl~\tӥs.]'K7_nqN~qN_җys+- ۄ֥w K_wť;̋5‘܌;LN3pKWKcr~#.K,:st.:6җyt+utڤK7>ι~-5_q}2|=u8?y:ʙ_c'o]E]X{|tGK~q:y:qg^.=.}ӥwn7>.]KK_:f\:_]2oLn]tu.޶SgƥO n.ǥga[هuL<.?a\).=k\yu|t"uYWK:u:I^b8bݗ>ퟦK7on4]z#w<$.}t%t ҇KѥSwQn|0]яq:qtuٿNgq֯ۺp:t}ǥ+ЍNKwKt q?zi]\uuusK.=쿾lt}Iwu۱.w׈/?.:bttԥ|Ko:c't>Yg#~ߴtvӹsK_>]J~d曱KzѥVcӥKϺCts}K7pԉǥ;')\HKn.=ׇ\_ӍKO]zK'>5O꺜.}ds58]uSqKw2]zƥg]q΋n.tq5On::uU_~:״nvo[g3v]y=u Kw9]5\Xǥ[X.vkq&t鿏ҋκ~/O\z>ҍwu쇎~>ҟ}t:O]z:}\:NL:cqtҧuqYNNw>;k]z:b|qqoҍoo.ZWU7}iGf]uWk⒛תk']v♯>u'q7sqrySNJ=Wy;Dtsa|ee&c^ҝs=]['xt^ť҇uzq[Ǚs__ҭ۪KyDnN$^_~}|/җK^׍K:j\zx17KtKt3/G.!]tWuj%5q䙯yέSp1k8\~~upw.}d[wNݗpYt.=ݒ.%P8ӲY>KK.}_oťj?a~u]'_sZ 5Eq.ݺ҇.|é~zOgKΜ>7'_8.}ї!]zaI~ubs5K.xO_љ҇7O~㎶KC'%_g!ukAե[Auuժn::s_y}7G?y\qt?}K}<~..P~x#5..ҹҥ/ Wq-]:u|c}k[YWMNGK^Kt­Cg77>.>KKvSyutח̛u".=x3_OߥK.]]>o_q] nݗCg#_Ñ߬KױnO~.=q#כq;sҥ؟c={q;K_z'}Rӥvqgҋu~\/KNAok΋$q>pKj]tkӥt>t?t鮧uK'>.ݺHtGuy~u.oaי36ć/[NJkӝOJwuS\~}bs2><72ҳ.}ӥy Gί:2NCtiZթOrpҍϥK/.}X>ӥ:s~K#]u~t֕ҥSYwkK C8n/qt3gM7m\c^N8'nq]V{tG5g3ݓ7~g}0]y!^3OӥOeҍSK_g'Ik;y٥[wйcPKOwNrL\ѵ'q~ԩ'<<>Nit+K/{|E:]YXYgC:Lw^}] 
pݱ\u3/q9I?~ܒ7ѥg]WzKz/zZҽҭWK__z5smØty:әg~w70O#o޺uצSuC\y/{oףҳ{tCK[E^WNz,^?i~>it]:[߼=z֩Qc].ҝtХ;Dzu9N}^u~sOn9]Sg~_w9yͺ/8ftsΘ@ѭk:tN}|KN8KouǸ?ҍkftҧݺ/:.}n]qΥ[k~sCgN^??;J~Yg \qtǩo}헮S~.ݼ엞uy1{aLt>sN9y:}_:yu-K_.~O_zӡSszuw~ՍnYN:n}sԥ]q8<~c<ҷ:u[?\:#ҏn?ߟӥ|.:c{xytGt].}Y ]yԡӥ/ҳ?.}gz3/}K:scz}-~'׋x_qͼ~eԥ?ۭ8/Ƭ_:CTćӡ[ץΉUΤ3f1;]XԩR>.::һ;\z[WCctyp7K/~K_^|qg֥ft靼/=LrzӥةA?~Y/ct3z;Nd>ЭS>ӥ7s_::V?g2uԱ*gZ?.nNN^:vRgañ/ :nzĥK?;ć.g]̃W]y Kkav3:>҇G\-Х{ե5:oe|8qoGLݺ/Ƹoݼ:.ݳ^eLK׾ks}ԥwK~ۥye{MЩc]mtUN|xO\z:/=엞~c`< ];an~ԥ;Mn]LNS~B|?tEӥ[o޳_z8yҽoN˺/E~QGB֡/]_o.~љ~-u3qp\K7藞cts>.Klt.[tatחۥ~_;'0}^ǥ>Х[jC7DbҥsL^<].\7.Oɋa\xu~GxKonZ[>K~8fy_/8:Vדt3^Kc\sIpMCN^?q.?\.=D/?|\..Y\׈mN|z֯88~niscǥ:ӕO:t~8ҥK7"]cҥe?botպjt¥/+u9_m'_3utok2_~etB7w]+_G~aKw?~.~w!=].IGJ^g<.֩7 c_ҫZ<ܓy~ϳPS3 ]:~rZuם}}[W '̯9~åsԥۍ-N|NaݧKJ~q#_33R Cѥ^>>"]Gwt8.}y0ⳏ./qytaukgԍtuN~Ǯ_c<̫\/\?.]K7;]e/1&>tpt]OnSKե:{Y۫Gqפy 79u'.cG+unұ~-:u|1S'W>_ǥ['DӬY<><3_1K~ҥFu3_볟_3Kҗכҫk8֫G[Н3޺sϱ?Wg.]:K:zētkҥ_A _/pKס_t>g:=~kf~N>BC~Wѡ'W]9wǥS'*]uv1}pG.=,ҩNgt\mq?v~1uK_.=_c~:t֯UN~Sӡ>]zoUׯ9c/gd]p;Wӟ׼~ԥ~}pӥOĜuK>BǥK.zῧsљXwK:t5ԥ[gdK>.=Gcf;'>Nҫ>5c]9$׸o>t}yG.}sf~QK_\5#_u99Μd?t✸uXե&yG~~Ent]p>.~ۥS7 =o~X)vǥOvǥ{}so~\3'n_3:r"ǍƘy/qL͡cg v2:A=xZu59yK>.~t\ӥ엾t/=\/=~ ХgL~MgNҫ3A9N{߻\q1ܣ.{q܏nnz9қ'.pJ:)]I~Yw`S\p|/z✸~ҝ7tbҥ;.y藞/oφg?ﺺ~N>baynK7.ݺ 'K>ci8߸I=tίs~1u.}lNN׵o"dzfסs=:Oz:1ǥuKq꘼?sV:yqӡ_>..=*ѩy]zflu|E^}8Gǎ3-_ֱzNn^.}:]1{W5qǥ|K.wKKsK~39?.=[K'ι?G:u~_:v~uvt9~68]A.뾋u_Kt93I['9Х?jݗq<:2|DžKO>t}9qΡSgpؙ_.g}_s3;Ϻ!ҩk.sB;-?oE{KK_‰s^۩اiGs׎K_Cqsֱ2oKoWμ.|\y]`ZW:Bәu.B>ɓСSWm.}~y֌sp1ί8l\zg͡c ӱ׏3n:Ϊ+o8Ƙk/]׶㼛yqsgK/֡SW-=yEw^]Gt͋ЙW x֥OY'utχ~鬇zy_9ü~5u3OߋK|tOҥפK/UϾYUtiuDix\.:8\]?v1.7.{:Vq>un9\z/eK3yt﫮\uӥG_.=m]엞.~uguU:ɾXåg.*c'NәSjG|p1k{m.]yygs=G|c_[K^o0u pE~һyK߮XW]ӥo^uKu}ӥKϼGtK_:uY/|KOgҋc:.|\zcnG<:u.k_:q#_z~N}^Out^ ҳ:y:ǥ"tu)\`?A>&׻'Kn.=~;_Ա_N:]]N \2حqץp8t]ǎn+]uvtSK}=\z_Zӥ[@^t:u֯Ͼ[w+v헮;'/ԩ.>Lp0κ;~]y޻_:GdtGcgu}\3|y忟78.}K/=ҽ_uŸK..9.<_z:tln.=]xeuԫuK:;KcG7K'p1mN~鏯gkuL￾\z]S\z>ҭK:g[7 ]up#w~u?g|~B]dKt/:cKzƇos ƇMlaOq<ؼKҳAt'.=O?W]zn1ԩc:]utEKo8d:̯:]u_pt/ׯ0f.=/u1.}֙_c>ҥu:yV#_y#:¥K^Ot/.=.}XGQy^./=;KK5KtӼs5]u]p-y{t:uq./'v8~fRե3'>N 
ۖ|[ҽ^p~^ҳe?5~kѕn}~u-uLa<ҧSgS3]9uK3KNn~x=ntvt^uwſί1ҳt~wӥc\ԱSz ֩!NuJ 7K:#t8=T]zׅgpu_<ޭ;wS?0Kߨ//ҳ_kաӧ8]yj׌3s@ҥoǟ..tg/xkљSǪ׼.yq~Vׯ1֝<=Rućӡ5ućqG(c`L㙯[컱tt#æ o'yå_:q_:twyb|ts~s8u"/:p30t8}Fݴ[g~W7>B:uӱ#=]:wGtt95]':i8{]q[Ƈ?.:#ۥ:u t֥JN엮sե_?_:vI}Ytq|os|y~KNf|tMw몍K'K7=KhRә}p_zY6~r녢['_;8=.ҭcAåるps7S.wݹ]c\['$sGKۡGHgN>Iϯפuk|͢3>u_:S]z?KqefԹХgt}KN>I\KuetŕEԡ^=3~~ޯtֽХg?aݗK~;^K7_3ӥ뗲_ft~FѭEW쿾nǥ/.,׌Cnd:GCԝҭK_֙ҥyқ|q5ǥ /^u֯QnƮ_grkKN.>st___,#߾7_׿IϿ%${{OW?~Md m^l s/۷kی1^ iU˵Yc*[4 [i ܫ~N%yz se?ǃ5ϋimv0ʶ̽& iaE^zM)aJ̽خ^,=¦U sv6-̽6^Hj{`nv~bv^&l^#lZio's/2>{aa"[k]}Mcw2";d0J̽ wMkciܑ̽-s/{2 s/>>¦ղ0d6Ea}6-ɬ#lZ#lZ;¦U s/ÿ?¦"l~{=.zMi=̽v|M v{l'ar⃹s샹ױ_Y4{Viu+*r{rTQNy8#jZ W^U(bwX~}DMkADMݫ#jZ;ٝ`$Vab+8wfxijffC{=Z_l)Fbk;{-ϸ]Z$!N^%"7[@܋)L#dZ yKܫ)%jk{mX;w* ⾨p_;{ɂj}{1?^#o׷#^Zj~7Bkۋ^j( ^ͯ÷Ҫ·;~\@Vۭo <,nK^oxK/OJǿ W.,5GxqۋbD|{xi}]k߯t_qso_ K }KKϋp+5'oy{1=nP^:¥t xO#t{# }ZBZFvBvE=t{5^/f6إ*n/vEW=0N^bn/vGɨۋZ,tn_Hö/m_TRR%ݐ=/ de{8-! a+ۋΐe;d{c}梕!xCׇo2j&/ _j/d{5Sz-!ۋq ^3"+;SBת^DG+ )+ۋE9lߟbd{LhlvDR*z+]*Kv^}j%d{QN!kv^.MvCbG%d{+J :zkE~^(l{xkG&xUޙ=|`{Z^-$l/<`{Ab&`{i߆ l&`{)nDh%_2vCYJ^@SK(|S+bN|#wW,y̓ƳPݐ͸Sn ۋvU a{=\BBbCY`{!.ذ 9M ׺/µ/}*[P]bGGeP{H@դkP{1)^KwJQnQ{9{+u&Z@դwP{݂ȝ5BګԞb3?P{^-:jBP{b]{e@E j/E ,"^AŢDũ5@5Qqb2P{MګpAtEul*fkj/6WDբTj6P{.@u{ZElo2 \Lj/Gs+uqG|=U]6:V_bs@P{fګ2ލsm"}kfa+9BxM{/=i 4}M{^41Ǵð8]X&`ô57`ڗ%/#ֶpL kp_ehz37x!e@W+ +9CEF^z%T^nh/N!ګpZ^|S]Nο/D{"杢=u|@&6?5n~Eκz"BZuXQ!D{F4Z! 
ўVD{>H!KSh/D{!hϛXan]槢T$E{-=`)ڋARHh/VpB:^# ўw/DY ڋ/h!Kvў#=cXmnN^CWC^-h/@@/Zs1<{,S}\2^3@{`=oBB^6BnnUAz9r!wy-OSG X` n>^V@r ^,Gff5hoEKf&P^l4 h{nЫW)07og̬3lh/<Wƫ*#ݖ_d#3Ӹ&`' #3 vS+ēNjg}> [̑q+xԂyًsoꆳW !=V&j ފ73^,g g˫L'[8{u5-28{'G^kG^fZ^^m#/tbiP^j ^n>/8{6ޑYlSg7R^XLۢG^f3ZnΞAt8{5}SDZj|@^f ʴ̥noq_V>%cLDWsAg^yV@chu {<#-f/>n^[^,^f]LZMd)K`8>2+w/1{ÀMs쵉1a}e%>B1fbM#-zwe)T]^IdcYI`JXOnLZ^oq{ex%f6(f/݊{qLz"揼Lρ .5~_WY/fZ:1a݌?2M"Ef7L^* ^8=2 .ueD:^'fwOR:1{Sٽ T%y-.yQ쵉I$#fw)'f7*ftqp|Ybzo>E fKI$f/mcsbJ>1{m/^m f/bd0{o~ fID٫x^(.fy){vՃw?kmҎݧ11[bv@ד?ڤ^(&f/jH̬~Ŧ`qv7եuZM^&-{2kak,M̗h{ΎxR ~;3~ewDJsy$fGֲg sG<m"$.H̬Y`guI̼YX2 IURJ){Y,'/ز̤evڕA٫ [&dwCY^|dY Ǐ!wyx|c.\@:H̜3Ma& Mdž$f#d׬] ݃3@q3- d-d %rBru\ݑ q"dBDRyz6V?A-y.Aw|x~6H;S^Z)w%7$4Zx^~腬Ř","مK>qϵB:κl'<7/B> p3x|7>y/-a8sx2UHNjN\J09 ϵew˟NwmjN!),oaf6x!d"4w߱ |L3~vx3J Vr|a."!%p0!W>yϳ43g W|="oj[y3gܹYJt]LS9;s<]{8 FlvWJƘrԌOw^ތ;μ;ϳLRs@ϛ65y x3`n!Y=^xN@$94mKx^x(vs~ްNuΗeݟ63$/5 ~Se'i2[1-!;Jy+|M2MkkTMr;$mNԫ2韌{L }Tt2_a緭axPfbdbym[ymd Kv^擝7Yy@23Ɨv~2)"@pәXuQu~on"j9\͏:8R*Ƀp>ԹZy9ʸ:Pux:"YJ>.TY uZ&cJTŽu^#Y9syہ:"_, ؼǝjl: Q@Cx2]8GtPRΪ TyoJ2 8:Gts+nMo BvRc]PgD񒑷c`ya3g1qq~Ǭ8?<&+guL[e,\ "0w:GYAts|dt3n?L} ;ԋ`F^C'S:/6BWo΋PƠ[Gf 꼚W:Q꜏[un9չT6zFlt~I5SP@n<V~[M+XmpFM4V/o57O֜;_U)ը.Zy%od0fj*b:< Nu~mU ]GĈ0G]PRΩ:w#Xu^ZjRy*|9":k]fsb1UbM7*U>*:7 u^nY8M7<=꼊P/']Ջ]aݲ󴱲s(Ϋ]eDd癳 ;UMpsW;Ϝh Ow_!y o]9[5g7Wy^o}VJw^u.DFow_&Vw^Cu{^V61϶LM~=/Zg]a=OӁ=O=iDK=_^s9=d}Kί,2_8sܯ7,tqK]\*OM߬&oLO}^G)]i<͇ܖs-G0fӍ K-5$`Ϸ|sjSN\}3N$>bi!>$>&}))V5v*>=,FJh^A4T- ~>_:4x ץ&GǣQoW4bb5oEAϷ~}[/yWHN}C=ܴuGgz~̍sS>/P&J}^-[>ϰ<%dsPrJyrpn7}^헽y}3>WHϻ) :c ǩ#m}nE}n)+_u`Gnv^iQurLw7ebLX{HTGc}7"}+ye2UC~ERw⾧>/ݷ wep@?us[+M]z>} Cр2d4IB$C{ϨB2׳VfFFyͻ8.ڈ#5y?~^L̿~npƇ~޽:7,+n~xyGIyac|4UDN\/?(}9 ϛ]q9Jp*Ϝ>r}yu?|t*H? 
Ty씽Nyw^_9?t퓯̽ 9SY7s+p潟9wN]Ep_+gi 'q~[_&@ovߜߟ<N~.~a}f_Oyf~"?loV.fs/#ȅAy5}ğܹJ?N83MCՍn)teO>3ǟ?/>6=۹J>xŸT3M(NIq%Qn~ԙ>>Ns^O3^vviJJ(^Gn_@y{:)6uR qթmU0@~Vtۑ˯gwxe+ OO/x,a9ՙk=]G6*X#J"oҸ~8V-P#hG PFS _@.q3s?y^9somJq H:aWb2^wVB[|Z\M>_j.v+}~- ss&MWgK`^ OqfWi;JO05Ɯɂ=+E>]۞_u9Ad xs4Uu~i.y3Q G|NVތ=_kti ^RtTL[qGV9귪O#Vib<ތ{U볊^[VÒ+9m \|[T*7m|FUvÿ>7@|n`9Y-Z팙LQls/nm tkiMyf*-8&I(E##=o@~y'6Ř vz9kSu6=7gz)4uE U#ٛ=KOUy~ @痲|؍LӀx* Aݧ!ß'<Ly޶,Q\fך"K}z~|j'ui[xX|,''MLt<.tv]Aߗ9aߛ$qТY^:Ra(kE,7 mrc7CYs0ccu7ۡ6iiY5/>oËksq%>WoϏ?\_|0>"Ͷ;s}Zkv1:G#,o7G|ny#2ϽQ'םy>̪:sY5۪.'cZngM1:48ޙ*|-{7{gqSPŭOv&czÖ{]Wj =i7/i'ϡݵ;7YCgq+<'pSe/z~H[i!ERQx15 $?[jRtv+ 7fqݧf߁t?蹝/V-n^>Jg8Y=g5meK2gvyuA|DŽ$ D;5tV7kyPs;4'8]S0}G*3J2qnэ~q1ε mvCBϗPyROUwnt(2(-RJIrlAH`㕓oy4' ۄ˒vߢxp4'f(iKynt\ <#&WE8(MS7<yަoլb<lܮ?%KNyly~;ff禁)ύ@۾#ϻrNrs,)]$su<ly>,YfLTj_y^mZkK3^orQN8T˞iϏ<7Iyn}\e9u znAN=⚿yR_cĩLs3R})ǥ^zCϑ6(UA?U4Ƥiayq,zA@7Wޯs *wy dX9IO6\++=g]RO6??=5)zQzn}Lc6ƞ Fzn^=Ġ)|DTLw=g42q]:#XI\:Xۨ)'q)7ƞ{2\O&=c칾 zh^|I㧝4 4ZAe:+ĞM&Ugp[D{vM;}b{8 ?Qױ'c+ $- 㞛LMU圓{~37̵+qKINs+֤w2=?5?칓My#j+gҖ]\{k`m1YܕA~nF>wm>׷͜ܞ@6>}M|n_!3}n4|6UX}Fۦe@|NQp">1A|Ͳ\&>߿{}n1 yqڦbs9x;u_vysl|>voy^TU>م볋SxX^_Yn6>~g6.|dX|rG_.Nm'>*WYFS>'|n'Ϗ=,}.>*3v4.FczЖ5 HʢK?{n.4{֞bZm\ʼy^AWzn*=6zNW.5ꚥM/J*C)(zd?9|bL ߗ+Šm/26=~Wwp q kRk.FJu6] rWׯ/=(ϳϖ~]VUgJsǦ$')cAltJt ϧ躕tMCOrx+o?Tg#[X?}zUg^{wWǞWY݇-Tg>W M>^}nZp[]N I\kNyWe7t9sS>]jr *,^@s>׳ϭUՕoکN_[t]+{2ܚ RJ돧61x~1Gjή)]'zN|j3e՟o].>RV2PP#>:ck+X3|~uYZUg.hǚ,Kɱ52tz3-Mz~cu  =?9??I|yŜ|5㟌=YG~yeW0P8wy2Xjno)=|J~X¿R?6z?Y[\R^i:ɬ/Yx*H%߅9rv))Δ"_2;ř@y2\թl1fuJ-o,O# ҔKZNo`p .߿sg*՟+Uj:ҊS)=ﶡNRz,0|-I=yQ{4\(z^VIY{>>>>R2MRһrT*=fy可ܣ-޳|BǗ;9e*üܢ緔si9̿z~=oRsfqK;c!zNâ鹩sۢצJ9?oIѡd)8m&c.i%?rOW’ŘCϗk~^u<"-MKs/y䄞'6 n|q >?Bi_rI.$'G?4X ?^fSc82yƚKh~b1 s[%)tSfy5+ɏl|?\|>.' 
O_BY4[x>6D 緇5o-+ y,=..z>}O6=oRFw=wqyQ彸sq.wKY\p7SEU{G=7{^:ӳ9~󆞷tdrϗ9YuH gES߮ +Wᗞ7/盞ś|>#nϧ9Ϳry˅?s姧ñ|^Bϧ+5!jO[ZS]>͠ > }^s">TV9mՠ>#2hIJGv1[NGR|_ZwOH>Upl8A7Ǐ>s>9[=,pxq|ʫ&Hx֬PΘ͚!g0ǷocC}}ӹ+>z{{?꽂ϝZ?5=`ٷ{Dxa8+p0jf*Cϭ=G굒έ2loMgL ׽&r o??Iʟ[ջ7paN$_\3?oViXПGޜ^/k;k+nz*0ކn[\zsfǯTivMTGoN:›kÑ;yi$* @?X-n*V5^iu|8 nn,:KL!< @9i1Aĺ!t4H>$֬qZ.AYak!݈- uQ~> c/NeaH*w Dto_zUC/}>$GrnčgDغ!v)n(}}[AgM^$ ?nZt Ao\? ])A?%k?t# ͼQϙoA;<}qAsɦ~%B! 2 ?OCι^sZWo#\ r-AoFI=~n,nEЭ O=Vcp<69os~s}篴_n'ތ) AoFIЛK=nK :WŏV%)'%KЛz T $%s/M~ DU󸽾^_IyK+!YկFP*At[0HA/r-ϏM̿$A=Ɵ>,9ߡCЛMЋɣAibnv7|]1שIB]PCzW%BP?M!CzY|̙V`;BeNy*tO(f BWیa5Wc0zOk"+*u߿e!Ps|,9X^!\N~ѠA7|S^wgzoVDL9Sc4M{բY;c׏A$VZʷsZSrնRO^oeSrS(}aqsoNvyslx^ɢ畎A_~ůvϧ-?r /A/ 1xB~?O?q˓ِOAStݼU sHнٕ^ɡs#Zbn {E!!5)ytt!!2""F i7)vA}?%.@)Af{Kk|YɟE lAoE!1{Bi;}JkKW|S+gLt ޽VځTM5D?]u/1 GO tJ?Ƈֿ ɽsǛs:}ޤcKIu׆y!Ы}Bzhtj, u=?K;UoyqͭNzҧ`0dZw&?w'swn KZ&Oyx<`۟gw2ܣ_Nb]ZQ2Kj ?oCW9zzs&@Nz0];jW?hfoWfysօ/=:u$cZ O+Ȭϝ}:\*PJwG,"ܺ 越xi-p= ~xkωH~4CSD>D^O_>@wl=hݗ@xeW6:}lÙ7Uu65i}kNɕetb].?O!&>~2w|s- ),Q}Zt Ϭ:8^Q@:}+|[>s+ۚ);>~~m~*-@w*@w@xnzuRWog-iP@|4ݕ "[5XXBK:@,X.6uF*ЍUW6)SVy7]ֻLSN> t=*ޝ*JF"obM;!>sZ-1}x}-dN9Ӈu$SNDcE膰7[-7Q:%?X7. O.t7+1~x}A1D_>@621.-[l]s2 ׻_U5_g"01}QFyqBeΦOyuTv5Q^?/|њy+CL:㯁[qok]54\Z4sVʠ1nW1^4L-*S'mS_\k[gJ  hSنih' zDӎ dkWmsc޶i7U#gzi=>MO_ϴ-є??z.}h),>%2 CgUW sC'_)`ǭ~;dҙx0܎!2ayْ*^)/CwSQ%0a ih}5"0cO{5;%M9{I u/ݖNf}0a^yzCwsW~UhvW>i43Ku~}vUw? Y6q_ }QczUд+VG/[M4'B7sVBPZmSO!fzhVz*dN)bxeb$B_}>&Nv!tL!t6"tBF.]l]u?Ǚ{ۻ oSǿ_? LOr_}㿎?WI{oJKw3DZ}Q7.3s@w&ckW'{dK.!t~WH;-AaCYuQ>Kwqާ]MPNjƐϿZG}Ӯo ^m-RUѾE.vN;Zk#/*Ќˡ]>ѻ:Imaq/'߅ЮV7ƪ}H~{9?w t(W$2JK>w֬_{W?.~vn6v_xz_R}zcGjk؈Llg7{w!*~W;s&}ѻٯ~R0]jhNOp0zgm\ލDSpvcJa_~7-~g.bSx6Ͼ=Y_?ZF7U] NA9i{G+vewQtQАnry&ۿ+yg7[^tSПU!=]i/;6D{]F?̼Т̻/ȼ8%d?¼wr;qu .k}Cͻ}1"03]ze'V^#* ޻tbHyF+P9>tW:rhq-߻Jw)_I^ף@:!Ǒw`/TTC^w#! kǏ6|C=>>9W9v|t)n̥-ۈw=/IE|w1ֈAxG!zݟ}#'Wpnhnku vcg#]K?R㰇Hxwh"f!^.ƇU#]PxMG{wkuڌT ;Lؗ1+]TƆ}UwAw}c;!Î|W$ŕXqHuQθ|rP!y%&{ QP 6~؇Yf6D x8ߵƾK}ֽ_dw'1Se$aԨ! 
z"m4=Gt nbaw&!Zi󝧀ߕQ](훃¦TbttḻDaQ1K Nj w~4w7>{qݚO k0]`nYS3e,8 ]V ϟpoAۣBϮKHR=N{n]Pv_pNv'(v~rD+]b-Z{ͲNAݔw {3v+F{tWhfvS9jltb+ɇ90wݯӱ-FIe1ò, ws"x{''5hndU0P;b- ;MX$0&¬W{oXVz|tcLa^  |şqQ|] }ݗ] AG{Hm\71[ xݗ{v݇|q]ȺgDe^ 3`Hڼ9:;Wロ]Z{ i=N2|YI5̺*WY{b>9kXHjJ<лr4q[[ڭьV+Nꮜ doA4_]E4k`2{2u=NAĩ[JoߺWR}!u(ɜwet(. 舡TP(C4[ǚ&ֿ|=m<v[2:K;0mw2q$Ajzxy~ra;=b/{v]=?=wkcP%y#yx/ߍW;;CX:|wꗫ?B箦sʇ9-U˹ҁ_*ƹ _B'¹?w7o欅7Et[ankln`\s,aͮ Q〚]4n^ \)G ffv ܗq/ =h1ׄ(y&%1xn4Z({O v2NQܽ3UwiH9i(.u iGmy ,BaNA:Mo{Oj@q\'~*fߢZvEqo:?ڨ XaGe{Eq`o]ǭ}ÕްߎzCFoMB~T̾wt xߚ{ovoC}06  .YIQ ⛥h|8{G3`~Gs :?:d3k`ovU`Ս 2;b-F.J(%G4b0kxɜ q3}|-;cO*R.n$7W0JQEΏNZợ)ME )33t⠳z }3$P2U)_~]bZtZ.ΈNwhN1XW^;ҡ#!u fB* #t]@Z 8Eq@t{!.7Eq'( _x\4I 6M=h5E!Mu#rE:vzrݛ庣uuMu:S4߻0LQ\=z3gq(W(Nk n_(\@Ű\aJC4}+o{W(sKm}V/v*v;HvmeeCnaoJWvۺ:?:Afm-5 P[$C|Kpv*rOw.Mj[[nRkfWbқegu>ֻd?ќ!Z|T PMtMA6W:f۟]wmi{w-}|w/+BB6E$ߦZR{27ʹΓY⋠Dhώf Eq:?Ukh{QN@n!"g{WN+Rl(Eq 11Af;GY8e0\ZֽcXY%X;PVqݨ0I&Jv/zHF5.~(>T`)fWxm/c:)>?:&8]Kll/y['Xq^z k(._غ) LY,zUrza ;l ;nm[7.1I: 0X- +[Y *XKu@Z}徘ֳʫ^_jZyTԗW-9Djcr9\Rj_{ ҥ⫙ZW].񪹣j1`1g}RW.e{U ëJƫg]ƫJUmujݸل>LXNjy4V« 5Ks@xUq:r&i4KsX5؄}4_*: sU-īWF^lxU#/=aַY[\Z׵7VU-\emǫjjW}$\k+|ѢxrU1[:yīJ,iU;:x:DUWe}?^u\ ܚ99UUO&z{9!xPcEY;By'(^VUƭQ</ jK}|2^ٶWMhj]񪞮BZeU=ū:3ҫ>xUfxթe^\H=Rf*CQEzzնEjUWL}p8۫;W kmm^ճ ^T)WuWW/^u\Ksƺ4Y.ӫޛƫ^ūmHVЛM׫"CovOnxUj񪽂uziW}Y^Ոx'׫N?xCPSƙ՜߻=6EK(x:U~rP47Lǫzj c^rJHPS<ҁUQ^(]Y&0bb47zGnW5r=^%RUuƫ PM{`ч|u@j]D޺īW^uqW[xU^&bJU7"~௚?z*p'z47vQ$ߥiUqzաԫ j:3*t'gOLVūV)^4w/ǫVWū&ti.۵(;&W-Nr4w+RUIW*KsV}īdIzgdi@x;>M5^lxnfn-ìgjGxrKWͷ|5zUxW˃90J6~:.ƫbW5/xUβRNxU 9^UڻƫWH>^utxQ/=WBUYsӫ*Wj!4^ c/t?^Փ[j#WM"^uz}lޢ&޺Ho(^U!R^576xU xUoNU?QmZM =Q1u: ë zgފ{FUǫzW-:jo:?WƫWӏWuwW٧zUoU07^8gxU?xhgD*tatݟMg9mP2ə=>yEЫmWhx{\UYUӫ= ǫňW9^uܒۛz@^WtiNmoW{Vz;tf?A^4"{N }lh.wQ$),$9^g ?zURo~>^ս'jL=AT6F^:fN~%Pm8NӃ{/͜.sj׫V!_Eΰ_{4'Jķx|FxU:U^W2ǫ/l(|[Ϧ3-n]!^ UW:EzyLNANZ[/R> JUj8^FXxեzf>Ų&P ?OYڍ;X4s)_N^#gl 4=HVzUgWj^>^^*j^:RjO69]:.dz&TZ}s.V/|5^\|Nb9oͲRtxϦ(HxRU1W:H񪕍]34Eq>+^>ɜbj/6PEjNA"zyscAo<(Ӂ=JP=J$BwƏ s}{5fUkjYW"5n*sWG+]s+zU5Uǡ\rWӫV4J(것ŰJ}t?jE}k«vU:ӈBWHiU5o^ltCj^v zϦ([x2>xUKԫVt!Cܚ =^U 
xX䫹nH}ASEq;,#rnTfN7}rJxU{/l"x>7PxUVնWfκW}| '%^o[(RH>6c<WT9([s{t9Uǡիf%/Т[٧ëZnjXVj!*'d%]f"? 7!Z.qz)+Z^iͬnD|q!AMVol9a(UzӃ *iϷ^՜EqW%8Eq]1K%xn?x~kQ\WTpWv|0ZW ի;SWRJn>ݒfYpzU+%q .ٺ'RajU0[wkUOU$xW&cI-#%UWԓ]vSfNxU(Uz{tf;5WS9K&+d9: U@5(8T,g'f]o^ǫN6h. (x@ONQ\Ԝ*uNxպU+?W>^u)HJQOwT ^aCԔ 7a*Rr0UOLq.-Sy՜TUtfU׫1~z7U ƫ>ZwJPsԬ̰&ġ}*Lz8\>zރxպ5WH^~U4zUvdʫBj}xw8UBqOZ84j*RqUوcy{zwS`{UJ5Z%^^'_:xm?aON)YY*|G]U|5KMG u:La!4ܢVǞlzWUAżzv܂Pԋ;!yr~-U Wux{ՔU>sV^~v&3Er $'~4kUùǫn4]KsCOcZЫ| Ks6_*7WGU~6-E- 3a!yfiNzvCnw4wiN 7&'Ƨ*s0a9䫹[(s|Ui*_ yPPل~V"^J0wUy7n ;Ü#_k+IΏWuEzr ^ÚϮ]ԄW,u_zU!ܩHM-A y^l+[Vry}l%whR xUEj 5Ksfig:47TԹLjELE\ AKGUs0n-JxU^uFjP:7~6vjwYӫ>rݛ+&)Pk0L"boL9}h:~#PW^5KsZU+Aub*[czնj*G=FUG$ΰ_UƣWTW^u(' 0O׫[`'NХ!_MxUo,WEy&A(vz+CU+tp櫺"jW:U/o7U@}$`F^\kJ.g$#"~htf[>uEjNA5͡.GU?D9~o/?9xKsJ9{JJX5T$䫲ƋW>>g`Fa#_z^j:hjէkʪVrԶUo_G3/y=_E^XjJ7zQB_"|[1tj:3 %n^D5|Ɛ]@xU˫WfU/ȇS6x'_b9V2$W-9əU6kI<؄%ei䫶/4^ղU=W_ë^TU$_xxl:?@5琠~tzk'1at's 7L]^4%:UHHgU-Zq]U7Ƚ6hfU-WQjJʫ/ǫ΍[̡2$YVʩZG^zUw5JZjVZj|*r깁j fxpWeʳ*9Ji@5W:cJ+ԫjt5^/]5_JGgUMb*zW'zxѤW×2oƫŕtKrN/6xc/+uzS{nx0LBpi&P@|_Rg|ˬ҇ӟ%'ǫV =^ ^m^p=Z@5}O%I|2|1m\`W]m&`2xJ2m\-RsQWf2şxU3R_jH|ř?^uH,szt׫pdj&h!_%YT67 _6_Wm Wn{>_gUK>F< S9'^IjNV/96(!>HX ifWhUyEzզuxUc^5xU^@x:AWͻWV-^TUzUK^RW"_UW&_UUFS^zUU|2!Jj:ҝ_G 7]^u0zӃhIPGzZ s0VtZ׵+TW5UfW9] 3+z<mUjx{5RW@5}96h>/j&5WEfūK?^5( D |57yUaP͕fNW),7&_Ք[Uz7^JoSxU Cz U4)8}UY^ OUퟀW܂l֛ì>櫪U0kլ^ūg:Ts ⟊WUxU=eW~܋"2fMa0(] *_QU3"_MU5MFS=zEM9&W=43(%PM =6h:Ɯ^ xUWÖ&xU6AV"U]+W欕W&^Փjj{ǫVo .īĉW-P^x̀W=wkYjV^3-|jTg׻x0ҡ'_}j*U+5BUx ^tf>S%Amy ^6V/.Wp^F CdwVÔ}Wz+RxeWiYJӁ[WǯWTU')GNE&c}KxU Uǫ WZxfE[n{񪶝W[z9|9]s:ɇwxHիڐ*^)xU:0U-:^&Y^uhxn5kZ[c2mzw-WuY8^8Ӣ8.->^uW4iQ-s[j$njqiCӢ8ȁ3Jim}ji֓+ݔz r=V/񪶍"_ūZqWu)|U[U \H|U}' ,~UPTU^N|g>9ͼuM)mգfͲzUWʫ֓{iE=x8JSwql,C+_5/!^u4>2UQC WNSn7_>~QV/s ]87}e9prp{@Yī:gǫ=M/^վ#MG]jwjtKzOf5W~ NjtUI}XVZWX)p9z'8ivMUЫWSǁd,:!ĮUOŕeyP*%Y,͚TkW0( 9«vf9L3aM𪷩U]ƫZof||ۗ0P<1UuP=J<Sr9<ī˿Qf&|1#2I(WYtEqmGY_xbU6hhX䫞2RU?UE=䫖8NQ;_ $,9]^0^UAy«>LCNYs 4_fYTSWqٮat 34_ѱkV^HTjnOqk׫W-'{҃(Z|բ'7ۼ-C`/Z[Ojtwk=>5_t^UԬbGs_ZofM^;_5fQuz's˟@ī \C86 q>9׫.F-W|SKb0Ô_zr'_դi*;zUzղoi۞"mdW 
UիN!Cߢ0Eq׫>UUMadJu ^"5WxSǫ3|wW6^jg׫>Ϧ5/HV@5(!H@!U:լWzhtpEw-4ā#5͚7ƨaڠ={jm"Moxť|c0s铹dU:82xպ!aiSEWe5Bʫī>|U'!ũ˫WEqc$_{׫NZ^UQW;^իiQ\zUU%_UO |xؓ9]zzON*ywo꿧ojxUW5WhʑU R|1W@4yq 474*?LXRa~4G¦^Azsnq4d"ju~j,).[]Dvj6bUMr:,qox m4ل"4^usx?"x~/U"5mx /sU+lwfR? K0^5=WW,3a%FzUUexUzUe :/0_~fi2a3Ksfv3U샇(:^u';[T뜵>W5 jzUv6@Y{$,͙lM:ɭ2ƞM0KsmVrU=Vꐯ>;:*5_Z)JarxU/W}$TCS7KsO0^՛1*zHPC+n5WxU˫b'PyJPJxqYkWSΟ9W5|ն}v ^xUgzUj_k? #nMzUuVr!AjIW-MCc#^uT*hZɑU"r05}ONaa>|:70չW^j=|UC˫-zJP yYEE }f9%0^f"U/DǫUѫWjY5vI9^u0iMS!\yW=IP nmnBMj]{\cP5_rxU;_Μ`+SJjg/>5^*9}}WU>^?[xnz^xU~Eaaݬx|S?^վs۫:̏MSƫj֤qlīVijNxS^)W0v4cebl.,iN3#`w4kt@BjW%34Ks4"kqɤeinP:*Sn1ŭYV*:݅4*lB9h 2Ҝ ͥGȧ[VrKJbI׫Z?S"5MS1}s\ūzu֫W6.Z~_<9^PxU'&;_zUT8 6ի^8݇݁LU5 +p'J|FuRU> *rWPPޟ9^ƫīZZxīV[u퐯j P8x'U f«u^Tj}9?TJGtx0WI*,wUG7"|%_={o^ɜ?|U>ȧ[GzU/jrz)߇W5%4*^)yI).2#RMm{Wwà,(u-HZդEʁA 7:!*=mƂWxUMs鐛!MSuMOʘ;WUmWC^1tطd8~ׯWUIU鍣W&^n{;_RJw}pZ)W=^]ǫ@[Uz Wfo\M|q𪧌BsTu0ZaQzgdHsitCs!\麚yo/Wel9핯|2I#/."5W:T|)RUKf9^UtIXU/ j*_ʟp "QU׫V?7ەk+^`5w2xUW)'_hxPnq8LsAOūL5ktCzHfOwëꐯ&uJ嫴A^OjM';AWh<^uS'ǫ=pjݸU]WoJy.PMSxNwPUzDu«PWed{xUs'?Hū:>DW%lv^Q<ǫWZa֗&u.7cxUjGxUR|JGҥ^P< LxUG!v y(z4tNmpë3WW'^ѺF\_ij76jŞ(%9JSȧU02_i0UmW=H嫖f͕覵u? 
^CEJb]͎+|Uc*_jNA'Cj 5UmG;^u4.;_׫“Os96p҃C SܚSszGNw)_M%nQ*+^5W$l$qUgQUA=9n ^~#ëWu9m^jWyҬ@UtٛDǫU@5X,gW_ W%_eYaI=FӾy(ZxպeW-WۍWcc:>^ڃ},^|x{k(Y"D Ŵ(nONQ\=>|U_~같v+:ĭDV/KLC"5W:ZOEɝU'(2|%Wuo]4lZ"t:yī"z*a97aN$^w*^)|B}'(NR4(.pz]ꦾ֝aj^`unz{Ixպ߈WcU"5Eq0RUojO^ҙ`H"jWu*hXB0̲R]逓)W=4zJ_MnƫnE)6!_%eQ\Y*jɄN^~U«NwR_2.rLT Eq 9gY|?^@3/xU{-.@6h>Q R2[[Ys *z|Wl{UƫޚӜ׫Vk9L(Tʦ^UYWZ7^zU5^ Vkm;W5sjbxU&We]fH'zՖ:Gmˈ{Sիz \-fz(5a;;`NYPJA%<\ ^WU9C}BJV_UzU9Ы"0zU^v[mCq^5Q n(UCJdar{ʪ"1 /k-;}/_o?͊_g//?𿷿ܝ鏫QFw/F#+n7!{o} &|G]8Hǽ`aq/G}E}.V c}Ër |pð?߾wGq?(u+Q !9Kdۇۇg+QȊy.VYE9낯zG=0ϻZFΜzs=p7>$TQ++QcPo~w) TcczV=yPvݺKǫ1z`%2 ~.2ͮ _wAz/;Dx{I f1?n(JW:,oA-ܛi ARkauu~]uv"x_%]yl%<lT| r0'?:Q5s=;jqns/vs9Oi-3ފ *56LPφI/!]iNӯkuv4]ykasutp>w !ܻ]C ~N$#7xF$-֔=m{0a#h !َ'C :5)kHRװpI<<Ob/¹{)&(1mCt'f;E ALGNَF!htZx2ʩJsOܽ'^w≻?O$+ -x'޻mMmf;Byچ/ a#})j~ʝ_wu_wu`w 3ނ'C/{) ^lG(/A!vO:e- ̮I\Ó½'{ O 'IR9h plG]  O;e-,w}X8pI^Ó½'sdT<QO GvxaBy؎/ A@(/SV{W\ݫ^2{[Fwzʓc\}|I4,,z%,;k" έh%$uCH+1HD %A^B:"u@09mSHru+> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font <> /ExtGState << >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj xref 0 11 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000256043 00000 n 0000256126 00000 n 0000256238 
00000 n 0000256271 00000 n 0000000212 00000 n 0000000292 00000 n 0000258966 00000 n 0000259060 00000 n trailer << /Size 11 /Info 1 0 R /Root 2 0 R >> startxref 259159 %%EOF brms/vignettes/ppc_mm1.pdf0000644000176200001440000007500113042165067015263 0ustar liggesusers%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170125191102) /ModDate (D:20170125191102) /Title (R Graphics Output) /Producer (R 3.3.2) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 27077 /Filter /FlateDecode >> stream xKeKvU+&tѵ,$yHVaW9d[T!h pVwُxso毿o~_k~_|O_~2?|_m~N_M_4}ot~÷mOW9ѱh}F':]=Zs95~`z~] ң>~a={/lKc6ݢ}Mt_8 Ǚp_oph='ڏo?-S݉ڷhQG yhkhS{K Z{{U-D?eO_Ô{o9lѺ%m[ڽ=گDңc\xv-ƕ;p/ s֩76գkGV$[\۽_#kֵ-:z{Yވ[Wcڽ_DqMDW!X۽=ϬE>\֫ڽ#[mmEۖs?Ѣζq-:GgТus<Ӣ֫Z4e{dKzUoGU-:DWh<콤q[-}lzj9EǑ6OW6^բn{jv%jբ_帖^ko{Uv֫ZthェE7Qk g?zkﭛ[4ZƮGQk}O:ѫ.J9ѫcJ<ѫc-ߖѫneoEZ޷DݴhcェEǜ=:zj=m^բjWUߍvvqw -oE+-zKiz'jEgs뺥ߖ[4zv[zU=[Uk֫z{/Q{-ݞGU+3BZj=콪1ڜ:^1cZzP~Tc3ou-luk齪'3QkE-}ۭWm^ꮽ? آ֫Zo:zh{UwѾRט[O-3_[_wU-EmIo14`Zji=+ܙcѸ^ٯ^{j=f=޽;k}d^H{mE}wcOoDZEWyֺ֞ǘբZ 9OSք@֪Z`XkS±jo9[ 0VksNz:WkJKQcn'+ښȒo{b'l,1R}g%齇޳W5_;a{?:h˕TZMzzG [m}ׅ2VSEڰ]nnZ뗐_Ǭտvq+ęF;Qχq_'cnV}-eb ܗ&'q_1. s[w>ډqah_׍mr痱v c >~9{<}=.^_6\?6'q_M!v ]B]zsf˹ \Xm\VS>dWmbm8ڃp8eY? 
&n}vn`:AѹX*jn]zT֦6_}11xsȦy?h}j_^ܧu Sٰ TB܆1 nxnP^{F6 w¹ =cl`ۋ6q A ֗6^;R[M>^魴֨ޟ0ul{<|gZEܦ&~ξ4?^|=6Js禾h?7ml @{Ysޟto?I'ϧ7iJs-ǂ7<#͚cGxy=m<b_I-Vo} ;L?m66n+1gZ~h66}FXKܙuڬ;cg8Y?vG"=+yF}(=kq7Ng6Խg⓸ϏUֱ771_.i߭-g#;|x~qMNlzGݧ13=wܾf;ˉH{џ[{$NަMkϹ+hAϽgWJwFYӟYHb-ۛ?_ "33>\lzx-JV's?dvN=o{Iv縘?wVk|َ ҞZpwߜOx܆w=};<== ܇r/yO猧ywg{47J!nc07=>H"kk1m=7ٖc ϮzE(}~^zǚ:V>?==vdd9ևc:֏{~o }zYkS]}zvK)ꛊqVv=ݧ ĥֽG51,=E؞L,=7a%"~3]k6 K̭ב7f/rB2Gq^.Yilz:%“B[{q IM&T7~Dedv=l=`,c;XucF`R=G}X\16$Zzzlׄce~#:I;e}^ؽ_H(͸lvv=G /e?H^GRi GVVI#cҳ1g`#mumi{8vJ|鲎궇w鸅8Gjf%;SDz%tdNUqL#9yrL$H,jYwj72.[L{KK=5.]X/W Im\JyFieako}4x F7F/y}z"p'l]reqͬgZG~1>N key'3=y)ivN=e[D}Sa D e;DDOt~!':^ Ot~ۆ:&?@d l_h?(+esp|5m1  <;RgS+@x&c  #?kdB{u|n_ <_YdEY ;쟿=`7+ːa}}<@!@ء$@x  _3טS@xsO٭_SO<@xj <@% @xR $ ^k f ޲oُّS@x g]@xlf ||X %98a;- |g N'-\Xd;%jwֳpKva%aaX @xS7,@x[5@xӬ >ZOꣵ LvF -/dz_7 r `20Mf鬙/57V2ͮ+;L^1aV2Lx?wFqÄ(„wrY2A&FC=LLa00cMjE&|x}0fޟ9LT Lr0TL-Qp_c g `mÄOpt0E _ &|q6S048L30$6˄7LxfWLx 0@ ^wtB+C&܁Y>&qY&^;q}{D =0.N{^=pÄ4‘@ᾬM?Tx,rGheܲަ}2s3 hx 3/87+8Id&Fo^|3퍰5|r>|s>.u#t]~vbn"=1S>|zsqҹy#\,u}Ntr$>|hÇ'>|‡;J8+& fZ۽am y ߑd)==g5$|-| ^@u֝>ӀkYy=)Ad ;vk‡7>>0‡7A7NbÇ7>>q`>^L>|6sOa$o>\4|a'|%>z-u҇"[a+Ç[C=^|x|!l]ul2P ×$v! =v<{>||aFߕg*QŇ/qqpehplcZ|xw>HF!^Fa@|8W9sl|xβR>|3ÇL@`G0s #U0Ň)N\p Q@ߏ!LpN+yy gpXZxK0^p)Θ`4C0| F0^pY&(N69÷WU)΂A0pC0<4kO4#_9^Na0׉`n`l;|I90& r_|xc-></҇KtŎWζ7|xfq>ܣ`'d2Ak,Ü*OiKs(>g |q,rd|iq WLUt < |,D u\ŇG{Pax|3&ÇdGFS0gÜ?_i!a&6!r}N$|xc>|xtApg oÇ7Njo8 o>s\.|xg+>uHvBÇwۆd[‡wuΉqu*o-j}'9‡]!j EÇV 'Xω\K[:'‡= >|a >^uB'ÇOv'>|ȕ-9 1b&|0|>|qZ>փ+38|-|ORasŇ H' Bxl"Q gebx#((9-XQ^1doU ŰtU:l_:PŰ4SŰ4C0(O\txI7axCYo3Q鰊R0V E֮t.Bwq)|_:L.}*;$ >TDBX9cS:F!͂)ctPAμ)M4pe8bx z)w(U|>H1:(3^>ytt>YHϛbbX:b<^*_Tr8t BC:|):tXt_ŰeT 3bbxE!bXş)A`,JOi5tB}_[ZbXEaR *pQ {*^(WŰT Wbx!;bxaQ1lR1#a?aJ1_ʤbp_!oUtut?HaO@xs}33>{H:|N?^:|=Z >Hɍ?JU /o*U .:L6&x'07:Jc tKQ /*-Ooӑ c\$dp! >آv C|77ra8rM:<#8˃08\ Y.N\5xWqʅOpxhW`8raJpPS``^yK.# |mʅ9t\xSDȅpWʅ/x`\88rC.|֧g7pn C. ,B.|g|G.i "F.,-@88x~&hAF΀47 ,n&M4bP,x*ra\i?: 7ra~ȅ@B _g^rOh?::8*8O`8#C,8D|~GW8Vp88xHOp:g KKuWIpWp"8xU= &ݎ\xF.0Ǫ|}ȝ48_ncr׭D ;'O}(lCn . 
\ ( EALꅻb@~a lok[<͂NqT4I-w>ӎ.QS UߙYso{zQBgY X wFC["wQK]Iׅ]YHH;U Fk6X-wc%Tp.o1C w f+mpi4+m4SmKnZG =-m0m0GiUlHrt6x~A >>nۿK [6oiwJ_RE/{C>JU: e:*K^'^kzZC{_p_0WߌG=?oi4[ťjG-6Zm Vk6XS?h6`<-C_O +;?ZA'm0gxK诧U -upTh] R 66X`UjWq6`hF\Rh~/|Oԋ6!em0 2K3N:hOnL:`" ?`'005|bhEMh7qoc%} g6X^wtxG N.Giwq Y ^>u@P|?l&q(hwqorK4ɥU"6X_ m0wJ% mzҡ >0{2p*M08 B%  C;:6`I0;:= zG^рd;z}iK, 0 g }`pBapH #  ^0j` c >`/0Dȁ:^)r" >ʁe qo`u!$ vv6{0,9(`%`Mr`;SsE#=0x} 0xUv;TZ@q `I9E 4a] ѷ6C6ܫ6hח66X1҆68WUJS # X>ϟ`2hDxGwtѣ=;z(~;pn;u;z=}G}_`άw;:_0|`K<ǓYmBm ?Vl\=QigQk6x`|6x'Xq m["/i ^>cFG7@`(h7 `2hɟ &{6HKFLUʦ0% ء0dD.z Ǔ` fI f B_`|p0n~Ph =я6x=`6 lw1#a`ɁO`*-aƵ,hapo`0(~_08Y<Ǔ/ Go\0h80}^8`r `*t7\ɞq* ńw$ j```0Ld`0;`05!wF$xGKKl-@`0C/p`0Vm0|Wm"]MV}*`Ti( ^ w.*0 >?oֈݥ a7*}QWa-enlwQKxɁRyS:Fު%}(9R*%m-RRKxGZ«Zj ;k-vc-a{C Ȇu {B6|ˊa! wpML-aSKxOaٰe\|᰾ o?a| jaÓaöp]|o>Fo6|.hwFs FF9 fPlxQ llmlҩAh_hmņv1l(lR/So%+7V lmvoFSη-e$p6}aˆeaQS (K(eço [GB6,K [D6X6, T:,et*.SK8%/5lj†q3zFkF7;EQ|Ć6.ذCeó|aìK[lxEI]xto&Fo:ɔ + ^T†oaa|h}Y3lX3`ذ$lML}R7ʥa>eòqt?V#o!k} zapÆo_а3d8mGa+aq&`xV;RHmޑC%ba]"0, /d= փ >9)E+`X]`X-/E tZ`X*`X7`x O0wJQa- Üa!'++0xW0BA0T»C ǂr~7 #+0\{@˧iY`+oE *V& ކHDH a<`x4R O`KiU'h\`8cKpR /Y§a >Sc8݀aa'`Ѐy10L@0k: `xe [02Ea`FEr0[Xt&`BGF 0C0{qp !qpY }óg W5߀UT)*n.m 6uA%|=$8]N|(Ѣ>Lg:TQ**G%X.^,T JxR4|< W0<6ɏit{/ U%< J8.U g#JI*lT b&!J#0TmѮ*lT o/0TYΆMpNOV%|}3h=T)h$Fs hLaf8o0(LJXSMG%\{MG%\}MF4:Uhj5AѢi4hF4ќ@%=*_;)T,Q S 0BT¬$P cJ.*aP SZ0P P YP STQ@9k*%w f ^*oJx͢0ffSEǓ sn0R7`0;$ØI EF_DMitE-ٚFGyiCH[P S"0 sP0, 00l⨄L _qATKa-?lQ),%4OZj!]f,|>b!DXP/(}-0*(- (lO>ibE_>AY`PHS+"<)""b"bD_;0%?@~""""~∈a!]O^ _ GS`DЯX`rŗei (_@q<_ _yUV>r_ 8`:\{D*PZ Ph (@X`A"WˇK(^ǀbD) (~IDl  PLAa ߂_ _?bMŚ<mςbx'/(!RU`*0lAy OZ6ok@A2@zooOϪ%6gIwrI-ŋ%ŋ%fAbk -A%(.:@|( (| (1ϋg3݀ZI=@Żx:>A7O]@xXZ_j!m-? 揃SGޥGO/^qV>|sX<|+ ?Z/WGG5` 4 voa*x#tIKJ~4 fAq?$çCC ~HO9[KYǍX@O@O˾B' ?2|hV2|ŀ wγ+!ǦcpaȰ2 .dx߆ +2Qdx !* U8dXY(dX&djʐvȰZ7+CCW88 Eq(xzqȰJm%f.>y)aq=dԤdXvȰcȰfĐaEa6ů?2mcG#UV2(X0a< Ix ̪F8ddxS#+HV2]d?l%' 3=dUpTE6 ë<0_0DZ2 d^;s? C92 HUCpoȰ'J ó%Ù ;@! 
EH'/#aOK4Gop7MF.7dXdV#?07XaIBpGGY !nǡ = ?y>QId8d8AӒ֮tx?lG"$iwJ× Àd5H3+ι3%è sp%rHZ$81Wd|Ҩ$7QÐC?z\'a3 V9Pc DžZ?z,?:WpưA6 Z?z~$RbAj%;QI+^!a֒%fu2́*Ȱ6dx+2 BɍBk c2fI}2g s2 $@˨r£?^!"׋ G2d8ސa8a 1$:!K2=dxdxa)aj%cOIdY$ã Bq$"eQ w YA* <U2 G2̻E2|gs㪐aO#[2b2Zj8d=d d%dSaJ""26dxCT2˗dxBu 2\*߮7 2i$ìCw8GNV)TF2%ц w Ӳ F2L0': S$|a uGr$̩H*.Q1d=>dڥm.2< [\7d^<\O·P:U 2 O:Q:g؁%S|";QPXh$6 @ᶨ;ᙃB:@ 𡺘:ZWغMf]aՙ@aIR ^/unKA_;PG S.a+( ^+Yhj$YU=|PBm=}Zx3ugPxMi wR [gT(ֺ»ja @dI~ PN2PXP4PZ—@ Pb#bR _M _,3t@(\CPԖ@oM>xW?P(|TWiK=:Øy_ufb=d8KWa s}uYb_=c>_=p}(|o 7!npj@aAez:WUS&yQ[P@uU딬zX5Y5-aBaą·N@qҿ~0 ; u: sA<ld0-4>(| [B(_(Pԡ0BC'u>պ@aw»€{9W'ooW=!NPx=PxSެ ?ÛjpN (X\W{@u}u| @ᵜZ=T^nչ yZ(P^ o:ASW /s+,x uRZWXuXWwpH鏂3n*g))'t"cE=>һlV00VBU%S<(C -/D \qLwD\L8%- "_q1Dx8Ëa3)[gEK<*iVvxo|uOo!9" "،xӒYDrq)ET""'oxXhVqJ%".&,tJ,">ag"C+"NSؚхAĺ-]VKD|ÞO"{!0 /C D0_1"- & jK gÓ8h jq&BFq oxUJUbxz3 0,">dAODvUb,"F\8[&L P"3FD-"VK "+DRbSd$P0m2nGN%#<wI |0+^K<|3&xv݇xU,s pN*?L#1I c$_у˫qw/"'[Tx~Uc xx9aFBe,!^,BhS(pdƊ("f}ZQG<<#A'5 s"q:Gt1X*}uW飠pw>K /i8\\W-*MrDW4ҧ_WiD"bSAķ{"*ˢ*}`U1җnKTQ{Z,8<OZ3j-MVtZZ9RpxR d'pXnAr]kSn4ZK 7u=aN}V&6 w) h&ZK~0^Y:p>_pEog8\0pV/V=L8[8>V [' {*;g |pCkZfc-}Pi-D)'Q T1^y>*'߷Lzz`1 חt)K\ҔEF1LR g ^]Ű%U Ss F1LζnXùSa6:UP = V-ql/ZOmUkc88>Z blpXpXx&>aQa30pǫp;V[1|V[Y8\Р+m@[1e &SvXU:p9pv߸0۲[+ypc}{Xџ/CU|pL;ppIJ:XvXSv8CŰ)Z:_kiZK;}>Uޅ3Vew6m/v~o ᭬7 U` /a uLWb yj:8N*n L0sa wÛ]bQ*఩N*e5 N[8[8CVp)c߁֐{aa6Mvw]-wa9`؂πF;|Ktﰱ k\YG,-XhE;|hw9a</OcYa_Z8O> ajqT;V0hhY0AJpF!Ř9W%f._0,S_ o_/0 'Uz6U:aCdp0'qpN7Zpifh}82`x>pз%LGy˃9`ڵaea]êÖv Y I0Y^óF#aWMWŤd%̀aUȖ>aTrr-7H0LVD0 mn]aĂa%bÞ\ {0\9`B a6GMUra4` P`X #`x}KiËvn c#s0, /^Ɖ%Xx>:aԀa,7lm { rZjPnba)7yR )7Vᷫ4~K;0O Qb/cB+`i`a<( S0?UP7`$`r}hWҜ  /*=z?`yJ_/0^*} S\Ǧ\Ǻ0" 0|*{QJ0|Ws({T ΋(? (8)7s0;3l W-V-0bjU;<^`x{8K0  )D;LtKi=f]`cX#y,Ylzsi2zwĻkc!3 0=V.7kvΚ%`Da>ȟ dN zZ0k6`.冕t s >p Ubg%`b[r +a'}CiuXy,.a=7 U0'",7[w,c}[\/D?WE gj 0ݥj ,ְ@ K O R-\@HU XT kZ0jSi֦V-|k*5R w@.C ܹR !~;w|3Lj `|U cj [Zn5l-[k ;YkX iaۏ\Pj XZ%k kAlawp޴pQ#9J{Bp3Y@! s0= _YFPp]bwRjᾴ |X U ªkU~,]ª‹ fXҧ iқB7j BZw1@Xpj%'T ?ak uy0 Ta8¥N§f\@Sm6r2>zV4#V!' 
,+dVhaCaa+ O[8e50H4@Z~ S:P*0ְRPuf>*PQ ƪ7!j|ja )G-{_K-llK p@}J-|ʈQ 'kZ8Yja<^_CaK֯0B=jp1 Q + vO0@WjvGRn  R r V ֏(9ja}Q ے1^-(+vY.ȁvr0OBI(|jw80«ЁU%̦7(^jM ߊeV J|»8P~uT~סp>(lmyc,((ΰRPC-|ks-PXS“a»NpXKZx"Z8= ZxG-NCpR =ҹ25k(PR!UjypJ j-5 04R/RMRRù*50.Fha~(v]%Na=)pJ #<0RxD[j lL"Ujx0RiUj8ݹJ 0-5LA` O0ZbӧZ3jӧZxR.T~ )5J Ua1X _j'P|=  &Cӝ>0o}S (<^`  /Bi@B ( /Px+@aŒ@@P:sҙ4<7^4>Pze(=2VCX:k(=4a pK ("BC G S2`cB|'(f 4 ϸ Ϙ kn(<`   @00%@a@a& 0 40 Px& )ZFM &($ (   c V(A^0ZWQj P;P@aW[)1ja$MA-eušTP?`= C0 /@`30;PRe(t 5b(= V(Pz@#O)J\jR l:PW (L+ # b9`f s(L0čR=5Pj|4jaLP kDFQ s]Ņ1Wq~j,q P2A-B7jac(>xyeMUjU pœC-0UvP Z-jas&PX)Pji cZtQ 7ja-Mp/Ba (PDG-\:jYpg .'UtNlJ) JEPR 2J]0 4 [%p)WA*˦p Z/̀ag4_)kÞ.r>N0g_6[)N.Q /X«Fe1`X[np]ϩMeާ`EO)'m¥\ 0i V-`,`Xk gAsa0LmTMaݣ鱕`jU 9U`#Cp)Ö  Wm_}*wmQ k >=@>Q -;>:RCR XF),X Ϟ /*FzÂmkb#FO2 r,o cS`EfaQ0,( (0|K)h;ó`0`pa*0 lP^`/0 -0|$Â00*)×v_aO O6`d [V0| c5X`x 8b\XhsZý=m8jlg< NU _k&9+>JI0|X60o0ܓ'e- =ީ+`X_a".:7 c[ ajaR~lQ cRߴ`:Ia╍AXMN³b“6.`x,`xo))PJamU [ URaT#UQaXJtTP%lAKTRagpYRaXeTTR%F%Q2UzMUG>YU φ]%,A%,*0“cU@R '5[*a0*k_@@[*CpT>z7AJ8mT7*a0`r#7]CF%|Q "ߨOvTš<U՘ҧaU z t [Lh!ѻ$O}5.S먄W#>Fl{ 5;9@X"@xzp“|GI= @#X75@xQJ x!@x>`]6Ѻc@<@CBVFla*N@blᴜ06@Oժ0,[JxY @#/"_ D40 >z{ZhdUa8 pZ+ VBGD}nNHa 0/GUz죯70^>5<* T3՗}K%+GC>:?Tate->"p.C}a* + τG|WB* ˷/÷~ d/_[*>gRt),G@&@E9 R2ǽFUMd @f@>?T9-pP3 OU@{ g  sx |ȉF@f  h.@>_bWq*g -xJU1@9L c+:x *^?T1.U@^Qk mE(?*TK*I*6X:la+[HH8=ΨH<ƖRh*=T;x%8ΪtU/*&ZTVU_PU_8(S1Hnzd nzl4QZvӁȌDe7=Vee7= oHicEUUHi&Ue+q2ڪHO4-/U1KULHƘ/LL"EVE菭H?y%VGT$KHDыH1O|2_Y֔4K2>,5fӘٴ[UiJl1!"S4'lz -ʽh,іn4H\AnDȘu7BοgY*Bc6} dg^fyM~4ØM-n/yL٬yќiMJO%[j6ٴG*4>܋)lk6v]!_yL_M{$B42@MolT"ٴDY3j̦Dd_*D̦*Uϟ*%GhD 6bjEj-rAʣ?OO8b6-6N->$Rj̦lZszͦц!jA3P[\1xriEȮEGDl !k/BV d̬Aȗ|~V"ޫ1S/Hhx!r!;7Ae B[[F(U[lIŇHmu7A+y~2 bEjO/m!Rb=+EV3T[_b-VpT"-_141.X2ºPdPdtPU'j)2E#+E΃"6Cl"^ӚBM^4c(.Y> EyC0Pd*"e[=HL(I-YP E@WPd6)EOK,KOEVME>e̡ȇ7y煖44WiC?"+4XpkZ["k EVIqȻV̡LRUk:dŋґs:GYO6GM2~(ReZi٦P(5BNZ"E E.us(E"+Y"KZBه"ϟbΑk^9ȎPdբȧ*PddkȗJa)2UQ(2y"yȋ89CqQdR4/#+1GV<"•x3"ě:b)2"Ĺ*B S\%CQC=EvlbZ*WxMsW)6kz"k%+^&5(r`.~yMbeŏ4]&ʅ"K 
t(ra7!zMiy(ۿǿοÿ_~8Pyo<}O-c^8WJ^.W~n?g~M^s)8S0뻞\ ԁ-nGWW˺޿֩x__%>GG/?|y_o7GGNn}>AZK_К?jM|b׻C2;?~,B|> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font <> /ExtGState << /GS1 12 0 R /GS2 13 0 R /GS257 14 0 R >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj 11 0 obj << /Type /Font /Subtype /Type1 /Name /F9 /BaseFont /Times-Italic /Encoding 9 0 R >> endobj 12 0 obj << /Type /ExtGState /CA 0.702 >> endobj 13 0 obj << /Type /ExtGState /CA 1.000 >> endobj 14 0 obj << /Type /ExtGState /ca 1.000 >> endobj xref 0 15 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000027442 00000 n 0000027525 00000 n 0000027686 00000 n 0000027719 00000 n 0000000212 00000 n 0000000292 00000 n 0000030414 00000 n 0000030508 00000 n 0000030607 00000 n 0000030707 00000 n 0000030756 00000 n 0000030805 00000 n trailer << /Size 15 /Info 1 0 R /Root 2 0 R >> startxref 30854 %%EOF brms/vignettes/me_zinb1.pdf0000644000176200001440000002005513051356434015432 0ustar liggesusers%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170216181050) /ModDate (D:20170216181050) /Title (R Graphics Output) /Producer (R 3.3.2) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj 
<< /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 4096 /Filter /FlateDecode >> stream x[͏G9Iw5D9!:!{꽚]d&vr뮮W}ퟷ?>gK{JXI{[{[nmo}ˣƃ|Bɻ?}-ݿ}~ͭսV澺I+mjRާĤK5iv.٤d@% }0v*5 {[% =ٓ sހ0L^²]hw'"d9]5tGPi5^TZ.'C@P}ړ s@} \BߧK@@YQ+ ګ7Ag]rt~>Cv1/Gٺ#4Ml.**a,dAgmc9bB9cs \t[)Ffk嗧mlفt/Eͽa@ fFHэ5gR}]q UiFS@,')dJiiqdCZ?_+'ջ8iw,G3U [q8E˲[v.x߫%yP CevO5!Om5253MieV,~^]z,e{cXFpdžO5-"P3 x7a"Ƭ@՛Ɓf/eըDY/:WOuo zDg\֫fLqݎR(Ok/e]+7 ~hi!r' ,hxQ=~ڻ<tu~XШ,8:|q+#JX8cT"3?~"Y#{FSvr/RlLJ*|H^maH*%{i(M)K Mc#5b4x"O7&7yhAd0rw6-A݉Ɣ 2W  "밹5,J)"Ğ?(`Bj'H;pFn 47H!9a՚|*Tm+c񟽸Y 6_H m/>Mls_}yK7->n6>H[A3;+H p$UCK@(H5 $5ZK@H-,wt;d؏6|*䇽ҚXpV8WЉ?|i b_9[%O#N(:}IU/ H$>Z3`24!F jQÚaRaad5&|FLCI[$_AWf_<"#x[i1/}/&[QMxL sTiHU/$_uDBS## $'PU;8" W 0RR%L~E\{#+WrzHHu@QL@C$֕ $KF@I6ɖ=hdLubdM)\W ̾^vG2Yɦiy ~dUՐ6c$z%,egW2mϳH(H֭p%V/Q:~OI5o#^GɥtGI2y}M UokB};U-/4x/Gkߗwۯү~1E槾~]y--lۡʭU1dQ 1 ;/x0g1WCL$CҍQJ(u xÅq-#:F;'ř-_S'uJ[A-wCyCzhl%]֊zib73_}ݛw'A\5].,;coSK8JcQݛωG8WV>\A&-z! Rw%y.\!7$p&qA $Hu$xyII^& TYOIS8(iXIsE*V 9s&t+XJKsɳi4ٚ@0@V!)m4Ɛ@} >5پCƌ d BnY ߞ,֋)2=&3L@V8,ZȹL z F5T\GC^5r UGaYbx-[cl-rJ9Pu@g%%pIb$Qdw&{A0ՒD"]22xrɍMcQd;ZD8/@*z[$ŁTvNR]R1At - 1ыR49)@+¿ F(>+ ,@ T547J!DDApкh9.bCeTUE5͕J{. 
*rEdЪ<* @oU*(KJPDv&p{%ږ A*@Rd}g4SK'cVdr 9@db< 02h\7%k[7bCy4-0Aă03A#ι# X^Ϊ||qB.m%Ypѝy^q"rX{f'Le!4e>eew=s>C .a{AբJ>C.G@Vq3\.&-]Bk:L5XĊ[%Ű9 (hL^fLm~eS3Ǵ>bҴ> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font << /F1 10 0 R /F7 11 0 R >> /ExtGState << /GS1 12 0 R /GS257 13 0 R /GS258 14 0 R >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F1 /BaseFont /ZapfDingbats >> endobj 11 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj 12 0 obj << /Type /ExtGState /CA 1.000 >> endobj 13 0 obj << /Type /ExtGState /ca 0.400 >> endobj 14 0 obj << /Type /ExtGState /ca 1.000 >> endobj xref 0 15 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000004460 00000 n 0000004543 00000 n 0000004707 00000 n 0000004740 00000 n 0000000212 00000 n 0000000292 00000 n 0000007435 00000 n 0000007529 00000 n 0000007613 00000 n 0000007712 00000 n 0000007761 00000 n 0000007810 00000 n trailer << /Size 15 /Info 1 0 R /Root 2 0 R >> startxref 7859 %%EOF brms/vignettes/brms_monotonic.Rmd0000644000176200001440000002040014224753353016725 0ustar liggesusers--- title: "Estimating Monotonic Effects with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes 
vignette: > %\VignetteIndexEntry{Estimating Monotonic Effects with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction This vignette is about monotonic effects, a special way of handling discrete predictors that are on an ordinal or higher scale (Bürkner & Charpentier, in review). A predictor, which we want to model as monotonic (i.e., having a monotonically increasing or decreasing relationship with the response), must either be integer valued or an ordered factor. As opposed to a continuous predictor, predictor categories (or integers) are not assumed to be equidistant with respect to their effect on the response variable. Instead, the distance between adjacent predictor categories (or integers) is estimated from the data and may vary across categories. This is realized by parameterizing as follows: One parameter, $b$, takes care of the direction and size of the effect similar to an ordinary regression parameter. If the monotonic effect is used in a linear model, $b$ can be interpreted as the expected average difference between two adjacent categories of the ordinal predictor. An additional parameter vector, $\zeta$, estimates the normalized distances between consecutive predictor categories which thus defines the shape of the monotonic effect. 
For a single monotonic predictor, $x$, the linear predictor term of observation $n$ looks as follows: $$\eta_n = b D \sum_{i = 1}^{x_n} \zeta_i$$ The parameter $b$ can take on any real value, while $\zeta$ is a simplex, which means that it satisfies $\zeta_i \in [0,1]$ and $\sum_{i = 1}^D \zeta_i = 1$ with $D$ being the number of elements of $\zeta$. Equivalently, $D$ is the number of categories (or highest integer in the data) minus 1, since we start counting categories from zero to simplify the notation. ## A Simple Monotonic Model A main application of monotonic effects are ordinal predictors that can be modeled this way without falsely treating them either as continuous or as unordered categorical predictors. In Psychology, for instance, this kind of data is omnipresent in the form of Likert scale items, which are often treated as being continuous for convenience without ever testing this assumption. As an example, suppose we are interested in the relationship of yearly income (in $) and life satisfaction measured on an arbitrary scale from 0 to 100. Usually, people are not asked for the exact income. Instead, they are asked to rank themselves in one of certain classes, say: 'below 20k', 'between 20k and 40k', 'between 40k and 100k' and 'above 100k'. We use some simulated data for illustration purposes. ```{r} income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100") income <- factor(sample(income_options, 100, TRUE), levels = income_options, ordered = TRUE) mean_ls <- c(30, 60, 70, 75) ls <- mean_ls[income] + rnorm(100, sd = 7) dat <- data.frame(income, ls) ``` We now proceed with analyzing the data modeling `income` as a monotonic effect. 
```{r, results='hide'} fit1 <- brm(ls ~ mo(income), data = dat) ``` The summary methods yield ```{r} summary(fit1) plot(fit1, variable = "simo", regex = TRUE) plot(conditional_effects(fit1)) ``` The distributions of the simplex parameter of `income`, as shown in the `plot` method, demonstrate that the largest difference (about 70% of the difference between minimum and maximum category) is between the first two categories. Now, let's compare of monotonic model with two common alternative models. (a) Assume `income` to be continuous: ```{r, results='hide'} dat$income_num <- as.numeric(dat$income) fit2 <- brm(ls ~ income_num, data = dat) ``` ```{r} summary(fit2) ``` or (b) Assume `income` to be an unordered factor: ```{r, results='hide'} contrasts(dat$income) <- contr.treatment(4) fit3 <- brm(ls ~ income, data = dat) ``` ```{r} summary(fit3) ``` We can easily compare the fit of the three models using leave-one-out cross-validation. ```{r} loo(fit1, fit2, fit3) ``` The monotonic model fits better than the continuous model, which is not surprising given that the relationship between `income` and `ls` is non-linear. The monotonic and the unordered factor model have almost identical fit in this example, but this may not be the case for other data sets. ## Setting Prior Distributions In the previous monotonic model, we have implicitly assumed that all differences between adjacent categories were a-priori the same, or formulated correctly, had the same prior distribution. In the following, we want to show how to change this assumption. The canonical prior distribution of a simplex parameter is the Dirichlet distribution, a multivariate generalization of the beta distribution. It is non-zero for all valid simplexes (i.e., $\zeta_i \in [0,1]$ and $\sum_{i = 1}^D \zeta_i = 1$) and zero otherwise. The Dirichlet prior has a single parameter $\alpha$ of the same length as $\zeta$. The higher $\alpha_i$ the higher the a-priori probability of higher values of $\zeta_i$. 
Suppose that, before looking at the data, we expected that the same amount of additional money matters more for people who generally have less money. This translates into a higher a-priori values of $\zeta_1$ (difference between 'below_20' and '20_to_40') and hence into higher values of $\alpha_1$. We choose $\alpha_1 = 2$ and $\alpha_2 = \alpha_3 = 1$, the latter being the default value of $\alpha$. To fit the model we write: ```{r, results='hide'} prior4 <- prior(dirichlet(c(2, 1, 1)), class = "simo", coef = "moincome1") fit4 <- brm(ls ~ mo(income), data = dat, prior = prior4, sample_prior = TRUE) ``` The `1` at the end of `"moincome1"` may appear strange when first working with monotonic effects. However, it is necessary as one monotonic term may be associated with multiple simplex parameters, if interactions of multiple monotonic variables are included in the model. ```{r} summary(fit4) ``` We have used `sample_prior = TRUE` to also obtain draws from the prior distribution of `simo_moincome1` so that we can visualized it. ```{r} plot(fit4, variable = "prior_simo", regex = TRUE, N = 3) ``` As is visible in the plots, `simo_moincome1[1]` was a-priori on average twice as high as `simo_moincome1[2]` and `simo_moincome1[3]` as a result of setting $\alpha_1$ to 2. ## Modeling interactions of monotonic variables Suppose, we have additionally asked participants for their age. ```{r} dat$age <- rnorm(100, mean = 40, sd = 10) ``` We are not only interested in the main effect of age but also in the interaction of income and age. Interactions with monotonic variables can be specified in the usual way using the `*` operator: ```{r, results='hide'} fit5 <- brm(ls ~ mo(income)*age, data = dat) ``` ```{r} summary(fit5) conditional_effects(fit5, "income:age") ``` ## Modelling Monotonic Group-Level Effects Suppose that the 100 people in our sample data were drawn from 10 different cities; 10 people per city. 
Thus, we add an identifier for `city` to the data and add some city-related variation to `ls`. ```{r} dat$city <- rep(1:10, each = 10) var_city <- rnorm(10, sd = 10) dat$ls <- dat$ls + var_city[dat$city] ``` With the following code, we fit a multilevel model assuming the intercept and the effect of `income` to vary by city: ```{r, results='hide'} fit6 <- brm(ls ~ mo(income)*age + (mo(income) | city), data = dat) ``` ```{r} summary(fit6) ``` reveals that the effect of `income` varies only little across cities. For the present data, this is not overly surprising given that, in the data simulations, we assumed `income` to have the same effect across cities. ## References Bürkner P. C. & Charpentier, E. (in review). [Monotonic Effects: A Principled Approach for Including Ordinal Predictors in Regression Models](https://psyarxiv.com/9qkhj/). *PsyArXiv preprint*. brms/vignettes/flowchart.pdf0000644000176200001440000013154413202254050015712 0ustar liggesusers%PDF-1.5 % 6 0 obj << /Length 1630 /Filter /FlateDecode >> stream xKs68ʓqMLfzp[#1R=N~ "!yޚR^ {DyO(+nS6w67\̭'rr J'u.iT!*Jt@f06TpD9ZK[iDlP(K9 iyCId7+Tr#.b|xe`2,֯GXXfaI[WA71j9=qPư[;, &`rEwM9_?}Ƕ~T~r84̬4)O9;~=;g}P Sei̗٬~plAY)1uMmHK40^@ Hrb\ aϊ@r "KD^RcTy D5,wH5$EDAl A!0d հ1ub6$߈eP eanʎVCwX2J9>}&h j(x-en?zc{ALvV>{} s\Q/PCR,4N0UhM`gBfq03( MTSM9dmzn2/\b3lFf3 h>86F96Ćp3ǦoDw7Vp37ZP}g%-=NC1% xj0 _7߯r:nwOv&*–D{:tlL(Yrggǖ \I/r|+Jp=>˕hKsRؙ:KK$=o#DSS sb(L Z1?фaF)M8;yv: rm_dRR!rScF8l"(kFQע![z5$X'Ì9ù=vXpA\6GWM(L^{D@NX96nmP1 #zgwr >^TS]`N %[sJ6ByTpҖ9 vx9D1 ߓV(%\*ʵj~a!rSUѱW*{aR0q0h&w*hPrj@7!SKjZ|avsFbMy\{> endobj 4 0 obj << /ColorSpace 3 0 R /Pattern 2 0 R /ExtGState 1 0 R /Font << /F8 7 0 R /F15 8 0 R /F16 9 0 R /F17 10 0 R >> /ProcSet [ /PDF /Text ] >> endobj 1 0 obj <<>> endobj 2 0 obj <<>> endobj 3 0 obj << /pgfprgb [/Pattern /DeviceRGB] >> endobj 12 0 obj [777.8 277.8 333.3 277.8 500 500 500 500 500 500 500 500 500 500 500 277.8 277.8 319.4 777.8 472.2 472.2 666.7 666.7 666.7 638.9 722.2 597.2 569.4 666.7 708.3 277.8 472.2 694.4 
541.7 875 708.3 736.1 638.9 736.1 645.8] endobj 13 0 obj [555.6 694.4 769.4 755.6 1033.3 755.6 755.6 611.1 280 544.4 280 500 277.8 277.8 486.1 555.6 444.4 555.6 466.7 305.6 500 555.6 277.8 305.6 527.8 277.8 833.3 555.6 500 555.6 527.8 427.8 394.4 390.3] endobj 14 0 obj [525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525] endobj 15 0 obj [555.6 555.6 833.3 833.3 277.8 305.6 500 500 500 500 500 750 444.4 500 722.2 777.8 500 902.8 1013.9 777.8 277.8 277.8 500 833.3 500 833.3 777.8 277.8 388.9 388.9 500 777.8 277.8 333.3 277.8 500 500 500 500 500 500 500 500 500 500 500 277.8 277.8 277.8 777.8 472.2 472.2 777.8 750 708.3 722.2 763.9 680.6 652.8 784.7 750 361.1 513.9 777.8 625 916.7 750 777.8 680.6 777.8 736.1 555.6 722.2 750 750 1027.8 750 750 611.1 277.8 500 277.8 500 277.8 277.8 500 555.6 444.4 555.6 444.4 305.6 500 555.6 277.8 305.6 527.8 277.8 833.3 555.6 500 555.6 527.8 391.7 394.4 388.9 555.6 527.8 722.2] endobj 16 0 obj << /Length1 1486 /Length2 7258 /Length3 0 /Length 8243 /Filter /FlateDecode >> stream xڍ46\{JĈ{"1bӢjjE(JkԮUUԫ'$usɪgȯh~P@a PFr<<( <`LF!^HK$@(!C ;($̓S wrFߤ$$~\ap( Іa7~ -FI @\=PN>p3 9~@\a0 9=>FCaH/p`u!0Àgm  BQnp#`]U-/A:2 V.ZS] MI,:~E:;%N9#_/S!!AlK /ܩɸ`y^~]D|vgqZEJ5 fzI`u|o`^:7NcX<̝//a1#$l$3öMKOcqA}bW` p/ֆE\vاj jqex,dZdfK@ }wDL8Yҙ:+.|%M^x-@ .!ENS4Fm AZʈc rT?F#M8?t [#ve"r'?oXH流jw=QpN]#),zg6聫$lg *?8F&߸G_jZV1F1lj%^4y|=BC@g^d_mP1fg˴Qo]vTu{Y|dI&6^8J#AOۓAIHzI-|7;v~8S ?>KiLɌ8ΥDM'\FmY$^E`2I}ۇ(=7[.e]AW$4>rZe EVKpPۨ?^'60a8]Hkhg쓃J TW].՞څu@m>Ạ̊́ FǬsDZCd#h${zw4a@ЪTn_J }E1"lX$_e.]˯h,}z826oIkZ[pتsw}72$O$*E~v!˟$o7&gBK5#_Wハ[߽*36 ;K9I \$keC볺!a֚i\;@Їw[LO毜ʲ_p'-3*=s32#vg:X-w |_ f]q(,̈K%:~taPo]EJ ?) vB=x\g!Yyt/nwKgN?#dM {wʋ3 sltw|S9a+V>1D v:G~4!O5Rz|Lrdj"alüVB]}KQ[~qM Qekc YIa{nO` \Z>ZʝNjBh&ΞgURdI]tksRrF }6ةܱ{oh<*A\x~I~63#aMY? NuA }:Be Sy kY,$߱B:_9[s]rxܙtK!曮enng±%Պ:h\]0|0a:PanvgL!E0"k#T"ʂA˪wO y "d}XM@rbZ?[7Hõ[ͮDݷڊ۱N|-NFQjPTv`XO=L9/[?ZY"\[9;BbNOלm=Ŏ?,O@zGI a2EAgOvRvjn&_%&J!5Nt(~,'Qy K(AyK40Niz('N{$͘lpa%ib. 
–l*-!x3ITOQhju;\: R!|}=z;q[|Fa=usT\E 'ɸ*퉙% ãƆpz]L㴥ƐjF/GUˉfCQN8W,"f䳞QJ\<3Kb]v<~yy$O{W8ឬO d=3C9$8O҃هtgN30/X#_6g!δ=W,PB={XRg(%4]pAC\P^DúOϖʛL6{ӂ\ai*Dӳ-gaRv]vA G|Ns^4z$7w n =/ 36:Tǽc=ĶzTkxYM0#>bX ms`,FTi~qp{Rϝ;cs\#,g@Ow(zP.VGpZ/X˰?ww \QLx VS*ϣY wI& 1Ro:ZIySܟz7v*0ףϚ~gu瑍Y!NѤd2!+3{lSzk"IX̞݉s>ed%C$Ƙe7GFvBchĄĴIP^/ [nz8VcIB06ͣH^ T~SҥxF}([;6G;OnѼhJj^!D#1W{v Ie0s4i+pv@eUnYםWDKF<jX%Ġ ]WOS蓖4ni,>E(ߑ{EOo(p)`Lۑ:nr0W6w"*0ZG)EEFfc1O)T1e9!s{]u{:%uݬ*NC^U,>A(- V!kYաİR^oVSUe(*UlZ|R|;䝬T/o!Qm\[W3GIUDpmC /'W2\Iu5/r oIS㘔'_WӠH䓩87%=q^$adlAes;1hj*`$/-yA\Axĕ+SFR(*UC@Ʌ܀δr5G5?\Iv61[J;o;@L%6w#z7'Dh ;x"Lmj.4,ѴS4֥~I P\Ph.fTRI$>6Pu-aAeeέ'f<3At ]yPxTP~[=iFxK6g#|x?%W-~x!)?2'B;Y&T8c([ӓЉ!62.psA![G"#8> `_ ӪwRk&pTO;bٖ]4++tGI{jG߳MB4Yډ <a!bAOs?+J>BFy^ .-bM^6/.ɣf p@\l9,-Z}rdgVO؂ɉ>5~jԅ˒I'^Wy>.9yaEoZ]?BZx /+{*V7xY,lOI9ӅDŽ 1:4OLpɖͺVrd#R&ՏYaX(<* -;wN}Db2+dP?diVvjϕ k#GD٩s佸D2Pw$@FLuG.-^^j64T9JGngѪbi:OjpZ(zv~lnU9u:ρjdiNPDPT#X#C/)|/J(2rYx/?2puG]oY6Wƒ;I%LD8Xsc{]ߵ5X~we*r|l0i/AH-U^g =0WǘZ2ooy|^)L) Cw4~h܇an)780YH*# yYY{In䄳B#k"gE:_ƻZn284?j+ >GdkE0#&70Xm3$w X %+B~*=ff7W1w }D@iM}-eYAu虊KƗczKˡvKVaaĄpW@s RO` ?Ɇd&tB`..ꓕC$ aCzz E1LC!(5-@NJ ؒS 穿̈́ψ}'vEPGsv择$! 
_ym;]GKJ].Ҏ{5Of2Vԩ|gj׊$-1F%S÷)T+YztGBf̒#ӚqG S'$a"f %֮S%/BH]dN {!dhgd/ݨV % "glS 1Ij0y)y"@D-ąxz~) 898<jJ[mfEQsȼUOA &)ZVEˑ*#& RR,[9}#'22PqwA^8Ķ߫N[?DAlaQNߞ߮60o{(2WEꪕ 3 N~v(ࣦg _?:9d endstream endobj 17 0 obj << /Type /FontDescriptor /FontName /NBPXCR+CMB10 /Flags 4 /FontBBox [-62 -250 1011 750] /Ascent 694 /CapHeight 686 /Descent -194 /ItalicAngle 0 /StemV 108 /XHeight 444 /CharSet (/S/a/b/m/n/r/s/t) /FontFile 16 0 R >> endobj 18 0 obj << /Length1 1777 /Length2 13171 /Length3 0 /Length 14290 /Filter /FlateDecode >> stream xڍP `ݝ@$;]8r9jf^mwPj0Y:AnLl̬ %u6V++3++;=o16/ ]&iSrl6n~6~VV;++.I3[K3@tEptvq{#ւ9@bka(#Z4-lnV͉ӓZifP]{l?>=#eQWRPaL&v.VC~_7TQdO+Cʎ 3\_lOs IS6s>nkKJ@K[ws3{_1U hjfa mA@UGW?{},>?(S|ߧ )pcعf..fG\_ z9fۻ < (!?_4XA|"wX Xa i9,mlޟsX3|OO,r,9 /_=ۿ /kKavua5bL{B3T{:tLK.(Itՙ!.bI#}h;R7ˤ/' m jO~&_ԧ'q,<$'b{q -O΋y9 U?X21_ͭ\60,Uy91=ƥ F7R/ E|*4]{)!o0Ʀ}q|KV/z  321gT۪ǀk=Ɩvsa41wjKL\UMb0j9>:n{Rz6'A-O k#ߙ"RGGwzu Y{KDg@C=x}C~?mBs_xܵ9`aʭfޝryYo!eA!1o=FM8ZrY,YMukeLBHa_vV{Se@Rt4kz%'мϧ_!1Ck_is ) 5NPWu-Oeڌ1(q NV,SY@a)Mb|BݶO#i\Pa:֍`xa8冤H13wRδLh0=׶j>1la A ;NQ 'p8C89Ẹ_ꘪRydaL4蛯b(Ku`ne`<0ܵړ='ss 1,( ֞\q4(߂'&RLHXKۏ.3Gl.s6*kj2DFMEyOpMr yT9戯~˂-" p(q%|xB4_wi^:m}wU0(lsж=J:sxTK'y  ,V7YTRy$bt,e8\ebwYU?zi)uA0_bRx}h\B*9*!`Ϥo}DU:|*ϭ53s&Ua;cEMBc0)3fPt <}C9ypjQ[Z&59XHvb1̝"|+8p:ƣfP1TU7m;xyIBO#­s 6Eɥ75̟RtiZ6]#k9,Ka`ƩƆ[3(v.j6Vj,sK/leA!J9EWEk-=7x<|J㕠މxg11Qi|{ 0ɽP|`+>v:-Ҵ;y6AЀl\87AW{eYOfEvsD [i2φHrn q<$`\ɰ Co[xKj|0(d5Yn(D ɾstםڝ|gZX1ga z# r55}~CMaKlM2TO^7'^,/i%.|)A-zlnew%`#gcjsg zg샥ia?)_G lnYV=D{ޠ^3YS'(,!e}o92@0!]9=HYI>qʷw NcSI|견Xt::s3L* ciLNQjO9ˮsYT`\8st\^hԻZ-|HZR4}٪&ίCceIihafrO\5zKQ 晞3пv(nQ/R7„4TО9TO4ujrKnToə=%t<]Ϲ6,Z#=&_4.i, Pb"m(BAtFeb_ƍK}3S#B%[&u>U9E;U$&gy>6GK' m8.71eh&EdPrL:I5ƀlq$5g% U/-‰jN_"gz"Xb`5i[owݠkDChYeF&ڰWԜ!>%~ФiFw&-+J% R, 8OLIh0G 49哕GÒi4R‘GXxu^~t+fѲɕ3՜Z݃K`Sd?NffJfr-[NI2ė4`3m6(cj6ezL{!.f\%U=ҹI>X"#)\=0pQkS_CG$e"v\]CV'竫~~HZN@f5g8?"U{R\%]'#X[83pze=h1Vw^vVU^'ap+7d~ fgRiy, Om Rni«=pz4vGl +l3C:I͋MА3͸Ndؒ3)ON>ͼD}QqS;L}[˧YرKMΏl('Ia'}2k>JǒAp# EV[rBϡv]9 + =t[Ωi =#bɛ[eKX"5@: SY<#vYYHO>oopx| _4Jt)w=GTB !bB 
h:иX4ِP~ٮR?qq0p0>p C8n~/ AUqJ=uؐx%{2x9ڠz"tkL*ϓԌ)gJ, n`D z0 uB 6m9DRm/_s4#r{FĤ8E=\y`* 0m(3M?_q-i#WѭesplGrإoZ>Sfs1uZ*&!; o8Etٍ9Kd3ҍ{'uZgHwE_N}/NgCTJF ( ~p+z'UAiϗ'̥b]oh[d=+<(=z$pDv+KbH QݭpB7y{uz)VS8'd=ƵԖQgd8qŮy-4fi)un4ZOR&Y&qH9TZԱ1U~[y niKO}p9,6|hZ5Q_:|Cvbfn3ݣ˝l$1$@|߱:@p Y}Mme2&Kc\3)59 UX|/Va틜F H..4Ii4FeLhZe^+ߓ="gokt6:0VҒ6q+̋tSL$ OF(})I wzFKiCEW-.8\ PHlݯCa2Zhria#&O#s|r83例pZ>JgZA-ߨnIGzU]ZcFvߴ(j^ŎuB,puDfj)C^6 _пq /Pk.*s'q7> eޠ޲y1Y>p t/[ h쪞W1y>FR?pIja ZDP!+" Tg\|eևGk:p0+uO0tџv\*|"q]R,$MY>?P^38 ǃt4TcƫdhBpVq'Ǥ.'eUyzqIvuFG0,Ρ3N%ֺicEs4vZX0DK4,EMb0Dw"}ʭ܃ vpU*7=Dim1.b׉S*. K-k;f%Y/4˸d/ě)=jxnMuh|jyR~4r&@y~;|NVNLMw q?Db* p6u$T{ -de6h%VڜKaTm1aȫkYudkWd_1p(E2~.]r`OdExCCkt);pciG*R5Nq?nȷ=( PteI;73l_A jؓL\nf0]1(/oeOt9؏G|{LJ=Ch>_""BePy&B5ı>ڲ .eB+K-Fe wT[)y,xK'.#[ԝ]Q~7+mVCcmJ>e9QEb%rnsÇ#5)VY~$#~4e_]0J:doOZ]0sۢHCq:ҩ!)da"<~2 |LuZ% _ۜ6%D7¹b)xbo9_v[e.h6xZ1VV#Վ.kᄨa⚃ WWrZ11ܱv}+ttGb ^Q#͝تϹ u8g +O/1}.>aE45=2 D$vmY 7b$D1LZ4?;nPL%fj T/k1LTmf"ܕc00K%\9e1ja}3OaŘTE894J2:h Y/wa%eY9!ؔ3iD?98dՑ-_.pR)gq=|b 4%'9 ae[ZYTZrexZU}0 ZecCH ԫvr Zl(.L6$Ӱk>|͇QnPUIōsnfxRYVH录N\`uyuUȑZ%-v&luӝGJ xa+{dfҁDI^^O\25;xEfv*Sh{ ir[²JZՃo(p 2To5#0- r nMpA8f}a/5ģ;F'P탋 Qj>I.CvgjQ|l/4bxvNbػՖ`Šg T>HÞIIBᇓ鼴yRbsz_D;d+-_P"<{jqw{ٙPǐAzÐ%)eTh`InuO(EԨfAb@ʦ07-*-;lb[l޽p4C5_찫{Y2!nP pV䷸ܳ,|ӌ O .up-hp9ld`6HhP> j~zd;PYyܚ]b+0e}6QQ"kop#' P:a4:3 ZRU6wϥ5%2ג@糣Cr͔kN P:F77CQBx\ZƈVUdJOfs?!t vzKl-af+SM)?Kr0gSkPOC>qֲp3yJuC]_@!ʬ.u }=|ZzĄY^J|9d̎vP#:=)uʣ3b = bԺʖo+#JL1>&B3גyV9eAݣk:n K+|'6x֩0*NvK9 WvIBd~OGˑ~P?)-B?-cB JK7Ӄ{dE?[N|YKÖ9Lvf!Q"QiVJՏj)>-ETyn!ٴ6KǠ!xC/B"?w3hvvP?%wT,D/WPNMlĺ  lhgjQG@i >Z:+d :C]37ks89GtIu~Oݵwz) @Ǻ$&6ip6|mɉ[^k&SW3mbg&DM#.f_D8UNnmsJn*(oȹQ/(Y_Wd-Ht*'T6G#uQ )2?>@SB=LudMM$TMOǴeE0a Uh;%j.+/b/y>(h]b? 
9ѰYlZ g9j`Le&:j!z&ݫrHą y|qi` 2ߤO~EJm1i%YX\7H98=Tߒ\=tm18-oy\̲`,6׮$x6⛇VR_`H$uNAtS{h{ x`|zs9kuV)~lS3[om75q WjTޙ(d5DAˆuCEa1Ϲo?~D* ϏcJ/f{k/{ŏ1lkm`H\r=G][O$P+ ϖ7>0i3̸'jJ6HJ`297 QH`u?_~ a$ًXkE^iv: `st!.Hd )i0lj8tSqNfB# n/4`ܹ=??^*phqdb/y @L[h7f&, V atzGS+9v׺0 w,Jjo&N9!';9z|*/4\54V ==j2ү*>ho{A_ʤ&]Y!PqXMUp5X>\\:ő$|H(f2n[`7wF1ca>;4qlmck' Syu{OZt\mӸaÀ`6QR'Wx#5xC[~Η-O{ ֭ NPZ,ة/{~ rP.zC3R0lqkXh(1Բfn4kdP]n _P_p ӓTw> ?f3ђKGggvGUU4~ox[!NAEٶs${$*QPإ\M"QFW[Ϲ'վ:fpol{cH@ABR[+Tm. kh;֬̑KxE,&18M 6AGhI}=$1#i:D|C}(Vux״9y֡A΂nLEOd`Bb1h~A9><Ͷ e}i!gt'_%YQ$Y1Z,MSUU`F4Ս~}"p9ex&R@^m q0iB<n͊deB G,,)m\ŝe r+ΧI_)f٬fs/x Y 0Qak lZJ"Pvw[2&AۭhxM0lO$TKw ^<0 n rE,zCQ> endobj 20 0 obj << /Length1 1423 /Length2 6170 /Length3 0 /Length 7131 /Filter /FlateDecode >> stream xڍwTk6R?t@ڂa&qh vP_%8Q(7I~~///>+p(G@ U a>P`u|(o]|'>:P7 C"n($#27cV)]]!0}\3 ۲Ѱp{@T`n\x9@P wmwPC  Cn~`OBx@w`E6( ?oG@3 _,n6skj8_#vAܬFjKϚ;GUQ\$ @QmpS _oW؍lo*țh-`p_ta""J ޿Q7) ub/oK\\<j@Hfo!o-V*̩*g칅g&]Q"(\?GʩRt&J#iv2gUrPhJ]{ymz;8àXJ"n.{UXjJf䪩& 3&Ӣe7oǚ0,>Ǔ"R= T @Ɵ7Q,ŧ}'+EzGhy]j2_52hl"' B#h}vfxbDc~okAiXàQC#. WowMjk,lJ&go/(!9R6E'Ϭ&D`r-0UR\}U:KϛM_g%sRiy[ Ykk>=KQXHqEGb{UM5 Jѡ bz["WrC-8,|j^IDh*]Ջ;(ﭵlyK)06xSӻjA/C8lȤ!ΐͭ&t`)ŒsM%x7rZXsb‰Toh5C5;,36_:@r`Ξly*^y ے> pۑKdH xԯl'}h`&Ĝ-݆49љ+h|qҞɹ6";={%(oSǜR 'v۬?+nPllz|FYn>9QMeg0 ?OLz!$VT.Iݚ+B`Ia}gpv@0nQ.-,ZOl3&tNiDݾ^M#Ve AbX5kTz؉(;{)Y\oD}Cj\wdp+"Ri 4T7O,I-K%uGhضeh)cͳ犃CYf4~adF;!Pi#{=UѼXj964ԟSؒ捞-Vіmm$Ga)^/'-m6not<8?tTlr՞r_-\t`Wg66?-_=7|lLJt)>-N&Ae#)~ִ9&0VjP+ h\/#4a$ÚݽYaẓ_Zz-snJM].0ƭM( I٢MGQiu}>g3 INx҉vE%ֺJ% 2SrJzW> }=ֻaKI<B| WrIVd|I%up[ .i! ^EN &O-kV41rI\XߐٜζrrS$v~Tv~M@Y:NC.E8֭q-js!34PVb3C9h:>զM/tujZa-zbUBF'*5ۮr;>5qf rBdg 㬧ckl[?O7$#Q+ceAUj.!X[WPn2GdE- >\މYfWwi=< 8:|HR cyɦ;R۴ӠFyND 27c{,wO%6ƺkw2 1$Q4#"r$16p4^?WokD+=NJZ쉴27ӊ_~O ()0hK 9C+CT|B9o2А9\$>箴P3U*4llGt a;o_+T'zW ) 6uEA~>e-.Qu.7g_%Hr!_|O<4زSf-]R1U<Í])+&)D[6nYaˈުb}= $dZI#uِqF4ިɸ&@%4G+Oޘ ǖͩ %;zg;Հm)15eRaǧHB36YZ*kMϫC Ρ_!|m=#+&z 'c _4YwwP*³#оVdÇ0c^? 
M`gky*G @YE ]-OsO//^NeYg:RRT}{NV_vjmG:w02#ϝpHBRA·5ߔ}!n@=橽R̪zR"mo,~qzCt.95f-upNlycNwgš:Gj QY?&-cK'*پu>{d>UΕ#y)˜QZUڒ{ԠTț!цiUhzދPF2N!sJX}/Q,xpKHAYQV^5MqNG-UAp27mIqE~ϳz`L%̉7Zeqg.UicS $P>yg3n,nMR5A2']A5ߕ7zSh .9TO6+YVΆg[[2ݽcJ/j|ͲI>"Kw?^m.eguE ଅe vfհA49a\4k'/.xOlymP4-5vK“ZKԉ'&,w=j2W`}Um[$ LҴIGj3hc}9[T)ԣ*d &O{,°/ڃ a#[#,墧tn@UkJ@zcXH"](޷@Є\KV〚sq>LaqAf|2  .YM"R,bT \BS2ibb+z ԫeM32gaC#Y=,pVYфA 񚐐"#S+?*7<; LMS{dG-d{7%2Bg5SI,DpS>ٖa{*c(ֽ!ӛEw_ѯV#@bN7yLᙕYԧ{*po\->>Au3XO$G|Oأ qJjo|BUAsb`uFgWhҺMi mRJ[+i=nd-HX+);H4Mq"s\#"]wưRHGsCo吗g RGFR-10l擛ڙP&n>INq +e􊘛j  TV.(Oy'7x/f^Mh)LZcos)>:j$zRF%YUkld)~«9z9=?7Q*.~ (HX!uUd>D R]B jڬ?Y]~T]CMJτ"]0i\dl7"Ghe[|G.oQѡλ N<fyXV=W3UYt\;=YJxry!xgD5Oꟁc`=i:Bo+ /IO~Cأ2N:_-JÐgߧx\O}Wth# N; B/z ֱ_Ə{,t$y:"A˲g J]iH *]&Y]ҪQ3wi$/*}Tgsu#|xgVg#loDU%mĚͽ1s"uw/j*ThFM/ozIfT՗_!M=GٚYXu2/ 2 i&rw oJ8t'X ?1S1/1eB/sÓlg7ll_>y$#E)]⤃o\\դMMQ2 =cIꪴ16a {|lewžGFߟq!cF)ެbx/T92RR߶G Qc x~BИ e'ɦ8A.lNN}>9i8DFղ6LvG{~hJɧF|_aj`anPX>[Xbݔ9q]R=3GK):$?TYབl*@١폅rA?pUy~dF_sY:3۲QA=/@i;1h0uP7k^\ L~]nĖ:k^ya^\f=/2z N Br>_"joCYo9DI{:#V]&s(Io4\5s͝ *~S +(Pi]_h#ѵΫҔg@J췍qaB Ű;垖SS:XJ!/Q&m]+V ' {%yH X e`]`Cx.]#wͨqF?y>;2?hc(iKekʒ~x 0W_p:۹S= gCDDHnK濯yP=ce5X iFx;_7ٵsD[&a@PiuRU@bQ;!Æ*AѧROD9V=$/=7?0}= o5_L(j^-&<8٦ M":/ZgS}?71f~D,dj~/zv̊ endstream endobj 21 0 obj << /Type /FontDescriptor /FontName /MNZTUE+CMSS10 /Flags 4 /FontBBox [-61 -250 999 759] /Ascent 694 /CapHeight 694 /Descent -194 /ItalicAngle 0 /StemV 78 /XHeight 444 /CharSet (/C/R/plus) /FontFile 20 0 R >> endobj 22 0 obj << /Length1 1553 /Length2 9000 /Length3 0 /Length 10018 /Filter /FlateDecode >> stream xڍP\-]4@c{ݝ,[ЄGsUծ5sUSUQg5 vLl̬qE 6V++3++;J +Bt~I_!v9w đ tr;8laO)t-l (-A/M6u)_%h-YXܘNG !:Fr9)1#Q4,N:!n@G`69ؙ/ e{ݟ 03?]lG2bkY6 3#hg;hy6@?H/ dwvbv6KڙClmAvNH'v_kmq0sgѴ;d%y1!frpr@%?l/|! 
9t]@>^v7BbcM& AwY_`e038bqMuMuY(8L.77翫oXUzvߟ^ow%ȋA=+UI3ڂm6`; 0eL_z(ig 1=z\#E_\\/5!m %`qD}|<oӟb`1^ٿ /R_%?E,v/AN _/./|ٖ72EZ [U1LNcZrlwy@CH \w!4܃-I{#L먹!%A(Nmziq o`HICdV5l3tU /J>[{mXn1 Gs|`x?VEHIG@{56M%,U\n!j}{ ,>Ok-KN/ b掺=}trSaPm#k ܍эIͤ爾"jؚ0Ex]./^V3e.hHIi FɌ0Cud eZH.}l_m+̋P=ثGsBˤq&gd;O}I^U)lyd~lq%@%%$y`E o鳁(s|2l;Yw\Wm~ D VIAXfDGS DZy% ;Ty 2eRZQ4_ ^iZɉ][ bLtK !\Pwʩ^a<3MRlE(״T^fPݤZfCˆȧ S<0cC 4W$7݇qjj ~Q\l2Qj2†A.E!dr Má1ZgQcMIg\.n< aNB&Uۦ= Ӡo͠`fJn(1+{EsHpf:3Ӊ_ٿcvL}uKsh,-vԴ/2JWG⫉ۭ6P]&o yn %pQ4;==(1ev2@61/?cH)&_G}WּCfty1˱+ $*ccb`Z~~fc<_~L?,iNLRSDzuvٖ ɨɚcfrB>o&ݭ^+q/0f,0_d͖Nh=i_6K͔60%~ԇ2 r2[k"ӢU[- kwy`%QobPWHܗoRA[ro~y ~d$l̐29\:L|]Yc_rlO9Y!nW8)=+lé*`%AM~ >>e}s.GVثf_Iک mVOkpQ=Y3eh:(?,A-*NFl| ]ZgݫJf6*GPqI·SQqګ[SO4g~Vr%oYhB1X{澕iGP.ܥs2$n.m"-aB,`m-_g+,$ B<Ǧ~ eP408avMJSilQ{~!/=k*3=/s`FzHNZ_G-@ B(%z"+ i%[~' O&A?#vf8(^)u(W%LooЃj~2j;w+"Tr.3o[BpL?|@C3՘Nb b?0vY鼜-4> i](cNFTD)5͉A)t:$ CP.-xpYĀA?ⷰן;c4* GB{SrS6<h Y퀷x~Rvp~AWJv]'4\T!k.|H%FiGFY:>}C6{ċ hW1jhS" ˘H>$cD,grbi{uEz2{$MxS*w2 x\;xxTӷI }8T`dݒ.A֊!up߱r0덧KAIbd?"ؽt`qڱkM+0fHu*٫k(]c>WxIwjXSRV&2 fi2`R :Kʅ < M 3!7yCʝe4VN. x)q<! ^̻)iRw>7AThXfNxJ E\Ҿ~=M !{$ɬY1tĶ'M$P[4M'L!;9Rˇp\hbF<&I/Lmw<':ɹ&. 
ͨl21vY{ '"/:hy y?h$GV+r2gTuy reΔ7˻ uڠ7sSN%sZ)twWrZu]l?(놄3LTш:Mfrګ-l|Rt'\!!p8m>ެ\nDYkZ|K9Ci4#r_i,C1kd8NjN>F#>=q0u[UMӋȤ󌩵k߁E$ּ/=wD۲3oQ5"J)g?AoPKvצ >)"ifs,.1fn tכA ׊qY-D]-<4wŧDBSt?HۺdNp?(J2ka5cqDhםqǐLo@BR>jwSw6{kN( M)~ vЎ ]VVg;S!ғ+ z,lY)ɾ:m9>!,n7c /PiE[74$Baۡ#~4W@qL{ "DnV s:l47S 'iϮAF Cd"2#D2 r/R0w ^~kXs作Rˠu 4ּj铁MvK SՀ2 {$]YypƎ[3512k୶NBA,Ƒqrx@X /TCb;J(5¯XS O~)%}A]v/T`9q.$̉ޛut'Vqdrp$MߩaVtxI~NFHVT)dxW'T kP u"tߍUwݧmn/չ#vAݝo9_jX`/NM˜fZ:)4bpNdcGZ/;6 Z΍++TW-IQ_P1'۳Q:_1+qkw1/% f]'lieOz+FQ .uKx"iQL CvC(4ɔcVk 7T8߼+^+{*K9_ h, {j}U 巾wo>H&5%R7,`6~qPewb* ^ 樹7JVp}&>Iԁyo/E*JFAg#.%J F#Á+ě!^D'$ 'gYeG c~ًvκbh ^(YhiƤ1EcLa:W߮ӟy6򊍴f^EܐNϹ"7 ]*fC$a6Qù yTCFķhͮ|BEul'+@ (NbQsXK<2DVkWJ 1y;;Դ#d[A56b/?tc\<8*1Mm.Ӣ㒢o!AF*_LaJN1.^ ʣ㬡[B6j.oLBeO(2Ndc`C(&->7ҷaB3WJiۙ*L麤6V Y(ZsA k8߉sj⯟H>-3`䖧n[XHb7 æ,ÕX&Σ,}"P@G9`&F6((eT?UmZ:\`f$TP-<0L,T'Lz@3dw5!MDns$sqj$˜e4%y}+y h}uzcPO ixé$]"FȃafJJ6x(kmQnL /JM6B}oFHz38v{@ScX ,B q(s?ʡݒ7*( 4gGRPq%{9!yxPmhAkUGs%GKl9(䰘GY,!sLS2A զu10q䡩OkP|c3MFR)=J=_g>mfc7jӓdD(g#c['ZwԃpM$2||=;ԝA$+^6*)˘RÃgIJj*L v_Z-6 ZpǕ+ &p ulHp 48Mq@{IU%xRƄhID 3uT <~&+~FkYQ^![@~3z*86fi{=UJdWly2/:t3^1@Gӂ^,~ =d7&3VIָj`VU5J ] V.8>J娇^gϓY~(>ZERw֛]z/ӉM0o1:F}1G'zEp't3ѹ+[Wt]{-iaB5DHr fk}mf*i>9Kcp]lu5BW6ͽ'Աu{ISބ,7)jl`x#|嚮nVe`b "Ʀ JUn7Fo9C&ئZftq[Gη<0í0XWaCZv +cͼv\c|CbOѻ J bsfwC;@+TC$JZ-q?܏ G]J.SIҋ*9/|E4®C֍mnV6/B0ʬ+ ۑf߬&Mեx d`/Ň'8(g٧d7V폸!XW?VR(Uv;Mqz=r Q\}e`ϔv r_$yRE .c-&N9(!BV5oea;U6ryxljƭm LPu&٭^WyGa\qRrlA"*'ɁI,}C,n%6[r2uT~; Vuщeuey}%a+B}<޷=k$UoKy~h;6L +7Z1|ڽA *'.{"}fjo%MһZd:MUi׎D:dm[$:4$qK ^xswwAq&blvZ.fD7l'MEŞ7wQ&G#+5~)l6@4K"}ƅOЏfU|^ RsݴYWTQR6ǦH:!%]Fa)A+St2ydU515|қ)LZ/WԦAZiBKyNkgؙ{:O1_ ^͡e9b lh1AT?}YXW_E+Bhb˿dɬ4*U4 M+DDpPF5*~n4)U^#uS$ ۺviU?2uZ]g#箊@qYFq޸ߨwsݰ2\p)kSm4Cn^sl|M(PVkBYq~tCnzYE>^gY!DiDJ$I{d$z^(Gơgw lhMgv{j+EEWQ>\I\WHTTQsIӎyq}kq~[[,XDd]EZz}"]661neQ[uz8n(hg>Qt V"C *8{27 3Q\Anϓh)pX0UZ(vCnbqzU&STN@SN3u Af" :mR xaCv=F`?pnw$Yb%P{ %=;zZtg0&L~č-ДoQ뛬Y?65 %zf0.}4Rfei],\ UpTwH~|?t&Ėu;ye7@mj%D?ߑ]5>kdčZc6/},~?Q 1oXd+# ~D Y/(;& |#V|mĺ \V\jz` SOAA2;acrwěohGqpfx`RC΅! /C #EwI8w(ޝ6x ? 
]'QSZ\A1 vu}1q tQFJ2J4w=dðP> endobj 9 0 obj << /Type /Font /Subtype /Type1 /BaseFont /NBPXCR+CMB10 /FontDescriptor 17 0 R /FirstChar 83 /LastChar 116 /Widths 13 0 R >> endobj 7 0 obj << /Type /Font /Subtype /Type1 /BaseFont /RMUKLW+CMR10 /FontDescriptor 19 0 R /FirstChar 12 /LastChar 119 /Widths 15 0 R >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /BaseFont /MNZTUE+CMSS10 /FontDescriptor 21 0 R /FirstChar 43 /LastChar 82 /Widths 12 0 R >> endobj 8 0 obj << /Type /Font /Subtype /Type1 /BaseFont /CUSUSI+CMTT10 /FontDescriptor 23 0 R /FirstChar 97 /LastChar 116 /Widths 14 0 R >> endobj 11 0 obj << /Type /Pages /Count 1 /Kids [5 0 R] >> endobj 24 0 obj << /Type /Catalog /Pages 11 0 R >> endobj 25 0 obj << /Producer (MiKTeX pdfTeX-1.40.14) /Creator (TeX) /CreationDate (D:20151111142744+01'00') /ModDate (D:20151111142744+01'00') /Trapped /False /PTEX.Fullbanner (This is MiKTeX-pdfTeX 2.9.4902 (1.40.14)) >> endobj xref 0 26 0000000000 65535 f 0000001990 00000 n 0000002010 00000 n 0000002030 00000 n 0000001837 00000 n 0000001724 00000 n 0000000015 00000 n 0000044497 00000 n 0000044776 00000 n 0000044358 00000 n 0000044636 00000 n 0000044916 00000 n 0000002083 00000 n 0000002317 00000 n 0000002530 00000 n 0000002628 00000 n 0000003226 00000 n 0000011588 00000 n 0000011820 00000 n 0000026230 00000 n 0000026509 00000 n 0000033759 00000 n 0000033983 00000 n 0000044120 00000 n 0000044974 00000 n 0000045025 00000 n trailer << /Size 26 /Root 24 0 R /Info 25 0 R /ID [<6480FFBB824EB7A26FB56374AB07DD8B> <6480FFBB824EB7A26FB56374AB07DD8B>] >> startxref 45247 %%EOF brms/vignettes/me_loss1.pdf0000644000176200001440000001416013155225616015452 0ustar liggesusers%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170910140948) /ModDate (D:20170910140948) /Title (R Graphics Output) /Producer (R 3.4.1) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 2231 /Filter 
/FlateDecode >> stream xXˎ]߯RZ"כֿĀHf, DHjŖ%$RC>6Sϟj"1 C#]ki#_"{K~%Z"F?ڌezۭ'}q]c$Z3+qjCшi6t<Њf4rz*J+צ'(asQu)Qf3V]}V1s,KwE2g)eF*1׀OѷKw@CZR[?9p[ip9-#Ÿ,qb_ BM{?zv;".>(sߞc)hEA W'm`T{8Oy$R'+_|ݪa Ly'x 6:L,gw ӚNk_2\]Vqx.?h"`=DS<_~W ĵxdzb',xWɰɣEכ_5Ϋ. b۸bN񔔘?Ά~#-hXї.b-_Tʅi^+_S\!_QLpy<&}|mo~} ,;^gxr~Xa|.coJ<8Og>zKy0k'_=ׯOg#ߡK=9a%CoF+[׭Wy=p=}w{C]/~CO[zumL=Xfz PO]auz>^-(OXrv˛.;:ʣnci+]U~>g7g]vyG}忻@vy y~8#/N^~=B{ih%\ Wc{A6t{7\TmGg~^WGϹSvsfޢc;9,>톶67=}]r{L{4v0^?|/`_Qrt}j9Jj>,G)ɑ|__p ᖑO=?~h4CggV-G9!}`N3]3#vLh\,#+*`ji`^q'F>\ݟ# 1w[/ZJO^]qsnk]|dvke'OI<xK~Iǖ6׫^&<=wYKVuTw|ة?mendstream endobj 3 0 obj << /Type /Pages /Kids [ 7 0 R ] /Count 1 /MediaBox [0 0 288 216] >> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font <> /ExtGState << /GS1 11 0 R /GS257 12 0 R /GS258 13 0 R >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj 11 0 obj << /Type /ExtGState /CA 1.000 >> endobj 12 0 obj << /Type /ExtGState /ca 0.400 >> endobj 13 0 obj << /Type /ExtGState /ca 1.000 >> endobj xref 0 14 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000002595 00000 n 0000002678 00000 n 0000002830 00000 n 0000002863 00000 n 0000000212 00000 n 0000000292 00000 n 0000005558 00000 n 0000005652 00000 
n 0000005751 00000 n 0000005800 00000 n 0000005849 00000 n trailer << /Size 14 /Info 1 0 R /Root 2 0 R >> startxref 5898 %%EOF brms/vignettes/brms_distreg.Rmd0000644000176200001440000002521614224753311016365 0ustar liggesusers--- title: "Estimating Distributional Models with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Estimating Distributional Models with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction This vignette provides an introduction on how to fit distributional regression models with **brms**. We use the term *distributional model* to refer to a model, in which we can specify predictor terms for all parameters of the assumed response distribution. In the vast majority of regression model implementations, only the location parameter (usually the mean) of the response distribution depends on the predictors and corresponding regression parameters. Other parameters (e.g., scale or shape parameters) are estimated as auxiliary parameters assuming them to be constant across observations. This assumption is so common that most researchers applying regression models are often (in my experience) not aware of the possibility of relaxing it. This is understandable insofar as relaxing this assumption drastically increase model complexity and thus makes models hard to fit. 
Fortunately, **brms** uses **Stan** on the backend, which is an incredibly flexible and powerful tool for estimating Bayesian models so that model complexity is much less of an issue. Suppose we have a normally distributed response variable. Then, in basic linear regression, we specify a predictor term $\eta_{\mu}$ for the mean parameter $\mu$ of the normal distribution. The second parameter of the normal distribution -- the residual standard deviation $\sigma$ -- is assumed to be constant across observations. We estimate $\sigma$ but do not try to *predict* it. In a distributional model, however, we do exactly this by specifying a predictor term $\eta_{\sigma}$ for $\sigma$ in addition to the predictor term $\eta_{\mu}$. Ignoring group-level effects for the moment, the linear predictor of a parameter $\theta$ for observation $n$ has the form $$\eta_{\theta n} = \sum_{i = 1}^{K_{\theta}} b_{\theta i} x_{\theta i n}$$ where $x_{\theta i n}$ denotes the value of the $i$th predictor of parameter $\theta$ for observation $n$ and $b_{\theta i}$ is the $i$th regression coefficient of parameter $\theta$. A distributional normal model with response variable $y$ can then be written as $$y_n \sim \mathcal{N}\left(\eta_{\mu n}, \, \exp(\eta_{\sigma n}) \right)$$ We used the exponential function around $\eta_{\sigma}$ to reflect that $\sigma$ constitutes a standard deviation and thus only takes on positive values, while a linear predictor can be any real number. ## A simple distributional model Unequal variance models are possibly the most simple, but nevertheless very important application of distributional models. Suppose we have two groups of patients: One group receives a treatment (e.g., an antidepressive drug) and another group receives placebo. Since the treatment may not work equally well for all patients, the symptom variance of the treatment group may be larger than the symptom variance of the placebo group after some weeks of treatment. 
For simplicity, assume that we only investigate the post-treatment values. ```{r} group <- rep(c("treat", "placebo"), each = 30) symptom_post <- c(rnorm(30, mean = 1, sd = 2), rnorm(30, mean = 0, sd = 1)) dat1 <- data.frame(group, symptom_post) head(dat1) ``` The following model estimates the effect of `group` on both the mean and the residual standard deviation of the normal response distribution. ```{r, results='hide'} fit1 <- brm(bf(symptom_post ~ group, sigma ~ group), data = dat1, family = gaussian()) ``` Useful summary statistics and plots can be obtained via ```{r, results='hide'} summary(fit1) plot(fit1, N = 2, ask = FALSE) plot(conditional_effects(fit1), points = TRUE) ``` The population-level effect `sigma_grouptreat`, which is the contrast of the two residual standard deviations on the log-scale, reveals that the variances of both groups are indeed different. This impression is confirmed when looking at the `conditional_effects` of `group`. Going one step further, we can compute the residual standard deviations on the original scale using the `hypothesis` method. ```{r} hyp <- c("exp(sigma_Intercept) = 0", "exp(sigma_Intercept + sigma_grouptreat) = 0") hypothesis(fit1, hyp) ``` We may also directly compare them and plot the posterior distribution of their difference. ```{r} hyp <- "exp(sigma_Intercept + sigma_grouptreat) > exp(sigma_Intercept)" (hyp <- hypothesis(fit1, hyp)) plot(hyp, chars = NULL) ``` Indeed, the residual standard deviation of the treatment group seems to larger than that of the placebo group. Moreover the magnitude of this difference is pretty similar to what we expected due to the values we put into the data simulations. ## Zero-Inflated Models Another important application of the distributional regression framework are so called zero-inflated models. These models are helpful whenever there are more zeros in the response variable than one would naturally expect. 
For example, if one seeks to predict the number of cigarettes people smoke per day and also includes non-smokers, there will be a huge amount of zeros which, when not modeled appropriately, can seriously distort parameter estimates. Here, we consider an example dealing with the number of fish caught by various groups of people. On the UCLA website (\url{https://stats.idre.ucla.edu/stata/dae/zero-inflated-poisson-regression}), the data are described as follows: "The state wildlife biologists want to model how many fish are being caught by fishermen at a state park. Visitors are asked how long they stayed, how many people were in the group, were there children in the group and how many fish were caught. Some visitors do not fish, but there is no data on whether a person fished or not. Some visitors who did fish did not catch any fish so there are excess zeros in the data because of the people that did not fish." ```{r} zinb <- read.csv("https://paul-buerkner.github.io/data/fish.csv") head(zinb) ``` As predictors we choose the number of people per group, the number of children, as well as whether the group consists of campers. Many groups may not even try catching any fish at all (thus leading to many zero responses) and so we fit a zero-inflated Poisson model to the data. For now, we assume a constant zero-inflation probability across observations. ```{r, results='hide'} fit_zinb1 <- brm(count ~ persons + child + camper, data = zinb, family = zero_inflated_poisson()) ``` Again, we summarize the results using the usual methods. ```{r} summary(fit_zinb1) plot(conditional_effects(fit_zinb1), ask = FALSE) ``` According to the parameter estimates, larger groups catch more fish, campers catch more fish than non-campers, and groups with more children catch less fish. The zero-inflation probability `zi` is pretty large with a mean of 41%. 
Please note that the probability of catching no fish is actually higher than 41%, but parts of this probability are already modeled by the Poisson distribution itself (hence the name zero-*inflation*). If you want to treat all zeros as originating from a separate process, you can use hurdle models instead (not shown here). Now, we try to additionally predict the zero-inflation probability by the number of children. The underlying reasoning is that we expect groups with more children to not even try catching fish. Most children are just terribly bad at waiting for hours until something happens. From a purely statistical perspective, zero-inflated (and hurdle) distributions are a mixture of two processes and predicting both parts of the model is natural and often very reasonable to make full use of the data. ```{r, results='hide'} fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child), data = zinb, family = zero_inflated_poisson()) ``` ```{r} summary(fit_zinb2) plot(conditional_effects(fit_zinb2), ask = FALSE) ``` To transform the linear predictor of `zi` into a probability, **brms** applies the logit-link: $$logit(zi) = \log\left(\frac{zi}{1-zi}\right) = \eta_{zi}$$ The logit-link takes values within $[0, 1]$ and returns values on the real line. Thus, it allows the transition between probabilities and linear predictors. According to the model, trying to fish with children not only decreases the overall number fish caught (as implied by the Poisson part of the model) but also drastically increases your change of catching no fish at all (as implied by the zero-inflation part) most likely because groups with more children are not even trying. ## Additive Distributional Models In the examples so far, we did not have multilevel data and thus did not fully use the capabilities of the distributional regression framework of **brms**. 
In the example presented below, we will not only show how to deal with multilevel data in distributional models, but also how to incorporate smooth terms (i.e., splines) into the model. In many applications, we have no or only a very vague idea how the relationship between a predictor and the response looks like. A very flexible approach to tackle this problems is to use splines and let them figure out the form of the relationship. For illustration purposes, we simulate some data with the **mgcv** package, which is also used in **brms** to prepare smooth terms. ```{r} dat_smooth <- mgcv::gamSim(eg = 6, n = 200, scale = 2, verbose = FALSE) head(dat_smooth[, 1:6]) ``` The data contains the predictors `x0` to `x3` as well as the grouping factor `fac` indicating the nested structure of the data. We predict the response variable `y` using smooth terms of `x1` and `x2` and a varying intercept of `fac`. In addition, we assume the residual standard deviation `sigma` to vary by a smoothing term of `x0` and a varying intercept of `fac`. ```{r, results='hide'} fit_smooth1 <- brm( bf(y ~ s(x1) + s(x2) + (1|fac), sigma ~ s(x0) + (1|fac)), data = dat_smooth, family = gaussian(), chains = 2, control = list(adapt_delta = 0.95) ) ``` ```{r} summary(fit_smooth1) plot(conditional_effects(fit_smooth1), points = TRUE, ask = FALSE) ``` This model is likely an overkill for the data at hand, but nicely demonstrates the ease with which one can specify complex models with **brms** and to fit them using **Stan** on the backend. 
brms/vignettes/brms_multilevel.ltx0000644000176200001440000016555114213413565017204 0ustar liggesusers\documentclass[article, nojss]{jss} %\VignetteIndexEntry{Multilevel Models with brms} %\VignetteEngine{R.rsp::tex} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% almost as usual \author{Paul-Christian B\"urkner} \title{Advanced Bayesian Multilevel Modeling with the \proglang{R} Package \pkg{brms}} %% for pretty printing and a nice hypersummary also set: \Plainauthor{Paul-Christian B\"urkner} %% comma-separated \Plaintitle{Advanced Bayesian Multilevel Modeling with the R Package brms} %% without formatting \Shorttitle{Advanced Bayesian Multilevel Modeling with \pkg{brms}} %% a short title (if necessary) %% an abstract and keywords \Abstract{ The \pkg{brms} package allows R users to easily specify a wide range of Bayesian single-level and multilevel models, which are fitted with the probabilistic programming language \proglang{Stan} behind the scenes. Several response distributions are supported, of which all parameters (e.g., location, scale, and shape) can be predicted at the same time thus allowing for distributional regression. Non-linear relationships may be specified using non-linear predictor terms or semi-parametric approaches such as splines or Gaussian processes. Multivariate models, in which each response variable can be predicted using the above mentioned options, can be fitted as well. To make all of these modeling options possible in a multilevel framework, \pkg{brms} provides an intuitive and powerful formula syntax, which extends the well known formula syntax of \pkg{lme4}. The purpose of the present paper is to introduce this syntax in detail and to demonstrate its usefulness with four examples, each showing other relevant aspects of the syntax. If you use \pkg{brms}, please cite this article as published in the R Journal \citep{brms2}. 
} \Keywords{Bayesian inference, multilevel models, distributional regression, MCMC, \proglang{Stan}, \proglang{R}} \Plainkeywords{Bayesian inference, multilevel models, distributional regression, MCMC, Stan, R} %% without formatting %% at least one keyword must be supplied %% publication information %% NOTE: Typically, this can be left commented and will be filled out by the technical editor %% \Volume{50} %% \Issue{9} %% \Month{June} %% \Year{2012} %% \Submitdate{2012-06-04} %% \Acceptdate{2012-06-04} %% The address of (at least) one author should be given %% in the following format: \Address{ Paul-Christian B\"urkner\\ E-mail: \email{paul.buerkner@gmail.com}\\ URL: \url{https://paul-buerkner.github.io} } %% It is also possible to add a telephone and fax number %% before the e-mail in the following format: %% Telephone: +43/512/507-7103 %% Fax: +43/512/507-2851 %% for those who use Sweave please include the following line (with % symbols): %% need no \usepackage{Sweave.sty} %% end of declarations %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} %% include your article here, just as usual %% Note that you should use the \pkg{}, \proglang{} and \code{} commands. \section{Introduction} Multilevel models (MLMs) offer great flexibility for researchers across sciences \citep{brown2015, demidenko2013, gelmanMLM2006, pinheiro2006}. They allow modeling of data measured on different levels at the same time -- for instance data of students nested within classes and schools -- thus taking complex dependency structures into account. It is not surprising that many packages for R have been developed to fit MLMs. Usually, however, the functionality of these implementations is limited insofar as it is only possible to predict the mean of the response distribution. Other parameters of the response distribution, such as the residual standard deviation in linear models, are assumed constant across observations, which may be violated in many applications. 
Accordingly, it is desirable to allow for prediction of \emph{all} response parameters at the same time. Models doing exactly that are often referred to as \emph{distributional models} or more verbosely \emph{models for location, scale and shape} \citep{rigby2005}. Another limitation of basic MLMs is that they only allow for linear predictor terms. While linear predictor terms already offer good flexibility, they are of limited use when relationships are inherently non-linear. Such non-linearity can be handled in at least two ways: (1) by fully specifying a non-linear predictor term with corresponding parameters each of which can be predicted using MLMs \citep{lindstrom1990}, or (2) estimating the form of the non-linear relationship on the fly using splines \citep{wood2004} or Gaussian processes \citep{rasmussen2006}. The former are often simply called \emph{non-linear models}, while models applying splines are referred to as \emph{generalized additive models} (GAMs; \citeauthor{hastie1990}, \citeyear{hastie1990}). Combining all of these modeling options into one framework is a complex task, both conceptually and with regard to model fitting. Maximum likelihood methods, which are typically applied in classical 'frequentist' statistics, can reach their limits at some point and fully Bayesian methods become the go-to solutions to fit such complex models \citep{gelman2014}. In addition to being more flexible, the Bayesian framework comes with other advantages, for instance, the ability to derive probability statements for every quantity of interest or explicitly incorporating prior knowledge about parameters into the model. The former is particularly relevant in non-linear models, for which classical approaches struggle more often than not in propagating all the uncertainty in the parameter estimates to non-linear functions such as out-of-sample predictions. 
Possibly the most powerful program for performing full Bayesian inference available to date is Stan \citep{stanM2017, carpenter2017}. It implements Hamiltonian Monte Carlo \citep{duane1987, neal2011, betancourt2014} and its extension, the No-U-Turn (NUTS) Sampler \citep{hoffman2014}. These algorithms converge much more quickly than other Markov-Chain Monte-Carlo (MCMC) algorithms especially for high-dimensional models \citep{hoffman2014, betancourt2014, betancourt2017}. An excellent non-mathematical introduction to Hamiltonian Monte Carlo can be found in \citet{betancourt2017}. Stan comes with its own programming language, allowing for great modeling flexibility \cite{stanM2017, carpenter2017}). Many researchers may still be hesitent to use Stan directly, as every model has to be written, debugged and possibly also optimized. This may be a time-consuming and error-prone process even for researchers familiar with Bayesian inference. The \pkg{brms} package \citep{brms1}, presented in this paper, aims to remove these hurdles for a wide range of regression models by allowing the user to benefit from the merits of Stan by using extended \pkg{lme4}-like \citep{bates2015} formula syntax, with which many R users are familiar with. It offers much more than writing efficient and human-readable Stan code: \pkg{brms} comes with many post-processing and visualization functions, for instance to perform posterior predictive checks, leave-one-out cross-validation, visualization of estimated effects, and prediction of new data. The overarching aim is to have one general framework for regression modeling, which offers everything required to successfully apply regression models to complex data. 
To date, it already replaces and extends the functionality of dozens of other R packages, each of which is restricted to specific regression models\footnote{Unfortunately, due to the implementation via Stan, it is not easily possible for users to define their own response distributions and run them via \pkg{brms}. If you feel that a response distribution is missing in \pkg{brms}, please open an issue on GitHub (\url{https://github.com/paul-buerkner/brms}).}. The purpose of the present article is to provide an introduction of the advanced multilevel formula syntax implemented in \pkg{brms}, which allows to fit a wide and growing range of non-linear distributional multilevel models. A general overview of the package is already given in \citet{brms1}. Accordingly, the present article focuses on more recent developments. We begin by explaining the underlying structure of distributional models. Next, the formula syntax of \pkg{lme4} and its extensions implemented in \pkg{brms} are explained. Four examples that demonstrate the use of the new syntax are discussed in detail. Afterwards, the functionality of \pkg{brms} is compared with that of \pkg{rstanarm} \citep{rstanarm2017} and \pkg{MCMCglmm} \citep{hadfield2010}. We end by describing future plans for extending the package. \section{Model description} \label{model} The core of models implemented in \pkg{brms} is the prediction of the response $y$ through predicting all parameters $\theta_p$ of the response distribution $D$, which is also called the model \code{family} in many R packages. We write $$y_i \sim D(\theta_{1i}, \theta_{2i}, ...)$$ to stress the dependency on the $i\textsuperscript{th}$ observation. Every parameter $\theta_p$ may be regressed on its own predictor term $\eta_p$ transformed by the inverse link function $f_p$ that is $\theta_{pi} = f_p(\eta_{pi})$\footnote{A parameter can also be assumed constant across observations so that a linear predictor is not required.}. 
Such models are typically referred to as \emph{distributional models}\footnote{The models described in \citet{brms1} are a sub-class of the here described models.}.
A frequentist implementation of such models, which inspired the non-linear syntax in \pkg{brms}, can be found in the \pkg{nlme} package \citep{nlme2016}. \section{Extended multilevel formula syntax} \label{formula_syntax} The formula syntax applied in \pkg{brms} builds upon the syntax of the R package \pkg{lme4} \citep{bates2015}. First, we will briefly explain the \pkg{lme4} syntax used to specify multilevel models and then introduce certain extensions that allow to specify much more complicated models in \pkg{brms}. An \pkg{lme4} formula has the general form \begin{Sinput} response ~ pterms + (gterms | group) \end{Sinput} The \code{pterms} part contains the population-level effects that are assumed to be the same across obervations. The \code{gterms} part contains so called group-level effects that are assumed to vary accross grouping variables specified in \code{group}. Multiple grouping factors each with multiple group-level effects are possible. Usually, \code{group} contains only a single variable name pointing to a factor, but you may also use \code{g1:g2} or \code{g1/g2}, if both \code{g1} and \code{g2} are suitable grouping factors. The \code{:} operator creates a new grouping factor that consists of the combined levels of \code{g1} and \code{g2} (you could think of this as pasting the levels of both factors together). The \code{/} operator indicates nested grouping structures and expands one grouping factor into two or more when using multiple \code{/} within one term. If, for instance, you write \code{(1 | g1/g2)}, it will be expanded to \code{(1 | g1) + (1 | g1:g2)}. Instead of \code{|} you may use \code{||} in grouping terms to prevent group-level correlations from being modeled. This may be useful in particular when modeling so many group-level effects that convergence of the fitting algorithms becomes an issue due to model complexity. 
One limitation of the \code{||} operator in \pkg{lme4} is that it only splits up terms so that columns of the design matrix originating from the same term are still modeled as correlated (e.g., when coding a categorical predictor; see the \code{mixed} function of the \pkg{afex} package by \citet{afex2015} for a way to avoid this behavior). While intuitive and visually appealing, the classic \pkg{lme4} syntax is not flexible enough to allow for specifying the more complex models supported by \pkg{brms}. In non-linear or distributional models, for instance, multiple parameters are predicted, each having their own population and group-level effects. Hence, multiple formulas are necessary to specify such models\footnote{Actually, it is possible to specify multiple model parts within one formula using interactions terms for instance as implemented in \pkg{MCMCglmm} \citep{hadfield2010}. However, this syntax is limited in flexibility and requires a rather deep understanding of the way R parses formulas, thus often being confusing to users.}. Then, however, specifying group-level effects of the same grouping factor to be correlated \emph{across} formulas becomes complicated. The solution implemented in \pkg{brms} (and currently unique to it) is to expand the \code{|} operator into \code{||}, where \code{} can be any value. Group-level terms with the same \code{ID} will then be modeled as correlated if they share same grouping factor(s)\footnote{It might even be further extended to \code{|fun()|}, where \code{fun} defines the type of correlation structure, defaulting to unstructured that is estimating the full correlation matrix. The \code{fun} argument is not yet supported by \pkg{brms} but could be supported in the future if other correlation structures, such as compound symmetry or Toeplitz, turn out to have reasonable practical applications and effective implementations in Stan.}. 
For instance, if the terms \code{(x1|ID|g1)} and \code{(x2|ID|g1)} appear somewhere in the same or different formulas passed to \pkg{brms}, they will be modeled as correlated. Further extensions of the classical \pkg{lme4} syntax refer to the \code{group} part. It is rather limited in its flexibility since only variable names combined by \code{:} or \code{/} are supported. We propose two extensions of this syntax: Firstly, \code{group} can generally be split up in its terms so that, say, \code{(1 | g1 + g2)} is expanded to \code{(1 | g1) + (1 | g2)}. This is fully consistent with the way \code{/} is handled so it provides a natural generalization to the existing syntax. Secondly, there are some special grouping structures that cannot be expressed by simply combining grouping variables. For instance, multi-membership models cannot be expressed this way. To overcome this limitation, we propose wrapping terms in \code{group} within special functions that allow specifying alternative grouping structures: \code{(gterms | fun(group))}. In \pkg{brms}, there are currently two such functions implemented, namely \code{gr} for the default behavior and \code{mm} for multi-membership terms. To be compatible with the original syntax and to keep formulas short, \code{gr} is automatically added internally if none of these functions is specified. While some non-linear relationships, such as quadratic relationships, can be expressed within the basic R formula syntax, other more complicated ones cannot. For this reason, it is possible in \pkg{brms} to fully specify non-linear predictor terms similar to how it is done in \pkg{nlme}, but fully compatible with the extended multilevel syntax described above. Suppose, for instance, we want to model the non-linear growth curve $$ y = b_1 (1 - \exp(-(x / b_2)^{b_3}) $$ between $y$ and $x$ with parameters $b_1$, $b_2$, and $b_3$ (see Example 3 in this paper for an implementation of this model with real data). 
Furthermore, we want all three parameters to vary by a grouping variable $g$ and model those group-level effects as correlated. Additionally $b_1$ should be predicted by a covariate $z$. We can express this in \pkg{brms} using multiple formulas, one for the non-linear model itself and one per non-linear parameter: \begin{Sinput} y ~ b1 * (1 - exp(-(x / b2) ^ b3) b1 ~ z + (1|ID|g) b2 ~ (1|ID|g) b3 ~ (1|ID|g) \end{Sinput} The first formula will not be evaluated using standard R formula parsing, but instead taken literally. In contrast, the formulas for the non-linear parameters will be evaluated in the usual way and are compatible with all terms supported by \pkg{brms}. Note that we have used the above described ID-syntax to model group-level effects as correlated across formulas. There are other syntax extensions implemented in \pkg{brms} that do not directly target grouping terms. Firstly, there are terms formally included in the \code{pterms} part that are handled separately. The most prominent examples are smooth terms specified through the \code{s} and \code{t2} functions of the \pkg{mgcv} package \citep{wood2011}. Other examples are category specific effects \code{cs}, monotonic effects \code{mo}, noise-free effects \code{me}, or Gaussian process terms \code{gp}. The former is explained in \citet{brms1}, while the latter three are documented in \code{help(brmsformula)}. Internally, these terms are extracted from \code{pterms} and not included in the construction of the population-level design matrix. Secondly, making use of the fact that \code{|} is unused on the left-hand side of $\sim$ in formula, additional information on the response variable may be specified via \begin{Sinput} response | aterms ~ \end{Sinput} The \code{aterms} part may contain multiple terms of the form \code{fun()} separated by \code{+} each providing special information on the response variable. 
This allows among others to weight observations, provide known standard errors for meta-analysis, or model censored or truncated data. As it is not the main topic of the present paper, we refer to \code{help("brmsformula")} and \code{help("addition-terms")} for more details. To set up the model formulas and combine them into one object, \pkg{brms} defines the \code{brmsformula} (or short \code{bf}) function. Its output can then be passed to the \code{parse\_bf} function, which splits up the formulas in separate parts and prepares them for the generation of design matrices and related data. Other packages may re-use these functions in their own routines making it easier to offer support for the above described multilevel syntax. \section{Examples} The idea of \pkg{brms} is to provide one unified framework for multilevel regression models in R. As such, the above described formula syntax in all of its variations can be applied in combination with all response distributions supported by \pkg{brms} (currently about 35 response distributions are supported; see \code{help("brmsfamily")} and \code{vignette("brms\_families")} for an overview). In this section, we will discuss four examples in detail, each focusing on certain aspects of the syntax. They are chosen to provide a broad overview of the modeling options. The first is about the number of fish caught be different groups of people. It does not actually contain any multilevel structure, but helps in understanding how to set up formulas for different model parts. The second example is about housing rents in Munich. We model the data using splines and a distributional regression approach. The third example is about cumulative insurance loss payments across several years, which is fitted using a rather complex non-linear multilevel model. Finally, the fourth example is about the performance of school children, who change school during the year, thus requiring a multi-membership model. 
Despite not being covered in the four examples, there are a few more modeling options that we want to briefly describe. First, \pkg{brms} allows fitting so called phylogenetic models. These models are relevant in evolutionary biology when data of many species are analyzed at the same time. Species are not independent as they come from the same phylogenetic tree, implying that different levels of the same grouping-factor (i.e., species) are likely correlated. There is a whole vignette dedicated to this topic, which can be found via \code{vignette("brms\_phylogenetics")}. Second, there is a canonical way to handle ordinal predictors, without falsely assuming they are either categorical or continuous. We call them monotonic effects and discuss them in \code{vignette("brms\_monotonic")}. Last but not least, it is possible to account for measurement error in both response and predictor variables. This is often ignored in applied regression modeling \citep{westfall2016}, although measurement error is very common in all scientific fields making use of observational data. There is no vignette yet covering this topic, but one will be added in the future. In the meantime, \code{help("brmsformula")} is the best place to start learning about such models as well as about other details of the \pkg{brms} formula syntax. \subsection{Example 1: Catching fish} An important application of the distributional regression framework of \pkg{brms} are so called zero-inflated and hurdle models. These models are helpful whenever there are more zeros in the response variable than one would naturally expect. Here, we consider an example dealing with the number of fish caught by various groups of people. On the UCLA website (\url{https://stats.idre.ucla.edu/stata/dae/zero-inflated-poisson-regression}), the data are described as follows: ``The state wildlife biologists want to model how many fish are being caught by fishermen at a state park. 
Visitors are asked how long they stayed, how many people were in the group, were there children in the group and how many fish were caught. Some visitors do not fish, but there is no data on whether a person fished or not. Some visitors who did fish did not catch any fish so there are excess zeros in the data because of the people that did not fish.'' \begin{Sinput} zinb <- read.csv("http://stats.idre.ucla.edu/stat/data/fish.csv") zinb$camper <- factor(zinb$camper, labels = c("no", "yes")) head(zinb) \end{Sinput} \begin{Sinput} nofish livebait camper persons child xb zg count 1 1 0 no 1 0 -0.8963146 3.0504048 0 2 0 1 yes 1 0 -0.5583450 1.7461489 0 3 0 1 no 1 0 -0.4017310 0.2799389 0 4 0 1 yes 2 1 -0.9562981 -0.6015257 0 5 0 1 no 1 0 0.4368910 0.5277091 1 6 0 1 yes 4 2 1.3944855 -0.7075348 0 \end{Sinput} As predictors we choose the number of people per group, the number of children, as well as whether or not the group consists of campers. Many groups may not catch any fish just because they do not try and so we fit a zero-inflated Poisson model. For now, we assume a constant zero-inflation probability across observations. \begin{Sinput} fit_zinb1 <- brm(count ~ persons + child + camper, data = zinb, family = zero_inflated_poisson("log")) \end{Sinput} The model is readily summarized via \begin{Sinput} summary(fit_zinb1) \end{Sinput} \begin{Sinput} Family: zero_inflated_poisson (log) Formula: count ~ persons + child + camper Data: zinb (Number of observations: 250) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept -1.01 0.17 -1.34 -0.67 2171 1 persons 0.87 0.04 0.79 0.96 2188 1 child -1.36 0.09 -1.55 -1.18 1790 1 camper 0.80 0.09 0.62 0.98 2950 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat zi 0.41 0.04 0.32 0.49 2409 1 Samples were drawn using sampling(NUTS). 
For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} A graphical summary is available through \begin{Sinput} conditional_effects(fit_zinb1) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_zinb1.pdf} \caption{Conditional effects plots of the \code{fit\_zinb1} model.} \label{me_zinb1} \end{figure} (see Figure \ref{me_zinb1}). In fact, the \code{conditional\_effects} method turned out to be so powerful in visualizing effects of predictors that I am using it almost as frequently as \code{summary}. According to the parameter estimates, larger groups catch more fish, campers catch more fish than non-campers, and groups with more children catch less fish. The zero-inflation probability \code{zi} is pretty large with a mean of 41\%. Please note that the probability of catching no fish is actually higher than 41\%, but parts of this probability are already modeled by the Poisson distribution itself (hence the name zero-\emph{inflation}). If you want to treat all zeros as originating from a separate process, you can use hurdle models instead (not shown here). Now, we try to additionally predict the zero-inflation probability by the number of children. The underlying reasoning is that we expect groups with more children to not even try catching fish, since children often lack the patience required for fishing. From a purely statistical perspective, zero-inflated (and hurdle) distributions are a mixture of two processes and predicting both parts of the model is natural and often very reasonable to make full use of the data. 
\begin{Sinput} fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child), data = zinb, family = zero_inflated_poisson()) \end{Sinput} To transform the linear predictor of \code{zi} into a probability, \pkg{brms} applies the logit-link, which takes values within $[0, 1]$ and returns values on the real line. Thus, it allows the transition between probabilities and linear predictors. \begin{Sinput} summary(fit_zinb2) \end{Sinput} \begin{Sinput} Family: zero_inflated_poisson (log) Formula: count ~ persons + child + camper zi ~ child Data: zinb (Number of observations: 250) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept -1.07 0.18 -1.43 -0.73 2322 1 persons 0.89 0.05 0.80 0.98 2481 1 child -1.17 0.10 -1.37 -1.00 2615 1 camper 0.78 0.10 0.60 0.96 3270 1 zi_Intercept -0.95 0.27 -1.52 -0.48 2341 1 zi_child 1.21 0.28 0.69 1.79 2492 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} According to the model, trying to fish with children not only decreases the overall number fish caught (as implied by the Poisson part of the model) but also drastically increases your chance of catching no fish at all (as implied by the zero-inflation part), possibly because groups with more children spend less time or no time at all fishing. Comparing model fit via leave-one-out cross validation as implemented in the \pkg{loo} package \citep{loo2016, vehtari2016}. \begin{Sinput} LOO(fit_zinb1, fit_zinb2) \end{Sinput} \begin{Sinput} LOOIC SE fit_zinb1 1639.52 363.30 fit_zinb2 1621.35 362.39 fit_zinb1 - fit_zinb2 18.16 15.71 \end{Sinput} reveals that the second model using the number of children to predict both model parts has better fit. 
However, when considering the standard error of the \code{LOOIC} difference, improvement in model fit is apparently modest and not substantial. More examples of distributional model can be found in \code{vignette("brms\_distreg")}. \subsection{Example 2: Housing rents} In their book about regression modeling, \citet{fahrmeir2013} use an example about the housing rents in Munich from 1999. The data contains information about roughly 3000 apartments including among others the absolute rent (\code{rent}), rent per square meter (\code{rentsqm}), size of the apartment (\code{area}), construction year (\code{yearc}), and the district in Munich (\code{district}), where the apartment is located. The data can be found in the \pkg{gamlss.data} package \citep{gamlss.data}: \begin{Sinput} data("rent99", package = "gamlss.data") head(rent99) \end{Sinput} \begin{Sinput} rent rentsqm area yearc location bath kitchen cheating district 1 109.9487 4.228797 26 1918 2 0 0 0 916 2 243.2820 8.688646 28 1918 2 0 0 1 813 3 261.6410 8.721369 30 1918 1 0 0 1 611 4 106.4103 3.547009 30 1918 2 0 0 0 2025 5 133.3846 4.446154 30 1918 2 0 0 1 561 6 339.0256 11.300851 30 1918 2 0 0 1 541 \end{Sinput} Here, we aim at predicting the rent per square meter with the size of the apartment as well as the construction year, while taking the district of Munich into account. As the effect of both predictors on the rent is of unknown non-linear form, we model these variables using a bivariate tensor spline \citep{wood2013}. The district is accounted for via a varying intercept. \begin{Sinput} fit_rent1 <- brm(rentsqm ~ t2(area, yearc) + (1|district), data = rent99, chains = 2, cores = 2) \end{Sinput} We fit the model using just two chains (instead of the default of four chains) on two processor cores to reduce the model fitting time for the purpose of the present paper. In general, using the default option of four chains (or more) is recommended. 
\begin{Sinput} summary(fit_rent1) \end{Sinput} \begin{Sinput} Family: gaussian(identity) Formula: rentsqm ~ t2(area, yearc) + (1 | district) Data: rent99 (Number of observations: 3082) Samples: 2 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 2000 ICs: LOO = NA; WAIC = NA; R2 = NA Smooth Terms: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sds(t2areayearc_1) 4.93 2.32 1.61 10.77 1546 1.00 sds(t2areayearc_2) 5.78 2.87 1.58 13.15 1175 1.00 sds(t2areayearc_3) 8.09 3.19 3.66 16.22 1418 1.00 Group-Level Effects: ~district (Number of levels: 336) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 0.60 0.06 0.48 0.73 494 1.01 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept 7.80 0.11 7.59 8.02 2000 1.00 t2areayearc_1 -1.00 0.09 -1.15 -0.83 2000 1.00 t2areayearc_2 0.75 0.17 0.43 1.09 2000 1.00 t2areayearc_3 -0.07 0.16 -0.40 0.24 1579 1.00 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 1.95 0.03 1.90 2.01 2000 1.00 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} For models including splines, the output of \code{summary} is not tremendously helpful, but we get at least some information. Firstly, the credible intervals of the standard deviations of the coefficients forming the splines (under \code{'Smooth Terms'}) are sufficiently far away from zero to indicate non-linearity in the (combined) effect of \code{area} and \code{yearc}. Secondly, even after controlling for these predictors, districts still vary with respect to rent per square meter by a sizable amount as visible under \code{'Group-Level Effects'} in the output. 
To further understand the effect of the predictor, we apply graphical methods: \begin{Sinput} conditional_effects(fit_rent1, surface = TRUE) \end{Sinput} In Figure \ref{me_rent1}, the conditional effects of both predictors are displayed, while the respective other predictor is fixed at its mean. In Figure \ref{me_rent2}, the combined effect is shown, clearly demonstrating an interaction between the variables. In particular, housing rents appear to be highest for small and relatively new apartments. \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_rent1.pdf} \caption{Conditional effects plots of the \code{fit\_rent1} model for single predictors.} \label{me_rent1} \end{figure} \begin{figure}[ht] \centering \includegraphics[width=0.7\textwidth,keepaspectratio]{me_rent2.pdf} \caption{Surface plot of the \code{fit\_rent1} model for the combined effect of \code{area} and \code{yearc}.} \label{me_rent2} \end{figure} In the above example, we only considered the mean of the response distribution to vary by \code{area} and \code{yearc}, but this my not necessarily reasonable assumption, as the variation of the response might vary with these variables as well. Accordingly, we fit splines and effects of district for both the location and the scale parameter, which is called \code{sigma} in Gaussian models. \begin{Sinput} bform <- bf(rentsqm ~ t2(area, yearc) + (1|ID1|district), sigma ~ t2(area, yearc) + (1|ID1|district)) fit_rent2 <- brm(bform, data = rent99, chains = 2, cores = 2) \end{Sinput} If not otherwise specified, \code{sigma} is predicted on the log-scale to ensure it is positive no matter how the predictor term looks like. Instead of \code{(1|district)} as in the previous model, we now use \code{(1|ID1|district)} in both formulas. This results in modeling the varying intercepts of both model parts as correlated (see the description of the ID-syntax above). 
The group-level part of the \code{summary} output looks as follows: \begin{Sinput} Group-Level Effects: ~district (Number of levels: 336) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 0.60 0.06 0.49 0.73 744 1.00 sd(sigma_Intercept) 0.11 0.02 0.06 0.15 751 1.00 cor(Intercept,sigma_Intercept) 0.72 0.17 0.35 0.98 648 1.00 \end{Sinput} As visible from the positive correlation of the intercepts, districts with overall higher rent per square meter have higher variation at the same time. Lastly, we want to turn our attention to the splines. While \code{conditional\_effects} is used to visualize effects of predictors on the expected response, \code{conditional\_smooths} is used to show just the spline parts of the model: \begin{Sinput} conditional_smooths(fit_rent2) \end{Sinput} The plot on the left-hand side of Figure \ref{me_rent3} resembles the one in Figure \ref{me_rent2}, but the scale is different since only the spline is plotted. The right-hand side of \ref{me_rent3} shows the spline for \code{sigma}. Since we apply the log-link on \code{sigma} by default the spline is on the log-scale as well. As visible in the plot, the variation in the rent per square meter is highest for relatively small and old apartments, while the variation is smallest for medium to large apartments build around the 1960s. \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_rent3.pdf} \caption{Plots showing the smooth terms of the \code{fit\_rent2} model.} \label{me_rent3} \end{figure} \subsection{Example 3: Insurance loss payments} On his blog, Markus Gesmann predicts the growth of cumulative insurance loss payments over time, originated from different origin years (see \url{http://www.magesblog.com/2015/11/loss-developments-via-growth-curves-and.html}). We will use a slightly simplified version of his model for demonstration purposes here. 
It looks as follows: $$cum_{AY, dev} \sim N(\mu_{AY, dev}, \sigma)$$ $$\mu_{AY, dev} = ult_{AY} \left(1 - \exp\left(- \left( \frac{dev}{\theta} \right)^\omega \right) \right)$$ The cumulative insurance payments $cum$ will grow over time, and we model this dependency using the variable $dev$. Further, $ult_{AY}$ is the (to be estimated) ultimate loss of accident each year. It constitutes a non-linear parameter in our framework along with the parameters $\theta$ and $\omega$, which are responsible for the growth of the cumulative loss and are for now assumed to be the same across years. We load the data \begin{Sinput} url <- paste0("https://raw.githubusercontent.com/mages/", "diesunddas/master/Data/ClarkTriangle.csv") loss <- read.csv(url) head(loss) \end{Sinput} \begin{Sinput} AY dev cum 1 1991 6 357.848 2 1991 18 1124.788 3 1991 30 1735.330 4 1991 42 2182.708 5 1991 54 2745.596 6 1991 66 3319.994 \end{Sinput} and translate the proposed model into a non-linear \pkg{brms} model. \begin{Sinput} nlform <- bf(cum ~ ult * (1 - exp(-(dev / theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE) nlprior <- c(prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta")) fit_loss1 <- brm(formula = nlform, data = loss, family = gaussian(), prior = nlprior, control = list(adapt_delta = 0.9)) \end{Sinput} In the above functions calls, quite a few things are going on. The formulas are wrapped in \code{bf} to combine them into one object. The first formula specifies the non-linear model. We set argument \code{nl = TRUE} so that \pkg{brms} takes this formula literally and instead of using standard R formula parsing. We specify one additional formula per non-linear parameter (a) to clarify what variables are covariates and what are parameters and (b) to specify the predictor term for the parameters. We estimate a group-level effect of accident year (variable \code{AY}) for the ultimate loss \code{ult}. 
This also shows nicely how a non-linear parameter is actually a placeholder for a linear predictor, which in the case of \code{ult}, contains only a varying intercept for year. Both \code{omega} and \code{theta} are assumed to be constant across observations so we just fit a population-level intercept. Priors on population-level effects are required and, for the present model, are actually mandatory to ensure identifiability. Otherwise, we may observe that different Markov chains converge to different parameter regions as multiple posterior distribution are equally plausible. Setting prior distributions is a difficult task especially in non-linear models. It requires some experience and knowledge both about the model that is being fitted and about the data at hand. Additionally, there is more to keep in mind to optimize the sampler's performance: Firstly, using non- or weakly informative priors in non-linear models often leads to problems even if the model is generally identified. For instance, if a zero-centered and reasonably wide prior such as \code{normal(0, 10000)} it set on \code{ult}, there is little information about \code{theta} and \code{omega} for samples of \code{ult} being close to zero, which may lead to convergence problems. Secondly, Stan works best when parameters are roughly on the same order of magnitude \citep{stan2017}. In the present example, \code{ult} is of three orders larger than \code{omega}. Still, the sampler seems to work quite well, but this may not be true for other models. One solution is to rescale parameters before model fitting. For instance, for the present example, one could have downscaled \code{ult} by replacing it with \code{ult * 1000} and correspondingly the \code{normal(5000, 1000)} prior with \code{normal(5, 1)}. In the \code{control} argument we increase \code{adapt\_delta} to get rid of a few divergent transitions (cf. \citeauthor{stan2017}, \citeyear{stan2017}; \citeauthor{brms1}, \citeyear{brms1}). 
Again the model is summarized via \begin{Sinput} summary(fit_loss1) \end{Sinput} \begin{Sinput} Family: gaussian (identity) Formula: cum ~ ult * (1 - exp(-(dev / theta)^omega)) ult ~ 1 + (1 | AY) omega ~ 1 theta ~ 1 Data: loss (Number of observations: 55) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Group-Level Effects: ~AY (Number of levels: 10) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(ult_Intercept) 745.74 231.31 421.05 1306.04 916 1 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat ult_Intercept 5273.70 292.34 4707.11 5852.28 798 1 omega_Intercept 1.34 0.05 1.24 1.43 2167 1 theta_Intercept 46.07 2.09 42.38 50.57 1896 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 139.93 15.52 113.6 175.33 2358 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} as well as \begin{Sinput} conditional_effects(fit_loss1) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.7\textwidth,keepaspectratio]{me_loss1.pdf} \caption{Conditional effects plots of the \code{fit\_loss1} model.} \label{me_loss1} \end{figure} (see Figure \ref{me_loss1}). We can also visualize the cumulative insurance loss over time separately for each year. 
\begin{Sinput} conditions <- data.frame(AY = unique(loss$AY)) rownames(conditions) <- unique(loss$AY) me_year <- conditional_effects(fit_loss1, conditions = conditions, re_formula = NULL, method = "predict") plot(me_year, ncol = 5, points = TRUE) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_loss1_year.pdf} \caption{Conditional effects plots of the \code{fit\_loss1} model separately for each accident year.} \label{me_loss1_year} \end{figure} (see Figure \ref{me_loss1_year}). It is evident that there is some variation in cumulative loss across accident years, for instance due to natural disasters happening only in certain years. Further, we see that the uncertainty in the predicted cumulative loss is larger for later years with fewer available data points. In the above model, we considered \code{omega} and \code{theta} to be constant across years, which may not necessarily be true. We can easily investigate this by fitting varying intercepts for all three non-linear parameters, while also estimating group-level correlations, using the above introduced \code{ID} syntax. \begin{Sinput} nlform2 <- bf(cum ~ ult * (1 - exp(-(dev / theta)^omega)), ult ~ 1 + (1|ID1|AY), omega ~ 1 + (1|ID1|AY), theta ~ 1 + (1|ID1|AY), nl = TRUE) fit_loss2 <- update(fit_loss1, formula = nlform2, control = list(adapt_delta = 0.90)) \end{Sinput} We could have also specified all predictor terms more conveniently within one formula as \begin{Sinput} ult + omega + theta ~ 1 + (1|ID1|AY) \end{Sinput} because the structure of the predictor terms is identical. To compare model fit, we perform leave-one-out cross-validation. 
\begin{Sinput} LOO(fit_loss1, fit_loss2) \end{Sinput} \begin{Sinput} LOOIC SE fit_loss1 715.44 19.24 fit_loss2 720.60 19.85 fit_loss1 - fit_loss2 -5.15 5.34 \end{Sinput} Since smaller values indicate better expected out-of-sample predictions and thus better model fit, the simpler model that only has a varying intercept over parameter \code{ult} is preferred. This may not be overly surprising, given that three varying intercepts as well as three group-level correlations are probably overkill for data containing only 55 observations. Nevertheless, it nicely demonstrates how to apply the \code{ID} syntax in practice. More examples of non-linear models can be found in \code{vignette("brms\_nonlinear")}. \subsection{Example 4: Performance of school children} Suppose that we want to predict the performance of students in the final exams at the end of the year. There are many variables to consider, but one important factor will clearly be school membership. Schools might differ in the ratio of teachers and students, the general quality of teaching, in the cognitive ability of the students they draw, or other factors we are not aware of that induce dependency among students of the same school. Thus, it is advisable to apply a multilevel modeling technique including school membership as a group-level term. Of course, we should account for class membership and other levels of the educational hierarchy as well, but for the purpose of the present example, we will focus on schools only. Usually, accounting for school membership is pretty straightforward by simply adding a varying intercept to the formula: \code{(1 | school)}. However, a non-negligible number of students might change schools during the year. This would result in a situation where one student is a member of multiple schools and so we need a multi-membership model. 
Setting up such a model not only requires information on the different schools students attend during the year, but also the amount of time spent at each school. The latter can be used to weight the influence each school has on its students, since more time attending a school will likely result in greater influence. For now, let us assume that students change schools maximally once a year and spend equal time at each school. We will later see how to relax these assumptions. Real educational data are usually relatively large and complex so that we simulate our own data for the purpose of this tutorial paper. We simulate 10 schools and 1000 students, with each school having the same expected number of 100 students. We model 10\% of students as changing schools. \begin{Sinput} data_mm <- sim_multi_mem(nschools = 10, nstudents = 1000, change = 0.1) head(data_mm) \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 1 8 9 0.5 0.5 16.27422 2 10 9 0.5 0.5 18.71387 3 5 3 0.5 0.5 23.65319 4 3 5 0.5 0.5 22.35204 5 5 3 0.5 0.5 16.38019 6 10 6 0.5 0.5 17.63494 \end{Sinput} The code of function \code{sim\_multi\_mem} can be found in the online supplement of the present paper. For reasons of better illustration, students changing schools appear in the first rows of the data. Data of students being only at a single school looks as follows: \begin{Sinput} data_mm[101:106, ] \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 101 2 2 0.5 0.5 27.247851 102 9 9 0.5 0.5 24.041427 103 4 4 0.5 0.5 12.575001 104 2 2 0.5 0.5 21.203644 105 4 4 0.5 0.5 12.856166 106 4 4 0.5 0.5 9.740174 \end{Sinput} Thus, school variables are identical, but we still have to specify both in order to pass the data appropriately. Incorporating no other predictors into the model for simplicity, a multi-membership model is specified as \begin{Sinput} fit_mm <- brm(y ~ 1 + (1 | mm(s1, s2)), data = data_mm) \end{Sinput} The only new syntax element is that multiple grouping factors (\code{s1} and \code{s2}) are wrapped in \code{mm}. 
Everything else remains exactly the same. Note that we did not specify the relative weights of schools for each student and thus, by default, equal weights are assumed. \begin{Sinput} summary(fit_mm) \end{Sinput} \begin{Sinput} Family: gaussian (identity) Formula: y ~ 1 + (1 | mm(s1, s2)) Data: data_mm (Number of observations: 1000) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Group-Level Effects: ~mms1s2 (Number of levels: 10) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 2.76 0.82 1.69 4.74 682 1.01 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept 19 0.93 17.06 20.8 610 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 3.58 0.08 3.43 3.75 2117 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} With regard to the assumptions made in the above example, it is unlikely that all children who change schools stay in both schools equally long. To relax this assumption, we have to specify weights. First, we amend the simulated data to contain non-equal weights for students changing schools. For all other students, weighting does of course not matter as they stay in the same school anyway. \begin{Sinput} data_mm[1:100, "w1"] <- runif(100, 0, 1) data_mm[1:100, "w2"] <- 1 - data_mm[1:100, "w1"] head(data_mm) \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 1 8 9 0.3403258 0.65967423 16.27422 2 10 9 0.1771435 0.82285652 18.71387 3 5 3 0.9059811 0.09401892 23.65319 4 3 5 0.4432007 0.55679930 22.35204 5 5 3 0.8052026 0.19479738 16.38019 6 10 6 0.5610243 0.43897567 17.63494 \end{Sinput} Incorporating these weights into the model is straight forward. 
\begin{Sinput} fit_mm2 <- brm(y ~ 1 + (1 | mm(s1, s2, weights = cbind(w1, w2))), data = data_mm) \end{Sinput} The summary output is similar to the previous, so we do not show it here. The second assumption that students change schools only once a year, may also easily be relaxed by providing more than two grouping factors, say, \code{mm(s1, s2, s3)}. \section{Comparison between packages} Over the years, many R packages have been developed that implement MLMs, each being more or less general in their supported models. In \cite{brms1}, I compared \pkg{brms} with \pkg{lme4} \citep{bates2015}, \pkg{MCMCglmm} \citep{hadfield2010}, \pkg{rstanarm} \citep{rstanarm2017}, and \pkg{rethinking} \citep{mcelreath2017}. Since then, quite a few new features have been added in particular to \pkg{brms} and \pkg{rstanarm}. Accordingly, in the present paper, I will update these comparisons, but focus on \pkg{brms}, \pkg{rstanarm}, and \pkg{MCMCglmm} as the possibly most important R packages implementing Bayesian MLMs. While \pkg{brms} and \pkg{rstanarm} are both based on the probabilistic programming language \pkg{Stan}, \pkg{MCMCglmm} implements its own custom MCMC algorithm. Modeling options and other important information of these packages are summarized in Table~\ref{comparison} and will be discussed in detail below. Regarding general model classes, all three packages support the most common types such as linear, count data and certain survival models. Currently, \pkg{brms} and \pkg{MCMCglmm} provide more flexibility when modeling categorical and ordinal data. Additionally, they offer support for zero-inflated and hurdle models to account for excess zeros in the data (see Example 1 above). 
For survival / time-to-event models, \pkg{rstanarm} offers great flexibility via the \code{stan\_jm} function, which allows for complex association structures between time-to-event data and one or more models of longitudinal covariates (for details see \url{https://cran.r-project.org/web/packages/rstanarm/vignettes/jm.html}). Model classes currently specific to \pkg{brms} are robust linear models using Student's t-distribution (family \code{student}) as well as response times models via the exponentially modified Gaussian (family \code{exgaussian}) distribution or the Wiener diffusion model (family \code{wiener}). The latter allows to simultaneously model dichotomous decisions and their corresponding response times (for a detailed example see \url{http://singmann.org/wiener-model-analysis-with-brms-part-i/}). All three packages offer many additional modeling options, with \pkg{brms} currently having the greatest flexibility (see Table~\ref{comparison} for a summary). Moreover, the packages differ in the general framework, in which they are implemented: \pkg{brms} and \pkg{MCMCglmm} each come with a single model fitting function (\code{brm} and \code{MCMCglmm} respectively), through which all of their models can be specified. Further, their framework allows to seamlessly combine most modeling options with each other in the same model. In contrast, the approach of \pkg{rstanarm} is to emulate existing functions of other packages. This has the advantage of an easier transition between classical and Bayesian models, since the syntax used to specify models stays the same. However, it comes with the main disadvantage that many modeling options cannot be used in combination within the same model. Information criteria are available in all three packages. 
The advantages of WAIC and LOO implemented in \pkg{brms} and \pkg{rstanarm}, are their less restrictive assumptions and that their standard errors can be easily estimated to get a better sense of the uncertainty in the criteria. Comparing the prior options of the packages, \pkg{brms} offers a little more flexibility than \pkg{MCMCglmm} and \pkg{rstanarm}, as virtually any prior distribution can be applied on population-level effects as well as on the standard deviations of group-level effects. In addition, I believe that the way priors are specified in \pkg{brms} is more intuitive as it is directly evident what prior is actually applied. In \pkg{brms}, Bayes factors are available both via Savage-Dickey ratios \citep{wagenmakers2010} and bridge-sampling \citep{bridgesampling2017}, while \pkg{rstanarm} allows for the latter option. For a detailed comparison with respect to sampling efficiency, see \cite{brms1}. \begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{rstanarm}} & \parbox{2cm}{\pkg{MCMCglmm}} \\ \hline \\ [-1.5ex] \parbox{6cm}{\textbf{Model classes}} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & no & no \\ Count data models & yes & yes & yes \\ Survival models & yes & yes$^1$ & yes \\ Response times models & yes & no & no \\ Beta models & yes & yes & no \\ Categorical models & yes & yes$^2$ & yes \\ Multinomial models & no & no & yes \\ Ordinal models & various & cumulative$^2$ & cumulative \\ Zero-inflated and hurdle models & yes & no & yes \\ \hline \\ [-1.5ex] \parbox{5cm}{\textbf{Modeling options}} & & & \\ [1ex] Variable link functions & various & various & no \\ Multilevel structures & yes & yes & yes \\ Multi-membership & yes & no & yes \\ Multivariate responses & yes & yes$^3$ & yes \\ Non-linear predictors & yes & limited$^4$ & no \\ Distributional regression & yes & no & no \\ Finite mixtures & yes & no & no \\ Splines (additive models) & yes & yes & yes \\ Gaussian 
Processes & yes & no & no \\ Temporal autocorrelation & yes & yes$^{2, 5}$ & no \\ Spatial autocorrelation & yes & yes$^{2, 5}$ & no \\ Monotonic effects & yes & no & no \\ Category specific effects & yes & no & no \\ Measurement error & yes & no & no \\ Weights & yes & yes & no \\ Offset & yes & yes & using priors \\ Censored data & yes & yes$^1$ & yes \\ Truncated data & yes & no & no \\ Customized covariances & yes & no & yes \\ Missing value imputation & no & no & no \\ \hline \\ [-1.5ex] \textbf{Bayesian specifics} & & & \\ [1ex] Population-level priors & flexible & flexible & normal \\ Group-level priors & normal & normal & normal \\ Covariance priors & flexible & restricted$^6$ & restricted$^7$ \\ Bayes factors & yes & yes$^8$ & no \\ Parallelization & yes & yes & no \\ \hline \\ [-1.5ex] \textbf{Other} & & & \\ [1ex] Estimator & HMC, NUTS & HMC, NUTS & MH, Gibbs$^9$ \\ Information criterion & WAIC, LOO & WAIC, LOO & DIC \\ C++ compiler required & yes & no & no \\ \hline \end{tabular} \caption{ Comparison of the capabilities of the \pkg{brms}, \pkg{rstanarm} and \pkg{MCMCglmm} packages. Notes: (1) Advanced functionality available via \code{stan\_jm}. (2) No group-level terms and related functionality allowed. (3) Cannot be combined with other modeling options such as splines. (4) Functionality limited to linear Gaussian models and certain pre-specified non-linear functions. (5) Functionality available only on GitHub branches (\url{https://github.com/stan-dev/rstanarm}). (6) For details see \url{https://github.com/stan-dev/rstanarm/wiki/Prior-distributions}. (7) For details see \cite{hadfield2010}. (8) Available via the \pkg{bridgesampling} package \citep{bridgesampling2017}. (9) Estimator consists of a combination of both algorithms. } \label{comparison} \end{table} \section{Conclusion} The present paper is meant to introduce R users and developers to the extended \pkg{lme4} formula syntax applied in \pkg{brms}. 
Only a subset of modeling options were discussed in detail, which ensured the paper was not too broad. For some of the more basic models that \pkg{brms} can fit, see \citet{brms1}. Many more examples can be found in the growing number of vignettes accompanying the package (see \code{vignette(package = "brms")} for an overview). To date, \pkg{brms} is already one of the most flexible R packages when it comes to regression modeling. However, for the future, there are quite a few more features that I am planning to implement (see \url{https://github.com/paul-buerkner/brms/issues} for the current list of issues). In addition to smaller, incremental updates, I have two specific features in mind: (1) latent variables estimated via confirmatory factor analysis and (2) missing value imputation. I receive ideas and suggestions from users almost every day -- for which I am always grateful -- and so the list of features that will be implemented in the proceeding versions of \pkg{brms} will continue to grow. \section*{Acknowledgments} First of all, I would like to thank the Stan Development Team for creating the probabilistic programming language Stan, which is an incredibly powerful and flexible tool for performing full Bayesian inference. Without it, \pkg{brms} could not fit a single model. Furthermore, I want to thank Heinz Holling, Donald Williams and Ruben Arslan for valuable comments on earlier versions of the paper. I also would like to thank the many users who reported bugs or had ideas for new features, thus helping to continuously improve \pkg{brms}. 
\bibliography{citations_multilevel} \end{document} brms/vignettes/citations_multilevel.bib0000644000176200001440000002444614160105076020154 0ustar liggesusers% Encoding: UTF-8 @Article{brms2, title = {Advanced {Bayesian} Multilevel Modeling with the {R} Package {brms}}, author = {Paul-Christian Bürkner}, journal = {The R Journal}, year = {2018}, volume = {10}, number = {1}, pages = {395--411}, doi = {10.32614/RJ-2018-017}, encoding = {UTF-8}, } @Article{vehtari2016, author = {Vehtari, Aki and Gelman, Andrew and Gabry, Jonah}, title = {Practical Bayesian Model Evaluation Using Leave-One-Out Cross-Validation and WAIC}, journal = {Statistics and Computing}, year = {2016}, pages = {1--20}, publisher = {Springer}, } @Book{fahrmeir2013, title = {Regression: models, methods and applications}, publisher = {Springer Science \& Business Media}, year = {2013}, author = {Fahrmeir, Ludwig and Kneib, Thomas and Lang, Stefan and Marx, Brian}, } @Manual{gamlss.data, title = {gamlss.data: GAMLSS Data}, author = {Mikis Stasinopoulos and Bob Rigby}, year = {2016}, note = {R package version 5.0-0}, url = {https://CRAN.R-project.org/package=gamlss.data}, } @Article{wood2013, author = {Wood, Simon N and Scheipl, Fabian and Faraway, Julian J}, title = {Straightforward intermediate rank tensor product smoothing in mixed models}, journal = {Statistics and Computing}, year = {2013}, pages = {1--20}, publisher = {Springer}, } @Manual{mcelreath2017, title = {rethinking: Statistical Rethinking Course and Book Package}, author = {Richard McElreath}, year = {2017}, note = {R package version 1.59}, owner = {Paul}, timestamp = {2016.03.04}, url = {https://github.com/rmcelreath/rethinking}, } @Article{wagenmakers2010, author = {Wagenmakers, Eric-Jan and Lodewyckx, Tom and Kuriyal, Himanshu and Grasman, Raoul}, title = {Bayesian hypothesis testing for psychologists: A tutorial on the Savage--Dickey method}, journal = {Cognitive psychology}, year = {2010}, volume = {60}, number = {3}, pages = 
{158--189}, publisher = {Elsevier}, } @Manual{bridgesampling2017, title = {bridgesampling: Bridge Sampling for Marginal Likelihoods and Bayes Factors}, author = {Quentin F. Gronau and Henrik Singmann}, year = {2017}, note = {R package version 0.4-0}, url = {https://CRAN.R-project.org/package=bridgesampling}, } @BOOK{brown2015, title = {Applied Mixed Models in Medicine}, publisher = {John Wiley \& Sons}, year = {2015}, author = {Brown, Helen and Prescott, Robin}, owner = {Paul}, timestamp = {2015.06.19} } @Book{demidenko2013, title = {Mixed Models: Theory and Applications with R}, publisher = {John Wiley \& Sons}, year = {2013}, author = {Demidenko, Eugene}, owner = {Paul}, timestamp = {2015.06.19}, } @Book{gelmanMLM2006, title = {Data Analysis Using Regression and Multilevel/Hierarchical Models}, publisher = {Cambridge University Press}, year = {2006}, author = {Gelman, Andrew and Hill, Jennifer}, owner = {Paul}, timestamp = {2016.02.21}, } @Book{pinheiro2006, title = {Mixed-Effects Models in S and S-PLUS}, publisher = {Springer-Verlage Science \& Business Media}, year = {2006}, author = {Pinheiro, Jose and Bates, Douglas}, owner = {Paul}, timestamp = {2015.06.19}, } @Article{rigby2005, author = {Rigby, Robert A and Stasinopoulos, D Mikis}, title = {Generalized Additive Models for Location, Scale and Shape}, journal = {Journal of the Royal Statistical Society C (Applied Statistics)}, year = {2005}, volume = {54}, number = {3}, pages = {507--554}, publisher = {Wiley Online Library}, } @Article{lindstrom1990, author = {Lindstrom, Mary J and Bates, Douglas M}, title = {Nonlinear Mixed Effects Models for Repeated Measures Data}, journal = {Biometrics}, year = {1990}, pages = {673--687}, publisher = {JSTOR}, } @Article{wood2004, author = {Wood, Simon N}, title = {Stable and Efficient Multiple Smoothing Parameter Estimation for Generalized Additive Models}, journal = {Journal of the American Statistical Association}, year = {2004}, volume = {99}, number = {467}, pages = 
{673--686}, publisher = {Taylor \& Francis}, } @Article{rasmussen2006, author = {Rasmussen, Carl Edward and Williams, C. K. I.}, title = {Gaussian processes for machine learning}, year = {2006}, publisher = {Massachusetts Institute of Technology}, } @BOOK{hastie1990, title = {Generalized Additive Models}, publisher = {CRC Press}, year = {1990}, author = {Hastie, Trevor J and Tibshirani, Robert J}, volume = {43}, owner = {Paul}, timestamp = {2015.09.07} } @BOOK{gelman2014, title = {Bayesian Data Analysis}, publisher = {Taylor \& Francis}, year = {2014}, author = {Gelman, Andrew and Carlin, John B and Stern, Hal S and Rubin, Donald B}, volume = {2}, owner = {Paul}, timestamp = {2015.06.20} } @Manual{stanM2017, title = {Stan Modeling Language: User's Guide and Reference Manual}, author = {{Stan Development Team}}, year = {2017}, owner = {Paul}, timestamp = {2015.06.18}, url = {http://mc-stan.org/manual.html}, } @Article{carpenter2017, author = {Carpenter, B. and Gelman, A. and Hoffman, M. and Lee, D. and Goodrich, B. and Betancourt, M. and Brubaker, M. A. and Guo, J. and Li, P. 
and Ridell, A.}, title = {Stan: A Probabilistic Programming Language}, journal = {Journal of Statistical Software}, year = {2017}, owner = {Paul}, timestamp = {2015.06.19}, } @ARTICLE{duane1987, author = {Duane, Simon and Kennedy, Anthony D and Pendleton, Brian J and Roweth, Duncan}, title = {Hybrid Monte Carlo}, journal = {Physics Letters B}, year = {1987}, volume = {195}, pages = {216--222}, number = {2}, owner = {Paul}, publisher = {Elsevier}, timestamp = {2015.06.19} } @InBook{neal2011, chapter = {MCMC Using Hamiltonian Dynamics}, title = {Handbook of Markov Chain Monte Carlo}, publisher = {CRC Press}, year = {2011}, author = {Neal, Radford M}, volume = {2}, owner = {Paul}, timestamp = {2015.06.19}, } @Article{betancourt2014, author = {Betancourt, MJ and Byrne, Simon and Livingstone, Samuel and Girolami, Mark}, title = {The Geometric Foundations of Hamiltonian Monte Carlo}, journal = {arXiv preprint arXiv:1410.5110}, year = {2014}, } @ARTICLE{hoffman2014, author = {Hoffman, Matthew D and Gelman, Andrew}, title = {The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo}, journal = {The Journal of Machine Learning Research}, year = {2014}, volume = {15}, pages = {1593--1623}, number = {1}, owner = {Paul}, publisher = {JMLR. 
org}, timestamp = {2015.06.19} } @Article{betancourt2017, author = {Michael Betancourt}, title = {A Conceptual Introduction to Hamiltonian Monte Carlo}, journal = {arXiv preprint}, year = {2017}, url = {https://arxiv.org/pdf/1701.02434.pdf}, } @ARTICLE{bates2015, author = {Douglas Bates and Martin M{\"a}chler and Ben Bolker and Steve Walker}, title = {Fitting Linear Mixed-Effects Models Using \pkg{lme4}}, journal = {Journal of Statistical Software}, year = {2015}, volume = {67}, pages = {1--48}, number = {1}, owner = {Paul}, timestamp = {2015.11.13} } @Article{hadfield2010, author = {Hadfield, Jarrod D}, title = {MCMC Methods for Multi-Response Generalized Linear Mixed Models: the \pkg{MCMCglmm} {R} Package}, journal = {Journal of Statistical Software}, year = {2010}, volume = {33}, number = {2}, pages = {1--22}, owner = {Paul}, timestamp = {2015.06.18}, } @Manual{rstanarm2017, title = {rstanarm: {Bayesian} applied regression modeling via {Stan}.}, author = {{Stan Development Team}}, year = {2017}, note = {R package version 2.17.2}, url = {http://mc-stan.org/}, } @Manual{afex2015, title = {\pkg{afex}: Analysis of Factorial Experiments}, author = {Henrik Singmann and Ben Bolker and Jake Westfall}, year = {2015}, note = {R package version 0.15-2}, owner = {Paul}, timestamp = {2016.02.13}, url = {https://CRAN.R-project.org/package=afex}, } @Article{brms1, author = {Paul-Christian B\"urkner}, title = {\pkg{brms}: An {R} Package for Bayesian Multilevel Models using Stan}, journal = {Journal of Statistical Software}, year = {2017}, encoding = {UTF-8}, } @Article{wood2011, author = {Wood, Simon N}, title = {Fast Stable Restricted Maximum Likelihood and Marginal Likelihood Estimation of Semiparametric Generalized Linear Models}, journal = {Journal of the Royal Statistical Society: Series B (Statistical Methodology)}, year = {2011}, volume = {73}, number = {1}, pages = {3--36}, publisher = {Wiley Online Library}, } @InProceedings{williams1996, author = {Williams, 
Christopher KI and Rasmussen, Carl Edward}, title = {Gaussian processes for regression}, booktitle = {Advances in neural information processing systems}, year = {1996}, pages = {514--520}, } @MANUAL{nlme2016, title = {\pkg{nlme}: Linear and Nonlinear Mixed Effects Models}, author = {Jose Pinheiro and Douglas Bates and Saikat DebRoy and Deepayan Sarkar and {R Core Team}}, year = {2016}, note = {R package version 3.1-124}, owner = {Paul}, timestamp = {2016.03.06}, url = {http://CRAN.R-project.org/package=nlme} } @Article{westfall2016, author = {Westfall, Jacob and Yarkoni, Tal}, title = {Statistically Controlling for Confounding Constructs is Harder than You Think}, journal = {PloS one}, year = {2016}, volume = {11}, number = {3}, pages = {e0152719}, publisher = {Public Library of Science}, } @Manual{loo2016, title = {\pkg{loo}: {E}fficient Leave-One-Out Cross-Validation and {WAIC} for {B}ayesian Models.}, author = {Aki Vehtari and Andrew Gelman and Jonah Gabry}, year = {2016}, note = {R package version 1.0.0}, url = {https://github.com/stan-dev/loo}, } @Manual{stan2017, title = {Stan: A C++ Library for Probability and Sampling, Version 2.17.0}, author = {{Stan Development Team}}, year = {2017}, owner = {Paul}, timestamp = {2015.06.18}, url = {http://mc-stan.org/}, } @Comment{jabref-meta: databaseType:bibtex;} brms/vignettes/brms_missings.Rmd0000644000176200001440000002441014224753343016560 0ustar liggesusers--- title: "Handle Missing Values with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Handle Missing Values with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 
0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction Many real world data sets contain missing values for various reasons. Generally, we have quite a few options to handle those missing values. The easiest solution is to remove all rows from the data set, where one or more variables are missing. However, if values are not missing completely at random, this will likely lead to bias in our analysis. Accordingly, we usually want to impute missing values in one way or the other. Here, we will consider two very general approaches using **brms**: (1) Impute missing values *before* the model fitting with multiple imputation, and (2) impute missing values on the fly *during* model fitting[^1]. As a simple example, we will use the `nhanes` data set, which contains information on participants' `age`, `bmi` (body mass index), `hyp` (hypertensive), and `chl` (total serum cholesterol). For the purpose of the present vignette, we are primarily interested in predicting `bmi` by `age` and `chl`. ```{r} data("nhanes", package = "mice") head(nhanes) ``` ## Imputation before model fitting There are many approaches allowing us to impute missing data before the actual model fitting takes place. From a statistical perspective, multiple imputation is one of the best solutions. Each missing value is not imputed once but `m` times leading to a total of `m` fully imputed data sets. The model can then be fitted to each of those data sets separately and results are pooled across models, afterwards. One widely applied package for multiple imputation is **mice** (Buuren & Groothuis-Oudshoorn, 2010) and we will use it in the following in combination with **brms**. Here, we apply the default settings of **mice**, which means that all variables will be used to impute missing values in all other variables and imputation functions automatically chosen based on the variables' characteristics. 
```{r} library(mice) imp <- mice(nhanes, m = 5, print = FALSE) ``` Now, we have `m = 5` imputed data sets stored within the `imp` object. In practice, we will likely need more than `5` of those to accurately account for the uncertainty induced by the missingness, perhaps even in the area of `100` imputed data sets (Zhou & Reiter, 2010). Of course, this increases the computational burden by a lot and so we stick to `m = 5` for the purpose of this vignette. Regardless of the value of `m`, we can either extract those data sets and then pass them to the actual model fitting function as a list of data frames, or pass `imp` directly. The latter works because **brms** offers special support for data imputed by **mice**. We will go with the latter approach, since it is less typing. Fitting our model of interest with **brms** to the multiple imputed data sets is straightforward. ```{r, results = 'hide', message = FALSE} fit_imp1 <- brm_multiple(bmi ~ age*chl, data = imp, chains = 2) ``` The returned fitted model is an ordinary `brmsfit` object containing the posterior draws of all `m` submodels. While pooling across models is not necessarily straightforward in classical statistics, it is trivial in a Bayesian framework. Here, pooling results of multiple imputed data sets is simply achieved by combining the posterior draws of the submodels. Accordingly, all post-processing methods can be used out of the box without having to worry about pooling at all. ```{r} summary(fit_imp1) ``` In the summary output, we notice that some `Rhat` values are higher than $1.1$ indicating possible convergence problems. For models based on multiple imputed data sets, this is often a **false positive**: Chains of different submodels may not overlay each other exactly, since they were fitted to different data. 
We can see the chains on the right-hand side of ```{r} plot(fit_imp1, variable = "^b", regex = TRUE) ``` Such non-overlaying chains imply high `Rhat` values without there actually being any convergence issue. Accordingly, we have to investigate the convergence of the submodels separately, which we can do by looking at ```{r} round(fit_imp1$rhats, 2) ``` The convergence of each of the submodels looks good. Accordingly, we can proceed with further post-processing and interpretation of the results. For instance, we could investigate the combined effect of `age` and `chl`. ```{r} conditional_effects(fit_imp1, "age:chl") ``` To summarize, the advantages of multiple imputation are obvious: One can apply it to all kinds of models, since model fitting functions do not need to know that the data sets were imputed, beforehand. Also, we do not need to worry about pooling across submodels when using fully Bayesian methods. The only drawback is the amount of time required for model fitting. Estimating Bayesian models is already quite slow with just a single data set and it only gets worse when working with multiple imputation. ### Compatibility with other multiple imputation packages **brms** offers built-in support for **mice** mainly because I use the latter in some of my own research projects. Nevertheless, `brm_multiple` supports all kinds of multiple imputation packages as it also accepts a *list* of data frames as input for its `data` argument. Thus, you just need to extract the imputed data frames in the form of a list, which can then be passed to `brm_multiple`. Most multiple imputation packages have some built-in functionality for this task. When using the **mi** package, for instance, you simply need to call the `mi::complete` function to get the desired output. ## Imputation during model fitting Imputation during model fitting is generally thought to be more complex than imputation before model fitting, because one has to take care of everything within one step. 
This remains true when imputing missing values with **brms**, but possibly to a somewhat smaller degree. Consider again the `nhanes` data with the goal to predict `bmi` by `age`, and `chl`. Since `age` contains no missing values, we only have to take special care of `bmi` and `chl`. We need to tell the model two things. (1) Which variables contain missing values and how they should be predicted, as well as (2) which of these imputed variables should be used as predictors. In **brms** we can do this as follows: ```{r, results = 'hide', message = FALSE} bform <- bf(bmi | mi() ~ age * mi(chl)) + bf(chl | mi() ~ age) + set_rescor(FALSE) fit_imp2 <- brm(bform, data = nhanes) ``` The model has become multivariate, as we no longer only predict `bmi` but also `chl` (see `vignette("brms_multivariate")` for details about the multivariate syntax of **brms**). We ensure that missings in both variables will be modeled rather than excluded by adding `| mi()` on the left-hand side of the formulas[^2]. We write `mi(chl)` on the right-hand side of the formula for `bmi` to ensure that the estimated missing values of `chl` will be used in the prediction of `bmi`. The summary is a bit more cluttered as we get coefficients for both response variables, but apart from that we can interpret coefficients in the usual way. ```{r} summary(fit_imp2) conditional_effects(fit_imp2, "age:chl", resp = "bmi") ``` The results look pretty similar to those obtained from multiple imputation, but be aware that this may not be generally the case. In multiple imputation, the default is to impute all variables based on all other variables, while in the 'one-step' approach, we have to explicitly specify the variables used in the imputation. Thus, arguably, multiple imputation is easier to apply. An obvious advantage of the 'one-step' approach is that the model needs to be fitted only once instead of `m` times. 
Also, within the **brms** framework, we can use multilevel structure and complex non-linear relationships for the imputation of missing values, which is not achieved as easily in standard multiple imputation software. On the downside, it is currently not possible to impute discrete variables, because **Stan** (the engine behind **brms**) does not allow estimating discrete parameters. ### Combining measurement error and missing values Missing value terms in **brms** cannot only handle missing values but also measurement error, or arbitrary combinations of the two. In fact, we can think of a missing value as a value with infinite measurement error. Thus, `mi` terms are a natural (and somewhat more verbose) generalization of the now soft deprecated `me` terms. Suppose we had measured the variable `chl` with some known error: ```{r} nhanes$se <- rexp(nrow(nhanes), 2) ``` Then we can go ahead an include this information into the model as follows: ```{r, results = 'hide', message = FALSE, eval = FALSE} bform <- bf(bmi | mi() ~ age * mi(chl)) + bf(chl | mi(se) ~ age) + set_rescor(FALSE) fit_imp3 <- brm(bform, data = nhanes) ``` Summarizing and post-processing the model continues to work as usual. [^1]: Actually, there is a third approach that only applies to missings in response variables. If we want to impute missing responses, we just fit the model using the observed responses and than impute the missings *after* fitting the model by means of posterior prediction. That is, we supply the predictor values corresponding to missing responses to the `predict` method. [^2]: We don't really need this for `bmi`, since `bmi` is not used as a predictor for another variable. Accordingly, we could also -- and equivalently -- impute missing values of `bmi` *after* model fitting by means of posterior prediction. ## References Buuren, S. V. & Groothuis-Oudshoorn, K. (2010). mice: Multivariate imputation by chained equations in R. *Journal of Statistical Software*, 1-68. 
doi.org/10.18637/jss.v045.i03 Zhou, X. & Reiter, J. P. (2010). A Note on Bayesian Inference After Multiple Imputation. *The American Statistician*, 64(2), 159-163. doi.org/10.1198/tast.2010.09109 brms/vignettes/brms_customfamilies.Rmd0000644000176200001440000003230314224753323017746 0ustar liggesusers--- title: "Define Custom Response Distributions with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Define Custom Response Distributions with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction The **brms** package comes with a lot of built-in response distributions -- usually called *families* in R -- to specify among others linear, count data, survival, response times, or ordinal models (see `help(brmsfamily)` for an overview). Despite supporting over two dozen families, there is still a long list of distributions, which are not natively supported. The present vignette will explain how to specify such *custom families* in **brms**. By doing that, users can benefit from the modeling flexibility and post-processing options of **brms** even when using self-defined response distributions. If you have built a custom family that you want to make available to other users, you can submit a pull request to this [GitHub repository](https://github.com/paul-buerkner/custom-brms-families). ## A Case Study As a case study, we will use the `cbpp` data of the **lme4** package, which describes the development of the CBPP disease of cattle in Africa. 
The data set contains four variables: `period` (the time period), `herd` (a factor identifying the cattle herd), `incidence` (number of new disease cases for a given herd and time period), as well as `size` (the herd size at the beginning of a given time period). ```{r cbpp} data("cbpp", package = "lme4") head(cbpp) ``` In a first step, we will be predicting `incidence` using a simple binomial model, which will serve as our baseline model. For observed number of events $y$ (`incidence` in our case) and total number of trials $T$ (`size`), the probability mass function of the binomial distribution is defined as $$ P(y | T, p) = \binom{T}{y} p^{y} (1 - p)^{N-y} $$ where $p$ is the event probability. In the classical binomial model, we will directly predict $p$ on the logit-scale, which means that for each observation $i$ we compute the success probability $p_i$ as $$ p_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)} $$ where $\eta_i$ is the linear predictor term of observation $i$ (see `vignette("brms_overview")` for more details on linear predictors in **brms**). Predicting `incidence` by `period` and a varying intercept of `herd` is straight forward in **brms**: ```{r fit1, results='hide'} fit1 <- brm(incidence | trials(size) ~ period + (1|herd), data = cbpp, family = binomial()) ``` In the summary output, we see that the incidence probability varies substantially over herds, but reduces over the course of the time as indicated by the negative coefficients of `period`. ```{r fit1_summary} summary(fit1) ``` A drawback of the binomial model is that -- after taking into account the linear predictor -- its variance is fixed to $\text{Var}(y_i) = T_i p_i (1 - p_i)$. All variance exceeding this value cannot be not taken into account by the model. There are multiple ways of dealing with this so called *overdispersion* and the solution described below will serve as an illustrative example of how to define custom families in **brms**. 
## The Beta-Binomial Distribution The *beta-binomial* model is a generalization of the *binomial* model with an additional parameter to account for overdispersion. In the beta-binomial model, we do not predict the binomial probability $p_i$ directly, but assume it to be beta distributed with hyperparameters $\alpha > 0$ and $\beta > 0$: $$ p_i \sim \text{Beta}(\alpha_i, \beta_i) $$ The $\alpha$ and $\beta$ parameters are both hard to interpret and generally not recommended for use in regression models. Thus, we will apply a different parameterization with parameters $\mu \in [0, 1]$ and $\phi > 0$, which we will call $\text{Beta2}$: $$ \text{Beta2}(\mu, \phi) = \text{Beta}(\mu \phi, (1-\mu) \phi) $$ The parameters $\mu$ and $\phi$ specify the mean and precision parameter, respectively. By defining $$ \mu_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)} $$ we still predict the expected probability by means of our transformed linear predictor (as in the original binomial model), but account for potential overdispersion via the parameter $\phi$. ## Fitting Custom Family Models The beta-binomial distribution is natively supported in **brms** nowadays, but we will still use it as an example to define it ourselves via the `custom_family` function. This function requires the family's name, the names of its parameters (`mu` and `phi` in our case), corresponding link functions (only applied if parameters are predicted), their theoretical lower and upper bounds (only applied if parameters are not predicted), information on whether the distribution is discrete or continuous, and finally, whether additional non-parameter variables need to be passed to the distribution. 
For our beta-binomial example, this results in the following custom family: ```{r beta_binomial2} beta_binomial2 <- custom_family( "beta_binomial2", dpars = c("mu", "phi"), links = c("logit", "log"), lb = c(0, 0), ub = c(1, NA), type = "int", vars = "vint1[n]" ) ``` The name `vint1` for the variable containing the number of trials is not chosen arbitrarily as we will see below. Next, we have to provide the relevant **Stan** functions if the distribution is not defined in **Stan** itself. For the `beta_binomial2` distribution, this is straight forward since the ordinal `beta_binomial` distribution is already implemented. ```{r stan_funs} stan_funs <- " real beta_binomial2_lpmf(int y, real mu, real phi, int T) { return beta_binomial_lpmf(y | T, mu * phi, (1 - mu) * phi); } int beta_binomial2_rng(real mu, real phi, int T) { return beta_binomial_rng(T, mu * phi, (1 - mu) * phi); } " ``` For the model fitting, we will only need `beta_binomial2_lpmf`, but `beta_binomial2_rng` will come in handy when it comes to post-processing. We define: ```{r stanvars} stanvars <- stanvar(scode = stan_funs, block = "functions") ``` To provide information about the number of trials (an integer variable), we are going to use the addition argument `vint()`, which can only be used in custom families. Similarly, if we needed to include additional vectors of real data, we would use `vreal()`. Actually, for this particular example, we could more elegantly apply the addition argument `trials()` instead of `vint()`as in the basic binomial model. However, since the present vignette is meant to give a general overview of the topic, we will go with the more general method. 
We now have all components together to fit our custom beta-binomial model: ```{r fit2, results='hide'} fit2 <- brm( incidence | vint(size) ~ period + (1|herd), data = cbpp, family = beta_binomial2, stanvars = stanvars ) ``` The summary output reveals that the uncertainty in the coefficients of `period` is somewhat larger than in the basic binomial model, which is the result of including the overdispersion parameter `phi` in the model. Apart from that, the results looks pretty similar. ```{r summary_fit2} summary(fit2) ``` ## Post-Processing Custom Family Models Some post-processing methods such as `summary` or `plot` work out of the box for custom family models. However, there are three particularly important methods, which require additional input by the user. These are `posterior_epred`, `posterior_predict` and `log_lik` computing predicted mean values, predicted response values, and log-likelihood values, respectively. They are not only relevant for their own sake, but also provide the basis of many other post-processing methods. For instance, we may be interested in comparing the fit of the binomial model with that of the beta-binomial model by means of approximate leave-one-out cross-validation implemented in method `loo`, which in turn requires `log_lik` to be working. The `log_lik` function of a family should be named `log_lik_` and have the two arguments `i` (indicating observations) and `prep`. You don't have to worry too much about how `prep` is created (if you are interested, check out the `prepare_predictions` function). Instead, all you need to know is that parameters are stored in slot `dpars` and data are stored in slot `data`. Generally, parameters take on the form of a $S \times N$ matrix (with $S =$ number of posterior draws and $N =$ number of observations) if they are predicted (as is `mu` in our example) and a vector of size $N$ if the are not predicted (as is `phi`). 
We could define the complete log-likelihood function in R directly, or we can expose the self-defined **Stan** functions and apply them. The latter approach is usually more convenient, but the former is more stable and the only option when implementing custom families in other R packages building upon **brms**. For the purpose of the present vignette, we will go with the latter approach. ```{r} expose_functions(fit2, vectorize = TRUE) ``` and define the required `log_lik` functions with a few lines of code. ```{r log_lik} log_lik_beta_binomial2 <- function(i, prep) { mu <- brms::get_dpar(prep, "mu", i = i) phi <- brms::get_dpar(prep, "phi", i = i) trials <- prep$data$vint1[i] y <- prep$data$Y[i] beta_binomial2_lpmf(y, mu, phi, trials) } ``` The `get_dpar` function will do the necessary transformations to handle both the case when the distributional parameters are predicted separately for each row and when they are the same for the whole fit. With that being done, all of the post-processing methods requiring `log_lik` will work as well. For instance, model comparison can simply be performed via ```{r loo} loo(fit1, fit2) ``` Since larger `ELPD` values indicate better fit, we see that the beta-binomial model fits somewhat better, although the corresponding standard error reveals that the difference is not that substantial. Next, we will define the function necessary for the `posterior_predict` method: ```{r posterior_predict} posterior_predict_beta_binomial2 <- function(i, prep, ...) { mu <- brms::get_dpar(prep, "mu", i = i) phi <- brms::get_dpar(prep, "phi", i = i) trials <- prep$data$vint1[i] beta_binomial2_rng(mu, phi, trials) } ``` The `posterior_predict` function looks pretty similar to the corresponding `log_lik` function, except that we are now creating random draws of the response instead of log-likelihood values. Again, we are using an exposed **Stan** function for convenience. 
Make sure to add a `...` argument to your `posterior_predict` function even if you are not using it, since some families require additional arguments. With `posterior_predict` to be working, we can engage for instance in posterior-predictive checking: ```{r pp_check} pp_check(fit2) ``` When defining the `posterior_epred` function, you have to keep in mind that it has only a `prep` argument and should compute the mean response values for all observations at once. Since the mean of the beta-binomial distribution is $\text{E}(y) = \mu T$ definition of the corresponding `posterior_epred` function is not too complicated, but we need to get the dimension of parameters and data in line. ```{r posterior_epred} posterior_epred_beta_binomial2 <- function(prep) { mu <- brms::get_dpar(prep, "mu") trials <- prep$data$vint1 trials <- matrix(trials, nrow = nrow(mu), ncol = ncol(mu), byrow = TRUE) mu * trials } ``` A post-processing method relying directly on `posterior_epred` is `conditional_effects`, which allows to visualize effects of predictors. ```{r conditional_effects} conditional_effects(fit2, conditions = data.frame(size = 1)) ``` For ease of interpretation we have set `size` to 1 so that the y-axis of the above plot indicates probabilities. ## Turning a Custom Family into a Native Family Family functions built natively into **brms** are safer to use and more convenient, as they require much less user input. If you think that your custom family is general enough to be useful to other users, please feel free to open an issue on [GitHub](https://github.com/paul-buerkner/brms/issues) so that we can discuss all the details. Provided that we agree it makes sense to implement your family natively in brms, the following steps are required (`foo` is a placeholder for the family name): * In `family-lists.R`, add function `.family_foo` which should contain basic information about your family (you will find lots of examples for other families there). 
* In `families.R`, add family function `foo` which should be a simple wrapper around `.brmsfamily`. * In `stan-likelihood.R`, add function `stan_log_lik_foo` which provides the likelihood of the family in Stan language. * If necessary, add self-defined Stan functions in separate files under `inst/chunks`. * Add functions `posterior_predict_foo`, `posterior_epred_foo` and `log_lik_foo` to `posterior_predict.R`, `posterior_epred.R` and `log_lik.R`, respectively. * If necessary, add distribution functions to `distributions.R`. brms/vignettes/brms_families.Rmd0000644000176200001440000003401714275414730016521 0ustar liggesusers--- title: "Parameterization of Response Distributions in brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Parameterization of Response Distributions in brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- The purpose of this vignette is to discuss the parameterizations of the families (i.e., response distributions) used in brms. For a more general overview of the package see `vignette("brms_overview")`. ## Notation Throughout this vignette, we denote values of the response variable as $y$, a density function as $f$, and use $\mu$ to refer to the main model parameter, which is usually the mean of the response distribution or some closely related quantity. In a regression framework, $\mu$ is not estimated directly but computed as $\mu = g(\eta)$, where $\eta$ is a predictor term (see `help(brmsformula)` for details) and $g$ is the response function (i.e., inverse of the link function). ## Location shift models The density of the **gaussian** family is given by $$ f(y) = \frac{1}{\sqrt{2\pi}\sigma} \exp\left(-\frac{1}{2}\left(\frac{y - \mu}{\sigma}\right)^2\right) $$ where $\sigma$ is the residual standard deviation. 
The density of the **student** family is given by $$ f(y) = \frac{\Gamma((\nu + 1)/2)}{\Gamma(\nu/2)} \frac{1}{\sqrt{\nu\pi}\sigma}\left(1 + \frac{1}{\nu} \left(\frac{y - \mu}{\sigma}\right)^2\right)^{-(\nu+1)/2} $$ $\Gamma$ denotes the gamma function and $\nu > 1$ are the degrees of freedom. As $\nu \rightarrow \infty$, the student distribution becomes the gaussian distribution. The density of the **skew_normal** family is given by $$ f(y) = \frac{1}{\sqrt{2\pi}\omega} \exp\left(-\frac{1}{2} \left(\frac{y - \xi}{\omega}\right)^2 \right) \left(1 + \text{erf} \left( \alpha \left(\frac{y - \xi}{\omega \sqrt{2}} \right) \right) \right) $$ where $\xi$ is the location parameter, $\omega$ is the positive scale parameter, $\alpha$ the skewness parameter, and $\text{erf}$ denotes the error function of the gaussian distribution. To parameterize the skew-normal distribution in terms of the mean $\mu$ and standard deviation $\sigma$, $\omega$ and $\xi$ are computed as $$ \omega = \frac{\sigma}{\sqrt{1 - \frac{2}{\pi} \frac{\alpha^2}{1 + \alpha^2}}} $$ $$ \xi = \mu - \omega \frac{\alpha}{\sqrt{1 + \alpha^2}} \sqrt{\frac{2}{\pi}} $$ If $\alpha = 0$, the skew-normal distribution becomes the gaussian distribution. For location shift models, $y$ can be any real value. ## Binary and count data models The density of the **binomial** family is given by $$ f(y) = {N \choose y} \mu^{y} (1-\mu)^{N - y} $$ where $N$ is the number of trials and $y \in \{0, ... , N\}$. When all $N$ are $1$ (i.e., $y \in \{0,1\}$), the **bernoulli** distribution for binary data arises. For $y \in \mathbb{N}_0$, the density of the **poisson** family is given by $$ f(y) = \frac{\mu^{y}}{y!} \exp(-\mu) $$ The density of the **negbinomial** (negative binomial) family is $$ f(y) = {y + \phi - 1 \choose y} \left(\frac{\mu}{\mu + \phi}\right)^{y} \left(\frac{\phi}{\mu + \phi}\right)^\phi $$ where $\phi$ is a positive precision parameter. 
For $\phi \rightarrow \infty$, the negative binomial distribution becomes the poisson distribution. The density of the **geometric** family arises if $\phi$ is set to $1$. ## Time-to-event models With time-to-event models we mean all models that are defined on the positive reals only, that is $y \in \mathbb{R}^+$. The density of the **lognormal** family is given by $$ f(y) = \frac{1}{\sqrt{2\pi}\sigma y} \exp\left(-\frac{1}{2}\left(\frac{\log(y) - \mu}{\sigma}\right)^2\right) $$ where $\sigma$ is the residual standard deviation on the log-scale. The density of the **Gamma** family is given by $$ f(y) = \frac{(\alpha / \mu)^\alpha}{\Gamma(\alpha)} y^{\alpha-1} \exp\left(-\frac{\alpha y}{\mu}\right) $$ where $\alpha$ is a positive shape parameter. The density of the **weibull** family is given by $$ f(y) = \frac{\alpha}{s} \left(\frac{y}{s}\right)^{\alpha-1} \exp\left(-\left(\frac{y}{s}\right)^\alpha\right) $$ where $\alpha$ is again a positive shape parameter and $s = \mu / \Gamma(1 + 1 / \alpha)$ is the scale parameter to that $\mu$ is the mean of the distribution. The **exponential** family arises if $\alpha$ is set to $1$ for either the gamma or Weibull distribution. The density of the **inverse.gaussian** family is given by $$ f(y) = \left(\frac{\alpha}{2 \pi y^3}\right)^{1/2} \exp \left(\frac{-\alpha (y - \mu)^2}{2 \mu^2 y} \right) $$ where $\alpha$ is a positive shape parameter. The **cox** family implements Cox proportional hazards model which assumes a hazard function of the form $h(y) = h_0(y) \mu$ with baseline hazard $h_0(y)$ expressed via M-splines (which integrate to I-splines) in order to ensure monotonicity. The density of the cox model is then given by $$ f(y) = h(y) S(y) $$ where $S(y)$ is the survival function implied by $h(y)$. ## Extreme value models Modeling extremes requires special distributions. 
One may use the **weibull** distribution (see above) or the **frechet** distribution with density $$ f(y) = \frac{\nu}{s} \left(\frac{y}{s}\right)^{-1-\nu} \exp\left(-\left(\frac{y}{s}\right)^{-\nu}\right) $$ where $s = \mu / \Gamma(1 - 1 / \nu)$ is a positive scale parameter and $\nu > 1$ is a shape parameter so that $\mu$ predicts the mean of the Frechet distribution. A generalization of both distributions is the generalized extreme value distribution (family **gen_extreme_value**) with density $$ f(y) = \frac{1}{\sigma} t(y)^{\xi + 1} \exp(-t(y)) $$ where $$ t(y) = \left(1 + \xi \left(\frac{y - \mu}{\sigma} \right)\right)^{-1 / \xi} $$ with positive scale parameter $\sigma$ and shape parameter $\xi$. ## Response time models One family that is especially suited to model reaction times is the **exgaussian** ('exponentially modified Gaussian') family. Its density is given by $$ f(y) = \frac{1}{2 \beta} \exp\left(\frac{1}{2 \beta} \left(2\xi + \sigma^2 / \beta - 2 y \right) \right) \text{erfc}\left(\frac{\xi + \sigma^2 / \beta - y}{\sqrt{2} \sigma} \right) $$ where $\beta$ is the scale (inverse rate) of the exponential component, $\xi$ is the mean of the Gaussian component, $\sigma$ is the standard deviation of the Gaussian component, and $\text{erfc}$ is the complementary error function. We parameterize $\mu = \xi + \beta$ so that the main predictor term equals the mean of the distribution. Another family well suited for modeling response times is the **shifted_lognormal** distribution. It's density equals that of the **lognormal** distribution except that the whole distribution is shifted to the right by a positive parameter called *ndt* (for consistency with the **wiener** diffusion model explained below). A family concerned with the combined modeling of reaction times and corresponding binary responses is the **wiener** diffusion model. It has four model parameters each with a natural interpretation. 
The parameter $\alpha > 0$ describes the separation between two boundaries of the diffusion process, $\tau > 0$ describes the non-decision time (e.g., due to image or motor processing), $\beta \in [0, 1]$ describes the initial bias in favor of the upper alternative, and $\delta \in \mathbb{R}$ describes the drift rate to the boundaries (a positive value indicates a drift towards to upper boundary). The density for the reaction time at the upper boundary is given by $$ f(y) = \frac{\alpha}{(y-\tau)^3/2} \exp \! \left(- \delta \alpha \beta - \frac{\delta^2(y-\tau)}{2}\right) \sum_{k = - \infty}^{\infty} (2k + \beta) \phi \! \left(\frac{2k + \alpha \beta}{\sqrt{y - \tau}}\right) $$ where $\phi(x)$ denotes the standard normal density function. The density at the lower boundary can be obtained by substituting $1 - \beta$ for $\beta$ and $-\delta$ for $\delta$ in the above equation. In brms the parameters $\alpha$, $\tau$, and $\beta$ are modeled as auxiliary parameters named *bs* ('boundary separation'), *ndt* ('non-decision time'), and *bias* respectively, whereas the drift rate $\delta$ is modeled via the ordinary model formula that is as $\delta = \mu$. ## Quantile regression Quantile regression is implemented via family **asym_laplace** (asymmetric Laplace distribution) with density $$ f(y) = \frac{p (1 - p)}{\sigma} \exp\left(-\rho_p\left(\frac{y - \mu}{\sigma}\right)\right) $$ where $\rho_p$ is given by $\rho_p(x) = x (p - I_{x < 0})$ and $I_A$ is the indicator function of set $A$. The parameter $\sigma$ is a positive scale parameter and $p$ is the *quantile* parameter taking on values in $(0, 1)$. For this distribution, we have $P(Y < g(\eta)) = p$. Thus, quantile regression can be performed by fixing $p$ to the quantile to interest. 
## Probability models The density of the **Beta** family for $y \in (0,1)$ is given by $$ f(y) = \frac{y^{\mu \phi - 1} (1-y)^{(1-\mu) \phi-1}}{B(\mu \phi, (1-\mu) \phi)} $$ where $B$ is the beta function and $\phi$ is a positive precision parameter. A multivariate generalization of the **Beta** family is the **dirichlet** family with density $$ f(y) = \frac{1}{B((\mu_{1}, \ldots, \mu_{K}) \phi)} \prod_{k=1}^K y_{k}^{\mu_{k} \phi - 1}. $$ The **dirichlet** family is implemented with the multivariate logit link function so that $$ \mu_{j} = \frac{\exp(\eta_{j})}{\sum_{k = 1}^{K} \exp(\eta_{k})} $$ For reasons of identifiability, $\eta_{\rm ref}$ is set to $0$, where ${\rm ref}$ is one of the response categories chosen as reference. An alternative to the **dirichlet** family is the **logistic_normal** family with density $$ f(y) = \frac{1}{\prod_{k=1}^K y_k} \times \text{multivariate_normal}(\tilde{y} \, | \, \mu, \sigma, \Omega) $$ where $\tilde{y}$ is the multivariate logit transformed response $$ \tilde{y} = (\log(y_1 / y_{\rm ref}), \ldots, \log(y_{\rm ref-1} / y_{\rm ref}), \log(y_{\rm ref+1} / y_{\rm ref}), \ldots, \log(y_K / y_{\rm ref})) $$ of dimension $K-1$ (excluding the reference category), which is modeled as multivariate normally distributed with latent mean and standard deviation vectors $\mu$ and $\sigma$, as well as correlation matrix $\Omega$. ## Circular models The density of the **von_mises** family for $y \in (-\pi,\pi)$ is given by $$ f(y) = \frac{\exp(\kappa \cos(y - \mu))}{2\pi I_0(\kappa)} $$ where $I_0$ is the modified Bessel function of order 0 and $\kappa$ is a positive precision parameter. ## Ordinal and categorical models For ordinal and categorical models, $y$ is one of the categories $1, ..., K$. The intercepts of ordinal models are called thresholds and are denoted as $\tau_k$, with $k \in \{1, ..., K-1\}$, whereas $\eta$ does not contain a fixed effects intercept. 
Note that the applied link functions $h$ are technically distribution functions $\mathbb{R} \rightarrow [0,1]$. The density of the **cumulative** family (implementing the most basic ordinal model) is given by $$ f(y) = g(\tau_{y + 1} - \eta) - g(\tau_{y} - \eta) $$ The densities of the **sratio** (stopping ratio) and **cratio** (continuation ratio) families are given by $$ f(y) = g(\tau_{y + 1} - \eta) \prod_{k = 1}^{y} (1 - g(\tau_{k} - \eta)) $$ and $$ f(y) = (1 - g(\eta - \tau_{y + 1})) \prod_{k = 1}^{y} g(\eta - \tau_{k}) $$ respectively. Note that both families are equivalent for symmetric link functions such as logit or probit. The density of the **acat** (adjacent category) family is given by $$ f(y) = \frac{\prod_{k=1}^{y} g(\eta - \tau_{k}) \prod_{k=y+1}^K(1-g(\eta - \tau_{k}))}{\sum_{k=0}^K\prod_{j=1}^k g(\eta-\tau_{j}) \prod_{j=k+1}^K(1-g(\eta - \tau_{j}))} $$ For the logit link, this can be simplified to $$ f(y) = \frac{\exp \left(\sum_{k=1}^{y} (\eta - \tau_{k}) \right)} {\sum_{k=0}^K \exp\left(\sum_{j=1}^k (\eta - \tau_{j}) \right)} $$ The linear predictor $\eta$ can be generalized to also depend on the category $k$ for a subset of predictors. This leads to category specific effects (for details on how to specify them see `help(brm)`). Note that **cumulative** and **sratio** models use $\tau - \eta$, whereas **cratio** and **acat** use $\eta - \tau$. This is done to ensure that larger values of $\eta$ increase the probability of *higher* response categories. The **categorical** family is currently only implemented with the multivariate logit link function and has density $$ f(y) = \mu_{y} = \frac{\exp(\eta_{y})}{\sum_{k = 1}^{K} \exp(\eta_{k})} $$ Note that $\eta$ does also depend on the category $k$. For reasons of identifiability, $\eta_{1}$ is set to $0$. 
A generalization of the **categorical** family to more than one trial is the **multinomial** family with density $$ f(y) = {N \choose y_{1}, y_{2}, \ldots, y_{K}} \prod_{k=1}^K \mu_{k}^{y_{k}} $$ where, for each category, $\mu_{k}$ is estimated via the multivariate logit link function shown above. ## Zero-inflated and hurdle models **Zero-inflated** and **hurdle** families extend existing families by adding special processes for responses that are zero. The density of a **zero-inflated** family is given by $$ f_z(y) = z + (1 - z) f(0) \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) \quad \text{if } y > 0 $$ where $z$ denotes the zero-inflation probability. Currently implemented families are **zero_inflated_poisson**, **zero_inflated_binomial**, **zero_inflated_negbinomial**, and **zero_inflated_beta**. The density of a **hurdle** family is given by $$ f_z(y) = z \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) / (1 - f(0)) \quad \text{if } y > 0 $$ Currently implemented families are **hurdle_poisson**, **hurdle_negbinomial**, **hurdle_gamma**, and **hurdle_lognormal**. The density of a **zero-one-inflated** family is given by $$ f_{\alpha, \gamma}(y) = \alpha (1 - \gamma) \quad \text{if } y = 0 \\ f_{\alpha, \gamma}(y) = \alpha \gamma \quad \text{if } y = 1 \\ f_{\alpha, \gamma}(y) = (1 - \alpha) f(y) \quad \text{if } y \notin \{0, 1\} $$ where $\alpha$ is the zero-one-inflation probability (i.e. the probability that zero or one occurs) and $\gamma$ is the conditional one-inflation probability (i.e. the probability that one occurs rather than zero). Currently implemented families are **zero_one_inflated_beta**. 
brms/vignettes/me_rent3.pdf0000644000176200001440000013306613252451326015450 0ustar liggesusers%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170908174613) /ModDate (D:20170908174613) /Title (R Graphics Output) /Producer (R 3.4.1) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 42286 /Filter /FlateDecode >> stream xKtKr7?Z1EۂȀDH= 2Dk܇%ѧOt[k̈'OۿOQox[VWZ?_k97~~KʘgV~_~'Ͽ\oپ_w5ϪZy_g_H?˯|ߚzh_ySLg_ ~s> ı$~Mq֟M=ׯڿ.\^Nרs>r9wr3Zz9_9QW_k%~ 3c֑1}]戼J ߞsSWn?'p<wfgi #/kwug?~lu=-ۓVwOv-slOݿ&8?"nQ?_]yTnşWk2jyٲ]7!9lf޼;?bf8wu;tiztKvU'zn>X?sLJ[2qe8r~?Mqn^xe30&cb2+2ӷw7ۣGts[B|kn8WS5tsf qBq/MWqkom?Vf'?zܗ6fz>\=r=%`zdiISد 2QbEW$[W4y/ʣzğ%bɣ7Fȏy)+A}:}8F1u{ݜ,WW KH-Od(%Н< /P0ECl(_ StRe:} t52B_PIbxn](WNeR4r=PG4r=Z&.霟G5=O,0еa;v8/BqzϳsݗwuX ~#ʫ=a?c]VjV w]!ͣ/h1+{`0ܙ+i%羴ǤX,uSN|46 ٯ-/f*cy;m@q3.^n ߬pR&{Y\g۷S|ܯ=ngW]K7{Wq=8 =&&C&GMp7ol/8 !l*.sw3M=Z Y yq-h9hcæΖşBT}|[İ#5¦|>-ʂ뾃LokbL3abIFG5ltdbAEGl'&Tt`AZ\`b =Nax“3X:|,11Ȁ^ok;95nAX28?Tg#Z"3 4( tQ}^xeDh~ tfX-5XgspM7^c0,w{k}F@ȶ'=/JGRG2apI4X"myN)z}nȒI_NA+K[ǀ@̀+na\摒'pJMB8 8,OQ Fop(/rd߯ed c*o#'>6Fݏ<%ö=c7e X_cY+Ŵ'C^[fWh;ܲw #Y`iqyGI. H!8Z.^j󈯡?b~|R=JR%}~rCv?!JL 8ʴ_ #MgO<&1vE.8*\HgAʿ[fd h` rdcBݐ^ i\˿GdԮ6]e3k#kq-g- 8W}o ԭ,.e@6u C3~CM6 u&Oĩe#ϿxqS `t0J68l\h,t'\d{~<6wm>d4ot綀p%l #u8b2[) ʐJ% >:LA&;2+1`SY~ VT!A;2~ȏx^dȀ`,Fl? B2 :&2ȰdrFlPf Lud&PUk[u u`l>L" 3)&7]_&C-UC)W(`&c' a2GUdZQ^hld}090H-rə\G3qH :4+"p ɼ഑'lAJXh(9㾄=$@…|b?il ޠu?#hbEb7C`F`Ha ,הw>{TF̯ Z|nI;cyR~?;`D#[Ȅ5Ö׭$ZVJ;_n-@?MJv+!1znSU< 'm'6~ѭB"КB=H= s`zKMT ]="iFBaI[y+D *ttT9=(T^z !! M4[y&DCiVUg~G4AjGisU&HCM)9ҏetUiTp4=_ytI?-NH^ v+$~ݖIVK\?3TOvڷ*2~lfd{ Zse O恙 -ou0 04-f׌=[wp<*Y(#,Tq'X Yibs瞩Fzy#gM0 ƨi$؛^X "b;};#l!q$yvq>]Î8qa]faI;T+/CV]|!w: = B* n/0>hkLV4NwVWVM4.Hj"#XhNUU-yp5|-3|{4,UVVoUR{dV*kX-ͪ҇))p CHjl)USa5)Mj s; Vן+7,%K'X5;*~ʮ:}mT<15t<0S˪H5N-&@@A*~TSE5Uܶ\ HYSlFmL0'"h쩥Pki~ ~SN`,PR+t[~AtDp KnKrTNzVtS{>p?2i/88ޘne=C "8'UMZ,ZTv %Û2A֤IdM#ư$$fYSb5{ ZLZT(Z'Zg֢vD&^D\Pk +BJ C"Дm|j\F! 
-6TFP_&" wT"tQP*fO"A" [؞)-z u& Y'Mؔ^zF@#ĶTDb8+6U$6 x?Kl6d%uO۲[RFl0[A<$."%0[o'C{ Hֿ=Ba$qeYtLb*3'MbElGĦGNhcPs4\_ІUUq=ЛK^׆w:&{~Σ.77|ňކץ-6<(M冗鬽yw\,`9`9,=N<d ; ;aS<Uyz ;/ C$7{;8yu ;gź-opVr>Rsk\:S7A7=x%b*l"dϪeBlBvf!{#2"cϷo3 Ի213سFv=O˨ "FЅxgJ{W"D®zw"aH JD^4ve{hXA HأHxdzA2 򭯷$7hHQ!ЌyoB*!P[9p((G ._!pAHM$T7ok yCD?1#jɠfm>a(9&Oͭ2$ΐ0/׺Wzv $CzcC&.=}͒b.Uo𺳀 ߄]BX cxEZ6r6 #nQ[>](9: %BIE%%@IWANw``S߄+[M6=Q`YL`!E%K l#T$ڮt`p(鐻"`90` f«wO,5{`Pk%h >aI`9g $vXf%Hk=[/=Xz%Ilf۪\+ .I6Ev>Tq>$;@U` d-|kb APucB$IF,߶rΧxgpH2+I@1$֎j:$N؊f$I> p$66QkFHFm6?!56IzFFDGML(F]$˨MLQ{Vu5&$Ȩ$`R!TqfQo1+KF vM1bԡAF1 QuJ:՟JFU?u߁i6biuaO2O;ȨO:>pA8E f(~^%~D{,:C??/ ~(v;`W,Y<*E2؝ ~'`=IA?6P*3D ~5;uG?[s=1%CďL#H2lDISe.?/?L '#,8sIǎn#+%H2U}B@NHZ }N`N?wB庀(1t0E`k*FhkѩE wt(5oWDηI詑)#hjL? E#\PGE{hDIzBͰc(г儢QHUF-c US(h[((O7^c~d`Csk~Ѐ(:2+ o}Ę>葁2DѡVL(_pQUw&(:4ٞ(:ECTh\/Qtd:DSjVEsWE@)h|?Q&%(c 1؁^DѡVq: %#Z K( qs"&ՠ$Zυ]Έ]DϮdC&UkPTs𨣧gS㱡*S㲉K㵉K5DE"\m0O4AT h#Н^!@C ݌5BC[1wJ@tbzcV4U>-CgO#*UN,HQ CI 7C"f?ˉ&]0f9?0F3nQY09NC"gM"ДS|H1 aF!sޮ'ct]$0cqq5Y $HgĞ ep"A"X#$c6ZXG +j7 ɘbr1'/ 1b{l[=Ō0yPFgMپ2f1U(A>e<12'Ę[)|))L& c*L0<0&4+Ǧ1GS'SUgbL51p P6\*F1RbLTNH2s1#I 'c.i29~ǘ NJ1猩^1(ŘcYdiGƌL2fTm1dL71ɘ#Ɯߪ̡Y1Ve\a*1fOɘTVK 1Xbʥyh c ^2fCN2LD2Llp%sp!WU]SNdY 9:,|~ɘSU2e&SZݟ4 `UΔ9_s_Lw0g_~ ϡ{996dx[ŲdL?͹\ n΂7q,9, v1ysshp!o:p&1=sXV,ݸǙtfBPQ8࿌y(?0`<`RdyD[o)ɠTc"=W?da/$.u*O`dYj3s.HDY̭M( YV8WJ}W0xSGх>_Lɒuu%&P@gO ZH:зF/%Y;gPr#Jv6pyPs |@^*`bPp %ǧO T .ʱRM݉%cLvI8cETGȒ*vM= "vK6Sdf9RcÎSoX%uXR[KnKnXr& KU=dɩdɩYԸZ_Jm%MASd:XKNr$K%gKV}{gs%M~{qe`:~w KNu%P3,9,9uYrfgbySɒS[0#,~#&FdYxȒp\]NvUf~q4dG p}W&KjxXR7c=JKĎ[W&b9@쨎3:Ďv14GvvWZ'캧/Dv٣sUdGZ1 vCVdo5\y:q>& hEvΎ8xOCIǶT6r";?1츱b@N+vVanD 9o,:"dy|h CH澞 2M`d4X0`Gd*#z;2 U:4e7 vbp}Q߉9;`GFh ;w숌-?E0"ΑDz!*#a;2bǪ[`GFy 7ֆ|HUl$FLW0\Qv1ܸ:1"z6{58 7Gz'0P85a62Gx9ʹ,d¡`62bWĈs$F\bD}1digɈ512Ĉ:D11⸧:#*g+FT11bDuɋudQň%F*+&:!L8uT5pj<30Zv0.>#Nw%~2EńCLB1RLqb}T/b Lm99L8}#C$&ߺ[Vq1[_x=L Yz]~s&܉qAo5 n[D7+FyO #~ !ȕ-79.~2pnRpE缑afM:S7-=ϴf{ؘ/~#?, sB\߀!4gH߈M0qN`^~ =k<870'N4DM(s&g&o@A e)o&V9CF9]7D؈ aVX$mz% vv#/M7bzwJ&w, OǞMh-gT/`F 2\O QlɆ ;t#E  P(ӵWPpjD[8E1`wS~KQx'4zgZ9mZ)wmHK~?K(n 
.Ҷ#90Då#ሂK+Rx(hUoy$%jMofrǥ;Kc WahG N! z*_DťrJ*+ˁKIF@ T\^@T\Dŕ40ZyHHhڕt Wy t\hBt\:mt:q4=G\J\QI\ %f% %WVy#P^\ W6vz%:8>dn&:zF8踊N?]?DUߥfc=­DGY=z):.?x1IzE zI#b=z"F@1vN; Gع~D] gb+$N!LB\O")B^mC*ByD$ĮbNBT1w: bMbJP=6u&q(_"GbDKzG}vosBXl10xqwv#H#/s{&zzIgd|&"Jd!1AzHOQnz XKxzH _2ggu}\}Mh,zҺ@;B㊎#wځ* =ozhGJgVa%Q RQ. Ýډ; M ӗ6 @ѰXe`9PbYU)z~_E oٱ6A1!b9R"˲@Y8|})\#O^oYNؑph #%ד=%-,r#38'X8u&)qV^ MZ&JT.1.Fj3"%x@+jH@3Rc{TPYH(2R)Q^ 5LJ\QkiJ%qHq753L+MJDK_{پo;!qo#K/%nM5$%b>ԥ#s8(qgR"9%>߶]Iyi-nN"w55Țu.qsq>R?M$EnM$EnH[eȭ2PR֙uH1(r'>H['>"w%"-c8#)RSE¢zvF鎐8kb;{LHGXѳ""{7h*COVbxB+\IT 큨Y!c,6yš`1F5ɁE72Ƅǭ!aqg+{vaqG!ny#,n(y[5ǭqrqOq78u. q(x,{×f Q 9)ᑀG u@`q+L8,z^W}ɽ^wKE,.$aQ!Sf=Я__`qq*aq|{^btBroMՀEs!w#|Bڄݸ [3OoV}-qYr1u朓K*b<+Ⱥl`ƌv0<֟O( DV݁l{A7UpS_$hY:#fhY8J{9o 0KX#̗c?UܱH48⎆U`;7_n2KmѲhL Bc2&40p ⎁N8!ܑ1WAfa@.wp@fYg8H$qG84cΦ24,@cN Ef;z_ -6Q$Иi?K_ Sd, ^<eh; ʹ"ǮJaXġB !̎1{ +,]t5Z@ݺاߋDڸQB̲`7@ 1'OŁ:p avfR!c3i.C51HCaT璪bΨ#Ypf)ICǒkk1,%2F ?`r `GvfpaNI3t73 &;j| R.~a}F;%=9,`#yZmtKXǍ7;'mk`3 v̧ d݉a.`m:*4ŽHp5E;Q,;+Ǩ Xv l cXjvGjp?ia:202\0|K_d$6qxelRgaxL԰i^N L~5ZO &\EHEDaC1|klœ e̲T06?d$q l gN*5&gR$Nj 0\,Il@ϵT03WF ,m? Mp鬅K:ʳ'A߆ְTK KœjedyxL"{9"LLfG$&ӻ÷eIlMfy9DO[&3Q6I&qkVyLP(.)i vܗW 'ՙ$o9>OIIR6ḠЙ}ET=۳O%\5$ؾ?xPV䃫3BI"CJJg6M\D>$oh 5J=$~' ʅRP0]:sYJWJN}$Ƽ]%Ix!$)*Ox( Z%4ZwqQCXJpw#@\WI}g~zh 8aB+R2"˫~%qKhEńW+V~׈mߥ~r7(H^XHҊTC~,:6]5yz+TUT,$DuFv"ʕ`pY芫PG!!FR\CH[#b(.;ʊc.Ub͡?ѥR\ԁ#FFc~tc]*g-{XqO*WTJOɡc P\r3vӣg*d0;h }CmU8 䣝+}"DXIt&hJdT>avAw׾D lnڛ rCvwQ'±}TǣW?Om$oqio[埿-D[sV^J%{=U!>o?'?? ?y18w4??kk'?/8Ÿ?ϋ]gğ=sJ?Im>vCϱqǟCƫzZ3^>|u>8Z"J3/ׇd;lstƋt׻gu$:Ŀ3(Et1 Z_w~ޫ/Ϟ%w[f/M/׏Y7Lg-7S|? 
ˏ'eݽt7X=B+i{?ǫYw, 2&A7~|?[oy |Z>Ǟz8Ŀ3)^z:D7P~ߑwq}G}7}dG mRxxR|GR|GR|GR|GRS]˟⍵ vl?;|IaLH/Y9wש_݋mbZ&)axi<A){ۓ7ٵ |19?Yx}(}m~ z,絘 W .&gH }ك˸OidfufџAr]C '@ y!k{kblLorc\sk**~&8=ݫVᶁN\Sq}}k xyLOe6dLO !5z^<+kZ-k}`(QY{L)g غ<7]g_h]ujc],eu&:%{+u3Uors-OZ ֿ'c4JaML>a#/>7~F)l[7|hܞ&}, @'>!:GJC{)l 'o0hu >jS>Wftµwu b*l݋Q>9h]3[@&o>i J7|-#[/H Qxy"z[}Z?+fzݢzl868%=g?K}YNcC]G׼NCZϸoi}x\sڈ>O~^Ҿ| zzƾ/麌}ϑ"ZuyWz(O 9&gTlk|mhX<^5lC& j&RCM)|~ ab-%ȡBOy9v>qҐlzNE =UU@&1;Vz65i1w1̳=g}_~j]:~*޹~Tu-bې5UC+\ =ǡ~Z?x:s ֯ݛhMvGq?!=O=;؁z{JJo/hn7 =[>I<˘χmyL˞MЧp=5%8;#=Ygiր)5&gvm, =-f`]kNԐ =͙g\Ž9甴Fv9I q։h1gnp]屴ɓ̿~zCoP'GcWw2ʋz.>5-7vL!]<2bZb xؙc93ryYl1J(Hd)g * [[AW䊂ϭdPqb+(* .ejq.Kx0>GΟGC؃~: ZFEح̺$٩awyE/<43G Zڮ*11I8ݸz]ks=4/8Mqlkk܏a׸[~h[h߀_w6îOiI}vMv}">:Et[v|ˎnw ;(;QvN7~!GyhwI-o^_=IO!MKP^PP' 8Fv^NAS/6𮳉۾zLof@%Ќd^4-EN(70I$C *, <0-,,rVd8BаЍKD4ЊÊMC+%A \ib$m;c \{k%N H] Y䤈 -V|Cqd,r Ef== ܨd<+h\OU1P6Kך>}"!}85S#mv?J?SD౞Sč2Е;* ߞ/CןcŪ~酙y^ĉp.ĤO6镕芌R3N<O=?=ws^QT"sapa2BeŅll\q?S9P(q2BeāC\H䐲 K_ 09PDȑa~QRpV 9PxrH$|oqnQC8pde6Fȁ<ج|fXXm?NhyEAX8P&WtǼ7VҊ'c~Uj~BG(96BhU`Cx{Ǘۓ쮠S=?3{R`nN8:5E/bz^R|&ޗNE^h/"e}]Rm^QWI#C{VE{MQ+^FOvK.Co e6Mj<:O)5z =׿5ζv-p*걞raF}7wDD\oA]iWy*. v-v];Hݡ(%iw A$rӭgx}SGܷ<ҢE,ep8\ ~Š X,%KAMz՝ʀlҭ tGI$%ҭ@8+=}I07VU۪lU:p{xNU-鶉nCo!vюL8!Zu>.c7ދN)X0VχV -G&Z`ciuŰb=?9*HU-/\ىsLEx=V*,o6%)Ex2u0VyF3[LŴdجL0lr0Pa b6`1Df:i'dLy'Cd2G,L;#\ 1IA3_0t0,}`,fbuVď S+[0s0lJC psAS39&#s re.  
f^82X"+_WGyǦJ] Ff &^8'߆```3b^ڼ3?Z-^hyΗa{D>zc|}1i08.VD%N  d$1V3L qފ|n2jʩFdT2o#"ԏH/JL=qTĐ`o è}I1؟x?P+PoP|E\0'e0'{D-1{D%A2Ȳ<񲨖mKv0P<=g.{(bOes+b/bf2"blJ>0Z*>MN*Mތɢ; M_ZBs.0IEk4?E{L*"K@@{vR /WVA ~{Ov:UϸMiBB!kT2>h/t[ Dp2њ:k{QhMWк>mL 2MBVYMq*{u?p%c,BWbuXB{ [>ӽu`[г>IKow*n,: P_(Vj@ j´߆"c2™r*\ JPyr8BEІ\UUUOFU"] W5h؞@:}Bc *ҕD Mc.*"*t+**]BrP`hP {Uh MWaj,*(-Waj ]2]DWa *o2b$n [t\b S aLb i~~^o݊(69cQkSI 0:L֊j63}=|r AS2\s`I.wba/e=pF\ʹ&{,"1nqCľ^5/H]iܪg/PT86Em/f>Ckǡ/ R%jfݪJj/ jzkN_3%bA7cLw^D.*RD}z_D.AP_D.A8mo}\E#hR2iR.Mpg7@%- B )O { (:J*:?BrO}{D] @(s96_ȕM({\ Na,tK<>O.\"$ @<t.7'\ oOh7!u1bͨ.\r1|\R1dԗ1Pjg= fYpbLv˥7ƔL^O]o{N>\RL"W*?.wˬӇփ/ǥKzׅKI ~"ȥtzayCuGIOJ ^ 7*?4 k_d +KBS_eߐ7o.KM,CutYޔeerIpYBpqY#tYˢd,Q1Deȅ2>7$72]I%tY*~oP_\?25.ZzeˢjrY],*,~eQ\54 cL \eoeQ=5cSg(StQ-Lq̆\x4]Ut4WՄ9?S"(S"((5eFy9R$[wYxg,rT,y`gJ9eڋlTG"eF;\T-~T3_V߀ 3>JҜN(JGh)IKYQ2o$}q%} Ƣ5 >JfNI+VQgH'9(3IeU#)QNaӆFEtJ.V$(VRnCwT:Ӆ#CCEW=8ڟ [H WNITcs8%Ćp蔨N"Ž*)ir 67p.7[5ه3_popr`6D6!!:'EΈiGsf\9#U9CrNYj%s!ɻp z^ 8?F\u~9PpFJ97kcT>v:#Q*1SϊY4 2jR8?Ǽsɯ9IU/ gItF|:#[&.oO`L9'840aMuv0~|:;(Mުϯp)|#/ʵg'cHG8;͟yΎwU>Mј72/i2Flݟ47uJm(83r)”El=}v,QL5QKZGz9S Fșp؜SW!dgȏV79T>Ky^tԋViXΔ@y`P;hY I!S6عExaճsyRW̺48OSt۫G7`?34 9 `*Giȷ9̇1xV3Qi*#˜ tONMs2coC|LҞchbMzK‰"QxKI}:$xKY-u wReJR&ã4Rouio \y}}8hޥ(G3|8xK*nit&Ly/[*2846 /RcV5=N۶=gr^,χѬ`VVC$iQ&/M^H^xeE%C^SSS).j1)5 Uh2?Z ^E9I!ݔZ/-r|{0zMu^[W|SP򚊼W@Uu55Fc]*EL땁JRC3 z0zMI^ŽåD-/L^Rl0r'0yI}ӻ/ BA^CKhe^/ !ueB/'aPxaփ0zacy>xzM x  免 I^ؘv,/l, 6免%/ ^Gz{;&3x3^XAIx]E'Bs$tx^=(8:\^(jY$/opԨ׫MUIE)]I^ސF/o< FH!DJukTUI^Rxu BSTA^ЁVNEMvC?Pͬ(([TT-;u, }>c)}Pa/f2Y5)n^,(`Nn^Tș^oT;.ih|zu&CE+W75L^h\zu&U|&MA2'yu>yu'NU&WW@Dʎ^]ҫũ[^\ezqjo^V@.Ꚛ55bЫC!.âWQ~0(yq]8+W'W-yuq(: W삼mSU(M=uGˋ[d?yq ^{qӆ-=-Q+KX]U K 10l ,_HϮ\m77j#UZUU{f,I(NDqRU#^@qMm7m%XG @8WY%{{TKr^\PK 'YO5B,I倰MUm]m\R܅{dkZlFe؁.*ͥk)#f6A,LǑ #Ou^ ۛnMa(Ǚaa4ّ 0ivګ2a=V+4rUvP񛅗"(j|E6)7W⪾ M͑2Wܴf |H-J6TIge!WI'ci.qfLJoUPHOrVuU^Y&Q%'z‡ߺ>wrp8u!⩲Ē ۹+'7. 
Cx笜[C딭߼īaX,ݫrhVfrzΆ  eaK[SI="P:ǃU4,5 Ǭ)OY.Tvƒ"s㱻n-9sg J^wGg`Ax R`GAv% \4CxrGo,D/}M|)UKuY _ΗS/}2$/}2"(ݝQNr,:i/H_<@$ex% |iO K$#hQvϻr+KܜϚ ͟G[{6H؞<';{^}oms/$b.6H)a9lQ,ޫ(7xLtÎ*;kSj80IyW* (풮5t+F%@)m FE.7gʰKG"*XVQl,B昘`*-hr FEjPNnpN[k$0r 嵙t=q /%A0r1lESX^`;R/V `QH@Ό&gKF 29>Q~#:Q2$y%*}$eLmGD]SD#zM \Q]fخ(qi4VDSYBD( A)r^~JdψJ5QP)enTE`I"JB?'Ѻr}(FVP\R"Q.Qhj"eM8 PR&jm|!( qQ`HwE jq'w#݄CCBPȄj˗,gWM}NkEd  s_F[!u(BQn%m1sQ.5ܮҞQYIZ>˰~=+g5遼Z%V(kI[xĬikc^,b3]eZ4B282VVOQ(]uҞswM`GC'jBͰBU-=UKyJ[*+Y콲8]e(KS^yeoYYV&$fiE51ԼCKe< u5+~JެJ>WF%BZ 5: ]"ϗ,PP{u`HZZ^u꣑B[Qtl({G:Q} ePAk'ix=B=B{Z| 2 uk-GuȔ=B@[Dٻ=l-P]'Q@,@,dԽ5:{ u6s#!W9eoUAeWFwm$Nԭni ukPFeS;(D5@VEV{zVOx5ig#~N//]94Z=ՄC ` NjҎ$/.羹&"V!q;89 m#R%ɱ gsԃ)w MMjc x t)fm )WvDL'4^)?/cyywq/aVV պV N.em DGpHNrU9B'RVMրD>bᖷ/RNyi8B|IupWbeODUɉh!妒3+=f"C]CO@r:o8$V'ap D)lw'arafg$ .BoJC߅b9\83}Z6* ~k1'0XJa&061ޓ06~JЄ=ix?0,!F}`"L57p}gLm)04d&OKZ_OwHw1~Xըܞ'cfU|-G13y'n=cbax9J%)+[j#bk2Gr $F4z_ɕԻ3a0.є ;H҅j0T5w}oo %fU.H̾5d80 Sno]\}7F'&p0Q50V%1~ػbB2|%so %댹VubJvblPz#Mͦ soItsososodsoUs;f$qbns?R'~>[z^kdZF榞cnd sw s/@}+1特>O=IǨ5>S[ܪ\ox0fn[b[`[܏ԕ}i-dd`nAs7{`&a(`R4U9=D]/iք5vXcTE/!|Wl@p'O7!$Bp% } ^bH؅ÈEl xU@^Chګ;̦7s9YIKu_ZjSuPN4+tt.}Za9{0S\]U w`b@hƙBWU3W701wS)< j-9%vEpuK% :@ԡ?!bȱ>!;m$VoKy*ܒ>97V} +`gpӺ]vLPcK}B%rCiL4c}@jn poYR6T=³Ul_Eb>>;{6+r &?j#I< ƽ, /p`Wʷ /Zgp/0^srpO0^| {q=6 ƟT6 0eep0Q̓2s{ʹ_*upO0SOO,$0L>yGdsMb8LlA&6<@͖ۆ+cCI11Ye `JÏNgsB}R-$^gy3:w}z1Ob|}\5G&w19H;facK ?mci3\޹͗Rtx'Y8ʐ޹fjSNpߩP~3X,N:pIϲ{Ua^k EK$HNz~> an* ?$*XʀC=qF҄Mi4I#Z$5I+wrC"9&Mi`VIS㿦@M3P fLJmtzWAVϽ{iI uXIj,i5I M~'5ƽX_* uB'%$5AHw ᯓX"1HjԠ&!e"51"5Hjlu޽["5wIjll!Q[cHZDR"=HRw^$Nb $CSCY}ΥH G$-(iq$!FBAH &iK").B]$)$HR"$)ﮂS?&Iq4G28v$-IYG@š";&nA\,öCWA54~X/bɚ0=I 3#i!G2S7Ŕ.H{C*^l"_i_s0.I :x$7a{oHaHW HZ0糍FaQ\1^Q~ K|z0UV ᨪáR^w4Jd!3gV@i@ckY!!QbmTdbMD0Y;(JaԲ3(rQ)b8-T G#ͫvĶ;hQl;wl(1>[C ͸=q' <%DJám- XRN,=[?hN \w7Nu'r?{wvoTK v(+E?;(pXn qK:x}øNut<JN)(,L :ռ[cVp);: xUM^;ι݃ %q{͞gf8-sJ:7㺣Eحf}h͜P!v;//XsJx'a}wי5i9E SXnenS3sXX+.攷ҍr׹Db4NT\LTG*q1`\L\Ѐs^3SAS笠ybb .F[\L.h1S4 A.h=/q1G-buس<+.%p10+}\S4:]sP%.󑋹.s1m.뜬k r1Gɽ5D{zԽD.Fe˷fS+\Q "jRqzr5 
!Wbwq5AnFf0\q3+1nK.sQ>q3V{;,&U,R5]%5jP5MՈ U_?M8:jX-Ɵ4SLF*$"U#6T WUCpӥ\]6xD9X~~d'NKׄ1Tbg,I=r%15">OX.X. ؔ2>gϐ,œgy+~$_wD3ݵvMSL,m< qL3dL $c&|i*߄CYp(w;h Wm՘*F1q &9ED2FfvNdZ6?hHƨ.z:_7pR'd*dLpB%%bdLQVH 1T5ɘ' Jf1Y]*$D7ӲDYui$dzᶝ"b N@)1"}]@Th`Ȅ@cJ@T*aN"+@Aq]p8 n&=!73|ba >Sq$A}5dil<6V#+3Tbml429!և(h:CFebe;#k2}ЃX2,N&Q`B/!+*JQh=8t>g}X-KjD8|K}CO,N,k\tPbq =KNy;ؠ Ö֏, ϓ"켝Bv+/BQGbX1,Q0-DRKDb&e6vqY2iKԸx)h!1T2!8Yʯ"KĶOD"-oեgхI-tgnS]uz5 nU6m>%ziM"FBHҨY4:c\J:Hi~%IƻAHSI鱴gcܫP@4A{4%H#oi$O{KT)i`˒9U$x DMP`^ ~`L❽ xF]p ]-Z: }3}wӥ otGokvloBUBUV3u! d_&m:Y/:؜+?w .ykw>fuXWU`TTg ,b"!_Á1 q8%F, fKKUZzQC]Tt#$v ,Q43Ov­k<MNke%gk̎g;P)\l6n)D-= vi*,tond ʞllCQ''&26G?(FU(QrF{0,(F=JtgJ<(CgEtsT_@8EJc}Z*E~&]4Bɠd-ڴ4QWea睎+G*)ς"} [D5K;$fA'G#TM֋~\Kt[[w4Д0nDQK,]繾鏈pQeg<2^G  !>.GF|\v7.Ոm w5Pčgz^bJX'&bTN8}D3Je8N|K8m50f Q540͚lA5Y!уWH~JP5274Ʊ.W*%LI^m}T3^5a2(΄WD|*\cbP;l;k1k튕*ȝk \hv]-QINLrJM?'L*…#'̬:^Ep}*] #Y_|Y0gz Ȟ`dOb PY;TDTGOnDb9@,@AT 'ˠ P*4) -q@p$E; d ]+HUU]/- @>bBTH0*  3J"ܯ^_1C*ApHV^9s}#PO![֔p B($ \D1C2 v|vʴ 0]|hꮝ K _ȉ9`1]b:G2Qb8`PMҲ̎ HggOApcQ0Dwi*R eVϖ2oU ?Vu =ɭu? &uY8~L~Ta*ۖ]u[c{] v(GQ*'½ICzG :C垪֡HH83ǵꔅ1OHH8:LԺ alTQ&UG纾Xs.3vr? ?W랛7i e9 M[s"M(ׯ)ʫGd&?J?WtǤA I?lzgע)-Jз4) 91`I*5зWozWiTwbt5w%fLs5 ԫrW{k*ju1oMu}2ʝ gq:3Yvb{|8@ZhMƵp0!XvqLjp˪g]4c,nY]r܅fz.솇0ӻ$)?uKsz`?\KrV0kJ,Xt4E$J ?0kDccJ=ۇ5A=5d3,mY8io{vڛǬT!jev%!f<!>Ge֎ 6fa!`?l}3 LJi.ǔS=d9-e`L7~Ä&W }6Ic 00ڣ&J n>x?Z'/J6 dh}Ywk';A:vK֩G~NllhTM'Y?Z'ތN!ڐ͆|P&26dVZ-ähLu1ad4!"7Y lQT`,~ #?Q Sx5:a4 +4dZ*h! a"mv ]W,{Y,EL2yn%pף3eyɕHʝ5d;,<.uK +s+1bix^=%İb0 v<iaгf>N0  w|J1,iUò J$Ri10x,?)^uL 3c+A{lw6v v zd$jv0$. cɞ(si-gq.| % jⲺ_~.y\?Џr+DgLLK>D{D;8(G$% ixDŹl-M}ͩ7żnȗN ?v+cԑ(1wW>iuM(Bnߏ2)TFR1[ Rw'{TcdBql(z#cxR?f5?ߊ)ϷC s^㴿 8;j~Yk~pix3ZWp~T|}zv7#2.y_2^uu.,XO*7-C K9 Vx?\0}z$}}j_.3O~~\$} >j.+oa?-+^]˷s[w_.Ϳ~W_nfzT+?? 
>~+>A5>g% .څ0Ms&^*/p]XOz|a{!ށ}ǨV׸|k_k z_kPubendstream endobj 9 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 13 /ColorSpace 5 0 R /BitsPerComponent 8 /Length 48 /Interpolate true /Filter /FlateDecode >> stream xt-׊ 36ԨkҪHtipSqendstream endobj 10 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 12 /ColorSpace 5 0 R /BitsPerComponent 8 /Length 45 /Interpolate true /Filter /FlateDecode >> stream x{p-!1{ski1﵍rrjuwMxendstream endobj 3 0 obj << /Type /Pages /Kids [ 7 0 R ] /Count 1 /MediaBox [0 0 576 216] >> endobj 4 0 obj << /ProcSet [/PDF /Text /ImageC] /Font <> /XObject << /Im0 9 0 R /Im1 10 0 R >> /ExtGState << >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 11 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 12 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 11 0 R >> endobj xref 0 13 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000043139 00000 n 0000043222 00000 n 0000043384 00000 n 0000043417 00000 n 0000000212 00000 n 0000000292 00000 n 0000042651 00000 n 0000042896 00000 n 0000046112 00000 n 0000046207 00000 n trailer << /Size 13 /Info 1 0 R /Root 2 0 R >> startxref 46307 %%EOF brms/vignettes/brms_nonlinear.Rmd0000644000176200001440000003016714224753370016717 0ustar liggesusers--- title: "Estimating Non-Linear Models with brms" 
author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Estimating Non-Linear Models with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction This vignette provides an introduction on how to fit non-linear multilevel models with **brms**. Non-linear models are incredibly flexible and powerful, but require much more care with respect to model specification and priors than typical generalized linear models. Ignoring group-level effects for the moment, the predictor term $\eta_n$ of a generalized linear model for observation $n$ can be written as follows: $$\eta_n = \sum_{i = 1}^K b_i x_{ni}$$ where $b_i$ is the regression coefficient of predictor $i$ and $x_{ni}$ is the data of predictor $i$ for observation $n$. This also comprises interaction terms and various other data transformations. However, the structure of $\eta_n$ is always linear in the sense that the regression coefficients $b_i$ are multiplied by some predictor values and then summed up. This implies that the hypothetical predictor term $$\eta_n = b_1 \exp(b_2 x_n)$$ would *not* be a *linear* predictor anymore and we could not fit it using classical techniques of generalized linear models. We thus need a more general model class, which we will call *non-linear* models. Note that the term 'non-linear' does not say anything about the assumed distribution of the response variable. 
In particular it does not mean 'not normally distributed' as we can apply non-linear predictor terms to all kinds of response distributions (for more details on response distributions available in **brms** see `vignette("brms_families")`). ## A Simple Non-Linear Model We begin with a simple example using simulated data. ```{r} b <- c(2, 0.75) x <- rnorm(100) y <- rnorm(100, mean = b[1] * exp(b[2] * x)) dat1 <- data.frame(x, y) ``` As stated above, we cannot use a generalized linear model to estimate $b$ so we go ahead an specify a non-linear model. ```{r, results='hide'} prior1 <- prior(normal(1, 2), nlpar = "b1") + prior(normal(0, 2), nlpar = "b2") fit1 <- brm(bf(y ~ b1 * exp(b2 * x), b1 + b2 ~ 1, nl = TRUE), data = dat1, prior = prior1) ``` When looking at the above code, the first thing that becomes obvious is that we changed the `formula` syntax to display the non-linear formula including predictors (i.e., `x`) and parameters (i.e., `b1` and `b2`) wrapped in a call to `bf`. This stands in contrast to classical **R** formulas, where only predictors are given and parameters are implicit. The argument `b1 + b2 ~ 1` serves two purposes. First, it provides information, which variables in `formula` are parameters, and second, it specifies the linear predictor terms for each parameter. In fact, we should think of non-linear parameters as placeholders for linear predictor terms rather than as parameters themselves (see also the following examples). In the present case, we have no further variables to predict `b1` and `b2` and thus we just fit intercepts that represent our estimates of $b_1$ and $b_2$ in the model equation above. The formula `b1 + b2 ~ 1` is a short form of `b1 ~ 1, b2 ~ 1` that can be used if multiple non-linear parameters share the same formula. Setting `nl = TRUE` tells **brms** that the formula should be treated as non-linear. 
In contrast to generalized linear models, priors on population-level parameters (i.e., 'fixed effects') are often mandatory to identify a non-linear model. Thus, **brms** requires the user to explicitly specify these priors. In the present example, we used a `normal(1, 2)` prior on (the population-level intercept of) `b1`, while we used a `normal(0, 2)` prior on (the population-level intercept of) `b2`. Setting priors is a non-trivial task in all kinds of models, especially in non-linear models, so you should always invest some time to think of appropriate priors. Quite often, you may be forced to change your priors after fitting a non-linear model for the first time, when you observe different MCMC chains converging to different posterior regions. This is a clear sign of an identification problem and one solution is to set stronger (i.e., more narrow) priors. To obtain summaries of the fitted model, we apply ```{r} summary(fit1) plot(fit1) plot(conditional_effects(fit1), points = TRUE) ``` The `summary` method reveals that we were able to recover the true parameter values pretty nicely. According to the `plot` method, our MCMC chains have converged well and to the same posterior. The `conditional_effects` method visualizes the model-implied (non-linear) regression line. We might be also interested in comparing our non-linear model to a classical linear model. ```{r, results='hide'} fit2 <- brm(y ~ x, data = dat1) ``` ```{r} summary(fit2) ``` To investigate and compare model fit, we can apply graphical posterior predictive checks, which make use of the **bayesplot** package on the backend. ```{r} pp_check(fit1) pp_check(fit2) ``` We can also easily compare model fit using leave-one-out cross-validation. ```{r} loo(fit1, fit2) ``` Since smaller `LOOIC` values indicate better model fit, it is immediately evident that the non-linear model fits the data better, which is of course not too surprising since we simulated the data from exactly that model. 
## A Real-World Non-Linear model On his blog, Markus Gesmann predicts the growth of cumulative insurance loss payments over time, originated from different origin years (see https://www.magesblog.com/post/2015-11-03-loss-developments-via-growth-curves-and/). We will use a slightly simplified version of his model for demonstration purposes here. It looks as follows: $$cum_{AY, dev} \sim N(\mu_{AY, dev}, \sigma)$$ $$\mu_{AY, dev} = ult_{AY} \left(1 - \exp\left(- \left( \frac{dev}{\theta} \right)^\omega \right) \right)$$ The cumulative insurance payments $cum$ will grow over time, and we model this dependency using the variable $dev$. Further, $ult_{AY}$ is the (to be estimated) ultimate loss of accident each year. It constitutes a non-linear parameter in our framework along with the parameters $\theta$ and $\omega$, which are responsible for the growth of the cumulative loss and are assumed to be the same across years. The data is already shipped with brms. ```{r} data(loss) head(loss) ``` and translate the proposed model into a non-linear **brms** model. ```{r, results='hide'} fit_loss <- brm( bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE), data = loss, family = gaussian(), prior = c( prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta") ), control = list(adapt_delta = 0.9) ) ``` We estimate a group-level effect of accident year (variable `AY`) for the ultimate loss `ult`. This also shows nicely how a non-linear parameter is actually a placeholder for a linear predictor, which in case of `ult`, contains only an varying intercept over year. Again, priors on population-level effects are required and, for the present model, are actually mandatory to ensure identifiability. We summarize the model using well known methods. 
```{r} summary(fit_loss) plot(fit_loss, N = 3, ask = FALSE) conditional_effects(fit_loss) ``` Next, we show marginal effects separately for each year. ```{r} conditions <- data.frame(AY = unique(loss$AY)) rownames(conditions) <- unique(loss$AY) me_loss <- conditional_effects( fit_loss, conditions = conditions, re_formula = NULL, method = "predict" ) plot(me_loss, ncol = 5, points = TRUE) ``` It is evident that there is some variation in cumulative loss across accident years, for instance due to natural disasters happening only in certain years. Further, we see that the uncertainty in the predicted cumulative loss is larger for later years with fewer available data points. For a more detailed discussion of this data set, see Section 4.5 in Gesmann & Morris (2020). ## Advanced Item-Response Models As a third example, we want to show how to model more advanced item-response models using the non-linear model framework of **brms**. For simplicity, suppose we have a single forced choice item with three alternatives of which only one is correct. Our response variable is whether a person answers the item correctly (1) or not (0). Person are assumed to vary in their ability to answer the item correctly. However, every person has a 33% chance of getting the item right just by guessing. We thus simulate some data to reflect this situation. ```{r} inv_logit <- function(x) 1 / (1 + exp(-x)) ability <- rnorm(300) p <- 0.33 + 0.67 * inv_logit(ability) answer <- ifelse(runif(300, 0, 1) < p, 1, 0) dat_ir <- data.frame(ability, answer) ``` The most basic item-response model is equivalent to a simple logistic regression model. ```{r, results='hide'} fit_ir1 <- brm(answer ~ ability, data = dat_ir, family = bernoulli()) ``` However, this model completely ignores the guessing probability and will thus likely come to biased estimates and predictions. 
```{r} summary(fit_ir1) plot(conditional_effects(fit_ir1), points = TRUE) ``` A more sophisticated approach incorporating the guessing probability looks as follows: ```{r, results='hide'} fit_ir2 <- brm( bf(answer ~ 0.33 + 0.67 * inv_logit(eta), eta ~ ability, nl = TRUE), data = dat_ir, family = bernoulli("identity"), prior = prior(normal(0, 5), nlpar = "eta") ) ``` It is very important to set the link function of the `bernoulli` family to `identity` or else we will apply two link functions. This is because our non-linear predictor term already contains the desired link function (`0.33 + 0.67 * inv_logit`), but the `bernoulli` family applies the default `logit` link on top of it. This will of course lead to strange and uninterpretable results. Thus, please make sure that you set the link function to `identity`, whenever your non-linear predictor term already contains the desired link function. ```{r} summary(fit_ir2) plot(conditional_effects(fit_ir2), points = TRUE) ``` Comparing model fit via leave-one-out cross-validation ```{r} loo(fit_ir1, fit_ir2) ``` shows that both model fit the data equally well, but remember that predictions of the first model might still be misleading as they may well be below the guessing probability for low ability values. Now, suppose that we don't know the guessing probability and want to estimate it from the data. This can easily be done changing the previous model just a bit. ```{r, results='hide'} fit_ir3 <- brm( bf(answer ~ guess + (1 - guess) * inv_logit(eta), eta ~ 0 + ability, guess ~ 1, nl = TRUE), data = dat_ir, family = bernoulli("identity"), prior = c( prior(normal(0, 5), nlpar = "eta"), prior(beta(1, 1), nlpar = "guess", lb = 0, ub = 1) ) ) ``` Here, we model the guessing probability as a non-linear parameter making sure that it cannot exceed the interval $[0, 1]$. 
We did not estimate an intercept for `eta`, as this will lead to a bias in the estimated guessing parameter (try it out; this is an excellent example of how careful one has to be in non-linear models). ```{r} summary(fit_ir3) plot(fit_ir3) plot(conditional_effects(fit_ir3), points = TRUE) ``` The results show that we are able to recover the simulated model parameters with this non-linear model. Of course, real item-response data have multiple items so that accounting for item and person variability (e.g., using a multilevel model with varying intercepts) becomes necessary as we have multiple observations per item and person. Luckily, this can all be done within the non-linear framework of **brms** and I hope that this vignette serves as a good starting point. ## References Gesmann M. & Morris J. (2020). Hierarchical Compartmental Reserving Models. *CAS Research Papers*. brms/vignettes/kidney_conditional_effects.pdf0000644000176200001440000002451513606326627021307 0ustar liggesusers%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170125183821) /ModDate (D:20170125183821) /Title (R Graphics Output) /Producer (R 3.3.2) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 6431 /Filter /FlateDecode >> stream x\ˮGr߯8Ki3]gl m,H ,`/ȈKA^ }u[_o8n='ӎ1/n}wXڽ[zK;ųO~?ǽۓ{﷑ҽ۫>4XH<vpw<-N>dc3\~/67^RÄ8kݰ?_=ǧ_o^~:9}~8L&nb8}/w&r8.ɱ.p;.yb倇,X:Ã'1MPap|b9U߆'ǃgNɿ5l_OίvǮc&R)/noYtk%O^~X<;]| ׷ק[ nX?`_gקlտݭ^3qw;4Ḻ%s133-fLjKrKYJsNְmN8^?/4zn>m~>_?/b6M~{n6L_2__XHڹqT#qi&h_sw \}֯~fn毚d&Zl<6mOa`|Lgjw`'@~3y-AyWL&[a[M~@aw4}xL6iӮmnIczp95ຍS-x,ySLKǒ79zZ/`GZOZo``ʗPi7s#pK_ǦOC 0Ӳ7G2e/_\Gx,^0_sv)B>}{<<CϱeX/]3|D|D|w# y;wCWKS'k[7XW<|߃?p~(>_$79?$uGR>/}_:ŗ)ic}HtH"m|@Eqr!Ϸo5IkcgdlI=o2~q}`[;>=??g3?;Xt@0WO'|0||ȒṞZze;.|/ko zj^B{{Ob}ǧ+ut`cSSIO=gϞO(&^՟^'o{?l6p"* \y_|߾>9 "$㤱="O 8 TTTӌ]otBp"0z;zv|FSٚOFpѐ#υйy:qDžԇ/~ҾSO}?s5؃~k{?ޞqHEiE;$БÓHO#9%<>=5'?Ȩ~F6`㗲o`\wv]zvCFi,>"ya;Msڟ߬g;1ZȀ9E^R^w~7=YXw ,jWMC(Y[w 
zv/LdW>'v IS]N4Hmeҫ\Ӿ.dYup]/vEh)ǡGrd͝c[ /w\#Aܖ0qϾ_i&~!^w#+^EwC>@cBϯy7 ކFF`Um(\"r)lȻ%E"iٶ0W*~[A zjtbwF0Fl)yu+/j)5VVOqrsGڋ-ʞ/j5@3K rRtdOhg>ns$oft^XKF\0CEzdj9dwPuU4*r~3ے7^oforJCт'xp{z,VW3ʎtv siM]PUM~3[s$@i>Fd[2[變+{ٖTeNyҼET{xiD f7Oo>ܺ(tA;J%99p(6O:bnW֠^of>p.1ȥNUi*iǪN r3 @QSUϪUWU xYU X[qy@/< r-s{I4&: rJ;r1$\Hz!:%G\N9Hj$qzl}|/ڙ];gѨD#d!z5Xwd:$ک uJlQ,ӈ^.␱h`@#ڸf8H&IVpFߟ}tkyp{gL7S)k/xHL0ɦ-uRŊR\|xH: UmK piB!c,Hř E`Ώ vN1!$FĈp$b=6 72-|>0˸N)0@,$Y <!Y!'=83#\6`Gq 9ΐ}BKufcpBF XaqX'WHk+T`y6-\ 7x3WcE 'C0Ϙ4yRQ3lL8WxU>-]8{FK)?`oL$0sW(!0gTMA;Wq+LgbIM@q$]))Ҥ*ΗpqѱosYߢr=h A$HAbQAskZ%SUUNUUQDO2XJ9ʷJtK@҈$ =g49SߩONi,l=q*EYb۔#8eU.P9[xҢ:q}+ fQu2XRzG7e3 3r,TtXfꤑЄfh`Ii~3rY4Ω/АHѰg2oSJe̮JO̦GMw*ٌJ)Է6 NuENխ@˽Լ\X{*w->U֑ӟp 0PT^X& E3j@alFH`I@H{+0V q S} 8x2W` ۨn`,}cO1,ʢ|F`5NE)x>"1CEQ"@U(vzdPgɝ<i"V8ΫNBRqD6e,t8 GD P뭈"ڬ&IFasݹ,`+[ߌe,/ЖC=\eyOҸZrT.Չ=/GiO)|H "VO}s'~jޣVZpEŶ./oo;u.Z|kh^KiR.oH1TN09Ixh{(}?endstream endobj 3 0 obj << /Type /Pages /Kids [ 7 0 R ] /Count 1 /MediaBox [0 0 504 828] >> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font << /F1 10 0 R /F7 11 0 R >> /ExtGState << /GS1 12 0 R /GS257 13 0 R /GS258 14 0 R >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F1 /BaseFont /ZapfDingbats >> endobj 11 0 obj << /Type /Font /Subtype /Type1 
/Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj 12 0 obj << /Type /ExtGState /CA 1.000 >> endobj 13 0 obj << /Type /ExtGState /ca 0.400 >> endobj 14 0 obj << /Type /ExtGState /ca 1.000 >> endobj xref 0 15 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000006795 00000 n 0000006878 00000 n 0000007042 00000 n 0000007075 00000 n 0000000212 00000 n 0000000292 00000 n 0000009770 00000 n 0000009864 00000 n 0000009948 00000 n 0000010047 00000 n 0000010096 00000 n 0000010145 00000 n trailer << /Size 15 /Info 1 0 R /Root 2 0 R >> startxref 10194 %%EOF brms/vignettes/brms_multivariate.Rmd0000644000176200001440000002007514224753362017436 0ustar liggesusers--- title: "Estimating Multivariate Models with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Estimating Multivariate Models with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction In the present vignette, we want to discuss how to specify multivariate multilevel models using **brms**. We call a model *multivariate* if it contains multiple response variables, each being predicted by its own set of predictors. Consider an example from biology. Hadfield, Nutall, Osorio, and Owens (2007) analyzed data of the Eurasian blue tit (https://en.wikipedia.org/wiki/Eurasian_blue_tit). They predicted the `tarsus` length as well as the `back` color of chicks. Half of the brood were put into another `fosternest`, while the other half stayed in the fosternest of their own `dam`. 
This allows us to separate genetic from environmental factors.
This indicates differential effects of genetic and environmental factors on these two characteristics. Further, the small residual correlation `rescor(tarsus, back)` on the bottom of the output indicates that there is little unmodeled dependency between tarsus length and back color. Although not necessary at this point, we have already computed and stored the LOO information criterion of `fit1`, which we will use for model comparisons. Next, let's take a look at some posterior-predictive checks, which give us a first impression of the model fit. ```{r pp_check1, message=FALSE} pp_check(fit1, resp = "tarsus") pp_check(fit1, resp = "back") ``` This looks pretty solid, but we notice a slight unmodeled left skewness in the distribution of `tarsus`. We will come back to this later on. Next, we want to investigate how much variation in the response variables can be explained by our model and we use a Bayesian generalization of the $R^2$ coefficient. ```{r R2_1} bayes_R2(fit1) ``` Clearly, there is much variation in both animal characteristics that we can not explain, but apparently we can explain more of the variation in tarsus length than in back color. ## More Complex Multivariate Models Now, suppose we only want to control for `sex` in `tarsus` but not in `back` and vice versa for `hatchdate`. Not that this is particular reasonable for the present example, but it allows us to illustrate how to specify different formulas for different response variables. We can no longer use `mvbind` syntax and so we have to use a more verbose approach: ```{r fit2, message=FALSE, warning=FALSE, results='hide'} bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) bf_back <- bf(back ~ hatchdate + (1|p|fosternest) + (1|q|dam)) fit2 <- brm(bf_tarsus + bf_back + set_rescor(TRUE), data = BTdata, chains = 2, cores = 2) ``` Note that we have literally *added* the two model parts via the `+` operator, which is in this case equivalent to writing `mvbf(bf_tarsus, bf_back)`. 
See `help("brmsformula")` and `help("mvbrmsformula")` for more details about this syntax. Again, we summarize the model first. ```{r summary2, warning=FALSE} fit2 <- add_criterion(fit2, "loo") summary(fit2) ``` Let's find out how model fit changed due to excluding certain effects from the initial model: ```{r loo12} loo(fit1, fit2) ``` Apparently, there is no noteworthy difference in the model fit. Accordingly, we do not really need to model `sex` and `hatchdate` for both response variables, but there is also no harm in including them (so I would probably just include them). To give you a glimpse of the capabilities of **brms**' multivariate syntax, we change our model in various directions at the same time. Remember the slight left skewness of `tarsus`, which we will now model by using the `skew_normal` family instead of the `gaussian` family. Since we do not have a multivariate normal (or student-t) model anymore, estimating residual correlations is no longer possible. We make this explicit using the `set_rescor` function. Further, we investigate if the relationship of `back` and `hatchdate` is really linear as previously assumed by fitting a non-linear spline of `hatchdate`. On top of it, we model separate residual variances of `tarsus` for male and female chicks. ```{r fit3, message=FALSE, warning=FALSE, results='hide'} bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) + lf(sigma ~ 0 + sex) + skew_normal() bf_back <- bf(back ~ s(hatchdate) + (1|p|fosternest) + (1|q|dam)) + gaussian() fit3 <- brm( bf_tarsus + bf_back + set_rescor(FALSE), data = BTdata, chains = 2, cores = 2, control = list(adapt_delta = 0.95) ) ``` Again, we summarize the model and look at some posterior-predictive checks. ```{r summary3, warning=FALSE} fit3 <- add_criterion(fit3, "loo") summary(fit3) ``` We see that the (log) residual standard deviation of `tarsus` is somewhat larger for chicks whose sex could not be identified as compared to male or female chicks. 
Further, we see from the negative `alpha` (skewness) parameter of `tarsus` that the residuals are indeed slightly left-skewed. Lastly, running ```{r me3} conditional_effects(fit3, "hatchdate", resp = "back") ``` reveals a non-linear relationship of `hatchdate` on the `back` color, which seems to change in waves over the course of the hatch dates. There are many more modeling options for multivariate models, which are not discussed in this vignette. Examples include autocorrelation structures, Gaussian processes, or explicit non-linear predictors (e.g., see `help("brmsformula")` or `vignette("brms_multilevel")`). In fact, nearly all the flexibility of univariate models is retained in multivariate models. ## References Hadfield JD, Nutall A, Osorio D, Owens IPF (2007). Testing the phenotypic gambit: phenotypic, genetic and environmental correlations of colour. *Journal of Evolutionary Biology*, 20(2), 549-557. brms/vignettes/me_rent1.pdf0000644000176200001440000002006213252451326015435 0ustar liggesusers%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170908164932) /ModDate (D:20170908164932) /Title (R Graphics Output) /Producer (R 3.4.1) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 4217 /Filter /FlateDecode >> stream xZOGO;ߝk"@B bp$b$ķfwMp:3S]]ˏ_<_~iKIq?y{Z}t{S}}Omi>J۬7OmlxovlcY:`Mt`fJٷ[o0tߎ 6;̑\`-ŧFZz1K3z~B plP?AKf{!M>]n' <פ(@0xDoa9>6\Mepܯ=qʼ_kVɰ|X`jJޯ"y)Cj $ysn2up5"0uK(8r~ޗIx BOhR {c\BzVUĂDp l<# T'27*B:dd`EdtQ.L'K$ɘDr8fTFCÜEӪa`[=_22Lלҙג{ɣW^LW^ 3ϕa~۩:O9*Y>ҹk'ڡ;nK|nݒuҌK3up,3dYP\"ZaC,!`>Y&bXE$82z^ P-T`*:-l@ β,hF%0uZp&NGh-A{k)_I hQB䫀KCnǗ_?O'T=OYEU ]4S儇\[avFE6"u6yjuC絪 yߌ :U]T.c*]\]QҪ.zh yua,VLZg`:lVvU]8Eq5l\EZՇYbe'BYgkœņrCydAɒ=ucQW':˞,ܼhfU]4ݳhޯU]\]ԕ0\]%guQUENRsgua|Vj~Uf~\]R ^]$Ī.`k=.`7AìW]]. 
ؒ_.V[EruUݭYxZEruȺT4ZxEa%7Mwsba6a[]"U&5i.C5M.qdJ0kAS&u|Cq4x(8.A J0I![eSnn/_ ;L&[#()[folxXW*޽g?W.)Xؾ)Wo??~駯8LJ E"ch(>~1vtcoC@ǁ(^v8 =Eɴi}mޞ\'Z@w(3O75]n9h=y=/!b|.|İ"0҇8Tz,|dpv yڻ{kilȵ1W߽ns6uiYwE}ۿߺJ)W x7CnIxLgtn*Ӳdr `|Qur)æIlww `0mq_'ZPR6B< t Q XJ?gp$J4JCnW~i\ͷ_ž#8RgD-F\iy%e]w't-LmBD"s!0 %J+KP[4O`F1 M©mYne.%Y[~s7{X~I)PIF6_[dOl$h?߮ n[YI{j}_eQӧ:fm`1cWSßEԤ[a_.E=r`v-7%KoE7zQ0]2pmM tw}@:^-uq;r;ίӠ rݿ)1 R] 7_&[ 9LΧ 0bݿmr9Ew8Ӂ}1bZ)E)X[gJU}~*(lz]`#]4dBn;~|Ҿ{:gp *]ĢA dܮwn~av+ȺPpCN( J~渂0|͕ȜV$QW{'hKH~ȊU?ĭɾ_z%;˵gC*URO=Xzc(Zz-.!R^?f}j=،>|z2MKog}joN]?Cz[z"ޓ68JzEl蟪^;^gL~r=pm÷ˎ[2?uGͼE3#8>yQ闗^Zxl@G/g<)|?J/08#'?X^}C*.7x{t }'/?JP$7yNX 2 |A?|o3+Xs [f{z6%71 s>^0'n}K.z^ÁUj fژOySSB8Oiendstream endobj 3 0 obj << /Type /Pages /Kids [ 7 0 R ] /Count 1 /MediaBox [0 0 576 216] >> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font <> /ExtGState << /GS1 11 0 R /GS257 12 0 R /GS258 13 0 R >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj 11 0 obj << /Type /ExtGState /CA 1.000 >> endobj 12 0 obj << /Type /ExtGState /ca 0.400 >> endobj 13 0 obj << /Type /ExtGState /ca 1.000 >> endobj xref 0 14 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000004581 00000 n 0000004664 00000 n 
0000004816 00000 n 0000004849 00000 n 0000000212 00000 n 0000000292 00000 n 0000007544 00000 n 0000007638 00000 n 0000007737 00000 n 0000007786 00000 n 0000007835 00000 n trailer << /Size 14 /Info 1 0 R /Root 2 0 R >> startxref 7884 %%EOF brms/vignettes/brms_phylogenetics.Rmd0000644000176200001440000002723214224753376017614 0ustar liggesusers--- title: "Estimating Phylogenetic Multilevel Models with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Estimating Phylogenetic Multilevel Models with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction In the present vignette, we want to discuss how to specify phylogenetic multilevel models using **brms**. These models are relevant in evolutionary biology when data of many species are analyzed at the same time. The usual approach would be to model species as a grouping factor in a multilevel model and estimate varying intercepts (and possibly also varying slopes) over species. However, species are not independent as they come from the same phylogenetic tree and we thus have to adjust our model to incorporate this dependency. The examples discussed here are from chapter 11 of the book *Modern Phylogenetic Comparative Methods and the application in Evolutionary Biology* (de Villemeruil & Nakagawa, 2014). The necessary data can be downloaded from the corresponding website (https://www.mpcm-evolution.com/). Some of these models may take a few minutes to fit. 
## A Simple Phylogenetic Model Assume we have measurements of a phenotype, `phen` (say the body size), and a `cofactor` variable (say the temperature of the environment). We prepare the data using the following code. ```{r} phylo <- ape::read.nexus("https://paul-buerkner.github.io/data/phylo.nex") data_simple <- read.table( "https://paul-buerkner.github.io/data/data_simple.txt", header = TRUE ) head(data_simple) ``` The `phylo` object contains information on the relationship between species. Using this information, we can construct a covariance matrix of species (Hadfield & Nakagawa, 2010). ```{r} A <- ape::vcv.phylo(phylo) ``` Now we are ready to fit our first phylogenetic multilevel model: ```{r, results='hide'} model_simple <- brm( phen ~ cofactor + (1|gr(phylo, cov = A)), data = data_simple, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0, 10), "b"), prior(normal(0, 50), "Intercept"), prior(student_t(3, 0, 20), "sd"), prior(student_t(3, 0, 20), "sigma") ) ) ``` With the exception of `(1|gr(phylo, cov = A))` instead of `(1|phylo)` this is a basic multilevel model with a varying intercept over species (`phylo` is an indicator of species in this data set). However, by using `cov = A` in the `gr` function, we make sure that species are correlated as specified by the covariance matrix `A`. We pass `A` itself via the `data2` argument which can be used for any kinds of data that does not fit into the regular structure of the `data` argument. Setting priors is not required for achieving good convergence for this model, but it improves sampling speed a bit. After fitting, the results can be investigated in detail. ```{r} summary(model_simple) plot(model_simple, N = 2, ask = FALSE) plot(conditional_effects(model_simple), points = TRUE) ``` The so called phylogenetic signal (often symbolize by $\lambda$) can be computed with the `hypothesis` method and is roughly $\lambda = 0.7$ for this example. 
```{r} hyp <- "sd_phylo__Intercept^2 / (sd_phylo__Intercept^2 + sigma^2) = 0" (hyp <- hypothesis(model_simple, hyp, class = NULL)) plot(hyp) ``` Note that the phylogenetic signal is just a synonym of the intra-class correlation (ICC) used in the context of phylogenetic analysis. ## A Phylogenetic Model with Repeated Measurements Often, we have multiple observations per species and this allows us to fit more complicated phylogenetic models. ```{r} data_repeat <- read.table( "https://paul-buerkner.github.io/data/data_repeat.txt", header = TRUE ) data_repeat$spec_mean_cf <- with(data_repeat, sapply(split(cofactor, phylo), mean)[phylo]) head(data_repeat) ``` The variable `spec_mean_cf` just contains the mean of the cofactor for each species. The code for the repeated measurement phylogenetic model looks as follows: ```{r, results='hide'} model_repeat1 <- brm( phen ~ spec_mean_cf + (1|gr(phylo, cov = A)) + (1|species), data = data_repeat, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0,10), "b"), prior(normal(0,50), "Intercept"), prior(student_t(3,0,20), "sd"), prior(student_t(3,0,20), "sigma") ), sample_prior = TRUE, chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` The variables `phylo` and `species` are identical as they are both identifiers of the species. However, we model the phylogenetic covariance only for `phylo` and thus the `species` variable accounts for any specific effect that would be independent of the phylogenetic relationship between species (e.g., environmental or niche effects). Again we can obtain model summaries as well as estimates of the phylogenetic signal. ```{r} summary(model_repeat1) ``` ```{r} hyp <- paste( "sd_phylo__Intercept^2 /", "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0" ) (hyp <- hypothesis(model_repeat1, hyp, class = NULL)) plot(hyp) ``` So far, we have completely ignored the variability of the cofactor within species. 
To incorporate this into the model, we define ```{r} data_repeat$within_spec_cf <- data_repeat$cofactor - data_repeat$spec_mean_cf ``` and then fit it again using `within_spec_cf` as an additional predictor. ```{r, results='hide'} model_repeat2 <- update( model_repeat1, formula = ~ . + within_spec_cf, newdata = data_repeat, chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` The results are almost unchanged, with apparently no relationship between the phenotype and the within species variance of `cofactor`. ```{r} summary(model_repeat2) ``` Also, the phylogenetic signal remains more or less the same. ```{r} hyp <- paste( "sd_phylo__Intercept^2 /", "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0" ) (hyp <- hypothesis(model_repeat2, hyp, class = NULL)) ``` ## A Phylogenetic Meta-Analysis Let's say we have Fisher's z-transformed correlation coefficients $Zr$ per species along with corresponding sample sizes (e.g., correlations between male coloration and reproductive success): ```{r} data_fisher <- read.table( "https://paul-buerkner.github.io/data/data_effect.txt", header = TRUE ) data_fisher$obs <- 1:nrow(data_fisher) head(data_fisher) ``` We assume the sampling variance to be known and equal to $V(Zr) = \frac{1}{N - 3}$ for Fisher's values, where $N$ is the sample size per species. Incorporating the known sampling variance into the model is straightforward. One has to keep in mind, though, that **brms** requires the sampling standard deviation (square root of the variance) as input instead of the variance itself. The group-level effect of `obs` represents the residual variance, which we have to model explicitly in a meta-analytic model. 
```{r, results='hide'} model_fisher <- brm( Zr | se(sqrt(1 / (N - 3))) ~ 1 + (1|gr(phylo, cov = A)) + (1|obs), data = data_fisher, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0, 10), "Intercept"), prior(student_t(3, 0, 10), "sd") ), control = list(adapt_delta = 0.95), chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` A summary of the fitted model is obtained via ```{r} summary(model_fisher) plot(model_fisher) ``` The meta-analytic mean (i.e., the model intercept) is $0.16$ with a credible interval of $[0.08, 0.25]$. Thus the mean correlation across species is positive according to the model. ## A phylogenetic count-data model Suppose that we analyze a phenotype that consists of counts instead of being a continuous variable. In such a case, the normality assumption will likely not be justified and it is recommended to use a distribution explicitly suited for count data, for instance the Poisson distribution. The following data set (again retrieved from mpcm-evolution.org) provides an example. ```{r} data_pois <- read.table( "https://paul-buerkner.github.io/data/data_pois.txt", header = TRUE ) data_pois$obs <- 1:nrow(data_pois) head(data_pois) ``` As the Poisson distribution does not have a natural overdispersion parameter, we model the residual variance via the group-level effects of `obs` (e.g., see Lawless, 1987). ```{r, results='hide'} model_pois <- brm( phen_pois ~ cofactor + (1|gr(phylo, cov = A)) + (1|obs), data = data_pois, family = poisson("log"), data2 = list(A = A), chains = 2, cores = 2, iter = 4000, control = list(adapt_delta = 0.95) ) ``` Again, we obtain a summary of the fitted model via ```{r} summary(model_pois) plot(conditional_effects(model_pois), points = TRUE) ``` Now, assume we ignore the fact that the phenotype is count data and fit a linear normal model instead. 
```{r, results='hide'} model_normal <- brm( phen_pois ~ cofactor + (1|gr(phylo, cov = A)), data = data_pois, family = gaussian(), data2 = list(A = A), chains = 2, cores = 2, iter = 4000, control = list(adapt_delta = 0.95) ) ``` ```{r} summary(model_normal) ``` We see that `cofactor` has a positive relationship with the phenotype in both models. One should keep in mind, though, that the estimates of the Poisson model are on the log-scale, as we applied the canonical log-link function in this example. Therefore, estimates are not comparable to a linear normal model even if applied to the same data. What we can compare, however, is the model fit, for instance graphically via posterior predictive checks. ```{r} pp_check(model_pois) pp_check(model_normal) ``` Apparently, the distribution of the phenotype predicted by the Poisson model resembles the original distribution of the phenotype pretty closely, while the normal models fails to do so. We can also apply leave-one-out cross-validation for direct numerical comparison of model fit. ```{r} loo(model_pois, model_normal) ``` Since smaller values of loo indicate better fit, it is again evident that the Poisson model fits the data better than the normal model. Of course, the Poisson model is not the only reasonable option here. For instance, you could use a negative binomial model (via family `negative_binomial`), which already contains an overdispersion parameter so that modeling a varying intercept of `obs` becomes obsolete. ## Phylogenetic models with multiple group-level effects In the above examples, we have only used a single group-level effect (i.e., a varying intercept) for the phylogenetic grouping factors. In **brms**, it is also possible to estimate multiple group-level effects (e.g., a varying intercept and a varying slope) for these grouping factors. However, it requires repeatedly computing Kronecker products of covariance matrices while fitting the model. 
This will be very slow especially when the grouping factors have many levels and matrices are thus large. ## References de Villemeruil P. & Nakagawa, S. (2014) General quantitative genetic methods for comparative biology. In: *Modern phylogenetic comparative methods and their application in evolutionary biology: concepts and practice* (ed. Garamszegi L.) Springer, New York. pp. 287-303. Hadfield, J. D. & Nakagawa, S. (2010) General quantitative genetic methods for comparative biology: phylogenies, taxonomies, and multi-trait models for continuous and categorical characters. *Journal of Evolutionary Biology*. 23. 494-508. Lawless, J. F. (1987). Negative binomial and mixed Poisson regression. *Canadian Journal of Statistics*, 15(3), 209-225. brms/vignettes/citations_overview.bib0000644000176200001440000005616214160105076017640 0ustar liggesusers% Encoding: UTF-8 @Article{brms1, author = {Paul-Christian B\"urkner}, title = {{brms}: An {R} Package for Bayesian Multilevel Models using Stan}, journal = {Journal of Statistical Software}, year = {2017}, volume = {80}, number = {1}, pages = {1--28}, encoding = {UTF-8}, doi = {10.18637/jss.v080.i01} } @BOOK{brown2015, title = {Applied Mixed Models in Medicine}, publisher = {John Wiley \& Sons}, year = {2015}, author = {Brown, Helen and Prescott, Robin}, owner = {Paul}, timestamp = {2015.06.19} } @ARTICLE{lunn2000, author = {Lunn, David J and Thomas, Andrew and Best, Nicky and Spiegelhalter, David}, title = {\pkg{WinBUGS} a Bayesian Modelling Framework: Concepts, Structure, and Extensibility}, journal = {Statistics and {C}omputing}, year = {2000}, volume = {10}, pages = {325--337}, number = {4}, owner = {Paul}, publisher = {Springer}, timestamp = {2015.06.18} } @MANUAL{spiegelhalter2003, title = {\pkg{WinBUGS} Version - 1.4 User Manual}, author = {Spiegelhalter, David and Thomas, Andrew and Best, Nicky and Lunn, Dave}, year = {2003}, journal = {MRC Biostatistics Unit, Cambridge}, owner = {Paul}, publisher = {version}, 
timestamp = {2015.06.18}, url = {http://www.mrc-bsu.cam.ac.uk/bugs} } @MANUAL{spiegelhalter2007, title = {\pkg{OpenBUGS} User Manual, Version 3.0.2}, author = {Spiegelhalter, D and Thomas, A and Best, N and Lunn, D}, year = {2007}, journal = {MRC Biostatistics Unit, Cambridge}, owner = {Paul}, timestamp = {2015.06.18} } @MANUAL{plummer2013, title = {\pkg{JAGS}: Just Another Gibs Sampler}, author = {Plummer, Martyn}, year = {2013}, owner = {Paul}, timestamp = {2015.01.20}, url = {http://mcmc-jags.sourceforge.net/} } @ARTICLE{hadfield2010, author = {Hadfield, Jarrod D}, title = {MCMC Methods for Multi-Response Generalized Linear Mixed Models: the \pkg{MCMCglmm} \proglang{R} Package}, journal = {Journal of Statistical Software}, year = {2010}, volume = {33}, pages = {1--22}, number = {2}, owner = {Paul}, timestamp = {2015.06.18} } @Manual{stan2017, title = {\proglang{Stan}: A \proglang{C++} Library for Probability and Sampling, Version 2.14.0}, author = {{Stan Development Team}}, year = {2017}, owner = {Paul}, timestamp = {2015.06.18}, url = {http://mc-stan.org/}, } @Article{carpenter2017, author = {Carpenter, B. and Gelman, A. and Hoffman, M. and Lee, D. and Goodrich, B. and Betancourt, M. and Brubaker, M. A. and Guo, J. and Li, P. 
and Ridell, A.}, title = {\proglang{Stan}: A Probabilistic Programming Language}, journal = {Journal of Statistical Software}, year = {2017}, owner = {Paul}, timestamp = {2015.06.19}, } @ARTICLE{metropolis1953, author = {Metropolis, Nicholas and Rosenbluth, Arianna W and Rosenbluth, Marshall N and Teller, Augusta H and Teller, Edward}, title = {Equation of State Calculations by Fast Computing Machines}, journal = {The Journal of Chemical Physics}, year = {1953}, volume = {21}, pages = {1087--1092}, number = {6}, owner = {Paul}, publisher = {AIP Publishing}, timestamp = {2015.06.19} } @ARTICLE{hastings1970, author = {Hastings, W Keith}, title = {Monte Carlo Sampling Methods Using Markov Chains and their Applications}, journal = {Biometrika}, year = {1970}, volume = {57}, pages = {97--109}, number = {1}, owner = {Paul}, publisher = {Biometrika Trust}, timestamp = {2015.06.19} } @ARTICLE{geman1984, author = {Geman, Stuart and Geman, Donald}, title = {Stochastic Relaxation, Gibbs Distributions, and the Bayesian Restoration of Images}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, year = {1984}, pages = {721--741}, number = {6}, owner = {Paul}, publisher = {IEEE}, timestamp = {2015.06.19} } @ARTICLE{gelfand1990, author = {Gelfand, Alan E and Smith, Adrian FM}, title = {Sampling-Based Approaches to Calculating Marginal Densities}, journal = {Journal of the American Statistical Association}, year = {1990}, volume = {85}, pages = {398--409}, number = {410}, owner = {Paul}, publisher = {Taylor \& Francis Group}, timestamp = {2015.06.19} } @ARTICLE{damien1999, author = {Damien, Paul and Wakefield, Jon and Walker, Stephen}, title = {Gibbs Sampling for Bayesian Non-Conjugate and Hierarchical Models by Using Auxiliary Variables}, journal = {Journal of the Royal Statistical Society B, Statistical Methodology}, year = {1999}, pages = {331--344}, owner = {Paul}, publisher = {JSTOR}, timestamp = {2015.06.19} } @ARTICLE{neal2003, author = {Neal, Radford 
M.}, title = {Slice Sampling}, journal = {The Annals of Statistics}, year = {2003}, pages = {705--741}, owner = {Paul}, publisher = {JSTOR}, timestamp = {2015.06.19} } @InBook{neal2011, chapter = {MCMC Using Hamiltonian Dynamics}, title = {Handbook of Markov Chain Monte Carlo}, publisher = {CRC Press}, year = {2011}, author = {Neal, Radford M}, volume = {2}, owner = {Paul}, timestamp = {2015.06.19}, } @ARTICLE{hoffman2014, author = {Hoffman, Matthew D and Gelman, Andrew}, title = {The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo}, journal = {The Journal of Machine Learning Research}, year = {2014}, volume = {15}, pages = {1593--1623}, number = {1}, owner = {Paul}, publisher = {JMLR. org}, timestamp = {2015.06.19} } @BOOK{gelman2014, title = {Bayesian Data Analysis}, publisher = {Taylor \& Francis}, year = {2014}, author = {Gelman, Andrew and Carlin, John B and Stern, Hal S and Rubin, Donald B}, volume = {2}, owner = {Paul}, timestamp = {2015.06.20} } @Manual{stanM2017, title = {\proglang{Stan} Modeling Language: User's Guide and Reference Manual}, author = {{Stan Development Team}}, year = {2017}, owner = {Paul}, timestamp = {2015.06.18}, url = {http://mc-stan.org/manual.html}, } @Article{rigby2005, author = {Rigby, Robert A and Stasinopoulos, D Mikis}, title = {Generalized Additive Models for Location, Scale and Shape}, journal = {Journal of the Royal Statistical Society C (Applied Statistics)}, year = {2005}, volume = {54}, number = {3}, pages = {507--554}, publisher = {Wiley Online Library}, } @Article{lindstrom1990, author = {Lindstrom, Mary J and Bates, Douglas M}, title = {Nonlinear Mixed Effects Models for Repeated Measures Data}, journal = {Biometrics}, year = {1990}, pages = {673--687}, publisher = {JSTOR}, } @Article{wood2004, author = {Wood, Simon N}, title = {Stable and Efficient Multiple Smoothing Parameter Estimation for Generalized Additive Models}, journal = {Journal of the American Statistical Association}, year = 
{2004}, volume = {99}, number = {467}, pages = {673--686}, publisher = {Taylor \& Francis}, } @Article{rasmussen2006, author = {Rasmussen, Carl Edward and Williams, C. K. I.}, title = {Gaussian processes for machine learning}, year = {2006}, publisher = {Massachusetts Institute of Technology}, } @Article{betancourt2014, author = {Betancourt, MJ and Byrne, Simon and Livingstone, Samuel and Girolami, Mark}, title = {The Geometric Foundations of Hamiltonian Monte Carlo}, journal = {arXiv preprint arXiv:1410.5110}, year = {2014}, } @Article{betancourt2017, author = {Michael Betancourt}, title = {A Conceptual Introduction to Hamiltonian Monte Carlo}, journal = {arXiv preprint}, year = {2017}, url = {https://arxiv.org/pdf/1701.02434.pdf}, } @Manual{rstanarm2017, title = {rstanarm: {Bayesian} applied regression modeling via {Stan}.}, author = {{Stan Development Team}}, year = {2017}, note = {R package version 2.17.2}, url = {http://mc-stan.org/}, } @InProceedings{williams1996, author = {Williams, Christopher KI and Rasmussen, Carl Edward}, title = {Gaussian processes for regression}, booktitle = {Advances in neural information processing systems}, year = {1996}, pages = {514--520}, } @Article{westfall2016, author = {Westfall, Jacob and Yarkoni, Tal}, title = {Statistically Controlling for Confounding Constructs is Harder than You Think}, journal = {PloS one}, year = {2016}, volume = {11}, number = {3}, pages = {e0152719}, publisher = {Public Library of Science}, } @BOOK{demidenko2013, title = {Mixed Models: Theory and Applications with \proglang{R}}, publisher = {John Wiley \& Sons}, year = {2013}, author = {Demidenko, Eugene}, owner = {Paul}, timestamp = {2015.06.19} } @Book{pinheiro2006, title = {Mixed-Effects Models in \proglang{S} and \proglang{S-PLUS}}, publisher = {Springer-Verlage Science \& Business Media}, year = {2006}, author = {Pinheiro, Jose and Bates, Douglas}, owner = {Paul}, timestamp = {2015.06.19}, } @MANUAL{Rcore2015, title = {\proglang{R}: A Language 
and Environment for Statistical Computing}, author = {{R Core Team}}, organization = {\proglang{R} Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2015}, owner = {Paul}, timestamp = {2015.01.20}, url = {http://www.R-project.org/} } @ARTICLE{bates2015, author = {Douglas Bates and Martin M{\"a}chler and Ben Bolker and Steve Walker}, title = {Fitting Linear Mixed-Effects Models Using \pkg{lme4}}, journal = {Journal of Statistical Software}, year = {2015}, volume = {67}, pages = {1--48}, number = {1}, owner = {Paul}, timestamp = {2015.11.13} } @ARTICLE{mcgilchrist1991, author = {McGilchrist, CA and Aisbett, CW}, title = {Regression with Frailty in Survival Analysis}, journal = {Biometrics}, year = {1991}, pages = {461--466}, owner = {Paul}, publisher = {JSTOR}, timestamp = {2015.08.15} } @ARTICLE{ezzet1991, author = {Ezzet, Farkad and Whitehead, John}, title = {A Random Effects Model for Ordinal Responses from a Crossover Trial}, journal = {Statistics in Medicine}, year = {1991}, volume = {10}, pages = {901--907}, number = {6}, owner = {Paul}, publisher = {Wiley Online Library}, timestamp = {2015.09.03} } @Book{gelmanMLM2006, title = {Data Analysis Using Regression and Multilevel/Hierarchical Models}, publisher = {Cambridge University Press}, year = {2006}, author = {Gelman, Andrew and Hill, Jennifer}, owner = {Paul}, timestamp = {2016.02.21}, } @Book{fox2011, title = {An R companion to Applied Regression, Second Edition}, publisher = {Sage}, year = {2011}, author = {Fox, John and Weisberg, Sanford}, } @ARTICLE{lewandowski2009, author = {Lewandowski, Daniel and Kurowicka, Dorota and Joe, Harry}, title = {Generating Random Correlation Matrices Based on Vines and Extended Onion Method}, journal = {Journal of Multivariate Analysis}, year = {2009}, volume = {100}, pages = {1989--2001}, number = {9}, owner = {Paul}, publisher = {Elsevier}, timestamp = {2015.07.23} } @ARTICLE{juarez2010, author = {Ju{\'a}rez, Miguel A and Steel, Mark FJ}, title = 
{Model-Based Clustering of Non-Gaussian Panel Data Based on Skew-t Distributions}, journal = {Journal of Business \& Economic Statistics}, year = {2010}, volume = {28}, pages = {52--66}, number = {1}, owner = {Paul}, publisher = {Taylor \& Francis}, timestamp = {2015.11.06} } @ARTICLE{creutz1988, author = {Creutz, Michael}, title = {Global Monte Carlo Algorithms for Many-Fermion Systems}, journal = {Physical Review D}, year = {1988}, volume = {38}, pages = {1228}, number = {4}, owner = {Paul}, publisher = {APS}, timestamp = {2015.08.10} } @BOOK{griewank2008, title = {Evaluating Derivatives: Principles and Techniques of Algorithmic Differentiation}, publisher = {Siam}, year = {2008}, author = {Griewank, Andreas and Walther, Andrea}, owner = {Paul}, timestamp = {2015.08.10} } @ARTICLE{watanabe2010, author = {Watanabe, Sumio}, title = {Asymptotic Equivalence of Bayes Cross Validation and Widely Applicable Information Criterion in Singular Learning Theory}, journal = {The Journal of Machine Learning Research}, year = {2010}, volume = {11}, pages = {3571--3594}, owner = {Paul}, publisher = {JMLR. 
org}, timestamp = {2015.08.10} } @TECHREPORT{gelfand1992, author = {Gelfand, Alan E and Dey, Dipak K and Chang, Hong}, title = {Model Determination Using Predictive Distributions with Implementation via Sampling-Based Methods}, institution = {DTIC Document}, year = {1992}, owner = {Paul}, timestamp = {2015.08.17} } @ARTICLE{ionides2008, author = {Ionides, Edward L}, title = {Truncated Importance Sampling}, journal = {Journal of Computational and Graphical Statistics}, year = {2008}, volume = {17}, pages = {295--311}, number = {2}, owner = {Paul}, publisher = {Taylor \& Francis}, timestamp = {2015.08.17} } @ARTICLE{vehtari2015, author = {Aki Vehtari and Andrew Gelman and Jonah Gabry}, title = {Efficient Implementation of Leave-One-Out Cross-Validation and WAIC for Evaluating Fitted Bayesian Models}, journal = {Unpublished manuscript}, year = {2015}, pages = {1--22}, owner = {Paul}, timestamp = {2015.08.26}, url = {http://www.stat.columbia.edu/~gelman/research/unpublished/loo_stan.pdf} } @ARTICLE{vanderlinde2005, author = {van der Linde, Angelika}, title = {DIC in Variable Selection}, journal = {Statistica Neerlandica}, year = {2005}, volume = {59}, pages = {45--56}, number = {1}, owner = {Paul}, publisher = {Wiley Online Library}, timestamp = {2015.08.10} } @Manual{loo2016, title = {\pkg{loo}: {E}fficient Leave-One-Out Cross-Validation and {WAIC} for {B}ayesian Models.}, author = {Aki Vehtari and Andrew Gelman and Jonah Gabry}, year = {2016}, note = {R package version 1.0.0}, url = {https://github.com/stan-dev/loo}, } @MANUAL{Xcode2015, title = {\pkg{Xcode} Software, Version~7}, author = {{Apple Inc.}}, address = {Cupertino, USA}, year = {2015}, owner = {Paul}, timestamp = {2015.01.20}, url = {https://developer.apple.com/xcode/} } @Article{masters1982, author = {Masters, Geoff N}, title = {A {R}asch Model for Partial Credit Scoring}, journal = {Psychometrika}, year = {1982}, volume = {47}, number = {2}, pages = {149--174}, owner = {Paul}, publisher = {Springer}, 
timestamp = {2015.02.08}, } @ARTICLE{tutz1990, author = {Tutz, Gerhard}, title = {Sequential Item Response Models with an Ordered Response}, journal = {British Journal of Mathematical and Statistical Psychology}, year = {1990}, volume = {43}, pages = {39--55}, number = {1}, owner = {Paul}, publisher = {Wiley Online Library}, timestamp = {2015.02.01} } @ARTICLE{yee2010, author = {Yee, Thomas W}, title = {The \pkg{VGAM} Package for Categorical Data Analysis}, journal = {Journal of Statistical Software}, year = {2010}, volume = {32}, pages = {1--34}, number = {10}, owner = {Paul}, timestamp = {2015.09.04} } @ARTICLE{andrich1978b, author = {Andrich, David}, title = {Application of a Psychometric Rating Model to Ordered Categories which are Scored with Successive Integers}, journal = {Applied Psychological Measurement}, year = {1978}, volume = {2}, pages = {581--594}, number = {4}, owner = {Paul}, publisher = {Sage Publications}, timestamp = {2015.01.27} } @ARTICLE{andersen1977, author = {Andersen, Erling B}, title = {Sufficient Statistics and Latent Trait Models}, journal = {Psychometrika}, year = {1977}, volume = {42}, pages = {69--81}, number = {1}, owner = {Paul}, publisher = {Springer}, timestamp = {2015.01.27} } @ARTICLE{vanderark2001, author = {Van Der Ark, L Andries}, title = {Relationships and Properties of Polytomous Item Response Theory Models}, journal = {Applied Psychological Measurement}, year = {2001}, volume = {25}, pages = {273--282}, number = {3}, owner = {Paul}, publisher = {Sage Publications}, timestamp = {2015.01.26} } @Book{tutz2000, title = {Die {A}nalyse {K}ategorialer {D}aten: {A}nwendungsorientierte {E}inf{\"u}hrung in {L}ogit-{M}odellierung und {K}ategoriale {R}egression}, publisher = {Oldenbourg Verlag}, year = {2000}, author = {Tutz, Gerhard}, owner = {Paul}, timestamp = {2015.01.23}, } @MANUAL{rstanarm2016, title = {rstanarm: Bayesian Applied Regression Modeling via \pkg{Stan}}, author = {Jonah Gabry and Ben Goodrich}, year = {2016}, note = 
{R package version 2.9.0-3}, owner = {Paul}, timestamp = {2016.03.04}, url = {https://CRAN.R-project.org/package=rstanarm} } @MANUAL{mcelreath2016, title = {rethinking: Statistical Rethinking Course and Book Package}, author = {Richard McElreath}, year = {2016}, note = {R package version 1.58}, owner = {Paul}, timestamp = {2016.03.04}, url = {https://github.com/rmcelreath/rethinking} } @MANUAL{nlme2016, title = {\pkg{nlme}: Linear and Nonlinear Mixed Effects Models}, author = {Jose Pinheiro and Douglas Bates and Saikat DebRoy and Deepayan Sarkar and {R Core Team}}, year = {2016}, note = {R package version 3.1-124}, owner = {Paul}, timestamp = {2016.03.06}, url = {http://CRAN.R-project.org/package=nlme} } @BOOK{hastie1990, title = {Generalized Additive Models}, publisher = {CRC Press}, year = {1990}, author = {Hastie, Trevor J and Tibshirani, Robert J}, volume = {43}, owner = {Paul}, timestamp = {2015.09.07} } @Article{wood2011, author = {Wood, Simon N}, title = {Fast Stable Restricted Maximum Likelihood and Marginal Likelihood Estimation of Semiparametric Generalized Linear Models}, journal = {Journal of the Royal Statistical Society: Series B (Statistical Methodology)}, year = {2011}, volume = {73}, number = {1}, pages = {3--36}, publisher = {Wiley Online Library}, } @BOOK{zuur2014, title = {A beginner's Guide to Generalized Additive Models with \proglang{R}}, publisher = {Highland Statistics Limited}, year = {2014}, author = {Zuur, Alain F}, owner = {Paul}, timestamp = {2016.03.04} } @ARTICLE{chung2013, author = {Yeojin Chung and Sophia Rabe-Hesketh and Vincent Dorie and Andrew Gelman and Jingchen Liu}, title = {A nondegenerate penalized likelihood estimator for variance parameters in multilevel models}, journal = {Psychometrika}, year = {2013}, volume = {78}, pages = {685--709}, number = {4}, owner = {Paul}, publisher = {Springer}, timestamp = {2016.02.22}, url = {http://gllamm.org/} } @ARTICLE{duane1987, author = {Duane, Simon and Kennedy, Anthony D and 
Pendleton, Brian J and Roweth, Duncan}, title = {Hybrid Monte Carlo}, journal = {Physics Letters B}, year = {1987}, volume = {195}, pages = {216--222}, number = {2}, owner = {Paul}, publisher = {Elsevier}, timestamp = {2015.06.19} } @ARTICLE{natarajan2000, author = {Natarajan, Ranjini and Kass, Robert E}, title = {Reference Bayesian Methods for Generalized Linear Mixed Models}, journal = {Journal of the American Statistical Association}, year = {2000}, volume = {95}, pages = {227--237}, number = {449}, owner = {Paul}, publisher = {Taylor \& Francis}, timestamp = {2015.07.23} } @ARTICLE{kass2006, author = {Kass, Robert E and Natarajan, Ranjini}, title = {A Default Conjugate Prior for Variance Components in Generalized Linear Mixed Models (Comment on Article by Browne and Draper)}, journal = {Bayesian Analysis}, year = {2006}, volume = {1}, pages = {535--542}, number = {3}, owner = {Paul}, publisher = {International Society for Bayesian Analysis}, timestamp = {2015.07.23} } @ARTICLE{plummer2008, author = {Plummer, Martyn}, title = {Penalized Loss Functions for Bayesian Model Comparison}, journal = {Biostatistics}, year = {2008}, owner = {Paul}, publisher = {Biometrika Trust}, timestamp = {2015.08.10} } @ARTICLE{spiegelhalter2002, author = {Spiegelhalter, David J and Best, Nicola G and Carlin, Bradley P and Van Der Linde, Angelika}, title = {Bayesian Measures of Model Complexity and Fit}, journal = {Journal of the Royal Statistical Society B, Statistical Methodology}, year = {2002}, volume = {64}, pages = {583--639}, number = {4}, owner = {Paul}, publisher = {Wiley Online Library}, timestamp = {2015.09.02} } @MANUAL{Rtools2015, title = {\pkg{Rtools} Software, Version~3.3}, author = {{R Core Team}}, organization = {\proglang{R} Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2015}, owner = {Paul}, timestamp = {2015.01.20}, url = {https://cran.r-project.org/bin/windows/Rtools/} } @Manual{afex2015, title = {\pkg{afex}: Analysis of Factorial 
Experiments}, author = {Henrik Singmann and Ben Bolker and Jake Westfall}, year = {2015}, note = {R package version 0.15-2}, owner = {Paul}, timestamp = {2016.02.13}, url = {https://CRAN.R-project.org/package=afex}, } @INPROCEEDINGS{carvalho2009, author = {Carvalho, Carlos M and Polson, Nicholas G and Scott, James G}, title = {Handling Sparsity via the Horseshoe}, booktitle = {International Conference on Artificial Intelligence and Statistics}, year = {2009}, pages = {73--80}, owner = {Paul}, timestamp = {2015.11.09} } @ARTICLE{carvalho2010, author = {Carvalho, Carlos M and Polson, Nicholas G and Scott, James G}, title = {The Horseshoe Estimator for Sparse Signals}, journal = {Biometrika}, year = {2010}, pages = {1--16}, owner = {Paul}, publisher = {Biometrika Trust}, timestamp = {2015.11.09} } @ARTICLE{gelman2006, author = {Gelman, Andrew}, title = {Prior Distributions for Variance Parameters in Hierarchical Models}, journal = {Bayesian Analysis}, year = {2006}, volume = {1}, pages = {515--534}, number = {3}, owner = {Paul}, publisher = {International Society for Bayesian Analysis}, timestamp = {2015.07.15} } @Article{gelman1992, author = {Gelman, Andrew and Rubin, Donald B}, title = {Inference from Iterative Simulation Using Multiple Sequences}, journal = {Statistical Science}, year = {1992}, pages = {457--472}, publisher = {JSTOR}, } @MANUAL{gabry2015, title = {\pkg{shinystan}: Interactive Visual and Numerical Diagnostics and Posterior Analysis for Bayesian Models}, author = {Jonah Gabry}, year = {2015}, note = {\proglang{R}~Package Version~2.0.0}, owner = {Paul}, timestamp = {2015.08.26}, url = {http://CRAN.R-project.org/package=shinystan} } @ARTICLE{samejima1969, author = {Samejima, Fumiko}, title = {Estimation of Latent Ability Using a Response Pattern of Graded Scores}, journal = {Psychometrika Monograph Supplement}, year = {1969}, owner = {Paul}, timestamp = {2015.01.27} } @MISC{christensen2015, author = {R. H. B. 
Christensen}, title = {\pkg{ordinal} -- Regression Models for Ordinal Data}, year = {2015}, note = {\proglang{R} package version 2015.6-28. http://www.cran.r-project.org/package=ordinal/}, owner = {Paul}, timestamp = {2015.09.04} } @ARTICLE{andrich1978a, author = {Andrich, David}, title = {A Rating Formulation for Ordered Response Categories}, journal = {Psychometrika}, year = {1978}, volume = {43}, pages = {561--573}, number = {4}, owner = {Paul}, publisher = {Springer}, timestamp = {2015.01.27} } @Comment{jabref-meta: databaseType:bibtex;} brms/vignettes/brms_overview.ltx0000644000176200001440000017473514213413565016674 0ustar liggesusers\documentclass[article, nojss]{jss} %\VignetteIndexEntry{Overview of the brms Package} %\VignetteEngine{R.rsp::tex} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% almost as usual \author{Paul-Christian B\"urkner} \title{\pkg{brms}: An \proglang{R} Package for Bayesian Multilevel Models using \pkg{Stan}} %% for pretty printing and a nice hypersummary also set: \Plainauthor{Paul-Christian B\"urkner} %% comma-separated \Plaintitle{brms: An R Package for Bayesian Multilevel Models using Stan} %% without formatting \Shorttitle{\pkg{brms}: Bayesian Multilevel Models using Stan} %% a short title (if necessary) %% an abstract and keywords \Abstract{ The \pkg{brms} package implements Bayesian multilevel models in \proglang{R} using the probabilistic programming language \pkg{Stan}. A wide range of distributions and link functions are supported, allowing users to fit -- among others -- linear, robust linear, binomial, Poisson, survival, response times, ordinal, quantile, zero-inflated, hurdle, and even non-linear models all in a multilevel context. Further modeling options include autocorrelation of the response variable, user defined covariance structures, censored data, as well as meta-analytic standard errors. 
Prior specifications are flexible and explicitly encourage users to apply prior distributions that actually reflect their beliefs. In addition, model fit can easily be assessed and compared using posterior-predictive checks and leave-one-out cross-validation. If you use \pkg{brms}, please cite this article as published in the Journal of Statistical Software \citep{brms1}. } \Keywords{Bayesian inference, multilevel model, ordinal data, MCMC, \proglang{Stan}, \proglang{R}} \Plainkeywords{Bayesian inference, multilevel model, ordinal data, MCMC, Stan, R} %% without formatting %% at least one keyword must be supplied %% publication information %% NOTE: Typically, this can be left commented and will be filled out by the technical editor %% \Volume{50} %% \Issue{9} %% \Month{June} %% \Year{2012} %% \Submitdate{2012-06-04} %% \Acceptdate{2012-06-04} %% The address of (at least) one author should be given %% in the following format: \Address{ Paul-Christian B\"urkner\\ E-mail: \email{paul.buerkner@gmail.com}\\ URL: \url{https://paul-buerkner.github.io} } %% It is also possible to add a telephone and fax number %% before the e-mail in the following format: %% Telephone: +43/512/507-7103 %% Fax: +43/512/507-2851 %% for those who use Sweave please include the following line (with % symbols): %% need no \usepackage{Sweave.sty} %% end of declarations %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} %% include your article here, just as usual %% Note that you should use the \pkg{}, \proglang{} and \code{} commands. \section{Introduction} Multilevel models (MLMs) offer a great flexibility for researchers across sciences \citep{brown2015, demidenko2013, gelmanMLM2006, pinheiro2006}. They allow the modeling of data measured on different levels at the same time -- for instance data of students nested within classes and schools -- thus taking complex dependency structures into account. 
It is not surprising that many packages for \proglang{R} \citep{Rcore2015} have been developed to fit MLMs. Possibly the most widely known package in this area is \pkg{lme4} \citep{bates2015}, which uses maximum likelihood or restricted maximum likelihood methods for model fitting. Although alternative Bayesian methods have several advantages over frequentist approaches (e.g., the possibility of explicitly incorporating prior knowledge about parameters into the model), their practical use was limited for a long time because the posterior distributions of more complex models (such as MLMs) could not be found analytically. Markov chain Monte Carlo (MCMC) algorithms allowing to draw random samples from the posterior were not available or too time-consuming. In the last few decades, however, this has changed with the development of new algorithms and the rapid increase of general computing power. Today, several software packages implement these techniques, for instance \pkg{WinBugs} \citep{lunn2000, spiegelhalter2003}, \pkg{OpenBugs} \citep{spiegelhalter2007}, \pkg{JAGS} \citep{plummer2013}, \pkg{MCMCglmm} \citep{hadfield2010} and \pkg{Stan} \citep{stan2017, carpenter2017} to mention only a few. With the exception of the latter, all of these programs are primarily using combinations of Metropolis-Hastings updates \citep{metropolis1953,hastings1970} and Gibbs-sampling \citep{geman1984,gelfand1990}, sometimes also coupled with slice-sampling \citep{damien1999,neal2003}. One of the main problems of these algorithms is their rather slow convergence for high-dimensional models with correlated parameters \citep{neal2011,hoffman2014,gelman2014}. Furthermore, Gibbs-sampling requires priors to be conjugate to the likelihood of parameters in order to work efficiently \citep{gelman2014}, thus reducing the freedom of the researcher in choosing a prior that reflects his or her beliefs. 
In contrast, \pkg{Stan} implements Hamiltonian Monte Carlo \citep{duane1987, neal2011} and its extension, the No-U-Turn Sampler (NUTS) \citep{hoffman2014}. These algorithms converge much more quickly especially for high-dimensional models regardless of whether the priors are conjugate or not \citep{hoffman2014}. Similar to software packages like \pkg{WinBugs}, \pkg{Stan} comes with its own programming language, allowing for great modeling flexibility (cf., \citeauthor{stanM2017} \citeyear{stanM2017}; \citeauthor{carpenter2017} \citeyear{carpenter2017}). Many researchers may still hesitate to use \pkg{Stan} directly, as every model has to be written, debugged and possibly also optimized. This may be a time-consuming and error prone process even for researchers familiar with Bayesian inference. The package \pkg{brms}, presented in this paper, aims at closing this gap (at least for MLMs) allowing the user to benefit from the merits of \pkg{Stan} only by using simple, \pkg{lme4}-like formula syntax. \pkg{brms} supports a wide range of distributions and link functions, allows for multiple grouping factors each with multiple group-level effects, autocorrelation of the response variable, user defined covariance structures, as well as flexible and explicit prior specifications. The purpose of the present article is to provide a general overview of the \pkg{brms} package (version 0.10.0). We begin by explaining the underlying structure of MLMs. Next, the software is introduced in detail using recurrence times of infection in kidney patients \citep{mcgilchrist1991} and ratings of inhaler instructions \citep{ezzet1991} as examples. We end by comparing \pkg{brms} to other \proglang{R} packages implementing MLMs and describe future plans for extending the package. 
\section{Model description} \label{model} The core of every MLM is the prediction of the response $y$ through the linear combination $\eta$ of predictors transformed by the inverse link function $f$ assuming a certain distribution $D$ for $y$. We write $$y_i \sim D(f(\eta_i), \theta)$$ to stress the dependency on the $i\textsuperscript{th}$ data point. In many \proglang{R} packages, $D$ is also called the `family' and we will use this term in the following. The parameter $\theta$ describes additional family specific parameters that typically do not vary across data points, such as the standard deviation $\sigma$ in normal models or the shape $\alpha$ in Gamma or negative binomial models. The linear predictor can generally be written as $$\eta = \mathbf{X} \beta + \mathbf{Z} u$$ In this equation, $\beta$ and $u$ are the coefficients at population-level and group-level respectively and $\mathbf{X}, \mathbf{Z}$ are the corresponding design matrices. The response $y$ as well as $\mathbf{X}$ and $\mathbf{Z}$ make up the data, whereas $\beta$, $u$, and $\theta$ are the model parameters being estimated. The coefficients $\beta$ and $u$ may be more commonly known as fixed and random effects. However, we avoid these terms in the present paper following the recommendations of \cite{gelmanMLM2006}, as they are not used unambiguously in the literature. Also, we want to make explicit that $u$ is a model parameter in the same manner as $\beta$ so that uncertainty in its estimates can be naturally evaluated. In fact, this is an important advantage of Bayesian MCMC methods as compared to maximum likelihood approaches, which do not treat $u$ as a parameter, but assume that it is part of the error term instead (cf., \citeauthor{fox2011}, \citeyear{fox2011}). Except for linear models, we do not incorporate an additional error term for every observation by default. 
If desired, such an error term can always be modeled using a grouping factor with as many levels as observations in the data. \subsection{Prior distributions} \subsubsection{Regression parameters at population-level} In \pkg{brms}, population-level parameters are not restricted to have normal priors. Instead, every parameter can have every one-dimensional prior implemented in \pkg{Stan}, for instance uniform, Cauchy or even Gamma priors. As a negative side effect of this flexibility, correlations between them cannot be modeled as parameters. If desired, point estimates of the correlations can be obtained after sampling has been done. By default, population level parameters have an improper flat prior over the reals. \subsubsection{Regression parameters at group-level} The group-level parameters $u$ are assumed to come from a multivariate normal distribution with mean zero and unknown covariance matrix $\mathbf{\Sigma}$: $$u \sim N(0, \mathbf{\Sigma})$$ As is generally the case, covariances between group-level parameters of different grouping factors are assumed to be zero. This implies that $\mathbf{Z}$ and $u$ can be split up into several matrices $\mathbf{Z_k}$ and parameter vectors $u_k$, where $k$ indexes grouping factors, so that the model can be simplified to $$u_k \sim N(0, \mathbf{\Sigma_k})$$ Usually, but not always, we can also assume group-level parameters associated with different levels (indexed by $j$) of the same grouping factor to be independent leading to $$u_{kj} \sim N(0, \mathbf{V_k})$$ The covariance matrices $\mathbf{V_k}$ are modeled as parameters. In most packages, an Inverse-Wishart distribution is used as a prior for $\mathbf{V_k}$. This is mostly because its conjugacy leads to good properties of Gibbs-Samplers \citep{gelman2014}. However, there are good arguments against the Inverse-Wishart prior \citep{natarajan2000, kass2006}. The NUTS-Sampler implemented in \pkg{Stan} does not require priors to be conjugate. 
This advantage is utilized in \pkg{brms}: $\mathbf{V_k}$ is parameterized in terms of a correlation matrix $\mathbf{\Omega_k}$ and a vector of standard deviations $\sigma_k$ through $$\mathbf{V_k} = \mathbf{D}(\sigma_k) \mathbf{\Omega_k} \mathbf{D}(\sigma_k)$$ where $\mathbf{D}(\sigma_k)$ denotes the diagonal matrix with diagonal elements $\sigma_k$. Priors are then specified for the parameters on the right hand side of the equation. For $\mathbf{\Omega_k}$, we use the LKJ-Correlation prior with parameter $\zeta > 0$ by \cite{lewandowski2009}\footnote{Internally, the Cholesky factor of the correlation matrix is used, as it is more efficient and numerically stable.}: $$\mathbf{\Omega_k} \sim \mathrm{LKJ}(\zeta)$$ The expected value of the LKJ-prior is the identity matrix (implying correlations of zero) for any positive value of $\zeta$, which can be interpreted like the shape parameter of a symmetric beta distribution \citep{stanM2017}. If $\zeta = 1$ (the default in \pkg{brms}) the density is uniform over correlation matrices of the respective dimension. If $\zeta > 1$, the identity matrix is the mode of the prior, with a sharper peak in the density for larger values of $\zeta$. If $0 < \zeta < 1$ the prior is U-shaped having a trough at the identity matrix, which leads to higher probabilities for non-zero correlations. For every element of $\sigma_k$, any prior can be applied that is defined on the non-negative reals only. As default in \pkg{brms}, we use a half Student-t prior with 3 degrees of freedom. This prior often leads to better convergence of the models than a half Cauchy prior, while still being relatively weakly informative. Sometimes -- for instance when modeling pedigrees -- different levels of the same grouping factor cannot be assumed to be independent. 
In this case, the covariance matrix of $u_k$ becomes $$\mathbf{\Sigma_k} = \mathbf{V_k} \otimes \mathbf{A_k}$$ where $\mathbf{A_k}$ is the known covariance matrix between levels and $\otimes$ is the Kronecker product. \subsubsection{Family specific parameters} For some families, additional parameters need to be estimated. In the current section, we only name the most important ones. Normal and Student's distributions need the parameter $\sigma$ to account for residual error variance. By default, $\sigma$ has a half Cauchy prior with a scale parameter that depends on the standard deviation of the response variable to remain only weakly informative regardless of response variable's scaling. Furthermore, Student's distributions needs the parameter $\nu$ representing the degrees of freedom. By default, $\nu$ has a wide gamma prior as proposed by \cite{juarez2010}. Gamma, Weibull, and negative binomial distributions need the shape parameter $\alpha$ that also has a wide gamma prior by default. \section{Parameter estimation} The \pkg{brms} package does not fit models itself but uses \pkg{Stan} on the back-end. Accordingly, all samplers implemented in \pkg{Stan} can be used to fit \pkg{brms} models. Currently, these are the static Hamiltonian Monte-Carlo (HMC) Sampler sometimes also referred to as Hybrid Monte-Carlo \citep{neal2011, neal2003, duane1987} and its extension the No-U-Turn Sampler (NUTS) by \cite{hoffman2014}. HMC-like algorithms produce samples that are much less autocorrelated than those of other samplers such as the random-walk Metropolis algorithm \citep{hoffman2014, Creutz1988}. The main drawback of this increased efficiency is the need to calculate the gradient of the log-posterior, which can be automated using algorithmic differentiation \citep{griewank2008} but is still a time-consuming process for more complex models. Thus, using HMC leads to higher quality samples but takes more time per sample than other algorithms typically applied. 
Another drawback of HMC is the need to pre-specify at least two parameters, which are both critical for the performance of HMC. The NUTS Sampler allows setting these parameters automatically thus eliminating the need for any hand-tuning, while still being at least as efficient as a well tuned HMC \citep{hoffman2014}. For more details on the sampling algorithms applied in \pkg{Stan}, see the \pkg{Stan} user's manual \citep{stanM2017} as well as \cite{hoffman2014}. In addition to the estimation of model parameters, \pkg{brms} allows drawing samples from the posterior predictive distribution as well as from the pointwise log-likelihood. Both can be used to assess model fit. The former allows a comparison between the actual response $y$ and the response $\hat{y}$ predicted by the model. The pointwise log-likelihood can be used, among others, to calculate the widely applicable information criterion (WAIC) proposed by \cite{watanabe2010} and leave-one-out cross-validation (LOO; \citealp{gelfand1992}; \citealp{vehtari2015}; see also \citealp{ionides2008}) both allowing to compare different models applied to the same data (lower WAICs and LOOs indicate better model fit). The WAIC can be viewed as an improvement of the popular deviance information criterion (DIC), which has been criticized by several authors (\citealp{vehtari2015}; \citealp{plummer2008}; \citealp{vanderlinde2005}; see also the discussion at the end of the original DIC paper by \citealp{spiegelhalter2002}) in part because of problems arising from fact that the DIC is only a point estimate. In \pkg{brms}, WAIC and LOO are implemented using the \pkg{loo} package \citep{loo2016} also following the recommendations of \cite{vehtari2015}. \section{Software} \label{software} The \pkg{brms} package provides functions for fitting MLMs using \pkg{Stan} for full Bayesian inference. To install the latest release version of \pkg{brms} from CRAN, type \code{install.packages("brms")} within \proglang{R}. 
The current developmental version can be downloaded from GitHub via \begin{Sinput} devtools::install_github("paul-buerkner/brms") \end{Sinput} Additionally, a \proglang{C++} compiler is required. This is because \pkg{brms} internally creates \pkg{Stan} code, which is translated to \proglang{C++} and compiled afterwards. The program \pkg{Rtools} \citep{Rtools2015} comes with a \proglang{C++} compiler for Windows\footnote{During the installation process, there is an option to change the system \code{PATH}. Please make sure to check this options, because otherwise \pkg{Rtools} will not be available within \proglang{R}.}. On OS X, one should use \pkg{Xcode} \citep{Xcode2015} from the App Store. To check whether the compiler can be called within \proglang{R}, run \code{system("g++ -v")} when using \pkg{Rtools} or \code{system("clang++ -v")} when using \pkg{Xcode}. If no warning occurs and a few lines of difficult to read system code are printed out, the compiler should work correctly. For more detailed instructions on how to get the compilers running, see the prerequisites section on \url{https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started}. Models are fitted in \pkg{brms} using the following procedure, which is also summarized in Figure~\ref{flowchart}. First, the user specifies the model using the \code{brm} function in a way typical for most model fitting \proglang{R} functions, that is by defining \code{formula}, \code{data}, and \code{family}, as well as some other optional arguments. Second, this information is processed and the \code{make_stancode} and \code{make_standata} functions are called. The former generates the model code in \pkg{Stan} language and the latter prepares the data for use in \pkg{Stan}. These two are the mandatory parts of every \pkg{Stan} model and without \pkg{brms}, users would have to specify them themselves. 
Third, \pkg{Stan} code and data as well as additional arguments (such as the number of iterations and chains) are passed to functions of the \pkg{rstan} package (the \proglang{R} interface of \pkg{Stan}; \citeauthor{stan2017}, \citeyear{stan2017}). Fourth, the model is fitted by \pkg{Stan} after translating and compiling it in \proglang{C++}. Fifth, after the model has been fitted and returned by \pkg{rstan}, the fitted model object is post-processed in \pkg{brms} among others by renaming the model parameters to be understood by the user. Sixth, the results can be investigated in \proglang{R} using various methods such as \code{summary}, \code{plot}, or \code{predict} (for a complete list of methods type \code{methods(class = "brmsfit")}). \begin{figure}[ht] \centering \includegraphics[height = 0.4\textheight, keepaspectratio]{flowchart.pdf} \caption{High level description of the model fitting procedure used in \pkg{brms}.} \label{flowchart} \end{figure} \subsection{A worked example} In the following, we use an example about the recurrence time of an infection in kidney patients initially published by \cite{mcgilchrist1991}. The data set consists of 76 entries of 7 variables: \begin{Sinput} R> library("brms") R> data("kidney") R> head(kidney, n = 3) \end{Sinput} \begin{Soutput} time censored patient recur age sex disease 1 8 0 1 1 28 male other 2 23 0 2 1 48 female GN 3 22 0 3 1 32 male other \end{Soutput} Variable \code{time} represents the recurrence time of the infection, \code{censored} indicates if \code{time} is right censored (\code{1}) or not censored (\code{0}), variable \code{patient} is the patient id, and \code{recur} indicates if it is the first or second recurrence in that patient. Finally, variables \code{age}, \code{sex}, and \code{disease} make up the predictors. 
\subsection[Fitting models with brms]{Fitting models with \pkg{brms}} The core of the \pkg{brms} package is the \code{brm} function and we will explain its argument structure using the example above. Suppose we want to predict the (possibly censored) recurrence time using a log-normal model, in which the intercept as well as the effect of \code{age} is nested within patients. Then, we may use the following code: \begin{Sinput} fit1 <- brm(formula = time | cens(censored) ~ age * sex + disease + (1 + age|patient), data = kidney, family = lognormal(), prior = c(set_prior("normal(0,5)", class = "b"), set_prior("cauchy(0,2)", class = "sd"), set_prior("lkj(2)", class = "cor")), warmup = 1000, iter = 2000, chains = 4, control = list(adapt_delta = 0.95)) \end{Sinput} \subsection[formula: Information on the response and predictors]{\code{formula}: Information on the response and predictors} Without doubt, \code{formula} is the most complicated argument, as it contains information on the response variable as well as on predictors at different levels of the model. Everything before the $\sim$ sign relates to the response part of \code{formula}. In the usual and most simple case, this is just one variable name (e.g., \code{time}). However, to incorporate additional information about the response, one can add one or more terms of the form \code{| fun(variable)}. \code{fun} may be one of a few functions defined internally in \pkg{brms} and \code{variable} corresponds to a variable in the data set supplied by the user. In this example, \code{cens} makes up the internal function that handles censored data, and \code{censored} is the variable that contains information on the censoring. 
Other available functions in this context are \code{weights} and \code{disp} to allow different sorts of weighting, \code{se} to specify known standard errors primarily for meta-analysis, \code{trunc} to define truncation boundaries, \code{trials} for binomial models\footnote{In functions such as \code{glm} or \code{glmer}, the binomial response is typically passed as \code{cbind(success, failure)}. In \pkg{brms}, the equivalent syntax is \code{success | trials(success + failure)}.}, and \code{cat} to specify the number of categories for ordinal models. Everything on the right side of $\sim$ specifies predictors. Here, the syntax exactly matches that of \pkg{lme4}. For both, population-level and group-level terms, the \code{+} is used to separate different effects from each other. Group-level terms are of the form \code{(coefs | group)}, where \code{coefs} contains one or more variables whose effects are assumed to vary with the levels of the grouping factor given in \code{group}. Multiple grouping factors each with multiple group-level coefficients are possible. In the present example, only one group-level term is specified in which \code{1 + age} are the coefficients varying with the grouping factor \code{patient}. This implies that the intercept of the model as well as the effect of age is supposed to vary between patients. By default, group-level coefficients within a grouping factor are assumed to be correlated. Correlations can be set to zero by using the \code{(coefs || group)} syntax\footnote{In contrast to \pkg{lme4}, the \code{||} operator in \pkg{brms} splits up the design matrix computed from \code{coefs} instead of decomposing \code{coefs} in its terms. This implies that columns of the design matrix originating from the same factor are also assumed to be uncorrelated, whereas \pkg{lme4} estimates the correlations in this case. 
For a way to achieve \pkg{brms}-like behavior with \pkg{lme4}, see the \code{mixed} function of the \pkg{afex} package by \cite{afex2015}.}. Everything on the right side of \code{formula} that is not recognized as part of a group-level term is treated as a population-level effect. In this example, the population-level effects are \code{age}, \code{sex}, and \code{disease}. \subsection[family: Distribution of the response variable]{\code{family}: Distribution of the response variable} Argument \code{family} should usually be a family function, a call to a family function or a character string naming the family. If not otherwise specified, default link functions are applied. \pkg{brms} comes with a large variety of families. Linear and robust linear regression can be performed using the \code{gaussian} or \code{student} family combined with the \code{identity} link. For dichotomous and categorical data, families \code{bernoulli}, \code{binomial}, and \code{categorical} combined with the \code{logit} link, by default, are perfectly suited. Families \code{poisson}, \code{negbinomial}, and \code{geometric} allow for modeling count data. Families \code{lognormal}, \code{Gamma}, \code{exponential}, and \code{weibull} can be used (among others) for survival regression. Ordinal regression can be performed using the families \code{cumulative}, \code{cratio}, \code{sratio}, and \code{acat}. Finally, families \code{zero_inflated_poisson}, \code{zero_inflated_negbinomial}, \code{zero_inflated_binomial}, \code{zero_inflated_beta}, \code{hurdle_poisson}, \code{hurdle_negbinomial}, and \code{hurdle_gamma} can be used to adequately model excess zeros in the response. In our example, we use \code{family = lognormal()} implying a log-normal ``survival'' model for the response variable \code{time}. \subsection[prior: Prior distributions of model parameters]{\code{prior}: Prior distributions of model parameters} Every population-level effect has its corresponding regression parameter. 
These parameters are named as \code{b\_<coef>}, where \code{<coef>} represents the name of the corresponding population-level effect.
In \pkg{brms} it is possible to increase the degrees of freedom (which will often improve convergence), although the prior no longer resembles a horseshoe in this case\footnote{This class of priors is often referred to as hierarchical shrinkage family, which contains the original horseshoe prior as a special case.}. For more details see \cite{carvalho2009, carvalho2010}. Each group-level effect of each grouping factor has a standard deviation parameter, which is restricted to be non-negative and, by default, has a half Student-$t$ prior with $3$ degrees of freedom and a scale parameter that is minimally $10$. For non-ordinal models, \pkg{brms} tries to evaluate if the scale is large enough to be considered only weakly informative for the model at hand by comparing it with the standard deviation of the response after applying the link function. If this is not the case, it will increase the scale based on the aforementioned standard deviation\footnote{Changing priors based on the data is not truly Bayesian and might rightly be criticized. However, it helps avoiding the problem of too informative default priors without always forcing users to define their own priors. The latter would also be problematic as not all users can be expected to be well educated Bayesians and reasonable default priors will help them a lot in using Bayesian methods.}. \pkg{Stan} implicitly defines a half Student-$t$ prior by using a Student-$t$ prior on a restricted parameter \citep{stanM2017}. For other reasonable priors on standard deviations see \cite{gelman2006}. In \pkg{brms}, standard deviation parameters are named as \code{sd\_\_} so that \code{sd\_patient\_Intercept} and \code{sd\_patient\_age} are the parameter names in the example. 
If desired, it is possible to set a different prior on each parameter, but statements such as \code{set_prior("student_t(3,0,5)", class = "sd", group = "patient")} or even \code{set_prior("student_t(3,0,5)", class = "sd")} may also be used and are again faster because of vectorization. If there is more than one group-level effect per grouping factor, correlations between group-level effects are estimated. As mentioned in Section~\ref{model}, the LKJ-Correlation prior with parameter $\zeta > 0$ \citep{lewandowski2009} is used for this purpose. In \pkg{brms}, this prior is abbreviated as \code{"lkj(zeta)"} and correlation matrix parameters are named as \code{cor\_}, (e.g., \code{cor_patient}), so that \code{set_prior("lkj(2)", class = "cor", group = "patient")} is a valid statement. To set the same prior on every correlation matrix in the model, \code{set_prior("lkj(2)", class = "cor")} is also allowed, but does not come with any efficiency increases. Other model parameters such as the residual standard deviation \code{sigma} in normal models or the \code{shape} in Gamma models have their priors defined in the same way, where each of them is treated as having its own parameter class. A complete overview on possible prior distributions is given in the \pkg{Stan} user's manual \citep{stanM2017}. Note that \pkg{brms} does not thoroughly check if the priors are written in correct \pkg{Stan} language. Instead, \pkg{Stan} will check their syntactical correctness when the model is parsed to \proglang{C++} and return an error if they are not. This, however, does not imply that priors are always meaningful if they are accepted by \pkg{Stan}. Although \pkg{brms} tries to find common problems (e.g., setting bounded priors on unbounded parameters), there is no guarantee that the defined priors are reasonable for the model. 
\subsection[control: Adjusting the sampling behavior of Stan]{\code{control}: Adjusting the sampling behavior of \pkg{Stan}} In addition to choosing the number of iterations, warmup samples, and chains, users can control the behavior of the NUTS sampler by using the \code{control} argument. The most important reason to use \code{control} is to decrease (or eliminate at best) the number of divergent transitions that cause a bias in the obtained posterior samples. Whenever you see the warning \code{"There were x divergent transitions after warmup."}, you should really think about increasing \code{adapt_delta}. To do this, write \code{control = list(adapt_delta = <x>)}, where \code{<x>} should usually be a value between \code{0.8} (current default) and \code{1}. Increasing \code{adapt_delta} will slow down the sampler but will decrease the number of divergent transitions threatening the validity of your posterior samples. Another problem arises when the depth of the tree being evaluated in each iteration is exceeded. This is less common than having divergent transitions, but may also bias the posterior samples. When it happens, \pkg{Stan} will throw out a warning suggesting to increase \code{max_treedepth}, which can be accomplished by writing \code{control = list(max_treedepth = <x>)} with a positive integer \code{<x>} that should usually be larger than the current default of \code{10}.
However, as \pkg{brms} utilizes the NUTS sampler \citep{hoffman2014} implemented in \pkg{Stan}, even complex models can often be fitted with not more than a few thousand samples. Of course, every iteration is more computationally intensive and time-consuming than the iterations of other algorithms, but the quality of the samples (i.e., the effective sample size per iteration) is usually higher. After the posterior samples have been computed, the \code{brm} function returns an \proglang{R} object, containing (among others) the fully commented model code in \pkg{Stan} language, the data to fit the model, and the posterior samples themselves. The model code and data for the present example can be extracted through \code{stancode(fit1)} and \code{standata(fit1)} respectively\footnote{Both model code and data may be amended and used to fit new models. That way, \pkg{brms} can also serve as a good starting point in building more complicated models in \pkg{Stan}, directly.}. A model summary is readily available using \begin{Sinput} R> summary(fit1, waic = TRUE) \end{Sinput} \begin{Soutput} Family: lognormal (identity) Formula: time | cens(censored) ~ age * sex + disease + (1 + age | patient) Data: kidney (Number of observations: 76) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: 673.51 Group-Level Effects: ~patient (Number of levels: 38) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 0.40 0.28 0.01 1.01 1731 1 sd(age) 0.01 0.01 0.00 0.02 1137 1 cor(Intercept,age) -0.13 0.46 -0.88 0.76 3159 1 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept 2.73 0.96 0.82 4.68 2139 1 age 0.01 0.02 -0.03 0.06 1614 1 sexfemale 2.42 1.13 0.15 4.64 2065 1 diseaseGN -0.40 0.53 -1.45 0.64 2664 1 diseaseAN -0.52 0.50 -1.48 0.48 2713 1 diseasePKD 0.60 0.74 -0.86 2.02 2968 1 age:sexfemale -0.02 0.03 -0.07 0.03 1956 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI 
Eff.Sample Rhat sigma 1.15 0.13 0.91 1.44 4000 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Soutput} On the top of the output, some general information on the model is given, such as family, formula, number of iterations and chains, as well as the WAIC. Next, group-level effects are displayed separately for each grouping factor in terms of standard deviations and correlations between group-level effects. On the bottom of the output, population-level effects are displayed. If incorporated, autocorrelation and family specific parameters (e.g., the residual standard deviation \code{sigma}) are also given. In general, every parameter is summarized using the mean (\code{Estimate}) and the standard deviation (\code{Est.Error}) of the posterior distribution as well as two-sided 95\% Credible intervals (\code{l-95\% CI} and \code{u-95\% CI}) based on quantiles. The \code{Eff.Sample} value is an estimation of the effective sample size; that is the number of independent samples from the posterior distribution that would be expected to yield the same standard error of the posterior mean as is obtained from the dependent samples returned by the MCMC algorithm. The \code{Rhat} value provides information on the convergence of the algorithm (cf., \citeauthor{gelman1992}, \citeyear{gelman1992}). If \code{Rhat} is considerably greater than 1 (i.e., $> 1.1$), the chains have not yet converged and it is necessary to run more iterations and/or set stronger priors. To visually investigate the chains as well as the posterior distribution, the \code{plot} method can be used (see Figure~\ref{kidney_plot}). An even more detailed investigation can be achieved by applying the \pkg{shinystan} package \citep{gabry2015} through method \code{launch_shiny}. 
With respect to the above summary, \code{sexfemale} seems to be the only population-level effect with considerable influence on the response. Because the mean of \code{sexfemale} is positive, the model predicts longer periods without an infection for females than for males. Effects of population-level predictors can also be visualized with the \code{conditional_effects} method (see Figure~\ref{kidney_conditional_effects}). \begin{figure}[ht] \centering \includegraphics[width=0.95\textwidth]{kidney_plot.pdf} \caption{Trace and Density plots of all relevant parameters of the kidney model discussed in Section~\ref{software}.} \label{kidney_plot} \end{figure} \begin{figure}[ht] \centering \includegraphics[height=0.90\textheight]{kidney_conditional_effects.pdf} \caption{Conditional effects plots of all population-level predictors of the kidney model discussed in Section~\ref{software}.} \label{kidney_conditional_effects} \end{figure} Looking at the group-level effects, the standard deviation parameter of \code{age} is suspiciously small. To test whether it is smaller than the standard deviation parameter of \code{Intercept}, we apply the \code{hypothesis} method: \begin{Sinput} R> hypothesis(fit1, "Intercept - age > 0", class = "sd", group = "patient") \end{Sinput} \begin{Soutput} Hypothesis Tests for class sd_patient: Estimate Est.Error l-95% CI u-95% CI Evid.Ratio Intercept-age > 0 0.39 0.27 0.03 Inf 67.97 * --- '*': The expected value under the hypothesis lies outside the 95% CI. \end{Soutput} The one-sided 95\% credibility interval does not contain zero, thus indicating that the standard deviations differ from each other in the expected direction. In accordance with this finding, the \code{Evid.Ratio} shows that the hypothesis being tested (i.e., \code{Intercept - age > 0}) is about $68$ times more likely than the alternative hypothesis \code{Intercept - age < 0}. 
It is important to note that this kind of comparison is not easily possible when applying frequentist methods, because in this case only point estimates are available for group-level standard deviations and correlations. When looking at the correlation between both group-level effects, its distribution displayed in Figure~\ref{kidney_plot} and the 95\% credibility interval in the summary output appear to be rather wide. This indicates that there is not enough evidence in the data to reasonably estimate the correlation. Together, the small standard deviation of \code{age} and the uncertainty in the correlation raise the question if \code{age} should be modeled as a group specific term at all. To answer this question, we fit another model without this term: \begin{Sinput} R> fit2 <- update(fit1, formula. = ~ . - (1 + age|patient) + (1|patient)) \end{Sinput} A good way to compare both models is leave-one-out cross-validation (LOO)\footnote{The WAIC is an approximation of LOO that is faster and easier to compute. However, according to \cite{vehtari2015}, LOO may be the preferred method to perform model comparisons.}, which can be called in \pkg{brms} using \begin{Sinput} R> LOO(fit1, fit2) \end{Sinput} \begin{Soutput} LOOIC SE fit1 675.45 45.18 fit2 674.17 45.06 fit1 - fit2 1.28 0.99 \end{Soutput} In the output, the LOO information criterion for each model as well as the difference of the LOOs each with its corresponding standard error is shown. Both LOO and WAIC are approximately normal if the number of observations is large so that the standard errors can be very helpful in evaluating differences in the information criteria. However, for small sample sizes, standard errors should be interpreted with care \citep{vehtari2015}. For the present example, it is immediately evident that both models have very similar fit, indicating that there is little benefit in adding group specific coefficients for \code{age}. 
\subsection{Modeling ordinal data} In the following, we want to briefly discuss a second example to demonstrate the capabilities of \pkg{brms} in handling ordinal data. \cite{ezzet1991} analyze data from a two-treatment, two-period crossover trial to compare 2 inhalation devices for delivering the drug salbutamol in 286 asthma patients. Patients were asked to rate the clarity of leaflet instructions accompanying each device, using a four-point ordinal scale. Ratings are predicted by \code{treat} to indicate which of the two inhaler devices was used, \code{period} to indicate the time of administration, and \code{carry} to model possible carry over effects. \begin{Sinput} R> data("inhaler") R> head(inhaler, n = 1) \end{Sinput} \begin{Soutput} subject rating treat period carry 1 1 1 0.5 0.5 0 \end{Soutput} Typically, the ordinal response is assumed to originate from the categorization of a latent continuous variable. That is there are $K$ latent thresholds (model intercepts), which partition the continuous scale into the $K + 1$ observable, ordered categories. Following this approach leads to the cumulative or graded-response model \citep{samejima1969} for ordinal data implemented in many \proglang{R} packages. In \pkg{brms}, it is available via family \code{cumulative}. Fitting the cumulative model to the inhaler data, also incorporating an intercept varying by subjects, may look this: \begin{Sinput} fit3 <- brm(formula = rating ~ treat + period + carry + (1|subject), data = inhaler, family = cumulative) \end{Sinput} While the support for ordinal data in most \proglang{R} packages ends here\footnote{Exceptions known to us are the packages \pkg{ordinal} \citep{christensen2015} and \pkg{VGAM} \citep{yee2010}. The former supports only cumulative models but with different modeling option for the thresholds. 
The latter supports all four ordinal families also implemented in \pkg{brms} as well as category specific effects but no group-specific effects.}, \pkg{brms} allows changes to this basic model in at least three ways. First of all, three additional ordinal families are implemented. Families \code{sratio} (stopping ratio) and \code{cratio} (continuation ratio) are so called sequential models \citep{tutz1990}. Both are equivalent to each other for symmetric link functions such as \code{logit} but will differ for asymmetric ones such as \code{cloglog}. The fourth ordinal family is \code{acat} (adjacent category) also known as partial credits model \citep{masters1982, andrich1978a}. Second, restrictions to the thresholds can be applied. By default, thresholds are ordered for family \code{cumulative} or are completely free to vary for the other families. This is indicated by argument \code{threshold = "flexible"} (default) in \code{brm}. Using \code{threshold = "equidistant"} forces the distance between two adjacent thresholds to be the same, that is $$\tau_k = \tau_1 + (k-1)\delta$$ for thresholds $\tau_k$ and distance $\delta$ (see also \citealp{andrich1978b}; \citealp{andrich1978a}; \citealp{andersen1977}). Third, the assumption that predictors have constant effects across categories may be relaxed for non-cumulative ordinal models \citep{vanderark2001, tutz2000} leading to category specific effects. For instance, variable \code{treat} may only have an impact on the decision between category 3 and 4, but not on the lower categories. Without using category specific effects, such a pattern would remain invisible. To illustrate all three modeling options at once, we fit a (hardly theoretically justified) stopping ratio model with equidistant thresholds and category specific effects for variable \code{treat} on which we apply an informative prior. 
\begin{Sinput} fit4 <- brm(formula = rating ~ period + carry + cs(treat) + (1|subject), data = inhaler, family = sratio, threshold = "equidistant", prior = set_prior("normal(-1,2)", coef = "treat")) \end{Sinput} Note that priors are defined on category specific effects in the same way as for other population-level effects. A model summary can be obtained in the same way as before: \begin{Sinput} R> summary(fit4, waic = TRUE) \end{Sinput} \begin{Soutput} Family: sratio (logit) Formula: rating ~ period + carry + cs(treat) + (1 | subject) Data: inhaler (Number of observations: 572) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: 911.9 Group-Level Effects: ~subject (Number of levels: 286) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 1.05 0.23 0.56 1.5 648 1 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept[1] 0.72 0.13 0.48 0.99 2048 1 Intercept[2] 2.67 0.35 2.00 3.39 969 1 Intercept[3] 4.62 0.66 3.36 5.95 1037 1 period 0.25 0.18 -0.09 0.61 4000 1 carry -0.26 0.22 -0.70 0.17 1874 1 treat[1] -0.96 0.30 -1.56 -0.40 1385 1 treat[2] -0.65 0.49 -1.60 0.27 4000 1 treat[3] -2.65 1.21 -5.00 -0.29 4000 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat delta 1.95 0.32 1.33 2.6 1181 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Soutput} Trace and density plots of the model parameters as produced by \code{plot(fit4)} can be found in Figure~\ref{inhaler_plot}. We see that three intercepts (thresholds) and three effects of \code{treat} have been estimated, because a four-point scale was used for the ratings. The treatment effect seems to be strongest between category 3 and 4. At the same time, however, the credible interval is also much larger. 
In fact, the intervals of all three effects of \code{treat} are highly overlapping, which indicates that there is not enough evidence in the data to support category specific effects. On the bottom of the output, parameter \code{delta} specifies the distance between two adjacent thresholds and indeed the intercepts differ from each other by the magnitude of \code{delta}. \begin{figure}[ht] \centering \includegraphics[width=0.95\textwidth]{inhaler_plot.pdf} \caption{Trace and Density plots of all relevant parameters of the inhaler model discussed in Section~\ref{software}.} \label{inhaler_plot} \end{figure} \section[Comparison]{Comparison between packages} Over the years, many \proglang{R} packages have been developed that implement MLMs, each being more or less general in their supported models. Comparing all of them to \pkg{brms} would be too extensive and barely helpful for the purpose of the present paper. Accordingly, we concentrate on a comparison with four packages. These are \pkg{lme4} \citep{bates2015} and \pkg{MCMCglmm} \citep{hadfield2010}, which are possibly the most general and widely applied \proglang{R} packages for MLMs, as well as \pkg{rstanarm} \citep{rstanarm2016} and \pkg{rethinking} \citep{mcelreath2016}, which are both based on \pkg{Stan}. As opposed to the other packages, \pkg{rethinking} was primarily written for teaching purposes and requires the user to specify the full model explicitly using its own simplified \pkg{BUGS}-like syntax thus helping users to better understand the models that are fitted to their data. Regarding model families, all five packages support the most common types such as linear and binomial models as well as Poisson models for count data. Currently, \pkg{brms} and \pkg{MCMCglmm} provide more flexibility when modeling categorical and ordinal data. In addition, \pkg{brms} supports robust linear regression using Student's distribution, which is also implemented on a GitHub branch of \pkg{rstanarm}. 
\pkg{MCMCglmm} allows fitting multinomial models that are currently not available in the other packages. Generalizing classical MLMs, \pkg{brms} and \pkg{MCMCglmm} allow fiting zero-inflated and hurdle models dealing with excess zeros in the response. Furthermore, \pkg{brms} supports non-linear models similar to the \pkg{nlme} package \citep{nlme2016} providing great flexibility but also requiring more care to produce reasonable results. Another flexible model class are generalized additive mixed models \citep{hastie1990,wood2011,zuur2014}, which can be fitted with \pkg{brms} and \pkg{rstanarm}. In all five packages, there are quite a few additional modeling options. Variable link functions can be specified in all packages except for \pkg{MCMCglmm}, in which only one link is available per family. \pkg{MCMCglmm} generally supports multivariate responses using data in wide format, whereas \pkg{brms} currently only offers this option for families \code{gaussian} and \code{student}. It should be noted that it is always possible to transform data from wide to long format for compatibility with the other packages. Autocorrelation of the response can only be fitted in \pkg{brms}, which supports auto-regressive as well as moving-average effects. For ordinal models in \pkg{brms}, effects of predictors may vary across different levels of the response as explained in the inhaler example. A feature currently exclusive to \pkg{rethinking} is the possibility to impute missing values in the predictor variables. Information criteria are available in all three packages. The advantage of WAIC and LOO implemented in \pkg{brms}, \pkg{rstanarm}, and \pkg{rethinking} is that their standard errors can be easily estimated to get a better sense of the uncertainty in the criteria. 
Comparing the prior options of the Bayesian packages, \pkg{brms} and \pkg{rethinking} offer a little more flexibility than \pkg{MCMCglmm} and \pkg{rstanarm}, as virtually any prior distribution can be applied on population-level effects as well as on the standard deviations of group-level effects. In addition, we believe that the way priors are specified in \pkg{brms} and \pkg{rethinking} is more intuitive as it is directly evident what prior is actually applied. A more detailed comparison of the packages can be found in Table~\ref{comparison1} and Table~\ref{comparison2}. To facilitate the understanding of the model formulation in \pkg{brms}, Table~\ref{syntax} shows \pkg{lme4} function calls to fit sample models along with the equivalent \pkg{brms} syntax. So far the focus was only on capabilities. Another important topic is speed, especially for more complex models. Of course, \pkg{lme4} is usually much faster than the other packages as it uses maximum likelihood methods instead of MCMC algorithms, which are slower by design. To compare the efficiency of the four Bayesian packages, we fitted multilevel models on real data sets using the minimum effective sample size divided by sampling time as a measure of sampling efficiency. One should always aim at running multiple chains as one cannot be sure that a single chain really explores the whole posterior distribution. However, as \pkg{MCMCglmm} does not come with a built-in option to run multiple chains, we used only a single chain to fit the models after making sure that it leads to the same results as multiple chains. The \proglang{R} code allowing to replicate the results is available as supplemental material. The first thing that becomes obvious when fitting the models is that \pkg{brms} and \pkg{rethinking} need to compile the \proglang{C++} model before actually fitting it, because the \pkg{Stan} code being parsed to \proglang{C++} is generated on the fly based on the user's input. 
Compilation takes about a half to one minute depending on the model complexity and computing power of the machine. This is not required by \pkg{rstanarm} and \pkg{MCMCglmm}, although the former is also based on \pkg{Stan}, as compilation takes place only once at installation time. While the latter approach saves the compilation time, the former is more flexible when it comes to model specification. For small and simple models, compilation time dominates the overall computation time, but for larger and more complex models, sampling will take several minutes or hours so that one minute more or less will not really matter, anymore. Accordingly, the following comparisons do not include the compilation time. In models containing only group-specific intercepts, \pkg{MCMCglmm} is usually more efficient than the \pkg{Stan} packages. However, when also estimating group-specific slopes, \pkg{MCMCglmm} falls behind the other packages and quite often refuses to sample at all unless one carefully specifies informative priors. Note that these results are obtained by running only a single chain. For all three \pkg{Stan} packages, sampling efficiency can easily be increased by running multiple chains in parallel. Comparing the \pkg{Stan} packages to each other, \pkg{brms} is usually most efficient for models with group-specific terms, whereas \pkg{rstanarm} tends to be roughly $50\%$ to $75\%$ as efficient at least for the analyzed data sets. The efficiency of \pkg{rethinking} is more variable depending on the model formulation and data, sometimes being slightly ahead of the other two packages, but usually being considerably less efficient. Generally, \pkg{rethinking} loses efficiency for models with many population-level effects presumably because one cannot use design matrices and vectorized prior specifications for population-level parameters. Note that it was not possible to specify the exact same priors across packages due to varying parameterizations. 
Of course, efficiency depends heavily on the model, chosen priors, and data at hand so that the present results should not be over-interpreted. \begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{lme4}} & \parbox{2cm}{\pkg{MCMCglmm}} \\ \hline \\ [-1.5ex] \parbox{6cm}{Supported model types:} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & no & no \\ Binomial models & yes & yes & yes \\ Categorical models & yes & no & yes \\ Multinomial models & no & no & yes \\ Count data models & yes & yes & yes \\ Survival models & yes$^1$ & yes & yes \\ Ordinal models & various & no & cumulative \\ Zero-inflated and hurdle models & yes & no & yes \\ Generalized additive models & yes & no & no \\ Non-linear models & yes & no & no \\ \hline \\ [-1.5ex] \parbox{5cm}{Additional modeling options:} & & & \\ [1ex] Variable link functions & various & various & no \\ Weights & yes & yes & no \\ Offset & yes & yes & using priors \\ Multivariate responses & limited & no & yes \\ Autocorrelation effects & yes & no & no \\ Category specific effects & yes & no & no \\ Standard errors for meta-analysis & yes & no & yes \\ Censored data & yes & no & yes \\ Truncated data & yes & no & no \\ Customized covariances & yes & no & yes \\ Missing value imputation & no & no & no \\ \hline \\ [-1.5ex] Bayesian specifics: & & & \\ [1ex] parallelization & yes & -- & no \\ population-level priors & flexible & --$^3$ & normal \\ group-level priors & normal & --$^3$ & normal \\ covariance priors & flexible & --$^3$ & restricted$^4$ \\ \hline \\ [-1.5ex] Other: & & & \\ [1ex] Estimator & HMC, NUTS & ML, REML & MH, Gibbs$^2$ \\ Information criterion & WAIC, LOO & AIC, BIC & DIC \\ \proglang{C++} compiler required & yes & no & no \\ Modularized & no & yes & no \\ \hline \end{tabular} \caption{Comparison of the capabilities of the \pkg{brms}, \pkg{lme4} and \pkg{MCMCglmm} package. 
Notes: (1) Weibull family only available in \pkg{brms}. (2) Estimator consists of a combination of both algorithms. (3) Priors may be imposed using the \pkg{blme} package \citep{chung2013}. (4) For details see \cite{hadfield2010}.} \label{comparison1} \end{table} \begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{rstanarm}} & \parbox{2cm}{\pkg{rethinking}} \\ \hline \\ [-1.5ex] \parbox{6cm}{Supported model types:} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & yes$^1$ & no \\ Binomial models & yes & yes & yes \\ Categorical models & yes & no & no \\ Multinomial models & no & no & no \\ Count data models & yes & yes & yes \\ Survival models & yes$^2$ & yes & yes \\ Ordinal models & various & cumulative$^3$ & no \\ Zero-inflated and hurdle models & yes & no & no \\ Generalized additive models & yes & yes & no \\ Non-linear models & yes & no & limited$^4$ \\ \hline \\ [-1.5ex] \parbox{5cm}{Additional modeling options:} & & & \\ [1ex] Variable link functions & various & various & various \\ Weights & yes & yes & no \\ Offset & yes & yes & yes \\ Multivariate responses & limited & no & no \\ Autocorrelation effects & yes & no & no \\ Category specific effects & yes & no & no \\ Standard errors for meta-analysis & yes & no & no \\ Censored data & yes & no & no \\ Truncated data & yes & no & yes \\ Customized covariances & yes & no & no \\ Missing value imputation & no & no & yes \\ \hline \\ [-1.5ex] Bayesian specifics: & & & \\ [1ex] parallelization & yes & yes & yes \\ population-level priors & flexible & normal, Student-t & flexible \\ group-level priors & normal & normal & normal \\ covariance priors & flexible & restricted$^5$ & flexible \\ \hline \\ [-1.5ex] Other: & & & \\ [1ex] Estimator & HMC, NUTS & HMC, NUTS & HMC, NUTS \\ Information criterion & WAIC, LOO & AIC, LOO & AIC, LOO \\ \proglang{C++} compiler required & yes & no & yes \\ Modularized & no & no & no \\ \hline 
\end{tabular} \caption{Comparison of the capabilities of the \pkg{brms}, \pkg{rstanarm} and \pkg{rethinking} packages. Notes: (1) Currently only implemented on a branch on GitHub. (2) Weibull family only available in \pkg{brms}. (3) No group-level terms allowed. (4) The parser is mainly written for linear models but also accepts some non-linear model specifications. (5) For details see \url{https://github.com/stan-dev/rstanarm/wiki/Prior-distributions}.} \label{comparison2} \end{table} \begin{table}[hbtp] \centering %\renewcommand{\arraystretch}{2} \begin{tabular}{ll} Dataset & \parbox{10cm}{Function call} \\ \hline \\ [-1.5ex] \parbox{2cm}{cake} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{lmer(angle $\sim$ recipe * temperature + (1|recipe:replicate), \\ \hspace*{5ex} data = cake)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(angle $\sim$ recipe * temperature + (1|recipe:replicate), \\ \hspace*{4ex} data = cake)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{sleepstudy} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{lmer(Reaction $\sim$ Days + (Days|Subject), data = sleepstudy)}} \\ [1.5ex] \pkg{brms} & \parbox{13cm}{\code{brm(Reaction $\sim$ Days + (Days|Subject), data = sleepstudy)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{cbpp$^1$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(cbind(incidence, size - incidence) $\sim$ period + (1 | herd), \\ \hspace*{6ex} family = binomial("logit"), data = cbpp)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(incidence | trials(size) $\sim$ period + (1 | herd), \\ \hspace*{4ex} family = binomial("logit"), data = cbpp)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{grouseticks$^1$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(TICKS $\sim$ YEAR + HEIGHT + (1|BROOD) + (1|LOCATION), \\ \hspace*{6ex} family = poisson("log"), data = grouseticks)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(TICKS $\sim$ YEAR + HEIGHT + (1|BROOD) + (1|LOCATION), \\ \hspace*{4ex} family = poisson("log"), data = grouseticks)}} \\ [2ex] \hline \\ [-1ex] 
\parbox{2cm}{VerbAgg$^2$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(r2 $\sim$ (Anger + Gender + btype + situ)\^{}2 + (1|id) \\ \hspace*{6ex} + (1|item), family = binomial, data = VerbAgg)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(r2 $\sim$ (Anger + Gender + btype + situ)\^{}2 + (1|id) \\ \hspace*{4ex} + (1|item), family = bernoulli, data = VerbAgg)}} \\ [2ex] \hline \\ [-1.5ex] \end{tabular} \caption{Comparison of the model syntax of \pkg{lme4} and \pkg{brms} using data sets included in \pkg{lme4}. Notes: (1) Default links are used so that the link argument may be omitted. (2) Fitting this model takes some time. A proper prior on the population-level effects (e.g., \code{prior = set\_prior("normal(0,5)")}) may help in increasing sampling speed.} \label{syntax} \end{table} \section{Conclusion} The present paper is meant to provide a general overview of the \proglang{R} package \pkg{brms} implementing MLMs using the probabilistic programming language \pkg{Stan} for full Bayesian inference. Although only a small selection of the modeling options available in \pkg{brms} are discussed in detail, I hope that this article can serve as a good starting point to further explore the capabilities of the package. For the future, I have several plans on how to improve the functionality of \pkg{brms}. I want to include multivariate models that can handle multiple response variables coming from different distributions as well as new correlation structures, for instance for spatial data. Similarly, distributional regression models as well as mixture response distributions appear to be valuable extensions of the package. I am always grateful for any suggestions and ideas regarding new features. \section*{Acknowledgments} First of all, I would like to thank the Stan Development Team for creating the probabilistic programming language \pkg{Stan}, which is an incredibly powerful and flexible tool for performing full Bayesian inference. 
Without it, \pkg{brms} could not fit a single model. Two anonymous reviewers provided very detailed and thoughtful suggestions to substantially improve both the package and the paper. Furthermore, Prof. Philipp Doebler and Prof. Heinz Holling have given valuable feedback on earlier versions of the paper. Lastly, I want to thank the many users who reported bugs or had ideas for new features, thus helping to continuously improve \pkg{brms}. \bibliography{citations_overview} \end{document} brms/R/0000755000176200001440000000000014504270214011415 5ustar liggesusersbrms/R/lsp.R0000644000176200001440000000364214364257623012360 0ustar liggesusers# find all namespace entries of a package, which are of # a particular type for instance all exported objects # retrieved from https://github.com/raredd/rawr # @param package the package name # @param what type of the objects to retrieve ("all" for all objects) # @param pattern regex that must be matches by the object names # @return a character vector of object names lsp <- function(package, what = "all", pattern = ".*") { if (!is.character(substitute(package))) package <- deparse0(substitute(package)) ns <- asNamespace(package) ## base package does not have NAMESPACE if (isBaseNamespace(ns)) { res <- ls(.BaseNamespaceEnv, all.names = TRUE) return(res[grep(pattern, res, perl = TRUE, ignore.case = TRUE)]) } else { ## for non base packages if (exists('.__NAMESPACE__.', envir = ns, inherits = FALSE)) { wh <- get('.__NAMESPACE__.', inherits = FALSE, envir = asNamespace(package, base.OK = FALSE)) what <- if (missing(what)) 'all' else if ('?' 
%in% what) return(ls(wh)) else ls(wh)[pmatch(what[1], ls(wh))] if (!is.null(what) && !any(what %in% c('all', ls(wh)))) stop('\'what\' should be one of ', paste0(shQuote(ls(wh)), collapse = ', '), ', or \'all\'', domain = NA) res <- sapply(ls(wh), function(x) getNamespaceInfo(ns, x)) res <- rapply(res, ls, classes = 'environment', how = 'replace', all.names = TRUE) if (is.null(what)) return(res[grep(pattern, res, perl = TRUE, ignore.case = TRUE)]) if (what %in% 'all') { res <- ls(getNamespace(package), all.names = TRUE) return(res[grep(pattern, res, perl = TRUE, ignore.case = TRUE)]) } if (any(what %in% ls(wh))) { res <- res[[what]] return(res[grep(pattern, res, perl = TRUE, ignore.case = TRUE)]) } } else stop(sprintf('no NAMESPACE file found for package %s', package)) } } brms/R/reloo.R0000644000176200001440000001460214361545260012672 0ustar liggesusers#' Compute exact cross-validation for problematic observations #' #' Compute exact cross-validation for problematic observations for which #' approximate leave-one-out cross-validation may return incorrect results. #' Models for problematic observations can be run in parallel using the #' \pkg{future} package. #' #' @inheritParams predict.brmsfit #' @param x An \R object of class \code{brmsfit} or \code{loo} depending #' on the method. #' @param loo An \R object of class \code{loo}. #' @param fit An \R object of class \code{brmsfit}. #' @param k_threshold The threshold at which Pareto \eqn{k} #' estimates are treated as problematic. Defaults to \code{0.7}. #' See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}} #' for more details. #' @param check Logical; If \code{TRUE} (the default), some checks #' check are performed if the \code{loo} object was generated #' from the \code{brmsfit} object passed to argument \code{fit}. #' @param recompile Logical, indicating whether the Stan model should be #' recompiled. 
This may be necessary if you are running \code{reloo} on #' another machine than the one used to fit the model. #' @param future_args A list of further arguments passed to #' \code{\link[future:future]{future}} for additional control over parallel #' execution if activated. #' @param ... Further arguments passed to #' \code{\link{update.brmsfit}} and \code{\link{log_lik.brmsfit}}. #' #' @return An object of the class \code{loo}. #' #' @details #' Warnings about Pareto \eqn{k} estimates indicate observations #' for which the approximation to LOO is problematic (this is described in #' detail in Vehtari, Gelman, and Gabry (2017) and the #' \pkg{\link[loo:loo-package]{loo}} package documentation). #' If there are \eqn{J} observations with \eqn{k} estimates above #' \code{k_threshold}, then \code{reloo} will refit the original model #' \eqn{J} times, each time leaving out one of the \eqn{J} #' problematic observations. The pointwise contributions of these observations #' to the total ELPD are then computed directly and substituted for the #' previous estimates from these \eqn{J} observations that are stored in the #' original \code{loo} object. #' #' @seealso \code{\link{loo}}, \code{\link{kfold}} #' #' @examples #' \dontrun{ #' fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson()) #' # throws warning about some pareto k estimates being too high #' (loo1 <- loo(fit1)) #' (reloo1 <- reloo(fit1, loo = loo1, chains = 1)) #' } #' #' @export reloo.brmsfit <- function(x, loo, k_threshold = 0.7, newdata = NULL, resp = NULL, check = TRUE, recompile = NULL, future_args = list(), ...) 
{ stopifnot(is.loo(loo), is.brmsfit(x), is.list(future_args)) if (is.brmsfit_multiple(x)) { warn_brmsfit_multiple(x) class(x) <- "brmsfit" } if (is.null(newdata)) { mf <- model.frame(x) } else { mf <- as.data.frame(newdata) } mf <- rm_attr(mf, c("terms", "brmsframe")) if (NROW(mf) != NROW(loo$pointwise)) { stop2("Number of observations in 'loo' and 'x' do not match.") } check <- as_one_logical(check) if (check) { yhash_loo <- attr(loo, "yhash") yhash_fit <- hash_response(x, newdata = newdata) if (!is_equal(yhash_loo, yhash_fit)) { stop2( "Response values used in 'loo' and 'x' do not match. ", "If this is a false positive, please set 'check' to FALSE." ) } } if (is.null(loo$diagnostics$pareto_k)) { stop2("No Pareto k estimates found in the 'loo' object.") } obs <- loo::pareto_k_ids(loo, k_threshold) J <- length(obs) if (J == 0L) { message( "No problematic observations found. ", "Returning the original 'loo' object." ) return(loo) } # split dots for use in log_lik and update dots <- list(...) ll_arg_names <- arg_names("log_lik") ll_arg_names <- intersect(names(dots), ll_arg_names) ll_args <- dots[ll_arg_names] ll_args$allow_new_levels <- TRUE ll_args$resp <- resp ll_args$combine <- TRUE # cores is used in both log_lik and update up_arg_names <- setdiff(names(dots), setdiff(ll_arg_names, "cores")) up_args <- dots[up_arg_names] up_args$refresh <- 0 .reloo <- function(j) { omitted <- obs[j] mf_omitted <- mf[-omitted, , drop = FALSE] fit_j <- x up_args$object <- fit_j up_args$newdata <- mf_omitted up_args$data2 <- subset_data2(x$data2, -omitted) fit_j <- SW(do_call(update, up_args)) ll_args$object <- fit_j ll_args$newdata <- mf[omitted, , drop = FALSE] ll_args$newdata2 <- subset_data2(x$data2, omitted) return(do_call(log_lik, ll_args)) } lls <- futures <- vector("list", J) message( J, " problematic observation(s) found.", "\nThe model will be refit ", J, " times." 
) x <- recompile_model(x, recompile = recompile) future_args$FUN <- .reloo future_args$seed <- TRUE for (j in seq_len(J)) { message( "\nFitting model ", j, " out of ", J, " (leaving out observation ", obs[j], ")" ) future_args$args <- list(j) futures[[j]] <- do_call("futureCall", future_args, pkg = "future") } for (j in seq_len(J)) { lls[[j]] <- future::value(futures[[j]]) } # most of the following code is taken from rstanarm:::reloo # compute elpd_{loo,j} for each of the held out observations elpd_loo <- ulapply(lls, log_mean_exp) # compute \hat{lpd}_j for each of the held out observations (using log-lik # matrix from full posterior, not the leave-one-out posteriors) mf_obs <- mf[obs, , drop = FALSE] data2_obs <- subset_data2(x$data2, obs) ll_x <- log_lik(x, newdata = mf_obs, newdata2 = data2_obs) hat_lpd <- apply(ll_x, 2, log_mean_exp) # compute effective number of parameters p_loo <- hat_lpd - elpd_loo # replace parts of the loo object with these computed quantities sel <- c("elpd_loo", "p_loo", "looic") loo$pointwise[obs, sel] <- cbind(elpd_loo, p_loo, -2 * elpd_loo) new_pw <- loo$pointwise[, sel, drop = FALSE] loo$estimates[, 1] <- colSums(new_pw) loo$estimates[, 2] <- sqrt(nrow(loo$pointwise) * apply(new_pw, 2, var)) # what should we do about pareto-k? for now setting them to 0 loo$diagnostics$pareto_k[obs] <- 0 loo } #' @rdname reloo.brmsfit #' @export reloo.loo <- function(x, fit, ...) { reloo(fit, loo = x, ...) } # the generic will eventually be moved to 'loo' #' @rdname reloo.brmsfit #' @export reloo <- function(x, ...) { UseMethod("reloo") } brms/R/stan-likelihood.R0000644000176200001440000011621214477011663014643 0ustar liggesusers# unless otherwise specified, functions return a single character # string defining the likelihood of the model in Stan language stan_log_lik <- function(x, ...) 
{ UseMethod("stan_log_lik") } # Stan code for the model likelihood # @param bterms object of class brmsterms # @param data data passed by the user # @param mix optional mixture component ID # @param ptheta are mixing proportions predicted? #' @export stan_log_lik.family <- function(x, bterms, data, threads, normalize, mix = "", ptheta = FALSE, ...) { stopifnot(is.brmsterms(bterms)) stopifnot(length(mix) == 1L) bterms$family <- x resp <- usc(combine_prefix(bterms)) # prepare family part of the likelihood log_lik_args <- nlist(bterms, resp, mix, threads) log_lik_fun <- paste0("stan_log_lik_", prepare_family(bterms)$fun) ll <- do_call(log_lik_fun, log_lik_args) # incorporate other parts into the likelihood args <- nlist(ll, bterms, data, resp, threads, normalize, mix, ptheta) if (nzchar(mix)) { out <- do_call(stan_log_lik_mix, args) } else if (is.formula(bterms$adforms$cens)) { out <- do_call(stan_log_lik_cens, args) } else if (is.formula(bterms$adforms$weights)) { out <- do_call(stan_log_lik_weights, args) } else { out <- do_call(stan_log_lik_general, args) } if (grepl(stan_nn_regex(), out) && !nzchar(mix)) { # loop over likelihood if it cannot be vectorized out <- paste0( " for (n in 1:N", resp, ") {\n", stan_nn_def(threads), " ", out, " }\n" ) } out } #' @export stan_log_lik.mixfamily <- function(x, bterms, threads, ...) { dp_ids <- dpar_id(names(bterms$dpars)) fdp_ids <- dpar_id(names(bterms$fdpars)) resp <- usc(bterms$resp) ptheta <- any(dpar_class(names(bterms$dpars)) %in% "theta") ll <- rep(NA, length(x$mix)) for (i in seq_along(x$mix)) { sbterms <- bterms sbterms$dpars <- sbterms$dpars[dp_ids == i] sbterms$fdpars <- sbterms$fdpars[fdp_ids == i] ll[i] <- stan_log_lik( x$mix[[i]], sbterms, mix = i, ptheta = ptheta, threads = threads, ... 
) } resp <- usc(combine_prefix(bterms)) n <- stan_nn(threads) has_weights <- is.formula(bterms$adforms$weights) weights <- str_if(has_weights, glue("weights{resp}{n} * ")) out <- glue( " // likelihood of the mixture model\n", " for (n in 1:N{resp}) {{\n", stan_nn_def(threads), " array[{length(ll)}] real ps;\n" ) str_add(out) <- collapse(" ", ll) str_add(out) <- glue( " {tp()}{weights}log_sum_exp(ps);\n", " }}\n" ) out } #' @export stan_log_lik.brmsterms <- function(x, ...) { stan_log_lik(x$family, bterms = x, ...) } #' @export stan_log_lik.mvbrmsterms <- function(x, ...) { if (x$rescor) { out <- stan_log_lik(as.brmsterms(x), ...) } else { out <- ulapply(x$terms, stan_log_lik, ...) } out } # default likelihood in Stan language stan_log_lik_general <- function(ll, bterms, data, threads, normalize, resp = "", ...) { stopifnot(is.sdist(ll)) require_n <- grepl(stan_nn_regex(), ll$args) n <- str_if(require_n, stan_nn(threads), stan_slice(threads)) lpdf <- stan_log_lik_lpdf_name(bterms, normalize, dist = ll$dist) Y <- stan_log_lik_Y_name(bterms) tr <- stan_log_lik_trunc(ll, bterms, data, resp = resp, threads = threads) glue("{tp()}{ll$dist}_{lpdf}({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n") } # censored likelihood in Stan language stan_log_lik_cens <- function(ll, bterms, data, threads, normalize, resp = "", ...) 
{ stopifnot(is.sdist(ll)) s <- wsp(nsp = 4) cens <- eval_rhs(bterms$adforms$cens) lpdf <- stan_log_lik_lpdf_name(bterms, normalize, dist = ll$dist) has_weights <- is.formula(bterms$adforms$weights) Y <- stan_log_lik_Y_name(bterms) n <- stan_nn(threads) w <- str_if(has_weights, glue("weights{resp}{n} * ")) tr <- stan_log_lik_trunc(ll, bterms, data, resp = resp, threads = threads) tp <- tp() out <- glue( "// special treatment of censored data\n", s, "if (cens{resp}{n} == 0) {{\n", s, "{tp}{w}{ll$dist}_{lpdf}({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n", s, "}} else if (cens{resp}{n} == 1) {{\n", s, "{tp}{w}{ll$dist}_lccdf({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n", s, "}} else if (cens{resp}{n} == -1) {{\n", s, "{tp}{w}{ll$dist}_lcdf({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n" ) if (cens$vars$y2 != "NA") { # interval censoring is required str_add(out) <- glue( s, "}} else if (cens{resp}{n} == 2) {{\n", s, "{tp}{w}log_diff_exp(\n", s, " {ll$dist}_lcdf(rcens{resp}{n}{ll$shift} | {ll$args}),\n", s, " {ll$dist}_lcdf({Y}{resp}{n}{ll$shift} | {ll$args})\n", s, " ){tr};\n" ) } str_add(out) <- glue(s, "}}\n") out } # weighted likelihood in Stan language stan_log_lik_weights <- function(ll, bterms, data, threads, normalize, resp = "", ...) { stopifnot(is.sdist(ll)) tr <- stan_log_lik_trunc(ll, bterms, data, resp = resp, threads = threads) lpdf <- stan_log_lik_lpdf_name(bterms, normalize, dist = ll$dist) Y <- stan_log_lik_Y_name(bterms) n <- stan_nn(threads) glue( "{tp()}weights{resp}{n} * ({ll$dist}_{lpdf}", "({Y}{resp}{n}{ll$shift} | {ll$args}){tr});\n" ) } # likelihood of a single mixture component stan_log_lik_mix <- function(ll, bterms, data, mix, ptheta, threads, normalize, resp = "", ...) 
{ stopifnot(is.sdist(ll)) theta <- str_if(ptheta, glue("theta{mix}{resp}[n]"), glue("log(theta{mix}{resp})") ) tr <- stan_log_lik_trunc(ll, bterms, data, resp = resp, threads = threads) lpdf <- stan_log_lik_lpdf_name(bterms, normalize, dist = ll$dist) Y <- stan_log_lik_Y_name(bterms) n <- stan_nn(threads) if (is.formula(bterms$adforms$cens)) { # mostly copied over from stan_log_lik_cens cens <- eval_rhs(bterms$adforms$cens) s <- wsp(nsp = 4) out <- glue( "// special treatment of censored data\n", s, "if (cens{resp}{n} == 0) {{\n", s, " ps[{mix}] = {theta} + ", "{ll$dist}_{lpdf}({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n", s, "}} else if (cens{resp}{n} == 1) {{\n", s, " ps[{mix}] = {theta} + ", "{ll$dist}_lccdf({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n", s, "}} else if (cens{resp}{n} == -1) {{\n", s, " ps[{mix}] = {theta} + ", "{ll$dist}_lcdf({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n" ) if (cens$vars$y2 != "NA") { # interval censoring is required str_add(out) <- glue( s, "}} else if (cens{resp}{n} == 2) {{\n", s, " ps[{mix}] = {theta} + log_diff_exp(\n", s, " {ll$dist}_lcdf(rcens{resp}{n}{ll$shift} | {ll$args}),\n", s, " {ll$dist}_lcdf({Y}{resp}{n}{ll$shift} | {ll$args})\n", s, " ){tr};\n" ) } str_add(out) <- glue(s, "}}\n") } else { out <- glue( "ps[{mix}] = {theta} + ", "{ll$dist}_{lpdf}({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n" ) } out } # truncated part of the likelihood # @param short use the T[, ] syntax? 
stan_log_lik_trunc <- function(ll, bterms, data, threads, resp = "", short = FALSE) { stopifnot(is.sdist(ll)) bounds <- trunc_bounds(bterms, data = data) if (!any(bounds$lb > -Inf | bounds$ub < Inf)) { return("") } n <- stan_nn(threads) m1 <- str_if(use_int(bterms), " - 1") lb <- str_if(any(bounds$lb > -Inf), glue("lb{resp}{n}{m1}")) ub <- str_if(any(bounds$ub < Inf), glue("ub{resp}{n}")) if (short) { # truncation using T[, ] syntax out <- glue(" T[{lb}, {ub}]") } else { # truncation making use of _lcdf functions ms <- paste0(" -\n", wsp(nsp = 6)) if (any(bounds$lb > -Inf) && !any(bounds$ub < Inf)) { out <- glue("{ms}{ll$dist}_lccdf({lb}{ll$shift} | {ll$args})") } else if (!any(bounds$lb > -Inf) && any(bounds$ub < Inf)) { out <- glue("{ms}{ll$dist}_lcdf({ub}{ll$shift} | {ll$args})") } else if (any(bounds$lb > -Inf) && any(bounds$ub < Inf)) { trr <- glue("{ll$dist}_lcdf({ub}{ll$shift} | {ll$args})") trl <- glue("{ll$dist}_lcdf({lb}{ll$shift} | {ll$args})") out <- glue("{ms}log_diff_exp({trr}, {trl})") } } out } stan_log_lik_lpdf_name <- function(bterms, normalize, dist = NULL) { if (!is.null(dist) && !normalize) { # some Stan lpdfs or lpmfs only exist as normalized versions always_normalized <- always_normalized(bterms) if (length(always_normalized)) { always_normalized <- paste0(escape_all(always_normalized), "$") normalize <- any(ulapply(always_normalized, grepl, x = dist)) } } if (normalize) { out <- ifelse(use_int(bterms$family), "lpmf", "lpdf") } else { out <- ifelse(use_int(bterms$family), "lupmf", "lupdf") } out } stan_log_lik_Y_name <- function(bterms) { ifelse(is.formula(bterms$adforms$mi), "Yl", "Y") } # prepare names of distributional parameters # @param reqn will the likelihood be wrapped in a loop over n? 
# @param dpars optional names of distributional parameters to be prepared # if not specified will prepare all distributional parameters stan_log_lik_dpars <- function(bterms, reqn, resp = "", mix = "", dpars = NULL, type = NULL) { if (is.null(dpars)) { dpars <- paste0(valid_dpars(bterms, type = type), mix) } pred_dpars <- names(bterms$dpars) if (is_equal(type, "multi")) { pred_dpars <- unique(dpar_class(pred_dpars, bterms)) } is_pred <- dpars %in% pred_dpars out <- paste0(dpars, resp, ifelse(reqn & is_pred, "[n]", "")) named_list(dpars, out) } # adjust lpdf name if a more efficient version is available # for a specific link. For instance 'poisson_log' stan_log_lik_simple_lpdf <- function(lpdf, link, bterms, sep = "_") { stopifnot(is.brmsterms(bterms)) cens_or_trunc <- stan_log_lik_adj(bterms, c("cens", "trunc")) if (bterms$family$link == link && !cens_or_trunc) { lpdf <- paste0(lpdf, sep, link) } lpdf } # prepare _logit suffix for distributional parameters # used in zero-inflated and hurdle models stan_log_lik_dpar_usc_logit <- function(dpar, bterms) { stopifnot(dpar %in% c("zi", "hu")) stopifnot(is.brmsterms(bterms)) cens_or_trunc <- stan_log_lik_adj(bterms, c("cens", "trunc")) usc_logit <- isTRUE(bterms$dpars[[dpar]]$family$link == "logit") str_if(usc_logit && !cens_or_trunc, "_logit") } # add 'se' to 'sigma' within the Stan likelihood stan_log_lik_add_se <- function(sigma, bterms, reqn, resp = "", threads = NULL) { if (!is.formula(bterms$adforms$se)) { return(sigma) } nse <- str_if(reqn, stan_nn(threads), stan_slice(threads)) if (no_sigma(bterms)) { sigma <- glue("se{resp}{nse}") } else { sigma <- glue("sqrt(square({sigma}) + se2{resp}{nse})") } sigma } # multiply 'dpar' by the 'rate' denominator within the Stan likelihood # @param log add the rate denominator on the log scale if sensible? 
stan_log_lik_multiply_rate_denom <- function(dpar, bterms, reqn, resp = "", log = FALSE, transform = NULL) { dpar_transform <- dpar if (!is.null(transform)) { dpar_transform <- glue("{transform}({dpar})") } if (!is.formula(bterms$adforms$rate)) { return(dpar_transform) } ndenom <- str_if(reqn, "[n]") denom <- glue("denom{resp}{ndenom}") cens_or_trunc <- stan_log_lik_adj(bterms, c("cens", "trunc")) if (log && bterms$family$link == "log" && !cens_or_trunc) { denom <- glue("log_{denom}") operator <- "+" } else { # dpar without resp name or index dpar_clean <- sub("(_|\\[).*", "", dpar) is_pred <- dpar_clean %in% c("mu", names(bterms$dpars)) operator <- str_if(reqn || !is_pred, "*", ".*") } glue("{dpar_transform} {operator} {denom}") } # check if the log-liklihood needs to be adjused # @param x named list of formulas or brmsterms object # @param adds vector of addition argument names # @return a single logical value stan_log_lik_adj <- function(x, adds = c("weights", "cens", "trunc")) { stopifnot(all(adds %in% c("weights", "cens", "trunc"))) if (is.brmsterms(x)) x <- x$adforms any(ulapply(x[adds], is.formula)) } # one function per family stan_log_lik_gaussian <- function(bterms, resp = "", mix = "", threads = NULL, ...) { if (use_glm_primitive(bterms)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) p$sigma <- paste0("sigma", resp) out <- sdist("normal_id_glm", p$x, p$alpha, p$beta, p$sigma) } else { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, reqn, resp, threads) out <- sdist("normal", p$mu, p$sigma) } out } stan_log_lik_gaussian_mv <- function(bterms, resp = "", mix = "", ...) 
{ reqn <- stan_log_lik_adj(bterms) || nzchar(mix) || bterms$sigma_pred p <- list(Mu = paste0("Mu", if (reqn) "[n]")) p$LSigma <- paste0("LSigma", if (bterms$sigma_pred) "[n]") sdist("multi_normal_cholesky", p$Mu, p$LSigma) } stan_log_lik_gaussian_time <- function(bterms, resp = "", mix = "", ...) { if (stan_log_lik_adj(bterms)) { stop2("Invalid addition arguments for this model.") } has_se <- is.formula(bterms$adforms$se) flex <- has_ac_class(tidy_acef(bterms), "unstr") p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) v <- c("Lcortime", "nobs_tg", "begin_tg", "end_tg") if (has_se) { c(v) <- "se2" } if (flex) { c(v) <- "Jtime_tg" } p[v] <- as.list(paste0(v, resp)) sfx <- str_if("sigma" %in% names(bterms$dpars), "het", "hom") sfx <- str_if(has_se, paste0(sfx, "_se"), sfx) sfx <- str_if(flex, paste0(sfx, "_flex"), sfx) sdist(glue("normal_time_{sfx}"), p$mu, p$sigma, p$se2, p$Lcortime, p$nobs_tg, p$begin_tg, p$end_tg, p$Jtime_tg ) } stan_log_lik_gaussian_fcor <- function(bterms, resp = "", mix = "", ...) { has_se <- is.formula(bterms$adforms$se) if (stan_log_lik_adj(bterms) || has_se) { stop2("Invalid addition arguments for this model.") } p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$Lfcor <- paste0("Lfcor", resp) sfx <- str_if("sigma" %in% names(bterms$dpars), "het", "hom") sdist(glue("normal_fcor_{sfx}"), p$mu, p$sigma, p$Lfcor) } stan_log_lik_gaussian_lagsar <- function(bterms, resp = "", mix = "", threads = NULL, ...) { p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, FALSE, resp, threads) v <- c("lagsar", "Msar", "eigenMsar") p[v] <- as.list(paste0(v, resp)) sdist("normal_lagsar", p$mu, p$sigma, p$lagsar, p$Msar, p$eigenMsar) } stan_log_lik_gaussian_errorsar <- function(bterms, resp = "", mix = "", threads = NULL, ...) 
{ p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, FALSE, resp, threads) v <- c("errorsar", "Msar", "eigenMsar") p[v] <- as.list(paste0(v, resp)) sdist("normal_errorsar", p$mu, p$sigma, p$errorsar, p$Msar, p$eigenMsar) } stan_log_lik_student <- function(bterms, resp = "", mix = "", threads = NULL, ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, reqn, resp, threads) sdist("student_t", p$nu, p$mu, p$sigma) } stan_log_lik_student_mv <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) || bterms$sigma_pred p <- stan_log_lik_dpars(bterms, reqn, resp, mix, dpars = "nu") p$Mu <- paste0("Mu", if (reqn) "[n]") p$Sigma <- paste0("Sigma", if (bterms$sigma_pred) "[n]") sdist("multi_student_t", p$nu, p$Mu, p$Sigma) } stan_log_lik_student_time <- function(bterms, resp = "", mix = "", ...) { if (stan_log_lik_adj(bterms)) { stop2("Invalid addition arguments for this model.") } has_se <- is.formula(bterms$adforms$se) flex <- has_ac_class(tidy_acef(bterms), "unstr") p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) v <- c("Lcortime", "nobs_tg", "begin_tg", "end_tg") if (has_se) { c(v) <- "se2" } if (flex) { c(v) <- "Jtime_tg" } p[v] <- as.list(paste0(v, resp)) sfx <- str_if("sigma" %in% names(bterms$dpars), "het", "hom") sfx <- str_if(has_se, paste0(sfx, "_se"), sfx) sfx <- str_if(flex, paste0(sfx, "_flex"), sfx) sdist(glue("student_t_time_{sfx}"), p$nu, p$mu, p$sigma, p$se2, p$Lcortime, p$nobs_tg, p$begin_tg, p$end_tg, p$Jtime_tg ) } stan_log_lik_student_fcor <- function(bterms, resp = "", mix = "", ...) 
{ has_se <- is.formula(bterms$adforms$se) if (stan_log_lik_adj(bterms) || has_se) { stop2("Invalid addition arguments for this model.") } p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$Lfcor <- paste0("Lfcor", resp) sfx <- str_if("sigma" %in% names(bterms$dpars), "het", "hom") sdist(glue("student_t_fcor_{sfx}"), p$nu, p$mu, p$sigma, p$Lfcor) } stan_log_lik_student_lagsar <- function(bterms, resp = "", mix = "", threads = NULL, ...) { p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, FALSE, resp, threads) v <- c("lagsar", "Msar", "eigenMsar") p[v] <- as.list(paste0(v, resp)) sdist("student_t_lagsar", p$nu, p$mu, p$sigma, p$lagsar, p$Msar, p$eigenMsar) } stan_log_lik_student_errorsar <- function(bterms, resp = "", mix = "", threads = NULL, ...) { p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, FALSE, resp, threads) v <- c("errorsar", "Msar", "eigenMsar") p[v] <- as.list(paste0(v, resp)) sdist("student_t_errorsar", p$nu, p$mu, p$sigma, p$errorsar, p$Msar, p$eigenMsar) } stan_log_lik_lognormal <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) sdist("lognormal", p$mu, p$sigma) } stan_log_lik_shifted_lognormal <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) sdist("lognormal", p$mu, p$sigma, shift = paste0(" - ", p$ndt)) } stan_log_lik_asym_laplace <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) sdist("asym_laplace", p$mu, p$sigma, p$quantile) } stan_log_lik_skew_normal <- function(bterms, resp = "", mix = "", threads = NULL, ...) 
{ reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, reqn, resp, threads) # required because of CP parameterization of mu and sigma nomega <- any(grepl(stan_nn_regex(), c(p$sigma, p$alpha))) nomega <- str_if(reqn && nomega, "[n]") p$omega <- paste0("omega", mix, resp, nomega) sdist("skew_normal", p$mu, p$omega, p$alpha) } stan_log_lik_poisson <- function(bterms, resp = "", mix = "", threads = NULL, ...) { if (use_glm_primitive(bterms)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) out <- sdist("poisson_log_glm", p$x, p$alpha, p$beta) } else { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) lpdf <- stan_log_lik_simple_lpdf("poisson", "log", bterms) p$mu <- stan_log_lik_multiply_rate_denom(p$mu, bterms, reqn, resp, log = TRUE) out <- sdist(lpdf, p$mu) } out } stan_log_lik_negbinomial <- function(bterms, resp = "", mix = "", threads = NULL, ...) { if (use_glm_primitive(bterms)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) p$shape <- paste0("shape", resp) out <- sdist("neg_binomial_2_log_glm", p$x, p$alpha, p$beta, p$shape) } else { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$mu <- stan_log_lik_multiply_rate_denom(p$mu, bterms, reqn, resp, log = TRUE) p$shape <- stan_log_lik_multiply_rate_denom(p$shape, bterms, reqn, resp) lpdf <- stan_log_lik_simple_lpdf("neg_binomial_2", "log", bterms) out <- sdist(lpdf, p$mu, p$shape) } out } stan_log_lik_negbinomial2 <- function(bterms, resp = "", mix = "", threads = NULL, ...) 
{ if (use_glm_primitive(bterms)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) p$sigma <- paste0("sigma", resp) p$shape <- paste0("inv(", p$sigma, ")") out <- sdist("neg_binomial_2_log_glm", p$x, p$alpha, p$beta, p$shape) } else { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$mu <- stan_log_lik_multiply_rate_denom(p$mu, bterms, reqn, resp, log = TRUE) p$shape <- stan_log_lik_multiply_rate_denom( p$sigma, bterms, reqn, resp, transform = "inv" ) lpdf <- stan_log_lik_simple_lpdf("neg_binomial_2", "log", bterms) out <- sdist(lpdf, p$mu, p$shape) } out } stan_log_lik_geometric <- function(bterms, resp = "", mix = "", threads = NULL, ...) { if (use_glm_primitive(bterms)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) p$shape <- "1" out <- sdist("neg_binomial_2_log_glm", p$x, p$alpha, p$beta, p$shape) } else { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$shape <- "1" p$mu <- stan_log_lik_multiply_rate_denom(p$mu, bterms, reqn, resp, log = TRUE) p$shape <- stan_log_lik_multiply_rate_denom(p$shape, bterms, reqn, resp) lpdf <- stan_log_lik_simple_lpdf("neg_binomial_2", "log", bterms) out <- sdist(lpdf, p$mu, p$shape) } } stan_log_lik_binomial <- function(bterms, resp = "", mix = "", threads = NULL, ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) slice <- str_if(reqn, stan_nn(threads), stan_slice(threads)) p$trials <- paste0("trials", resp, slice) lpdf <- stan_log_lik_simple_lpdf("binomial", "logit", bterms) sdist(lpdf, p$trials, p$mu) } stan_log_lik_beta_binomial <- function(bterms, resp = "", mix = "", threads = NULL, ...) 
{
  # dpars are always indexed per observation here (reqn = TRUE)
  p <- stan_log_lik_dpars(bterms, TRUE, resp, mix)
  n <- stan_nn(threads)
  # convert the mean-precision (mu, phi) parameterization into the
  # two shape arguments passed to Stan's beta_binomial
  sdist(
    "beta_binomial", paste0("trials", resp, n),
    paste0(p$mu, " * ", p$phi),
    paste0("(1 - ", p$mu, ") * ", p$phi)
  )
}

# Stan code for the log likelihood of the bernoulli family
# uses the bernoulli_logit_glm primitive where possible
stan_log_lik_bernoulli <- function(bterms, resp = "", mix = "",
                                   threads = NULL, ...) {
  if (use_glm_primitive(bterms)) {
    p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads)
    out <- sdist("bernoulli_logit_glm", p$x, p$alpha, p$beta)
  } else {
    reqn <- stan_log_lik_adj(bterms) || nzchar(mix)
    p <- stan_log_lik_dpars(bterms, reqn, resp, mix)
    lpdf <- stan_log_lik_simple_lpdf("bernoulli", "logit", bterms)
    out <- sdist(lpdf, p$mu)
  }
  out
}

# Stan code for the log likelihood of the discrete weibull family
stan_log_lik_discrete_weibull <- function(bterms, resp = "", mix = "", ...) {
  p <- stan_log_lik_dpars(bterms, TRUE, resp, mix)
  sdist("discrete_weibull", p$mu, p$shape)
}

# Stan code for the log likelihood of the COM-Poisson family
stan_log_lik_com_poisson <- function(bterms, resp = "", mix = "", ...) {
  p <- stan_log_lik_dpars(bterms, TRUE, resp, mix)
  lpdf <- stan_log_lik_simple_lpdf("com_poisson", "log", bterms)
  sdist(lpdf, p$mu, p$shape)
}

# Stan code for the log likelihood of the gamma family
stan_log_lik_gamma <- function(bterms, resp = "", mix = "", ...) {
  # observation-wise evaluation is also required if 'shape' is predicted
  reqn <- stan_log_lik_adj(bterms) || nzchar(mix) ||
    paste0("shape", mix) %in% names(bterms$dpars)
  p <- stan_log_lik_dpars(bterms, reqn, resp, mix)
  # Stan uses shape-rate parameterization with rate = shape / mean
  div_op <- str_if(reqn, " / ", " ./ ")
  sdist("gamma", p$shape, paste0(p$shape, div_op, p$mu))
}

# Stan code for the log likelihood of the exponential family
stan_log_lik_exponential <- function(bterms, resp = "", mix = "", ...) {
  reqn <- stan_log_lik_adj(bterms) || nzchar(mix)
  p <- stan_log_lik_dpars(bterms, reqn, resp, mix)
  # Stan uses rate parameterization with rate = 1 / mean
  sdist("exponential", paste0("inv(", p$mu, ")"))
}

# Stan code for the log likelihood of the weibull family
stan_log_lik_weibull <- function(bterms, resp = "", mix = "", ...)
{
  reqn <- stan_log_lik_adj(bterms) || nzchar(mix)
  p <- stan_log_lik_dpars(bterms, reqn, resp, mix)
  # Stan uses shape-scale parameterization for weibull;
  # use elementwise './' only when operating on whole vectors
  need_dot_div <- !reqn && paste0("shape", mix) %in% names(bterms$dpars)
  div_op <- str_if(need_dot_div, " ./ ", " / ")
  p$scale <- paste0(p$mu, div_op, "tgamma(1 + 1", div_op, p$shape, ")")
  sdist("weibull", p$shape, p$scale)
}

# Stan code for the log likelihood of the frechet family
stan_log_lik_frechet <- function(bterms, resp = "", mix = "", ...) {
  reqn <- stan_log_lik_adj(bterms) || nzchar(mix)
  p <- stan_log_lik_dpars(bterms, reqn, resp, mix)
  # Stan uses shape-scale parameterization for frechet
  need_dot_div <- !reqn && paste0("nu", mix) %in% names(bterms$dpars)
  div_op <- str_if(need_dot_div, " ./ ", " / ")
  p$scale <- paste0(p$mu, div_op, "tgamma(1 - 1", div_op, p$nu, ")")
  sdist("frechet", p$nu, p$scale)
}

# Stan code for the log likelihood of the generalized extreme value family
stan_log_lik_gen_extreme_value <- function(bterms, resp = "", mix = "", ...) {
  p <- stan_log_lik_dpars(bterms, TRUE, resp, mix)
  sdist("gen_extreme_value", p$mu, p$sigma, p$xi)
}

# Stan code for the log likelihood of the exgaussian family
stan_log_lik_exgaussian <- function(bterms, resp = "", mix = "", ...) {
  reqn <- stan_log_lik_adj(bterms) || nzchar(mix)
  p <- stan_log_lik_dpars(bterms, reqn, resp, mix)
  sdist(
    "exp_mod_normal", paste0(p$mu, " - ", p$beta),
    p$sigma, paste0("inv(", p$beta, ")")
  )
}

# Stan code for the log likelihood of the inverse gaussian family
stan_log_lik_inverse.gaussian <- function(bterms, resp = "", mix = "", ...) {
  reqn <- stan_log_lik_adj(bterms) || nzchar(mix) ||
    glue("shape{mix}") %in% names(bterms$dpars)
  p <- stan_log_lik_dpars(bterms, reqn, resp, mix)
  lpdf <- paste0("inv_gaussian", if (!reqn) "_vector")
  # NOTE: removed the unused local 'n <- str_if(reqn, "[n]")';
  # it was computed but never passed to sdist()
  sdist(lpdf, p$mu, p$shape)
}

# Stan code for the log likelihood of the wiener diffusion model
stan_log_lik_wiener <- function(bterms, resp = "", mix = "",
                                threads = NULL, ...) {
  p <- stan_log_lik_dpars(bterms, TRUE, resp, mix)
  n <- stan_nn(threads)
  p$dec <- paste0("dec", resp, n)
  sdist("wiener_diffusion", p$dec, p$bs, p$ndt, p$bias, p$mu)
}

# Stan code for the log likelihood of the beta family
stan_log_lik_beta <- function(bterms, resp = "", mix = "", ...)
{ reqn <- stan_log_lik_adj(bterms) || nzchar(mix) || paste0("phi", mix) %in% names(bterms$dpars) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) sdist("beta", paste0(p$mu, " * ", p$phi), paste0("(1 - ", p$mu, ") * ", p$phi) ) } stan_log_lik_von_mises <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) || "kappa" %in% names(bterms$dpars) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) lpdf <- paste0("von_mises_", str_if(reqn, "real", "vector")) sdist(lpdf, p$mu, p$kappa) } stan_log_lik_cox <- function(bterms, resp = "", mix = "", threads = NULL, ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) p$bhaz <- paste0("bhaz", resp, "[n]") p$cbhaz <- paste0("cbhaz", resp, "[n]") lpdf <- "cox" if (bterms$family$link == "log") { str_add(lpdf) <- "_log" } sdist(lpdf, p$mu, p$bhaz, p$cbhaz) } stan_log_lik_cumulative <- function(bterms, resp = "", mix = "", threads = NULL, ...) { if (use_glm_primitive(bterms, allow_special_terms = FALSE)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) out <- sdist("ordered_logistic_glm", p$x, p$beta, p$alpha) return(out) } stan_log_lik_ordinal(bterms, resp, mix, threads, ...) } stan_log_lik_sratio <- function(bterms, resp = "", mix = "", threads = NULL, ...) { stan_log_lik_ordinal(bterms, resp, mix, threads, ...) } stan_log_lik_cratio <- function(bterms, resp = "", mix = "", threads = NULL, ...) { stan_log_lik_ordinal(bterms, resp, mix, threads, ...) } stan_log_lik_acat <- function(bterms, resp = "", mix = "", threads = NULL, ...) { stan_log_lik_ordinal(bterms, resp, mix, threads, ...) } stan_log_lik_categorical <- function(bterms, resp = "", mix = "", ...) { stopifnot(bterms$family$link == "logit") stopifnot(!isTRUE(nzchar(mix))) # mixture models are not allowed p <- stan_log_lik_dpars(bterms, TRUE, resp, mix, dpars = "mu", type = "multi") sdist("categorical_logit", p$mu) } stan_log_lik_multinomial <- function(bterms, resp = "", mix = "", ...) 
{ stopifnot(bterms$family$link == "logit") stopifnot(!isTRUE(nzchar(mix))) # mixture models are not allowed p <- stan_log_lik_dpars(bterms, TRUE, resp, mix, dpars = "mu", type = "multi") sdist("multinomial_logit2", p$mu) } stan_log_lik_dirichlet <- function(bterms, resp = "", mix = "", ...) { stopifnot(bterms$family$link == "logit") stopifnot(!isTRUE(nzchar(mix))) # mixture models are not allowed mu <- stan_log_lik_dpars(bterms, TRUE, resp, mix, dpars = "mu", type = "multi")$mu reqn <- glue("phi{mix}") %in% names(bterms$dpars) phi <- stan_log_lik_dpars(bterms, reqn, resp, mix, dpars = "phi")$phi sdist("dirichlet_logit", mu, phi) } stan_log_lik_dirichlet2 <- function(bterms, resp = "", mix = "", ...) { stopifnot(!isTRUE(nzchar(mix))) # mixture models are not allowed mu <- stan_log_lik_dpars(bterms, TRUE, resp, mix, dpars = "mu", type = "multi")$mu sdist("dirichlet", mu) } stan_log_lik_logistic_normal <- function(bterms, resp = "", mix = "", ...) { stopifnot(bterms$family$link == "identity") stopifnot(!isTRUE(nzchar(mix))) # mixture models are not allowed p <- stan_log_lik_dpars(bterms, TRUE, resp, mix, type = "multi") p$Llncor <- glue("Llncor{mix}{resp}") p$refcat <- get_refcat(bterms$family, int = TRUE) sdist("logistic_normal_cholesky_cor", p$mu, p$sigma, p$Llncor, p$refcat) } stan_log_lik_ordinal <- function(bterms, resp = "", mix = "", threads = NULL, ...) 
{ prefix <- paste0(str_if(nzchar(mix), paste0("_mu", mix)), resp) p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) if (use_ordered_logistic(bterms)) { # TODO: support 'ordered_probit' as well lpdf <- "ordered_logistic" p[grepl("^disc", names(p))] <- NULL } else { lpdf <- paste0(bterms$family$family, "_", bterms$family$link) } if (has_thres_groups(bterms)) { str_add(lpdf) <- "_merged" n <- stan_nn(threads) p$Jthres <- paste0("Jthres", resp, n) p$thres <- "merged_Intercept" } else { p$thres <- "Intercept" } str_add(p$thres) <- prefix if (has_sum_to_zero_thres(bterms)) { str_add(p$thres) <- "_stz" } if (has_cs(bterms)) { if (has_thres_groups(bterms)) { stop2("Cannot use category specific effects ", "in models with multiple thresholds.") } str_add(p$thres) <- paste0(" - transpose(mucs", prefix, "[n])") } sdist(lpdf, p$mu, p$disc, p$thres, p$Jthres) } stan_log_lik_hurdle_poisson <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) lpdf <- stan_log_lik_simple_lpdf("hurdle_poisson", "log", bterms) lpdf <- paste0(lpdf, stan_log_lik_dpar_usc_logit("hu", bterms)) sdist(lpdf, p$mu, p$hu) } stan_log_lik_hurdle_negbinomial <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) lpdf <- stan_log_lik_simple_lpdf("hurdle_neg_binomial", "log", bterms) lpdf <- paste0(lpdf, stan_log_lik_dpar_usc_logit("hu", bterms)) sdist(lpdf, p$mu, p$shape, p$hu) } stan_log_lik_hurdle_gamma <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) usc_logit <- stan_log_lik_dpar_usc_logit("hu", bterms) lpdf <- paste0("hurdle_gamma", usc_logit) # Stan uses shape-rate parameterization for gamma with rate = shape / mean sdist(lpdf, p$shape, paste0(p$shape, " / ", p$mu), p$hu) } stan_log_lik_hurdle_lognormal <- function(bterms, resp = "", mix = "", ...) 
{ p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) usc_logit <- stan_log_lik_dpar_usc_logit("hu", bterms) lpdf <- paste0("hurdle_lognormal", usc_logit) sdist(lpdf, p$mu, p$sigma, p$hu) } stan_log_lik_hurdle_cumulative <- function(bterms, resp = "", mix = "", threads = NULL, ...) { prefix <- paste0(str_if(nzchar(mix), paste0("_mu", mix)), resp) p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) if (use_ordered_logistic(bterms)) { # TODO: support 'ordered_probit' as well lpdf <- "hurdle_cumulative_ordered_logistic" } else { lpdf <- paste0(bterms$family$family, "_", bterms$family$link) } if (has_thres_groups(bterms)) { str_add(lpdf) <- "_merged" n <- stan_nn(threads) p$Jthres <- paste0("Jthres", resp, n) p$thres <- "merged_Intercept" } else { p$thres <- "Intercept" } str_add(p$thres) <- prefix if (has_sum_to_zero_thres(bterms)) { str_add(p$thres) <- "_stz" } if (has_cs(bterms)) { if (has_thres_groups(bterms)) { stop2("Cannot use category specific effects ", "in models with multiple thresholds.") } str_add(p$thres) <- paste0(" - transpose(mucs", prefix, "[n])") } sdist(lpdf, p$mu, p$hu, p$disc, p$thres, p$Jthres) } stan_log_lik_zero_inflated_poisson <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) lpdf <- stan_log_lik_simple_lpdf("zero_inflated_poisson", "log", bterms) lpdf <- paste0(lpdf, stan_log_lik_dpar_usc_logit("zi", bterms)) sdist(lpdf, p$mu, p$zi) } stan_log_lik_zero_inflated_negbinomial <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) lpdf <- stan_log_lik_simple_lpdf("zero_inflated_neg_binomial", "log", bterms) lpdf <- paste0(lpdf, stan_log_lik_dpar_usc_logit("zi", bterms)) sdist(lpdf, p$mu, p$shape, p$zi) } stan_log_lik_zero_inflated_binomial <- function(bterms, resp = "", mix = "", threads = NULL, ...) 
{ p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) n <- stan_nn(threads) p$trials <- paste0("trials", resp, n) lpdf <- "zero_inflated_binomial" lpdf <- stan_log_lik_simple_lpdf(lpdf, "logit", bterms, sep = "_b") lpdf <- paste0(lpdf, stan_log_lik_dpar_usc_logit("zi", bterms)) sdist(lpdf, p$trials, p$mu, p$zi) } stan_log_lik_zero_inflated_beta_binomial <- function(bterms, resp = "", mix = "", threads = NULL, ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) n <- stan_nn(threads) p$trials <- paste0("trials", resp, n) lpdf <- "zero_inflated_beta_binomial" lpdf <- paste0(lpdf, stan_log_lik_dpar_usc_logit("zi", bterms)) sdist(lpdf, p$trials, p$mu, p$phi, p$zi) } stan_log_lik_zero_inflated_beta <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) usc_logit <- stan_log_lik_dpar_usc_logit("zi", bterms) lpdf <- paste0("zero_inflated_beta", usc_logit) sdist(lpdf, p$mu, p$phi, p$zi) } stan_log_lik_zero_one_inflated_beta <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) sdist("zero_one_inflated_beta", p$mu, p$phi, p$zoi, p$coi) } stan_log_lik_zero_inflated_asym_laplace <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) usc_logit <- stan_log_lik_dpar_usc_logit("zi", bterms) lpdf <- paste0("zero_inflated_asym_laplace", usc_logit) sdist(lpdf, p$mu, p$sigma, p$quantile, p$zi) } stan_log_lik_custom <- function(bterms, resp = "", mix = "", threads = NULL, ...) 
{ family <- bterms$family no_loop <- isFALSE(family$loop) if (no_loop && (stan_log_lik_adj(bterms) || nzchar(mix))) { stop2("This model requires evaluating the custom ", "likelihood as a loop over observations.") } reqn <- !no_loop p <- stan_log_lik_dpars(bterms, reqn, resp, mix) dpars <- paste0(family$dpars, mix) if (is_ordinal(family)) { prefix <- paste0(resp, if (nzchar(mix)) paste0("_mu", mix)) p$thres <- paste0("Intercept", prefix) } # insert the response name into the 'vars' strings # addition terms contain the response in their variable name n <- stan_nn(threads) var_names <- sub("\\[.+$", "", family$vars) var_indices <- get_matches("\\[.+$", family$vars, first = TRUE) has_n_index <- var_indices %in% "[n]" if (no_loop && any(has_n_index)) { stop2("Invalid use of index '[n]' in an unlooped custom likelihood.") } var_indices <- ifelse(has_n_index, n, var_indices) is_var_adterms <- var_names %in% c("se", "trials", "dec") | grepl("^((vint)|(vreal))[[:digit:]]+$", var_names) var_resps <- ifelse(is_var_adterms, resp, "") vars <- paste0(var_names, var_resps, var_indices) sdist(family$name, p[dpars], p$thres, vars) } # use Stan GLM primitive functions? 
# @param bterms a brmsterms object
# @param allow_special_terms may the linear predictor contain special terms?
# @return TRUE or FALSE
use_glm_primitive <- function(bterms, allow_special_terms = TRUE) {
  stopifnot(is.brmsterms(bterms))
  # the model can only have a single predicted parameter
  # and no additional residual or autocorrelation structure
  non_glm_adterms <- c("se", "weights", "thres", "cens", "trunc", "rate")
  mu <- bterms$dpars[["mu"]]
  if (!is.btl(mu) || length(bterms$dpars) > 1L ||
      isTRUE(bterms$rescor) || is.formula(mu$ac) ||
      any(names(bterms$adforms) %in% non_glm_adterms)) {
    return(FALSE)
  }
  # supported families and link functions
  # TODO: support categorical_logit primitive
  glm_links <- list(
    gaussian = "identity", bernoulli = "logit",
    poisson = "log", negbinomial = "log", negbinomial2 = "log"
    # rstan does not yet support 'ordered_logistic_glm'
    # cumulative = "logit"
  )
  if (!isTRUE(glm_links[[mu$family$family]] == mu$family$link)) {
    return(FALSE)
  }
  if (!allow_special_terms && has_special_terms(mu)) {
    # some primitives do not support special terms in the way
    # required by brms' Stan code generation
    return(FALSE)
  }
  # only use a primitive if fixed effects are present and not sparse
  length(all_terms(mu$fe)) > 0 && !is_sparse(mu$fe)
}

# standard arguments for primitive Stan GLM functions
# @param bterms a btl object
# @param resp optional name of the response variable
# @param threads list of threading options or NULL
# @return a named list of Stan code snippets (design matrix 'x',
#   intercept 'alpha', and coefficient vector 'beta')
args_glm_primitive <- function(bterms, resp = "", threads = NULL) {
  stopifnot(is.btl(bterms))
  decomp <- get_decomp(bterms$fe)
  center_X <- stan_center_X(bterms)
  slice <- stan_slice(threads)
  sfx_X <- sfx_b <- ""
  if (decomp == "QR") {
    # QR decomposition: use the Q matrix and the matching 'bQ' coefficients
    sfx_X <- sfx_b <- "Q"
  } else if (center_X) {
    # centered design matrix is stored with a 'c' suffix
    sfx_X <- "c"
  }
  x <- glue("X{sfx_X}{resp}{slice}")
  beta <- glue("b{sfx_b}{resp}")
  if (has_special_terms(bterms)) {
    # the intercept vector will contain all the remaining terms
    alpha <- glue("mu{resp}")
  } else {
    if (center_X) {
      alpha <- glue("Intercept{resp}")
    } else {
      alpha <- "0"
    }
  }
  nlist(x, alpha, beta)
}

# use the ordered_logistic built-in functions
use_ordered_logistic <- function(bterms) {
stopifnot(is.brmsterms(bterms)) isTRUE(bterms$family$family %in% c("cumulative", "hurdle_cumulative")) && isTRUE(bterms$family$link == "logit") && isTRUE(bterms$fdpars$disc$value == 1) && !has_cs(bterms) } # prepare distribution and arguments for use in Stan sdist <- function(dist, ..., shift = "") { args <- sargs(...) structure(nlist(dist, args, shift), class = "sdist") } # prepare arguments for Stan likelihood statements sargs <- function(...) { dots <- as.character(c(...)) dots <- dots[nzchar(dots)] paste0(dots, collapse = ", ") } is.sdist <- function(x) { inherits(x, "sdist") } tp <- function(wsp = 2) { wsp <- collapse(rep(" ", wsp)) paste0(wsp, "target += ") } brms/R/make_standata.R0000644000176200001440000002400114453526454014346 0ustar liggesusers#' Data for \pkg{brms} Models #' #' Generate data for \pkg{brms} models to be passed to \pkg{Stan} #' #' @inheritParams brm #' @param ... Other arguments for internal use. #' #' @return A named list of objects containing the required data #' to fit a \pkg{brms} model with \pkg{Stan}. #' #' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com} #' #' @examples #' sdata1 <- make_standata(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = "cumulative") #' str(sdata1) #' #' sdata2 <- make_standata(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = "poisson") #' str(sdata2) #' #' @export make_standata <- function(formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sample_prior = "no", stanvars = NULL, threads = getOption("brms.threads", NULL), knots = NULL, drop_unused_levels = TRUE, ...) 
{ if (is.brmsfit(formula)) { stop2("Use 'standata' to extract Stan data from 'brmsfit' objects.") } formula <- validate_formula( formula, data = data, family = family, autocor = autocor, cov_ranef = cov_ranef ) bterms <- brmsterms(formula) data2 <- validate_data2( data2, bterms = bterms, get_data2_autocor(formula), get_data2_cov_ranef(formula) ) data <- validate_data( data, bterms = bterms, knots = knots, data2 = data2, drop_unused_levels = drop_unused_levels ) prior <- .validate_prior( prior, bterms = bterms, data = data, sample_prior = sample_prior ) stanvars <- validate_stanvars(stanvars) threads <- validate_threads(threads) .make_standata( bterms, data = data, prior = prior, data2 = data2, stanvars = stanvars, threads = threads, ... ) } # internal work function of 'make_stancode' # @param check_response check validity of the response? # @param only_response extract data related to the response only? # @param internal prepare Stan data for use in post-processing methods? # @param basis original Stan data as prepared by 'standata_basis' # @param ... currently ignored # @return names list of data passed to Stan .make_standata <- function(bterms, data, prior, stanvars, data2, threads = threading(), check_response = TRUE, only_response = FALSE, internal = FALSE, basis = NULL, ...) 
{ check_response <- as_one_logical(check_response) only_response <- as_one_logical(only_response) internal <- as_one_logical(internal) # order data for use in autocorrelation models data <- order_data(data, bterms = bterms) out <- data_response( bterms, data, check_response = check_response, internal = internal, basis = basis ) if (!only_response) { ranef <- tidy_ranef(bterms, data, old_levels = basis$levels) meef <- tidy_meef(bterms, data, old_levels = basis$levels) index <- tidy_index(bterms, data) # pass as sdata so that data_special_prior knows about data_gr_global sdata_gr_global <- data_gr_global(ranef, data2 = data2) c(out) <- data_predictor( bterms, data = data, prior = prior, data2 = data2, ranef = ranef, sdata = sdata_gr_global, index = index, basis = basis ) c(out) <- sdata_gr_global c(out) <- data_Xme(meef, data = data) } out$prior_only <- as.integer(is_prior_only(prior)) if (use_threading(threads)) { out$grainsize <- threads$grainsize if (is.null(out$grainsize)) { out$grainsize <- ceiling(out$N / (2 * threads$threads)) out$grainsize <- max(100, out$grainsize) } } if (is.stanvars(stanvars)) { stanvars <- subset_stanvars(stanvars, block = "data") inv_names <- intersect(names(stanvars), names(out)) if (length(inv_names)) { stop2("Cannot overwrite existing variables: ", collapse_comma(inv_names)) } out[names(stanvars)] <- from_list(stanvars, "sdata") } if (internal) { # allows to recover the original order of the data attr(out, "old_order") <- attr(data, "old_order") # ensures current grouping levels are known in post-processing ranef_new <- tidy_ranef(bterms, data) meef_new <- tidy_meef(bterms, data) attr(out, "levels") <- get_levels(ranef_new, meef_new) } structure(out, class = c("standata", "list")) } #' Extract data passed to Stan #' #' Extract all data that was used by Stan to fit the model. #' #' @aliases standata.brmsfit #' #' @param object An object of class \code{brmsfit}. #' @param ... 
More arguments passed to \code{\link{make_standata}} #' and \code{\link{validate_newdata}}. #' @inheritParams prepare_predictions #' #' @return A named list containing the data originally passed to Stan. #' #' @export standata.brmsfit <- function(object, newdata = NULL, re_formula = NULL, newdata2 = NULL, new_objects = NULL, incl_autocor = TRUE, ...) { object <- restructure(object) # allows functions to fall back to old default behavior # which was used when originally fitting the model options(.brmsfit_version = object$version$brms) on.exit(options(.brmsfit_version = NULL)) object <- exclude_terms(object, incl_autocor = incl_autocor) formula <- update_re_terms(object$formula, re_formula) bterms <- brmsterms(formula) newdata2 <- use_alias(newdata2, new_objects) data2 <- current_data2(object, newdata2) data <- current_data( object, newdata, newdata2 = data2, re_formula = re_formula, ... ) stanvars <- add_newdata_stanvars(object$stanvars, data2) basis <- object$basis if (is.null(basis)) { # this case should not happen actually, perhaps when people use # the 'empty' feature. But computing it here will be fine # for almost all models, only causing potential problems for processing # of splines on new machines (#1465) basis <- standata_basis(bterms, data = object$data) } .make_standata( bterms, data = data, prior = object$prior, data2 = data2, stanvars = stanvars, threads = object$threads, basis = basis, ... ) } #' @rdname standata.brmsfit #' @export standata <- function(object, ...) { UseMethod("standata") } # prepare basis data required for correct predictions from new data # TODO: eventually export this function if we want to ensure full compatibility # with the 'empty' feature. see ?rename_pars for an example standata_basis <- function(x, data, ...) { UseMethod("standata_basis") } #' @export standata_basis.default <- function(x, data, ...) { list() } #' @export standata_basis.mvbrmsterms <- function(x, data, ...) 
{ out <- list() for (r in names(x$terms)) { out$resps[[r]] <- standata_basis(x$terms[[r]], data, ...) } out$levels <- get_levels(tidy_meef(x, data), tidy_ranef(x, data)) out } #' @export standata_basis.brmsterms <- function(x, data, ...) { out <- list() data <- subset_data(data, x) for (dp in names(x$dpars)) { out$dpars[[dp]] <- standata_basis(x$dpars[[dp]], data, ...) } for (nlp in names(x$nlpars)) { out$nlpars[[nlp]] <- standata_basis(x$nlpars[[nlp]], data, ...) } # old levels are required to select the right indices for new levels out$levels <- get_levels(tidy_meef(x, data), tidy_ranef(x, data)) if (is_binary(x$family) || is_categorical(x$family)) { y <- model.response(model.frame(x$respform, data, na.action = na.pass)) out$resp_levels <- levels(as.factor(y)) } out } #' @export standata_basis.btnl <- function(x, data, ...) { list() } #' @export standata_basis.btl <- function(x, data, ...) { out <- list() out$sm <- standata_basis_sm(x, data, ...) out$gp <- standata_basis_gp(x, data, ...) out$sp <- standata_basis_sp(x, data, ...) out$ac <- standata_basis_ac(x, data, ...) out$bhaz <- standata_basis_bhaz(x, data, ...) out } # prepare basis data related to smooth terms standata_basis_sm <- function(x, data, ...) { stopifnot(is.btl(x)) smterms <- all_terms(x[["sm"]]) out <- named_list(smterms) if (length(smterms)) { knots <- get_knots(data) data <- rm_attr(data, "terms") # the spline penalty has changed in 2.8.7 (#646) diagonal.penalty <- !require_old_default("2.8.7") gam_args <- list( data = data, knots = knots, absorb.cons = TRUE, modCon = 3, diagonal.penalty = diagonal.penalty ) for (i in seq_along(smterms)) { sc_args <- c(list(eval2(smterms[i])), gam_args) sm <- do_call(smoothCon, sc_args) re <- vector("list", length(sm)) for (j in seq_along(sm)) { re[[j]] <- mgcv::smooth2random(sm[[j]], names(data), type = 2) } out[[i]]$sm <- sm out[[i]]$re <- re } } out } # prepare basis data related to gaussian processes standata_basis_gp <- function(x, data, ...) 
{ stopifnot(is.btl(x)) out <- data_gp(x, data, internal = TRUE) out <- out[grepl("^((Xgp)|(dmax)|(cmeans))", names(out))] out } # prepare basis data related to special terms standata_basis_sp <- function(x, data, ...) { stopifnot(is.btl(x)) out <- list() if (length(attr(x$sp, "uni_mo"))) { # do it like data_sp() spef <- tidy_spef(x, data) Xmo <- lapply(unlist(spef$calls_mo), get_mo_values, data = data) out$Jmo <- as.array(ulapply(Xmo, max)) } out } # prepare basis data related to autocorrelation structures standata_basis_ac <- function(x, data, ...) { out <- list() if (has_ac_class(x, "car")) { gr <- get_ac_vars(x, "gr", class = "car") if (isTRUE(nzchar(gr))) { out$locations <- extract_levels(get(gr, data)) } else { out$locations <- NA } } if (has_ac_class(x, "unstr")) { time <- get_ac_vars(x, "time", dim = "time") out$times <- extract_levels(get(time, data)) } out } # prepare basis data for baseline hazards of the cox model standata_basis_bhaz <- function(x, data, ...) { out <- list() if (is_cox(x$family)) { # compute basis matrix of the baseline hazard for the Cox model y <- model.response(model.frame(x$respform, data, na.action = na.pass)) out$basis_matrix <- bhaz_basis_matrix(y, args = x$family$bhaz) } out } brms/R/rename_pars.R0000644000176200001440000005235714454230367014061 0ustar liggesusers#' Rename parameters in brmsfit objects #' #' Rename parameters within the \code{stanfit} object #' after model fitting to ensure reasonable parameter names. This function is #' usually called automatically by \code{\link{brm}} and users will rarely be #' required to call it themselves. #' #' @param x A \code{brmsfit} object. #' @return A \code{brmsfit} object with adjusted parameter names. #' #' @details #' Function \code{rename_pars} is a deprecated alias of \code{rename_pars}. 
#' #' @examples #' \dontrun{ #' # fit a model manually via rstan #' scode <- make_stancode(count ~ Trt, data = epilepsy) #' sdata <- make_standata(count ~ Trt, data = epilepsy) #' stanfit <- rstan::stan(model_code = scode, data = sdata) #' #' # feed the Stan model back into brms #' fit <- brm(count ~ Trt, data = epilepsy, empty = TRUE) #' fit$fit <- stanfit #' fit <- rename_pars(fit) #' summary(fit) #' } #' #' @export rename_pars <- function(x) { if (!length(x$fit@sim)) { return(x) } bterms <- brmsterms(x$formula) meef <- tidy_meef(bterms, data = x$data) pars <- variables(x) # find positions of parameters and define new names to_rename <- c( rename_predictor(bterms, data = x$data, pars = pars, prior = x$prior), rename_re(x$ranef, pars = pars), rename_Xme(meef, pars = pars) ) # perform the actual renaming in x$fit@sim x <- save_old_par_order(x) x <- do_renaming(x, to_rename) x$fit <- repair_stanfit(x$fit) x <- compute_quantities(x) x <- reorder_pars(x) x } # helps in renaming parameters after model fitting # @return a list whose elements can be interpreted by do_renaming rename_predictor <- function(x, ...) { UseMethod("rename_predictor") } #' @export rename_predictor.default <- function(x, ...) { NULL } #' @export rename_predictor.mvbrmsterms <- function(x, data, pars, ...) { out <- list() for (i in seq_along(x$terms)) { c(out) <- rename_predictor(x$terms[[i]], data = data, pars = pars, ...) } if (x$rescor) { rescor_names <- get_cornames( x$responses, type = "rescor", brackets = FALSE ) lc(out) <- rlist(grepl("^rescor\\[", pars), rescor_names) } out } #' @export rename_predictor.brmsterms <- function(x, data, ...) { data <- subset_data(data, x) out <- list() for (dp in names(x$dpars)) { c(out) <- rename_predictor(x$dpars[[dp]], data = data, ...) } for (nlp in names(x$nlpars)) { c(out) <- rename_predictor(x$nlpars[[nlp]], data = data, ...) } if (is.formula(x$adforms$mi)) { c(out) <- rename_Ymi(x, data = data, ...) } c(out) <- rename_thres(x, data = data, ...) 
c(out) <- rename_family_cor_pars(x, data = data, ...) out } # helps in renaming parameters of additive predictor terms # @param pars vector of all parameter names #' @export rename_predictor.btl <- function(x, ...) { c(rename_fe(x, ...), rename_sm(x, ...), rename_cs(x, ...), rename_sp(x, ...), rename_gp(x, ...), rename_ac(x, ...)) } # helps in renaming fixed effects parameters rename_fe <- function(bterms, data, pars, prior, ...) { out <- list() fixef <- colnames(data_fe(bterms, data)$X) if (stan_center_X(bterms)) { fixef <- setdiff(fixef, "Intercept") } if (!length(fixef)) { return(out) } px <- check_prefix(bterms) p <- usc(combine_prefix(px)) b <- paste0("b", p) pos <- grepl(paste0("^", b, "\\["), pars) bnames <- paste0(b, "_", fixef) lc(out) <- rlist(pos, bnames) c(out) <- rename_prior(b, pars, names = fixef) if (has_special_prior(prior, bterms, class = "b")) { sdb <- paste0("sdb", p) pos <- grepl(paste0("^", sdb, "\\["), pars) sdb_names <- paste0(sdb, "_", fixef) lc(out) <- rlist(pos, sdb_names) } out } # helps in renaming special effects parameters rename_sp <- function(bterms, data, pars, prior, ...) 
{ out <- list() spef <- tidy_spef(bterms, data) if (!nrow(spef)) { return(out) } p <- usc(combine_prefix(bterms)) bsp <- paste0("bsp", p) pos <- grepl(paste0("^", bsp, "\\["), pars) newnames <- paste0("bsp", p, "_", spef$coef) lc(out) <- rlist(pos, newnames) c(out) <- rename_prior(bsp, pars, names = spef$coef) simo_coef <- get_simo_labels(spef) for (i in seq_along(simo_coef)) { simo_old <- paste0("simo", p, "_", i) simo_new <- paste0("simo", p, "_", simo_coef[i]) pos <- grepl(paste0("^", simo_old, "\\["), pars) simo_names <- paste0(simo_new, "[", seq_len(sum(pos)), "]") lc(out) <- rlist(pos, simo_names) c(out) <- rename_prior( simo_old, pars, new_class = simo_new, is_vector = TRUE ) } if (has_special_prior(prior, bterms, class = "b")) { sdbsp <- paste0("sdbsp", p) pos <- grepl(paste0("^", sdbsp, "\\["), pars) sdbsp_names <- paste0(sdbsp, "_", spef$coef) lc(out) <- rlist(pos, sdbsp_names) } out } # helps in renaming category specific effects parameters rename_cs <- function(bterms, data, pars, ...) { out <- list() csef <- colnames(data_cs(bterms, data)$Xcs) if (length(csef)) { p <- usc(combine_prefix(bterms)) bcsp <- paste0("bcs", p) ncs <- length(csef) thres <- sum(grepl(paste0("^b", p, "_Intercept\\["), pars)) csenames <- t(outer(csef, paste0("[", 1:thres, "]"), FUN = paste0)) csenames <- paste0(bcsp, "_", csenames) sort_cse <- ulapply(seq_len(ncs), seq, to = thres * ncs, by = ncs) lc(out) <- rlist( grepl(paste0("^", bcsp, "\\["), pars), csenames, sort = sort_cse ) c(out) <- rename_prior(bcsp, pars, names = csef) } out } # rename threshold parameters in ordinal models rename_thres <- function(bterms, pars, ...) 
{ out <- list() # renaming is only required if multiple threshold were estimated if (!has_thres_groups(bterms)) { return(out) } px <- check_prefix(bterms) p <- usc(combine_prefix(px)) int <- paste0("b", p, "_Intercept") groups <- get_thres_groups(bterms) for (i in seq_along(groups)) { thres <- get_thres(bterms, groups[i]) pos <- grepl(glue("^{int}_{i}\\["), pars) int_names <- glue("{int}[{groups[i]},{thres}]") lc(out) <- rlist(pos, int_names) } out } # helps in renaming global noise free variables # @param meef data.frame returned by 'tidy_meef' rename_Xme <- function(meef, pars, ...) { stopifnot(is.meef_frame(meef)) out <- list() levels <- attr(meef, "levels") groups <- unique(meef$grname) for (i in seq_along(groups)) { g <- groups[i] K <- which(meef$grname %in% g) # rename mean and sd parameters for (par in c("meanme", "sdme")) { hpar <- paste0(par, "_", i) pos <- grepl(paste0("^", hpar, "\\["), pars) hpar_new <- paste0(par, "_", meef$coef[K]) lc(out) <- rlist(pos, hpar_new) c(out) <- rename_prior(hpar, pars, names = hpar_new) } # rename latent variable parameters for (k in K) { if (any(grepl("^Xme_", pars))) { Xme <- paste0("Xme_", k) pos <- grepl(paste0("^", Xme, "\\["), pars) Xme_new <- paste0("Xme_", meef$coef[k]) if (nzchar(g)) { indices <- gsub("[ \t\r\n]", ".", levels[[g]]) } else { indices <- seq_len(sum(pos)) } fnames <- paste0(Xme_new, "[", indices, "]") lc(out) <- rlist(pos, fnames) } } # rename correlation parameters if (meef$cor[K[1]] && length(K) > 1L) { cor_type <- paste0("corme", usc(g)) cor_names <- get_cornames(meef$coef[K], cor_type, brackets = FALSE) cor_regex <- paste0("^corme_", i, "(\\[|$)") cor_pos <- grepl(cor_regex, pars) lc(out) <- rlist(cor_pos, cor_names) c(out) <- rename_prior( paste0("corme_", i), pars, new_class = paste0("corme", usc(g)) ) } } out } # helps in renaming estimated missing values rename_Ymi <- function(bterms, data, pars, ...) 
# (continuation) body of 'rename_Ymi': renames imputed response values 'Ymi'
# using the observed missing-value indices 'Jmi' of the response
{
  stopifnot(is.brmsterms(bterms))
  out <- list()
  if (is.formula(bterms$adforms$mi)) {
    resp <- usc(combine_prefix(bterms))
    resp_data <- data_response(bterms, data, check_response = FALSE)
    Ymi <- paste0("Ymi", resp)
    pos <- grepl(paste0("^", Ymi, "\\["), pars)
    if (any(pos)) {
      # Jmi holds the positions of missing values in the response vector
      Jmi <- resp_data$Jmi
      fnames <- paste0(Ymi, "[", Jmi, "]")
      lc(out) <- rlist(pos, fnames)
    }
  }
  out
}

# helps in renaming parameters of gaussian processes
# @param bterms brmsterms object of a single model component
# @param data data.frame passed to the model fitting function
# @param pars names of all parameters in the model
# @return a list of 'rlist' renaming instructions
rename_gp <- function(bterms, data, pars, ...) {
  out <- list()
  p <- usc(combine_prefix(bterms), "prefix")
  gpef <- tidy_gpef(bterms, data)
  for (i in seq_rows(gpef)) {
    # rename GP hyperparameters
    sfx1 <- gpef$sfx1[[i]]
    sfx2 <- as.vector(gpef$sfx2[[i]])
    sdgp <- paste0("sdgp", p)
    sdgp_old <- paste0(sdgp, "_", i)
    sdgp_pos <- grepl(paste0("^", sdgp_old, "\\["), pars)
    sdgp_names <- paste0(sdgp, "_", sfx1)
    lc(out) <- rlist(sdgp_pos, sdgp_names)
    c(out) <- rename_prior(sdgp_old, pars, names = sfx1, new_class = sdgp)
    lscale <- paste0("lscale", p)
    lscale_old <- paste0(lscale, "_", i)
    lscale_pos <- grepl(paste0("^", lscale_old, "\\["), pars)
    lscale_names <- paste0(lscale, "_", sfx2)
    lc(out) <- rlist(lscale_pos, lscale_names)
    c(out) <- rename_prior(lscale_old, pars, names = sfx2, new_class = lscale)
    # latent GP variables 'zgp'
    zgp <- paste0("zgp", p)
    zgp_old <- paste0(zgp, "_", i)
    if (length(sfx1) > 1L) {
      # categorical 'by' variable
      for (j in seq_along(sfx1)) {
        zgp_old_sub <- paste0(zgp_old, "_", j)
        zgp_pos <- grepl(paste0("^", zgp_old_sub, "\\["), pars)
        if (any(zgp_pos)) {
          zgp_new <- paste0(zgp, "_", sfx1[j])
          fnames <- paste0(zgp_new, "[", seq_len(sum(zgp_pos)), "]")
          lc(out) <- rlist(zgp_pos, fnames)
        }
      }
    } else {
      zgp_pos <- grepl(paste0("^", zgp_old, "\\["), pars)
      if (any(zgp_pos)) {
        zgp_new <- paste0(zgp, "_", sfx1)
        fnames <- paste0(zgp_new, "[", seq_len(sum(zgp_pos)), "]")
        lc(out) <- rlist(zgp_pos, fnames)
      }
    }
  }
  out
}

# helps in renaming smoothing term parameters
rename_sm <- function(bterms, data, pars, prior, ...)
# (continuation) body of 'rename_sm': renames unpenalized spline coefficients
# 'bs', their standard deviations 'sds', penalized coefficients 's', and
# (under special shrinkage priors) local SD parameters 'sdbs'
{
  out <- list()
  smef <- tidy_smef(bterms, data)
  if (NROW(smef)) {
    p <- usc(combine_prefix(bterms))
    Xs_names <- attr(smef, "Xs_names")
    if (length(Xs_names)) {
      bs <- paste0("bs", p)
      pos <- grepl(paste0("^", bs, "\\["), pars)
      bsnames <- paste0(bs, "_", Xs_names)
      lc(out) <- rlist(pos, bsnames)
      c(out) <- rename_prior(bs, pars, names = Xs_names)
    }
    if (has_special_prior(prior, bterms, class = "b")) {
      # local scale parameters introduced by special shrinkage priors
      sdbs <- paste0("sdbs", p)
      pos <- grepl(paste0("^", sdbs, "\\["), pars)
      sdbs_names <- paste0(sdbs, "_", Xs_names)
      lc(out) <- rlist(pos, sdbs_names)
    }
    sds <- paste0("sds", p)
    sds_names <- paste0(sds, "_", smef$label)
    s <- paste0("s", p)
    snames <- paste0(s, "_", smef$label)
    for (i in seq_rows(smef)) {
      # each smooth may consist of multiple penalized bases
      nbases <- smef$nbases[i]
      sds_pos <- grepl(paste0("^", sds, "_", i), pars)
      sds_names_nb <- paste0(sds_names[i], "_", seq_len(nbases))
      lc(out) <- rlist(sds_pos, sds_names_nb)
      new_class <- paste0(sds, "_", smef$label[i])
      c(out) <- rename_prior(paste0(sds, "_", i), pars, new_class = new_class)
      for (j in seq_len(nbases)) {
        spos <- grepl(paste0("^", s, "_", i, "_", j), pars)
        sfnames <- paste0(snames[i], "_", j, "[", seq_len(sum(spos)), "]")
        lc(out) <- rlist(spos, sfnames)
      }
    }
  }
  out
}

# helps in renaming autocorrelation parameters
# @param bterms brmsterms object of a single model component
# @param data data.frame passed to the model fitting function
# @param pars names of all parameters in the model
# @return a list of 'rlist' renaming instructions
rename_ac <- function(bterms, data, pars, ...) {
  out <- list()
  acef <- tidy_acef(bterms)
  resp <- usc(bterms$resp)
  if (has_ac_class(acef, "unstr")) {
    # unstructured residual correlations over time points
    time <- get_ac_vars(acef, "time", dim = "time")
    times <- extract_levels(get(time, data))
    corname <- paste0("cortime", resp)
    regex <- paste0("^", corname, "\\[")
    cortime_names <- get_cornames(times, type = corname, brackets = FALSE)
    lc(out) <- rlist(grepl(regex, pars), cortime_names)
  }
  out
}

# helps in renaming group-level parameters
# @param ranef: data.frame returned by 'tidy_ranef'
rename_re <- function(ranef, pars, ...)
# (continuation) body of 'rename_re': renames group-level SDs 'sd_*',
# correlations 'cor_*', per-level effects 'r_*', and degrees of freedom
# 'df_*' of student-t distributed random effects
{
  out <- list()
  if (has_rows(ranef)) {
    for (id in unique(ranef$id)) {
      # all terms sharing the same ID are modeled jointly
      r <- subset2(ranef, id = id)
      g <- r$group[1]
      rnames <- get_rnames(r)
      sd_names <- paste0("sd_", g, "__", as.vector(rnames))
      sd_pos <- grepl(paste0("^sd_", id, "(\\[|$)"), pars)
      lc(out) <- rlist(sd_pos, sd_names)
      c(out) <- rename_prior(
        paste0("sd_", id), pars, new_class = paste0("sd_", g),
        names = paste0("_", as.vector(rnames))
      )
      # rename group-level correlations
      if (nrow(r) > 1L && isTRUE(r$cor[1])) {
        type <- paste0("cor_", g)
        if (isTRUE(nzchar(r$by[1]))) {
          # separate correlation matrices per level of the 'by' variable
          cor_names <- named_list(r$bylevels[[1]])
          for (j in seq_along(cor_names)) {
            cor_names[[j]] <- get_cornames(
              rnames[, j], type, brackets = FALSE
            )
          }
          cor_names <- unlist(cor_names)
        } else {
          cor_names <- get_cornames(rnames, type, brackets = FALSE)
        }
        cor_regex <- paste0("^cor_", id, "(_[[:digit:]]+)?(\\[|$)")
        cor_pos <- grepl(cor_regex, pars)
        lc(out) <- rlist(cor_pos, cor_names)
        c(out) <- rename_prior(
          paste0("cor_", id), pars, new_class = paste0("cor_", g)
        )
      }
    }
    if (any(grepl("^r_", pars))) {
      c(out) <- rename_re_levels(ranef, pars = pars)
    }
    # degrees of freedom of student-t distributed group-level effects
    tranef <- get_dist_groups(ranef, "student")
    for (i in seq_rows(tranef)) {
      df_pos <- grepl(paste0("^df_", tranef$ggn[i], "$"), pars)
      df_name <- paste0("df_", tranef$group[i])
      lc(out) <- rlist(df_pos, df_name)
    }
  }
  out
}

# helps in renaming varying effects parameters per level
# @param ranef: data.frame returned by 'tidy_ranef'
rename_re_levels <- function(ranef, pars, ...)
# (continuation) body of 'rename_re_levels': renames per-level group-level
# effects 'r_<id>' to 'r_<group>[level,coef]' using the stored factor levels
{
  out <- list()
  for (i in seq_rows(ranef)) {
    r <- ranef[i, ]
    p <- usc(combine_prefix(r))
    r_parnames <- paste0("r_", r$id, p, "_", r$cn)
    r_regex <- paste0("^", r_parnames, "(\\[|$)")
    r_new_parname <- paste0("r_", r$group, usc(p))
    # rstan doesn't like whitespaces in parameter names
    levels <- gsub("[ \t\r\n]", ".", attr(ranef, "levels")[[r$group]])
    index_names <- make_index_names(levels, r$coef, dim = 2)
    fnames <- paste0(r_new_parname, index_names)
    lc(out) <- rlist(grepl(r_regex, pars), fnames)
  }
  out
}

# helps to rename correlation parameters of likelihoods
# @param x a brmsterms object
# @param pars names of all parameters in the model
# @return a list of 'rlist' renaming instructions
rename_family_cor_pars <- function(x, pars, ...) {
  stopifnot(is.brmsterms(x))
  out <- list()
  if (is_logistic_normal(x$family)) {
    predcats <- get_predcats(x$family)
    lncor_names <- get_cornames(
      predcats, type = "lncor", brackets = FALSE
    )
    lc(out) <- rlist(grepl("^lncor\\[", pars), lncor_names)
  }
  out
}

# helps in renaming prior parameters
# @param class the class of the parameters
# @param pars names of all parameters in the model
# @param names names to replace digits at the end of parameter names
# @param new_class optional replacement of the original class name
# @param is_vector indicate if the prior parameter is a vector
rename_prior <- function(class, pars, names = NULL,
                         new_class = class, is_vector = FALSE) {
  out <- list()
  # 'stan_rngprior' adds '__' before the digits to disambiguate
  regex <- paste0("^prior_", class, "(__[[:digit:]]+|$|\\[)")
  pos_priors <- which(grepl(regex, pars))
  if (length(pos_priors)) {
    priors <- gsub(
      paste0("^prior_", class),
      paste0("prior_", new_class),
      pars[pos_priors]
    )
    if (is_vector) {
      if (!is.null(names)) {
        # replace trailing "[i]" indices by the supplied coefficient names
        .names <- paste0("_", names)
        for (i in seq_along(priors)) {
          priors[i] <- gsub("\\[[[:digit:]]+\\]$", .names[i], priors[i])
        }
      }
      lc(out) <- rlist(pos_priors, priors)
    } else {
      # extract the disambiguating digits appended by 'stan_rngprior'
      digits <- sapply(priors, function(prior) {
        d <- regmatches(prior, gregexpr("__[[:digit:]]+$", prior))[[1]]
        if (length(d)) as.numeric(substr(d, 3, nchar(d))) else 0
      })
      for (i in seq_along(priors)) {
        if (digits[i] && !is.null(names)) {
          priors[i] <- gsub("_[[:digit:]]+$", names[digits[i]], priors[i])
        }
        if (pars[pos_priors[i]] != priors[i]) {
          # only record a renaming if the name actually changes
          lc(out) <- rlist(pos_priors[i], priors[i])
        }
      }
    }
  }
  out
}

# helper for rename_* functions
# @param pos positions (logical or integer) of the parameters to rename
# @param fnames new parameter names
# @param ... further elements, e.g. 'sort' for reordering draws
rlist <- function(pos, fnames, ...) {
  structure(nlist(pos, fnames, ...), class = c("rlist", "list"))
}

is.rlist <- function(x) {
  inherits(x, "rlist")
}

# compute index names in square brackets for indexing stan parameters
# @param rownames a vector of row names
# @param colnames a vector of columns
# @param dim the number of output dimensions
# @return all index pairs of rows and cols
make_index_names <- function(rownames, colnames = NULL, dim = 1) {
  if (!dim %in% c(1, 2))
    stop("dim must be 1 or 2")
  if (dim == 1) {
    index_names <- paste0("[", rownames, "]")
  } else {
    # two-dimensional indices "[row,col]" in column-major order via outer()
    tmp <- outer(rownames, colnames, FUN = paste, sep = ",")
    index_names <- paste0("[", tmp, "]")
  }
  index_names
}

# save original order of the parameters in the stanfit object
save_old_par_order <- function(x) {
  x$fit@sim$pars_oi_old <- x$fit@sim$pars_oi
  x$fit@sim$dims_oi_old <- x$fit@sim$dims_oi
  x$fit@sim$fnames_oi_old <- x$fit@sim$fnames_oi
  x
}

# perform actual renaming of Stan parameters
# @param x a brmsfit object
# @param y a list of lists each element allowing
#   to rename certain parameters
# @return a brmsfit object with updated parameter names
do_renaming <- function(x, y) {
  # apply a single 'rlist' renaming instruction across all chains
  .do_renaming <- function(x, y) {
    stopifnot(is.rlist(y))
    x$fit@sim$fnames_oi[y$pos] <- y$fnames
    for (i in seq_len(chains)) {
      names(x$fit@sim$samples[[i]])[y$pos] <- y$fnames
      if (!is.null(y$sort)) {
        x$fit@sim$samples[[i]][y$pos] <-
          x$fit@sim$samples[[i]][y$pos][y$sort]
      }
    }
    return(x)
  }
  chains <- length(x$fit@sim$samples)
  # temporary fix for issue #387 until fixed in rstan
  for (i in seq_len(chains)) {
    x$fit@sim$samples[[i]]$lp__.1 <- NULL
  }
  for (i in seq_along(y)) {
    x <- .do_renaming(x, y[[i]])
  }
  x
}

# order parameter draws after parameter class
# @param x brmsfit object
reorder_pars <- function(x) {
  all_classes <-
unique(c( "b", "bs", "bsp", "bcs", "ar", "ma", "sderr", "lagsar", "errorsar", "car", "rhocar", "sdcar", "cosy", "cortime", "sd", "cor", "df", "sds", "sdgp", "lscale", valid_dpars(x), "hs", "R2D2", "sdb", "sdbsp", "sdbs", "sdar", "sdma", "lncor", "Intercept", "tmp", "rescor", "delta", "simo", "r", "s", "zgp", "rcar", "sbhaz", "Ymi", "Yl", "meanme", "sdme", "corme", "Xme", "prior", "lprior", "lp" )) # reorder parameter classes class <- get_matches("^[^_]+", x$fit@sim$pars_oi) new_order <- order( factor(class, levels = all_classes), !grepl("_Intercept(_[[:digit:]]+)?$", x$fit@sim$pars_oi) ) x$fit@sim$dims_oi <- x$fit@sim$dims_oi[new_order] x$fit@sim$pars_oi <- names(x$fit@sim$dims_oi) # reorder single parameter names nsubpars <- ulapply(x$fit@sim$dims_oi, prod) has_subpars <- nsubpars > 0 new_order <- new_order[has_subpars] nsubpars <- nsubpars[has_subpars] num <- lapply(seq_along(new_order), function(x) as.numeric(paste0(x, ".", sprintf("%010d", seq_len(nsubpars[x])))) ) new_order <- order(unlist(num[order(new_order)])) x$fit@sim$fnames_oi <- x$fit@sim$fnames_oi[new_order] chains <- length(x$fit@sim$samples) for (i in seq_len(chains)) { # attributes of samples must be kept x$fit@sim$samples[[i]] <- subset_keep_attr(x$fit@sim$samples[[i]], new_order) } x } # wrapper function to compute and store quantities in the stanfit # object which were not computed / stored by Stan itself # @param x a brmsfit object # @return a brmsfit object compute_quantities <- function(x) { stopifnot(is.brmsfit(x)) x <- compute_xi(x) x } # helper function to compute parameter xi, which is currently # defined in the Stan model block and thus not being stored # @param x a brmsfit object # @return a brmsfit object compute_xi <- function(x, ...) { UseMethod("compute_xi") } #' @export compute_xi.brmsfit <- function(x, ...) { if (!any(grepl("^tmp_xi(_|$)", variables(x)))) { return(x) } draws <- try(extract_draws(x)) if (is_try_error(draws)) { warning2("Trying to compute 'xi' was unsuccessful. 
", "Some S3 methods may not work as expected.") return(x) } compute_xi(draws, fit = x, ...) } #' @export compute_xi.mvbrmsprep <- function(x, fit, ...) { stopifnot(is.brmsfit(fit)) for (resp in names(x$resps)) { fit <- compute_xi(x$resps[[resp]], fit = fit, ...) } fit } #' @export compute_xi.brmsprep <- function(x, fit, ...) { stopifnot(is.brmsfit(fit)) resp <- usc(x$resp) tmp_xi_name <- paste0("tmp_xi", resp) if (!tmp_xi_name %in% variables(fit)) { return(fit) } mu <- get_dpar(x, "mu") sigma <- get_dpar(x, "sigma") y <- matrix(x$data$Y, dim(mu)[1], dim(mu)[2], byrow = TRUE) bs <- -1 / matrixStats::rowRanges((y - mu) / sigma) bs <- matrixStats::rowRanges(bs) tmp_xi <- as.vector(as.matrix(fit, pars = tmp_xi_name)) xi <- inv_logit(tmp_xi) * (bs[, 2] - bs[, 1]) + bs[, 1] # write xi into stanfit object xi_name <- paste0("xi", resp) samp_chain <- length(xi) / fit$fit@sim$chains for (i in seq_len(fit$fit@sim$chains)) { xi_part <- xi[((i - 1) * samp_chain + 1):(i * samp_chain)] # add warmup draws not used anyway xi_part <- c(rep(0, fit$fit@sim$warmup2[1]), xi_part) fit$fit@sim$samples[[i]][[xi_name]] <- xi_part } fit$fit@sim$pars_oi <- c(fit$fit@sim$pars_oi, xi_name) fit$fit@sim$dims_oi[[xi_name]] <- numeric(0) fit$fit@sim$fnames_oi <- c(fit$fit@sim$fnames_oi, xi_name) fit$fit@sim$n_flatnames <- fit$fit@sim$n_flatnames + 1 fit } brms/R/stan-prior.R0000644000176200001440000006562014430717063013655 0ustar liggesusers# unless otherwise specified, functions return a single character # string defining the likelihood of the model in Stan language # Define priors for parameters in Stan language # @param prior an object of class 'brmsprior' # @param class the parameter class # @param coef the coefficients of this class # @param group the name of a grouping factor # @param type Stan type used in the definition of the parameter # if type is empty the parameter is not initialized inside 'stan_prior' # @param dim stan array dimension to be specified after the parameter name # cannot 
be expressed via 'suffix' as the latter should apply to # individual coefficients while 'dim' should not # TODO: decide whether to support arrays for parameters at all # an alternative would be to specify elements directly as parameters # @param coef_type Stan type used in the definition of individual parameter # coefficients # @param prefix a prefix to put at the parameter class # @param suffix a suffix to put at the parameter class # @param broadcast Stan type to which the prior should be broadcasted # in order to handle vectorized prior statements # supported values are 'vector' or 'matrix' # @param comment character string containing a comment for the parameter # @param px list or data.frame after which to subset 'prior' # @return a named list of character strings in Stan language stan_prior <- function(prior, class, coef = NULL, group = NULL, type = "real", dim = "", coef_type = "real", prefix = "", suffix = "", broadcast = "vector", header_type = "", comment = "", px = list(), normalize = TRUE) { prior_only <- isTRUE(attr(prior, "sample_prior") == "only") prior <- subset2( prior, class = class, coef = c(coef, ""), group = c(group, ""), ls = px ) # special priors cannot be passed literally to Stan is_special_prior <- is_special_prior(prior$prior) if (any(is_special_prior)) { special_prior <- prior$prior[is_special_prior] stop2("Prior ", collapse_comma(special_prior), " is used in an invalid ", "context. 
See ?set_prior for details on how to use special priors.") } px <- as.data.frame(px, stringsAsFactors = FALSE) upx <- unique(px) if (nrow(upx) > 1L) { # TODO: find a better solution to handle this case # can only happen for SD parameters of the same ID base_prior <- lb <- ub <- rep(NA, nrow(upx)) base_bounds <- data.frame(lb = lb, ub = ub) for (i in seq_rows(upx)) { sub_upx <- lapply(upx[i, ], function(x) c(x, "")) sub_prior <- subset2(prior, ls = sub_upx) base_prior[i] <- stan_base_prior(sub_prior) base_bounds[i, ] <- stan_base_prior(sub_prior, col = c("lb", "ub")) } if (length(unique(base_prior)) > 1L) { # define prior for single coefficients manually # as there is not single base_prior anymore take_coef_prior <- nzchar(prior$coef) & !nzchar(prior$prior) prior_of_coefs <- prior[take_coef_prior, vars_prefix()] take_base_prior <- match_rows(prior_of_coefs, upx) prior$prior[take_coef_prior] <- base_prior[take_base_prior] } base_prior <- base_prior[1] if (nrow(unique(base_bounds)) > 1L) { stop2("Conflicting boundary information for ", "coefficients of class '", class, "'.") } base_bounds <- base_bounds[1, ] } else { base_prior <- stan_base_prior(prior) # select both bounds together so that they come from the same base prior base_bounds <- stan_base_prior(prior, col = c("lb", "ub")) } bound <- convert_bounds2stan(base_bounds) # generate stan prior statements out <- list() par <- paste0(prefix, class, suffix) has_constant_priors <- FALSE has_coef_prior <- any(with(prior, nzchar(coef) & nzchar(prior))) if (has_coef_prior || nzchar(dim) && length(coef)) { # priors on individual coefficients are also individually set # priors are always set on individual coefficients for arrays index_two_dims <- is.matrix(coef) coef <- as.matrix(coef) prior <- subset2(prior, coef = coef) estimated_coef_indices <- list() used_base_prior <- FALSE for (i in seq_rows(coef)) { for (j in seq_cols(coef)) { index <- i if (index_two_dims) { c(index) <- j } prior_ij <- subset2(prior, coef = coef[i, 
j]) if (NROW(px) > 1L) { # disambiguate priors of coefficients with the same name # coming from different model components stopifnot(NROW(px) == NROW(coef)) prior_ij <- subset2(prior_ij, ls = px[i, ]) } # zero rows can happen if only global priors present stopifnot(nrow(prior_ij) <= 1L) coef_prior <- prior_ij$prior if (!isTRUE(nzchar(coef_prior))) { used_base_prior <- TRUE coef_prior <- base_prior } if (!stan_is_constant_prior(coef_prior)) { # all parameters with non-constant priors are estimated c(estimated_coef_indices) <- list(index) } if (nzchar(coef_prior)) { # implies a proper prior or constant if (type == coef_type && !nzchar(dim)) { # the single coefficient of that parameter equals the parameter stopifnot(all(index == 1L)) par_ij <- par } else { par_ij <- paste0(par, collapse("[", index, "]")) } if (stan_is_constant_prior(coef_prior)) { coef_prior <- stan_constant_prior( coef_prior, par_ij, broadcast = broadcast ) str_add(out$tpar_prior_const) <- paste0(coef_prior, ";\n") } else { coef_prior <- stan_target_prior( coef_prior, par_ij, broadcast = broadcast, bound = bound, resp = px$resp[1], normalize = normalize ) str_add(out$tpar_prior) <- paste0(lpp(), coef_prior, ";\n") } } } } # the base prior may be improper flat in which no Stan code is added # but we still have estimated coefficients if the base prior is used has_estimated_priors <- isTRUE(nzchar(out$tpar_prior)) || used_base_prior && !stan_is_constant_prior(base_prior) has_constant_priors <- isTRUE(nzchar(out$tpar_prior_const)) if (has_estimated_priors && has_constant_priors) { # need to mix definition in the parameters and transformed parameters block if (!nzchar(coef_type)) { stop2("Can either estimate or fix all values of parameter '", par, "'.") } coef_type <- stan_type_add_bounds(coef_type, bound) for (i in seq_along(estimated_coef_indices)) { index <- estimated_coef_indices[[i]] iu <- paste0(index, collapse = "_") str_add(out$par) <- glue( " {coef_type} par_{par}_{iu};\n" ) ib <- collapse("[", 
index, "]") str_add(out$tpar_prior_const) <- cglue( " {par}{ib} = par_{par}_{iu};\n" ) } } } else if (nzchar(base_prior)) { # only a global prior is present and will be broadcasted ncoef <- length(coef) has_constant_priors <- stan_is_constant_prior(base_prior) if (has_constant_priors) { constant_base_prior <- stan_constant_prior( base_prior, par = par, ncoef = ncoef, broadcast = broadcast ) str_add(out$tpar_prior_const) <- paste0(constant_base_prior, ";\n") } else { target_base_prior <- stan_target_prior( base_prior, par = par, ncoef = ncoef, bound = bound, broadcast = broadcast, resp = px$resp[1], normalize = normalize ) str_add(out$tpar_prior) <- paste0(lpp(), target_base_prior, ";\n") } } if (nzchar(type)) { # only define the parameter here if type is non-empty type <- stan_adjust_par_type(type, base_prior) type <- stan_type_add_bounds(type, bound) comment <- stan_comment(comment) par_definition <- glue(" {type} {par}{dim};{comment}\n") if (has_constant_priors) { # parameter must be defined in the transformed parameters block str_add(out$tpar_def) <- par_definition } else { # parameter can be defined in the parameters block str_add(out$par) <- par_definition } if (nzchar(header_type)) { str_add(out$pll_args) <- glue(", {header_type} {par}") } } else { if (has_constant_priors) { stop2("Cannot fix parameter '", par, "' in this model.") } } has_improper_prior <- !is.null(out$par) && is.null(out$tpar_prior) if (prior_only && has_improper_prior) { stop2("Sampling from priors is not possible as ", "some parameters have no proper priors. 
", "Error occurred for parameter '", par, "'.") } out } # extract base prior information for a given set of priors # the base prior is the lowest level, non-flat, non-coefficient prior # @param prior a brmsprior object # @param col columns for which base prior information is to be found # @param sel_prior optional brmsprior object to subset 'prior' before # finding the base prior # @return the 'col' columns of the identified base prior stan_base_prior <- function(prior, col = "prior", sel_prior = NULL, ...) { stopifnot(all(col %in% c("prior", "lb", "ub"))) if (!is.null(sel_prior)) { # find the base prior using sel_prior for subsetting stopifnot(is.brmsprior(sel_prior)) prior <- subset2( prior, class = sel_prior$class, group = c(sel_prior$group, ""), dpar = sel_prior$dpar, nlpar = sel_prior$nlpar, resp = sel_prior$resp, ... ) } else { prior <- subset2(prior, ...) } stopifnot(length(unique(prior$class)) <= 1) # take all rows with non-zero entries on any of the chosen columns take <- !nzchar(prior$coef) & Reduce("|", lapply(prior[col], nzchar)) prior <- prior[take, ] if (!NROW(prior)) { if (length(col) == 1L) { return("") } else { return(brmsprior()[, col]) } } vars <- c("group", "nlpar", "dpar", "resp", "class") for (v in vars) { take <- nzchar(prior[[v]]) if (any(take)) { prior <- prior[take, ] } } stopifnot(NROW(prior) == 1L) prior[, col] } # Stan prior in target += notation # @param prior character string defining the prior # @param par name of the parameter on which to set the prior # @param ncoef number of coefficients in the parameter # @param bound bounds of the parameter in Stan language # @param broadcast Stan type to which the prior should be broadcasted # @param name of the response variable # @return a character string defining the prior in Stan language stan_target_prior <- function(prior, par, ncoef = 0, broadcast = "vector", bound = "", resp = "", normalize = TRUE) { prior <- gsub("[[:space:]]+\\(", "(", prior) prior_name <- get_matches( 
"^[^\\(]+(?=\\()", prior, perl = TRUE, simplify = FALSE ) for (i in seq_along(prior_name)) { if (length(prior_name[[i]]) != 1L) { stop2("The prior '", prior[i], "' is invalid.") } } prior_name <- unlist(prior_name) prior_args <- rep(NA, length(prior)) for (i in seq_along(prior)) { prior_args[i] <- sub(glue("^{prior_name[i]}\\("), "", prior[i]) prior_args[i] <- sub(")$", "", prior_args[i]) } if (broadcast == "matrix" && ncoef > 0) { # apply a scalar prior to all elements of a matrix par <- glue("to_vector({par})") } if (nzchar(prior_args)) { str_add(prior_args, start = TRUE) <- " | " } lpdf <- stan_lpdf_name(normalize) out <- glue("{prior_name}_{lpdf}({par}{prior_args})") par_class <- unique(get_matches("^[^_]+", par)) par_bound <- convert_stan2bounds(bound) prior_bound <- prior_bounds(prior_name) trunc_lb <- is.character(par_bound$lb) || par_bound$lb > prior_bound$lb trunc_ub <- is.character(par_bound$ub) || par_bound$ub < prior_bound$ub if (normalize) { # obtain correct normalization constants for truncated priors if (trunc_lb || trunc_ub) { wsp <- wsp(nsp = 4) # scalar parameters are of length 1 but have no coefficients ncoef <- max(1, ncoef) if (trunc_lb && !trunc_ub) { str_add(out) <- glue( "\n{wsp}- {ncoef} * {prior_name}_lccdf({par_bound$lb}{prior_args})" ) } else if (!trunc_lb && trunc_ub) { str_add(out) <- glue( "\n{wsp}- {ncoef} * {prior_name}_lcdf({par_bound$ub}{prior_args})" ) } else if (trunc_lb && trunc_ub) { str_add(out) <- glue( "\n{wsp}- {ncoef} * log_diff_exp(", "{prior_name}_lcdf({par_bound$ub}{prior_args}), ", "{prior_name}_lcdf({par_bound$lb}{prior_args}))" ) } } } out } # fix parameters to constants in Stan language # @param prior character string defining the prior # @param par name of the parameter on which to set the prior # @param ncoef number of coefficients in the parameter # @param broadcast Stan type to which the prior should be broadcasted # @return a character string defining the prior in Stan language stan_constant_prior <- 
function(prior, par, ncoef = 0, broadcast = "vector") { stopifnot(grepl("^constant\\(", prior)) prior_args <- gsub("(^constant\\()|(\\)$)", "", prior) if (broadcast == "vector") { if (ncoef > 0) { # broadcast the scalar prior on the whole parameter vector prior_args <- glue("rep_vector({prior_args}, rows({par}))") } # no action required for individual coefficients of vectors } else if (broadcast == "matrix") { if (ncoef > 0) { # broadcast the scalar prior on the whole parameter matrix prior_args <- glue("rep_matrix({prior_args}, rows({par}), cols({par}))") } else { # single coefficient is a row in the parameter matrix prior_args <- glue("rep_row_vector({prior_args}, cols({par}))") } } glue(" {par} = {prior_args}") } # Stan code for global parameters of special shrinkage priors stan_special_prior <- function(bterms, out, data, prior, ranef, normalize, ...) { stopifnot(is.list(out)) tp <- tp() lpp <- lpp() lpdf <- stan_lpdf_name(normalize) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) if (!has_special_prior(prior, px)) { return(out) } special <- get_special_prior(prior, px, main = TRUE) str_add(out$data) <- glue( " int Kscales{p}; // number of local scale parameters\n" ) if (special$name == "horseshoe") { str_add(out$data) <- glue( " // data for the horseshoe prior\n", " real hs_df{p}; // local degrees of freedom\n", " real hs_df_global{p}; // global degrees of freedom\n", " real hs_df_slab{p}; // slab degrees of freedom\n", " real hs_scale_global{p}; // global prior scale\n", " real hs_scale_slab{p}; // slab prior scale\n" ) str_add(out$par) <- glue( " // horseshoe shrinkage parameters\n", " real hs_global{p}; // global shrinkage parameter\n", " real hs_slab{p}; // slab regularization parameter\n", " vector[Kscales{p}] hs_local{p}; // local parameters for the horseshoe prior\n" ) hs_scale_global <- glue("hs_scale_global{p}") if (isTRUE(special$autoscale)) { str_add(hs_scale_global) <- glue(" * sigma{usc(px$resp)}") } str_add(out$tpar_prior) <- glue( 
"{lpp}student_t_{lpdf}(hs_global{p} | hs_df_global{p}, 0, {hs_scale_global})", str_if(normalize, "\n - 1 * log(0.5)"), ";\n", "{lpp}inv_gamma_{lpdf}(hs_slab{p} | 0.5 * hs_df_slab{p}, 0.5 * hs_df_slab{p});\n" ) str_add(out$tpar_def) <- glue( " vector[Kscales{p}] scales{p}; // local horseshoe scale parameters\n" ) str_add(out$tpar_comp) <- glue( " // compute horseshoe scale parameters\n", " scales{p} = scales_horseshoe(hs_local{p}, hs_global{p}, hs_scale_slab{p}^2 * hs_slab{p});\n" ) str_add(out$model_prior) <- glue( "{tp}student_t_{lpdf}(hs_local{p} | hs_df{p}, 0, 1)", str_if(normalize, "\n - rows(hs_local{p}) * log(0.5)"), ";\n" ) } else if (special$name == "R2D2") { str_add(out$data) <- glue( " // data for the R2D2 prior\n", " real R2D2_mean_R2{p}; // mean of the R2 prior\n", " real R2D2_prec_R2{p}; // precision of the R2 prior\n", " // concentration vector of the D2 prior\n", " vector[Kscales{p}] R2D2_cons_D2{p};\n" ) str_add(out$par) <- glue( " // parameters of the R2D2 prior\n", " real R2D2_R2{p};\n", " simplex[Kscales{p}] R2D2_phi{p};\n" ) var_mult <- "" if (isTRUE(special$autoscale)) { var_mult <- glue("sigma{usc(px$resp)}^2 * ") } str_add(out$tpar_def) <- glue( " real R2D2_tau2{p}; // global R2D2 scale parameter\n", " vector[Kscales{p}] scales{p}; // local R2D2 scale parameters\n" ) str_add(out$tpar_comp) <- glue( " // compute R2D2 scale parameters\n", " R2D2_tau2{p} = {var_mult}R2D2_R2{p} / (1 - R2D2_R2{p});\n", " scales{p} = scales_R2D2(R2D2_phi{p}, R2D2_tau2{p});\n" ) str_add(out$tpar_prior) <- glue( "{lpp}beta_{lpdf}(R2D2_R2{p} | R2D2_mean_R2{p} * R2D2_prec_R2{p}, ", "(1 - R2D2_mean_R2{p}) * R2D2_prec_R2{p});\n" ) str_add(out$model_prior) <- glue( "{tp}dirichlet_{lpdf}(R2D2_phi{p} | R2D2_cons_D2{p});\n" ) } if (has_special_prior(prior, px, class = "sd")) { # this has to be done here rather than in stan_re() # because the latter is not local to a linear predictor ids <- unique(subset2(ranef, ls = px)$id) str_add(out$prior_global_scales) <- cglue(" 
sd_{ids}") str_add(out$prior_global_lengths) <- cglue(" M_{ids}") } # split up scales into subsets belonging to different parameter classes # this connects the global to the local priors scales <- strsplit(trimws(out$prior_global_scales), " ")[[1]] lengths <- strsplit(trimws(out$prior_global_lengths), " ")[[1]] out$prior_global_scales <- out$prior_global_lengths <- NULL lengths <- c("1", lengths) for (i in seq_along(scales)) { lower <- paste0(lengths[1:i], collapse = "+") upper <- paste0(lengths[2:(i+1)], collapse = "+") # some scale parameters are a scalar not a vector bracket1 <- str_if(lengths[i+1] == "1", "[1]") str_add(out$tpar_comp) <- glue( " {scales[i]} = scales{p}[({lower}):({upper})]{bracket1};\n" ) } out } # Stan code of normal priors on regression coefficients # in non-centered parameterization # @param class name of the coefficient class # @param suffix shared suffix of the involved variables # @param suffix_class extra suffix of the class # @param suffix_K extra suffix of K (number of coefficients) stan_prior_non_centered <- function(class = "b", suffix = "", suffix_class = "", suffix_K = "", normalize = TRUE) { out <- list() csfx <- glue("{class}{suffix}") csfx2 <- glue("{class}{suffix_class}{suffix}") Ksfx <- glue("K{suffix_K}{suffix}") lpdf <- stan_lpdf_name(normalize) str_add(out$tpar_def) <- glue( " vector[{Ksfx}] {csfx2}; // scaled coefficients\n" ) str_add(out$par) <- glue( " vector[{Ksfx}] z{csfx}; // unscaled coefficients\n" ) str_add(out$tpar_def) <- glue( " vector[{Ksfx}] sd{csfx}; // SDs of the coefficients\n" ) str_add(out$tpar_special_prior) <- glue( " {csfx2} = z{csfx} .* sd{csfx}; // scale coefficients\n" ) str_add(out$model_prior) <- glue( "{tp()}std_normal_{lpdf}(z{csfx});\n" ) str_add(out$prior_global_scales) <- glue(" sd{csfx}") str_add(out$prior_global_lengths) <- glue(" {Ksfx}") str_add(out$pll_args) <- glue(", vector {csfx2}") out } # combine unchecked priors for use in Stan # @param prior a brmsprior object # @return a single 
# character string in Stan language
stan_unchecked_prior <- function(prior) {
  stopifnot(is.brmsprior(prior))
  # unchecked priors are those with an empty 'class'
  if (all(nzchar(prior$class))) {
    return("")
  }
  prior <- subset2(prior, class = "")
  collapse(" ", prior$prior, ";\n")
}

# Stan code to sample separately from priors
# @param tpar_prior character string taken from stan_prior that contains
#   all priors that can potentially be sampled from separately
# @param par_declars the parameters block of the Stan code
#   required to extract boundaries
# @param gen_quantities Stan code from the generated quantities block
# @param special_prior a list of values pertaining to special priors
#   such as horseshoe or lasso
# @param sample_prior take draws from priors?
stan_rngprior <- function(tpar_prior, par_declars, gen_quantities,
                          special_prior, sample_prior = "yes") {
  if (!is_equal(sample_prior, "yes")) {
    return(list())
  }
  # one statement per element; whitespace and literal "\n" removed
  tpar_prior <- strsplit(gsub(" |\\n", "", tpar_prior), ";")[[1]]
  # D will contain all relevant information about the priors
  D <- data.frame(prior = tpar_prior[nzchar(tpar_prior)])
  # parameter name sits between '_lpdf(' and '|'
  pars_regex <- "(?<=(_lpdf\\())[^|]+"
  D$par <- get_matches(pars_regex, D$prior, perl = TRUE, first = TRUE)
  # 'std_normal' has no '|' and thus the above regex matches too much
  np <- !grepl("\\|", D$prior)
  np_regex <- ".+(?=\\)$)"
  D$par[np] <- get_matches(np_regex, D$par[np], perl = TRUE, first = TRUE)
  # 'to_vector' should be removed from the parameter names
  has_tv <- grepl("^to_vector\\(", D$par)
  tv_regex <- "(^to_vector\\()|(\\)(?=((\\[[[:digit:]]+\\])?)$))"
  D$par[has_tv] <- gsub(tv_regex, "", D$par[has_tv], perl = TRUE)
  # do not sample from some auxiliary parameters
  excl_regex <- c("tmp")
  excl_regex <- paste0("(", excl_regex, ")", collapse = "|")
  excl_regex <- paste0("^(", excl_regex, ")(_|$)")
  D <- D[!grepl(excl_regex, D$par), ]
  if (!NROW(D)) return(list())
  # rename parameters containing indices: 'b[2]' -> 'b__2'
  has_ind <- grepl("\\[[[:digit:]]+\\]", D$par)
  D$par[has_ind] <- ulapply(D$par[has_ind], function(par) {
    ind_regex <- "(?<=\\[)[[:digit:]]+(?=\\])"
    ind <- get_matches(ind_regex, par, perl = TRUE)
    gsub("\\[[[:digit:]]+\\]", paste0("__", ind), par)
  })
  # cannot handle priors on variable transformations
  D <- D[D$par %in% stan_all_vars(D$par), ]
  if (!NROW(D)) return(list())
  class_old <- c("^L_", "^Lrescor")
  class_new <- c("cor_", "rescor")
  D$par <- rename(D$par, class_old, class_new, fixed = FALSE)
  # distribution name sits between 'lprior+=' and '_lpdf('
  dis_regex <- "(?<=lprior\\+=)[^\\(]+(?=_lpdf\\()"
  D$dist <- get_matches(dis_regex, D$prior, perl = TRUE, first = TRUE)
  D$dist <- sub("corr_cholesky$", "corr", D$dist)
  args_regex <- "(?<=\\|)[^$\\|]+(?=\\)($|-))"
  D$args <- get_matches(args_regex, D$prior, perl = TRUE, first = TRUE)
  # 'std_normal_rng' does not exist in Stan
  has_std_normal <- D$dist == "std_normal"
  D$dist[has_std_normal] <- "normal"
  D$args[has_std_normal] <- "0,1"
  # extract information from the initial parameter definition
  par_declars <- unlist(strsplit(par_declars, "\n", fixed = TRUE))
  par_declars <- gsub("^[[:blank:]]*", "", par_declars)
  par_declars <- par_declars[!grepl("^//", par_declars)]
  all_pars_regex <- "(?<= )[^[:blank:]]+(?=;)"
  all_pars <- get_matches(all_pars_regex, par_declars, perl = TRUE)
  all_pars <- rename(all_pars, class_old, class_new, fixed = FALSE)
  all_bounds <- get_matches("<.+>", par_declars, first = TRUE)
  all_types <- get_matches("^[^[:blank:]]+", par_declars)
  all_dims <- get_matches(
    "(?<=\\[)[^\\]]*", par_declars, first = TRUE, perl = TRUE
  )
  # define parameter types and boundaries
  D$dim <- D$bounds <- ""
  D$type <- "real"
  for (i in seq_along(all_pars)) {
    k <- which(grepl(paste0("^", all_pars[i]), D$par))
    D$dim[k] <- all_dims[i]
    D$bounds[k] <- all_bounds[i]
    # only a few classes keep their declared (non-real) type
    if (grepl("^((simo_)|(theta)|(R2D2_phi))", all_pars[i])) {
      D$type[k] <- all_types[i]
    }
  }
  # exclude priors which depend on other priors
  # TODO: enable sampling from these priors as well
  found_vars <- lapply(D$args, find_vars, dot = FALSE, brackets = FALSE)
  contains_other_pars <- ulapply(found_vars, function(x) any(x %in% all_pars))
  D <- D[!contains_other_pars, ]
  if (!NROW(D)) return(list())
  out <- list()
  # sample priors in the generated quantities block
  D$lkj <- grepl("^lkj_corr$", D$dist)
  # lkj_corr_rng needs the dimension as its first argument
  D$args <- paste0(ifelse(D$lkj, paste0(D$dim, ","), ""), D$args)
  D$lkj_index <- ifelse(D$lkj, "[1, 2]", "")
  D$prior_par <- glue("prior_{D$par}")
  str_add(out$gen_def) <- " // additionally sample draws from priors\n"
  str_add(out$gen_def) <- cglue(
    " {D$type} {D$prior_par} = {D$dist}_rng({D$args}){D$lkj_index};\n"
  )
  # sample from truncated priors using rejection sampling
  D$lb <- stan_extract_bounds(D$bounds, bound = "lower")
  D$ub <- stan_extract_bounds(D$bounds, bound = "upper")
  Ibounds <- which(nzchar(D$bounds))
  if (length(Ibounds)) {
    str_add(out$gen_comp) <- " // use rejection sampling for truncated priors\n"
    for (i in Ibounds) {
      # redraw while the draw violates either bound
      wl <- if (nzchar(D$lb[i])) glue("{D$prior_par[i]} < {D$lb[i]}")
      wu <- if (nzchar(D$ub[i])) glue("{D$prior_par[i]} > {D$ub[i]}")
      prior_while <- paste0(c(wl, wu), collapse = " || ")
      str_add(out$gen_comp) <- glue(
        " while ({prior_while}) {{\n",
        " {D$prior_par[i]} = {D$dist[i]}_rng({D$args[i]}){D$lkj_index[i]};\n",
        " }}\n"
      )
    }
  }
  out
}

# are multiple base priors supplied?
# @param px list of class, dpar, etc.
#   elements used to infer parameter suffixes
stan_has_multiple_base_priors <- function(px) {
  # more than one distinct (class, dpar, ...) combination means
  # multiple base priors
  px <- as.data.frame(px, stringsAsFactors = FALSE)
  nrow(unique(px)) > 1L
}

# check if any constant priors are present
# @param prior a vector of character strings
stan_is_constant_prior <- function(prior) {
  grepl("^constant\\(", prior)
}

# extract Stan boundaries expression from a string
stan_extract_bounds <- function(x, bound = c("lower", "upper")) {
  bound <- match.arg(bound)
  x <- rm_wsp(x)
  # value between '<bound>=' and the next ',' or '>'
  regex <- glue("(?<={bound}=)[^,>]*")
  get_matches(regex, x, perl = TRUE, first = TRUE)
}

# choose the right suffix for Stan probability densities
# @param normalize use the normalized density?
# @param int discrete distribution (pmf rather than pdf)?
stan_lpdf_name <- function(normalize, int = FALSE) {
  if (normalize) {
    out <- ifelse(int, "lpmf", "lpdf")
  } else {
    out <- ifelse(int, "lupmf", "lupdf")
  }
  out
}

# add bounds to a Stan type specification which may include dimensions
stan_type_add_bounds <- function(type, bound) {
  regex_dim <- "\\[.*$"
  # insert the bounds between the base type and its dimension suffix
  type_type <- sub(regex_dim, "", type)
  type_dim <- get_matches(regex_dim, type, first = TRUE)
  glue("{type_type}{bound}{type_dim}")
}

# adjust the type of a parameter based on the assigned prior
stan_adjust_par_type <- function(type, prior) {
  # TODO: add support for more type-prior combinations?
  combs <- data.frame(
    type = "vector", prior = "dirichlet", new_type = "simplex"
  )
  for (i in seq_rows(combs)) {
    regex_type <- paste0("^", combs$type[i], "\\[?")
    regex_prior <- paste0("^", combs$prior[i], "\\(")
    if (grepl(regex_type, type) && grepl(regex_prior, prior)) {
      # keep the original dimension brackets, swap only the base type
      brackets <- get_matches("\\[.*\\]$", type, first = TRUE)
      type <- paste0(combs$new_type[i], brackets)
      break
    }
  }
  type
}

# stops if a prior bound is given
stopif_prior_bound <- function(prior, class, ...) {
  lb <- stan_base_prior(prior, "lb", class = class, ...)
  ub <- stan_base_prior(prior, "ub", class = class, ...)
  if (nzchar(lb) || nzchar(ub)) {
    stop2("Cannot add bounds to class '", class, "' for this prior.")
  }
  return(invisible(NULL))
}

# lprior plus equal
lpp <- function(wsp = 2) {
  wsp <- collapse(rep(" ", wsp))
  paste0(wsp, "lprior += ")
}

# NOTE(review): the following line is a tar archive member header from the
# flattened package dump, kept verbatim as a comment:
# brms/R/brm_multiple.R0000644000176200001440000002270014436303654014245 0ustar liggesusers
#' Run the same \pkg{brms} model on multiple datasets
#'
#' Run the same \pkg{brms} model on multiple datasets and then combine the
#' results into one fitted model object. This is useful in particular for
#' multiple missing value imputation, where the same model is fitted on multiple
#' imputed data sets. Models can be run in parallel using the \pkg{future}
#' package.
#'
#' @inheritParams brm
#' @param data A \emph{list} of data.frames each of which will be used to fit a
#'   separate model. Alternatively, a \code{mids} object from the \pkg{mice}
#'   package.
#' @param data2 A \emph{list} of named lists each of which will be used to fit a
#'   separate model. Each of the named lists contains objects representing data
#'   which cannot be passed via argument \code{data} (see \code{\link{brm}} for
#'   examples). The length of the outer list should match the length of the list
#'   passed to the \code{data} argument.
#' @param recompile Logical, indicating whether the Stan model should be
#'   recompiled for every imputed data set. Defaults to \code{FALSE}. If
#'   \code{NULL}, \code{brm_multiple} tries to figure out internally, if
#'   recompilation is necessary, for example because data-dependent priors
#'   have changed. Using the default of no recompilation should be fine in
#'   most cases.
#' @param combine Logical; Indicates if the fitted models should be combined
#'   into a single fitted model object via \code{\link{combine_models}}.
#'   Defaults to \code{TRUE}.
#' @param fit An instance of S3 class \code{brmsfit_multiple} derived from a
#'   previous fit; defaults to \code{NA}.
#'   If \code{fit} is of class
#'   \code{brmsfit_multiple}, the compiled model associated with the fitted
#'   result is re-used and all arguments modifying the model code or data are
#'   ignored. It is not recommended to use this argument directly, but to call
#'   the \code{\link[brms:update.brmsfit_multiple]{update}} method, instead.
#' @param ... Further arguments passed to \code{\link{brm}}.
#'
#' @details The combined model may issue false positive convergence warnings, as
#'   the MCMC chains corresponding to different datasets may not necessarily
#'   overlap, even if each of the original models did converge. To find out
#'   whether each of the original models converged, investigate
#'   \code{fit$rhats}, where \code{fit} denotes the output of
#'   \code{brm_multiple}.
#'
#' @return If \code{combine = TRUE} a \code{brmsfit_multiple} object, which
#'   inherits from class \code{brmsfit} and behaves essentially the same. If
#'   \code{combine = FALSE} a list of \code{brmsfit} objects.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' library(mice)
#' imp <- mice(nhanes2)
#'
#' # fit the model using mice and lm
#' fit_imp1 <- with(lm(bmi ~ age + hyp + chl), data = imp)
#' summary(pool(fit_imp1))
#'
#' # fit the model using brms
#' fit_imp2 <- brm_multiple(bmi ~ age + hyp + chl, data = imp, chains = 1)
#' summary(fit_imp2)
#' plot(fit_imp2, pars = "^b_")
#' # investigate convergence of the original models
#' fit_imp2$rhats
#'
#' # use the future package for parallelization
#' library(future)
#' plan(multiprocess)
#' fit_imp3 <- brm_multiple(bmi~age+hyp+chl, data = imp, chains = 1)
#' summary(fit_imp3)
#' }
#'
#' @export
brm_multiple <- function(formula, data, family = gaussian(), prior = NULL,
                         data2 = NULL, autocor = NULL, cov_ranef = NULL,
                         sample_prior = c("no", "yes", "only"), sparse = NULL,
                         knots = NULL, stanvars = NULL, stan_funs = NULL,
                         silent = 1, recompile = FALSE, combine = TRUE,
                         fit = NA,
                         algorithm = getOption("brms.algorithm", "sampling"),
                         seed = NA, file = NULL, file_compress = TRUE,
                         file_refit = getOption("brms.file_refit", "never"),
                         ...) {
  combine <- as_one_logical(combine)
  file_refit <- match.arg(file_refit, file_refit_options())
  if (!is.null(file)) {
    if (file_refit == "on_change") {
      stop2("file_refit = 'on_change' is not supported for brm_multiple yet.")
    }
    # optionally load saved model object
    if (!combine) {
      stop2("Cannot use 'file' if 'combine' is FALSE.")
    }
    if (file_refit != "always") {
      fits <- read_brmsfit(file)
      if (!is.null(fits)) {
        return(fits)
      }
    }
  }
  algorithm <- match.arg(algorithm, algorithm_choices())
  silent <- validate_silent(silent)
  recompile <- as_one_logical(recompile)
  data_name <- substitute_name(data)
  if (inherits(data, "mids")) {
    # expand a mice 'mids' object into a list of completed data.frames
    require_package("mice", version = "3.0.0")
    data <- lapply(seq_len(data$m), mice::complete, data = data)
  } else if (!is_data_list(data)) {
    stop2("'data' must be a list of data.frames.")
  }
  if (!is.null(data2)) {
    if (!is_data2_list(data2)) {
      stop2("'data2' must be a list of named lists.")
    }
    if (length(data2) != length(data)) {
      stop2("'data2' must have the same length as 'data'.")
    }
  }
  if (is.brmsfit(fit)) {
    # avoid complications when updating the model
    class(fit) <- setdiff(class(fit), "brmsfit_multiple")
  } else {
    # compile the model once on the first data set without sampling
    args <- nlist(
      formula, data = data[[1]], family, prior, data2 = data2[[1]],
      autocor, cov_ranef, sample_prior, sparse, knots, stanvars,
      stan_funs, algorithm, silent, seed, ...
    )
    args$chains <- 0
    if (silent < 2) {
      message("Compiling the C++ model")
    }
    fit <- suppressMessages(do_call(brm, args))
  }
  dots <- list(...)
  # allow compiling the model without sampling (#671)
  if (isTRUE(dots$chains == 0) || isTRUE(dots$iter == 0)) {
    class(fit) <- c("brmsfit_multiple", class(fit))
    return(fit)
  }
  fits <- futures <- rhats <- vector("list", length(data))
  # launch one (possibly parallel) future per imputed data set
  for (i in seq_along(data)) {
    futures[[i]] <- future::future(
      update(fit, newdata = data[[i]], data2 = data2[[i]],
             recompile = recompile, silent = silent, ...),
      packages = "brms", seed = TRUE
    )
  }
  for (i in seq_along(data)) {
    if (silent < 2) {
      message("Fitting imputed model ", i)
    }
    fits[[i]] <- future::value(futures[[i]])
    if (algorithm == "sampling") {
      # TODO: replace by rhat of the posterior package
      rhats[[i]] <- data.frame(as.list(rhat(fits[[i]])))
      if (any(rhats[[i]] > 1.1, na.rm = TRUE)) {
        warning2("Imputed model ", i, " did not converge.")
      }
    }
  }
  if (combine) {
    fits <- combine_models(mlist = fits, check_data = FALSE)
    attr(fits$data, "data_name") <- data_name
    if (algorithm == "sampling") {
      fits$rhats <- do_call(rbind, rhats)
    }
    class(fits) <- c("brmsfit_multiple", class(fits))
  }
  if (!is.null(file)) {
    fits <- write_brmsfit(fits, file, compress = file_compress)
  }
  fits
}

#' Combine Models fitted with \pkg{brms}
#'
#' Combine multiple \code{brmsfit} objects, which fitted the same model.
#' This is useful for instance when having manually run models in parallel.
#'
#' @param ... One or more \code{brmsfit} objects.
#' @param mlist Optional list of one or more \code{brmsfit} objects.
#' @param check_data Logical; indicates if the data should be checked
#'   for being the same across models (defaults to \code{TRUE}).
#'   Setting it to \code{FALSE} may be useful for instance
#'   when combining models fitted on multiple imputed data sets.
#'
#' @details This function just takes the first model and replaces
#'   its \code{stanfit} object (slot \code{fit}) by the combined
#'   \code{stanfit} objects of all models.
#'
#' @return A \code{brmsfit} object.
#'
#' @export
combine_models <- function(..., mlist = NULL, check_data = TRUE) {
  models <- c(list(...), mlist)
  check_data <- as_one_logical(check_data)
  if (!length(models)) {
    stop2("No models supplied to 'combine_models'.")
  }
  for (i in seq_along(models)) {
    if (!is.brmsfit(models[[i]])) {
      stop2("Model ", i, " is no 'brmsfit' object.")
    }
    models[[i]] <- restructure(models[[i]])
  }
  # all models must agree with the first one on formula, parameters and
  # (optionally) data
  ref_formula <- formula(models[[1]])
  ref_pars <- variables(models[[1]])
  ref_mf <- model.frame(models[[1]])
  for (i in seq_along(models)[-1]) {
    if (!is_equal(formula(models[[i]]), ref_formula)) {
      stop2("Models 1 and ", i, " have different formulas.")
    }
    if (!is_equal(variables(models[[i]]), ref_pars)) {
      stop2("Models 1 and ", i, " have different parameters.")
    }
    if (check_data && !is_equal(model.frame(models[[i]]), ref_mf)) {
      stop2(
        "Models 1 and ", i, " have different data. ",
        "Set 'check_data' to FALSE to turn off checking of the data."
      )
    }
  }
  # merge all stanfit objects into the first model
  sflist <- from_list(models, "fit")
  out <- models[[1]]
  out$fit <- rstan::sflist2stanfit(sflist)
  if (out$backend == "cmdstanr") {
    # preserve cmdstanr-specific attributes lost by sflist2stanfit
    att <- attributes(models[[1]]$fit)
    attributes(out$fit)$CmdStanModel <- att$CmdStanModel
    attributes(out$fit)$metadata <- att$metadata
  }
  out
}

# validity check for 'data' input of 'brm_multiple'
is_data_list <- function(x) {
  # see also issue #1383
  is.list(x) && (is.vector(x) || all(vapply(x, is.data.frame, logical(1L))))
}

# validity check for 'data2' input of 'brm_multiple'
is_data2_list <- function(x) {
  is.list(x) && all(ulapply(x, function(y) is.list(y) && is_named(y)))
}

# warn that only the first imputed data set is used
# NOTE(review): definition continues on the next (binary-corrupted) line of
# this dump; the closing tokens are intentionally not duplicated here
warn_brmsfit_multiple <- function(x, newdata = NULL) {
  if (is.brmsfit_multiple(x) && is.null(newdata)) {
    warning2(
      "Using only the first imputed data set. Please interpret the results ",
      "with caution until a more principled approach has been implemented."
) } invisible(x) } brms/R/sysdata.rda0000644000176200001440000214163614424715563013606 0ustar liggesusersBZh91AY&SY'`w_rYۡIYM})>.VMޟ_>]D:Ww;}ý@}WX*aE=k Qq/{N{mϊgPZF}˩ݦ@}> $59@Ɛj3mۀme%$C{<wZTW@ RvćAmaeMAd$G Y0H{uӬ%DR%hhQJHQEA@+AOk @PP(;(4(@H  "Cタ(:>v` U{e@^ J* 9 MUͨVR%)JPB P @@AR%(S@2)cFh`^h 6im4,tPvu3 {9ݡ[!N#֞JԩMu-T Smt7sƶM;Qۮ2֛vwIm=Xm=Gzqkr{pzg\ЖodgZ77aKUu÷ZkmjFڲєR=U R UTIP`QnH!*s[緾yg}@‹#;qN٠P,D>v0hC `#A 40LM@010 F" $@@ahibiЙLje2` 0dA!)M2Cjzm)h&dњh'2zID=Pba00OSM6L4CSɆ MOSMG$JH@U?S꟒p (c&p:yjH=}'" jW &>yO]d*o1@ ^~zy_HA4A/}^6TtڪlmF&XF9Ħf0>yqt#c˻F }LmBNVޚ }]ܑ#}y:I3'X&3Jsv}],}2x!2[{|saEN 1.9dScQW㘞oq g^Q"gA)"]qȿEf}v̫l FlÕ#O!<({ وq>gr8r)'V =q2N i JxWB[6]U61i6X艋GuH dgMm<]BaҾ!?G$} JaCH5J,e,>D ĺz I(h}UNTbnQR%Js!5 U]ж/O=ϕEq8pf 92 (G#l?˿E< I+L@I޾?) <)5ȫ*~^7oKHwh=d`Z %"SIo\7“<12H|w%[\7#^eFih|ܓi-2Dt,yԞK\,}JoˬǿnJ7ƓF<7ήhfQ\1IFk _+]{/UyH`E}[A`:N+XxiʪS}-7[\b6,I`r#}Y$$׉Niv7 x?╥hIQi-(bJ:j4Z)tv%ť1p)W[ܒ Flj)1z P]qlaTUPI n TO{VLvɋ cEŲ";բi3J]kFMf- /b M*d^# )bdE!hE~Յf÷i4pӯw q""u 7P͖,RӮK1F&ͱ $Z9&ތ 3"E9̳i3AA)l@A@vvE5ʝe(BemwzKws{+"'P<9$H,Enj57]A]D9rIa `_\Þ@ "V"aO`>a?-χ焼wI?ӝ2O7E+~E} a9k;D3jH@:Ұ &_ a;A/7%ʪ0W><ޫ1c ;wҶ҂ )"@1ݪl}m B :[~ 2)$6] Ad%e 1αH"6`[/mӼQxan|,׮g,kFJ4Zxݺ^X<ꨛT,ϛ{W_8}vee)\޺mcc#NӺr>yI!!K7038،21LڄN_^1O^X0_O.~iRa2ok;uI#CvAfyͻ+><|dxV;Pqys3k=5O"AE d![5MK#BAegv[Eu+dMQK'siwڛU@>v7ps++ \5ﴩtya ǿj:+ql`bnbȶ/X #<bK|UFcM!6x{/3B \ "=\63s,]֦C}0lW?-Slw+OX43g`)25M@m_~TVNL@iTo!PڷEO <X P AHiB5ѿPST6/Zh8xCJcSk~mU0ڮxfrL2*$i$ j F3<0L C.YACUIpPL4d dR$bc'Ƙgmҍ>^tGwfv{:4HXgmW$ ,lw':s=6 wM{͹E'2y!I$[n)Z"zyBe#R3Z׶0ca5m/+{la국)LhSjK >}]Y$` S P3 G!z++W^3!$7Q_2b]rߠbAC7YO*\^{b~vt<1-=VG(gſH+od]s|0!i 6Y]r|T_`/) 3KcdʥF>hkkl\[qsv>l|LWlJ<3 b5dC /unXW6Mj#l&#2`XdDxQZUEg[%xE+:)M7tm[v6՚iԪYJUJVjYJ *R"h *U 9\mPQ,ǛOoX@騜zZ E@ST * usQ~h@7mt JZj1Hho1mqe.OFr(jwZqWMFւ6ϱd6m\ҹY-~R=z%">fg `*QK  (J%D>?;O8$&J~QM>Pýu/lAC^`?Gm-+&}k1CvbGc.}?~ʻ )P}: Z.6hhb_KR9&⃧>& 0ȇw}KalM)ʙwE\foib07k,_`mTh\ #r!v&u?ӞY=|QSoBr*nY71CP._#a;WLBmy_7m+F%~1nכ&z[C 0f$>1?{6Qa/Em[EVT}t4<@$(/Fs׷)g|[0٫\=F{<f.zfkYVfnDZUZرUcbch#j>ds]K\ֹޮ6$1޺iarxF)^_a/exC}œζ[& 
\#(Ӵl[{=7Q%>_IWKlmaƨWۮ]"5c{E5s7TSir<0xhїEvkr(l']"H-eYgQ|?6vʡ$eqZ~{V]Pgi81Ihk |rI&F0g|xJj1pCOùK4_CyỈO3Lj)Xgx8ښ\5dPV'v[j>Õu ˺$ztŮ ݱ3hO,>\ֈq6Oa$GXDZd|򼉥!|s^ф‘ܓx<{E|λaR }霮h`Q>FWk;EDVى^._?u/]ʙtn{:$_9 tbr#_s0Q""(q;H.H7.>C׮2dF34cSDh+u{wb`}7Ar1C+QD=wJe}]~X fFOv5K:̓b_C0f 2Z-U]j5m(7yN&sb̨3M#o낯fhb7RHQ#JЀȡM_q?ͥ4l}4&8sqE((3_ FuMU`dž9cBe|ʢvE"Q(G  FnuM-mY7Dݻ\{M?rLйtGb>g1) ky?Tj&/qݝTF͡r1[3>:ϗ>VJh@dIL =*h(0PU@FApHTeF!bQD !De\|FW@PtT!R!R!R!R QR0H$ *B @%R +JR*ABRR:S\>AN?I{'P!䓷Q)e' o`'R|q绩ì}@sl`1PEi@?[dn!> o[A N$ SCҫ9dS pHTX!}?:h8\+D{pNZ6XOt!)Wl 4<{,P-<ҿxC wLk>XyT`IJ"zl| lj2ADr/!@GC=9IJ'J(^ wF˾+~_ Hzu``=/Y!އ.c8܌O$d7axS_"uXx2h{ƐjtUis7=q?8\:b;:`s!XC1.l8/x>_y1"!"_xߍ1 |J% c7)pXoOb5[K[CM,/Ǭ%8HbctUw vEX***%!oUTgSR>!L=d?GD(P L"\`%}?qJ: W0. \q=:P&Ll8 VXH*ncJ)j߱0D5@_(xЛT?P > r&S`dJL0%Хr Hy$Q݌YK`~6Y [a116YXBp¿jNvwo>UDWPC$YP66*Y?DEA?~.~ײY?ƛ7zPD'ۃ Z?ų=߾1)sQYڈA+Tu_We'IJQi>4͋mw}"+kLAP>-;]a7֭~WGO˧W tc3>'_8A3lA !yósfT5j@P+"&bT&m!nJ^m̡v ߁*-A4m0z֢HIƪ-jǁLdaO+lUm6V!-ȴ,$HԟQq Pܼ.f @8'=[I $ۼowj!}_ѻi_e.ӳkͲ<7L&x- Иn]sng(4p.<􌑎ߧD4_qڜIwy8T^gs`Ds=.v|tIƗw1ݫ+RPM[=4y7)QMHHH #"Hɯ]sE'la^ɶF DsA i1I<7ڏSGng83<ջTT "UBUT*T%U T¤JU%RMERU%*EAT *  * A\@s)PE@NTNT✨JNi*p:J*@b|AUAW.( P;?D*)=PyH;AヸzECCr'|C.JslNl&+@0\O-.#I8Jz#{ /-L)h;ϖh!O/2‡$4]g_z z1DA" ~?_?O~8ck鞼= {5Di:"mKI/e/̿2@$V8c2^ [!ZA8!/`ʗv<0Z o]E ̌ h^0^ Eb!hKb'FDdȒ,X®,A, _x.XꏨJT|oK"b  Z]V ZKJ,k|Sui~P?9-\ztc{0?(fjj5U؉F%znZ5+S~nM4Os2S{YYklL>A2 GYԙ'K./?1=^L^/Ž ܹR6#MvjкJ'1x[ퟎ(J~5Ӛ]a@]nw&H,0l+C^/^:Gx]-,e)Qy- x iϟmr\">ù@37#X(0J$&Ek!/oX@t55j_ FLQVl-> V63?`-x.]Fba"Y+AFPaEf Pq:&2e''RWX۠y| u³M L!y%EIYv-pvR EF0e}Ճ;ljp7Cz-[G=P}6~O;|{ .MowW= ߠ6HQ#U:raO^]M欂˖]<129 v; 1 ) cx@yn2ZCXĥ7X+ϾxZs>St!~?aaD^&o EDSzhzF2!2~oVo{aUty_\ $Z_*qo,'da.vP+b\ v-I/ ;7-3aq/^kY}I>f S\bbe[R3l5%|XG ƷQl{jTE_Qͯ(ي?G2u~2θe ]ŅW;!Hy7;7*UGNZLS4;cwv]~q,k)nxFDhgLGqrm}ٍ砽q^ہi 6q LuYJ棙ְ7*}`E4cgHonVv 3\n9L%wJ&iLlˬ<Ŷg7]m7X˕L(+XYݳ0jQrҲܽLUz,Hw3T.eU5 &ЄlSF4/~j _ذrZEkckE#ҝ 7?H5Z٠(Fs70xs>_f>>oo٬XG@]ӠYv~fan $J|-2BZ<7̥y222 iM/KЗzkv23!a1+ !')fN3x l^nki"G 14st)d~jd\})w*GLEB< =cWyR\I6b6 i7"怒q\bt=i:Ux*&w_j1WTb*8Bˆa &cD=|ȉ# olGaGj5s|u 
}?mPSݿʌ}/K'EwTqqn$äIJ ~j]cc|D/5ӜR6|e hlVS AЁwSk-Wk VȲgcws H=k들&,obֶi[fղVb;0_*7ײX#˥$lKqp'4 pl,<a=={)d3MU{]=:2srBM^tB0[}XU TG7}f׳IM_-%SR4T%3Dez~%,1b=vnK=P޿:&oSI-Mmk^b#U<ۥgᓌ6n"u`;sCք{4kYCZmN,[ z גU_Kj7ӆW%KFx]0x3RTn.14ЫS{ao-\,$df {&X<#K&?kjlkuMSsS@RxrXn+\ ECzib;ti\h/m 8fKVb6UNXiLs,{0asl&ZpZ*e(fA[ Eѻ4! PMK IAMSS#0CO}?LO-wa=__zrÿf2ۭ^jqJ;R>~y`~2%=ZYwI\P!p^Ѩg9<Ν![e ޭFtFܭɹ7&.ӛtitn`-Ѻ[tnjjg=Y2a.OnI׶wCo:H F! WA4NN_C.$јH Fx4q5%`g0#@6y?$Ҙ |x |`zjC狧v\?,?~" /†NaǼO+/g7m @76ZEyl8RllccP<`{B ~c^1C!Q/!wH@Ǽ}`l0"*`!JTO!A-h2S%09B`vBdo`6Tm XKb(EdAdAdAdc\4(Fholii,qŜYhe,g3I $o  [Fdeĸdd"C"A0RwhY^ޞ[l sUjsLI h00ȐrPHCJz֣ZN!̅#91oaP"GmA,! :/%cSؔ* ^¤i& 3 a uߦ!@? zj}^_S ' ^_"&B~^HOT-;m"") , H t 5jkACo?9temdY`B;0(DR))@N'?r `#-H-x5xwG\L$ٸ qDtBkV^="P$b5,RCR5J8. 3xw\n8a :d7Na~olw`‰EO{C֔/<7[DvK$ SvzR_ ߻aLv2IJFH|ϯre4l`kw<(CRF $$7uv Y꼘aD) |/} Z;Id9bUCPE(ȾqvkDk ?q[YO[d:YZ4k L]i%udRj0?fᡭ&lm8ՔRdbK;bU]QPna})g@`0D+osHHҪXRDNz^"dB^r[}\q{~{D6 nͨ*7I%2ՊboH#9BG'@{H:YTq)ܭa{?iv'0葻ZrR Ț&!qї~kH.!Iaʽ4! \uXC$ץcڡ6[xHG@@2QZFTC;9XEtŰni*m;OhӘܔ \%"@ų̛^A$[ (Ns746{˦ajJDuAFd`z Vz&P#9Zþ54(՜κ?0%.%+9j!e59͆Qss Jh5l]n>g[QQfE I%WK??O2 "s&1G:LBU nAe+/3f>{Ȝ˙xFaÃx#0z\B-| 7U;=vc1"=^-}o]@@;!;%b VU+uФA 1]S .#:wf=.z3 c'r8dhժ+0agʝm 1[k77~ar۳A;R&z3YB$Ʉ/ t(( 2OkN>I4 K,."<@ka,JP]8aJs,[3ĞQSӠU7arX}-#jNc[oKunUGjmV v7k] ǣ׻oyeo1ҩqGx2K5~~ywVFl-3M}Suyx&_C,0Pfyw]b%g}A$ɠjc %k\iQ} bѓNAΞ#5rOhm`bgu@*7tݖeGyFNI>۳φnvjr-nYvݿW~,o7|r{OW-otۀe)CPqt(gB;<QbD=AL^hcӘ/ q+72ӻ*RbIp, sW E1l7)d7;[T?";ʚ:UWQ&r#hRfqC=ǃ+kYM_u&hg:mg_G` C^߷ݡ𰰩aPguI Z" MQ [JBJw-ay@j=TXaGH兖"*3qIN&%w;Z36@ۉ5e^q^XxߙҺκۓӷbk$ `ߕ4w7S?3=(C~N[ZϏJ}jr74pBWlA\Š#K Ez;AّiIV@OaBShIk_u~_<R^!zYQKOoD'@wi@+Av}EKY2Q,r-XH"@`7̯d9ߒ3A/#sZY=d58|»Tay1\8eFѷGE)rTF"A 3VbVh8g${ IJ3Nơ*>L-KƑwVfd0`+{!`I[%HŸUU9UBo0O4+A *!%EKT/c,C2Bg-u: 3t0ULQǧq"*a~\ǑI{O5u {cC?uMxV&l~{;0L%B I>"{\Erb^뫤hS2" X"HB͛[_fww]kK˅FKbinÇG?xЧӢoHp (t7,V7N^1z GT_Y,@:^5n|ohZ!p%rwC˺a!ֆn&!^AK"JtA _ ߕ4g1k拍nhg.^{l\DPYf2>Şٜ{v̳ ;]-N15cBj~ʕxmu[xka\m? 
1YCq\_sY=|1֛ &w3mbUT~fUMUG%xT D筲KA K#A?.;mE+L _Wlu)L=Mo5E'Z4z_I]ґ.?ڜI͹gߐ€3Z•`sPyُy-j .tՉ2w:6&17pY!# d}-/}^,aW5R:)dc7 kOUD`@O ')/dΔ'G<D҉y.'K+yŧd\jj?`2k(oQA% އ=4?>qח<"'|iA Ld<-vtX LU0"?( (ljE=S,@^5R?=8ĉ/ga&%YchCPm,.i?cND>i&[M9>|eȘ{&`x6b 8m9}܏9##"}@ T#<X/GqZrV$}[oN/%cdG yQR !m yͣyߡ TdmQ54n*=nRn5lW+Z jsP3ϥ =I|, NsċX&E&UAnʳCLgdN2R(DnBgGEdݧY㾝4Hګ}q4G#_OCx7e0pq;.O1V}== Q2yk@z[ܿH8MGb!26FL2 C`J*hwfVh g%ޫ6 {EV:fZ/v V/NMcG$揃KKtQ'>Iqי+0( d#?U20^('jDے}m}/%xy  _۫GՕpy׮ωng}W`]^:mog;/"D<&"aAVw2 ʕ2w:c 0CWcxFH NƤ\*tu%cF2d~:CR6ñ= 5j~buj*@a됐: ~O(*!=i2'BvSu c~7mqt-}70#X;gxcJ{8/Zs.mzPk/֖I_Z}vR鿖G<||SaGCx9ǜ^ia+ ]f Dx]e7q @D8#9Y3~oGǧ56blWG\j-tZD6ZO5ۗn¨L"B7'>_A~4KԌ wPgZ=z ¶e.čj:[$[ |vC^})T' _PxI?"x _\Lh)AtS@L",xH @uI@J|!ANd|yHul~!ra4-M7 {[{ H_[&OK/P;%£BmPf{i..I=麺عglcrm,&b$~/NT;i.|V*%Kp;ޛ9fZm9RhgX;B-fTB@RC$s[_̚ CAgPD';eJo!Q%/n yxYrkd¸2p̸TjT  AN?;P/MU1@"z7p3!=0 Rnac6?*9a C/B ЇZ8 @ٷL#@o,`xʹo=mPgߨw z"Ki@y=2=wϾ9aP6F18:9&j0Ÿ~TaKUfrs%(bQ pB<cF+fo6ڍ~WwkkS-"ɟBv;+e2OR?*/dd@wSUW|ݗWg/+'1&cr"`Yr<~K]Ƚٞ@3.ᆬ]{1bؿI D?}4i-Ӥr7mm׃vp2Y0q3%u"fG=G鿕VWHrn,q\v Ydi:NNqrK\kɳIO$^ VOMyC+Ǚ\4;&*FH  }P'0*.(Pqg%ĴvpFQe9'2?r46A3xfޡ {sOO{}g/oM.n[[oz/A'4p]̩E P BPNB')ɠͤC;K(JS& ݎU~PrNK+ʯ/|o -|UH7|5OOظTx9ď$5JGߣ-Nn(nqY8"rpg뻫Τ:;qSt]p A՟)+P<(b^rM4Č/jVAK%j4$1$jg+4A&7a`` ,-`grz˜?r!cJLɘ/&/9 wѬOF9s/1Zߜݭ/ \hdO=G{9d30s_^CirmƵuvl3P2h R =ڂ>6jT-۷_\ D3g-6 Q]F~ɽo8<-M@NӵfP!@8$B}գb/(!}\h>:ƿ?OozF t60zȪ=#u+kjGq=>iѧY,wOt:yMZu;x b5衛D/D ^/׀^-s{ Y-~"k$¡5b$xL֊W,۸`jrnh\k?x+jzaYh9|旋CA?k--?c)I9yAG=lC.CQ0ALF,oTeZBϱ(xL I՟N.1RaO~bXP?3𜯴xob զ`p\?D봺vw=9[d N8Sw^ƴ$hC!>فn4S`g9ޡW[%Obvz tEn6(Q*jnY |Uy3=XZygۃvP a?}k/эbP2hiPCvG yC~,! PjTW:sNlk;e KMݻ:<$9f{:nm)M=?Q<|PExvy`Rm0cfΒDxɘp)B(̠X85ʌs "TpHə|ktScxQM-G!967疱&au i99Le#0Aûvg-݋;Mj%Y# Ð_ As%zC܃OOQd,/$&U⑤=9:xG U$ c -cl,ȡfx12zܗLd4'ujji%8}tb"|/d< P4/}D"AAǂ,+ζMtCWp0&I<$H1<~}Nz&qSnloU8ZY(PCT@jeR~vm#!" C! ~asx̎+/Ez_~3-IEhhE4*⢵jӓ!T)Rݦ"zCu_LD>&lط&|P A7,޶Ns\sPKT&})(0@kh<:$4dh 8ah%}Kp,*ۙN2ёP?Yshf c:2>o`#Qi"H'fuٵ*0f!}q̡Uj3JEp6F%w`p! 
OF̆vF4A:߱!yly.atwFtʻ Y޻h6w&~!"Dt_9?&?4L|نYM@T]Zðۡg<(hi·T "TKF\N0a=L;mCeջP*©amKD8_N}o:G=GT&F0wg@}ռt/XI'>E@K+8v_6")ӱ} 5.ϣw}haD_91Z,9guy~Ϻ۽`p,S6P28A=9*9Pv81XNrA]Q~Yqo5l|?n.7&ܚ/65T7m5g}E۸ 9To4 T;wj@tbRY3̞o%β 6i #s2 $kkel0fZɏwgYL< {,rm~mhi[5rιw36 ''H;ב%u\%Gh`6Ve|6Ȭ~-~cMmw6vxHhl^F`U]CCïC` M>։ pg! Tq BB`Q9|Tq iw˨:8Cy&y_ 7G,)F`?;A ɋhVOfg+eB4Ѧ84-e>poGGp.Q˫DZHq.8[=h1ˁ)DjE쮤I}:!0q|ۦAAQs]+~~%X^lJ/\66{D!y{!cˋgck&WZh pOP)܃%>6r&_2bpHd@\z2;h6@SvD6bc ̍U8 : 8t{~*, &ܼO}f+;c֝ -#zGtXz8%:$?lT: T^wsfh6[J" 3X0sF4 kēFJ5: mr"w7t\vvo'z*t n[tBR3a=1[E6E[j`lAHҕt kz[o|fo5꯾;~?"=@r X}$ nS~I.7J+F3\: U@e#)`Em"=<=8~@@8U=Jm47?4gSDc FD67:k W{v rRNZe^t[zmmv@oHLy,2#RŴim-VTT#z5QEmI?&\r-΄]j k14U颹DOc+1=HJvI2C00&$*XEq8Ui_v9 ]Ӫ*B77yLaºCAK}c o5gvu%3%7| G=ɿuJk}_vx%fҲ/= jy!xv@[?ђ^'#whcț9訧]j sJjpd@@D 2Ǿ2{s@雳XZ:6<3-Bt>#G^u7SLQkREw5]zs[ys٪ЀZˑ[p3ު Fm?q<ɬ* Ǥ/|Tp%Uu1'|5DdE0v[Å@R۟哱7-!ThP0* %UV>Yq!% d1 UdMD<uReUj6[[h5|G0B1 }U#_E|%׫2=@d,MykVwz꛲/]M@az=oDV~ +=+?~~/ x3s^9Q7c ed:Z/垖Jdf&c2D~ɓ Hy5A. sh1=r өhKw]NBz| AԘo,q]) ,&^l_r' bVd@h߻ xq,k]]^.6CMvJ[ROX3 -|(46xO RR*li+_xwKzݦ ?khlll%a7gcaO 67;@6%BmsYph8Z(/|auR{/$p15O?Ӵ,.|x]c26eH`Wc }/#[vi7wJcM.>;C;<в1~e;qَ+g6{!e9k!tlcp|#nUC Bm6<.лiaw:v8lO ; "[>4 MIm55SvWR #)D)mhH/8&X-? &A.Zxl#d!FaȾK*0̂W枒hQwvٟܜntadLIt%f LLҙ>{it}^M8ƣƞ!X'%+z7FV״ˮ)d<;4CCaDx%7 Gv zͫuK"7LB.r \dڮI^e`[eC4NGܯLt,!b\43@ONz=JOoU4>KƯzҟy{nNysrljmpM4ڕz:=g5)R و!fp_l-:o岻~J$:x^ 9mYΧA ) !X`^ӮoR= ] 7Cպcdak\ai2 sa<"&("cذfI`f.R5ay|DpMxh n|j!<<70yv=UH9!=^/ʅ$`WTwbv$C0a`;ܰnf|FG{ MH@xP??GJrIO7)i0$ϡ7q|n/+>&J@r"ӵ =ش/SF_BN .n,pn/BQ:7|OÖBdx%j̤j[_&F &q],Z[A0&G\'n"A(c"ew_ S3 .! -hf YaP i$| ͚ۛuJ[9qVn*Pۏ͓ 0A]C:& (Cq-lSzQ ^rl4!L|qYq.(.; :S\盶hJDu|Cb gvSGH@|\,u+~9@̃_pd7m v9)tyyd&SsRR#$l/nJ& rB;B0ጴv2F&~g3 Ɉx4Ĕ⥢; =^c7;=0B e+V"M=E}|3 $ >>B]4anOwM.+Br}n , {{r #d揆XA g+&e! 
ᱶ\dq+iXC8+z6x6MgR]0_XHe{8N8w{(7R~'k;ͬA TFJĨܕes^p khOB<433onCl!g yT>&t7Jh'S}xe*hz<9&RYxHq&vdkF/p Wh`mN)`8`8F7-thq8117ɗ}CD/M^bô3`s3Xry?5_ ,*ꩥ#Gpߣ tn }qɳ%:7p0JPԆEs~"AȵkMC{[OCYtL9q"0\M[C؄l[A *׸ m@Fg%S7`p q5*= Dp suB- \9yWvhi|.>h0wèrs{%N+H| V^rCQrzPAUrY [Ιٽ#ᕨ -y-bs2eL}CjUP0\?}Ƚ `;3TLnM͇@3l_e[V pW !d5.xn#q ӢnB#s{.VVLEȹ6d9j3^JeU739\ ȈǂɒI+%$goffuS5e7~ 5⮜8d{:l&43AXd|ceױ=鞎4KsQu)5 \v\1n-Zxb ͖9cfʨ[ V +]O,&ySKkhZj2CVšqB4lXCS{͐8u35ñ2PmpqThv tv]mX&8և%!~3隖0fh?\aWbхq=CY֠JZ_3 J:؆W#<*d{g{xW<L[,"--xp_M8In:/@2DdvSm i6%-WwN!H8-eVE' IT57&aѱЙ7d+X,oFl;sW @˪ W * ܃ f=z趔PB,*j*,ֿL$_,E8ɏ%?9Xm ^s䡀N{yq|SI9+ ׶M汾`ޕwLOL '{E3FavP,(l-~Ű32*zws B?gx/VŋN8shcǹߋk#ar`Ӛ{Mu{'5%Gp#A}vJb鲬`NRr۩C ^C>5'f\ Z׏`c1g}jmՓ拄,ݝad#/ɲ)`ȨG,}xEF`lae5EaR}[u\.bāOnT MX7L"( a>pv&HZ%V kfK#oPLfF%1rëMnveƋ "`󼿫Ų2r!`PW+B6}*rO:i* 7iBW!fڤQqCTeY`\s"͑V fQCuaqP\g4KeR`!C;*)Uܾtp$+lU wl=pgD Fslnӂ:Ͻ5fDRS8ГAVkcASSvM𰠲<˓F-{$^ɑȱO̪9$7LYdrFGm;HrT2'y0Eg[Py&`T U2wum>V] {.UDYYp\؋LێU>zJ몯dv+2~.UY2Nvm1c~5 px-^ݐ@+ A;mF5Xj;;sfw3^? M{Õ/%4*GMVM7}MhYbK*TΘȩڜ#A۫ :&2PBu>Ix]3-hoΰ*NIW`(u%qgu*k8dW~,"EAMa01Ze@B.Kg^ҒRpoEJfTVpzWEZ1Ur`pYQטXp'7C4q.CD^+ *_-60_@0@9WOTN~UY&`FC @1x֠<3Xxv ɂV/[B3#dD&8P3a 49'9&|TrG-KB8A_<͉1N7^%[ \>K* Ud##v~ 7:8-\FkXG '*S-#xbw}vK,qp uVx N!PGoHa°6>>"yGTnfz+T,KESA7;Opr%/-T9q;]Dr[hAFHٲL*ja s۴J~k>JԴ9+ZS-\]\X[6X'0jʱ:JÊ؁lv3cCwEyNakfJղE#+BPݙ?W}pzm,yp.0Y*=cfpVAj9. ,1v5COc섯wy!ʢJ,"(? 
ͰdJhvESkTiST23UW 4D)y^aA()0ΞZ,d7j*M@QIdn 2F`5#)Tw31qd7F¢*AW3 Y-S/K7 \Ui {#`% BfYG㒿uV)5&Iet u/+ jmNH,5k/26?k ۝wQSIzuҟDj&F>FThu M1.W멮9Άaot} H՝4cӴ(CP> -y; Mm+)顬9ّtq@ x`F!4# tdlG"lD>_ aCi jScX=Wzg:OJd_׺cN| GlZxκm2~ie'!bçƿOwYb;%+{Wo<#3?N~iCClm=avQPNV|*?k5BSNUY'ܯqҙ*>N7VnfFAb/Q?[5-LgwsɏM |U״t[9n{ѿx|_GqfK 4fqyS4Y'8iT1#ZEafETMQFf~d?!N~eȩ%~Zԑ7ջ/xYT3# eWR2|WGoȽ?ZvF?6/Cϰ~Sӵz+r* FJ3e2܄g3W?åW9U3V-G)U\(j<4$6sM>a-޿4}޿ϋ/^2}ӿrOe\?r}!\pEKE[MA?A !'K ?k?mujUVP$FcH~~!2q;jI ߔK+$(H1?,-f)*e,:AXb!\Aaº0[_m}}ujQk?g.ۓC.r%e>uJxSJzfE{@o4}~EAV"꺉] t7j աSGs>~;a{JJ1BR#]rCѿѡsTQ~|ΠΎo>%Arb86oӡ@͊b1>t|&`cL UŽ5+eRdJg} E4)A4c!)FwN o$\󼯙z2D)B):frQW`?ۛD!8Bq!&m´{eBiPRdԦ[66@Zmcl-D֍JeEm)٤ԕSZi2QYjU*[QjSXe%3ke,ڋei5Feli*R!(P$fXdC ( H *4B DAB,! @@3B얛Ek)4IZI( J (HJ+2TeM%IY4M d$22 d3L -@(4dSSF1&e3iHّF,,̢|ADHV&&J6c1,F5XliFѳH!i3L KJJ$@YY"RX+/՚D{@ gp֒IFuJRajF $ !Xš>2 fI5cԔ3$6Dl$"&DD2fb ZfP$iEdEQh$ai62dز[n U@9 Jhh4NtМsssqۮݤsu.uۻ%sNӹλ(t.nuҎwu ]suκbu;3rqGtIt"n%wrBeϧɠE&@dMuZ1rb$:oZmg(h- 4P1<@R(a QL(w4 F-R?uH@EФ&@S(R\\O_6 &d̂@,̔ѡ2Qf3͖j+Uʈ@d&&wvFɔ [UbVGwrGڮU)%/JϰO`8&_?F NL=\Pgss1eh8-g26S'0#=lQ׆,1kHl /a^."x hFVR%[ދX̊Io]cz(#v fU7K>E9B e=5)ayqPǩ8K^+&18?Ĭ@KP`9{68؉ưU6"iG$.6 d645s{-fVL}\ծb+#K]6t\"%Y2=̶ >J# ir*R?UzjpڌNV'VB e|Ri6ް᪱ⴧ]h2vύmpeTSTlmgc9"EW_`q/:l*\,\v:/1ZW6UEbe)SY!خ@tL*-2{V%xq}Q:^7tEY ~RҴ Cm8)&SLV6OXe蜛VhD6qͱrڬګucSw$cQ>憝+99nTprI٫Er3XpL/%ʏ)LQJBXF-|:w5 8f:0}&f/2^nWaUcK63,0;n8VhU[8})3`BDy}3^mv|0>@{4‰{i˲~Pg N.bҢ"dL䝔IuVKf *]Y-ƘW~\75X߃@!,0.ٷmܰF~!S4R˺X)-p=-dq10[_:2{:aٴLlp*e^HqȰ\R^~VTͅ: XTнzF鐎 iM53ʼnta1s0[)0[E6Fz4q]?g'm(O >L]>K2u h&v[k܍?KDºr*QZI~\N.7'%%Ǣ*Ci$V ?K R;ָK!e߰S;:+HXoaT<=`]bvmg%9$b̏j뮧`ɚ@^-LpۯFYN'uR]6 ͨŭȴz cTQP:/ -w,ϓ^)Tqxޡ, SO;qw$LL# poHi{Vo\=} d4>cF,C_TV0 n@l"?;W v$d]/eWjw5k '(eU&@2-30d$levEۤouT۰J*kzytG$G[%ɲ䭔IE_8 2գw$^!(v,J8EB"*ڼXU=DRHDG9KI :$}(:pl.,mԑJE 6"p#I"[FC 2RW=C"ӅEPTDLK[Mj^EK5B[A݇$(sb^_F4ءgr*20TLfB=JC+{*kjDMr6\ `4ܑ}4,L_ dI Y#N2Sܕ*3Qurc=Oeqwcvd+{wuS+~ɸns$(`n G#$w "32y Lq^pFD<2٧U6`xLȲI*l;8' 5F9_5;  cpWD2~{/ETmYHsd8.{aJHLع',F^R]p(*}H,|~rJE/ ؜1qH;*2:J{g(i E(D48;P#ҌUQ/~Ffb"8%HNc4&t&bR^x~6XLI_i9\H`5 a$GIبh{fkfߐaQ x 
.7Q\1P0I΂C4M)(fx}+,Kh#bDEr."8#&04eGY˲>dGm[sw3= ;J1(-=ATT2&P6aD㬊/ìZTQCކ!v8Y:"BI~ 7yp[:#!+}+em1ux0j@Y@GKGyz5Oka] v c`#оb7eX00AoojpS)4R/>D?߹i4Ŝ. _xA/!#xG0vE^$/#+==& E3b(Gu*tQ[~>F8ҲTy,rq֤6|I0B:ƚ"E6Gtʲe@d͡K‰E.n&cǵ]I/mI^:Q?t0ayJʈcg#R$ #30GչmIrhÄX V :a_8I)$bΩR4жPy0NZڅ>V xwL$/W9Wm  oM}#p%Evx|I#"/܏Ω3K<♒EfiXjAYF?C~ˆ:Ӣ|$B_'A3Q&eD0& "!6P̟rv'~#0~vYZ)8zg*TL$ G}oکSibkZUR7dPcR(7t's;')wnbwW1E=\'(uȂuw!\'ur bwnN?yy]"'w {wyܝ<v;ݸN;v]nbuwWvθמ^ w[qqq]z^k1$nE1iSw#=;߭ǝww!sCq۵ǝ'nܝûܕP @P1$q" iW@y^ KFgے1{hc >_X _%KLU@AxwbXhwD$XgA'}_^$h|gN U-~c?=:<;6P҅VITEPaZ씟dD/kG!4)$= WA:akDCpqסz.s(`/ʑ'Kx.WnC>S'?>*)[B(ְtHXYv_QY_2Ö`d?GaV/+u?<~ﶖdQw! 0SzY./jI ")y,[A# I32_cଂE0sMZq"K"QH>#<逓LL$2B^aEr. .]s&Ǽ_yf9f,#GxA/:vgsH3UI1iPQ=?j;3~43|j@%=C`I޾-/hWvՅ߷}CNv=2;^uG3~ jAُ#wCQŇݧg󗾇)I?kK~NO@"&2nAS,PG=DN| ?ǨV'oѴ=!]H* Р$U@PP*s "o&`2"x4`ӎ#k-#S} 5p,2??S+~wk_r)T JTZi(ZPZJEdFQ`g?koy]JUӽOۉ,9Fx2 7ܦG>y [VZw+yjիQ ,}\hdscC3KKKCkcckkohlO5_m}1L\ZV^šQ v[>o=nìCcrpё1Pqqd2Jښw+"$OtWgh0kX5jۧT[Y697{ETŏٽdx׬Oi\#;]j}Zo]_p=|Tnpyd>8J﾿J˺{p>"qJfkc1[[|rZÿwn-777%\Y12 _l\bO/譔?k& #|str}~~{~~}~~{r@'f.%H7', v/\?{5=e\埨bۢ][=>o}#9t0Y.t'[DD Eq}~uCjt'eF Ux6JR= fW5||m=>Ot՝JzG%=|Qb1[چy{YLvdk8oEU~ds/`lVOW1XnS! G'2 BOg+QT⎿ " W{bnl] EsRȵMW?;uSqc]'_zzxj|~~zii`=_X]v̅gnfhhA]YfT#Y"Vkr2Pq78TL2*?e'GJ['$em9)[e++-o[[oy/wҭZURROUh!ţI娢-GQNMj,ڵlZ>_nw^b¼ruNM,7ӓ%i{[4,Zv֭ZLx(y f`lԂpu{{--grktv|y|GGGFFFGHHHH`{CSk~n6xCլ^c|%;:,41BX]Sli_+VZLضXX965X:8X::88;:<O;4z&>˙nIY iWo7T2dB)ؗZ-A?66AIj#Ci%dNfEX4/9gwFFx87ǧN. a#lZ+z7ޯIЋ889E,pL@xw2 ^UG=ftX;/ufͬpv?*i\Vˌ?9k-WhNNNNLMٗMBO`l> h v___U____YYRsuC=}7>s>IA5S}ߋn}w+dyun}ͧݭ-m##|mMQiqzM]=M4mMm6M8m6MpuSuz;gw?gs^wzC߷G/pwqrPzNus-+*yޟ_z=>w ~j*A`o9N A;D6n5/c8}Ӌ{ 4rv67; /䝝[_____e t7nq"^R"#9[[--/󌗗HHHHEfffd3y+u7hZoܞo7lL榧7+q! Ԃ6 yh2[a7:$/={0[O#;cT܃(`Tf1Pj#N35F<@kuJm@-`h /?L^kyLhvD׷촽n{(P)bȰ<8pG\v_" SMs*}M(v::1"*y]RRZ]>_FC -?D8wἏ;GbV+]`N#rץ'^ dbuawQ{s`9Lw :^;\d~XQ{UN~9weXhwVF^\Y؝dN%L[t<(kiQ"jrݛQ 2P$/D<W\Q7^! 
uZ_DHУwԢp aSuLzUTcl}X yDZv*R\ IӷeNĎlpas]sRok0,[\>a|`~eprM-/SiJ\ou anf/lF@I?| WlOmSZ0*s DVq2)k FQ#'4H$  rr n^(RW8\SD@,X;{C #GX<6O'5X>.'XYiV0ىV1i8|6!gxTE~E<-$9~MD9BƘP @M0ύŦ0#aaGOiDٗnl<'牧H (~ݣ!Gю, 4Ϣkrq?:zGAI?t02p@3b T DXS((YTBwf+̅ .5ZL5hL,'&1$)IT_t JGʤS= "C?PM?/HKx`0\vJ?oɆg[üQ3Q/޽F}y,(e:0(z"X;'hg[X(ǟr^l0ϙp?s?ziϝx0 ZJ?ݙ qܓG0wtn"".:ZP 0aGL4q +cbPo7c 7ˋBp='sF33#FcB>U+\} 86C> ":i;M7C|"RSfF}e({o6O֔D}ZOCJ6&[_t4l0SwV} ~vW?[j*z+(ls}/hp`v5J/+"t5~MY젧TG!q}klx,N!D"3'ɂ?? P zd@N0lB "*n|KΥ#j= 又]2c˺zr" H<@Juv4Nޠ;p~qAp~A߮^Qت@C@!y^?*`tٞtwt$ [OW*l,,-JYuD ?:  ( (z*(|*| }P'*v2]>8dobSBE#~4f#^W6LX.^*_3 N}QzXᆿx<9"g!! $l ,eCl!z{E./_ix$/=CنT/F_ "ov+7I]wA߄M19NH)/.rUn. +89BPػMcxJ+H9IWN.2V\|*y75UZ2+ 櫉*~cJnRզ,|.U4Cly,`,-QūTʧѭXg =zhZYa{^Sbe0l#8= q <1VzO8 5c`%kOkT[$ $#r؈>elMQfd3Qp:b;XT&i6<>uf, @,1C =ڠ{at_-y|]{};KۮG 둮 .RZ5VyjtjPkaF4,/Ly~cPkM PMQC7É0!t'ob?q bU o/ ;8 pU(o ?52&D?Ne>/٣̞*7[$g*T!o̹l&qyMyUiaQ\çd^̟k y/d?Pez]?G-ǻ8O})y޸ACz4G4@A m" qb5Mn6 $nhsZn iªZ]UeJw7m7sr#,H"n]l;i 2Z6#0(5l2O*kN.1; ; :OQĹ3 oͱ7`$|Cxx/i7)07>At&> l 𣕢_xs!(UA V)U @SU*!eEE*!D ,m|#<#>[.)082мdrH=bME)2UB8l$֛8AE^QxBˉ~ 㣏Dwè_"5qKvvW $laW~LG{ Gh Fqó"oVUtIk?: Ts`zv \GϿETT P.7!A.+[u R"Ȩ:*wx:WN8a#XY9PHH}}:3 1+AnDYg ra;; hb3]q_@k^j؜a@dBFDByAڇ2HD[%'}oq-4kZ낅 >pޛ#*XԊ)ĀU6RшU:!Bq*Ye'$Hu#!T^v11CvPb9rr4һR)<s*e- ^LxNHLob/u,7}$D߻J Ą &" Q7҈=)c_iS{G  'Z4s@!!^TٽyiM-+)EJsJJKN+)Q (4MBc x9q.):痑^l a`9xshgj E(AD LVrdP>Hys{@=D1K)IS===jd;06{VQݣs~1ُ(Y4K~ZjG"/4{@"s-,눫G5!y8z6(l@NLw^X;~Uh~1 fvy)ؒu|rHZs\:ѝzdYW+Mbyg-z*)$ ʌ9* "K  ̀&86*"D@ &$" 0$I*2ʁ2(?4AVQ 5 K$)0@+ĢC4`AF9*)*``*a$$H  80,*) ,Ā@-̊DcP("$I"1"`!B$b "4- (R% )TCAL`1 Ds\A1 L0+*JB$@1 *RR:  0D,1A0EVAd $V%B A)U$RBXbTBaV%BIP Q(aRbPd ZTE`! %X Hf&F3! P(d@&UiF hRL!sJXZpRs1es HhV)ZPḭ\hF&"E%!"QiXA(R ZJR"V&) E% RYJkkfZ5PҴ##. 
RmIkFEiUU+Ef[bb@( %IY$i @MQkEcTUE #J*2  @"b(a ю %9 Bc)0$*$*,J(RJ҉BLYR4* )BҒR(C-%jJ%QFѵ  &!8RjU%nLFYRRVYP&T $-K)&lTZ-%hcmPY$))RdfYajFjKiZemVű[ R]f43j-RF53mєڢU5l[2KEh%Tb`Jlm (R"(( BJ HnEE*6-ڤcY*ZB!js_g=ZT wk=5h!6}0jS q1Wa(PAԋ?KM@m˞k* ?%Mio'XP@"h@T@ B%"} 2DȪ(@ Jʊ(DHJ LJ1$)0$+)M+--&YYH` Ea%H%DL JTD(iD)AQbE@TPFEM[TLfjթj$"d`eH D"YPTQeXHPQ`QT"TE Q hQDZT "PR!UDT!E"P`@AbEADT}DBT Œ* @`jӡFÓlTq(b3 LIDY* 0TV6(ID0 `J[p^WkJ,l 61o* LHH&DJB"4F0AN2Hnb"/lx۞hN~7*ඤT"'YCPA3$$xf$PeVm5BRccj XU%Ie)Ij6SDQh&i660RaAFK#E F4hIllmFM+)R%c %1cLf5 RlITMa($*FL3 2a6MXYFHHA@bYmUcQkmZ$ն*(" +fQ@1,F `X A `1(I+BKC)$ٙM1jM1 6 &d`iFC#4i 0FH,)ƒBԕ@[T #!0OBP)֣b Ԃx"rE SbSyQC U2P!6ԯ1UQ Gg8(HSbUGJRBB(.j[Z6, 4`]wʫ;ct _JPVPL@@)EC `%La`2WRB$LLR@%G! PV%JTsZnjۉ R M[k5&VZiM*mL 4! @ ) HH*L$AH UԭRU-ZR*D @j+jSc*e*eْ̕UljPQ*A0 ȉ!&$E*VRŠQZmFERi6Vb HЈ,J2%b04[l[UlQ,A(`Jp@R#!'@RcmXYMm@F,0j$@ؚjeDƵЅ )jt*a A0Ԩ]e]WTpG$Ubedi\X7Y7IAc @)awa@ܕNd"R*$ $ҠQEUGBG)j5-SJY B( UTJ*ȢpT@\IS;B4A+P4K`v UԊQ@6 P\#@oT#9_#D־Z/5TSxH FZg*j)RUm#h_ɰ)5DW*KTDNH~_5ϖnȹU} JV@U2ETJD PZFjhiVPJ F(,4 !($ )'dmP> 0(&!̐X@1:$2!SH 4T6,[%U4֮ 6+C ,!c&8BiVcZ-j5Tkbh4mh+b[Ũ[ckEE[hرcbFѱbU_b1lZE+Љ#(֠rCJtBa nH@DNP* Df֍,eTأa hQP)"B@ Zh(@v6=@sq&(x )V) }ߵ< Gbzڲ0)@#$sv !AO"C ဆBHB2UH`CCL)|.T<29!$w 7ePq A9$+⤠@Pp(vT);&.62R0pC$. ;@@ R/*XH2 W:PRūy\7€yS%jHA0K2D:PUT_hD@-7%_BTChQ+x 4Ps329ƛU,6Scs^i?^2zSD{>T>ezLn1ۮ<w:/)@wTHpK}n{zԍ^2!`LJGMa $(ł $/0͡&0_ ZZTp(u{A4NC,(4{P|)[b&4aMK1 6QJ+L_AЏ!THQ|FY>NCj9:D@8ar`HQh::)Ԏ -  vӠ pPpEdPt.:e %B%E THE$`B\ #y`II C ܂XapezmQ&wIקEn5! "!B4!S" !U15"[YPJU)ZQCD(Rd= UWh(Ϗ[wDQ3D>s=2D$$$RA@\!Ph A!Df(flDptt3$&΃4aG p_9?'p=;)`41 dRъ KF 0ab%zv+wӉtt0'⤡ 12Y+) B@Qb"3 ɥ F- 5Y&44M!Q $ V՘ U `2b  @ `4APɍVd#y]{Iִ 01 Va#pԗk/Q1Y"B Cp& ! b(N00$v6w K{n!p0 R%G.vY˙Ar0=pB2# h\S cRCT36GI蝢Ӑt(^QQ䊚MF@"dN z6@jL-Bj]J c@jCPl0B,`1ޓvNu谋 Łl(l0m#(9 cYJH@P4x;E TB `N1̽{:$%h)D;J9A@c f=AGR;HR#&! 
Css:Xuu P`НĠ$4.== 0F.-CZMɢsUThHa` hr588G>g d{4x!N=E*pD e%DiPH\ `U ` SBؗstx;;bu:ؠaU)02MB%>?7N.JCJ`<|"OnlG2@$[fNîmr~N\;~ ^!l Lg}}ëW :gFNu"U(z@C1+=ǻN KߋG+=ڜ"Tv O!5B"tk=9JLoq*)t5C4 9YsÇY,c(=fG3452A0!"C !IHr',#0D ]8yTA!΃ P͍T3;' !ōptDlFD jËC*T"U*Rt #2Px  VtgxFL'Hap#%l=ᐢ6:Ce'A@1 yVatXɱ:E a `;018 z !~CPeG d vS(TodZđPl<en3f!A;!="OccrCK"!d9F5 5XF (45(2Xhcd}#m[4!22ܻ@rdh@`rk)tsNFrɗ6)$hZ T5d";^D^~uUt̸^l#aSu.G`D`t5*cd1;E lBH+M!RV`/1xݎ'YS`bT؇:i ġĠ2D ]ͬv @ #!D QJ !NnsMMrrcL`cyE;,]1FhG 0b L' C9J,`"A1 `.*,Alkh1I Խ lC5¤ 5.\iƎpłZt(\oo<:pZj@$?\[I+J4?SVcbRA2L+ f wH2iᘝ;njS&GQ[B!}v1Eh|+[vWãyN `Lk=6T-ۯ|{^! %xn@I{$| H&^դh #2f49TR-f! OFyuz^R'{^ 0z5aܩxJd] 5:FD!2@Aapn˹'<!L0G%mj]!Gh뻂lԘBfe3<^7x/Jq8`8ʈ\0S^]q@ !)4$P6HTlwv;7`#< G'fd#׶3 HjtЊno."9R ]N$ TCbVtGӧigH.  Kta F$$8]kuv^ttN܉-ďGa|fDOX zXc(9Y4eB`!Yb^~o[y]'Jqg !q%|rDZxvPT4(4B#X 2d @a-rPH]!J'Gn @ AsЎB!`4JYMmcZk;Db$ %KH(C8h 1J) &kYymbA;I 3g Bڐ̂x9>px8?~?hlb =j_Z&ȫnJ'm8%oEJX,nqs ,} ZZE桁qDm(B!LhHKgObwv&4&y]@OwBP (&[`PX`EPXRF=E 0ڒNpCRH 5s!E,uHmmd>̴8@n (;mo1($(%iia FЂ!UQ!УΒࣣZ_}ګfَJYͬn z˼oNX!/dSIhWvUсLJHX/*DBuyp:# 9@ щ,4|l  y>}K<.77$ Ŭ5Ql UJhPjyMxqu``<0k-r(l Dv KP ۏn3lll zS5u ͝i3 )jI+NaR+*UT( OR:j#.uӃ7IG t]Kd!0Pl;rSpt1,JVN9 d ;: !f,@b5W;ٕyR0fYы[}N;XeH YtMӤiK+7%"p #Dp FD L ;Qo0 8 z>+DRJDZ0q d[Ł&j@ !OOGuG|{qLe,x9ihk/K] U eͦ s!>lØq: ĉ`j"FE 9/Јgn~;Ǜh62l@sô P" .KcC{5;w+*bB 28]0|tZD!9 Ozqcw.\Gx4(K-R H@qy:0F(" % @kߗ"-Wen ]"RޑJJziN4י*@ nuw:$#^~o/йƘEMKjm]Fۊ/nyoSZtIsp2!tL @HBDUЬYa䌜"ٌlS@;&1D:+Yj Pkk kB<`Pz!:Pp١0k/:^'Iil).ākknIQZ m G#mĒ??=NV{Ǹ`{8g Z`@ r6nt("Ō]@l*) HQ4!_ޅXL3C·f{} w l;m7ZB @ L1Lx.9)l3kalOȡ%Df;:X,CT0g dH`d 2%c Xg8xJ ƿ „QHٜ-claʖ[a0ւ T \E`!VY0#`\Шv`PȹC!,Y.*( jBUr !g% ς9RfE %*f& F!B@\j$fB),1C]Ür0vNc }!/ozε׭Ml]t<6!N=ܛ ꦃɎSBSL< `PekZZ@2dnB;, €j/՚zZ\Ht2߰%zWlDa s)d3e0OhSZE@d,Kmp"M" d@u4 p9SYn.52"B(l7V..]{Awo' 2x@"GŲ8Սd0[zL.i5Q = @SɭSmBKAhdr7hR9B 0:ngt :)#9ڃ03d(^ B@Q$m_ci$`"h xAy9'"b"B$ MCH4`dAH@YU B@aQ)r8HjriE6/C"P!°C|R4zuQ~'LW+HmB0[BG6=aaq&pN{nу-8]Z$үunyL6|h Ta ǣdj u\ r87MsS @Z$!+[qT"ƕP0 'ƹV+¼,rGlu!yǂ-)4wBFHrȋ+H@ZۛxgcRఴs $W4s]Msd|jmVu/pvR3Ͳ6F$#2$dP6!å'̵b 
iaiC:QÈ:xyRi dd;E *"6t zT#`<_(')h\U+\L3$ ;<=IBsHĄ"v(Z$ D:h,^wE8BNg$ԥwkok yWeƒ ( X&]Dm4#lH @;3YafqHR(aQ  λK㡶,"(h&#x,*>mܕ8ז hc,Ek- 0F BLs. νPoY  ^=U8'L{BHόc."֣ f k Vb_#@g`{@|F+F  1u-c!r!l0Ј!z+AkD2΢'m!Ezgמy[{4C^/r%FɶJ28<|`yN d/N]L)`GE|>fgㇸdo)ET͓AC a1g(8O*KBs;8>dY &AeʧMiEAsaܳ %@R dhh2ŶdMm$hF3#Mbms=!'Ov2| ayX9Ip $ ;r0]="cKX@P*:4&XjI6ݒVT6q4 (C 3e,LΤxBBS;; jd!C $i^5 B+8'N!g4Łqw4,x!@IFt?R`C^/*;kA u+`*SF}Xx*irBf#c=X s6 uW#zm4WX]JN-v0.C Df( ,ZIʹ)k.kk0/W;0f10!\Nˌ/$t{+w~Ĵ%T% 2d-$%d`df923"Q uN7@]0`#p뷥~Aa`$Cs$ZKw\QFkIP'c<#kK; }>ȃ9#|9IU 93 ("aD HU'P`N[BLJZ¨I iHLes-pV ( +8rt9Gk+N)YՇm 5, fQȴuMɹ(x&`G?@I2MG݅[b,4ÒޜI{ZX\`{ e&Tb!BIeņf9 p#{÷;Ұ% ]2cXDE |$of_3qNrӈ#EQ>aok՝fOd֍4NJxWuuI>D b*JE/tzxя3. .s0E5V{ea0e{G|N x(Tjs,hQ̣2s49gUp$tD `(o!@'"%ᢠ8}⎏4C2w6{#Stp1f !bXh6X8(hh ⎖ ȓ5)1 r鹎=AA(T[O(P v&"cbv;@Ưt@)00Nac/rgFL m:P Nsy DUa3mD j#۠Kw;h8vMZ"lavd0B Ͼgr787S)N15 L˾r yB!ǼXn G5 7QG̎LAIIHz| Gal Faz nF~O,&K8WCؚff*[mcYfas77@Bpa)~pEڋ%# Pt,L@'k6 0@B%:H`/&q]dGĈذH#rٙN½olYJ35)*2';ja CH,-[+>\. !-.BDbjI4@AÝW0 Ùhd[#a}a6tvbuړ )-Tp#u=,e0# gP "l+("6W58[oea.7[y+m8-aM+#N3%£4Qָ,/ŭK3B' ;6Pk@ =v[ڤ},1he-uqkeNqhk(y$ ńڐB$jDKb:m6 пaahegjt-3l H*F-J$P1W#Ґ=q6 {zi40wUAA|yău#Qsc!r5EB "&d1zeJshlp)HP(wMb…IjXa=' c>K's1RGnm İ /aL(U =m^Bf9C c]@8G sﻪ\7 {伟BC[JT>0DR [HmZH;A_& 7~mI,jvg7󽩪ՐSdZSCC?Is2???K?7@FxW L&@;t:|80*:!yh$o !4T1nE*RZ a 긋MTaM 'A/;,iFPT |˱ͭ|lP<ȕiG@햘Pm .GQld{ C|dQ>ppتl._.9.fAKcyAjㇵ0:%@B+sgm07H F c7(&ŀ1˃C!9ݥ5i5l I(pr89 ,4d88HB(eJ\`Vj-Ã~B 0+]H\H*aI6CF,aÙf2h\d- xCՀk196 X]ų"3S02 5,- ţh]gC(85\8897N<*WAdLBP.r8G1e~/9bc35 UmG^ZstmULe!oP+Y6U`ÜPv7T`4HA䴒䠃8T.di1iԡ B\-CnKppd2UHbґC Ab\>{*CdTxPI#B+R[yTRnJ238e!$Е!Ŗ,9\K<9Z݆ >leiE6`RΒn1f`j>)1QuK=odž'$\n LAt+CRbEB0`kMvCpóDZ YkmhoQArC!X`Cf:OtgR4a]0 #Dmrg8L2!k`o8p"R &k Mws|蝹p67fȌbid$蜏W<^ie*>GOp@O*X&11&>&i|szdRThd~63V Swp-`ZPm=ph$?vT7LMNPmk"eWm  ^jTJ 515J:n+! 4Q]꿧iHhIsjڂ0c(V&QZ 1 Bf(ޭ2oWaO`04qwHdA-L6Mb)&V$B+%r(Vwq>4 !kyUJOA!3c7& qLhTs tץtޖP"%B?c%d8wCTԊwPVPƒCI-mA1Zu9" hx9r. 
Fd8$Г`292ʀdh찙.r bn':~Ê˔1) t> %t4@Sb@qzED[BjKEh(#CG'Y +Z]VM UF5fщMⲨn)?8@T@DRPskDhAL62kD1l],jgKA&ہ0D-dqHf($D!ĬX(.)!p/E8ȊR( [H ţ> p "d`e3b KT- o Vj'M4=$hXZ"hcnR#F_uVJr@T3CHmEDɩ A!6w!DDx-}&ə Ǩax щ%5p1A(KLJ|pŤCb?q;r5I^ØC([B^P RUP0>I#Wama^&m Fr(?V퍠?^67,d2V=Im *``2cL7ZƂc@D٣l EI 0[;;"AuEnmUABq.AP=Fh2M4bE k ӏÕ5*:k&=xkY! L`;p.bA?} 2A]( B6ZRf 03j*E@ùȸlm6⑲9n~-wfբUdpA}7 #4d!C' 1XLmLp&UfDja `np[0+&U e8 4#D F)E64'=ˉ'!SXD8T@}8 \L\v3ͤt9?}ϟ ׂcΞbRuP;!PrK(^B A- ѧ`#gqC 1pa 4AX~eE3a[D4ŕԸJ"`D)(PS b?1a8Fo!"/EBH 67_uwFlE1. I@ Be%1cɍ"F G*A`+uTB0&8J4'((W!#TԀm@dЫHH*С@+@ʨPSb*EA/b/*h4l1 L``PB MTr|`F8G2\fnc&ʢJ%ض4B 5p3qjA14 `-Zchѩ! \4Rr [l Yz\SՒR# <ي*2^V2-[wtPXơ$ LiwthٴJٹϱ2Jj@3.dF"yZۊ,|裰4+3H:0x{aR{CrwMEz&AgocwޕL[x prgΛ #K3s*wZVÃQi( ~lӚb|M[.Ga߫ΐ/1iV?wv ß8'Mbg"֚&7!W!֥K䗤lI :ɺRձj_\Znu1j/ر0}Zo0]rbU…y:K\ƠC gt إ1u](3hhVfl~S_@DI~6pbYqmp .0sacj-D4> 14癌Ƚ48$$ĹJ7@waF4D&Y(aUmI4ʑr$*2 0_.𐒒V5_"QnNfrj鼙FD-7h$Jhq3^p~XhgNED)6҉' SV8'dgBܚ`r&Gڰ0fNPݙ;Q`^S]5"S}BkjC*'gT;`{iB\sg[b.G&P4-R{5-(b [Ds2`]yC> p*I.׶BOrM2,"3O}Uo|NXؓ(IۨTŻm:|[ٮ?ٶ4ٿqԢ4Φ{:7?_y@Ϩ#Ǩ(a s9_y ~?QI6!΂/a$%[\HH!k,>X]"OCv,Xˎ73ޚ6clH߈yz?gC CCO? 
|?JIQE۾oME[ ow׫gS>iԄ<]Qҕ$6Ugb{}\ٙ%k O`gaQȚ'<o609n;rJ;"k{9n ]mӯo\a"' 0a36#p>ΌQ0<@ku~5 j9͠fи>LͿf8f !%EWAtW.xwF< 6 ݫV[D>Y"YHYLDgiB|}hPD?M"n!vce.ϣΜɥڤmYbQǖ]ELn  +0 __lN|oC>be 'if#_O >;d qj,uMչ3[[0Ws~s>Nc?]R~ޏX0=$,eޮwޞzبh*#?k Al!_Ij˾]?htK-ڞV*ݰ?}u4yŖyݮB*K_xyFsvC FYn(<3`Vi, X)FDOnNfT^d-9D)4P 7a-uAa*`<_'rU}EpUK=K|bIuCaN`dО[37'IH4Ԯ/Ui?^xɃ.='k#kp#^ȥzr( Md_W/h}< =`H:ė$MWaHڙ/W,F#9l͔c>ʻG#oz[cn\8T{v"6#cX)̘c2> =ISh}ceS2aeY}a d:K|,XZZdc-D Qv4DuP7su [͍Qzd6m ;v[!b~&'bZƦԒG_{km;VbAK(zH!AF5jѝ6מZTJ]bNMV6|ϮܔpwXVo]{G60F31U Og>fD88+ï%Z*b|xTü·4MߞL:µ>T2xv\@QbJO,O*G򎆛3kmj۬T2O9(\NOJL*Q!Z^8裧]oa8G5sՒeCw{ڸw\LqU!2NMKsp *1報ODp#ֈ \z[(U~7E%}9&6,9ܪCG.ҘXwf;6Э|g,gzEA<׀y:^4R{r{eWݱf71>aONafi%ҷ2FS"/?ƾ&gkE^Nc /L̻_t<'/}ᙵ'C4T]+<bT59in>gi|)oo~'ȼwǯծěuFߪfwەˊj3hys_Pps:n:V:(~L(~Oo~_>gѿmڪ߇S&s_=yS4ǹ>/yd]k'UצW:3?qd}]~*TUk|ux=\U>ۛ}mpO&[4*A׹t`K,>fM413ߊ BW2jbT^o쨭[ ꦘgIY3QX68?8{Umc˻ӻ˵yٱV4z&%LJ̜pMmnH<1Z;"ׯ=cw>7NDX)=sla(cr/fQL-[5s,G}e:ofx0}nn-T۶12lgI-neP D6 9O:Rc,;"G^Ψ '?c9@vC@H@#=JyU,tӘuBZ<0f<`=;= ΋GSPb-aYfeS[tL[&W\k g9GIԢG8UTas(u= GjZrB?RЄ*`5_nXK?ֺD7;iS2`<_щ<)xezV^M36/ܱ:f1Tc O`=>Gô4|>{OҠZu:EMi{xxWM3Mpv+N;m> U՝]':gd^5$ahB_- WNwR J~4xAwd0@B>o֨\yhgKdbU0m09ZnEv>_gbҌpfBe{mZnje+4ݴD)8/_ѱgWp!X^V%[mKڝ_50kz5m W%8{܌zOi#K2Wkj![ow{VpmoQiDTL3S(şЦy6xYt^}ʷ=/qޯ _YciY"f)TXs`l\M|$>֨MI+?LH+ojy,MJ%ZVY)_¾T+TA8@Ͷ[Y g^$Z>쬂w lmh\NmiqeX-%M3!y/I`Т 篙;Av]`+f?w$^6B }NZXT\+G/S6Tf+ٜsiT,<3\%5msf s[7MntqP [b$4؎3%ZgjW 6 2/>.Sh݈;|yfo7n"mA#&PWֶFh;swOAUNymvNJGuqz{_;)Udv5KޤybffVژ6}S51=PV"L q*bSR V=G=~eH9˦ª"QϓZ.0Ã5|E,QwVBPB8H:ڝr.X1_+ '3MI؁dnmfpF JN]V sƂKqGX:(?R,#j][Q̋!-s]x詹)r\;\6K~j_/"?+KZ|A<~ ߞ ~*TJ=- p4w!031 {f@+cs2 ig>By8q.y[\#bmЯ5RSNMTsu:bZj~Ֆf!bŗ>e{i7$lv1oO0/*Jy$5O3¬B!|MLY6ANJHƭXKZa,ida@z)ĬK'QKI*5$R kːM4m ''ͷh>z>ڌ(ck.KKYM76B/S~0@?_VÒ@"He_9OuEneiR0d'zvFƯ C~5Ql3Rrúm!֏%Fők"Aiq ai-LzrkϊlsKUEo5eSkl{Y\񩁴pkK~ ʴ4W,V-֦h{ٌM<鬪$ d=7ԆF-L^h,ldp#dבAU<Ɠm*IUjSuVw=[m^[k NYfܷ&]3*ۗΕ1*擓ΜrwnSu0[r cLXtp'5xjxuNUUUPێI:c8GIUUUU#mR9#m8$m_@ssu\|5W}|w[B>J#f knz`b[`8 4oMviҔQӋ BHȣDHƽn 1"WJQ&ϯ:rO;w@^rwWan;nWt\/%+)2CX`"~{`XE$Zq?1AY&SY[ROw}뻷AVM& Y@݀;͝nx w⓼۽ 
tn}(Nwxn^޹l׮U$l=wgDm`MOvZ"lz{e(3eP$B:mZ۸'#ga}4FI.ޔ5uE`<]᭵nT>( ێwmxe%^"(P5LYA*Hy.l+y܅ݞ={`}#kVQs׳ZZ=ٶtJdc|nnZ6}z5 Gۘ7ӭ}mkݳϦ mûwZ0Suͳ顽wJϷV PS׾kuw{^U> m_ZocųR=:OOv{ *6 >*Cyo}r}j{çۻݹZxz^u6ϑ+_{{=on}suT{[^ᆴwj^ngI׏iyYnͩlm+ݮn9v|<LR"K#Li}Vn,((=n_lp}ޗﯯmW㴩f U-DPP f}ڍ5*SM(U)JS)@@@)JIRR"L)I=E4iQI٣N(e{R]4z<}:u܃f;qճ/p7.JmH@&b`0#&&2b 0&L0bdɦ04dd &AhiB14 dh M0diAF`d i@hH$D@&24b OO!zhѪ~CE6dLm"4mM5<5)&&TOS0@~B" i&1=LIRy10IiɩIBmI45=M6FMɣ@i= 42h0& DA b24I=&'CF2T0Sh#56&ѡ?E=<2h A@LC@&#@ ځL< O d6ODԏyMM1Sz)~TޚLO4x&d'=C d$DMDJ҉&J%@fE@ x32\̛0#&̌浖Z͂*0CtFEtUk3h2 fX-qu rJ&\a%CɑT)P@:! @//yz"a,kZSɊ¹s @ҭ=ȩSW N#T-@hU T~@2?^8?& ^DL8X+CR]p[pP=0b~{i1~9`Y JA /B?:hWaP G, 2"J$d} PPLlFl $m⸐,Qp;H@ #TETDRP{p*H(Vi_0>1lOCF E\Wb?o U+1jVg# 28L}k5BpY;h`bQ0X#P Otk1_RӮC{?SAKi@ۃX#}j]BϷ+㑈,F. pXĭ PM!K$KyPbRP4!k2nHHҴ,,#J(ČČ2,R0R>,Q5Ҏ#@2`dbEK B(' [y/gBeQ_FzF+vv!TZVzUGRu^x:fR J`v:9w&:Ywg@0@K&BfWc57"DY; gy7x|n't(=9HP`>ဴ Pno*OZ/rDĐ(Sp|uoyC2fB@ۆY۾/{?m\=h* Qʅ!z8oB Y4{!I'>(NT0i0`F$&eت dE9|h"z/~F]E ueũ@;ðrJE^ ܤ5 [!}ָpԕ{ !7ZɢJXŊujJMBTHCy*꯹ %̣Ȕ'_#ZF OZDl:<98rX<_QP^ ^*on ,.yXvcIm.?GkknEH@ײܻeG|k]jFW^Mw#屾u}J"4҃`Afl~?S#^V!_9K*Jicc;nZXLJ*.ߩ'o]_j+h$]xpR4˸O p}*0T\@ *ĸUTWh 17t+Utsljy! PB vχ (`@,qz Fп]s9uSA.SJy`B t" MPHGVI﫫He$J( ( W3M`A@QeE 5MQ%4Y ҖX94daō"T 2PZp Jiɰ̗ A2%)II !@dpԆj„I!q)Q,%I("r" X(cVAXPԙ"dbNa.I1f`.A@CR(+0D &IRA\r-p( ςu<t M+]~ vWG΂mjp}/VdeAR`ĄPv\ypA`A@`V.? =O>JIEevzM{B|?u7 $a""XHb$84#=١ HUNR~?k<\y%T2A I`#$D)#0C2EC0)DC DEW% tڨTJȄ}* D0HhNT 禬1t1:=d^ Hj d|4^Cҥ>. 
I>0$Fç %ވ ~U&Eް>GUr>b3B+PK!O} >:$DR/"N5+4St-w*b`Atʠ{)PE^ Ԁ_7vܨ@!UR@Ąy@뻬.{4Fr!, DžPB@AG;8F jcw U{| [=err̈ 1HuEH:zd Z=X4@D !(]wL 25=R>&jd5,UE !+F'`r̩b& #J0!hF$BeZ*<>3E#D!T.fJB#D h[B" +(T@F ` 22H,,,0>#Brs|(ZQ!` #+P J21lE ]HFdhXe\{-#)G^FFsw`:%2-aQSuaaWѬ0Ƣ * ]mfq ~;g#i]wj@P;GD҅g:7lr#Źy24/~J͇$) I  #!$Dfa@#~|uu-sLwIaǵ!jk!s ݻT֔O0puI((w1^.b]`2ELC*`ʟU"Q$2?+Nz=r ;v H%"D;I v㉎*`NN!!P*o^Yh ^p$B֡ B$1:`$qBVO!tBT`AB)AX,A2z]0I _]ۍs_ }Q,P@:B!(]S},xllKfc`c;yDhbw;C N>Rf iAdY=~Xrߝ}CcGպDo% μ CQ6ji)ze94 {}{jX|΃^5-u Ad}j=uWzGL_l9ݔ_@(I҆x:6G:^+@U5Xq { 2YTeSMH^羅t-F+瘅BкW%ǚ{ Au-}?o6zҝ _醀#S.AʬF퇢up}ADt-_"HH@B&F})D դTP+Jd,N3vW}W_ĬǾ~&WѽF%P$],6H]lC?Z+z.*x:[=ۮiQ/ @ #+D! zny]Ph\_`m><^#ࣺ a +WSf[TJ*XJZ v2tT<1,yxFmBPDx=7O[ r%B$ @ K@-Ow=qt&5{H B:AA(ҋgn- a PD Tt:-WVڠnA< k"Q )Np]"C.u!]JLbй Ƥ{(zc€i VըH}G`(]z7OL[{-)t\ռ!T bvvkhr[+6:t- Z|t,/ B= i/Cwm r+aTBзVWYXq!z]2: B[ʯs+cl ; r=?$GvHR?StuqȴrT1u]=#'{Q6+>MJfЙ4wLJtJDSd L UB˄椏]/k{lk]rzfd%t3oZG夆j rv6CcrJd7IÕzAYi7Ѝ_mX0uig{~ #M}=FO×QMނ}q_L-fx*ۡ_S$~=U㈣6,+Vh.=(c?ff %dOvB!Jt?֥f'p2B( )AC>a;Ie&>Zcft"]оd/! ޅBB*zx/ Bl G4UփWY/ &@ @;_)y.NkT&К 6aR-4a1؟ nr|zUrARLH ,DBJPQUL00R-PBP H &;BC9*dZPN  .~` A;@L%$Di}wQUm~xs=)ς吴 Zd? rC(|s! 
!$P1QRϼAD%h̀@0G4Q-cs ]# o /L#I @O4/I*^^VIHk,nZ[I- t++By7&A )_CRTYUU>eIGon+.o75oj}UkH"#̬_\?eP8@FAs``4ϣW":Xu7A}_@)p^̛2"S@ 5}ͨKvToJyg-tuy#1,8K^úLox۲gqK,c0a(O`ΙMiFx5.‘xrpMfe4]yʈsbSzh@"ÓP_GMT Nux;UaPp@ARK`Z!Z!r֍B8 '|a2q4A{^2evy<8(_(o< ёQ )G"᝴)wo*QiWt*)4s1$rQ5A3=ɰw|!Āy:/) +NI _K_CV[v~5̹g.;,0ΙSZК{"jxC`aJA*M f+3kb:'SDhmmG~SfP\>Qu_Q JsplԶ8UT]:^q֛&r5786Q68P1;@i-W;[ ^!J*ЈQ«@M=Cwaz)tb0\BvNnlqB{ԄG"=%$ `8yW=99"O M5uFeryV5j]Mxz~"7ĸ#b?:hKHL~7/#JLyv[猎j=5G磐GEk7MYP!3B!I!zhP-bW#&ୢICe^|̶+>2$)s_}ZqiΦ;0op`@%A;|G9 ^yīn/{Ye {x@h`8@DoU5P^)vr`.6R,%Zu>mʃ] ^˜d i@+Py}}l2âlA xlmF3Iu|HQb8I N5Ύ_SS~<¾@ؼ{ zBûH GgMnПeCsGFӣo5z[?>>%16 DwnO1r(\9l 1mPACgEY/_B`RI`_R8$%\wgYT.t3#wB>N?ay8ܚu` E2G zJhR> "jЀ胿χmla=*1̌qֆm.5 9bttM\=4:6t5D0Fm_c D@rárjDK2޳y3vgGT0Bn ??kݛa,+m(/_3醊37(WsUL0UPveC9[gڸi}gFGaz22.n6QI.&MNb9Z}Vژ9H粑ӨcXc2x--Ӣ=`-CNz4G5rQ:YOX#zjVL_GuYT,*s[Vl  gJ\@3~ HO>eFnNFad+=m<L)Y[uPpG<_=suRKU è,_d/} B)Rvr?:0XU[@.2kB7Bf.E!@'PNLjAY$dRr6g\ϼߺXr)@!KdOC-(v£@txjN &M"8\ :qP[[‰{l6晆)t\;gKh~;^ϐv݀2]Pn}}Sp mx $D G]:`:&qeҊ'ЅЁb_KY%^"txINNF/Uŷ&sU+bl>X? 
e4bJXXk8[Y ̮k8Y'qPo_а\[rYd,O/BP4B?tj"?QF~\G;~ ա]Pl1vMK zZ1!} 0W= <uy`R /dZTڍu[`;Y =`؄^ƴ +Ԍeے$+p 䫀Pdj^%(:rJ~ڹ: F!qdu,\X #\Do.S3f@ؽE9 z]9}ˀS٣`W%4Mv U#Z#ƍ Å_ws^LYvߙh P0:1|-vYm%0%B_*YlۓѦ$ {*ui: #0 SGL9?oGÓI 4=/ǡέ;UD+#Ĥ_8L`|`&Xy˯@$9<7fMl4pnbq],4e`@rsin :oW81ڂutW>WHU_hbFٯ@ʭ=F G,VSNjUh4 +59 Ge^t zG$2&&z,w[F%I#fIq 2P[ :f=⒛`ԯKd.q¯0g~Uǩh렠SP4n)UIE j;$RnJ54i*Bk BmPW+pLNq$ `~:Y r ^ǼGsC j@vb_cE"z.lߗ܏~q`_ȁeG(r^킡_a"5sii@u͔~*6Sj[0σamQ93kS)8iFou;8d:?LkUT=_l\Q]ۊ;:{:D`&zF9M++GfCoZZ 㩊ۡGxFƒTG%]Ȯ.9ӡ֣#ZDo`^Ι2uےs3MA1#Ơ@gPM3-)al9,~_GNם hvY Bp>}T5#AUwBt}ooSOz G3G6ju_o$^gʽ\۾mp};T=>:G˥pDp#G/灠؅o+958h:h L]C;Š\HpK LWC56S[}5 Wo B=1Pp̀BZfGewpL]WKĂ:) EԾL$ya yGGL 74oqѦi}8:pxJgV};4/9h/`ᆂh@WDv878T_Fd tHc d ` #GG5GH#3ƎF99*|G8y][_nm7v# Em mF{>X^ Ts =msWv9 3KA@b#q 7tY _w{ @p#o9{84܃hF8QƍtүoGGd*u2ytJ%ב(+;D{A :> OoQKM U\YhpD4ӻkm\QlG€q^GU9Q/^GT\ަ՛`o₍4ڏ怭Sv.Bܜ| ZBWF^\z0m1r A\=2:doh^Djah-Yb7;_7S^ F1_4hDo[K7t>n`nݙ}1U pn`8@LoustGݍv>eu}HbQ0~]#{M4Z~ t7%ѭ&=BGR 9|* iInsPѢXB/ vre @M+үH wziFDp:u#(L>O a)LM ߄%4@58tL0`nbjBa YK]5R`pO0f[Ѿ;_EP7T)VsP p(.Z.<.Bܔnp; +?'{ @b8 Ad  ^;/uWw!sk:|tyT/_a]J7?W} "K2w~@\{of.gAr\n{~\~N Hݮj{aF7q*!}`&hKB >d0a^Y_[; KҖ䘖%&U*"`pgB)fEe8Ps&s ? (@tm/^bޯFz8>봑[?B߸`u6ޫuޕ=UL$rjZ1)IuEdнp"s4UZg_ m9H0Hu3a]K9(HjBTL նLN/ߍFĵd\O75*Lle%e,7r/J7}{@*&L?A`yϹ8X9E8L-!:iX;&X 0a5 d``%m5)7o#e$rRN& 05R  ?cA@ =͢0%АCޮ]αPy:H݅l1C/<[S^4WEHgT/Т۶4Fu<|*'"JS9PP~c6hY?z(S΂M (_w^'|𦝿,^ ܈}fI2#pϾ Vֺ@SP-#h@ P$( A; 6axn67:X84A²?7xm[F۷v8UܢAڄb4Q89 ut^?"+>?*PLLp(%DBV lh3DiM AA?@60o\?[P]8487A * @@>B4TkuD*lk-A  ?Z-΍~I'1[\b1 A)/0p vTO6w]ߟ)Et;Vy w'crhWQRiZ(E@ QA?Y+yMH R $ *;j1 ^] l:xb d}i^n?Ծ@M~n6TQ?F3=\ +I&!)z8Lonﰐ,BJȑ" H C3IL K%UTQCP"K, 0@B" @ 4@$#H"@ 2J@GHA(Q+-H$ HJ(0JRQUE%5DL2ST%EURŠ_0PHԄAUUUbO;CV$Q 'NA$b"IH%"P&`/yBDG$M9xf &WgN2Jl ?]C1@'3{ : 4T H+P4/^d qX &`Q%G#9JIu gzTllK')ANOwnI GDG^Eu^V~w=S߷j\zd!z81{wN)<ꄤ溴(Qܽx! 
+.nZ09\LoKH[@xHJ+dK SU "U[Өɪ;hFe0dPt41`4ffU*)f6B%/t0iN$I,ڏYq&l.lVp2s=K# Òԭi:] ̀UfC;l^KHV3ϝ,gϰ2#ʹ,nd.KhPdmfU\c SQTMUQڡs"W OFy@I#o6CgxşO^)=ykFMg;Kٞ-#=^o3->O,gY:cysLg񳭞>^l@4,@?*k~HP'g7Pn4".U= Xڠ!xg,Um[b(0×ؠce$ecaJIOat=J)5%b4[ )W#֯{y-9 )+-8!En۽ - ^MpB' T1ޅ*zRg;`oDy@j 92;1Ժ\rO\y7De鳲J_ƀF Fh<ߴ'Bf $/z?4n\jdoY~)/胱Z v;'[y(PP@*$yz䂪 [ QQPhZ aWv,.5JFN)jf* ]4^Z.z6\g[kPNPWWWVV\YWY\[Z[[\]6Ƶ3 EִyWnݻvq>I]}%ߖ-_nܲG'#Lf,lglg~D$ĔrZjrrrz% 9>'c޵r1`sۼINCNNNQ׋9ҹkZGggWN! X69RxcLE& 2n+jrBru%W60X R{=_k>.d k{  ^h})͙0x9@¿0R#GG5a}/ Tcmz7ƸR(hŀQ.3)f &EzIóS.|lP0-s*;+xOys@%Cƴ!9Z\/Y:ܦVs%CӋ:Q!'+V h0 nJR6$vR цg mq|;!"DT3٘r9a)JH,4@^[&&-KgX3?}+_mH7Mfx)9M[1ȕ-vglpfo abvD`WshC yBw@ j XX`fJȯIHV TAviE Юsq@}B"2B|zt⏑ hlږ^dC-.doun*gj|nP&RM*1PW,oFlk+:p_yv,/tBxKVfx$Y}^KϟjP',ᨊ3`Њ4i*#v~7%@4$ JdUCYH( US )PP(QJEQ(TAhQJ@ViPZT@AZTA@E@vT>cg->s k_ɅVl\* TY2D(}к'&qE (fO@逭\x8M^!&{S붶B )t"5Q(pT3 ͘Lp`Fw-e*3RRtQ4b]4& S2Mh< z6 S+JB3u"(KW;c0 UєaJ٪"M% lЦQF߭]eS >\l4gǥdjZԫZƼE#yӣ9Fy980 VliL'a*8F&. D9ë fRL ʦC[g77J`\ϙ+-mW[DLȁ.bb+u޲ZVCtgn>gP#W'j%б!TF bYq8黎&P *7A]3lvl ^Cph"Qzn5$^*+̚8D3QN.n;ϕUF.`HÃJ$ԆXоΣ24.x$yAPoZ}l f,0/^eyj3ʖ!ެl6lѣav`2ar}nוB)7J4dsme&cE qq=ZgKl ߓQjgtqFüGޅVָwmj~m+-,Oxp3׷˦s` K MǂL=#˾=M+Yܷ5Vb2j]4];x։#l)U rb; |{9&$5L[Hr+rdǷ52rsź)<"5sw)fZw8mti~<{ _:N1)x-u9ezx[>X09S68{mF^FeSfW7Xs!IQ)X]׺TºP#LV*QxOcưZ:647 T=IQG$} @Vb HI#"S z?R8U\:6fdz AdpD% HڰvBH&0H9)IP%! L@2'#dDf (|WzF%R殅b:WH"P $LU0I#PGpQ@B:€SalKc.4& P:$@䐢dU1CBB #\A^Qt@` A8Pm8RD(hY D9OA "b:66a4N I388$`! @aTR@5ppСD&r4@Hch9D$-%(I8 *)$4*P@ HHB"*47 ;D  4 R(  2B223, (" *ЉJ0H(3 D@!32 Bđ(K 2dRBB5H- )J"̠ ҀB*Ы+H"(@~fa *P(**}d*0JLHUJiCRą B4PP xrC@U?G)Ȑ7 rE\QGI0@ ʨ j@P6AK䢠+!AВ2K"tڐrdT@A$@FM7*|ʟnTQAGQw>@B P)T>!EC)Qt1E4Q⠇P:ȨLj! @+ "C*=x.$(0D@oca`VW'APTD@ܨ"Ј?g*b(AS[" X"! 
` qVf":Β+lUS/#O* s A@T((r8aED AA` UR ( tA_i-x^҈Ah|5E@O<"UIbC྾PD6H,}$Ъ D+$`J9Ee@N  *%P4.\`"F$T b_@}"~ɐPCT) 0M+HHHP(P,ADPPP)( ҁ@Д%" C%RH "4 @ R E ƒJPP%P*P)Q+JIJPRRP4U D(!@KKM)J@B0d*@* =E:QO}bpB,W%5!B@_(uA͔0 8%%I!XH_@kN+s !%>`!d dD\x*[ (_hh-mS(=}7iq=G\`m#z%v(j'p(e3Ta*2qP9z—xJ<$( XOJ@C3N2c/WtDl?fbz.EBިS^ lr_m=C0r>eG U@sPvr_EO -=J!sEC`{>J' t"b@a<2 zHX0'` աQ& JAQ]f(  τ TQ@M \%d@`T@KOP,kP $ Y&Ib`v+'AQu>ե ܫSYmvIJ`y}.tˇW%÷PL'z~J;|5=;:^"4l)DࠏNpW wYBtwti `K6CwsTڅBI~}a{!>'ZVWp{MJ#oX*H1 ^e&?l%H#1 $ b (=#JeND~%:%BPP SERRTM15TH%$ IB *PЁ@P(- Ҁ(Ҩ2SSO) T(2J)B)f&Az6Y(Y;(P"QhHG="|!A 1y %  .SH#`r.U0rW@ :j)cVm%z"~zh@E|4_U5Zҡ%A !E4ҕ@ @)" |Y2 x]8R!XB&JR)RE D$VbJUVh%D!(H(HGxP?5nͬd8c]N@)\:8Hp[W؁o0BO7_?Ur'\Q~KLC(Yb#DL|WiuBJݖ 9?aGZTHb05V6*+0 g!!.BQ D] J?K2U%\@eP$<E{1A:! $B+Ї( l#d BQb$WWHH)!2)](`B)F.Y>TFA"sW]T:? 6+w|[ MB!<8^@Y05 !/cRsZ hdC؍`0X h`\b14:qb54#####3qC$t)T (}ʠCRJ GJ*#ECJ |=&> ) &U&{grMU`~$ܪNO]!K~U^_$υ({CIMQU8IU#V() {T(j2ZUPpFAIDdeC?ЎF7 dd_o71~09o$ Ta/$Ia _h#J?yioI.Y #c) v= ߮!B"u#E8➓˭n,4G׹ qt] N0S)L "+3$d•sJ7}w\dڊ,VA]dyr-,Z`Aջמ3Oz@*f}* z*[7 |Z^܍Yk~htl2r68բf#EU;o#7FYZTF H*HS~3UU~K 2$ *A`CI0a Dn0z4pa`aPck@@c;q*m6丽3FaRTōwȩюls,uf̢yho/Etّ[bkx{՗#@1^" Zڒ"6ߢd4J_ Xۗp  4hhK5>OM{>+_!Tm`PǀAH.>rfpG,I z0}ouW>~Kc~#|~ v)fU^]pT)/s b6H?Nw"Ҝr2G$V$JJfj[{s?Hoq7rdn=~scqBG'|/xm7Wufz>,ɽ. Ց.Ұzlz;ؔ{s&\B/ijb!|y7:-8kt蓄Q}nFeK\j,?G^K5B^r~ 9T L-`A?[/A,Q 6OvkVxMN ,~VXNq` %qNJJGc w$f#JJgkmARc:YpO_sk^UWn oo|"LD-SfQYL%`Yb`aeTaI dbUPT0aJуd!$MKa_4BBNE I1RA&`B8`BM``N ` DD`ɄI8 YT"aA2*MMT͈38 TUPUHQR2PUU#<0Sr;DUIb0)f&Ϊs|rqC@ .8euGPdZ7jiqe!TySg(>TƭT 2C7¹+`yCBGwUAt y>G=ul}AV;Bb):ͥ9Pk-GaYBiAgҬ QK!W ŷWv-^˳<ʉAkfH ˦mǑtx]զ=g$vɩ4kci}EO1fa^3/bݬWqY(fɱ!cezZ &LO V"KӸݬJjTA2ES$dlF Z#$ng>GO##|veZ?OϨr,n]Û䪞l4ha0`f9r$O뮦n@3#Vzd~fM_;?<&EoX3'1::_Y‰Hds2 $3 ,Ṭ30JR!Q! ^s?<+{ 5abj A0H@!s}2R|X3ii-,K_zLMJ7i?jCK8ٿ1~/&pq%QZ\>~Aj]t R.^,ZDbwjZTH F mLO7.ϓbQxZV$HD8M|T9 jgQv #k}kd5qy^?mY%@Uwk$=󞼔~ o?H.gGh]]P󧹞\;C%dI. b1?Ugwu{}Oe24yo) ^ Lԟn<|q 3s'~0Ar<%.יQK&TWl0|j*6[6dYݮnc}pVmmOʤⰀ@ $ d$lxOq&- i]gm?^b##Wl]yL݇~گVp!51;;nB,B85CByWm5ZC-rpqk /*? 
|\I񜮷Ӂ D˂D@ ѲzR?% _u5l4[2CA%*~LLI;ٮhI u p=uQ5ojډ. QAme7T+v 9r{Nn?L( B  q\HH9Lx?x_1]M? k,"w! P ?T>X2`R01K o+42L~1R "*  )re A\_BIn>Aѝ9 E\ ;w+1)$Q7nckDF9M-;QwËx{Ē|8TݗQm &0 ""$ n7++BQ۩GgB%8Sԛg9\Y%QDh#a/ӽܙWAmr΋] JG3!V!WXU1=_- Cٰ-wcr̓YЁx?Rn&pV'Uhd7r803"DW%# VzƱS2囃IAh#"! a֟k=7_UwNG !q֢x:Qy|`M4л}Y1JQi#ɽ^a]wh_kf"p֛ 8{6EBwQP Y"رPMm c}fjWb-vhtfdy;$in'͖_Ǯ~w;Kz.`w]A9`8;e>Oo odB`NfL$,&f.8`w/v+bo|\D= _Jr0V5ӳL% >V$VjWqa}5G%^bm{Ԟ{hȲȪNk~{=ܳ|پN1k@!= @TbbNL$l=vx8vʞo[~;{k"r Ȟ@qU)ɫÞT#eԝ'4*&/Jwݙ [t@VlE=&>'51@A}Gі:40o>NS]H7 !] x+m(`!(~hQKJ ?I -bH`bE D'IdٙJ`z\t(IW`D2?"OXsX`DZ'B/sN H}; ABE <9hӄE9 $, omZN`)pwjxV=ٻ:pc^yB `̿^Ca@F b|A5p$Vhfә_qXnTJesUMt ?Ӽ AF~ \}@6lJP*f[3-*s9 rH% @KP@KBXD9?ޡC>Ȯ@55QqZ!Td$JP#;* Bvd.-I@DWH[/3! Cέw#\'<MjT[kL۱wk{SߠнS)B)Zy$1uA,11aI&am?: (ATrjPIO(`I {XXR Cp"5p6a-Gpl+8 8o7g!PI(Pf;`[:uo[uPX 듁̹l pf@̃P3(4Qm; iAHAcI; THV$TQ15f~aԁ7u hGn)qpn-!)N ! P:Σ  ̃ pwMF @'v:MCo( ͨymSZJ,-DBcpsPuY !@TrIPc,dX%3ҋ"J|vXZ7-Jbv7@C:" PT5ƌ钄5"Jz=I 5:eB 7ƪ)sfs} 6 4f.v ֹF -ч P3Pk 2Fv{[]U6=i {l龑0P%[QZc83~Vr3nYN?z&*p}vRV zm2 $?`΃Lq[E3 #1Ġ|ˆRkw(j K 'mWqo نTchP!Dc9  ^`M X"""o]G"pTT2LcxB'9QTPEQEQTPED̓$$va2d3U8oՕL`4ƅ+Zs6ok|X6EƑ΁T7ߴĎ5&_/ĦF\Uތ Y &C3??Azfd {Qaf.vOk~])\9֐4U߁ImK 0P6Yf}7XB(j%.yP$?~XT}r8XQ(tbpO~EJ|\͸ CHMF@C$>v'=Lp0/EcpjB^%h JلپXI`).Z; Nb3{˔lCxS9d K /Gy[f k+qs$PhltVPTnٕXV5;,Կ ]ݦU?e>{}r51"a 0pxӖ}aa;.pф!*0 0JCHl;jY(҇656+j[G-hQnqF*|ƴ0HbDi{'WdX,?ҵɫW|`(e !y32 ǝf=e6k 2G[H`HAǷA= )J."zzPIf`}/ o4z1@d1n]1U]vTy{fjLs<|c~DCg|B%m]ieΦ E@a\U}= =$Ϲpy}3Z ֊YUE ,3+ mbk5ںs (yaeXX( Ō:Rׯ-P`҇^t4:@bPD( 3@Fn?C罹a030̃u_t~X ?i;)[~4^kkT $ĖBT&};GGJA.x.|0~Ҝf/e[$d Qm@3g-5~끸c]DFO=K~qGdRH3Y$̚dg?^:ك`ׯv綠\?[BٗvN^/‚ SȫRwWzYXFI/BkIva\ń9HoHV GS3*T_YPB9u”';cd͊njVJkR3֥J)JlZԩZ7v32###b1 2rw^~ߠ4᭮+!?+?s{-~s<﯉1TNҠZH*j_00@Ju6%/B֣ė vc|ۛ8S8 G Cd({Fv>.=ҊM+7!UFݨCڅ[k&*D Bbѩ  G>ݘATyS0hd{ MBcp"lFi~DEpt@Sޫ_A'AUc#  îv(LRLGyA"OFqn+}s 'oSjCO'S9u.TT|fF fkۺ*]@8-jnW6DjEt-J^p`fKRY 6Ɓi:G%5 lA!IͪФH"GZPƻY% 8!e V&3A5iLTo&whUBJJcL`H#;*I & Bq0 d oL_7{\ۭR?ɕW{B+[{=i잾ʺ~Csi_J.92IK%rf/?_u觲B"ƒDۊDm@iN B 7~ybcubtTh\4NQf?\ 
u8]C|!ڦw~gìr-7Vu]wjgos~9^Ubsb2Eu$+@לa~uHu\J]+tUQ_kHEDĀtd: I&?T_owJmo6==6va DJಒJA!H@/ͣ?>WozͲeݲd!R:bq$)v<_HK2!c33:U3 ) ) fDn*wlv#x5hրn"<Ȝ>U3>4nHƴ+COY]') -ӨQ~raE}eƘ(H*Ig0cgv#Ḱӥs ZXvEDEUDQ40UC>h<*\ h 3fbaLeVZѼ8eLA_J?4aUoG0&2],c)fv6R?qϱZ;ָ]x+~ =ٸX$w#:׼W.%w.ɰvH,Sɴ7 *1*/#DUoQ_fQzz\S0w@^%\|+/3;84 U*Ee슄»FKΰؚ$d2%~AR C[с/uEhvh>Ae'ȭJ%x4ץL$|jU3жpm>La@T"G3OHC֊D٥==JMCSw{K7 ` D|?g.>#*CKh[mrȱR fzhs{%(~}צּF$k"eT00A-4^l =i \$z`0'O \gfdpI4]̌4 Jv cWPBJ^}LNH|#$Wn B[NE:*u8D둒`a`G0_i|Hp!C|oF`zP}6m?AOF,I!nb\t{\QiG' Fu;?j[;[<Ü$UjUF3'/Dbzt2< !ҒVIZ:'D=溫ȯ6= ~3fkw~e6rۺgfؘ^;gLל/5nW-#o/%Ҵxx_<龟{aGQYcV~p2|/.w?A孞7g \_0ZD>k?/8FX~S:yG'1pSv];>nm1-1սk<(xzܽbӷR}vz+@Hv;31m]|͜GA SPBZhy@˥?Wpb= ip,U|ڍ=?+upMݮ/Cg.aNA ESmaoتt;;ݴ-;g\R!E&j  BCO /C*HMu3Ug|c;ρMHc! |w-w3c̄!PS:"0TNB<4P {:=G{PZ+do|N"{ާ֝5Cl^uV'j=wYeDH/{<2Fy8< e'EP*QjBrvI ?Pq_9`4M:nΣg +0ԥ!r'$2AXx9Gj9Kqm400% 2K$BΗ䵏9 7!X) mϏ3}gZ;FKztifpu8Fȭ 闒 s)-V(2zWXN.h^flU0z~S=0d7*LRpyRDbE0} ĜC $`KFbBBdR3 ҚFJ$Yfp4%O@>ikG2(&d$)A& 8pƴ=3iI9(܂ ;MiXAulH~Nw`/::VL`pT;)FE(~w{ϗ촦Hhr@ Q E!j!(I(|l/|gu{NFHtY3rR=Ͱ `.IPÁfpbs[_0(1wJŁBCc0v/'E|UiO&Lw>_K4{PCG{G$≲E#dG@JOfQHFRյ ?nxO3W{A5 Eh/%|4IY1)<E&sP@,:~ {>I[9wm~7 墥o[j 9 yf/X] "솪3>$,!$s"=L8"7wmsixxx6sxH5x`030 U@H+ua^$=ZABjQH?5~:ilFg4*0vz^'\~oԦDûʪs9}(g'퓩>=濁~;ut9O|tّՠt`4a0ӄ^ng($p{iE iC.#}?XOC&O<2O!,ԬfhfY/?/?NrEW~/|u)Cyf`RiFRT !  
<22L^*,hot)*=\S?!58Md' }NdAf \ +} J- (F;!È5>Vޗg删r]W|'c<6?T5->FyW}Em?r;&Ez9 !-pUd F]~G2|yԺ1D}, J | |$BTK(3/YǤR: V49} ^=Fہ @)uU7RÓ{U =$:6,²S)O(vOeu Hi@B0B 0bhZ`i #J@q5Wgj}8 ]Ah<}tLa #e^B?>h "\l󔗢Anz)ץw)p\܁6Mvov6jIE4t9~na]ʔ,?[꓎3r.6ia!@-kI a Z kz=.C{?mK=1{%!:)!oD}Pl`'.T%ZnUy7,n1͟[ZtXBG4c aLJU,rV~~ےVjL>8q @{i~ GrSlt>тWĄ6!,kOR/OEp 2:'q+s:GGkݜB*ϳb/C~9UHc K_c96۵'e ]Ca9>؉붨鿀t>\^UH=/.7TP"ˣepYqH ;E.gʲ)IΖ#&sDrkvu_c$9se 7O΢4C*ڀК'Y2Ϣ^t#MX<2w}վEs ”Hڳky.v3CN/y,U<k,]ؤclx'J)JRڝ>eЋR}9H\zv27Y%y<ՠHS,'9E ibxo ?3 %!Ja doS P=#!"C4h dȠX!+E[l%hBB悒)p[[.K ][~_cۼ[Ft(q>D怰€HC#["= r1ܛǛȂ›]i:RtsұPaJ ~^5.NڝX!gi^V>Sw87!2}F(td|.#p0mX/P`@)IT_Xp,F3 k< B~zaT =?GA"rLPxx*A֗ǂm.\tvc*@Tk0k$4$,m<vm@oD́-"ȐY%,`>쓐dI|?{T7R}Tٳo0M*nIePx_aSL1ʆ' Dz'd(?jA~"/iAm~{ͳ1}NG悱E6?;WzC]_[ZOto$k-gvÌb`.Qp|RTAް\@\` Y!~'&,Ρ }tC;L.ks{-go"VOvwce.? ̐^5=Mi[`@_8KL2*U T%PH?Ph!5 DD , c_|@NbG@چO}!TҜC5>2'aR-!F8<@Ok}CtҎ=#W1H|qoQHG>+BӰ˂vnrVW^ c  d#"H$T^=w᫩W }<''s}W~UŐ(GIz¡1?WTu_:&QL%"UF| [ސY}CĔ*S+ Sl*aY>{f??/?=q%0/QFlj> ѥoD#dA(OY綾qzW~çCοyW v:RB;ϐS.LC|@7|f®`r;LsˆyٮENr=[J+.P0wsO.l^5sY}rA"Q&޴w>}T^GNӿڮv^>^=B!gD܍8/pvwͰ۽d8GϡǙ"Y2 !=JC6?#S4Nrr;'˄iDp[._ذ½_Kr†hBkuꦒlc*_odx@ߒкIؽ҆EO*Ʒ"[?#*51m#GtUeвe'AծJ]:*Fi4T;!/G{&앷wHJTXx9f@xgfz5prڇDJo2Y/zNN*}\nc#Ȅj?֨6i~J(y0kx9M|jƶ~ϒ}jDQ8ǻ{u^>ۺ 4mޑrvSbGcl^llMEnKNސ;x~[s} o/Skس{ımoSm>L|^: B%&ģ鑩6f3ԛӫϔ6~BΟQvеG2S[k1|?>W/oF'th=uz7 m1!%Oᖿs_$NBwr~߽țЪfK'P(:.o)(t%~!ӞpZqww1 >$x}+9nGίkcʄJ+]/ƒiPdR"sO_ kǥNBkQ4,ή"%)mC$ÕKZ5BؘgVQ` ĎJoCWg qh>!GdaI0ۦ~P}nD{OYI 꾷)X& fEF>_s+6c矻lI#ݗ? 
{գ}y'}[y\ft}B!9UXudk$rz]|4m}>2[@%-ymM5UcEi[;"b'cE|d6z|fZFu_,;_#;خ _@٬rjdPIh2JS'<@7 g %& |xg @*;= 7Vru6ni;'0MGb,=ZPؽOZPo>v[3V/I#TNS+u+ctP[1tŽ;ˢ|J^˷pc$!\R4hx3HgHG\n@+"H+POϋc{W!dOoq K ăQ̰Q[]Էkm5*p^Or |Zlt_0jaʷCf/{w%%^v4< n/V,(}3OwP/"YkS#-n%KӢ}0(y7Kc#sjd*~V iZd]ÊeGnM>")I`h:X_ ս̘Mpۼ{͍"dm?샅Ԯ⻜jڦ0sCkbTXt0'o^nU$[OlUX ɁJE+tUKPlhJRJy ɓo:[oroNv_]݆׬wftK}h>yGf5 n;LF>H?{Qv}V_>-GSvtd9=0$&a2W{=WH{L\ɼUD*rW:ǕFsY@0G>zR5[~zk>>q '=N_`ʭ`du9=̮iQW.~ ZtiO!;ݕc̍ds'WyԌQRp&4 &?xuUAG2a]dͯݻL[pѕw+ʮWI؂F7FDڽ>GM}()(|/ȃ:~WcD&zUT}+>t;_seAXaW=jT++֯Dv.KMC"AR%R8v<:;NA1{IKYc2!ip( .Ժ#;sW#VB؏ygoe+H{RbZ?"$x) #H᫙س>>#  b{!;+;kxz;1)a.GH돠qؗ:w~LVd& B˿W:5ֶ@o-ut8]#ԥLөmzp po" 7V+L{{؝Mn -1om*[qƿNMϒ~kǬxK&C>H^'pژ3u5{߼C>uz'0Fv.GO~yȥrD Wy@/. iiRpnU^3Bo$>2Zϛ=Bşo+*cp/VYars88ێ*5 ?]Q{9 |Owl'$"Z~Lgc]`XPYwS{娰<}m5SqԼ7Vy-KKtsۙġA']C{%//4~rU]:X{Ɏ&.>,@y %+s >wXE)4\P_Z򵽬q8۱&nW9&.z}< y sE&` w$$:zRqU8~;Kb?yPϦP\?t$ 9!]HTlWmXe}k`EJd$z: 7P U|DA6 H^vl0/1ؒEe0]r$$$Zh_P7A?Tc9>?Løf"OP|:dUji..~ZF.B|Ap`I&g#̡f{u.VyuYo=+?vbb/ Ndfm [gp||<*I+7hu~-?RÅ׌!޿#N l;2BF кŊq۲=NoM^ͮaGj>4vwT6i?pZ϶G}=(/ H#0@% h1js3ZMM[Údc,A5Fg-3#w鯪@1"V#-K̫S:uޏ M)b3򺰥2. ]ğ6 _r~7'uyM#k|_e\Rux=@ mHaiN<Ƨ]kͨ~T1 2,=z;̓AWԅ]cp< Nt 3viwsH |/9`yrSuUVoMBԚ̕D4:ҷ0vԫ\c* dLT-2@~ ߉{wx Vg?uӺ]=lM⼃+0.v"br X o;88@t-jHPYUY 2bÌY:J; 'e; GW.>tl}t[y] 4bqh͖q &w9R$-Tw]U8wNj$w0?m)t ! 
z-Y 'Ye콫UAsmUZ\?|ZGyXzx?W;xDVqktϙg&1Μ}2}SL?w,WceM|%=2b6DyP>6?ni{XoY Ov3: =I_1ha@|{-š M$p|l7M00QȠԀ;zUe-G$`9YEk%w9lwRݳgY!AULݎI֒G״Oŋ޵oW'J\l"A@97s34I >/~ܟy4~-nJxf{R~'iĒw~Y.x3#a-\BI07W-U:M"9ݳ+V`oa $DqĹ*.XWǩ^d_-S{eW=փc!J )@dBB& 7(۷y8*l_\ߪ`7.A<;c 6TP{\*b"IKijV3=mό8DfG%9][)ҝ=.R w}-]D3Ym&J X~6w1iiiظj97%0d4mVgm;'G e\.631vJnc_'⨳TԛG@|ow@ QB~5NxF8v"=h5?m#;_kYnQݨ{> +@u-1q[pi.~w{|a~'Ȑ<ƟqEǪV+U1ܹOzk;[VY'2BWsH2 \;aw^[XLhoCoխ$IUdyE'{Lk-3@ LAq0U ٵ0$K>W&Im]ŗZZFanv;:@gF}mHK(ތa`M @uf2 'uGA ∟ QVn b_%7l̀0"]f& R޷]v{\dcʝe}uayၐl7f WRWȠ91Eo]L'XѐȬ 3k8?F(]sfîTĻHOrw%:1t96sBd &#DA<Ʀ1p>M6ʾR=XwT!BBAc@0'kk0o" WL0&3!8coʱlԈޟe D{DWIB"=-<@ܡDo+{اLC&f5f*d] gWctZm?3wxxu&e:1fdd3wLj@XS50Cx7?Ghp{LkR[܅x<6RdzU?GϾ0XڃΟ5Qs͕kw^U,ss,`RٚnbځNǚ߾^HD" b#*H2h@IzdKg$f jt}hd}O.1ST0L(rbN"!lhz7g`d01Nyq]ͷXz0I!i݉XHD#X4 -C:3ռc\2ӥ}k޲5Pm?|,IsiT\:]~^{Qj U[ 724hL)Z|9_H5v lںt~jם=r @&Yð@6JΉ[~S[pyP/A}<ޟSܯi_ٿm:|uC~~Ųgz܏zfe'vM4D^jkX1c폓axy${6vI,r~WȂu{w6|mTL@v3Ѿ9d rW3L7sY1mĄTt;}/X XB2Igi<ـ$[qo@@c @!OTLѦGqZ} "QK7'Z$D=Gzf%$aa>,W؄$8[^y=KI t_Br |^R>]n:mz}aho99S2^Ct݊6';z @d /}$$l ӵ*?C~4?uі|x7plTudNhA"ק柃 D'!HhqzJ;rT'*6c."U>|%@8~azzp!׌ P?qiH)GXz'\މwh6VkβKz!2K۬y 2.9ŋs`x׍S_ޏkO 9,T_Z:(&$zF.CI6*4:GCybWy}@ojy̋})&1|YwDa"4z/&Y9K9A8JBS^EƗ;*Sg }=Gmr۱GO ?Ȓf-p$!8a sS';j+ATPAJ^;lݡ@9ZaRJHZnz v?R#F=H J=>k) >pл?<sKwaǶ:w}wb#!!8ۘXxܡ-;8*BYp||8) :oTW{ ˡV0Ta2X1`5{mTh 3<6QxO5֠Tz%Va9,<0ܖĶh(sH!zL1L2^L>[5]GBKn{02ѓ3J//)<@q@yp@ 00luHJ]8w!Y,B޷_qo^GL%?ubgK'^ o]l3v,{̇3UCb/cuu}xUetfZxVCA|fb&'+s @|b\iyaBra /u U 䤿k!,TbBM.MCw"֬$qº杵a9s7*Nzzw A3Wep(@ vxjqZ? 
~xl):.QɉY3`)%^z$7>NDՙ& NQ}FqMє#ˆ3C:okI__(YܟSQ,ˮ\,H"D/.[cSKlrM0۷^VK'꟭Yzwo'vȹ#9q1_ugXA iL !%JF%Bֆշz%%缬|󛡆}NݑJ?#~"# #N.ϸ<`;gmy9E.Mټ1L8KCǃk44?|3=xPa%_g#p(Ic!gp ]A[tZΚx'Yxk_E?uq@vծ*]ζ  $ @ZEHPT!|vu4&A՝R;׮sz7A1{{c7"m 볞"z("Yakv {Ex^~'Ou fGI$A;JyDDQKUT̄-LhFg|R QTJWK ԒMXPI+cL}3RJR(e V+[VȊRH֎aֶYe=%DMr!(M4ŴA8ycVfA@9`@K' AV|xA/Q*PM4ueH# UdžWA]ɠEI!]-LX2SR%q& 0х$x[%TR it 45 Qd Cr(גJ Q 3vӚ(ڎv0VZj}nEFpHB8pކhR 02Ƕс`%M%*PHznw2j)[s\!t r*|#RT],ʈoaS"nf"ahBe9E ?%5x+;ɴ0`5FkNlӒ*$~ҹUY٭|p՗TMm-H**IpE٦]2I 4R>BZpn `5Pgݧ%Dq*TJ4*kXuaAQFR32cN ׯzt16 awp.k99?JT5I -- e$|J=J gAbBRNë}B6m@B4 H bВ9h.!_-r :"s)l۠q+ S*1ɛ:4YH#KRR.[HB6#{r[FD"얀: :r O4t4kYVQo{YfݣIdak0,Aa0ֶo3umajݽkxF[6lѳ{6 ѡЦ])%DTPEL%5Ѯ۳v;B*t<` ;]1zрU"腊nR`.tRSdf7ۜ#Eifs3U!&(E{yݵ~?b `xP y's|n%dnd aiB2BWuJ՞h wqo)Ȏ9JEe/ Bye'a%׮+[K}$ʋ_Gı;8|2%qa #׈|eB}Z64DupP'Ţm#=U}\L^$c$- BPc}ɂj( +xb4?kD.\CPZo- q!vvpge_ 6^㗨# 0ar7T1~ Hr_ L%lbi@a" nxjl b@! RdL``bp}lFgT䕈")W!=}6Z)-f+@02l)uG!2R{D< uD@[l:6(1Ё pA\?$4tM'}NR&a!-T*0$$EH$YBAilJ2$ 7q u+ # <)"&Hf֡pK(v+in]ㆂMVN-p q ó\|fc9BYTMFAdхMPddpJIF! H+A)d]+{(X4F)o'jJ8=<6M"//R(%X7m5jWlP@| =G6UQG[NSs{|1cWoI6Owͮ]JZ {CZ(WA I /Hg0؀T#}.tjlWs Ј PPp>B^W^Q1$LEW9u{U h9  N_$<H\ qEX(MQXӨ5KzCb*QƢ?9QE1.0o=^`XImWyU7TVJAmh8#?pn5 IKc5mS*uJf V$M7ڡÉ8T#ڄ?a3RM:0#b 4F3XD!ïRQ#ls$1V39pn 1ML!(LoD{`z9rD"%!oD% 5qUn͈ ;cb`@>B^ xu<ĠAI$16mg/쟂SVxUj𮛙Q æ5" 3_mpq2((&s@IL8`bpq#c]AdߠnP%Vj(;EWa{|{hk&[@׽Ŵ4)FxF+Tixob>&Ӄvsz<}>IcbmiQ 1],$4X(^<?ȑ}0}#B4 &F'dăJcR,PZK%l`]8jq(:^۴ 0*u{]($>&B0gq+$!q\/Y/ML?AxO_g$)QJZ;)ʖB>S , 50<|Ih= 8`! J?n}BBx gXg p]t+OHڷ [ R9ON/o!wH'癣|`v:((-٘F0 (vzc%Vi9zKˊG( }*CS^UF!b"F5TAURD ˑ;ٿh1wZݲѫj*F]S (/5xpܳvx1}N.H>m/v?USPX_Vk ܻi28Q?x6 4*<ҩ`#!g.\79@"IB!h~Qe 4rv| m6ȸ*@yР{< H'2$[v{4 .g$u=T7_r]|loF!L/G@7 QC+8wi[-]/x~DC년m)"I[$XTYU|V{h#‡Ãv^+J*>B}wAT/BsTC8P(2WCԠ`x\b9$FA=&_,k^.2堖 Z*HЇoY6HQ ϡv_ 4  !!Ճ8 nG@lcp8Ds4 >ǤIx Q*(%\a/gSlnmGLdC{>h!,VodAx"J(~nOn r v@+i-uk w x(qmMbyBQm5`#- p ;[ ^,Đ! 
>lA, fGlR=:|$o9E&~]u+ZI%Z</^<$[U1<;\ YueuP:+BP;Hl(cMO< t\٠obsk` A0)&B$$!A4@@*)ˆPBan8 `A{wWڐD$aFszN_,x2j x& s_2F>z2`VQ*hBBJsmRч8:zJ27}#2%%* )Нo;YIv͔((69d=SA&"$(Tq:k07\ KYwE[O]W ҽq4ρE'0Ba%&hO&@L%&v,on-=)րϴQƏ*I$L91'1L\CvoIRL %V!bTJRjUi^Oqppc R~5g#B)dzЯr{i7up+y;|FҊVԀ&m n|=͎ൊ1(|µBI~ 9owɭ:S_*Xq7F >@>oU*dIՁRse<3%;&=$9ŝZI@eBB"++kѲM,HBtN]z=ˀ`fDPo3Xdj_Cٻ?` "``F D @a =꿛-%.%kw`dUxhvϋ8yzg~e՞`1h/y,^|=2(  yI X 5C[jJ -Fp!Bː,"CBATTa#PC5@sdԕM:ȣAh-$P4m$䒨ˀigpc;S L K kJ! d< 307aUN-7b;u!W@{oMOfx5An؏xUbFbXXVaEevZ?|˺)\JgṔR8骼%t 4M.ps)J|MCTDJ2GPP[iw |S\ث/o3TfhC@ hC*J%d&y(}/y} Á2L9D4Ȣ?OUyìp>ǿQmk,*`r9˽:x # Y}D!|H%c=-cE+AEr=нȡb8pj <D$8& ͛&Osx٦~˱e v!;9zbH(Fp#I"TB`,U@5Z'dy1#!w,)5M%Cq`,bb7IR%.0&X`"P4C&6DT?s_3s:oEtJrN /.vhS, q#O>`O #Bؗ8Ax9l# JR & uA 7=BGBA섺G"8`Z0WW@O %LBRttu gϞd& @{S r!p6?,PPa$N<a#.)A]ǥ磪0{^a?ܩdwdgIm5S{D:qꢳ78ƓY*hD`("!(-ku5Rq|Qh]ۓ( |>e nƛ&((IDzbTdAQ]$4-*4' b0#Dn s6@g'F B[( 0" @#?ւ6.yYPOOpd?:.HBm_ kQu5(tĨ}:;p?Cn @>Ez9`zQ$rQGN%4o/D-@P@zHXP90pBR1;!/C`1ph=>/cT;0K,APH!={Lᙃ5w:̄ṛQF q<Ç!!Iot3p%6Z|f).?|]1{~:҇U{ RM5n.TBY#`L~ŚU>[O)xIBzt ^q3*n~^ 0L1)D=@Q4"cO ?'G :{ z><ɉL@S;{--[%U6QGBbșPA[^xu[>ߘ/hNpR1UzerFOJĵ.C !r."9[D2^CbHb&CQ(ՖpYSs)Fp4^IR)M7߭0qK.gz nEH@1Y] ( bR&#J,,nLhxcOgЌ_$YK"9M̓n^|Uh{$vQvo"=a *Ƶؓ!B"n`0a_LK cĈuSIOдSwbg~00: ;t{U:9d7RxRd:]*=ie"R>\IPABE'C%[[hňnU --VۂV#flTu>' ACӄB6>OK{1>S'6^DRg E!`P(Q"1Y: 0``, &"Nh!&:tQP v{!$ JLj*1Y_ -0Xu580 n7ewm#}NC$lvXqiBdR T*&шdbF^꤈b2bLv1'MchN`"<$=S@p:өpC4P.36`J @ ivPb mhe yB ˹b3@+Xu#X*PCU95Ye1bq98"jp=d',<%+e>T抢 `!|mo52S憆Jr2BFf|d,x,w^6+{5ws045" >&TsTXD Si}M uT{82*8¢'֞)A@`ܸrJ!v )@P*ZAN&o h!i\?PX&":"U]7)"R)J@ KCV)at(&&**ʴ4` TA-%H5./*4kBuhVBH>|iA S4 DIȰI =| y'x~*"Uo)Iwֵ0 L"ALqCi : 9$AM0D%SIn<HҀC2CYOOo;Os^̲w^]ƻW׬Ѥ)+;Ep^%at#Og_N3 յI[ ;SJv%I}D!'{+9D!4*n2ΐhTdR`0e1,!e[X<=Ji%6FVHY7\LRAI C>qΑ# TECѡAB䨀 o*lZwoZ|^PLeԆT{ Ixq$6jS4 믑;rXa!|WQM"/@˯ Ҍ=tNq**;{Rx_Xd׫VV5bQ+YLJlc}.;Js ,It i6-D2(IjW48!2 1( T A*T*c=hv&&3e!I  AwFi-^yK.a 9dȰƃY 0Jd`.2Ɛ 1 Hkst F#O0%P2V@eH aY% BF]%2 aC 0%ʃQ !!|qF垉oG!O"X6BŮ2 yxuN(]\EY3WY튩(hLUpΎF 0΀}1qI$|F F׿~kzb6EVu6Vq eYR@Xi(RF"J*BEd 
b)R$g|tr2;Cl2n,QHd[ANض{.%h~T|n!!1הˮ *bܗ3/yS]LO-]KDq cc,TgdBy*42wRe6 wD bf&l%J uAvU效5(-,lxVڊ4i5͔p^*6 *}}AުۛVsNjN4z: u"v!OgU"@w׵ȀosKS)/,ID+wnЀD c(~HzyH)1 A ʉXb7Gi^>mj&=.4w"5xvࡊmPGl#kgAHnQFL4"P0>`l6`;9r;c XvJ Pd(* .91MH FzJ4BM2 Q Q!w 1B.@dbH,@dʤQ1( *"p}[TFMQ\A+. > @Q'7he\ X+H˳pYeOaC >Rz;O FK"ѭe+@-5]?1RSBRR! EoY( jfčfO,] QAhYPhc q(I@H;CsGods0ѹt@OP`A ֺM@fc@)@EiwxeffujԮ`s)-Q1 )"]DUrM35+Py,q!;f5p  UfqDP\̈r*@y[4@z? C'~ϴ={  %H:|P 'Cj7]\訁g?]*apT{\-P7|;QxD i"VaTY4fk), 00#0M4b$ :.h倘L/QP>V猻4CD# t]!8B T(K(.'F)j:pu`@CkbCHIKV~+]c@fUA͚ NS#&y EL D@a _ٓ+!)HzvB^3 # `r@ƁPAEuEr.eY! de#+pRe:8 !nx_SaχĨRayLNN\;u}{mٺbSG1^Sw,x?W,b@&B| @4ha9 塽SU<,U2jA>x'z8'!fx?ocuGpl5=Vbn;EUOtwi'HpZbhqJUu@;1}z"Xe<:9 h@0bΫPr)HV KhTwȆU PȵdC!`D)t8C!'^p!|5fDBjO -,aV/vjTpɀ)w]ɳBJI v8N;YE+H=gpAxTkSqU_\aEvtX XxN:"Qi`Arq\EuQ)-{^E}_{c;w-bn,UmW a&SWR:{%DV\-;jKF6qE~IUPpqcT-51!۶+z˯?`SBr*ʪ&r,RhGD вE>`T@iݦzz:hroIRһ (1@xeLEFiP#2Ë5~X[@ Fȵ0B! RLX@*kU qKC)PI+2BAEVuV}CU;C|>>fdz{7=l~RWp9JSO902H+;0?aLqt }!|3W7= m7߮ҤQsfcw| nȜ*%Lo૳1!u!G$6,-t<~i`7Є%. FM<;Uυ6uv-lcdo߫=faQ`p]݆{r?je3Lb{HT,rpD&=Tkz8σ a6<ަfL8u y8ק͗kJNVB\e,@'#l "dON>, @>Tc RJ*@\|nеpTQʁ kUv~,>9-{`k% aS󽊴Zm{A ́q4dМg- d@8fhTWoz.7h%N-u`L!@Eas|:,QC"C݁(4vYÄeJ /2v;TiܣF\Im+|Og9 B(СZm5>6]M{QՖ~2Hd}[2Z9x:g?:+îj;<C\Fl V/.4ɹZc1`9X!ϝVv}Wa1,`tt\A9:k_yeAX5) "rO sN4d~כ=/;SYjIgO3yZY:v^[mrm裭ýF9epL!9! 0dEs;OJ" @-vTU,U|Y6ZcˋX r!򃓪W4^Zn-xƠύt@#U$H,f'/sH.W?/HbVbi<^pc-'dʵԕqY2bA_9οL!>dD*1݋*WSş۪9}ÝAhV5 !90_\fxKcmA\0;oo{#? 7 d.K|_ХcnqEϿr^]=H9RuI;ϯr\eЊӤ;^xI?Cv,BA(Z0ŞCN Y"|Qftق)bD՚iNض9Ç1˿zK.HIEq>Ï[vMAD)T8|?6cHGAh@0@H\hf\D.駎vyGk E%-wFKA =R5Y K)Ӊ MgqlI@ǦB>el)} ,uwx=fO5I@R0}v {%?I$^RvҁU]gGFCO:jmZW~f2ؕ͋ˈ>Xv,5|LHW"KyIuڋ;~7HVT #倻Gffj CwqGu/Io <<<" *΅3 O󦡶;. 濋yQ_ӳzf,; }Uwv}.bb9 ɒL- ?\]F+֝y=tgO|[کTIe~GޒOO/l昽u{o~Sm5,Fơ TpKq)@, N?~/0üd5VjC+w;$, +} 4+]Ê"brlָ|E8[8f^'rsgFDiWnn~k@  0 )~P:[>;ߡMw^a#}dže&" q̏ _nɫÝN~BN,3_2}9Y!nj}&~:k7[Z3A8nЄ!U`]Em_,ZT[%oC#{8A3 LUկ[? H!).7w<B*#{h&KLw}

;ˮTu~^#wvDy(=mt R~eJ|>WE׻@z:8^h굳/# ݒ'wk\澛!MAUyփ`Kt`2h-Cbՙo wYmF1-Be=+RriO/wVKnflЈɕwѽЫ=]//cZ.5K8r;L [tR^NVvYS{&iOx랴Z}mNWh?žU_RZrQ*UFV1{ƣ[6X=x^6kjoLn*wR 3؆pc@̄zX*MnUvIi=[)1?CE6*L~éGm[T^hl$!}W}cZڂ3qxblw^Xo:u=Ա)U ! 9`kpT  11̣f߱{P^5{6x#{ kz=WfΩ_3ܞ g̑s@@di*TBF_âm=|D9-3iX|Tbͪ۟oECؒEDP%h:xb|hP=-Vt2$j.j'W#T^]lہmѢbDSdC39_N;w(|D߰owH/^g$y?5ne;oăм?RUO4RNQU=nr(.{JYa6H`qɯB`"O㢋a7}.2 3V3=-}oKM'm.2,Ͷayj"omL$= !> PY asNi#T҄!xPv1ܝj`rsk2.I;= o,ؙG/I|n _JgRGw&6ͭIw}G2z6 c82C;WWw7F<d'"yeQ(yY~(xxx/b)ҙr<;5W"jy8]*[:mULąjo;P\~9=&?֚ٯGwy~^kFQ 02!IXK1Mkf:P@kHZw]9?I\gv`rZ{\{i<<R.cyg|+hY(@t8Bgt掯LIw2׳tYh:Yv%Jrۿ][c_V|ryw0歃D<__@wINoL!H_<ηΩMݔI~ RvJD3GavPj 4*;c[*`@.o`sӄ<-OCٴ>sUmt)''T5LĠ|ǝ W1Ao<c 0?lF7gq2 ':NL/'~Y+vI=G0؁y,qh^+tT2n ^Mp}P uSΛG]sg@Ix\HnbJHR 6ͮ#%ؿtӚc+逼)Bs6.FOQaMOD䩽=sӾ|/(dxoe+SO @!sGkr@Y:~/ =l&nIVqSf-Oz- 㮷52/wU8c+Gn\cO{p0\ܿgņ%.gy `a@`  Zc{Uo\tᶮr*x)=cڪDeVZOL{/xl^%ӗj?z*}μ^)>rZnGd@.H 0 yB@' Zi(g4lv{VO_ujL71NߔCֻ٫__b48% m˹4ģmޅ ߻9u>{t?} EL d c8d$x <5_{7mcX,<̝Ժbn症a#86 kc-{뮠jT2{\W{kR_*I=>ߎۜ-u%V~9Qz=|OruSY!V8i/nRo-ZL"@u{Y'qcoTPJzg>[_y/|2q3U}kȴ0&`L000 }fhBMW9_95qPO}زw5mEbxhw* w:o~ϥbyrX4k> f-Sl9^#sv<}MȊ"B) 2sx8xL`>>vgCl,vQT]%w̤3F7t1?>M^JqL㑑|sxy*x^ߣa5*ܻI$F 0Bp&ƃ7o3= n+\DZOҥ^9y{yvU9˅cr6J?How׳_vklI.Sl-3~ CpuvC7tب^ct!Bg@urQlc{zG/h}A_%:zR>gzʉzؐ Ь̓)C{GЀ o2Gx1㔬0v0i{#ԺU$j-{Opm7M 5%/(Ed&+l5吓&dP}+ȕƮwZYфt+# Ї0aRcLl9(. &H]w =yw;C}uϖu^/AcoBEdֹw=;mhw&!V缃i}'%ɤO$ڱHbgX<[v%YdM:"D_Ttf&FD"8po<WݸW %>q`/,*Ӎ:vC=:-zkm)6 5I b pxMJninjϟ'q (~6>Oj0HMx7x!ԟ;'? }~ל6}>S>.~Aʢ7~+A "\_RYNv2[69inޣm6Y尡X@t$,J~wxR~s#$ux%a%]XD&B zTv_1Vuo^@iqŞGޟ#M"g.=)!J_>[髷'P񾸳s{Fd]7~jeTdOMD+_ZeE:hWv;\;b]5 c@zБ<ꑵ )c̸~|O3reR#םTY}-vaT̙7mӦ +]#6LѴ&y˰8:lJ|Y5a ` *9?Sj/ g۵M2)?}^'Ed/1W9 MPl7,e:G"h8{YIn?ޒ A2 8EoMh-9}I"_P.%^ӹ[VW+?CʦQ샀;Rg_ tg.Z --|;{|;wy_{EQ0. {>IZ:4>#tgu|1l풂َrɠm>*|:|C:)&rʨxs bGxЖ6"g*Ru@K6p?dx59}0|"CpQf<\on{M4ˢ,[D !6~8z8.L7aod3ٔC?^ѷ$􀦇9o-IB"vm:UxƿybNmS4%Mr!z#6ߙhI-'}^3J]oF%b`r}LؿO3<ϢQ]dRŝ~ VOѥ=׃zgQ0zhX =U$mj8LߙKgW̅TَIwľh{ K 67Ie$,p]$S^uk=ACو~. 
6('#şQRMwñ-kpM6$^V?6+b 3577e,JC;uumև;JO -v5(x a*z[l^vkЭrsIHɟ*6m,S3, )t Vwe.Qun$q?%*_zSg89g#O#ZQn6ZF W@ V \Dph'&4[׭v'~4Χi3s6CO8#cL}W08 Ώ3}j:}AeK@yTLa"  2g&m07nr;WC7߶;Aq$378]yoۊo][˜$kDqLjNo!!rM&P=x߱UK&w 7x-*/ ,0 xQv P^F7gpعB FU2cd1GrSLr1h9=#֏! ^MR8,g~~o{TThu:|uxگ4_kgGҝ&V)$T2aXn`U(IJ`[ wz Э$*<6(%1axmјdkaVʭezŝ**i.> bۯJ5]?U:YsG)En9H{[UFL{M/W轛9弅Y2/,=XgLXrд`"v[0.,+]+Uwa*[vLE&1ku3{mzm9I)j,9A{oJg柿}}^u$/ڽGɑ~KMYr>5 }ue?cŃ%x/ W}῎2eSݾP_9z=J9,ӛa1Yvzoen>^3# n˹YFgk(i5w&l9K)]gD'Ϳ:fjQSgoثΪ\^ʳ{;0W9[DA*~'9l7sgvyZ#ʽPψz7+^?JSsrV<:SL#ss~+kK3͛OyaznO[#F|pQUޏgN _?@>DIefeAf< lP|u=!y;;= fޫMƝlҠc懩)I̺A]6\焬˟AArzo{HƛJO)q .-/evWݪ7[~VGhP!!GDz{瞣h>LEkƨ+17v8+ʮ`>Pp&H(Q@Y!>I#3[:|opy?|ǣdU ž,w*^A-g_,tUm[[L8Bed3P(Ԯ[!T=o0 ҇{ҩܩCY.-&ؿ{/zWIִwUvQ3[?Mvliw~L2w^ &T/iL'ӓ!̺GLz(`_*TB*QbmaQ>rV[ۆ/ҡcUF(B ~<>de}d^wvjo~_va1" G_Ŕ(ܹEK ьU#DP61E%8.o+)o0^}3IOKAZ8f؎=e!M㗲&miw*b@,٠st?c~m$PVpyn]aLc)?03Q,UH:t&r.f6Vs/# Όx3'ʿa}oP4M=i=\aʣ:ʙ˟s1UӔm8ꓻ8C |D=poe&8?%J}ayAЍe.3wn-t]UXld%QP/s(_ޞ9E$X="ǛL"ƸL[w6[hwRuj?v.;HSo=Z;pЃ礋qzuOAA"dd}ÈgoS?)B \-zO޿78c7q߼nx&u@)XfK$ `iء19~v)GEBԙA_aLnrf"'NE N@"ȋ%-/;!:4+f!"hgFG}{}nNj[ϴYW _ʨy63Bx}x[`I:4O;bx>o"[d{#+m̽rH KOȗ?o[F<н'N]ӗ7i>P7N"$ T8I< kAƫ$m)6L<|G?=<އ, ecSaÄa ,2Dzj:Kuw|GNiV'.$[xIQu{=ƾN;6hz%U|/eBcڸ*S>m/҇kuH+dU3mod/I[ZwnjXt?v3_wz&Bbt/ֳIIy9` 62Œ#_p^t[d%޶mm‰w:J98Ir*$99@ypa 4,}vSY8zɹuKu>=OPo u ʩO|sx[׫qNq}wT)qL+8V]y >yuIÔlmL%s˥:\_Ei0 Q 4<,ÿu^Eo/bLz$OG=pw%x72Tqv }:urgS;B"HɊ4'@_K1VC6{X*Yϳh2Z#j5OPޔ! #L,;ҁݛ˧t]=v0na84?NN9;e<9e9]1ѽ}SruC~=̐l0gb`}08>,vZeH[&X_)}TCm"Zy:<! 0B '!J40x@ǎg`V=vN L<<`Jiycf720k!o@~~fxS=5! 
=v ?.~CcY Ӫsw^ 0e 1L04m6vVZ%4T]_,3qAS.lNg nұZTr ۺwueac6GΌ?r[#" 㰃 z17:Tx(;mNȱ+N/ptVX?so:7`8KjN>UGjOMz)ژ{+T (#95~;"D~ < BsO$dBJ4D<Et vM}#v"mZa sLnH#] fR9;- R"1]EL S,Ѵ-;0S@X1٭ޟLϹxT(g[ [E%%AJּ) ߊ=onEUZXXPϦIV$dltktj{#-+ag}2|?W:6ܐ\Jmb{_A%/7z>` $)i7w?TLto'm Qwh6mI|Z''m;.N]Q!fF8Ya8p3Ha ASmfIWUR~CSfnm ?-}ZVB>U?M#m|b$Totv'=yNIyՏ s$t-wba&](n۹G@?zHm~p~Πy4x8k aĪՋdAWN>ɮ-n9aN$T a\7nɴ>ZQ@x/zZMH!a-]*X;_HbvFu!C+¡Pi06G2&׳z+* ޫԟp3-oC\[\(~Ϻ^qϪA~6 $cDqY=fGVZ+Ǭo麈NڵҹoBůHf*tja O聄InOA1Jd:EVa\)M _0# ?7SCK԰b T@# f|φ6+De >m@z6Ā00;Wh}#Y4=!YlrV7U(IG/6J6 (/zʦݏ⻴6(r:mɎ(yq+|x ߾Zy,֠ wޕbV67Q4b=<th 8rp\)owHWVT"Fl²U/x{-ͪdU=4P fH \Q44Gȃ5U1ܦsf칉 ~٤tFt'v?$B065|ϭ<̩;ϯQ}Ard]^?$P)LP{9Tnۥ5f=&&o@i!N%H'߫C}&;WO_qƁ75 ལNt}{;um^@¹ *8d}yF)]퟿u]7CmZqmeugZkExb # F "t|"=$+ hkì.3&f$p'W"6d;9@I4[,kX\F`P;0mڨU*|<޿lˏ&F#߶$\?n{wN;g9 ϣu޾m-tXa/8 Rp?ݚ-3_?h8zE r[^ab LqG&83Mϧ7L``fN$)d8@29h2E;kίxcp#&2YmtcAɂqN}3sp_g|?q } y&뿗ab1HiGj݇9Up*w*&~ ٽMdS.s@*yP +KQؠZqJtc%:*8 ?fxx](HD^0f޴,I.7 Ws@F/"Vxeyiz.q DM] =y|čz{{k&23( eA;nX|'lm{ࠖd\@Pn,L}O(%tb-Op!BTp qhܝ15.GiȮ/=K'7O<$5LH) {wAv$f)BkZη1'3D7hx }.嫮H| 9bI*n>vvJy#oers0a $/G>cӢaw=<`-X7vu,\Ʊ,A$,OK_MM&t^Ҿߓrs01gOaMa54c̆_p%mDn,zK ʐ> vX9#U{$_V.tXG etd? 3IW!9{/V{y=וRb${Jfԧ_ sRCMyZR+O;dI0 ꂯ?nRh $g'dnw>8zI[?u>8neۏf6F {MɧL"(CϳЬtn3_ ?бx62nV#~Yy^ʨ̴Ùߎ8czȈ5@pW"tQ6cwEt~M3X?DflCEϪRl4 RP{(*s"b!wf ÌWa-f݉VE5{t!7q&C^[HI7e`v.ah}N6)1)tI4x= 3#¹P|b{,6p$VWD)Zq b:Q/g2Eb.ȞZڮS;eȲI Y!-< "آJ$ PBE ?Sg֟2}werWx~@< %;.w>ܛE)Y@$>ES?`j@$!j9}F4_46ܘRv+Tq8/|&\{ !2E3[^ N>ACL[6~|I26el'n7a! n1Xka190ry,TޜAaaёGdU ?i;^=sP'K.Hⅵ#3+j'pQ` Պ\=F_pm7=gx/o|}z+drɐxDoCa L%U"? cXgxˊ >!d\돯E8vCz0)"!0iOG4 :&$ $Gc#,s}줙?zOi[up| M%J W|;#X]{&e~O4< @W0U-OI•mۥM<=&CyCOw_'2nݔ8~&G_Q@QeY*nQlXO-n@'O6*<>s3n+d !C8~`MϲX;lM¼g^޻1W{s9R'Mh8?1 _R+7x! ^$h+Jg ixV"5iD3}d+݌yc/J'".[qb?o<'!_ aw/1'0H ';P1(M`:*Ӿn0i/v;'aKPjpvc[|Hfd NTcʓ݋?uU5&ٹ`^v0q%;S(bxyFp%䖰&g^ *ʀvHIc8 @Fwb>8dD@*7v7 a4P x!(neѽ6M@v yx?}, 0wX^,'~xUNQnbܾ[ݺ"bapͤr 8 Bjg"zuoRJՒdrM+Mc:eb'-v^JB/' eWb(rzy)枫olI%I=Anxn_h:RV s U!@C)g#Inj 9  -|iv%υy;7C#wVH%GHӐ]rr6-i B/W7QyhVOyU5 s{(r(#HMfIh%$윧'VRstiV*a'7ɈYF=. 
Яa#QE" 佁L b3L }ĖШL]Z$BE('h]CAB؉.eWI$W>Bd)pZnV ̿MBbG}B:w}r;mțMssy4t=-h3%f8-WiQDsQ7#DvQ*?r3 =WO/vSvNƼ-/Շ@2W_~oP H<ܒIW5)5Ʌ[^ i@:oQ2tKJ]:h))(C}!3)n}SWOȓgLr6GDu #Ճb#pP|x#h9G:4F#GaATnA܍؃TqQGA#GTF7Ѻ]`jU:Pzj8;:|lS$$*KL(~9WP q{$#砽/d^^7>QCHo1)X%"<f~GG# !3l.gzuᦲxOYBi+ B)>ЃpwePÞI5OEyAJsHYBڦߨs~nw#Y9#C$Z1 Ȃ$"L܅_OsN! sQ|"T h%]!e$K(Wd* ƴ?o~FHP24D,4M 7ᦓBBarB=5l\ܞκE !_4++diXyLrU8rOqܣ\w̰_mtjt8?)\._7׽j5nF~:&$trG Dk='6־FەV!CW-Qˁ[QGHK7hb-iFչwhCGtѪ7##FoW(k0Ptÿézv b ~KPA0A7:eslTyx'{0O+zůc.C>x4i"abbW<`Nķj|(؄a3+;&VB/y}Ωlux$vNX;JR I{}Z**g?RG3Ơn:E>> H0<-ggj[ HcX0\53{}h f$'*8"4c=Sk54&2C&_T7A [/)#%''k 2!#n!nFX~BA x:Pj"y\8MoGZ,1Z]`8p3`KR3qC+AojHZk<@h&K[U@ U8As8f4;9[9[/ih7 `d=iUW|s8'I@8tEV%Ry]P| T.tha{䎃a]a~Ř0+o:X h7l=' !vg23xʆ*SG"db|VeL+H=>Y+$6pOWnâu"2L@|$6 u?tYw_*'%TitRBGľ/3M`<;( WGVZנ >v3E&V>[#OUĊEExяL?U Zny}zPfy <{L`iY8ȱ{f2%a=y2.IV ym/\#I2$*<+ h kBҧyEoBV;:,s᳗ll pAЂX@0Hm!!yx0 :?"efDQ"cH.W1%& )?v6Fi! i|h }Rlxjw'u]ȏydCnX\j1] Cb!R'v]e$gAfwGeL]K ۮJ)/m4U/_@%'|=<د ҩlj[ϨA3< GowzpJM>T3eZ^;mS 3,A,Vxr|l\˟ ((a}|؏&Be𐐘gW1ntpD{ܤ2TvQ} 5dOs ʶ&6̒,ô .B9s C*=p(@DxւAY4D,\|1 ₰R=G/ZX,4R/zg8Ep8ps`hႲ|ੁ= WL < y< #v|{7nP0 )4J sD?9[*Qs=E|[7Yhn?U_O$!r/lWjrCBvHAB%0O*ǶKޠPG^\|UBEKH#A.0IJvv#`]!H`n4V_PL8?}Q>¯Gy/=o9o (B=<%?L`eX|C$C[RRGW UBJZApɕs(xꊬb'C㎊V IBx&+sSX[Us24>sCHßE)G5? 
_SDW70@j RkKHl_x@ז`,2sr~ >9QԨ[+pg2f5^+T5=.r UG.֐kTZ]D&5~"rTϒ\fKDUaa\,22׾IeȎ*A`'K^.PWjBՔeaPRicHe;'Wt_J.ă>X35~F0W."F.8K:)FT#Ra@U>ea+'9랬gxPeنd̾*ZG:CPk:ir{4>r_ }ޕ̣nffdlֵfeUQEUU:7I).["AJ6a@L˺e4B<ϗP>Y>dlH+ntsu@4-8Bp;|OYGoOzeXb̘R[Mv^/B% ^`Z!kI0QC#%vJV& DUS+ JXhKEJA2;(PTePĻ29)7Z "!^\R-x& ՊApm%1lMK!KA Xa<Ȣ&e#ϣhBnpKRG~7HHHi&  6lԡQ;NF8 kf}j]7h>6C71 4 (S vL2qSqj jA|3Thāa ,>'>|1DTDDED0Qxa0œwW={WpsYی^i֣uCJ.ɑ4AlVº@%ُfU-{-~iu!g׻o_NB.ArP.W1CUDQNXBF}&`Xfp5lVDw>QJ=xf2]gl톶HlXmٳ&heۀT 066qkA5&G^T${ u,}V}J&䪞]71vzGqPlkLvaUjWqk!5Xz}m$d4g]bLbo.rn@Q60[R :ԩ5cO f&0Њ@@ FLF@ L& hhO&5=0#)i`L4 ښ )SڣI6 m@4h=@z@h `dmOS?LI)j=M4h24Ꞡhh A*jRTҞʞ?T4@3H 4M24Gih Pj bhiE?TyOd2S4jz ڇ P4~4=@yFbjzAP4$D CA0& zzQhʟ~Fz5= =MS&'GFMze@G4ѕ?am-sU,M[ZJLll1JVU1eieB0 ``0C\Efxi2,ݺUmliędW*"4UR!|,ffij Y.كsO?PdYeXYL,,bLq~ pd5Dڍ}~tɱT37-XY2kVhi7nJK6 djQ3-&=Q&u]avRlRsw>0wV䜶*C GJdK"wau>@;~hti}Sn@Xc5uLS27sįe[ EVѳFލդ3aBfP̕hSJرg6lG.͠_/Xlwlvk6j2Fmf^bdh#[Lev+e5#kj[,&i}N%k5iF^ݚ6ʬil5[kelѣUF4iX10ZjR7lthŲml]vkldh[+dj4e5i245fFk5Y46fUiw:T^s9YLʆF32aW@slCh(6LSf6#Ej}0%>*Ȥ& +ל+~>_k 8,ȟo5lO)sF>+zh=u'# _xZi@ˆ+ k&'%G")r/?S6qu<)"+pSx@p>iti$xe%RVp _)z: ^50px9jib>,fKa1L|OQOeKTdT) ‘XJJr/‘]әv.AZ<5=;`&  6ԴkZ//>_Lf iA9*U-Dɡ'A ` Ě6/8 p|oI8x:J@3<^re1bex$ڗe!'ha@ @/j)Kq;¼yNMOSN)O{ɩy])>˪8O1Sxx™1t^w]jXL .#H=NO t𶽢ymXv]7=UM)ߙ :i9}WS5ȀcESv$U  @1PO);ߑv=?ݘ<:/}0?|6 nJuJi=72>.>΍OS8hbbՃ8Nf =8uTj=7OpsTXv5NOꧧT?|=ױ}~77wE+*9?B|Sve;sS{x~co~?<@ MX?ž2ʞv:QI?7~S$NXYwyοfWM^nGz~j7jxJm\)SS y*v`q^Oi)`SSZ90iu|-*%>?VkWWni߃: WD /x_N+ܞO? ! JJk2_%K1ojW=5FD/̓g1&KFed2?I=? KҦClޫw|-| `Ɵ^ԗiv6weNz6.~2[z%?y~%^*}&Ir3mvnHlLLWglp&lV}s @ʿ^d6-dÜvHnݰ#6*ͭ8*V*qԴ2U-AaA7c. 
n7ӫMfkVuXy;23lήwqC)NƊIN.e4ۗqTxR`|1~^-0#: E_*^=0 zCT￲BoKp}N)T]"jWz vz+ּWY|*l+{/_wd+quҞRRqYp`\m@gvᔼj);]mlFu`TQzʔdVH,TAπUPS)ruh+Wl kYMsRJXGQ(`}g0:%&&)m)*J z|j])Rp2+Ţ>xR/Eg҆iHRq(a#*" o*L;>sE,ZH*tC?{Ǯ_׽?H#Z7h|x>߻y_ϡ?l6oSP8"!WF3 : @@ 0\CBhI)Zuw5d⁆#:(0!El4Vá>z39sN'"erH)( SYHpOBA~ٗlArSk3ufA*c%蔍\K?R J@;;\?/›>Mj?Gғ+o{&BVxe0}{ u y vqrŸ7>pQ2z-~ǧs-iOZK_a[_ 4m`.ZRi)"#" /{_quُ:z$p,!\ YS/ߺqVIYrsvleS4>_J$Nj=?R]BvM ȌQQJ]0tÊ8/v-9{0&R^WgGOӷ^O 3,ʝdJO(e%"$f $CSRE0hP6.QjT-o›up)ئ )1#8WE'19;y KN/5>'JꮙC҆eӾ sh;r<6@`C_D<#*+rs,p? -SOl-L{Q΅MSrI;91lLb))v6-4=N{l;]_jSz#┗Wޗ>e$S_|98O L Ҙ>u+;`x*!QgљZ0c,wcTB ;iwu)?4&_w=QF]r!RS MrHO j-*a`AB~FFn)SSaY)_Y:V)SSXʗJpbqgT8;xIf2.4b朮'apr.IeOgy&Р!LPa 4od,G6-3שA+'9<ܖ`T 4fkRȕ5?@| / NiG_٫qWUNɜ c K" 9']˻s.}lK^ wuaqxA|.ݜW|S¿qe.e0O@]t۱ #.Qd^лʜ[sM^]4ѷ | NtJsS\{(]T_N&^>Yje;9`v8ro9rq.V^CƘ_.=**5*^'S!1NQJpzYIH?^>!Z[ɒLA #g-5&)1IdQRNlbu& )(H1`_+lbE  _grm|A}P)Me/T8]9x\7eAIz%OI)b*RB] ,q)|d%/_-5;5&):)0*zJqI,1oQPD_D]%.xݼ~ui_+pfmE|#XzZ$4V9?${˴]. >/'ngGe_.//ݗE煴ʵ()pT-:і"[yc&5 TbE >$Z$<CWuVaR!JJB~O0ڦ)SRY_-_HOR3KiYd$H/F=Z56Hh~#vϗQr,Up-sCޤYz88/9$OW<6g_OH?]r-q~t* S@{*R1Ke%SYPL@)ARtE;9dRorqq/峗,ާ,)e<2oKeWOR\yBq.y<;<”RQK7 R~OR:ϳ4d@JF"X`' )߯I"%RKL]Z @DqD$oK)|#x<0`0܁PǴjZӸzO[b8ߐ;x^]=w=m>֩+Ŧ 6t~$?Zq%yb |\BbĿ{ʶ)5O_wod.9M9o5~m٧oV77j}|dKISFuG;OAG"E:it`Ӌ@ 2HBj;ƨd/4BS}|OjS8S-oM}Tf._%-z s#D ڰdNH=;ӮM5L&~ʚ(17 kR: QP/f ` HJFkd$B/_AcSO0Eѽ5TK>=3kb|"b /[->)r-|\- ضŦ[WeWÅ_HWcg~w܇P`}y,sp?wi|6g1Zɳ{n˛#t:^õRV}!:htwG}P P6{-6g25b5l]}BrOi_%>U1!ݽO_kIlI&}#ǎ])V!%,%bYD,ȆDe!% #!S%#VdIfŘ[e3HV5bƶ[mmlkc[V"D@ebaRU%9]: &ٞ ˃:lvmm CaxƄƨl?~~[2n=ج4La!IM Bκv%>Ñ^L'eU6"'^!mQRݪ!^Y[뭊Smu#"- ՐPDM#EKy4ZUQEvL*٤/~@숉yU$gU[>+U0^@!#-hXu^N֣^ 7TGuet2{(7q19'\3p/Qwt.KR_Y`Q}Ø j{Z.ցksNtYR0b̈m$hN-]fD"_q>W;=v`W ue;8[[ MTZ_Eëed+a 0Q>rj%ŀ ߂_Jy#qd ))̧R%:?lc1׏˶VMZ-"ڵlk0b# jIMl4k c?gzl**d=j?X U*i<ȗVZRAeS,\[|-|O[ܴ􅔩n^跭 ?)~OOfɏ.BM.y3T9R-dl&3#C'l݆V*meXX2[hbٱjZlID5JfQԅ6X H4&d*p (X Bo[V6kvoŦ0ay%Z,p{ti== DH"UQ/1Po(gqfCy7$ I4.X^3KFd5@I?@!(X\5΍:2cB-XC5֭<\bڏlPMhpPh#^Ƿɋ/)qn a)׸MN6ԳW!)Ev;]B $jtBP mUف tQ8޴K$C9d 8/eaa edrVw_2AwrQ$e'{X0`9TCUGM\)rj|/pl{ZI{&@;ୱF0g 
k71f'ZMGJ0KNH#SrmOgşh2=)+;9_&/d4t01M43+ˮ^ :6183АJv0G1a}n:mp 9-v)NilYPܲ*6j6T7UnB-0 =jZCVт|&*Nc+2S0̳330`l+%BZ[fm%YL4-ՅfZja`f ɳUm [eflfc-(hɶ5YE1cjlC**Q2e" fJ6kB0kf6c l’H3(1,UEh֛XMa`XUFƶ¶4*Y1Aa[[m`C`R$J* $ )1E,)Qb5؆2MMd4ě[-ZAmmEkmjIV5fZhiZdRm-M5EVFFT-)ٛ 82ZeJY %0 LS) E&0Mlk6Y0-&hmi0l#MŶLfłllmY,MaYYc,be,d߳i%BbaU3B,Um!Sk!V( 2R Qf2dWzgl1(*Al0ߘM+ UlRpRRbdVIyОt%>"$FQN/'*399#^$DeT%U]T,E"X(LLD1&I PōmLFYڶm@:Sjlcb ©B*EڍQlVmjj`'P Ppf %q&ii4IYۧb8-Wv]66e5̴sm"D M lܰەca;l00s8͝pF"@A1LZ*M$YS l0饷Nlsv0vbY0 fٜS[;볶l-(UZU[#jTc6E'0NL[Sr)5q+T!Q,6fI* (5lb%*(dfEa5SuLU),Y PJZGo ȋjG6 N3:lڶimdcʳ1ų8GC&cMJ [ac)Rɒ&"epa;Ampr/qprzCM=PI4Q-JY39i:tO'#]٦9tZw1MƄ#EDj:&pCqr `7hQ*(!aDIf3q:lTq7hg,GdšN79øås1y]'sQcu:vE§+N%  j&5BqF3r^0wX122',fІ2W`e0nӴަԶM̋a=6,X$d $D Qe ˢc1Xht'4hCAo9]vp11orS054ʛ&]K S9L(!5XZBdԁ %Sfp[gvW##S^a\w#r-1SΤ/O2d1<,Ua[.Ubz@*,Y(0}&5$M>{mifrPG.Msjf3 3;f32v^v9K^"z@U#ڪ ,F :|;Aƒ&y &=rWp&U`Ui;x@4{إ:RȺh"}gDC^1Ǐv]xuqi8<'zЫHp>[#EC{YK?VI⌱]@G@# |k%)l)@ſ^i\;x0q,>I4Kf2V;[JGBa|/S 9V=|OG;.1m0L.ٱqmuЊVVDx*$m'M9ٟ 7I3cׄӧN]x;Z˧Y$K ks/=LZX&O ý3jQtu9c,.۱MpymeYӊ^^Վ2:t 0sJÊ7;%d\BvZ!\-:gռ殔k~c RW<)j_iFm W J0Kj9 I$;y!(B8O br)".II٣ @4ZpUF4e3punu]̔dNoñv<>:Z:k;[Lá[\Tfja̋%mLn;t9s+֙wڡՈ ɞqv6t^GETf1c(aWK*Y.g:[8:&;LOnrlr%)ܦWέ2⃭Kkm⤿j=X<(5v1LP0Y)NѺC3|Q0 1f胮 CoXPN(܃D%:ʛ"̀ӒWNjf^f7V#ˑdI,aKc- 5l[s_\H>@ws^ ]w><,t \@Y !@!é%V$mH@7g\Ab(})9[;ֳZ(=!YťHa !$HuU+_SfCge$gLJrPzQP9|A{wdpBc IFf0vƔJ.]26sޘ @ЛcBwt&U!0#V۵%Y:7 * hZ fف5K*PW4 ԀJ xpkvvջC$ox{&ԒB&okg$L;\Vr~wFJMFHܪ %&mʭ0f%Y/$Gm'|/u.-W/S4-7Ӎ&a-Xk.{W.} ÖJ.m.s84UA$,º"!g?tQ] 42Jm 3أ<>5M!8Q !'yYJFk` i!#`#aplLK?3lB{RM'L>HDι~ξԣ*pV$UF\!n;&1,_1:Iy$EwPGQ<9)ҕ([G/л^$Vf͹ބssL&6R}@P@Wknm6&^`.TT%PX)8yնfk>#6xyyu;546eZ͑[(`VKYHlǃWZ':0]dzH&ݣvwsK>sU %POje4ļvQP!>/NLVus]NҔt) eɅWx'Tynv|e֚#j#RYP6"&^zT0 +&;z$ $=9iM~NnG\>6;w,탏jRȗ]~FaWEyGt! 
tak* r#o\$"K/d by3#zttt33q8(tF604.z7pX͠5Ea eX7bۭQ(w0OON]ue+$7GZeYc?ͣf,Vcq6YjlnnAKMX#i}_UGSTň+Ŋ4ىcvҖI6=,v}ztklW?p./xL/33,]Zt/Pe.(sg˚ -)J\Xc ,a^ʢҟ B#^KBޞ&auxoO5sc4b< H'/LE Ir|>[tkf,%x;hWOkɥ΁o^x$J1ƛ$hE:սNmݻ+Fe,#X*iR/Xftoy6f|<]I:G&ԣFMqqa/K]üO/q,LY3/V:ZXi=ܼ>52icK3,W='-WK1ޝk3q[F2iq\XK+O4']NR3Χ) 1j33wl[ P6 2I4ّB g_0;ʫ,:*uHm'0#Dim{+E<7 cy}^uGGU;K!ֿ{9.fU.fgewh0iRWMdO"CiTӂ 6o}RiSJS4v:c5oɲ{j9=ݮܺz(0VP Cd\ \Y}s|.<3k6π! ^„Z,79u# }tnA#goʯ[YstiBCRx:R{$ HxlBtuW`FbƗ9ďr\ wH jAG,D &KI:lM+v,"L񭷢i7Hm.1Iq ~@ӂ*p䪆 XHD b3In2h$mف%hٚM4<`/m:۩U=7[ӎoL˪Æya{^6{_nWMltprp~9J74T}8f" @'] EbVd [Ogx},%IUc$Y׫52)nئ{{/F>JK B'L} H hwi.k`(-0I4b]ƲB6Fgˆj4JZc F p{ 8OLLj󵔴.b0՛V^6ٖVa\@sNFs"y @qMefPE7dcxn0փM5V7rrÎIR)d-$YʒƬe-f4w!:m1sC}m?%z|ʱ%*};JԢ*]/p{Gt ͕[MfY jԪVV+SRiJLT7~%ޛfN*Ny?s^Ji v77.NE|}|L?=v*{`$J׿Mq\89J\pu;L(7ۄ嶵ᬝT=ђTZezй${98p8I "&cgzt=K-%Pyn2ڧ{;ϑ,u.ur{EJUUe/rD= _|ζ:`V #F5yu[U_#|f|tu4]}9w\\4䱌cKDx6.=mO1xaz\2^US1:Y7~Ǿw7y7i̖FOO6.Uju)Km;zDϿixvEb|c_3{wIJ2n}w7gm{Ǧg){0ҳ}(];[kZGk# I:f''Œ!D `e". ٱFCrWTmɓ{kQ 'T8Q!Щ۩x2IRТZYYю[mTYy&c\NHTa>@<13B^ˣ]ux;k!S%TugUY$YB&yyvY'n+8J=ƣYjYcﳈ;)b⇾O}Hu)7tg.OvT-/W郤7 6F<Қߌ)q$1P[K3+,{uҟ:|Wp|25جf 2Fk4cJTdJXVS[jMsHs3~ƥs)ox۬3}9axWWey%9Klob~;wwNrN7zBůCGpI Cal#]]FC$C#9 mF:i.^uI|b:&UO?; `ȳKmBwC1rfJŊX@'S3!slǑIGg~G#W';H]E䤧++2X.X5G=]*/oz}޹88R~cw|q^\f5]]lWU^meWAˀm2Y;LX0GQbmTfE (N ,Έ*VVU흮lDe$hqēh q-pEG"1Su]xKʛ1p%`{v136Y[˯ܰ_ust5jvb̡Zcvv-"eDŽ<=Ygϡ<1+$gV5h-gumߔVVva[RnV+Tmf]NJfYt])6`3=*X|So?oF߳^?٥%^3'y'YcS_MVRY̾q0ɎJ]2΁.瞟aHvtIqn]cVQ/.Q;8^ZSdߺ.N8KeBӌ n.Ug~z(y   7! 
&A ) o!Ʒy4;KXjF9"b@ *{UǏC6NS[x}Ctv'eo#-_fOJ6auoڦ33ߋ^WJmSu?xkb܂4}j-VʹR ls nꪪ |Gbu2#QT?W{GO= ]NZO@"Ш򜻜Ej!˝-E6ٔZUjUVZUjUVcB`d IƢ)C(T"HhIe`c`rG*$EMލ{THa===vOy}NݙY:6_ Z5~9NC3>BKzrc X9ys`p./"*GsrN*\?{vhMbE˜+QMh'#pv^A)2g¦i+1u Eޕ@ZR ID 0Z:^ǜ3-p7 Ah ʰoKi|\?3Ł822I.yvCjCڎ:ЯEe[UUUUUUYHlZ֔b"XK2L$W&UC[eL9-%衤H7HG v7k4Mpz9c*voN\Ս~Rky3c)S( qm;Z3,,tQFlv3FƒXwcō닋Fl'z I9qDxM MjY&rm6 H; $:nE"CH`rl%X# Y<#-rh XħtrB*$J035s-m7L 2zNط$ɂ=caqHzN.uuŕƼY9w3ϩ *H i %q {/,:0{R]'hAT8uL2Stq"#w:H-f$@w *M Cc3J|pʮfrRFYQHH4T_(K t^A 1)f 1342rL, Z+XY<5Y sFp3Kj̰20c&g[W)NvN;L;­ۚr|N>#:i__R/l;=Mp釀Aa b*%Mۂ噼tP lA z@:$dxm][H6AFu*ɡb4J5UC0Xfl.XVlL zH `i"`TU8y4 &jRB El( ,HT`։`B0*(,xbH & y%,3en3%=X6Im1LZxes9`[I1lƅ QY5Y $0l|x I!;#^oI#sЛTpA'D1!1R CHڳ oꪕ{їV,.K_[cMg~J8Iq5wGO\Y m˱rkmۃvsyծƘ,i1[+Uoq4#L,HsB 1{~\֩82vPӥ噓s}cϣ>''_S10ʡHM6IaģPyW`g[="'-T`ZlrEs^?9et9eX5'CbHbgh߹`^I<906o͒ozx\Q%W# ;UvZءW{ l̳u*YZ'ՇѽsbW2TjCZkMoULͰцa*OeYb#ˇ) H`1(dC}\?PO٦RjiHD N*elxKK qôtFm^&n[2YmNɵdfn`[P")@LD@}ϝd\_9u0*XP n cizWAI*픿ϛo >%+:607 `ZEAI"J~<ݪH9k"WWn:"۩)Z`)6U4J`0> np`_ EUE^,bXN wLŭB,%Wd> LʐR20x=Vr{SuLx}-{;0=P1C1z~>_n%㖋tlY/O^}RỽMg\K)7ḸT ,,ľ_vg.t~6[&tfdZ`ǫe0ǸLTd: @j4U@b$/p^2`G8X[y}~:?as_1OlPŞaj36$tKHGUiD/3qs$eۍQ&;Räm &4^\'C\fr⦾< 0̸pcʙ'9}w!1e?^i[a6<"Y.k1:@PA]9aL7o Jx^wc?D#y-F=&ta8/T̑BWpChO`ױr>:;F=ǿ4cM+* Ӗ,/f_d_/4V-m~U')pJѾT\?j3oS$7oj SSRh7JOPFk_rmch)ÀNyAq N'}d'/9P=qSp:z׃?[/BXapy, b[3{&#@۱kU{xwh||OdFM2W8iW67SOdS$ѻ @/6o9XSij{<0Pp[űq[b/@]ص2/V^i~Sr y~9h*}~yl]2ܻ~` y bVl yHryo;>`@O/װYNkv.UIP.`TcSPT;c)jTTێ#{]^C5^) d;4;~p0?ٴ  h0^~_zse~O]=R\Y'? ?BTS{S{`MjN4 xyr oGIS4RB㳊Y44u qU86aKeM}51o*EYI cdS9T=DjK20eu p{̓MS$U&O`)#>#Cb\r<(X\in-yZrȗ"/m zz%&W*T**t 90e@2q/ Ax1,M/ɊW bCrwCe폤sl87qTd~c<}&Yi/! H(DI޵ˇjX-vAځ{rS鯸6z$ ar {kĽ @T$P0Nc 0 ]8PMrKoZS=OL_>)8k!鞉ϔc ̠c;ґҧOR0yIP oPln/0KbPDJH0$ P#ֻlhp^b}>UwgѩrR))23ttTD=4OOM/dZE]dWԬRHqeHfbNdJQHRnC , (/tcϮxڱ}*5l A$:Ϻ9 =#p.,WİJԦ6L:IBBa$;~XIG ; u~[- ,JA9ޝw?~5;2"N(`Dn 8Aw@'HGẟ;o) ⌸zn1=Aq6XrE u9Er7&+hsP |I&dh3f0n٘VÕU\(\4H ̭y~g'rU縶 P#íq! 
!} ?al6E.!Ċ :u t~R ̸ȝ yd ]ne5&`.teN?nO[z}~+vpp;_]r/^glg0V9`su5`lf_eh퀬8ogl<9[JJ Su݃ F]^/&Ykl)Ӽ0ŰS}J `X{ t `Vm@f@㖰== gl?h1bp0+ ׃! )<>o jkc s<c. F #Sĩ੖Lσ4Ks.|-y S Bp|R[QwkU Mz=D$pRK', sZ)k~pհLdFRhlIbD84EP3pr.N]?zL?t4R@;LSjW-sYQs2B nlK$,S `v@ f")6F׆3 5PxOތ# =wZ6`~EU%h~>y0Pw>2ᝍ,`x3O*9+1<0L+Bt{@eĩ~ú+v?qw:Io^c~ c:lw\}'@Xl+;iqrP–Ơfjk`Z`O>fOYͧE7tCnCtޮTfhS6JAq4aB1 )R0 3zV_9PytxB x{3a?hxt6Qw10Yb_uu瓶H(FWTYj$nFYUWaeݒ6mm B$BF eKvx@H*ӧ!fiLiIPڑlܸ#M FM=?W=w;kR-kZֵkZֵoMkZֵkZֵkZֵkZcZݔjkc9eua7aҔ)k]ZZagqkZֶ9e+\c2,m J=@+`˄ B I ? he7 klU/]jpgx9@P`dOgVGUCa-L?;#?7f}]ʕIRiUx4p'[s~{6{͇Y[߯ϣ>>o罬7H{d! g] 8ڎ˴6fw':*N|ɟ_}:v6&^]˄?(yvwYҫ`oѿ# \x)y>/B"Hi* !iI Ph@P"(RU ְ!J6€hPHm`EP2(leUJl}G2 U!A ($5@֒ 6 hh@D@UPU@Q  s7׏{>^>J~$zd46QFmCG@ѐ@44h R"2c?JP=!hѠ$d 2OSʟxSbQ l2zMѓ@ڀ4ѦM4 S@!44h4 =TSД=S 4P@4hMR!i i245Mi MJ{&jfOQIS4S13HOS4fijFJHF4 4d2dCISɩ& ɨj=L2hA@i@hAI ⌨XQdb,2ʅ0VPeQkh0 C"<3jSH5 #kjlQ-k~*T8T!&SgEv$eFF\T AtuXSb$2)[#D\+p% YJɌČ´2YXXVaY Lʬ&dMJdc##>1dc#c[H222222zV#I2&Wu[UF?"t[k Y$ZV'j)He3!"d2021FVQe,l#(22kd]oSkjWeCw!Dž!8CrJڋb +<٠˞f``} O"M]DZ(M##^\R)C ,ݙ+d$C`L1cb^h/wj{Ҵ1TjQN.jj:^vӟS!Rrہ0).;<~%WZT <+\82I._Vꯡl=zGǵdrzLp9:vGKwYDZHO"́䖱 ʨff£1D2FbVdS52f((bfnc Z"XmmEqVu\6Z[]`Vtc MIxQG_>;^}%x50,b !0Cd6oү>=vd{xx^<*9{e+4}y6Ѷ= xGcJ9ȏ3!0`4 *5gq\4tmpS2 r L CJ]0! ZC"d2|%su*Mu^%U#F]h:8Ƥ",,rj0C"YJ(W/Y1J222I22F*5VYmb( Be2zcV[U,2s4X)dkSYd++ ,!3/> !SeY VL rբ2&-Ke-KSl̶[35MSTKeYd1Y &CeeVX+ b &+,VCd2b XV,VC!dddeFFFҍ$222422$2222222#$222K######)l񚢮>Tnս/ޏFu Үи,BbI܇! PSZձ)D³32`e'+Zp (ḍ"ƪjU6hma3hѢL=<ߎ_)W'tyz&[c>KƸG64r7t[QWU-)W:=diFܸ ccT]Gvwi8K iJoא6yLX>! b{ ~_QC!Cy?O<̨y2 دWwjҬ`8ǙOWA6"aIW?ܺGuGY{GN{~ |hQuÏGǨuyU*v -G$.7-hg:nȍǚ>={GǬ).;}ʿZyd:yQ+S?[>+Ʈھ_`os\AGu 9Jq>{ʼWť^ożگBtwCCc*J$}G|{:_{|(:7Yc~uwWn[ثG]Sn#zm|x_">|||hGʼ;ת]M_7!H~Z/t:^ w-zv+$yǽ>t~ yH7GŲx:8ʎOhxτn=*ˏĎ7͏=l|v-:gv\$jm*%4TRbiJZ+bVb0xL,ɒ6U;y-$[*H2X̡Kܕ~*~aԇ%I{nպzjUmCd WMC! 
= !JU߂wқ!ѪC1jCuxvxC!ޒc«OB !} U'LGi:UC- :!Y<?cd.d<׎w~C͹9dXubWHѦQi`%5pVWW=\☭ .a3'*]ﺮ+!߇=]]hNtypuJEo=۝P!w8s+={Gx %%=Xwwn1Ʈ{dxT>CJu*羊{tb<>/^|~%x!u-C $;-+-nSG} Qf)],=ޤ8+Շ(f'M+^p;iI龛 tRLV1^i+8!UC8x|A!j<7_2]&kU\sޏz>o)W%u;'??{%U]^;v^"zHL>|!8+U#}\rTq0vO1e#*ĘWbEYAy;,|Iߏ͎;HqU ѐ_#uC)PV5 *ڻ/ID;:MYW~ڮkwTK;n,*:dmZ+╄;w+be#wC]*1lW5]X:XX#Ѽ\|84G>l_twQ? 71Ѝqty(N…3!/C((eCo%VKu6jFzڤuj Mu➩T>=\l =,pG>9 ;U=<6]$>~Nh~[+Ucʵ:_>lb>WΏ|G]R>z#ѽ'}x11QHjsT]U`^0?Ip>)W\x:.E*Z1U 1zQ8QG7M#z?%Kѽԏ$>_\C#8LW61VUyj<hSm{&cKrg(_w[sg>V_;RvG ry#CRtޯ~ylUCWUguڟc[\EUpQLV^RRWj'G~7Ҿ !i^ނW^<>#kϼ]#<߿.5> {G|qTxQ}f럜N{Gܿf4>?r5GGcc|4>?^JIVGKwCHw>ZWU쌞;izW%#]^bW(yWUx\sX\U^r?p#sO^dxAFGҺ;GяJ۫}vì%sT>o9'7?ÿ?|xmnUA|Y??z8;CC<1-`UmjUVIu7J![U{V9#]ʽ!y]e\N**U>lcnڽ$x1p;Uv873JJ{Ľ]**{/oIiWU3r䏏W2p(ww!܇q&8)riVG7&s$uMܫGWGJGs;7X6\J[TcwT1WMrs8D]]loGaJHdrvRӹO^?D}!CId8#8#A`9ԎKPލ͎[pqwU##8qhQ7"壩sdqƃzkoGE<7G9Q5HZ5Fl菲*Qtyލ#5G8V6>CL7ƃ,oG{F6G 1><pۓFލ#GUyc$qc;(*TjO9 6;1dz=?1ͨǺxэGcG$Eհ7dF6Q8g5ޮq:1cǫJ㪎Qe㦏oI:8[tolm#gmp,{nWqVCHuCr7r77l;X[!l/2WKJGah9Ѻ9 ctd<4nHl7Z%m|Ѳ8`*ƨḍ&6Ɣ8 .#\k%7#ZƱ#vEˎTh?ȏ󒸱5Cd~x FHy{@~+hX*#F2=X`$}Ǖ%h?x1zQ?=8(ml ލaƃttGCpᏽ׎`r#GG ȇr4s>ZG}locŹ\qhACA:HƃQ+AC ׏EQ%h8#({HわÌ4l%z!<ɏGm_߲|4XxFI~F!:>K)-^2lc䬥Zeŭdj>}b͌VQ?b4FSSzhujaV@k]KS|kdtcG_Ə6%5J,UpGFqZ4&RӍJuQ1cGҏ*8#GWFFGGGTjGތtlXB6Iic#J2G#GFImˎ0?65:F0a+C?5"6Uh,YRIFHFFhUsT$ CXeBL bF*DaY ,I&k[6fmefi*`Fī"f N%$!_5ﳛXk cC9n[iM%)TjVnk1)(reK+qK,ڮv\9TjNIkqsՔP$l[sfJ/&2!#j2FĬ7 YXO9h,C*i)ΥY%\HT`K1 YeJ(sa/9M;+qƌKx ׹WHW&櫵 k]Vnjf6DҤQFm(]↺Mع[8e5PÓM 01M2Nqˀd:4 Z 9 $vS2dYf Zk!fֳ3h3~kZљרakfڇgW -fYcV33~ -hֵql^iez08jSl ZIgDoR klubgjRk!CYE! 
cel= ܛTҹk25kfc fƫ0TLD[A I)RF0:YIGbGTl~|PU\>RZWsx*jīʓFCPULXuTmSF>,`_}_ 0bGoǻe#"91QcTzM#* U&@U0*ܥS)%iIu.S:zuB6mt:Ě7ޒoY+)eX#!\yzy]{oQMd\g{s:ǵ^O01aX1m-wz("("("("r2W}\vjc~8q],LȜ.Acγs|,Li?5ۂ01LPK$vݙf4ѓ*%Vff`wffffhWm' ʛMQA#d!eJmoh2# hՔ 3`obP3$1U GLuos6zxŧ79b[2Y4ӹYw",!)7 x4ӵ߱"K*BH vɴV eI 0U00+%F([adž4ɵBnC(,0,1%MKv!LbU(0E+5ʙhXFXF"Kh0 QL(d6+*Rج *o Km)=NsmӦNKsU*Ͷ*KcNW{b򱻆g,p])LвZ:hVhSEj4f+1 #J1MyZTSvS`1I IAR$`Mګ6Z-6 b9g%{- N!o3\r h%UtSst1*E:qKAuiQ+haKb PxU#IiaIcKCe TTudSK(bie]PN0*E߆)jCP0 DS2,ʕ]]~  \VKEi-؜ 5r"% +I4!* aKS&KAcxށfJĖIeALK$8e4qMCJkU`84̌ŲaYbQU"ikZ5eZ5Bփ)c5&ІQVC"* ai54aXTak(peOޥUL)°F+֭7lV&X4V1ZJh`V-H&ىe5 1+*k 2ʰhՆX1hbѫZL2[LԤab%t~uIZjMho ^\Un˜DDDDDZkߕܹxFYdeeFYdk9+d `UK d%)#!L%%Hd@IVeU`,e&* II+}d*lĖD%$Ȱ(,UDQcY,&e U)BfPAJTeTY01XYU,V `VbBX*Sk3aa LI`eY`r+*Ei%[*`*)V)6DcDʊLa2XŁȬ fTɅLc,Ldd0 `!I`dbJad2 )TJfRm՛RYSHb"Y1!LAS!L`0SC2bS a m5 eelXU %PlQbeT) Te4dOVբ+*!R4X `fLb `,CaXfIc VZR`b,Be%eUdUDxE1Th0c%Q4djmTN5!~Ȧ4# JYTq$V)Y] PLc33P *"ST,AY,dQH2C2 Reb#I]DX,XdcFE&CB@mTejYeZfړ&FIC"0eI,V+ `UAdEe 2"Y,b+2IJ2 Ta( 1 bŒJ PDAee a aEDȆYE``3*21FFXV+cd,D%1!–TdʌFJYIc VaYUdeXV@Q a U2IddUL)RbL22S X&, -Lf̬66fc&+0dijV)$n dʳ 2 eVC"7+ D%T +J5X+eT-hb3`ذ%AE0I&UT$$҇Bju\?R<>79=~;{傽+謅2S)JcR0dddF(eeYV+d1Cd1 C3 a !+*TLCKLPՅKDҲJKFC$2Ij*2223e,ʘxLd ̑M2,aKTY CP$$0jU,jR Tj3RJVMMhSZfԥSZ5U iYj32ԃ-iRұ,Ve!I]lB+ ateLF oE&Q/%DBѴ2I$lTx ծU4Je:&$w+X:;ut4+ k:'' 08ȴ1Xmɪ_ʩ OɗSI+[Љ]ej1gP6"Ŕ>:tZ]<,#xn_FEua\V>i\vkz=μtiἯ6><8WtpۖnW զtunmqo6Ӝ8k\6 ܭfܬtu >nso6ۨl[6ˎ9p::89gEsqs.פ/!~UHb;PU3_+_hWqfۖgoyG_#7[s: Lj\?憡Cm_ZSqPr%i9'<Ҥi;bRjI~Ki.b\ =ч~Q7}9_x]'{@ps:޿MRr[=98*_:>s{wX'xd/lWtIlK·ߟ) [>KurM^7nq ;ǘ:ʗiݍN^0}\}2OG+F)mmB93<X52u |H\D.] 
Bt`aa4WlV.كqXVAGEcaQamf?'00beD9b={ak yyEvu?|>|k]8Wf,+1YIddaaլʭY3SͲy=bT\ڦZBk-H5 *ɓȫ.>eV5 j~ЗGc1L{vx>&V۷(Njj.JNjN9Wģ_)ovIn!yWY0שumE2$J5 Q}k{>7E1R-^.h,Vܹ b܇1sye3,I ܻ*,3%YS0KheYG>{_lxpx[{(:.ߡ% a+p>eE)z&]ZXJox3MEV<;i-7&n<%/ eߙCz}ė7Gv$>E]!A[U.n]ZH[,a!᪎oZ%d=j*q0}ө붿=A\Y,DzL)~w*>NfatG|R;q=㭧:_xǁsTuܮxDSX̲y_HIQy[GvbSsO=`svEJYnmq۟rY'z.^+vOM^h1Y:2Wh4sk5ieSV6lӍfVf- m.*sD-E۪..n8/;O-%?}z>y`7U9{]H̋2w/^K=d :k0k1 Ky-Iom>o7wl߃kY5bl2ek\+;~v}w7'?G_6g_mNv;fj+NY|]no_/̒xɿc}<6߯1_\+PC!Td30!e*7ͱK}ɜ[w8.gQtx.镯u)76k T֞O]Tfd1 !kU2Cb2*ڮU-*G33`b;G+[0w\"/yZ , TByRXKx-)vaJs;/X#%)SͼpID*V𨼥z™ tq+@ƍ*LhΒkkٽ%յT|Gmo;q9_/o8KŊ3SRNKSWx3b +Y]cC,\DӬ g stsd`e; b/MBs=}iy̓O^sFt"үc6KIvy,0]sÁ5-ya]ԗZ.zD]QE^\Bl,>mVEm,C:/C~1_<PCPIBc No*2f-tжr4:ætmT,W'h:1;XCB|tϾ9Iy.%N!9V Pm%jKʽ.{I3$1Y%-lֶ0lejFR8Yܶv3RZ;xoӛ= UR3fo:A|k2a=.b$ҋw:u<~>ޛy`)ܑe)ްAWI|Il䜙@P+R5o:9q& Oov(t`fRZ);lViӍtmfV+qvZQmIu*v+ff1ߒa=>C]E8G*.Vh1[k(ݴFrM1&Ue*Rso,+5o.mS44dY)^M{Kod9R`]~J[ s{҇R>Ѷ ܡCŪ; *ʧI$iyQ#m]IUO? {.6xd4:3Bj3\wv= ³ ʮѴVByD: |ʳ,c c0 \WWX)4zo3,Z0|Eh#N/9^+G z rWoU约fffbUgo+mgY7хff|8sk~|c~tӦK줽B\Y+ u "Rк3f0Ў+vө.W 1WKhd*k9ȓ\q),5& Y2" -2leͶQz&~KZ|s|.+~>_7/P._q wE`KŽcꕦ+ɬTd9k P؆C!=d1%Yj+W f6Ji F1Uɹ3d8U٬[{n mn?\\%fh_mχ-8|fXk5P76 [4h碮V<;8; 4/baCr8qUa?ot'F8<%îKf5F f`ޙVFEa0,+f9s5Ү]odz޴+JHrީz'B0V0eto ^^dvjO86x =,bG=|y/7+AXZ;KXi M48kL l8wr Ѱ;Ifiე }~}z=_agM 7~mnJWoGcv1h<\o9?5dlXcz6Ƒ>Sgp XxYUc7/* ˱Ͽ$@@tMz  ITI]=IP+Ū2U?U^}E;­溺+=#ʹ6}E矢+W~) CR7G 2yJ4Jm%R]rkl /spr39CkS7 5 4_Ig9CJ2i^s:3%ʏ>5Gz_W=٪oU=^#ٵaI7yau箷nvCRqW9Ǡq^!!V#)Vװ\N\~${h #mtt\7GR,|/5xGǍnQz4lsc[u7^yxhǗ|F4#~$wq8x6yG>6Ə8|HGiFG̏m>5G >Td{у>HѠ56X9cIz=h:hFfeµ8Z1Oȿ?Ny+p]'m$MMܽB_`0 ܰ\$-~nԔ)(Jk nZi 0 [$8~g"AmN~a7 fDS͕!V?Ppm5V@`[VJ~Ž$rߡhA{A+(~:KƒNgj( 4Sƀ.4.Z\xzf.(. ;;hE`@b1nS&I;뿊H>SȽ{.t;˝jJ]gF9o {S#y >֦-jD5|4{Og|dɞ\vcUÁpADARQ$I$b"Q $I$Q>~e2`@z ~,I$I$I$I$A-AJR`ahms 9~-ˏXp196P553ƧL7ʆQ `}?^b}t0o4=2d!Ѐ=_ym].r@HW,WY?ZS_-o? k"]">V@H*fHX27 =uZ,#! $*ZT

PVIC@o??_ Q$!@J *!QTchI"TURf(BT)H( PYP(4"hfB() ( `J lDMS6TޚdiSTCMFM Sj Hz @d@4ɠ2@M4M2hzh I"jzMO?#ҟzP{Ct6CÄ5Pd$",C(<^TMDyUTu%sH;\NUDPP.>L WqAƢp< j+tT AP{PwLD; sʃAv!QbI*)#w2*wT.<Ϯ:>l G\!H7D?ACאzh}D9JGd vߐdHw 3mej1k"5 iPE6U*=+]!+`] %VQlAڤP Po"82}D=/JAP(zAC$"AiRΠ^ 67k=H"9羽H>>\'AH> H;ΚJPy H5 Ư"UPA䨝>j: z'A;~DA؇gDzHx:7) 1G.; cv;7"I Pi`(}ŴHqcAҠC֐nduo Iʐ`ȠCՐoCѕ=iVdYU)`5"TKR T,+ʔ2AőC c1}+z|2Z͚Wػ1Q"sJMѕ2uuĥDH85,Y0Zs9Zdή\Ƽ;c-ij՛-zw%wVyW:^vC9lƘWs*Kk63WVkk+نlٓf3 cY78d핖1W[.yqnv]y]%ssVuZ¦a.m1p7omX R؃%e)> V FmcZˮ\f% "tAF3+e{LjUT$!!i! HBB$!! HBB$!! HBB3e52ݩzZN䆠ݭwjξk&c~01Y<2J mՈs;vsuV)e1pc v43. jg%YzVXk#Zևdnڶ-[!&!bRG9soUX3d3`0jjJ ff`1GKwj4nqW^Oie3)il͌l]^]W, -W+wcz~zr^W0k*A0,kaTBA€$VT4;jĴڐӀ̄FO L]%yvsw!ҨKQ9ӡ*6ڮޖNeI] u12&4 Ghoj'xֹ)\$bY#)S?$*6H3QQGѯerbR xU o%T41乹o!麕+>3b+BԁGAPi#YU# 2 "ԫmY*b,E=.–#$1b $jZdHT҉jILHFPb0*22eRj2dUV#ZUQu&'VZek aW.+ Be$ibd8f, b#(db {xY]`/\4{exƩlvNۮ7rr8v_hSrv;o}@Կvgt:X~k>xk5q-Ouyܛ24ﱵW+L?I-G7C?r91ƴxlr8#]ckf/vˡ3k5Lm3v2/ Å=C?;z]Ӿ_?:"xmXx'!ʼS<txΓv qW q`ݖUƮlbZl M Ûm6jÃ߹4vu[ővuu1ËguзekvSsvov8Sqv֝M]͋7'c{v9U:j NkNԿa?~;{{/306skW&4qp1jwɧh:F9WX MWYz7o]Fffn; |Vr|.7HOϢL1e9QSI~e Q:TeW8Z{Άf1iMYaDey}cJ;vʧ-cv63m6pp6^+#^ "rw4GrR?R7`Ę}v]Nqq*lu!PVIUwG{׏}߸ƚqpOA2 U{ٴWnKGq.Fn9}tE7*riQ+ozq_||޼zBǭ(hW{hUDT"._j> Z:/Yɾ]/QsN~GMGȋI 輼{$_QEEb.жt"Ql_5b._gWs{TvU}G^,f5P}.0|x=1%x2)I] 7lL)hjdphSh0JVtssf5On㒮8nKQwJ*ެJPaYeSnjNT~ OhK.n <͟ʽS9LLJyG^ qLAw8)_۸&Mt*BpI6tcrh?{Ul%>zVu|WQ(`Q~' {yutյC)zM<2et"DZm^ė\J~|d 3_u].,w*.(4_)Niȣ/VKŕB.ɵo>NX }@~SemO+VZvff cYz;}D ?S¢xQ%X1kORo{ַt^5s\5/B:]V9U\:[wZSd[6Cl\&"UXlt\CƚGeHM\6 qU9N6=G­֫uXU}>?UuڠAV2*f1Rz>X?VVdm]<8nڸi\[N ~&6Updyj6_9˂|ƝmW&p0LiZ7ql.-n赵ǭ۴Չٱn{a896LI vp\;ܖ˃Y>B/aҶI]S2-# :z+O;;NW:prׁeȹ]]u8AE AdC F3XIVRa** =F*6;P|Usw1֠PdA*#Ycș#IUyh`<=\m" ̱Vh ;0٢<4gJx(pV2*4_W]zkӑ2:nUb㡍kfvu t'Ӵĸ'S"mVR ho©N >'y@WR^bKJR)*s>8GӶx:%ۖHr^FOXR&R *tu9#NqP hd45CGQ5 :ep3q2eO9~ q3JWyqN${|%jꣽnL:N[aal=ؕ]4Zz@Լ~iCʬU+K`lmb{㥣fUp61jVGȏLdYh',L$Q]v wb7$Վ7Hx˗3333 UUUVA U0d (TPB (PB (PB f`.X0 1b,0cZLwTl˕:YWQNK5B>UlGS"zQ]-Ńm1lr%fW^Dw5;)lI^MT W"x.[{T|KʣnA]h yjjq\M 
VUWKrrx>Bqyyjun\罪2;CJ5$\)mId$a0Yiuvչ%jahM0mْ>3G. Z!ǡiޤ'Y]srFtZ& V25*FmV1if^U|q#1s0;J:K54Yc;?i>Hى;`j=#'8PncOHFA 2 k˩UjCHګ1O1G Hҭ֘y2ݻ1p8^F7iō=*t6plrnŰXZjckΙ*tp9&rt.õ1?l|81 ~S)4x xeɓNU;u£Qq[.;=OCwc*=ʤ9~[ߟB[jAQc^ǵO_LȧӤUQ9ݒVRwf2G[l>sLq*B}"-AKpsz[kMi -^OoRzר4aT_CC I~4:M-,Xbis)*}q%wGiS(-dQ6 R%Z0Uu]2 $ZQU'XH3F SMI 5Um6Hݒ7dvHHݳf3{SHW.F]b\˵80a7ZpTb`e1 p[.YF I *J#JS Xs)fd.L47V7AM1 XI9ZKJ։ I* ZUIUB۵Yʺ2I6b62 QeR¢ErmjkVҥ%%e "Jp(={nO눸Adj$ (o H: U;wo_(g_}Gѣ{*įN+yO%g.*~!h_읭{{n6o7nWUez훚4׾ܫ?]n~0ۂ+U'ggqS'}+͔}}jeT@I:̍9f0 A!< 1=yͨ}W*GmJ{v󽩫4іXGG,`x|?R_|*{K ^"'ࡤ2A `Ad!AU$먜;֨ (A0!?$H'A 2 wY13>~'uT7'_Ez)~JK7k;_@q]c*l33"yaqU}v45>VZbrU{_1i>'Įo]5hϺUZ=s?\v1w/wip1c?w٧}wdLc_>}N'Ъnj{B@ΛN@ 'uS^90Lm|ynGv)ڠ35TBmtxNlA4ݗ!:ѹl[ŻWm>;1{~cp3?2A߿+J?A+oWܾ"1TyԎo_Q)~OWJ_҃LO/IEt_xƢ9K-`9xcg+q*~%ohVg5'~+n>_HKq&W;zNr~N׍1E1e;*5LģI?=|g_ !`փO(ﴇCt9AA× $yH9H5 :UUi5I0ITU?_~_ @I@~x'b!ì#Di&O$4z`H}]A/H}jf)^W?܏Y}o=~oeZept?*菍~ԏQ5_+j_ӃԾOKuKyNt~ιKE̸'0eA?)b?W\o*%B RAA@*R@JDPP* PRPB*B%BPHPRP4hPPHPUAJQJ @ (T"$(i EBIEPUQBmҍJ@Q ( J@JETRRA@((RJ * ("RETQAJ%)UH)*TTBU"UPUPUHP*i45a 6c6#RBS'S>nWB_*UܥRvvBwGr6fZJ3 `̔c1(̖1 bf2C} bb&LF2@1Eff%3 f 88`݌R"mOS 8EK9]23Z/dș臢Nkĝ_ns&qNìiM6_+OL"nSLOvQ22,`0b`Jc#H2L`0ʰbLa0ŌC,ʣ*Ǚ2ѻIcr4eXXq18)ړwG]mjXeu[X|j 13I˝w+N:U]+v\\2d J2:o~>ΜwNv0)1KشS Vʩ#ZÎQ4(`4YIRfS$Y {6ݦEz=DK{*NeEYd RKh¬bIec0r0lh1'2+K*bF0+01+Ň^KQ]6[&fc11,ՎYjVR tT)>t<&bd|tO[eKTauheNuN+>*LIKpI*ʭ'>'IL{)%wҾ''cl^J LN)ܜ 4K]K>~CKK+CM(ac)XW^V5`]{zvݮBCthc);vbNJN?E>tp;z=#iΓI܏F,'ݒ̖2cOIM{i4u|4D'Rj.d.]%̗Mn\"Mji:۱E_.Z\{e^k ~$N 69*ʦU俍Xco~;|I]+a:[ZN䔏M4)5IϥnĜ =y\)~t|$%˽<%|^<p]d k^b[Ir)1&q&fwi?sޓu;rI1;'^/¸KpKbKw%{Kɒc(.b5C)0/ M@w49zeQy'>[|;Uw^9mJȾbʧҘSnof1fVa3d؟w(ːqh51|qkl JCQWzٸ4zZaSrV9&Ç m=Vffc333vfffffhoy9XGkX,ec[V&.<*2dP!bFű3`0bY0ceKw ,Kl v ۽LǏp4F>ۦ3*c <4lbSk c861mgI f3c umv(bQs.{o8l\rcK^- jw|6~S,gvT%*lAI>:Va͒Sj)21sp֘6nLS 47&Ra*hX2lɃ&64шUEowkZm~>M)82u`!*HTEYGgɌbz=~{l}\uM-U! 
V Q0U74 *o$Jqª[mbaid B_*͝%uT?iZs򆗈<~z7 ,ر[-jNKu8idi.Xt!;/YsF9m)>rګ=-4k1~9F#' xlefXnȭ[,WiXƒ8+d>g1ёv%Nk.Ֆd{{+ 1TzFPńUzJj02$$114xCYNߌk5 1E`dK aEaM l[aa&rmXỤ(o5j0\QCT#be Z .M UX1b0ч-*Җ0MiQTdUm4!VTmRM"0S pIoK*6XX# [FȒ-Z++pX7-̗4ыqKM+J|VXZd0,53Y[&UtmmYcF7V22ҦLИkJ(ִ5kC2loj̰fVc2fk%],VK UdB2eV&* $*VS"bĘ$*"fPF$>S$1MS֭VaL@9XU,dX e!3 2f+2,U33%W3!VQ\uҌ??CYFU9ʖ0 b#cUdҌ,a,1b)1,F$b`&UX,c*&XI,VV"12`bac*22YYXeacf2U1,,013aJʫ  C*LXF%4ɅLdc3ec3F2Y $:FIYUeS1,L&LuĎ,YK52YYL2Q1\KF)(cL@`̌0{Ǣ_2Tcl ʬ]1f31e 2eZ[C,fcF c*,V"%J0dĢc1Lc0ĕXRʆ+1Y1ddb)afK0vԘ2KQ2SEbJFj1f)JʼnJ4d&14)fUP@ bV+ČJ2)YUL##B))VIX@eHe0, d!b Id 2Jă)0Y,RbIa+$El2 'n3$KPdS)1$R\TedYV=Tp LK#hP̙2fXe,c0q~mO ʫ)K#%ae$+Q0+&2L*3$afe"Ju0(S*幛!WO)}~ͣ}$e:%ɋ .KSuibŃSS&MR_`[-%h7#O8L-ZSM&L&(q,ZXb1bŋ ʸ dO}lna;/[Mil✮'89Dt#ʱu-х:St4tG9Œ K)l IʆzbM,bɋ&,Y1dœLY1rFKDMM,1*,L*VL[L*9ba10o1jE1e1"Ņbe1211bdY#IXLLf-ż:inY>8SKb®bG,2b%2`XUbXlIҬLT#K'&i&L2dɓ&L2dɓ&L2dɓ&LZTɔɁFR[QV-9+xĦIN)\ X҆Z&K,XI&&&&W4ɒ#ybŋJe, 01b1bŅVRba:rXpFiibŋ,Xh[bhN3&MUm+qZZ[-b,Eɓ&N%źԣ)щiiijnɕq&SSdȱ1PoCee,ğZ62by.[ŔyEl|G5_wΛGr{yɰ5t9ObL,F#m2hMʪR  L`EbCc3Lbbd&fS)c)1L"ńFdz[2/X15XbԙYY,BV 1ZZЬUb2ժj0UC"ő5V-+#M$Ր7X,J``Ȳ6} C200±c,S5J[B)., 0[)6Y)k*"dRn, Z\0,"C Ud\6m(W(K(w **Ddɓ&O) O̞%6Ʊ1v 2jda?!'+"LMbbU122,*bb4XLXK!eCy-Ŋbb12bbʼnbSI麞Q6/bT}htv쿀9.ssOT=Na^u})X7[JpO>5;V&/js&&'VɲҪl¬C٤+iUϛO?n3ȓھ 1K}. 
EM Ő$CT0SBԑ2F,*ɈS-,[P+B4Rd`Jd`5Z=Lx2臆Y _\N"SId$9NSonXu_Wťp_n\{u&,_Ch|0n(~8Qz+Z.kάGx _9OrU4\r]˹hܰUx/w;AvgȹOXd]RmJjNCQ>.p/ʧQhZݓ;fLMinXi]EihS$Ţ2M,Z#$ũY&- $2M,Z/\'Rŵ4.2r~DTš&MJvcr_K:mex33<Kiӑ3 !%Mv; KXbXzuŋK[-,^Kmmfas7{rьɖ}MsZ^'t:7x%t|8ӱx;gdZ:ޖyjjq4p z*F;NE་F|ŋSKKu&u-o'vΎO:MN&3sţݦm=㛛U \Ùz4ExM'ŋ,X%9{BzS(O2Uc(y\STeWy'KS?M8,̙1b)رa,ZXXbŋ,,[Z,ZZY1h4bdɪdɥ bWSuɋB2n[-nldXabid1jb1jbzfũE.`iȺ.m1֙4=K7#*YC% &U%Vě1{Vt[Ե-.\5,xˉؔ'zE֋<%v.';iœh&Z5Kʽ |[Ɨxgޜzy7R'%<zźżZKʵ7Y77[&4[L/4ٴ.NkuƆN2,-Ի=3l\)~-2`;EԼibܲZ^Q;9KS&ito7ls/ X^\VN j]r\.\S9LN y 朖Ogq[.)1rNqwL䬟h{s=3MN&-Kups\-R䲟bq57dbԢ^7 x7/9hY6NSS[Mil4\˅^4^Zv.S\Ǹkj8OSm6-el[-el[-el un[n[l^ܷ-"n-p\.p.kbO"O^ܻqwispO<8N\.)xC9]qJ,lZwx5E:G%&w';j}俸TŅ)0F*/Ix/Qw2yVSSl~%Z9s_"ޓSҔrj;=&=}\W9Ԝ֒d\ܜ.ܪxJ:'nL#6[c\"MUO5ȎFV$U\I(e,X"x);nG!'<*RNɓ8i'6O*uDi> jt/kB✓Iܺˬi;&oQb%24;&kieilZXyוiq^+q[#N152zUt:LfNs&L&; d2vߚCjnFhObNs(Ns<3wN9̙9hdaWY&8dɩʶ[5Ŕ[Rw)Jɼm"[K..Z.s.qbXX]Kejm"--$& p]lb\Uv.ɒődɔOh/5=7.\6Ev̞)M\֖ŋn\j⸬XT\%Ve pS)FC# q]K8,Us5 Kd86 %:o8hLLL =sSjd_ {|OdZ u v+?SJ qĨLbK${Ӻ} n/TrOdi2TiL2z^wLVɩW:ܴ2jjijjtSTɔɒdXV,F,I!1`,,Źiaao0bɋ&źb?\&M>YŅq14\i2)X;Wxfr_jm;'IJ_$9yId:O;мYe(ϒzglgm9*T4n r+ʶ}keWBEv&ZMIL*EZ\S'd蚦MdO$; s-.Ks\5] rZ0adɅ,,,LY1dɅ %dV &,Y wJLLJibXb=ڞ bК ,M,YL\]DnKv&O<𼳾ng榓S>n'v/]~˕^dCG);l7r]똴ʛN)8/ ]jz)'jp)vL5=✤&O2y')]k&- ub,& KBүڦ"e%YEbな_Mp.lXńbiVK&,$#KSNiiu8S K~!~wN˱l'+)K0b|#')G65=US$!I1E/ilaTɉjm:rb&S Cв EɓS"LX )2KZL*dK&UC &B+QY6MMEbX[ ~-jL،M4Z }ŲiɩSKRM-P\-'O][ 昷pN,MC&yIe2sXSR^ԽًrźuMӤEm{Ɉ4+ۆ!DRS.ׇgENʖ148UO^m%6]&ZXKL#&#'UܹS: z琽3:Ng)/:S֓WH|Pq'bbdG\߲;#SiyKyrMb-d`2lbSxxKzr\)y!W(FK2d.6NI<5qI<5] |_5/j⟇y!ieKjpZ\ɋ)<i¥d1dœ]s-|w:l sJC7z>U,ޖ,rɐ-NZ-Njp 'F(F-/FM.Kx.'j䴸vt.uqN;|+̽rZ/H%6yᶞ޵WɢuؒjsOvdK) y%qT8H[XrȶZi,[/^Jl"j&is/D䶧ڀ`@896(W:':'IVSuJrc<elOY<TZ$nJ؏:]؎k[W/hdihg"U%1bSddI䓱6i&$_"[|keK\̾ev/]˭~޼Vb[ y~9|ˊ&zz\Wp00+Re-,-.yF,Y<\)'ƱJ'Ϛn<Ɯ.3SrtBe:-׼ e/yz{%ŋ,Xbŋ,Xbŋ,XaWG tXֹ[ u. 
eu.eZX,^^ Ke^B6" y鞥KX8a2r])>%xfO\rX\)rI\ɉyzOipFLH1V'WզFR~ H`jȤ~%SDya=%ܷVԖQJ<8hbbwKH^ u7ʼniE%dIJbY1,LRbZ^52Ke"kNwӺ%Mܸ,-#pZG1{vempMI&.%"l7UinZ-Nȧo f.02hzC`SPɂ]&bŹlZ\bɅLo7IxUU$O䰐řpbJ7\:HtDƗľKKM+pI&FUu$B;OI<7E~2Le)+K +ʿq7\Q桩i 'U'c:Oֽ yڽ]ҫ6~J#\TJ1O_/N|~#MaUB]M192|"?*{'аQ77lXJLY 0b7] "Ns'Xh_Kٻ>=yȿ|8RSbvpگ/Ƥ|c\XoRD+$J3 2d}  >]kY=xF*d8[ b`ɃKYF &&,v.q-Ns*o6iynnlnSB[\帷[lbŁl[$ضX,Xd6- ,YŲ-LX0dʶ[[ F,ܷUl0d\}+|./uuxUCgsʻ&,^dM {Wȇ}߬{k]*KG*M/bCS 1,,oܒy$jaͧ.%~)wd+*`1b?J8e 꾮KR<զs.uWs.LS!Srs?!~W5/4FWo_I. D:K+ZBr`,j`ʌg6$ibŅ ާ_7iN'YVLLf 8Sn--hil~ZnNS;WNB9K 2r)q+yn.ky2w⭗WXhZZ]rMq%5k8j_c1rUT1?R~L$&)Ijbu/Xb11&,,#~!QV/hm~T8y{/\Wu.k亗P~46MUpLMN 0&q9KuZZ\.'iฮ .ֹ.%kb[SKeq\hh흳vΓԺ-ִ-&I:$`NG]:NqN):ӭdS5 \l5556ͦqZ[MpN\.ps,[bt\S4d1b544]%r]KtӚ;&LX[E9Εu'q\gSIΛkel6[-ż[Ȱ\-Kur\SSeiu,\˴-UN[. Kur[U؝$q:%Y9NsgUܻLtvRm45FNt]EuN:˒id?KKŅ&Mjdе4,XabŢœS)&bKSS&CKDbɓ)$dbJ[TlRjuUeY2ptibnSuj\Kiiiil\L-KD⚛MmV&&m&S`MMอ--.j"20)NWuZ" ȴ U1eG嵦~OO4M&l}ޜt͝N3l ~CU%DZ'LUZO#Rh1bŋKKKV(L0?\O,R]bLIOٝI\NZ.ΪCT!VIn#uyх9*Z贲zd)ؔ#v&$e[$ģd\ы#j& b-(wNCe6--KijX1h N1)*gdwMup, lTu+B;JC丗vŐ⺗hb*#TZFC\\`SonJ\.'9CKn˜\jn Px/ޫ*ʼn4FبXb1-n$ؿ,CS\,X)2Y,RlĸJA7Oy6M1O~!^ŐF'i4# `]ηbuȶn]U=kdBJ;mTUޱ9.Kұb ՝ KI'Bv'bNydCرzVRŅNq:MN%ĸ8'0 m8NS9\LI-KymPm6IÙ2a^ed`[vHygX-suZRO8 lMm5Wm=!'^rKbY0-ʵ&ʼnhYTM*Sr'GcCGv”䦧%Q{$r:Ҵb̙2f2byUuou6J;3331ɓ&+ʰ@압?hTJ*Nܝ铭HJXuK8p[,X--&L&SiSyo1l[*7L eim54--- -dlM֖&MM-im2o2m7jm7,[LA1:dœ,,2b"Et(;! @{&Nbb-{ 'œ,֞L/kop& ,Eȏ%}bŲ24XiBodQ}G!{a%.V,N?= 'ا` <(ż欢rRi޿bLM-R+ DߵUx>C1R}x%0zA:=M:I I|)rp ȿ&jvWc- .ĦEINT5+ ElQ0B|iHe0S-&LC W%/< [*nFvid)}6L2Wyg,o6&,XRn,CzV4lXYRaX>>rnXazh|;',Xb1bŋ,Xd 2L2dɓ&L+C}[ЮR=Wו/&OI=JZħGaR2G$ `56u3)IqhjQh%Lh\I *>~rZK# y͢]ՖyڏL_:&,Zɋ%2VrX\O{x\q]RܶpVW2rOU%;9[XI0X]m4V&,ձ6%G \XCe ާx6K ʙK'ƾeLXK&LCɓ*2bŋ`f RE^s'4zsY58S&Zxl=dWpyX^56 [,ɋ6)l,SLY%6. {n?E-#(0{eRieUdlMxHsЎK] 2lnXFCy1ES ŋ`$|!࿲`hOܚЄ|UKUw02cT_wMJgʨa'G^=# 59 /"a黯`) &U^b%Q1M{SȠ]=%'iMY+Qnw֥!r Tۖ^A]b!S"ч&&/-x,[Si[ U)~5UCd?ɵ2e>Q. 
#HCj&Ib?nIZW[*WjTN+<4$ڝEԩ>X/P.dm?BdwL #Q-,L&,L&U&&CK`i,UdRZH'Cx/oJr[i/R5zec^PYe#dhZX,)Œd2XK*jL0 *,G|ZRb,Z1oJ]ܟ5^.)DG{ ;W*½$OC庩]U$+/bbst7@2[Oo)OGQb5{^5VUSy j2,C8e@za/qlu=sabO-IT\p We;0"~ufH/*]=NJsrO:#e e_*Gd>ʧ[_uIyE= Ӆ(+ U&dC,TW"%z͌"'K2eˋKy^Ռ-XKC z^ĽuEȪ^֨ YeeV*b,=erO1>ʢۑl`M6"5{ԲK-.*WΔ'JjwlCh}xwPޑ%U_&B}Ŵ'twUGQVSmb7ZMMeSfi2di!& UYH6Ua>݆AdE`估=z}K(0 `VDbdJR]A;{kkYU)Ոe*ԇ%;qI;U_^u]/.@s4WYwF7zK>!'O~|3c՘VEV=K@'/RQzAb|Lȿ7Of>Cba^i;p ]#B6*ؿ ԐdXU&&&-)MqR\- #eL,Y Ke ,-, Hě,zfn`dɋ`e0X-,#b,Rb1b442dIjL,X2b4OHO>Xy#K`a*yWi`$j.K\ill[TF0F9^+ΞuXzUxZSʅF*WPG̦SH05ΚFF,,Χ, &E2d1abL-Pzu5=VnS8ӈt졚Uiul\'%ŋ+MY-:W%z'jBl륐2$a)|kC V&L'e;d0*Wglf+15|:$j_~,lp6WRjj8qqaM//LW)tue454O7kɥ]WfXc.&YXlKG' pl\ɷsLX͇jqLcCG gIӳ9:U]Gfˍ:)ܻ5ťŲl'zt`:QںebKwőd]vMprt%\CZ2xNĻtల [::TV ,QRLLd#)?~Ovb~AdŅ2h52e6,œeMim4XM$ʴaqIĹOGDyj152bp3I2A[X6,.ZbŴSA---d%l*8iO:וjM-#ꎼ5, ,Xbŋ,Xbŋ,XbJ`F FTz"O$1YL/B.֪}Փ-Êd9KeũdY,adĜLXI bɩ0Y0bO7'jSJZ2opLL)2yjyNKe'%TҭS*eG2L $_}Y.7QN&MK,Y2dajb̝VRɃ-L,Y02L)Y=59)Uc(Wo^31;"p&D#"X2vCŊa;U# X1j.]ÔXuf*(Ņ)LYL,2aa0bM./BPe* P$hZDeal,4I{ZorIx/ԝؿzWq,[U7L9ؔ)bC(bŋ,%s  b 1i-l\ *?`S3 E4LE2wjX/jW2|GAώ}Ω.8;g&k*Ni^x5bLi&5jri~qJɔ̙IJ^t\ܻ-e1w͖:K?I"ESM&OAʤޥ1BvҡeZ^u.떖/O8 ?R;)NF4HjK "FS4Kƙ-4Ƙ|FmiEio4ƱV0,д#o IŴ *'㖔lZqRL#ص&élIi:5wjܹ,Rv'%7'b)ӣMXuqhNt4[9:MpXR蝓EŌ[N)hXf- v\7djڦ,v\cSę9*2Hʂe$nYWTSyp67N4ro:!89"1s[Hu͓b#(Tш*nN]mTߠYLLThAؚ!-`*d*v!aT_ =s!$0?T4-YZ+TT\b7Ҁ|{ʮib#)]NM6DE#@:JM+3~KLcBRfX(ٚ1J uAY=#aa,,V2ʘ,)/q\"[<,`~Re=Dt,)bŋ,Xbjbjb ,Xjiibŋ,[ U6F1V,,XjxQ_%Zz^ɂ"5>ln&MLEVaźv;U^LdRn'6.'SrSpe ~/`R{'_ES*#3 c <;jEeoq!䗃z6O<ϊ԰(Sν1bFPܕ Cx`b*~C >C&=K/c6p~ޜ*{j"V%j5& Ux~xG_R+Q;NWc[<Ե1bimKBRK&Md' E ^A{&U0)\CO|o1j u(跆tq+SԜ&DLԈ~N~'Gأv]QYm3X2ٶLkda1o+E,ZK,Z09d5-ۛᲆƣe 651QM1L M(h4 piR!JlًeCn6lªcXkk)fizqXmɾqgDram%MأKKsNbgCd# !Q5E,YELLmhVDcccLbliiK*CL Qb*c*1IeX&2LDĥXK, K e@2ɌH"ݕldYaVleV6d,VVc&aPb0mHKH#M qb]~7mK%M/.ξGWYt%:Mn:$nNBs~=`ospͺIՌ1C `X,sQK[qi/Զs[Y(+VTTs85sKԶl{GÌ>l͓neU3 F4Fy;dS*7g5ʤO}eRS3hy.r'ȑ0b()AQLK2ITUahTQT=rE dmb'50Dda'TIjXK lKXXXXY,2__w$%濂go'LySja15M7>'Ӻض o/'|ak9;]:y"bb+CO9FND(tIV\"8ԟ*h̒L(Lm N*lTQ8F_=oS2>NPhz 6I}9M!B!q8$j㙺7} (Q<2Lpʕ 
S[H.iu>p<'|/F_N5ENYl>(ٺݻ2cQ7-Lޛp}NJQ((R EHMo@t?U)4g!6>|[wu}n=߾uMIIÌm'&(՜֟YbN:R7$/$\v?6WfȘ˻T\/(]:dSKb_)pu%_|ѐh9 PVfD CD%АP0P4msڔ`,],ٖlwKhHP3׳i6Rtu[® *_h mj l zR{^=i嬪*&gA*J*;3={*JS|^l;ΰ'=ٟO}}{<׼}oz^s1\uQB(my;u IRJJ)"J*QZATDw0&$oW;<ЧMw[Yl 9{T:mqIq|}91*U":;>@gx4(y}ӵ>s=P}w`R(+μ{.ۺa҅T9$ F(|:}xϯ{>}})}@]8B*IRZ%@$DU*3 (IR[kd%T*)4+ jBH!TUDQKYBk6JejZRtݚ:(CTWC`-ED44t(aJKJPw N164dkӷڼ5(@` &b2h 2i M2<410d&ɂd4b4L @bhh42@d1 @ iLL&M24h h4Lh4 LT@&@F$&!#'a4i@dSѣL d&h@Iiizb4C@10Lj"d!`?bzdުzxS)i 6)hC ='䧔yO)h zz4d==H4A#OM@)I hFC@4`hM4hd4&L0M i OJzzLF4ڂl4Ls?K FJڲ43HȪ5U0`kUOŌf`1B؎21)FchQ4͍iixXَ6kFKaZZ6[CdMc5n~qf3W*x1o5ohvw+cDMPCE|04sTg:y&C$ U\{,$" Zx)=Gp? ]5/^Gw?5mʕ6/{M=kQ6RL,YZV##)p##X#0rhݺ7T2FS3̙)6&F#qF5 yXcc%{˱: .X SAh!0-FÀ lkqqcb/t^9jn󑗹u=Ki,`yˁ ߻֬etd͝+U dc2M1S6 cV566X%fg|=G r՘u3OnI{6qc$ٺU&^*u͞ߏ$~@K uNN~c *٢K}[ : f@fk"Me,R3% +|]\Jb?O#p#sDsɽWlmkSoN"/^;r0x`fLL DSjoue1:ƴp)=F 1FofcQ]QUe3)i3f%c 2, w֕Z`y,20̑23!ʙceLed00eabgߚ_GfgЭb5]$u#CmhпT91Ms<V..6 c15OUS:b\v8?0  [Laz:s_1)r<^7Du÷giF0)b*2SV~kd+X Xw(mƊbaʦ#$kfz>3Yh`2Qvm=]Cm&б@M@ `fWҬYL\zJ1FhbVaFed!e=?g ʱ 0ņF&de,13,z5YJ#65fc(³"wcUc2b$qc2fCa Zz.Do2U>H8F{<98&Gl3j9{Tz{ߥZTegoF:HTWqa`FAwhx5'{>x=5xSv99Bq#6$o)GiKf=;Fi=#Џ#:;}QFvŶy[m-2fJa!,fR)'kѬ sU70[(,^#z*ZdeQf5bV~B8}'Gyן̼t~x*dG{ǯxcՎMz7f+XJǢ"FS#38GdvEjz-R^,7zY]zsrِ4HhRGOs=Ox~\/ 4U1!=y1坵G:GQ.FGRasgd{=sGQsNP#h o„ {AAFbe2Bf0/iq8&ٷDj zs]1h==!⺣9Ɨ6%8G-9Ck1 K19F u Ny;.$!?E&={q,ch,mH9T_NP4E}Zu/'ovRi GȺtΒg :GNΥZau|+MK8h\ouXVuR=@Cw]$A@"'6l<3b?:3vҘ&. 
r~}# 4đQX 'E<`dJT_=쭺,XC'{e PS~ۓT[Pi]bbT Vu팧8Ɍb9<کK0Av/sb6ӂ5O{=)u\>#n@x|s>>1o6y=p#\{w'\sHrp#E.#|xyW^-1ͳ0VlxpA;)z̔gG4uT}&{j+Fddwͣ'=<%=|<{)ɡ{wh=26#akU#QvDk#=R5ױWrsI9)^ë#ۢSc]Nݑ܎}"܎NO z>Tu9> jkB::#N nUyO7uqǻxGftN~:E߮W{ʧ98pߜ.#T׽,c 8ZUZ# FKFNL@mdӎ-4m_dbQܵTوIW<=G>Wr dhjY-cJXĪŊu 4 ƒ) C;|˙TWsHB 951@ wILRL&pE}Ƥq82b@^*$t"dC"!"ߛ*&[8ϕ{w=x*^Vsrp6aUS3#KO.?q>iXŏXw{̜hfY&sm&lnC*%k1A,x%U +A(TLl̗P"ef{O6S4,}H0 Ȱ69ɠ4&5HAmuL46 CXzmnXm owH)C;8i,b,b.l=OJ]VRLΜLL0fLX-,ac# q\EKv75oeiM(dbV* ^n3XILVf4kVy=yd!>IF5UcP)7$)Y XSO}(eB恅b JX.332[a*>I|%~Ms˞FhbWrSaJM(Yib)ZZ0o7i1JP LBE6ű\f" p ĭ]+&ddՕtX,3@:2Gp.64[$5b&CU[GmXe4h+UR!T1Uc ,d 2&+,08T1UFQhJ0Hmh KmIp4a&e&YX1 (3!M"%vr*O59ED RN2w`QhxA1aaa*Y}3$m5 n(hG9"cF%# H)@yS2=-x>ו<CԛN_7^{;{w.6lw>/jb9ԋh@TbBDnHfTXҹ<7uxY2]rsPx{r<+vPhupc[y;Ʈoq.uUkLMI`°d ʆ3nc&$L?.=u=gUx EӞsLi`o{OEf7xk~a.b͸'Ww;/gwKm61c1v{E|-Th<ѩyy\P𳓠ڛ۾OfY8Ǣt3#1O{gk:x$}r.POgx9#% Jd9`YTHe0!S3Q MDe4 a VHnȘ!Bҭ,b182j"2ZLg&5$ٚTlHFHb6T`4R #Fᄸnʒ[d 0Kv؍Q$ F14ac 4/?k\ 2˒{JcW;\t,pafkZ"5`e8S1\`ՙc0c%]+79cE|K6q٦aq32&LɃ0f ɶ3c.V֥%yq%E,Jb dQEddY)eȆ0a ,)UUJOԐ_A^lB=6TFgwmD[?C>Tl hT}Ƈˁ_NUkﰅwG0M)>s^d?Q'NB*քh{3Gؾ#ai<6 ]4% кxuՁF  S#Õ+'iP)I,k $ @j?Im̔9IC>`I3ЧuOӜ[R JYIG႒Ib43 ¨Y3+٦>ҏN.[ϧG~?[dC7@>?騷q Z8(@` M.ΦW}r?JHB_)I U?9dÉ?~GVgY56¾DS'Ҙ)"V*dőUǎȭYC21X$LD20fUl;DKC\BY6q`-Hp\G8)iOo&"!H~)؉a4[ D4]!I.n ڱޚa!+$$$#E~.L3@ܘ(k,*(Rm;خ0~hV4$]: g8rȘҥ8v32DOX@VbC-?1[|( shf'no(;EK8MV絓iR8v1䰓 tget ;8Jh& 2vmvW1tO rlILAdxTffs'R N,y(bhB h,d¬ȩŒŌ 1fUQM#&ZJa2iY2 e&2?KJcM5UfK+jA+0aXɕV,1bɉXYɂ  e1bb*,Yb V`Xm &66466f1+bCXT2Ee)1,  TfVV2V++,rX-}W !h_gGB;`~.WR)]}$,kj0;,%x/hD.>ReH"&~'`Flq ܙ(K< `q-R1wqp([B[ߌ,xyB]6X۲tp&b]kK6o;sF= W߶n.IXC$֫nM$Hj8 p/ <}'G%sU!&0*Ri7k`]uWO3=z'IF@"5Rsz$U(}ؒ2/BumYɋҤu6>y_1-\^G~Ό9 jБB9"3+Y%OjE~{L0O&0b+1a?RHEj(wLt6MT]|&D!%9?J97 Ӓnt NT|TѾ{MB U1hi@j a!^ Vn[VEÛGFZo6AMn~ -#A_fKB^aySK[xwԳ:8?w2݉Ddvwlko6yo\]>{^mǯ5M"+IB44Pugu#"B& 'H {(A(`\4+1 + _]j`ҦGMF`̌LfYm icM6 Ma,d2əef1 2*Ld03 Y3,ʂ$2" ,J1{\Jk)/|~%)VBmNk 2Nt01c33,X1MYfYTbv4 V1XefVXc*cc X11LbFIJXQ,(,,`fLLaa3e3+1`& h\8B-C;}K䑶 F>JsP.P.w1L35|aBclI֤BH.r$"]@SR4 @Di0K3kU@(#bj!q?v-3dyXp;ݏ1PW -$rrhX4 
72@yҎ1!.fJZ)]LJp??K~'MfƬ%h,+&&+GpL=ig JKoU (j#*"}Ҷf#%Rμ'4T`*b:a_9|.4]D "!6#4stK~-"ٟH=?FmQ\̅3wZHc$:",2S)FZeQSHeA,JdJh<ۑ0hFgqu.%@lbΪ#nwFίL#]fa?KK*0YΖ(A?V/5)~S%JNZA=dQ0?c*xSuNǦy!Y4G<; ß-IiicDլT_4<<٘Y$cZ۷x kY6ZQV& ~!&#vbJހ9-  Fwl0S|c wuӡ2G+lEùKR*w|;+}F8M Qh4gݦ&̸?_ʫ9] iA~7%9 }qY #D1QZ zNd36pQKݘŌBc0##RBs1^@m}FO Ɉ YrGn;RVc?#Rf6b1'󢹑hd\(\"WzxNyqF$[a|!cvܯ``>zqf_2p_ȭe*ڱ&ag-~r$b㥑v4w%Q҃B, "\? b.eMVfBN%qW7d ;dz>|2|&힯k_Ql~Rj!>]gFmjt  ^T{wUL]m+ǜ}YFӷb?*kviL1\KH.--|}2q-]ت`0`>0F*I!B YFEvnz>~#9]9->;.)ueW@@"Y% )7(:|D $0"]=v`"* D@J# !* PusC^\0j,K);pP}-(\f}:VGHO }GUpRőT{}j ̭BU ș?Cm* MeP_{ Y E{_8x%7lfA[G^t?c.e S {K~ o+Pqh9B"EiWSvc 0II$Y0- Ch\~2~59Teu~NXQ~$ w]^?,ЎQd4iW{~LEZ>KVټwm/ (\L 4frT Ġy[۫>#$논'E{-- A-6wsQ#4vZ_4E]Td{AWBp ?q^'~M^qBrqG.UO# 1c+/2[$lL4q96%x>LQ&rIڠ0D`xG*`hn032 :5[PVeFUZCjMR~6~ 13#0f1L20[%9j5Zڼ]W+j6qqn=9ga_+J撑60/`en8ֆ-dd0`0Jj oB %p ++3(SJ` `uVFJl0@bWpʪVekZfc5 S6R5Y`aaf1,0mDѱhڍHqV`K*CqГRpP1-T WffV 3XV+XW@U+*j(caeL ,#+%YXd+l5GU5ZFXpqթ4TȲ3b AƸVVƪ KCz7#  6cf!TP6Qމ`2޷W@ÜgtƘь1ҷ?G_)\Uv;Xi\Sug4ٌ`޲.GCv`2{ B-c|/Xĺl  9;n+i #6lT;SֿIѭ? *jרCeQ -dčx 8<燃D -2Xh׼˜_\?[{'EV2s S0`?{vgs2`l p2e%8K``/{x؎.Vp-$yבf!c# u F#)" ɚJ.^!a)˭]#+P-LS:RR߽2BG8A00U7^#5Ye[ ~V͐\eQ猭9gvѱ. W_A]WP&6w/#;VKQgŐM%4$V ձ[X9$YyY;W$k-CeR|7)x.zz<eS zNˀUޗw0%xg*skrҬ~sMȞugMp'hR!BWTcz֞WOM/Pɕk 0H"p,ϐx`mSV=h?3PhcޅيşK\4 n XAû]XY@%?}.rCi 3oj*}>{ͅhG?~< vaj[/ܶWp,BGB]ʈ]DG?e_l}f腵3旈 CSpWE|IǕ^cw~tceyU^%ħ9nsc.(N#$ro`Ƅ˰Ф0JY)`^TjDeL%35в}JƙFښ121em U,Xd&œܯM%rp.t%57Cawmc"pn(ՙ ۃWŠ]IC\fϤ-*; l3ui]y1 *(sljK+A [\Y tMu.6{._~v tŧV|ഴF͟Ŋ*Fa<댒$X-IPS4" .G*P섌os넷s0y s/j*,Ɲc`h֯%1,3V3vh(LfT>S#xS@yG z}r! 
egmjŪ.Eu?o,a=R6mah+ >,G7-ZG{{bp @ $$D39{[P3ڔϯC' 2:S?y>jqg\Ho(SRJ*Ue\S KZ@jc+6 [BEt}xz2"qXu+2 ` zQHo8V9t&l]{Q.5;.YO˵Jc}:v{2pEGE-~q9&,Czw͒ ivCPǨŏN~>5re-4K+QLQ50̬c"2'ڲ,U-L$-`XVIY#DdfZEiDhZ#ԖS"a,&%dXdibb-Zffel4Z6,°!DݘF2UYf1@_=f[:[{ DZm<+R@|8 JwV`3ӱj;z=QtlLg{ZHecMj LԳqk}4#S~ʢjd*Ls|g,r S%=4 cx6sX_3,|hd6 Þ'9 b+ӈWoX:1{3nB@\+dO6<+jk@+ cL8+0Z_[ &/^,H0Y,+PWS56^FkU3$;9iLak4 kgR !\%yMcfd!"_LEm1R%wv.WiǬVgNYn()M|Ud3H4}嬀>9ycg6{YIn׭Ք M/je%1V`4Ն, 0c+,XaD!228.qaEex|K ETDAIfH`Mkf^xIf}P=([iW4) %1mI:kԺ73M_s0g>PzӼ^z6,s*jT l m|:o|.TSC=SCh.b}=^U 0\@ȅ:9' L8[}ʦ9$H$cfٵZܧIӿ;X6L5 `+5VF͟; M@CT,~Mp>D-L-~MP K89&߻| AH-blk:=dmac+Sy6WQʩ}cD}f  #/)_/ޞ/4@m^`F}e*EW̮bJ*γM BMHFgS?KNć&PML*,OK m(;|ĉWɌ~HnnnY/C1<Ȉsf B)7>K%G)>gM2ZڽĻkBdk#zs>ʡ߸?);o<S3vx@@d@!4/LoFKd#Ze!"Ya-ZǯҰ < (͵n2'81w`sPG|oDR|@fԐ J9`jO(l}7-*T{b(A YhqpU %uc2Ù.gTуt$iB;1eP@A_%$XEoom6?t*YϔqJuX}dhV[UG:fMZ 9o|4 >%*ݾ_}8Y1fuW?Ŷ^fm{<\s .xW*g8k~^Nx?|@PJ$$5Ҩ^ӬN"?ntn8?SU۸UimB*GпY4ߚ1 ;&nLpk$RBZxHXA. 1֊U\S ':VjxyĚh+A C\su/~l?m<i IeHI͠6K5rS7?fVş*Ȥ*ce0/յh6I--B 6&u.-DyӅM\}'7)6K  Bi @ NP+o]doi:&uGT-o\0bBM5bo`EymJ@f_Mlϗ̂ SN|274V znj`-)Pdk{$x$KҽjX_1wqo//:4J]VvOo⻾- b` z;;oj\;={Ng҇j*/rI_[n=kK L?-0洎T뭵DU&P+[Y\Py2f8jKTGC"*فKq6ZTqremnۧJ@涉YGoPtv[ G[#W\;(2ip~B'l/![]0l!8 Kr1ڀ;r5"I"<2J4v:Z+%^83VChyag_ zOl?4xL]vY,­g,1@'#Ԛ:I 1iJxnש}bm= Ki{"PRh09i)d }=Nކo*3MA1J{6o^TyPP9+Ϣj@s•   ȆBž+ѳ}L'I$y7~:6qjGw1׬+m!T_?:j]/u}H[4ؤYp9?=VGaj.$7 x*)*=mkʆqAA`U4Oj u|yhg1U&݄T.T(~ %<߻`kq`@B(@6~4衯LŤa"OoG6a rw {,&^pWwMD5?6clcq!Gj̟ |р7q1Qe;Wva=tP >mpҿ Z'oAY$B#m/—#u04x|ZA5DZ χ*J4yR/s$Wwp0%gfך@@PB#(tD@I cLoh9%!!:tb|$ⅷW<3[A]ڦWhU|-59ʪo9i52_껳1;`;uq== s$⺙o'qtQ6H by;·sٴU"Aۻ-YPquźW%moRpDU+͐AmmrfUaɿyZ.Wd:R#iaFǵvݯ3{,}}RɃ)FͣD ;n2?p4mL:Q4x)s#,J=]Oi4hmTJ{qī@`-\$tl'd)S #~X_DI8j`i?\;/?Ub(_eUTA}FU^?} "Ya L0}ĴX+-Ҵj&&Re|C4L GP}axsUz|7*0dR&A tcXX?*)qVf6*f*c1YXF `f0h0SA.9}UWs|UR'p]MǯL9g>??g3A7Z80M ҘVҁ!ڞpy  wUpZlhA4\n6nN{6 {q e.cM- s_> K{w'yC^h=+'4WP8yEOt1{c'̑c*1g=,1q3C%Wʰaeib,jaabw\jflX+W6Pڔ'*BҺG+z/l(z2R`OV-)SNztq[U9^ՁVT eU{D :P 7*jωПd%.[/擻tW׽p#F;/s>wky Ø; \;Lě*. 
ע5~~z.wی;:?$~J/Wkk#"GC}ԪT^[GZ:|z~zXuaX 5j u)6i4fYFFZc#kkV,6͛5f,6loWC ѣկP^tUn0W#ǕM\7pmU[ U`l0lmJ Qު6mQt{Ӑ?Bn{#vqsLskpcqnˤ7Jx̪z#z2NO;gCutp7JdeYaXaVd&/u``?!f2?˩5Y&V31f3b|0Zm302I211ʪg[Ho ŃaUV+++|^1eb 03XʶTF12$21`fd&Y`TM~s ef3Y#h͒_/ul-ofې ZWēwլK+PmD־>dH\ p3r'MZ*Y⥾L#7XLE\Cɝ8ӕ c/5t{?Zhh~܋;h7AC^-1Mһ1%Y~ƘUٶ7H #uQ5np v}/m״T` /|OΠyhX:fGr>Zx`ӽ2Qzt&6p Q~G^T+[8:-  6f'|/m 2HrǠj{fNL+SQ|G;kQ?:]@j,Oa&X)pݻ"yPRߋi VM? ^;iS ]GR˟~"*Hᓚ"7F5;uif^~t -Y xuWP|^/u!NM߿eSqx.ujh}>AMCؕjO@aN8 gM=U}1*4Sr̭ykiNf6`  5S_w+>Gp?R7a ;.vK(@PʊDLYh<Բ!UD@ڽo~ V9JU½j70 n˅87̴KZwzLf),yrF. M|3'/˧/?>f`k/yTklϩcj* m3,I6`6mM&j0цMfj6U|3VѦpͷdC! FffdfoVʴZ0k_llaXh5r9UnckTRLn Ĩ%3Lei@1 $#?^*SpʮJ8le` d[`5 CUU¾5GwuEU厇p/@yo)/Nla}n>a=WTʰ2SIZ$ V_rW=fn_mc?ULeǒV}R­8:Mզo%Ng౅g CpW.VD[dl~M[_*QwfOby3xk=Nk4Ȃ@SIuacq@?)TU(*TN2?q>NsyHwd?|g~ǷMv+5g$4TBH7zMSע Z¦ Գ$9l+R}-t+2~Vy;Oۺα?:LnuE%n2U2 IC_#ߋ?)q}tNjօ HsU638ŰiZVP8hVhgCo'o ymbiQ˛7;cZLoW@zU^ʏJ2ҦTdeP24^38W/5mQ*Zm[0`21ح e*0X81d֪ c-zՕWsUpsx4brC^pgEuq_yktWW{ͰڶÙ*ʵnfrO[=:glKc9*u%UYy7Z-U{ʵW`u!gS;X, ':# <9*hڹ\f q/bjod ^z#vk]%tU֭ SeSe``.öǤoYVeVbVQ^L*t6*W<j~猖ۍUu:.ܪVjETd`p JZ[.j#^1\1BMEXf*ʙ `3z㵎:af\c^;ʸ90\! M0PYm2(]]t]~0Ԟ_)i_Kt1Lvs. Mac)IQzA3in\6nỹU`iD6&XGeaM6z-C~$Xfh >^ MbY࣮N*u)>]f K>џR`L\o8|zk< ҠqT(ꝒIl^ }e#8ԟyLl}J8`/w@ oQ`\edFrHjJܻ `MCl@l [ ٛ oDvQ/L+dShnsJ2C^yEqtr;l.s~eJe򾾻mI!~˦EB&'\QaRKb[6z@ R%1-F2 iN{;u\M p1yVE .?:O ")Bj#y%z~f6p?+F"p_85Y6leʳc$gV #3޼wìj ?DYR `d# ~pԆJˈ.U,1#pWBd=̥6C+#Ҭ9zՠ9PV+c{%h1Kc=鮾PrO|7 6;#*9'+Cbh7lbK*<dk#;NuZVWKC;<0[WlWye] ӕ]1] U##:fhJc# fE 1VFeYX2 O&c$I}kW I4,jԘ%%YO=oragˋs.uyOTF٭Z{sFbz_ӁzUy0Y=i6ZeTdU߯jU9.xkڧɫat*:]:+YUt\"t_1}oCxwz;MRB6$ìN 7.2{nznyH,F-S=@014 ȟPtWֳY!e0m+B@@P'7m; :^)ƶ w.@͹֟f} Ιz=>-O|ާņ0 EH6 &} zަ;|HuݴM]]ŋldUĠ[PB Tv. Iy%4KMZ:&ܫ-܂X>#]Ƽd.8??A{〲t-?[ j71@ikй:{!P!q(C\ZӃ,uzN+zK~ހꤒ@hB@8.8j:O݄wb[+` ,0=cFᆚQ0alF ʰVaV m^'莯/q]z.>{uvj9`${xO80CY$gɣu8~C[^)U.IŁ!zH~@umanl:PP Qdk_>\D^?*8! BbwKp@ }/yD/,y_j3}5e: G ׏K`F;ۨayGn1WX2)B(2x$ $Ѐ=}aiB3G^Bej+^c"@x)O[! 
M{+bbJ4k2MDfCgS S.,rGX P6qyϷP{?w<䌃 2Fh) 2'DHJf{`ϻG?,0a+;mYQ:qW,q\1@?8bR!\Չh`cS`4 IJdc"b0U%ZSF*M1Ծ_7,'*4L3"4&ɽkU $_/Ig\G'j}+ڤ;g/9$88W\l[9Vٵl}%3vO[}QdnѴPsP3ܦpP&Si:g~6ml󘢹hjh#USz|p|Օ\̃Ѱ~WKjGX0NLm{&0r'?>J(PD( 7#͖ 2CI9ү|T%{-W/o6akbe#< `;#eFk!Y x%dD> .}(Z5PK*տ(үaE ObaߚWCw"%n Z R7سyߟԊxO%]6;t@r`Kh'A+ʚmda5L jT}-{p'\9/Ṯd'7i'q$60&` L*ҙ!T]`c,*E.DFh L}}(EnsvܻnU0]o/2Sf+"O⽭W!GG [+M'[U,FghV8Ǣ7 h_")ȣ``w?Q8[3mxn$Y_?q@3FVT'1KN-IQEiwXWZEX^$5} r^t(nxY)eW}06@%aiP!1C 5rKaJÏ<`&LGӘ (WyzOetֹVP+s"vexaOu]޿ک}ۙ 1ˢg|+/IAxx@s?0$;JTWQ^'P'< ]9Rhvz`h+D9j8FCNF~h,eȗ[Bɦw~ƨp,o5* +RX7EmHAybwAezs{2o_[YRa!Xt9 ZPh9{Ƥ\3_Qfb2LYjҖo>~?>ϖ0 ,%;p+ҙK]׉b}:]9ݗgnO˶jL|<31/3]",*Jr Wn"sX~Zq#KjƮb=K:K@J-AݙF`}(&>rK;]z"kݦMh3 3YB+ٖ6<@7Z>K΢3 %S\\@ʭ4.4> *`s9^E` Q$Npmlzvi|@W6̰}ayY7Sl pYR\z:2TdEtÇ\ ̻j_-nQE[13zp62T[,r"Z4XcY;ߗ;wƣgˋe;S[sb*0ә Eݷׅꐡ_ڬ{"M1e eP1aVZJ$YpvVǁ#b3/amqOcw6'}q(OTg4$^77_,K}7H\t:8?EQR/³E5H ID%U*w1I*#?EjziS:=zA<)w9Ag P5d*zҲ͉ްvܜ1_s蒸cs1stf\퍷OSXW1|t{.!QNJኯ'rErk}v8sdthȲ 'l# xm^o+{A\+kwy",ybWEE2$0*қ6+h,'c'$ i+i EYT:$ JixDSI=.g<Ƿl4eᣈϧoȶx*iނx*PT "&.P V*3Xٮ1"4r7&6*1UI½K`E?[W]U!)6m4 R~ FLu k5n0e7X(Kc,"zPk6.f撸r[ #Gȵ{폣[~뤗h?, NjbR.yNΎtˈ]qq<,3S34Cn7M"X_TfwSP g»>K@?i9wUE~3ЀWUٺ+]%b#`ky_||>v/=6 Tk8y39vZzsgTJ]7VlڻA D${M|Cp+r=NߕY㤻 _Uz)4EqB)焤` %R@:IHJtRJ9_k{\'qco֘۞mxLP\ύ9ҘƘZTҐ;X+xKEa't3Boz $ 8Fv,9A`Lc+Lj;{UhŎARc[c{LU[US\k}=?':WayS=Q=Goadf` )E3q5WC=:"s_YYX1;jl^}Q3V{{=hÞxuv㡷EfƍU*oVwaשә? 0STumVSa0^&5]U˕qr#Lj[.d869GNomW8s{tߝ[ߧ3S˵\fϯH:fnkߟ>۟,7קcc>zueʖ)daOo]=][ X:} ;]yÑ{ˑNƎ}Gw/ ȧƺs\7CsVѦ; 4c5bv0i7ӯ]yX~`6haՕ`K9&RV.E$,Pz 0wWe{'rs:6u9:p7^xfʱ@-|JJ< `4 $ !-׏xڞV_7CXN̐4Օo$ .iMLCgWfr=u5Q\5>xiTsya[btpfպ#}mwwlFڞngw?L O͢q}u{o::ORoZ$-RlGEewvءpuXes/dlPo! @H [3 x>3ü썍׻޺^Rz)*"& /zG[x7ݏӮA0 }7?oɺ$RtI'R%&: #&PQpWv 5h'ۡ.{@VY5O |N>^}x{{s9o9r} n+"/̓ڟ݇MӸoSЦ4m^W[7:`"0A0h:+=>_,Z0$TYFa20- ZTiVT} *M0-(Xd@%? M߯dDbA6_\{56w. 
( Mז9G OR9wM[w(`>5K0 IGڎ?yf 5Mve:phݹ15| V{WCXx}^$?,UQ]:L7VsA%>.93U]K|$ a0Pi|ST_|n_@6Ct@631ȭhZ9,l6`,ޫq`48)SA\mSr CWSp2 fۿB$z3]ٙI+Ka={߱> 0G;2 y7ʑ"%SzQth[R,s9ɒ+ʔ0?gf5-1ߵӞ|;O] "G%p$%zY\GUh?mI`Nx!b?|n6?a!:N0Qޔ tA^\{:GT d~011FܖG5#’y Anv:a>KK1F2Tt/k>OJfi띦O&xlv(Z UuMV-0m&@=4X7sJkXEkk!χ8F<á6ӜT͐\ە6Iu}{+Fr.sуqΟṤ#Q}KšBi]NgJiKAN# 3 K}myϱS^ε.7UUi$D Ė̐`v|&?1fUݖnvi_vtAc|yȱLyU}:#kaːOusP{!N{2jM.m^34jle;5m+B~V;vwб:|\-Z{j]|]G@FU\E:97ZkFS~SH_̴Zmi-g~ c&̵=.%r?1?k7+7've/E\#fQ'+ڵVykSv9QZmЛx*| ~NFES͜ L#33WSĹc?A)ͳ,6j)*m'㯷-4xWۯV/rwfܦhv&^S9\T~pK7`in](l_W}pKV=;2oyXA&Ցdz *Im%Tn:H=m/\b3eDwY~4QD̴egYcєo Lydȩ&tW@sebjZLsP\]-r jr9ѧ]-q5UWԛz7:NE'rY¸q*i~`t6jdDI7:yUy #Z.S/-m ZyݡDچP+MSo禙:0iu+5*^>dJTJwbe6f}c MrefoxzD]&!T̉t ٯ&ǯiL¤o?.j9XRTs;Im)pRY{_:mqOG::J~/M_aRl%^Ӝ|ԌCU!Nܗ-g®}{ :8nDKSd}w@C@Ff=˫ޝCEr_U'trf(9[.>71X浿^;jبmcck+:*N_;]YB-?h;Q =f`ΩP:puʎ_g.5Ou U֜a24p"WZcB`6FN4*פ>M;P_ezŸ[]n\oaSaaVOɤ{dUi"%!s8AsHFed/Mb7:VG<'G~*Bi͐VvsV΁.MPF(xY2 :;J,MAyߏJڷf\8{_wYbJq=jzK.T*+ܟg${$M`6אK:Tg8Nu, W9eVLjCR]v]([0gJܳM?iXH!HoI<6?a%aVGbGyX\t5Pdku4Zlo5?mWqFRneG:۶ |C njtl ;v2 aFUSn`xl,f ϱNϗg&l AL -[HBo7)ܫ͇~/W'9:sGd<5둞~E˦JEٳ\`Cs2i =9}"3VS+ZìN^ ó+Aƽ*MGQ00$G9 .Q|g|m 箝i$5XuTCvZ=A]a1G`\.D턙qAB ^4߃X+ cEv]'yn/ԕbغߺr,t @6Hovo"q+]wJEbW' Q$ϿR44bk>z /bO;yGFC&7U .ՙhػXx]j )mXۖ~f<%-chACAz\@}ڍ0捥\|w"! 
K#[&̙H~^B&.1|gbv6ٖ[(N9{O9$$8۹cJ~nVǷ;bd?AKxˍusQ}9,)^v^?ֲVƲh-% vhyo֊ثv'_*Ԕlv[`#F|B<܌Mg(Fε"st帗_Dgvs"cef7/hB˓[ TʆӹYy6xͭm *_~UGEUjt q]e&NX\DxOBSEf6o0#eVD#\G+Gë\0 ?Qj8[x`(qw K/YWeyꄍ}6-zknBFVmsRJ`ow k\Ykޝ6kac=߻~O^k'H'`fHEN +؄AO)- GDD7pߟ2qCkՀ>(TXWo8&St3>Olt#U2?ۚ>ƓjjVR43 H[דiіW^ja9ޜ;R~;G4oNcm* yȀzxh<[ԇ,/u)u3^6˧;R#A4LomM/\9ܒ|?fub+bJ7NܗpIJ$߆{ yIt7ڮҝ y9~ܮCo2d 7\,z@;:heV}ZN0zrg6S( ?ƣcY~VJ&G֍8蔴u5l+-dr`dM{RCGƈbGՕB\|DbJ_PLF{ >"sC.톭Kzv8Y[TJ0*)Dl70ne"- mO?GTC^ԬRpπ> ν.%|dhRѱ;eyZ\i8LJvn:ٲpZئ7}MP6Yu?Z_۾ŧZ't3_A䢐x r8q~~| |ʅe=$?wHxrPULcUIRtoFw,%zkO?NP؂3y5O=\Ur~Cͧw(>[ giG% ;S_gK˙e_lIK=k !u.,iO4!/*۶Dhd7P=NXoS-G8-OP8Do& R_!V;G[rcѪHsEpw)53U" µv(&DNKw n „ *v!vEԀwY$yD1,jI1t=5ȁ)Ry⫰b6r?b(@D㌓,R&^AWwj^?:kZ} v( G44-Z؝}#@X>°`l<8=xx[GMpWn\Nw.kHW|!ՠB֖Ёˋz/Jw7+qU/`\iHJK8ߘom `m)o8;󶈪7I CsU5劸7 B&ٵlaghs*m7VcJama߫9Lx\ ?F٨l>}^II l`Loe 3FǤ2O[e爁4$>݉{ #,xU] WUy{>P L(|(T v^+bC+ <;$D>Fb2V6U*|Hlc$yu;VrGj@@Y"/O!)7giHQϡI|A28пY<+ځӲ8R{w1 0td.LhZYpO?oENtyr. 횤\XrTs11{=eV|~g] 5$rqꮃRK;o)<[Sbn2y vMRyYY YWK3~Ց?{{!&{bͲ]So̠o̵ 22>{s- 8GZvHʷ]n NPc 辢O&\i λWp-A )iwWO@| lPZN,Hn0P w~xjy~)Y'8F& =ҷ?m} Ŵ(]4xQˀ]Pusv\nF~鿽`7+RlĮܸ(.,Z`1Ô}s3L,iqʒZ_<=#? 215m+#v@!5._LӶA6M)_&)K w0 * ?`2V9 $p%6ev0F :v bY{? sb5GϾ5vq |<`>]`!%xHd}r MgHuG;2iLMLL)00`̞=FVW?y7>wY'+}v kʣ!t_nT~K8rյT{`}3*]_@+`E^ԫo^S]gEJ֤#gXCw^ɕi)|59ȐR%2iv2@ nzi5GJt0"(ϖXč<ӷ}λ@4 e11d[Sc*~04l/:x?g7F <&f{%Ge{KM阬5-Bb2XDTiebO$80IJi4!N_"CT%)4]|i4 dDHlA)20AꜨ815ǰH`nyxjv;sy e*vk_I@02 ``FK@nQKm>J#@ſ)FT KBEOER1$ %s[$I1__~+2 M{_ya`nﭪzLxJx7D, PWbbCI?0rʮ>}!j UF(=x"xs,B{3R TJVQ4;+eM*_c{JܗpuevC݆.'ݰ 6Dm.l PbC9R>ﺵ98@ ,lK<|'ߠ'ꐓ>i|qΛ>@ !.H$)&YXJ¯2Xhap?OcԮnקJ5 #UUyw=l謿r~˜~sy*7vBf:;&>ޠVҿv@eUI1ғ]pʝwxPEz 'h+nWgu_:0<`0"4ccHw8 ݉΅7 ! lB w&Z\soP&@ 6y ƴKl K7SH+cW8o_[I+ bq4aڝ'gߪhbOђmy-.%W^% ma 䍽™hEMy06oşú1$:`/oK*น=J_\Sa92w/S8GDo: Sxt}G$$Fn\Փ;gOUmTML[bz2'.֜ ^n"-B` :?CT\?SK ,hMxHD*7J6LL@cUu><3-vjJq80QpC8keeP )eY fzuH ٫,h6&Ttv\>;H$X> n_XER=?e>ޟ_Z,8Ti. 
#Շ .;5~`ZM_mbZ;Gt!Idow]PAmI 5 ޫ]ws<W P}.-;Oi% f;fh͋9HY2u|WVԿ6ݙQ94>;i3jhԠkU_wVu҄JFvI1#S|PDַf@H}jbݟQPp,#ɑ}n/3b_l;o0WfƟT>Ak8a'$G JX(QOܼ/NLiry8ddgҺZu@t}`!#2=Nŭ1Gc1%b}/cY_bj eN(~ Y? A@{y; Yrֶ  7u2H1,μ.r-aOVV}@~ꓚP?,"RvGc'W_Q\8%[1g'n wEmg`M>r:%o`MgO.=7z0[_-nx Qk6d~h@x8n!`/A@8`P*!Z\L.a=rO{E~ DA{G. D&;{j@ ]$WP5ڡq[-%>)zLO[z~o,=U"sx$Lq'Lwe="w~E(hs.LJ@BEO_eY @dtVP `3 FmfX..Gx]ʳJkj[@ 7=h$yHNa]owllA`$"s?ti[)L{ˁ=d =ގ l.pD WEm''Ҹ |b%ܲv#yivD iʅ01&'M#aٱcRq֛^̀sH Mp!i6=d !{T ˅ }= G嗤 H%֨/1 揠s@@ot4:]٨5qVj"%h>V .xH.?dl]3Ww{I ! $BW*1_ xi+l!!Y !B E+2?ll3mүtpM|_$%(v`0{+iKC}`b/+f}'!)϶FEq}!wkJm^"ͺ.=IHuMJvd93| KWeZ,m Sz|dt6rT B 8? nsIje䆡WW ! $DiÜ5nU."Zmn>л3p\V0_7]X0II `%!!eV-B(6rNi@J E%!C[ ͗L\ʴ-&ͻ^pu {,:Sz\}]xۜBx[#҅(;onW«ڌjm ]IS3VFz^$VppB|>QRқKS<}3%v%ItH> 2W _»+c‚Xt%2H&J`JdJdiMHB828c8S6b+ce\TIi'E;9^~/^zMlUU6g{Se؜2ׂK8s3t楂EazsGE,cWNEBs,i׉") g6>8Z#wwk3̅ L,<@KUt{{.C씄pkg~Y2 5:Gw˷Ӿ &@t ųifN.lYDž.EAY &)FgzہQ{a-r=\Se8+6UT)3wo]mX R`o}^I֠3(vU3`u!nn}juG;ުQh\<1/-1\CcA4qLc1(( YRII5oͶ66csu77sp#^#q9>i Bx:8ElWy#WݣKIA9jv|-#W|:+*􏧓9wGF],`F\w{Yvf6 P.;6 q8`}IxJW 81c 04ݱ cf-1wD fH$1| c1Ÿ+5'sg<ߔkyН1W`Au2gyHR)E2LS)e3*w;sw;sw3 s1`0`0  35y,,X&Z]b/jn_Wkyt2#Q A :d1xE #( @ SAqW݈]_\هފȔk7W@\ᯬ/D{$ v&T~FDJD*(\A 75p c8_]È\|w_G>gB_ZP + YQ>{o$?U3 V+"aa0X_ߗ>o<ܭXm}ZˆU"W?=NzSOqS:.N}-g98a\Uhд0:lhps9+kaʸ(˖n88ͶޭW3ǧ\cW]mSCaww9ݪqjW.*8O <g#G񫾧naErt]b6ʻCܪ9xu:6ۅgUuVqp7ʭ·\ޮ V+=Uʳ AQu3 L eXV&1k6qC rWNu+*l7ڦmÎ#8+V*mW36;k;וVt:]UzU^ ­xh777twhew+tX[ gcNc77uSkqՇ:w10lb6s7lv%vll٦iiii;p5Zeh֫7aƝ&p9:hE* iēn%4DMM4'4ܜ74R$4"p"`DD1v䉙q(JRD2SM!"CY6hXa? 
i±VFFa*ab+(Xd220 1K+$daYXW ;mSU]7{]BM Zj[7im5ʺw讁:vFՎn:6B߅fLz ]fV;DLi֍5Y֍af%dfFafGﵣl4ժc c1c11TqB11,5Xeem$~GS_8z QS[͉I0 m>fynC˖Є= b*ߙ>;}s!ץA I~to YecG> Lx~|;l:a1 HeVieefe `L`9QWPttHlZDCbMFOssWF.㡪|%G|`ʺ00V 댬e,4Tb!veڭ F`:pFTG@1&ꕲ`]Un90n7Hb!&쬐Tr\jVetJAj3!Q l#q~r\G!7Fj惐` rP6MZV tO0[I~lkF(ѱ 4lhڍ%#c *Ti-%+TlacM- ح 7WQvNK %kF&% 4lhڍx}hķ56јlsF+bᲲ-͎#rVj< S&Lad%Uн8-LS䆑u%[AXʻS1e9Q7USnx$ޔtxFwC@#71SYUrrR=y'k3uw"GEJzє )V&Ycr\#"Sr BKIxbB$F0"E!Q%0@3 5.ֿ_ŷ[s+2Lb3=eUf kkմe,YQ6Vbaa_Wckafaefa#?^_*}wW/zE>,d\)xhRBI&!M{o-^ӏ|4_[ i%|}o7 h AxA4ZS(,K )}O龹?e=@}󝢿L ,zY[ƞ|_6a![ N< u8/kIJ]JKeW? /ٹ+rza1j{".34La׫-8j\-\%NL WB;Rq1'd\Ŵ6X;mS=3$:!7)Tt%D ZF0'2-*q  UL03*ޏsfM*bkqMg2qSzJ0?32=:4h<vqF?qVfQm:H)YP틖<"^/Jʲ!0ʳ0 3)VUfa$`c,X s΅8<_G%;J${Z| .f*t_}PjT3{`u+NMF@ps5)'-8F#p.&*Oj7 &ǷCiX5Z B/RSNtppU^{$Ga8 Ta]cUw]tPp m^1P݇qW'؛gO^ դjV${Yw֭1 Em[9n rB_.6, FW IG *'ǭ1Tzg:;Pl1ifUgɄiCIhƐ 0 sV .elbS%ǟbYF}k"[Y+{ߛ/.DD@ \~s׾;y㻷~7EKd=ٮ C{Bc[Nd.Ojd1 AsK7}j+͌5h⍌#c⛚64%0/+*9fqsKc 696Ih2_))ڰ2̣5ѳֵb٪ڮ!>qW>]Ȯa2 Dg2Kef=8ǧgz+ꖌl/ 6FC%Ofd3 ̦ 2fi$X껓H}P=}fhX48UQ+b6| bI15 {XwiEV6]e6qr8]-!66BݝHSX돋vh8\zQ+ꇖ{l  geSLwl'`Yt>'l͝M#ltW'=KSތec] WB:sQ-fys&#C|ȴĿJhC P%~ %HEh8?K-as b OگL+ȇ ,dWd}_פ,ĕ-LELGc/W1>`h=cˡA߈(D(D0DEDnԿ©aH_G}T}ÝjW8>}߭+Ȥ;"?w>)X&V +>dw:./Tw˜9FOP0><,tqH׮}UsyW$Y+V$+rFþoS?<0ᗃ=s=_8v#NoHʽy{ݴEx4xmWԌTl;C3˜jd=#*sal-0<ԢW}z;32chu  0Z0uC` E/=9w_-/0Jww}~(ʬ e(0+cC +Պ1Tqb{s*zƍVVVDjm_SJkY,V 0a daZij$0f jKc 0 5QR.ws‹(L[{J~" ,[>O kS>wUgabv[ֳʳGF9;nsbG͙QgQ;+4 '$ˁ0̉H%Xa  _w?kLCri~;^s;۸h¸膭2pѪP7V.<+cc9WLuh0eL1UbJ.ܛzl~$}ھmt+I;:HkXr3FλIbY)gCX ݗw `Yz~#zW4=8*XB7d!$4Z LOO+cՈgثf}wa}OmUX0/6j>9\AOpLYG`Hx=_\p'D L!; Pz3T5Rʪ`.`*C=$xs.1Z;Gl6w 8ҿ 9i4mzO 9֣bثSqlDò CC4ԦD'?|^z ^-T)7czFO^71mQ>vo U! < @_ɆNmqy+U6`l>l,eL9b)ε\ݯ_›E͏$(lf~{vxVi%,Ȟc-ZN"reCT )U(($܉x:I0c jgYdŻ'2W5suUit6_\XzAn^9Λ{?r5Ym @@z6eĞyls. 
W;$;NWw-C(&6awXlnٶןVO=W97)+s+bUcјk>֪ǏQdb0™, :q̱,REըޡwLTsD9Պ1b/L!mm\0%h`qMꛛs ߋD׸| 'J*0q[{=v/I)hUPz<NnU]%dȱ-.c"g_^yic6\lU0v\yXW8V*P{5bV" t5~;z l2UF3˜ΕJ$ fX *9jChۡ{W@Ss=Qr{N_UX'gþg7q$7d$jC;-nkN%b{yiu3YͨZ@U4oeɀ&OoW?ގ'V `L@Г5w03%-gR5IorzxϏ3vSm  >?bC戏?.dOnZ_OOr?U_MKO.nt_T /ܾ3>M[&A:éUl{~[3嫉zuN=nR~Û U<#4X- &YV3,c1,1X08 ZFe0eLjT߱ǂw^Zcc_p^ p{[~jK7~ϫ><$1 b#k}KxZ_uJ'>bIM[-k]5 Ejф;͚&g-ߋOqx3lO ;zTE@Hfna/?NWκ?Q)nɁc b2Fd`O~Cko6;@" :fkIँ]/>E4ؿ݁.&sB@I03>KeH" @pH[~}Yc V1UT}ǿ1wWwC1={'hR뮳Uwfmfek5(>;w_%\*]8t|ɍ.2p^zI s˼_Ǩ,V, Xb++͇?_t#K+e\%z=䮗 /Z:w_oCkzbd)d, J]1=YEfëpasiL1BPtڜ[(oBIgOx"+b|$?!6 h훲@W<"5gyv'/^XtkK˵R_@C=ũUQ!W>V(o mQ>}S࿛fK1Gr#aɾNF+?쾛$K THDZf:$_֝yW~ *Tz>v3;~x-./oDZڦTI?WSveu $ߊ˸:MYcn{}=_ڧ%xNƐ'P5(ݘ,A:kE75e.@ă=|\$ޤ[&C0-p*_Cc?Splq7@c>?VzSKmH3Xۊ #+励>?NJxN5>F}G+KZm2աvy*j7plT/wOS~X|+!,QzUu 4hh1+?oE/NT.WYa?TKLCɰS^sO8UB^ 6TKjTdc&E```kӘP^l4x0b31 WP^g8'Poܹ02=d1MEȧ1Cmfʗ?45I)pqҝ{/Py^A9]_Ni'(2C+#$K@y?&=|/,7%K,1XVK%C2d JL^+^1g GOꃙ'#fzo]K#r鮶ve㽪8Woej\X |= U^MЗYY;)~ qs 'fΦ U<*Xŋ0XdVVJ 8,_7ueei[eV70h?I&> MB 8ni+VU15S4l6i?>.CM -PJ8&`*O|@,$kDr@#p~Kv1cDE6r94 z@"# }V;IiQ`ʦ)aTۧUJZ̊0df2ʲ'A`7٦P`Ã*D?iq)~ll4Vxm&U[2Z?pmpqbw:]n1:\Ά"u_1\zG;v/2˨vWٚuc K=ğn>izX=3UZ,(lŸSԝ|(RЬh+G7p2Gz/\d{-kr7.[ipb/J+q݇¢䇸W5)^xhyBd"$6ItZx.baO>O>L DȰVlO'M@[: Fd̝)}LΏg n}^qH&,࣍vꌦ?7#RJwϛ-gCLk2WzJ`mW̷e@2`Xi}zrު$Io;E"Bz(=!% r#Q^)3A s!BnϨgGD! \ yz\u\"@E FWh :OU!8aG7/y6'jfZ؄&1&fc%&M!~>e vJnT ox+g;3T>il;Io:~:Eb0d =S>={ptܪߙ6v?Coz~{8̗qr*>t>rUqYE7ˇJL`.w'0͗Ι`w^@yGۍ{`=jyM~%>gŁS2JZ K V=Xv"niOA00x "e@X0a<`LKn 07Y9 GKp~px}j.lA3_ᵅEtkYcuJuY}N׫;2$a~('dP|ҵ d1`{\݊V~__~qfaA8&0{lY'ZSJyn'݈ϕo syqLӀ`aj\aǭY<<q=Icxϳ|&[[{oy8;j-Ar1J{Eh`pJ0`̘ڟl0hDl乏]옯Tȍ7&EدkzJ*![U`~ʦQus.=\Whqg8vP mU2Q鏻:*|tRU/-0Q,0iz|oJ5T}~j~/Y;RU&.#SL{#~gK^gPv4/`"66 z۔J᰸ "6d0-"ɐ2Yb,XV SJW 0@ڪ8kUnb$]=Ycl]ס{?=<_vQZL.YvA%cJ"*+ݙVBt_4vTh6 \ c/fG[m3s,R2[C"]]ǿ|y(l#$猁)$)? 895OqSp9CK欙}y1MV<Ä D<0>ɫT9U&Q0?e ޡ'>/T#* Qnw  ^HeKŒC'SJQ; r \ pR# E`Cn[ /_!ݨ]uX̲Kݍ_ٳZXz`ԕ93eLmJV Q*heY*Mea`` 00````l!0Xn5Ub].S[GX)FQW:0èOªw|?Ї VsJt>oߴ?qݜL=!NY_8K^:_&5}ã(р @…H4ߢqXKȎ#8! 
M` H)ޕ=_As^}ۃϏ菏44Zbb*caFƃpJ6Eh+dV>XQ\B4|S]G pzأ+h6 ԫsBX,YZ[;F?a=s{fX)-=C‡϶12b,^ZjT_8&֍ƼO{_*uuvҟvypeY8tI~<@h!m`01´]U^*N ,>J}T`*xLjffAk OvUS['|c I^5hV?SXMҶm&iZiFiV6jV}〶 `ե*MɲM̱\vpQ7 T{g5HyɄxA`aGS<ôwyP+'_|K?;#?q.Q/s [ )j}☍4MJ;u߸λ53їA[m,5p9jsPQe޿*<a^k #.6Wh'Xtn_x dg8?[f1[cVk WpI1t֡ -Vl5nǡWsP볺5Z+h+$n"o>* x>mzA;;Y,R&ר=qnɠ켖?WZä&[<8sSLo)l%1-dk;d&0hє6m6cMm6cPņEj|i4bfi5"A"H/GrںP6mmCߡHް`(Ϛ`ɖeKUT~On5Vl/g}7xtjI6. oR+w\ .U9b$ؿqOJV;K]Re%$B 03I ={O1pv}6?}p͚[桊 !  =,Z,V|G&ahزOSv1n %<SS.: Mm6 LH~&ٛlYda ܭ휚rrs n~#~ogfaJ$0L*1U*!gmWWٻW%bwFV3LkZV+JinFúlشٳD$NYNQ0` vW/m@⁗\▊Ѕ) _&so4};=ֲͱͦ'Ӊ{#zr-ͪй6susHg p uuEsNp]CzHڼu.pw>S9 `>?]&민@+rV;@ Xm-:ۮ7Fo¨bɋ*,,bwf}^#8>\Kb2I 6~V7i8)& 0V y~yR`h`{7T$ؘ0JhflJ2 IFp`x׾`eUbNUh} c6̺sp̬b0̫ 7XI0 p<`:ql{4O̶p0j?1\FV<06q``j wFޜwj匱ltya885Hq- ^V~+M7yRbGnTu=sFSJFFƣ F% 0/z+G+Q.MOEBEV 1HjQimLd8j?ZX]y.̮c-,c={~ˏ4!cFR]y# W4 cW)bR)P5k,2L  J5YC;jNebQcxG&r1f2əf3&L&?ZR1x|>QmQ% `\&e"tV1bćJ85#ҭXC .;\+61V]UqLQ3+0k)3bfUaS9Am3#Qd8Ү[TZxYػW|OkZ-Ve;?E=oD|>>]f\rpК^yXRU`mhe.)2ZW`9*"à^!莠9VUu ``:&޼k=X^=h0ss ģy+Q6zq- "Ӽ߆=ƀ(BE`sb)(`5_Km6O\]*KO)jP *J_rQb+k2<,ʻFXH{,?/)iMd_*uCMo';c[mSBDe4`Ddd~OQ~gmYo٬yof,l )n01cX<lsi3 mlnNnp4iXAAA4RBJqSeʥ"4q&RN б2lA.\h8,FYC)^s[mnʫgh98ho^*OjGV6* 7^=srE UҮ͎C(DkWuXrU}`t.䓢]fGroɎ>e48kalֻƸp[z[-ץ7 Z݀ ${ [>ȁ {jPi3r;oJԘ!0&BdܧC(Jn8imIP#u7 rDa?U&)o]*ނ;F$Iؘl4ZbQ/aԥlmG؎!wW +F14; o(o"G@1Չ~G:+US_׍b$t^\~})zOnnuI#Tym tʣEyGQUqV,kE4yQXL*6Z`jIxTxiS?>%R}vH1bb,1J`0Bz<>?ؤL+ F+Qk0KO{JzTbr8Tl=qͩʴG`\:Į?U}*v4SI~uR}7 | +RxV0ȹGoUO;s1Fim߻38͊-O)  yU^3ltUO}bDb+3 01Z"jVѕaت5Ihl0`eemJX(4V7W U!>CcSp;Gd oY`}('?;r{69iQ;`I`#'u/Px7W1r{?I>.=cĮ1dy߮/UZUd\}Zvţo6knIA;*WS':`"Fd H\._/GK{{ $i/_2F _P486?ZbO$.+ac_:١?tw›=q +bLt$c79Sr4T.GpFhi\'aӜ\%kN"_[FQ7@{q"lqKlKý5.;D2R:͌C"qjwmc SAr!8A[B`!`!ȅ[m:ZW_&w9o;@3O=;ƪ>Q}=?SpW=v'ϯLkF6hB@Fnޅ`l 4 'zv&V=u=/=/-f/ÖÁ>I!%2@x! 
1"oBemcm`W`6臨]ƾamRrR8 1gVAN6✗a#ԗOa]^B*v]Wԝ6; 0!婬ozښ@(-ր7?ct> mzЍϵ-D KёW{` ~螗o?z|:ߒZI_/B1Ǘo _ UuY /lkt.͕di۷~JJ_8%IV7 mbiq?x]Y1ޖG m.9*:?y1| {}} bB >Z RAllP Ij/ciVkO t9v+uxHM61{xzէ:[zZߝ)^H&:̓m}b ݑ+_3\m^f }5qI]VwmE~3uVih؍|M~+_F`󀒫9(x}m1B@OTk˿Sҵ|^uS Te__QT&6:NhLmD_4[=,=od3>F~v‹/h4d"_P-<,ċW7\6x_OO/wQ2eVaN QL ILv:{wẝ![GlY5k^ՕǛQfѐvf~!.:PFTX_).흥/w~?7j6żߜzc̉ɳM^6>3M B &i֩H9˅-4]?ɳ]M~4LhDGig%[9 ׂڈ߇4}CG~Kf`0~r YӵB*풰Ű'> /B2);iu !մ%8J+^Aڝ>g!n`qC.m/'(jy}֙;w~̍DiCL׼ƏsJ)u!p@&֖M27cxO5 _HhQvYJ.N]*˱ӧ\nm)]uynn|ʏ2"_bu~lSfNx:`n QCI/>P"QYg= P>ɒHS/X~f6p i Yj~Q~n(nhKaukͺwYa[u++iEl Ւtie/n_72"C c61&:]Yx!MqYvm\a VprW5~lO)7^9?oZX)=|-ӕ[)?&N݆47ɉ _zrko˱CZQsSphg~dO>!MUh ׸Ҵs]idQ~J!ãapSu<>Ϸ@\Ū[w}o.sɒcLaݵ|*#?l*Gˢp닍|'PQ )_$YN($yy~R9JQpy!-`ed9&Ke}ӫ xޡĖF'`_8ej9VcZrᣜ,vܨ<'# 0rX`\xdlίxiξ#Q֩gWt䩘r'#Z2NkF4;uel.^L//jf&p)((&!V^qRޕ^'meBWibfխ#f,iT^or"WШ-K'ֈ쬒`1\? eFn X\x=~( k~W3/% jP@@Pkґbܛs[ NU׵d2:cX`t9ezi43#CV;`c^e,By9{ !622'P#'пf&` /Hy[+C6#vﵗǰwa(]-gԙ}n5OΟhf|VBO9[wb%ѝ4_VjE Yyu<Ni  DDݠN9#{zWIe! wƉh~hFSlVdV {|sϧd?e\p+,6la*8谬yUx Jt' yxG#Yrgi(j3l\iw}Meu|okMOV #'; Ӕ.I?Nnz5L[k `g^HC$#eKu"&Cn[ar69;=eOIp[%ؓP̳xU'!*h~JG=e%qR+]3E[p}mllbi Q`gOj]LǨN%"%\NVcO`v Ә-Ć 6BA^1|A^r9r틹OVNk?(mҶ =b.GzlzMS6o V[|8f[Ee Œ?I;Ĥ: ?/N>bbOxyQS<=\T##CI߼=8'uQb9l!.lljoے6$kn{(Gr6%'2pʻMcflًSw+oiszDkn$ƞMUJU#07vǣBƅ9p3ɸ>2ѥyerzsP |*wIaכȬ҄?iCCݖ@ys>Cp;q8gS=d&եBV]?SpZb;fcL#^2O񗿮O;?N7 /&&Tjï-\vY˜s}CZ8[a* wcn3v s>8.2ȫ֖h?֡ijCh-=aC1l=&@ S`:j} 0ēky >qh؏nk`_9^ jETd9&{NqKۏ2r$/u_X@~=03F5jvL>|f*dE pG%x`r3%V͔w٦jV%iaKꤽ K`5޾ =*;&"7sESw4ј4_AZZnӘ˃{^-T󨹽Oq?88jrBtһj,BQSXx~8Kį/IKbLߥ|'ӵd+ k-^Dֳ%]{m7;@$wRNxQ\r&y<C.7KZ ؗA -\쑜iϔ{#5t!e:tju(u)q;ڶZH%+:x: C붭s3{vz͎5L;NNN.D nlUe*l||Y9_)4AB#:qFPR(;[0wb2fDk=1G;;qIz),s!F7h}Ÿg܊7fE~z6lxq0߶^`X pЉ n{ Q<+]83j 3ZZp{+κnΑFױ6jFi牵]RQDjhYl}u]_L?Zw,ԿR{UzIu$" E#Pg'̊ngIR#3nx'(^D4ޱP> R~UzbOաzA=,sE}np"+7C,$ΉoׁAb = =yE)%Iol_n=K9U;w|ӄ50ؽ6|xiG;a7 JZDiTevGe\"4RF}kZY쯏4jgkX0οԦ_޹B+dʁ-6n*ykEBm\_/┇ljuG?3f'J*8\o-i7λ?("X9in*nJ ~Q=&/$v~_ UrOyHl|wgm@HO /e ki½5'Vd+zL†f_zď=Ɏ#nfVcROTcگ=Jssj*N}&WT[#Ƙؐ`{T<iJRK*ړN'ڈtԀD$1F =BݷhM˾A:X|7V{CEl J5ًѽ-Z擘 k=qJPqh``zNɑ<ۥlFWCT wv~n\n 
'wMAQS%'6t}kޛvw>O9﷿ w hL-v!I_hяͧ#\j]C>mEcq[=M_á Cz`]~+.6^9aһO@;O7n[әK$ͧ|5UEK㠚tY9y^yeI#5ʇkc1ݡ`j5)FL%c17 ZBh[0'g¼{&~}G2諔. )?z+߈`FCЧ&VuMg.WZk79N6v=ma{U}Oq5W>WĭW̒b=_gQ o,J5zd@47\mgnFteJzȾAt!׹Z^ִCN}B5ZjO.LZq߽YJCeZ/ua*s'/,5LsTz& ("[ j70ƽ PŁfA-FJV9X^[Cb,p2h{?B4:yZc+W?>[}%fѪh3mXܿ-?͙_MdK"\|XTbQquYqMEY57i)5 6!-<|j*W k\>mY[߬r2>ݣ_l4fj~nsU#(TMJ#IXSiF|R47.m>j̋}tP ep_,E~R\)6bÅ;{涯5 do!~H""S,||O(8LbOu'z;7Q3  (#[p*ҋ t7k:9[cզZ_ [ȃ=B`NqF뱿2e 7{ƽ#^UFRoBx]3<,.cB'ü͍.;yDзCjn;J &WE5f_q9J:wk~*PĎӆ^Ro/=@JƮRr:q_Ogk0pK_֙cd#5JrXCN@/ΰ̡7#uQs|wa13٤uDG"*>fXOlSAdbl}FO+&W_͜8w6\ep}c{:u, y_Vz}+"Vpa#TyxK2b*'v)k/T[=AW.Ǘm qG5GDʒN^/Z ? w_M¯3H^EBt ! PsQ4?Pjdx"} A٠wFJ&HՕTpʧMcIYiD @MA֔H;v`2s߮T5*I}l]2Bf0[kuMJ lF}hbTyAQWI޶͗g('R-NNo"t6U ;7q*y {[ vc>5$]Ivs5g2G4)r7tAꌹk u 0!12H|lxOGYpO{Te}+K|V]~lO!7/uǾUd/Ҩԩ\>^N C|)S:icmBjd5_&}XEjmDl^k=tP\HP~>]=|vֳcQyp nXm|ߢvMhhV,]PM' ~"ŏ_Nqd)s5:k7"鰓oC;Jxzm,xTI>Ϙטq,WQaU` X_NRuYI'ҧBF&CѿHr)(l;} ^/5m.a+,;u%C/j&ALiҶ \EgyFEhPw7H8Nζ Ϭ_؀yǤ"pe4b %Z9]h5U8n2Lq阧"jfIpf>gkUܮ;F1fhΘR+oHZFR7B*oZ/]d8rMag-AWa =t!JcYbs|NR'k߻0jH`A;Hq0/wzЛz77qgXy]"Prvk65( s\dK[;p$8y <-F\A~|a 97F^*{Ts?)rm,2/zu%,>{v? 
JbU!*=X؂!LFw4A#s7:dz]4?hG#%Ii-zy6sXW_hptgWOo ߫4#prwjpkY=ȭ)L%SW&ڷp#wt']1yAl7xFi4v0έ 9,͸H 3#j9KxKf-E'N{ؿF|G'KcC7/Sf5ts}`~wVk-ҜlySr^tUYk$f5/ç)ʇ|m-b'S'_EḎǃ4]Vl*J/Ui)V]?#m:_ Xs *\& Ҕ1}8(͡h]{moAiB#N`XJR?Y N2c~58U>(Ho>ӠgT?ȧY<N݅Nq[ 8Y=e6(41DdVB GFu:Q(1СIuh -Ưܥƾ3E`W\z#l&Ϛh;XmqhP]{i,pe@5w冉pKmѨePzZOi!x1{LñEvvxGcPzڢΙWLGz;"cgk89 q@jk}Grvi:!nIl)E)7XS}#<.YLܤ/IU#TݶvmJڿʮu5Ѹ\6N&}MF[._#/@%StbaT؛&C(Mu !:ݾ^Aҟ8r&"H%.HI?Rwo]dE%`ndq+MP;8"}~FẊ5 =w՞Tl&.$8 -ֳI=oB^t(t:s GyQFݤ_3QzjxSrU/!XR&[:84dM)P9G4"96D|QyT,GgSZl؜NxS$vxR59\#_djpS0lXljn3fҵ%ؐ-Vi%A"+lyhP;x D?DHkU4x1{n5AܽM8aP}>=IV''t,⻵EJϓ}'^ڗ:|u4NR;s'9MD)XPfhNv1g:ʹsMn~z37+%g&XEkk+6pگڟÛgվPm_9R;B|ekyJߦ̛kcjVmZT9 p״692׏`pе66x鼫!Ckͬqe)LоnwY~HǬRpO?V3:Ұ7񲐦a?SgRvA9ঢ়|(Eі5_6ӱzΫrr|_(>SvNw4r8jqz66o^Ra6W&]ύG<0\`v})i$yܤƪ2'?=hT}3ؿּ#Vϟ-9[8nr<ɹZ(%ߢ0ID:ݝ7{5ކ<ҷL \ 3Gkg$O^QxW3!%V$LddTN BM>>tҷI#J5w{c}, &l_=c;nO ggM*oD,UɵrhnM\jqG8-CPmʹF]P0x7=[7Ճh0Ӌ%#2}_Żb7Wsgܣ# ;:(sTx\UJ#"(x0<DdKWax;7֎%1:]DEG7Յ`ɹ@jfHAHs ^ڒw}Jir!I_ٖNn<# ifUi$}#NrG7VɈa˯A{;OM֫Um_#{Ӄ;{^F23($̛ ν 27!N Vi9x;h̃ߊ{tF2e8E6%mXr>.fau'Z~}H+nϣ\{oqR~DZa$DTt{#W^ kXl eVh=k.zdsgEL3rnu{='˛8Rt=S/|,;oVu*!_"fU;ix6\s F&r1Y4K:teos}k>Dy$G徝Sv];VMf gf>9n,i"y35Cz28=sPzxW@@b=_MBIjeڗ wVI4,| cIZ,|%m}{T>}#|uJn,|w~l@(ܘ^M䛢w<ПvR K8o[Y~n p.`a{v:U$m&FAD8z.A>L=g* 1`[}9@0O2KʗzxUҽTrj 9obӶN0<ݝuTaDO3?kmj)8}m/ǫR[_9ܞd.ܮ/NJq=ӏSCZ͎@sVYiվZݚ&M:ʈWآײNa&{QnYgˑD4*%_uɗoZlY.n"RF 'cn雌#qm:m2~[|/Br08{Vԉৣ&rXP]Iy:vxˮՏύ%gI(Q|b5:J;I#l5o4?~ZweZ {w=~"mD;ilHZ[@f`b[vrk՘:N_̦k_$^\W)X] DŽJڟ/d}Yg0 57k׏7rmlt 孜ùB~`vsTnJMDy!]"/>(TS[v@5f\:!gb}Czy5ߐlzv4p3 ߇$KQ?NԤ4`|p{!9ș.9MU~yB7qI7NՓ KYwNz5g% '.oe1̤ݰ4BeT?@!)PHx@1%Y^eJ($}YPRׯka^ylbRCsoXsG[;s\w"4z`Xq _ϖx]nDI3{_u]4_5G@laHv .C;xfYVʶ>m̽}/$S͗'+lLO" ._s\6O4n ,"Uf//\Nm=>j[?n0_!~iU_r]B լj}SǝFAyYG{E'!fW)M[lZDLfiV[rsȲf Z=TG פiަɛ*u_$k$j-}Sҳ㤶,>6>ct=fm!2 !oQQuj.4RmT5C?Ql|3b/Jh/y.\s»J<%SB.;瘏QEHQ"x ?=dg6,xGN n<yZUeoá'g!՗ BԖ䖡{$>צ%V<1 caLs({EJ3A"% r30u/t%T;VX %?W[ϼLRR61. 
m# ؟-- M7p9o>Nz$MrC?(]ʖ@˴-:=4=3$(UT{W6; U( NE@$CK%с^vO(۾_9'ʄMҙU6/'SjtOOCp/1dP@-80˻2LYIg-%ASk{e tBeX@я}C~MNu;~^A4u?ߏQGj\'x#Hp#)@_$oy:ENg2쑦na˙!+wu!R ?ח$%,E^(Q6KFo!4-9}#Q.!ڼ'ϷFP1Ҙqza;o6f|=O7QqG־봋{Md2^vٯLg1xfd2"'3*Y*}?]O٫:<3Z{p%qÁLHyTNM!S%%ՁoqPBJZٙCƱ&z)&Ŝs*kWi5R;Щ)|/k+iGJCԅ$v_;D̮ ^mн<+]կN)jveavqu2lNA  ztU|s_`4*22.&ww̖RdD #Cl$"XE`qJ@+ׄ˸r>JRJ;-ic|zN]/w;+:'VS\H; X-jϫe76ڼe#eEɾ?c=o'̮삐<&#h^.]éإjxR?Jd$>,2ue!?0RdehNO%k/74F1 oȍ0;uXA]4W g`xDw<@#ɊE#A=>tju}0e[MQtD675Yɝ08kUAǢm1k>~$dn+#/e?őN+SBQ[/1`)Plu1]߸@Kӱ{dfZTRIڗ32G W}Uk>$&&W+Ta{16}ڹJ 4%W HLt(tgpnnynC̈́,j3!8vDszyB|[VezBw,ף?yzgre(Ru rRin1Q >K6bfU(!#r}.f9rc.*<+_v]*od^]MUyr;iX4g\<,6g+Nz콺Nsl2ڀhU&6@ K  oΔy{r'gCb-V8_x묧.DMmߛY+u}G6mŜAU_؏FQQ36,&(]s*G/i[շb0vx-iBR%U|n-{B^%<6' ,Bxq_Êy@sC3*rbO[{HmzNirgB%XwS{{gJ%$Za e9*Nuɬ/Q h]13{Ub_mSBV۳R qvzu31eC۔~O\oo*m37RpƼs=J ;?lAn yܩ݂=pYqߵI"32a R^KǰϑVY3Tfg:2֞-0θ]F迮QWpLwQSֆiޱdPdIm+ߘ|~ZFZЂ7@ aiREo5yC{]w{sl5徿wxMC=r(ܳ ']Ll+t*IЮ8*Vt %5Z@6p po7Lv;E|6-Czps[,p@X?9}`< U/vR"mJ䲶ԍTY60YO4# u|{X@ry(\r4k詌-V|"6 2v؎GGҰv%ojD+#Hy=S\֛ ֧ T,zPrW#T!Y3CNi̬RnQ Y~\Sg4n}=ܑ&w  zl`Qo ksyyzY@xNv;'f?lĘ\r&oxY?oo\fՠc"2IgE-^*AlU2LYo2 Qvs?)~󻌀P0 -ڋ Qo{7wf NʖqQhgz V%`e鯝/K>-z jp |GU(7,p3At L̈bX;tv;qWm* +(]8/LˆKr -ڥS>T =t9_򛷔p-٫eOwNwVݗ6&.x م k>Qo^TŞ.Vbvl~WX/cg |]wy%nmBros'ez:ײA %Hm{8r0bOtɐa])^\2+d`"^,d7NLȷfwasتI{yCMd?!sf䶽\:`;_z#,Ŷ8v{axaW{D/nt:p FW2ՏS€ugvQ#x ?`2'myݳucy{F.#"Ʌ~hݲʨ$}S2{p0Ga!mm9?%7F/&˚j/ dǍ$j7݌_b΁9:ﱟ)+۰bh[rnO'#! .FGbв%4Ũ[J`#fUc&ǼU/yp m^{ &HDwǧ`/դ '9볔H5ܫI_ן64VQ;{ѣP&0L.m:ʚ%<טNŮBt//eo~f?9Q|]­D1!$xR߹-պA㱱6*:>J4v~/嗅<Ӑa4PW븫TexP6x( AÕ߃@jK&IU<)r2w@toLRߣ݄.ʬǃM{Q 9KˬEL!A|&26vnj'*| ƐGKwy.. 
d/֡"s h|J0V حף;yvXdEWWi2n8w9T)~aKOk~ * 9G2?P-9a 0@tXyZE,pqs\rp MBaR+ERu/*[Sorww"+8¶ hc 笛K(> {61=LņC$)XfB¡XӥB!̽i\D%]ʲjzp5 h铮C'PU&jDFI^#F IjɫiYo\Fρa4_7V{M:_bɺ'ss:5D{|cMȨ*Ndxȵ< X h6sQ5YC€?#(o¾NdB.k)f֯)ޤUk!4\Q<0cбYTwͼν1WW8 Z9/@~suzx,OD$z ~=R 0eUxs'|eVQu'4i(T>}zݷDbzU= pL{sաBqm \u qSAe/| 0a?X~X8 %QG#adNrಟU(HO(٥bp~\h6\lMӚF<)E7h^Cؚmcm{~%~TQ,_3y42h崋 rT &-^IW"$@̕| he]I y;qao]2z WUɲ뵧 /,„ߙQTzgJھΔB_\3Y3|>=M֥i)3i6S@yƋwjJG ~p5T=-0uDD8V~9Wj>9f(/ ^#~Uɶ2_+a ѐJ6WVYty|жr!x ȍ$QSx<[:iM!c\&-:+"::tvWy Re^ IgE[BcJz$ v"͖85)4l3^a._~&11Yv_+ӒZL:1c"1#heGb{jV/0c6׋W,n3u1vYJheC]I?i5Xndޡ21C=SmؼՊE[:dSTE,JydK"NQp;ժSs=ws` ǞU䍒4珓"w+׿$R<&مvgW!W}F+:(E'Ts蹟{L/Oچ E cEs[j&Z+LtF@i:9-h|2ʲqIF>XS*km<7r0VT} (V>(J#QP2g2ڵ@hYAv_Y$eߺ^~>1}8sVxhP:ѩf)ژsidOv6X~^D#Ā9͝,uD+SJ$}BKp(>@;n%_lًKV_:G}i='~{fe*6ǜ{eoqgFt;kRMڶ|Ӕ=O>ȣ3f^q5'Y~PDpO܉!єdgT(A-QYw=J?`xդTԕN}J4TO[(Ȟ`{Ț=c&ڧJ1@,&YHjZyC:;=ʜ9qG`0aOLBKlxQxEJ4*SjG@E5&F-̙%[VSTh RkIYslI֮yYCBw:awϒV %Β­ͧ-ѼX MpKvøJjn)Kk.V43مd^qz:s`UmP5OުCsU:4xyBF5EiKYRtgڅ鷙-spJ? pi~*J"(tC.VVUyDې*#aS\ 5hr,Y9zNROroq@k-%8x'Tl:j~(WŝTDo#ç Jz3W`,Ҡ N6cԀ<&{!Kn%S D&{7YMmN#/44F&05ԠkNJ"/r%?KZ'QjM)SeރM+8HR-xunp`b5ļMi*VM)!nWC7D%! ~SjTW*dÝTϷ|9X}@*?k#nt)V3c7\/q9st2 (3fiԥufnaVIAh4 axKqٚW+݊۩:71F^b6,PI#hW5R33İ6D].=aT-m=)d;!UVbJe>N bӨ[EK‰iiK?F#dvBK@ p[ZݒQc֫Q6a5`sOZ[}mCNp|k @! f͚e1!~#Q]喿/m;&#^yz8" ){zJ#h!(mj?sz<4>ޮ5m~Ĉ1BԀYBV|_X@;}O8 !46y]S֩)Dǿ;(*BKխTrkt&2aG+&[dQnč[ߜ5kK64_5aUL\bjE;7n)tnFyZ <.L)n)vjuwZ3n7RrȏbHYLL%V@kWRlyLx m{Wmރ;K>?O˰oHiAt75>^LhUvI~Y%ռwfIs]5i/Rv\3nrAiޕHب\5f5=T/;ᓁ^so1 /nvTwo] ~moG{r4Jڪ:LhGy]xM5,i*R#~EyM-gw_p<;gX Vy!hX,8Ya_Y9%n2cqrm e{SKhdDBY4<îKC?e} *a@G2?HqH.*tn7m_V.f$ hIx`f°ʪ *؍Wh]c?˒W4M1|͛i4b1ǚBH2h|Vk׭2ΚVY #ĩ+8o1jAԒ皃 K;e5gen_|8\IֺuYWTUR5eVZvӯ[{sTX%%~_Č}Ucن㜧òRW&bɆHTl*Jvehp*"}Jv QStr#>9eG֔|ޟO fT2ZeYlvNZk_ C3(Z^f9U\{.J]%$bYsg5h'g+䭷X2s$\ OC'!;TXLXn<!N ]eZa AN.^z/> V%iU] Td\>,Ӷ8T$18uLe)0JU#l5AV_sVmo_- wF󕤤+yPF(5^;b뗪4G7_z|ݚh O}uD1B_ꀬD*Jum+)j:TLGa4˳gs[涼Nד7 %J3̀g?s! 
M# O&KgDH+)?Sԗ9Iݛz3PI$jQOnfp yşt S nrJ@n8X I$9uUd)I:r~ֺnIYpNd4f5& uܶZPZ IuHRcu/< eVjw;+mE2t eV$t@BAD%=l8z2^10VayY za]u퀘Oʦ?W|bD.EGLjl((K5ɷʻ!uėʲAЈ`Q|+GKc4W""{LvQS]:ѓ(mYQpfřQAIȥlpBm[s+ڸ.M5 Z"eQ;2"@n7CԇlB!*<\dn`@A>*kӲ %x&Gwl[ZWHa"VYxN_IR+#T1xPw% Mފq;^KTj}d0 ݏU튑k;o;LD*eUnu=# Ǡi!u:b'>:vףߴr] YPig5*{΁(g,>eds;a< u1۫Rt Ux^m*-'p2R,Q;B䚳TJV#b^:I4kW<'|,S08k@.6:Nv 0Veq,ĨH@IuބeUԟ dr}Aן4V ZHNP>DX~0Hfb|VY>!/La1PDJnuʷ+y6\(Cwp.=enU*whvE $:[?>{k)xs8-&` ׭ B@s>NS;[}\"GǔFC d7ÞCp$c*鋃[R~a;LsRsKe-JTBιfC`Hx-_PzSr~785RE@4C}ũ`/ϴuUfKծSpw) 4h (t2=@`h_2#1fSg_RtiGx7Ty1lzuP u > ̖('hRrF.U{i(%7R| Laȩf6-bwُ+z1i9(XZ^='\;n ~whwl*͑ጴyQws%g3hA n{qTe ;}c:&FSC埪E+2rlws_1}֥RʘarئvS>ݛo])QyE%zE/-&l銻c K;uQ<\(3&~(wRBƍ!Ls?CMy>ұ-;f]K+1&۽>!G{M%Jm$v~~hV5B](3/~PW#~ַwv8C0> "}z1hos_QLqz)\>5UiN3), .cѾr,%/z7K$>Tn#F+AZSPj@ȤkQZIdda&La$ԋ,f3f$~Ii[پZTKЩ,K2&aYde,)d#)`%KQa&JVUK1-6a#l1Mf#YkYbX7ĭ1$n+OcT(5eE+L0MŒCI!W#&fߦ)K6h:&ӓL$NK6/VeѮk<]g%{S̀& U1"m{~ CgyWЉ!#~,cԐ+>ٵGq\1J)ՉHr=+gx9jxMF7rM&Gx !kmΨL7?H;lu>ҷ\7{E2-^B\KKdH1mHOW~(,GTdd}E?)??GG{2V6R\ i lxu[EMKJP?J؏y& cĔ@`dG@@-1ƽíuš a:u"فv6ca-tb^3>)R"T_;i#|WnNW*NoHdOk0F%skwQOwX+ IY5A0O{;x0]- U/eu<ј {kriܤ yF ^qz!@N r;`mGۯ7|#WqO/EvEHZVRFqFFFFFFG">e9+$TH!"bs_tw/i)/YXd{ΔF%Ҷ~=Sv"O_;6ϤS{ @lWi2&<-̼`)OڱԸ1=`FFF~|{`$|%Xq2kq\Uڕ뒸lB[=ǯ3a_zuaU#@ʴDHe4 V^oz! 97'i q3=Q @#vN`@tNr|8s !SƝYT,붥C+P*NF$gͣ1 ƥF}.E0E" 1?L,3S>n@xqNR [h)T8ANۛ*V09N9/6ޡfμ!l_/{.@~}Zh-8i 5m;]My#y)iɖ].V+W:ٯ W c6)u@jvs?{G@SJ}UEYIXPGxdo$OC uO=-K_tͶ:Xtz7)}Tgruw4[~Pmbݙa#Mq v6/v~_|ylsx}(wn^Œ% D5W-efY#XF,86`rCKmpxp:h$,/T|FԿ8΀Aԃ&1iAx!#%>CR?ҩv(9?_p#y?FadhˊX>-b$<3̼?DqJoߏ33{ Zh/pn iIH@jg+**Dn@'e lԮ`* ɔ@܃d<XK%%;oL+(_ax 6mh+S^>6y;'5FV&xFabO~U ;++UqF*XXi*a꽛##b qbaY9;.}T$T#Y]]N6"a+l WX6ÿtm c>C->=FA|:Mz(ot {WF . O}ZZi4MEZv [Z# 4t]  ;nt9NU&YUc3n!!?ɖ5tu5d }U:XŢ??Qa/̐}k: _+}婢"" Ehɓ&,뮺뮲U**UT%)UU3xpfYIdǜc,>G8\\gTyÜi%do\c?i ,~ӹ*ӝv'<:VA mJ`S0llņB~dQ|߮ͬݦ )ǫvأB3MG2u 8u^cRـ`Nh` 44D+#c0vC;/ B똝8)OE- #⎃uz{ǣ')AȈ :Id5EZ>7Ek { Hu |R$ Y˸6C)UI_1Y+? 
\[U]yY#bcjRs7bqޥGS^sgۘqY0Ar2vyWy` ,X^&'ĂXs|^9zz>_\i{0"J.2,#R{7?K?)J҂??_2@ʆiR v 30h ˵ -4reN4[&mN&pp:]wM/5)޹rB۶={t2%Wf G z(vp{ugޤ|*B٫Xʭ[T642jQ!QZNl}l1lGXBkG}U% {d5K6^y<Ӝg}ﻟ}=w3L;u^#gm(cدP B ( kj=I㢁}wzsd=ݽ)<;oIyヤ#4#%J*SC]>Cz’\U):wGZe Rʡw^Slp)UfVQ5R^-o/baAI $&Ji|37> *}*[pdMreppi^ AdŔہDbqkH0ȘXxLőYbDI}-b1K,lahTI74 n+/"""+"!^B!`}ۭj7aF䱺kn7%c!ǻUxq^Lf{̒2YY^0umٚn;B L! $e4W{!%IVdv$OWճḢs5VvSi3ֽK:)>-#[xi5sv*1E;|ip6]WFB̨Y#2 YW v^]\Y8ao,%S)|b8DN'8˫-n^R'GQ0ʲƻ5TnN7yN3yi8vvgE3]L50(νkvg 5?1L)8܉w+*U'2 $A" ȄЍfb)F ` (*0a@` FA—VYyEjYKOoԧKV]5=5z/ X>ό?ԌV]EwpWzr|\2`Y&K"1;= OKӃKU.4q#pv?1iѳ XIYI,2&JTK eB0R-*XX)dLc5)4oKV[MLCpJ\IY-LJ³,&fafA7li{t&&0e- +!̌U3&e `$Rlfc32'挠PanA AEQ&VOp,e#0Qivh&磳ů 25e-.[IDlSBk9ZБ$AuIq~Uy`HtgyEOI"JNGIGIfk4%Iٷ F7pl2SJTe[v'a|Dԗ$7OFu&$:y)u'zjc֝2&퉀ԻZf.t9Of&^Qg zRs{O%<:zI'w{nI7F;hYmtnpeO gN< 6bjw4T% "AYT,'JI7Z05X3TQy& *D"-M %2J bF0Q.%j* 2La%r(RaF6p G*XKѐJjʳCLZ bYO6yKLM<~o<_dӞϪ6\ӟv"LmsE ʦ pkj1Ra'^Qv55Mbhªf 1-CrG&tz1jN*,Bֱhg2 *Ukji0y.䷉edQ &־(+=QFqv/Z*X ەȄbb0YyjUՓYen\s?2KF#)4j[SkAf1a9VG LcߵSFoL∎ *"ߌ\iVr?&2I"jKb4ǯc6r^#U&XW=ߛmub0 _1Y4eWo}Njd!nxnuՙQE  l " (, =^=GG~7[2v;>|:e$2> -cmB]NHK㵄j I9 6U%'sN8h?wǩA|~ $Д#]\Ⴀ@j{QNvQ1mƛi AWύ2MHMOWv eFe J )YRC{3ox;UUU[-kW'.G%YkA41[m^dL,,,Eae\ϒlJX3`̙3Jތ101\WtYC$B@>.lq׽Qqa$A;z/Ea6ZV+PʇKf,̍mt(bf2IuT65CjI˗8m h n)ڍTo=Ufss=aNRxB2}ICmm (bKʹ%a0)UDaUeTrbڤ ʧ25s6Pҫ Y0C+CTAMs15I6X AK2&@Z 2emkM-fثk# H*D~o-Z֬ͦ1-s22`(.bbXfwXՃ4mk)SIc[kkFeiH̔Kfؘ_9jf $-eRV k#*QfժMr-52Y64eQK2?1>&|FvFBHS*a <wǩ7]8Wg%O8uV~VU=~>cjeO;~ZQG 'Xu?$>@U4YXoVX.Ho<6u5gvC^ۋǕ= .\ekvY8Yw_C0<{a8U|5f2# [奶{=f W뫳UOn:~:sPNGh7ͼݻ/ͭ>sNN}<`\:\zrw<9e 핪]ͮ;UkwO7:\܉qe 7k;N %=°a)^bi4ZKQb1, [bf_Vj6j#'0ƣ *gV,%,AdrVʱC*},eSv&Rlʰha[772%dbn50*jb-CdbN F5p2je53$d8%HBL!I,La6ZRE- i%dL,5Qa-"J(0$fڶ *I- 2kZ8I5F\d&B86g F# `!P%c ".,JWMfXLb̎nؑ\.47 EZ `1ET7 @(e2N/7ߙhnj7E+풬L$d&IBHXJPXK*&0KP1 bL1XB$-d݁EW[dB|$W{JR $$re)"q貨g M-)0}H6ơ1J%R2WWDG=~:6ʧn%'`*fWML=OR!Ν FH:~Sa_f[@ZF.;51K:#[d{F$x8+" lt{0u2L1#O[E" Lcu@rX ~ÎWjvڭ9i~iOsZf-1y".Ē,F}avIkYl"KZ>?R_1XE0,1=EG& VK Ȑ0KR"3$I|b+䰝?/nG 
Mwޭ/A?ꅾzQueϘ*B;BIfwnc~0}>ˀKmji0'7J~z4f>A8ۃ\}ƣNV4gŚtQ!戝 & _ܫIJUzDr޽ݨڇ|L7ɨg?9 ͮ)4E/oQ뗷8{<7}enb~6j?/oj󩉋"`e`Rea_R%R²dR3*LT1_ CVCF0ÆIMkE1bVT2de$2Y,LȌV)YX10XX2VaVFF0H*E bAETff1%Xe!KĖ0c(X0e,0b# `b,ʲ֣@X/PK?Ҥ}F~?~=7z$bb$mn Im y9Z`fĒ./<Or+ЋV\ˌOhnR)p[Dn HL0@ONKzpob]`h׃Q7h>{/7o¿/0~uE_G]8cV(WrTfaǗJ֌7+^`!7DX;Bnx4vѽe,+Dv?$}jB&ȗd_5|~ N5YH`O6.L=oۨOxN66]"cv&L y<>6ٳR$` f,ePOR %&oy?b5+PX wKXٿe<f5r$mCY@n(^=|1v434-x+X0ҿd:;AiE_, x/bB1!A` i %# ŋ@H 3^PDEA@P ffR̊ `Y@2bĬ33%, 0 BHMm޿5X_5S߷w `^2Xq~E,R1h4Fݽz6-"1A㷩5 >+&n:0!w}Ͷq|_S{i>wuٞA҉34|Qkڥͷn <Ǭ1ޛo|l'n{<&iۍtٞa\u;ө,ĹWH }}0Z&u cX 4IaBE!cRyY/1[~8̿&789@?b/=e݆'8rXY Vŵ:f ߵ-#]_d/䰱> CR3a!bbI'xʁnT QBty;Ԓ2Z%|_+udu8&\`FI .4ϰ:}Dd>_ A,s-Kgwk>wm@o R\'TLvLs~ wȮѭp4h>p">s.֡B@QS#U9Z[αsFwf,#̜k($pZ̻~.}(qRݸF!Q$ 8® %`0B,<:^ֵ;Gm6YUguLĞڏ=]z?dP>)>8r-;KhP0nPF xz.?n1Y'dSJ?ʍs H5nwEՆi|g|[3a`C$d΂{N-Tc gi*Oo츿_~2fK%P߃UG>W8 ҍ'KPi"YdW½h3Д< G=&@L@bZb^!^gg |jKİePKؓ@Bj$>MRK>>FLҡ!,,LVELQ˳??T׭oLQ>m(2bR1~)*(_Mu}>&saF$" HfuڟI-5oԧ`|-*T0/'ǣͩ?%weᘩBT]AFu$ { 2ۿeDAuLJn}+9w!ANBʗP9wZ1'`e,z?8/sI6v;ڿ:+$$QM`36^ےOz w& ΢j+Լ~f˽PJGز o)tj}p(d$ObBaSϚC.VQٻ(ǝ :-&Wr X2#UVV }ioJ*h jV)hjU]+jtbŬZHD!2( ,YE3 ̃2S*tV[KC+etMqSg'+[ej|iA@LwZ^ػ^/_lX8jb`e.8 XUcKih0X,1F%oVњ 0* 0 30rh`љ.VT6Q-K3*R`tVT+ T1Xn0j)[نQ"(#DȭF 1+#" 0n18,SpFa*12KUqepRZ@9`Vae%deF$W ,2ĕ7WJ1Ho6VeϝY-o0ɋ a*ɆVF,bb+2FbXaf ċ XlIE#DS%±d ʖ㪑+!6ҦY#EML3*1XSUҶhh3cQj\2$v}DLVU &c3,VE6ZN4hUfkF&3\Ym534hѱL0G#Um2%,mVao74eYeX0427CUV"N*v` +I1Y*9r8-F83 )XG[4lt7=Rbv6i֛˒ܻá4nӃ߻Ksv;[& ,V,hphxF$ݏ9mn4(0LpO@N%ާ-5T!DE kzQEF{=bp͞e'afa%Bs7Bx ONA'iL)@ _MkTZ7F>2N;W+[s+X?hr2VVI?׹\Tc ?:@8LZfRF`էj \ۓ1+SR=axH%pԶ,+? 4R|rU#1KfĐ*Oei 莚oyI2gPf\S|[I/a,?Ą ;̄U5лēp=Ppa8uw͟h۳"l7Ą‹ I\#n;<3r"ifH0c.ٻkR$ c<>{ Gdz8罟Z^I@rV\/5]*(l5q^! s%Ave: -3?V ,y>Fӑe8Z6,#K*!;׺k1g{vҙY)+>=d<P MYy>o:ӓfId!V (P V,U0/}~d-h>kߊKѶÞt1oL8r(]t x@@qBRII63p?1oY%0xg W l ֨:η*Q墮*_-Qo;`>ߛNBށU|/%{h%|/B FQ@= f_ 0tG5I`&5}Rz`Fnn<$5`$wv l$MIRmfwPu0ѽ0=ʀ\|k*p#XVB ZkH|R2i6Tx9F[Mϻ=f<[g3: XzԖGp#UaeSEFEj&h3S$J)ŐbȫPbdLKE Fd0Z`1F&Ub32@e2eaj ɦMHefBULHI" @QX@A<4a_|172x܌Զ쐕n|ޖ"K40DhWb) /kC4! 
'CUq+~n9ֈF|<0E 8rWB;RBWios(։igh$3(ODсt[O4Ϝ~NFh~,21/2А98mh$F8NVā4 {|G;j\G[DcZp~3)DVzDs&틴aI M9؆0}FvEaܬU W`X2Y͙;=OʶF~%&VBaG? \Aպli}ʒT:0lҙ[CGa9lglI-BiG$0P >; `:-H6 ^ sO:4_*(o{d:ʃQ.~ t7rv1vj=J+֓Iz*Y"[9+KBZyڗ/ju_~S$IHUAD$A HPPbP$S6+ ~y#d;B)i=:nt܆RYAG`1 Kmfg SWigQʖ-nAzI0Cfӌ*ϛ0lq e=}Y ᫬uLJ^oDsߡ+D 3^TC'3w[.(<\B@1Y(AϿepE@!F?2;7O1qw( ;rA`&M_U+ŭkh|-\@e Lh@Vѐ!+@!dl 1LFWQd(kFh/;cDr׾M'@FsFUs(9f0H`! Đ"4u$g՝q|!Ϭ$7PqGjik*Hbqg`!rqA8ɗ !jhBR%&3|u||L7>j&xc B` w;XaCᏹ<r&z!Ѹd@ Pz]3L㷎Xk cǹo\j_OǚRieNstp=TlIOCbKaqD-=7gpo/}'jS>oGΎ'?nT_m6Y{[D"氎G[::B;Xn BЛ]ԮNBRO;WK>F]8_8 "V[F2hyw]W!S浝}h=;NyR)mGf=x:3 HgiϕgĢE'g@ XH6*@ F;^;/?a'_IVfJ&Þcal:Ǔ\~vpPd"pɥ1.ۡ8$Sw=2SK>.ۅlm=ażV[!qhoJݰ^8$I& !ey j+z٥ǚ6UM}%nKQWDOwy2lU-2?+8 0ȉ0rbmyDپ(6pt]ԚUrKSPE 4ii~}}p5~7IYڵ.@ Z^IG\y՘۬h qa{a~@7E7oY=lDq<&Y;|.lc3:mAG-Dݥ-bHgǶ'fkhCM){cʱJT` 1@J{(mvTeGP{q ZxKk) !wrmP-WgSWljaPAzՎ.X -7KXxa;ZWO0uCT/^Y %jwjK~Slڗ$]{EKolTe%0 /Y<;ߣwqͦm|\'Z'akPujH 3 ԔL&b\%ͩmqw;ه & zkx1$3@7\)DcEK? W8 o<>m/;6} ΙL&Rx3eF[[X^*ʢ[};vD-]rg^>TjpF,4iN .d_5|Ɋp0:1Hv)Iuܭnw'q9z{JUm!8 ?Bj~.$b/9{QM'>;xV/00`tesD+%5wWjrcN>2a;U;w> czElӍW.&8kr Uy]**QBxP EG}/)/sr4o[ylOsnӃslgiO?gX5qΧV q>#y:+[ix{u75\z}U<ɽ!˰xDž8ƪKZ^xrv:穛)bmnlw$|S3Kq,aj*UŊQ8&8MmܙJN߬VaN`rʛ qxu^Un1º.%s 1CW>uǧqW*[ a 999rr9N\9G'.N\G#'.NG#aYFQe,:ΛNV\aWn8 v\Cl]]z_ޏLUy.r4hqGe8U՝to,^\|]TsSp4ONxyCGnP<_n9ML\zL ոv(Z{Ʈq199vtru([8ޞY&$ʛf   ][׎,QQGM7DTwzu`44Gt~krI|;slwXꮰ\H\=%N)젬銄aoQZɸÖgU:m\il ^I=t2'~iڬQ$&V~EN"D  x:X0\?u >4?1W@jqx4I]wpUP0yDm֦uأDV9=9&KF*)l-Q-*4.">#T棋yOp~ݑ>_n.h0QXZ >vrozYCIkEIC:^KZ0H ؖ 4h#Z t: t=*Y__yfҏ#}JC*lf4*8<'o#W=8Vd7;=xLsm:9M]g:[NI&߹5N'iFMx|>Ӱ+_ꇝs*0ƍ8GBL xfL'o`'>Hvݷmvaޜd+\fqg:w߽^sVɖ c,I0İ,œ*{̍dȬ2RaXaK?}7&+Red2adX %ab3֓őM0~W-+#1b̲02LVT}fʏ74I0f2guETef, ab02&,|6LC+Ψ_Auh~R FNyL ߗˮڙZ$]mƙ>NHB0c9r`𶚔)9Vͥq8{}r#,ժG=cV{Vw<@.@Bj RħY4^ ǡ~ڴN 14 }6$]DŽ"iyn##b=[P}7ɿ> 螥͐䘎m\S6tĞ4-OucЩ Gord>g$fT9G^٭6 Zl|?Hs+FKK9F4Q )_Y;UôH ( -s%NXxrD?%`&ŽѸd8 2)^.Ձ@A$8&(MeL|eSVK<:d3VAAK{ A.s27駴x5bDzY%IJ,vS:Y^@'?aM'`@ 0q~5${( #ŠWqW]XI/u1OszR5ŀ^?TT\ }Uxt7:l2dĴla*m62af[2^"D"DUY,&X\VժR3Ķ 62Z9gX00`f3ehIFOVʶ0|}N0GU >ff_}Wu4F.e 7x$f=x8 C 
`za)-iZ\؎Cahet(o?\ y #VCUl#;u&3h~t<=VI&>s;MxS%0K^äsP==zoWi ʽ-a&"o;U=>^p AY迫5<:_6m}ebSZZٗ9e]#E]q;#TKƪ7\E: ۂ쿵 x?[ xl-e,X=s\d8ǣrY]5nY׹ׇ5#E}~rzzaSA%7ڲzm[YѾ6\]/Nm9ǹg人vgzn69mvEpWqVn88ppppp<S)*JßJ ]ڴUػ}企Wh'mf[>vTVPHA_iP rN. V ~ƊҢ [i *`OE0 E_w)Mhs;= CVVibЍzR_SGo/\=**U½An-L6Yا-h:Gˋw9"U2H& #)bY_E⾣ޯ =w.T²bBqnw7Mn,_X\Urcu:+## 6d)w|ze`ܟnBNĻ 7ʸ̲WUL$̢ތ)ha.QߣU+yΩ^)85W-*З7%Vh+Õub&ebYk\ײc,;mF.3&gЭh1E}ΎMK%6vD`e$"GhЗK7HbIӝ|6+)ܿ.$p\_`5y@$ KU~[5v|j{ŭcB[ %hN5ߦCoᚔg>V,xeʲ~]I'/ u_ y}mҮC. ` ܾ9DE\4$ē N$ 3yF˘5MFUVdaŔVKebұbuFbfe fY0ɆefYGγu>TW.ZU}rWJ^էA Vs9GX~$͞!0>|\r#™+˝~Eݿjw1tw8W<+!(5[ᅴxLWn#*v+%qWvu{\jZ5{+ѴmFѽ[-<ԼWQFC;ttreh1W@t:O;ע2"0FEddUUVHaeY0e7.F^qYFe̲p6UZX2VRʊцF%XH:߮_/\+=5]C[TvMPʙI:g\%_oSՅBkfLH@d$5C no \^.:ŞjaB&1+bT` # /zN$yZ<-2b@聉#]g9.lj}0*'N@co TWˠ:bDDC S^?'1`Gt)[؍+f`l]*i2hSxԗ<:ydnb[G+P.j0|]ShݭfD ܇HFA.+K%~J섰݃!`8%g\PLNl6;DF ^&$"h_}\}e^%ֱ}K@t'u̟- $ɀP`R&}N_ {Ê\$# rdCoN5{Z tφ%<`lƕ; aeU70>J"i9s 19K6X5O;S @r S0_S0?wlqK Bby̭`h1.# h71QA ӱ-9,f_ nnFˆe ?o &G礀pJm4Ky;ǫ WsbqN')xrY[yЫZ.gUSA*DQ0 fQqmދ(∖wh,%ʟ$_>eb6c5$)^kb_N$01+M" sOQw{\GUm4aKX'mVBeF"0a8;½}ג\; o}=U~pOGSyGÁPf#5w-gv{H ЛC@z! PHҷ_^)sY9–y7'_ =pdCj9Ga2P`g+O\JDgrNmubBTF=2 8 \^ݺڶڄ6fG$,(DĀJAIG[tj\$XE4Փ2ff3j)a6%UC̪YC|g}_9:26jZ/]a/HN*H}YUeWejT,#u +lpK&A$;E#=<DzI_V{85#/Jrosg#9W{Hk.jjDk_R+a( 00D`[$ рRݺYm֠GB:04ѵLj!ٯ!Hwf  Bi=|cvFգ l Fש{ >MwuuZgƥ ߹k= hglU\k"VNIaI{ʙ6;7>;LFrƲK2yvvXɏ+6p!`" k`kg]k[-oV} ;Jh&mn!"li]v+ zi7ޏPG.T})o՝-L4j8H#2HVck8WV|x,&a?J2ON\wmA'S|PKainOq5[ gۻ}Hr aUiM(m4c}4(KۨF}\'و,ik*T+[?_NƵE-wf k۴H]J:x kV|khFC+Ѹ"|yaz"C6%|Ŗ=\g/]=In[=jCOPrzn{.W` D؏!z G%"Fzӵ" C T gM1T-F'}N_諺xNL@ x@jQS mXCBϱhMOT˜k_d:7=Lz|JrVpn3h;1 =? @bOF5_ˑ†Vb,öjj6_. 
klĪԿԼ/0Z/dJ*>i+Tm>xs>Yy.zjs(QfHnwK)=IFkbBJ]sHӴLEso me`W)фA];tѕDlmPD?Qo+O˺k=;wAoO'1z0ARv~,Wz$`+-9+xG3?gnρ~ qsbP!1Yƶ6kZ[{w5,(&A$۟5rw|Z<&jb:v)r ^gYB0xw6JŲAVA, 77*뻌ߟX!0b@|z~x-)KFo>7X?T|hoǼ}+|_¼ B 4 R .qGHߎ,ټ?ZE!,E:ܘ,0&FUd+"j7DSn`tLDђJǷdjZj%9l!K.j$)җQ#zi{{^ H ld$FjŨA74SCȇ¤={̅+簤C{FI%W:0ŀ ܺemnPNU,##z4RRxݖ1gH* mKoZjv27*ۘ1eWʚUlª̩޽zcߠl}5b>n- 8Cq@`hiV~gJ#f$ŔC]V9uR%b(y8?>`>Uu^~9:ӪqϞ#ߜD/R;d<%w߻U:4ўû8,&i&p7Ǖy^Wx^WvU5=l\GzN-:\OeҵYHZ5^}LI1& &$s6jsU2UfR Zx|Lٛ|05'`tG"ąY5u>[6Uvܮ(?%55*V wĈuؘ8=~/~4(w$(@Hͬw~gՂiq] pd֪JAS9Us-h17`Ua0Q\Tp/ZXJQ,d$ N$[7}g`Ę!J /˱ܺW<\6Nljk  Tv@3>><ZtRm΍>:yO3|;K%UEe)>???S(jA,dhѕVLP hʍS%b &PcXiM>EK*w@]u2% $%?&ZMSﻲ^{MLnZy5ndYU㝢Œ{:b$ 'e<4XSX*E[*"`tR%Bq4:&'b?? b~xkc-IK k)hf 5&oqa&Lѹp~Z}OޜU-u5W*v&l7ԖЊD2đ9I2!9ljb] mVW|MFPV\v!I JsYfNmj]Ys]ȬCmqGڸ יOD[f~>^d*GQ56VU!(zn߶xAaq{brt^sd3k=D/J1t,xkmA[%ӊUCwz|-X b@KCLT:@EY`шqf3#0\F6-Sfn2{R״@x!//ͼ_ehG ZKW>Mgu7?j?XЌʍ.8pIqj^o}َyib`E,i>zh تػvM;]h6R ;UVJ ό1^V7]2j#?,qߒD_\@sdǿviA}Xb)v4Fz} F>_dv';hZX-ur(NHkSV|F gk_$%ųPg2gV?WcT77塦ᅟ30.1b=E.?̻Iu'3c2GtL=]~3fr>s(#?ߗ#Ii|_N3xpꏓ"SYR:KnjQ>'O=I.40MVd'>hZ/t^=GgdLkX%o; Ύm f7:)29_zpd;2CQw2v5?6PbE?jTA] ,/mŦom)lG?6J)I ȮUu kN|`a-S?C/{ .!Ȧ$m&.KڹGi6{6+lq2ɡ36؋h #nKD (=?Yޒ1EZ1p;%!5 I0@{C 9͙z%yuwח`VKq}o5noTZׂEL' FX%ѿ]3{ /^8FѹUP r HQw8:\{5A&ir gwi9h[dE/Œz&5/Z[G|=y=L$Dzi}A=>W;x.7f%d}X~۔G~֝7S͜F8fj{<" U3^5ߊu:2pQ;=Cevf3 5tCycS9CP}Wphxjrb-ڏ~J (;=7*t_W*naQa $':*G *VK`q/,gziy|іxNˇ%™Z6m}naF~&p^6yH$}4?/9vokfgWW>4O_%h|Xt`d%aX*LH|_H>% Y޶^z\Ƌ[_[>Y;=ܳ- BW6qH>`$Y~Ii &#../dZ04z_E +8j)&Po=:٫d&6<.us†Q=D,є(ɳm?}M#D&ޖxsrT&M(rז5( z6bǐҠiF5qN 059oP:oo UӁߗSX Li}R:Yn%PJuWzMp3{<=W֤uؿtSH1&԰Zق 6(KMk!EVa=䡄%ÚL5yKϬ;]7qر6]^6F[-to D7\oCX[XG>g9p5.|FtIX X%fmzr`L=1ө!?Zt;F2.O4=@'o\ʭH]_ᵳl7  ݷTT1RXCG%gyҗld4Jwd"  r8ܕ͋~(XhS;D-~]>}7ހ:Z:#4f4zzP)_W{eG>oZ%|lOk9/X[3orEƔ)l1iͰFog^{}-c$뷐lS] 3K:5Mq+ClZ`AaU7ƒbP9ITZ\ζp ٲK'hW QBغE+ r~|>p ]ˆFQ!8V= {rX[ݴziCtZ)م6 (x< خ<>|b1;syķ1QYOK>t_[z<_w1cLa:uϗ?O}ݐګ4{ /ϫw=񕪝r[L'E{{k5jz艱)ŇRvZ@Xﬗ[ܿ#gVB-ܬZD|s\i+spcAt'7 Wk_!"dYDPq! 
Eіpb Y O}v*op +>>WW$~> CUZǯt2R$j]jOyߜEާl'aْp{s:*'T y/aǍsXEM˨nTy U܄a1@5fE[ZNnwc+ dɇg.Bdn`Z !$HTK줐AFbISMֲ'VWrXa/@q7aSyԲeVxUXgܤq%V iOVKuF4tZDl~[0KqN|N$Z>Z[G?jd}-NIl2*9I'-8"aAStdN'*:!>Z?]Q}c*̦K |}\AVlEV|T}'ŕEU@t IUfQ.q_$I-dS __SRE 鮃&딱n8l ho'T S\c4c4@N\AY]vñHCѩ4[ˇHɠd=o+KZwHS.>o=9_6z=&^kڱ{+|{.]Lk0ji4 u::1_a܊Dc{'^(dր{I4pSDh0) >@$z!|Q̪ؿ:q$\hgăgJa1 REHE]f{ ZZGe·1\!$ڱi >TbM-FH[h;1'*"@Z`оI| joWBp$Ć7s E08Ϫ! _ RcDeB#Ӭ97lS@눂j}oپbu3#4g,yΞV%y^li_yb70qZ| =SFB;@xmPęuV77(AB%JX% xln({ʩ3c}a ƚ I%B5 3.8$AiR /%IroS1~}̭VTaRDġYʰ2%"2!`1ѕSrҺ7ɑLykAٙQ[8B߈lLԪ &陷hצ ~V!RGSN?7t l;o|sRٮFqi8+`[ul& 0̵/qOɻۙzt>-nB]sc}CB@1AmW0>u!b[k0ԹGCT `6^Vqjɠ1} j%*r͛Xq5{&-4AbYVK3VZ]=C98`>~_oFǰTṂ0*H `<1'^Z(bJ(C1&${u5Qk1V~M˃#e1 1bG-d+"\6jns"Reb yk6Rn`@P|It8~wQxOܺ / jg! aSĘ I> Bn ýȝg=g^=4ѧ4-*.mFg|o>?sY7B[ ȼ]u/g?Ԛ9ѕXS RdU_{_K55*=4a_|C];s(d  aB%px6eV8_#C8a J EV te?l| Qw>j\y({:OR}8&%7H;>WȒWA[\21X2o}+rp/jblQ1& r(ialԤi2 6̼QXUeK"")Pc%DI)wYB=[ܔ_I]v?wWUi8^qV ZH$$/2F~Ҫ7C_GtOĨZ/m^I^̭Sˈ<8_ĮQ줕$`Lh'1VE26 tG/16Bť*c]} =_v㼼DF#^tPSzr|2SyW)'UeOEᲝN{nRayW魟Uޕ673[)'`_%N?t(U =~m90w?, )dzLmd'*sAkW>ۿ$a/ȃ!,MsQ8n{@U?we}ƃ3 =BtaտS(;ԍOUzukS^S?p@3_?W1حH~T{5_ FW%[EbK+⯿'ب}6QVbOr9|?7K˺@ `>+Ҙ: N]hs==-3P l+l'[L5h8ԉv {ʚ|UnCCKsƃh1tӸx>#]ix~8G߿hQgAi|eD>}*]_Ýj<n&j6J\BƎJg_~WLR6$}0U.+E=J\e&ж/>w[aSꏲ=I =*UV|].ځ!'ޙʽ­Xsp޿U^~s䡊OR\hx,Y| \w<+x CN$+EַV)l_fZ3ګyC⽒K+c0&$NމϬ!iWz=u,5jZ!E8FK`ulx=!$T}Xz^6Ɉ=~{H/y1j(j,)E9k LI % "o?F*L З^tP1^J3W_QR}};$(*i}aPC|%󿘿6o7Syo77}o77M^^{{/vt8yyU:h`g_{Ce s#l8ԉ:6ŃvTέu+3z$<&Ke/{йgi ?#x}N+NMq\ݹ7777h+1^Yٝ_h˄KK094d!8Ҫ4J7%п ENTǢpm#:(O"lLI;E[_yHuZВҝ#Jt;ACKfԭZ0FicP{iΗ(EU^>_< @)j>,HY^nfOmBLw V ^/ $Z:13fvL[E%eOr-0(,H&}3NWi ;ָ[lTwsAŖ=SR :_ƍppyܜ *M*tSg))"H%IRTN0 0ݸ_}SMʚ#I`|vf)Q¿fNkbI rd!> d'ȼs禃(jbJPaNc)$y,XŭkRi5i2I5ZC>_33 tcj$4||Nri'@NuMs_q8F3Շ:opdoGYG\[mVhF6⭎*à oVFNtS'IIt:0-Mllm6:0SVY5t'ig4No5UF62WC "U:Q[ӃruY%7dpa:5lvKFh Fpa 640ʻMFaFFFe\F9rL2F,L9o7MpmmX+c,lToj529FQr*pneXh:SToqcF˴Ãm܍ߔQ2Cio7ldtwN;GuXuo2vPdbXor9Շ# \\?y˔% @%P BP% A@c-bZaq.,c__BKeeoݺZ`m+ul޷SuonlE-eŋUe[[- uՊڶnelսnMuu [KZul1,I,"A$AADE A "E , D:SSs4hIx&%ߜ8KoSHjrV0V KnuNgTr6& 
F]Qķ7QXUi+T0tb-"*ҹ,Ubb+łYX,BX, ҢŽ8Q&E[-S?^?8goy3TԟrNDE@Y%CON4M^Zn'9Ye~Kmw Tz">Oop+j=҉UXmұ6K Ԛ-V3Y02 #C]q+(u j `|FЍSpu*`]shʴe^zSXvʰVCelUReaaf&ѬHj-aVђFUZ4Ms팤m\Tg!¸0j.VwwlEҟ4.wVUD99.$]UʡX:H$PUoIʰp8V%:*ڶr;U݊jbì6 L`2L'wNHI\dȕe10UvNnݩUWAC)`!f&FQYVz;:rBf6do.6]"E/nrtڜ ݭL( zK#R zduYSr˵.ub̝&zSUssssp*8UvͥmaVhca;ff,]W(?\d0d };uYMbBo OtQ]`g>pQ׮ f4\U9suT VJ )E| )iBڄj!N"wu6$"fJC;Wҍ(4݇ؔ hd Һ+]ċ%(>+Bfɹ8!+ Խ?/ùܥ챘fHG56__{%EF`{uȻF1znѶ ;f#s̕{^֯^?Rdz~i̬ʙIbk\{%u%Fպ]_0d,BC:X~'܂@vTsUbMq&:کLÃ|Y㱹87.!XTesBrȇ jX\mX#o8*˲Nʰ HAG٭a*jL(4&P;2Y+#j `+|,9T6ZVr㤍wbǎ?>k7e^5y)h.C Օ_9DY֎=^VeW0 ̘# 2dʲ2O C ;xa*`XwO8};٪zE|9{~RׅZU:Jj'dI^hq:J!3J8«Bd:~Tmp{nF~JuhiX0uԥ6ӯ$O'HI E;fW&s7Sns] sutjgƔ|aF a\H<'D<:QJ6'f̀tՄ!䩦DKYa>w$=Zt-ޞNF R=G}u}0m[Va?tI&[RrPm-􆅞T1TmΑ*%F{"tL Q`dt?)*ÊI}Gʎ4)NC?L'OԙfWXJ,+g) eAv/1먠bC1bbbX4Xb'&K!eY*̡L5d41,z4:f?`)ʶKgNU<_5{>;澳/ =Z鬬^ ^!yXJ ]UԢ7붢x4BGqIfks8E&y77_po~?`gMs<zɌbh0f+x^߾u}s~^daH ,*KI2???m ¶0cfP.UlҕqV  7Zþ+Jx'PS@"](VڬicubXǪܽr+oL84i.~: CYc}}TO}m|x~bP9_'Ec݋ڮw4c# L.%E;+ QLg02_me m<돘bQɬ˷^<OK)q񞯔J"'WXު)x /Bߑ1n=Z:SvOVRQL0K#VY a^EM *\ԎC@m_O{9ze'ɥö222Ka4@ {n 9R"Ϗ̂^4U{-lz9σԤU+U:i.! : ""-d/%]) <6װ8އfòEqUep3Iqim#a& N:ݠkr|\`dYVC e,.7_M56E*>~)Q"*?/RQ6$O+;{rD{J~j* }wwK|,J]B½>>C7 1vQCGbKrRGhLEY !o8GWo=tKިV(w=bj`KBt5s;i_1ݕQ]A+FOw*|Zv:?p1&e_a\|>{m\x:x$˗CrVls\fS7ht|XttxTau8`M?qEń7O)HXJXOktz7Ɓv*C%[ 8J=['BԕxxN7(%a]i( -Nc xI65=;&&0z]+ & T^>a ˥K(,3B0 }`歪^I^=S. J8L@@|PH|y9څS1+B Ҥuw #+|a檉9Dܟur_͓O>]gb9YR+D/÷BHҘcKFR34b >idy6 .P5]{թ~=_v<䤐[}_GZfg?3ڭ_{/~|0)HDBCo1 @ *[[h @ բIY D{53ɮvT8[}>Qʕӏծ2<=Rp펇ポ}`{272h-UaZ+2*eXf3GQXbYVVFZU3Ċ"/>Ҟ|C neWݽ(9 sϟ";Z :t(:F3Q\uXG,5ISy龁x}^UWy̏QEAE oЀY:5? 
A1 *^mG(L2ګ_/;[ * px<ݾ]U$v~~{'~IrIalhmFT{nrOS?*KcJP_V+0OBZ`b1f3}tlw%d9w?cݞj ~Nm@}eJz S4O12vj9Tp`i'k!Z,݁P w@)W*.u㙍:Tm[j9SqsG/\,GXbs^%̀I,ߠzǭwRi RAѶ"Jۜ}D/k?G jY0[~7sA3RvzMŌOd}S R# H}ߝϷ0ƹN6ƶ>g9X10I5N%8)ש1DQ=ffEjLF#UJI;<) 5,#/W3.w/C SS@(hzH i}jo}!S֦ a Ka ~؞{!hdF|Lup:zud$'w&!J QF{*}dz]3*wғ\/ Je}/:8;&ojbN[|>uxhSS,ljJw^ίAߏIu'XL'0{5%>_C:JP^$G~7Er*{T>Ӻ)K9ϖJ/wcS;ӈDޫ*&*/1[b]m?Gx?>X,  Z/1QWR^a;ʲgd9bG!Ŗd8\^yeyV^FUеKDqrp*_;ZvJwIr⚩ݗeb= = QSeoQ(ʌXw"Eѻ/|^ԼiO:zlbcb ̱bf2a*jwqcW,Ŝ4IDamZQ׬gon%8ppؚ^'bGVO-L]ܴ/_%Wuw>BS7$'&K d0eKÃ(_WAFh\ot^oC#*\X"..^@wfm% go2͘c7fU Xx(my &Qp#:CF%S-ܢ8}l5 徇yN%>!~uNz+.` @ LJ'aCjҢe}{P@|,hA !s-뗲4CA0>CJ+_l1x vj3>(%~HB48+]} "T1PC#_,e_'rO\LhKϩ nH0;/:E՞S^RQ=VAjjZV;_9#QTf ii@HB$ag ]fͿYN9uEIaZy75-q) =F#v]]¥f! 兘']I0j M,QCH[wojx:фu14ŵmS?3pSC4)" G$Q_4̆tX$\bdCC;NI%e\bsʦW)ЌTʲ'~ *'HZǢ̊,²0̙P*O6M=n#x,P \(&^4٠z I~,I؍D7}'Yϣ*⮴:/P%}{G b3 Hto~V _oT[';T0Ub[e*]2RZQ)vV:. F9ƖSzǡl޻UfbF|~^}\қ *pǗޥe^^e^ PNlt$R[ڀ``!B}|oR7Y_G ן/H|M;%2&^s ݘ@#o ĚF=rr)'7pgF%]BhcrNPf/=!iJ*~u= ov/4 7W>O3l3[$ZrZM$,j CW=}"i,UZn i֢oF+^CUpWrG/>9#z_ KѱC#h6660 ͍67͍h7h4lllhķ766mmnnnmMQ5FlѴa Fq"rO?} *6$pQzvw|pǒwN\{~k?|.-DZ[Qq[qZq1FGMn&8+֍X[HxPWFy<5^zSQFTY 2)Zr{ .)ln/!&C£ eUJ3A 0NJqi-W@ Nim=N#oMRKsq)t=8ªI_z>w-Y7'F @@v2ƪQT^@m$yugѮX=ubuȓ`~Iɻ\j /;3>g')ȕi-2LIedeKjΝz `gr7QN4j5I( %tHFH8[m~3Y3鄌[ў$Đ)%=HtVjj$h4z8!9jT|2rbXU|VW^eZ% J3ڟ>a~~WWV^+>R;Ŏc1jNNJZ%ڭ𙑖 ̞XG yW!⾫w²cSaKJV[(|,Gq"^t(?~J0DRk|3O6n/Sm~/5qea\ /X͉}پÛhʊCj'dU')bRooU6C|hVX#&+2QbRQ 04p&2SPi6$WC{fwF'4dafc9}XzFʷ1&}.\GWzp ݑS:Q`cDV'nυ?ew'۵#q>hŶz8nWMG nŠAjb{>81_EV?cns[cg-{GҳT9tf_Pr)j'I$ƠB/[i'uݴ[dk1?+m/iewɯ0-xɚ_"rp9{%?,{Lc(Ȯ+bԪ|}F^˾veUD'xA;롪<[%wz: yɌǑ{ïlVFa]tȪT8&AN9yQaԮ[Vw~cpu<ߦGwכ4WAM!mM&Ztmf-M[iBɣ#j FUeDh 0## # *İ *"-,^^c6SWB.gC9e%zu5Qun]N.\8~^e:$z+A9KusʉS/ꪺNzmL ߁]vq(PC:h~1Ny6%fgk|`qT!I_  pP6̍am M``igO?J,Q?Nqtެ;~S9u]Ww9O;;cɆHz]B Rޙ!,e*k6dĮA䐾$bjXQLES]!;Cp}t7Gn {_ypa0`ߙ#ÀDN&"?w+h  /<帖kftHԲlTu4kFAgIO^YĤ^4[^6`d8v#7gWZr,@eowΑnPݛ&r0 @`:7jY I8j37CZW:׳xo|KHXiYJVf!8TF tqm`^Eb?=K}ǵ^zdQ±AAM,UbF*R(B !E Ѝƴϙ?QmʮgBeV22 `˸ѭR54;?sګ. 
[K|[ KpB{>{jj W[[CAhB6}ıLK(####$}QUdaXhޥLO !V17 !zᅼzᅼzᅼs-겜IeabYG*5Wj `+I~uiZC`hPҲS+uZT_3t 2mPoCU #rjGԠ|g/]ŋ pj*v8 p+ }$֝wV..!1Y ~rҾ65t: T?/=~M:{!xw|x .?jė;O-ԟe y-Ur/Ɔ=:i@3W(%?bzV+M=_Yw *6M,^Tp޸{xi{СJ{[-ԪѐL [L/K%+i+ yHU\.uU"vbVhSSߥ޾\bYZZՆbXC<ʉY)+RЁ`&$;]8R\dǧǑ*4SܼO_!K9\K=We/'ֺ=v0߶<;EO}YZLP}i>u̱h,+eB3#~Z aU-L\S ʪV@1m VLخ>|' ë~KgnG>|;;UUV`w֝ ! :y"1NĖJMD`%m9r܇}ٷֲ6r5dё((fnebSjw73ҳl%LɃ1TgMN]rm1&Ĕ"'-Y4.?Szv*I '@>cpH ЄīDAF !6<OkI?#?|/yHcd³)>äk= 0HDD/T`h`q yR#yQa+UMUCCCaFʴ:T7Ftpa%Xa&a2eYVn1R$Ē@xC {cqͼ'N8ADʯbS0,O *__?N#W{PvU`eYeYe8087V+%E1i穉m E[zTx%K㲻u|ѵOI_g*]b.w]dF+2de.UAWh+zxV]krJ¡~v8w))jad,߬0&Wz*6*eXf3aaS÷Otabvdǯ#$eF,,XLU`22daXdV)mRtFwFeO+Ƨ(K Y"$IG#ea֌eb`̥zN 4# J N{WWa5K`6GRUĖJMMFRJ\o# Mf eMN3im\X-Z%x벣W|m՟ˆAz}SqGywW3Z|O;TCJX= dvFnU,i_+YU*xZzեXpV;svV% V ][+ql_^yHJJ.ul7R|ۇ<=Q*X- w/WVd̦dt׷92 ?{W|nǗEOum{}Y q5`r5;}X)Hf=U YĀ,"+(Etco"h0A K0;+le4Z/,ՅfYՖYĶesMspq:˗|'P84J}3v#p(;C7i$BINN 1VYJ5bWkoo%&q䳋2rl+h?ϟ#)Y,Vj3u ˟\]:Fb]EoJiƬXٮ\%?_EStwv԰^wɩjj ?t|vm#zf0jqzݺVoqHﯲ4umb;`)! t'a%ϐW9)ʅD6ǫa&% "C((nq͙Wa8m4(4ƨb&?/4iWg\}we72J  ahuT訴I'4wrVK`jT-e/Y>.1;0WK P6niJSkI9}3DQeQ]ij4pWhKzKڗ$2.fHWS%FlV󀺉?}:w}yk.:kb`0bdaaba~N^rEB`(a{2v{?-_/ORFF*ɉMK&XaL 2mEeJ1,U+cJ} 땽=Xb26u}ca][ڶʳ zjZգUqWJfXI-GP[HXpbaUЧ%3W=;e켔uد F "n6 P ~R1{82/̭$H?y Ӥ[$->7O)v Z>~0dL.c\3;@0&C1@ꢎ9DwU~7 q v =zk.|/9pFit鰰`{ 5&,U%7ߪT=[6 =y3%*PhO3ͫ+X+xֿ8),Ԝ]!˝.f8ksK:T1b;0x7 R L(`n0b8K>dUoap 2}ʰ]:*⾲lM 3DY# W.>pG?JC`{|~Z>@ GJo>ϗR=+޻W/lPЏ0?9S2۠7VfU[;$ bJ,13fV@@Ondx~E!}aj+W}< DI0Ԓ?z-Ōc>/*_I$~=_k)qT=aO[idRbLJpJ]&0IH>I)nsx}}ӯoSRyФbW7_9ŕJ'BL Edmkg vIߤeT kNLM<8^3ռ^3Z(ehDl`.[n+Տh_u ,zNn JL ItN[ܷU)"TW#h}Z[m$P"H!$H`ͷy"齘ɝɉ y_[؛9dn*cxզI(JCbx,ܰ;R ^=Q;eb4W6~>-Vs.q/y7n#R5Fj}^dçOrU:&$f֒P:7(4K yTnj'ǦRҎǎ%ls3կOזi6E"s;DoޙNayɮy,@~wwuoGا _5f7 hwi]VjF2_Z\c DN.75DqLO{dP7tsFtWw{l`PI,n.,I!U4 0Z?e*5Oxr_f+8)WMQ J(v%A>oݠѻ5 j+(uKK;jAi5e *Jax;p( @4qWCWӠU;aQe~L(wߺ6+w&8(嵽,r=؎]ѓvmx.s;  Gy&\/3[ Ij)ogTl# 1 o)J-&C9Nhq pWB)2CCN\ymj.)!0Fiz4ddP75qp7)&凋w5]Ow#ԋ}a]J;qƆ_$'H0Uom]4 LP} [4gbXƺ>9'>[?x.V0L~4>W11ofrݒN߈ rU,ȀjyQoK|@ncGD4z*y_7}w> ~7Vk#}ËK7+OW!C㪡[V5h<@E bT^N÷_} fw5JU[ 
[lS8AIIvxo;Gl^+4hE eBԖ$ 5~}sqIBK7O'vM*$6n\Q g:.k^bkJ&CZ CEH4 #KvJW):o囑jAQSeƒdxlNyP*,Ci~")? yCne4@0=l&uj*MD8S{)XwsWW#GYڏ7(a:P5A/[6ΑZ]Dχi2u-ϵձk5ΤG]}M|TJ W)Hi2#Ly9'1(X߾aej)9|,Y e4%MXVVoYZuViPZ u؎1Z͚!H~}C/£ntG7Oe^V2:]b;S̺evBBM (78OQFTCxڮz \ g-n_K/7ټrW_[Pydn6}N" l7k1U).n@ um:ll9U6DF@?!ݹznq>df'؞eo>B@XO;JN~%bjq` eʌMZǑ-C˽ Y@1Z3Q!PE.C)MhI?vvU0/n!b߆[:KPd5Y16>~{+DHW^c]ޡk0Mji(p"͔9_cI݀R [NKҖ#utsN?1| zpp'arg>9'Zlk̆YM_+\E 3 Fa|:6@2*ZLYe 6[q_"08hT+dO.V NN_GqO %zϧȝkG|,d_%ɄS Z?Xq6WC9j?n3:I'.d/պdAU+f_|M'cxe'q%O7S|r8XN *3eI3f%$ejĩa1hUZa+Zs\j~e k*\"öRUg`tyM͚"wv]7T;$MR6Q|ZvrFb|`bj  DMk&@1p Qg_}& xl3㈅?EE?G.t#_6rc9{=⤽6MXy||TTw n}|b tG8Z DiBtn M{6ǞY!Lk:C(joe18:+w5j/Beri_F˚QC? FY1߲:}+"#Gpnʝn@n#+!\?_2v:XLsre9' v,8\rʷtd?̷x#ǧ`|]sЉ(E 3Pih(4\*H9KBއ}rO We8n;JȰz>+̳-zӻq\ܸ,<ҏ#sw^tM)Z"V֨QD6'8k玖=!Yc=݁9uVa]#sI4c ɧ}4me nv^h5P^H}f64A!sw_<++KF> FjRҍ62@};4ՏwҚ쎑{ bKrPwkWًm`q4"njB4٥V..4 :rxA~J =G{+UM؂<;sS-]oy3юBEF_Kڇ sZ`ߌy- `erlhN_LOgJZ\ W?Mv*M(ms۟l2wh#.#-ZNl/Z1x̘2qpq\.Q"H;J&+ݜeO L1ZdC=͚cx!`5>2NC@~g7aen;S} n:"巁[Bx \kZ-KN^ƑyU~:!sTi$?MU(?g0>B2gUx3i2*08}]^$(!<ɹ0VkY_Un)kYI7&?kc}I)Nlmfg;+7nn3 YuyL60ysqvŷE(?ѩv'!ANAgw<:C4.rDZ3b|D`^R/ݥ!s#q䑤J-)hc_})s0P >H$Nf2cL.ѐcB5md*XWXӸge? sk󪟅m 8s)ȭn +#NLpHmc3, 6y -gSt4X&9_Y14l_d&Ѥ9 5e~HϾGJMDyx}H }/zޕQ.O?ތ )@ I`tꡤT<m%w)G(]~~i+O" *IwesOPwaᐇHS1Ox|Sҍ/2s- w]1;G̻&}v,W?9#kw؄< :G2,ImĸR5meIN/jj$=[G, GDʥR9co8RI˭BMP5#wb@qkG,~/ ;Aܕ˚80X|R-i' !䟴E) `Sm]cQ Dh -ŧ6su|M<_UzMʀ5OQ jK%?᷊VeZxIF$21d$\Ԣ] ƶ OJ9V(~mm 8WG\D~-<~VIu?-]xwނgòʞ,/ 2(5yer;BEʼn$ Qr?3&L~ϟqzq[n_94"v/ʩ|%GƄyPMi}!aZw d=qz%tFI'aL0# Y"|r{![.=mf'|HkdVn0/5i"]~K3ǐeB4km-Dd핦ygJRuggdO~R9f³6bP12QQ%zW\3F(]х*WRp, H0 eۺX6;Lʝ3V}B@T%/3ar{/U-h#$0(6wI%8^g5;KЊB _Je}! 
/D{؂h,w s5R4o/|&YY~F'9SB݁iحްh0^..'{½+oXSɹ#9Gt> ox|'ս=қ*M$r} ,LS7_ $wIB.~peKxg>L HOA; dMѷ*s2][rΊ?3>7=$nA$؅JijD%M>9:E`Ÿ7*<=zIBʌO,JSVϮT9 0ڄN۫=q6T` iNrGuc'h.]ui5%ʽJhx>>K!HFm 7oԿ(~K eG$ ɔ׺• G~lG2tL79 ||jdK')Z$y~RGK袡iwNSc#7A~Bk"?ۛղO<9r 5Q[Nc d^E[njԹxt}viDxf,+NȷbO%h%1BqWk \oжcz B r^ᙘ˪A)Xlދ/fr;VSk8xi=T\S&J)y>9h$bg@[^U*X$@|0N,R21F&THQBHÓO^ 9Fͻ/,ήO`gǏPآ<C,4ĽIʒ>&O/[HP~~>VY3'>Be MapG7_36&@w&J5 VxL1 31Sq,~cc3.Os7:: V0SUVTȊ_YbB^#;C< )v$Z zS c+d HQA`5ӏĄzWcY) ќ;9eE>~q^ܿ.UF蓽TS_xgOׇlՐ ӂt]2oB21htQ.cRde?WJ䱓]ӺnYfl/YCVE1.Y_X=pa"4Xǟ?uZh*9 K*u8jh˨DbSXbIʏ\Jy0MX|caNSҗ@"VDH7XvA-)|vTO:!G"qn?uoW׷#=ij~Jj:jZy2%:BڪSqD.K;TUNCWo Նl>R!z.d{GJ,Zsz'2f>^;]"6Pyϡ"Fxp^)rOTHLD^93 8N2P9^*m5ezp"9nrK6C;Zd D3> 7̨P1j.DCE5K3AJqϜ@_Ph++jJ/!5-SYSo D#QXcbh_9QKan)g,bBk Y}=mOFy{ ?艈suh X"I*)zH\=BГq,QqA0UBC>Is|H 𢽊Zp&Wn=J0);13}6+p]=Qh^ FW S@8 R&셶6l%h'Xfڮ ^iY)w#l6lA}@6<'W-t9x|;-LL޴PY C51MDݏڅERʬ m m3c]>GJ ь0 ӚY?gO뤝f;Сu-²4bP>@}VP35>Dԕlg1 vy""+z%iD֪&S+$Voƿ8J"\650 îLoL%i`3U)*Һ#PDZrPK|类C/֟7[ԅN /%^ARDIFwZNR-X0|HjQ8rOE/@Q)Z^#iE \AS9J'ݬ$1ޣ?@~G͚,v8(`Y̳-6 9x);|zC4 Ӫ'34 vo@\fv>g$>̂Pͤ![x$ A_EAL}4:5_%5(qB1;+ K%[l@|u2Y?Z#Zɹ- 6:ileɩ/e;X:B x~S9>MOvc;PHPGz;p0~N)P8@-m^i'y\tqze?tIb( I;~>!ؕ픕C*ј[rm񅻅J/6R݉-ՖR-k6ހ7v߶XaDAzڝpx햗Ƨo!Xm phy>#Ԅ!7su+MA ELUxDLtZ&_G($pR_=mE`GC tD 9zYROmfOgd \wqf}ˮWkp{n2G-Z-Z>#8L[%\ejvZH=_'j﷬C$?J k:#}+{Z?8p{1m/;nk4Гg:U)ICdUZ39g/Z4`yJ?5*~f4 v[?g9Ks3=kv붵hoNi qxUtl'aGEr>?3d$KJ~ ]^C ]^̪^ԺE9,II/.1U6YnunI{3R\,8@cai*+|~=)j Ėۆ!!ǺwˆսF_U~dLOPw"/In$#zgۋh9_ȯzndX^/q!h5'Y-\|%sٷ(d(UV*pUd#Wb0~*1,DԳHbLJݤVcfgowQ׫_|57x(5 Tk&ZlRusLOĻ.~Cl]i񮶣g>'a ы3bvϮY* }AhN̙m|]|#,{Ӎ\l F芒@ ]UBw)jY{=Cܿ\>Y+42]AK- <5[] ;I(yw9a yJ4i)AOfzAk5sXښ`*bCҧXQr: .c},DZ(H8lNi5adzDJM +^/<0;)bO: o&rtO!)tW1Wҙc;xϝT@,΄kܝSt{-R@JOÈIL+d]BoWKXE0m&j쒆ӹecYֲxK}08|)>C 1J&0&݊pbYyY3/2۶ $R:}hV~0)#X ʶY=&I\#iyݼm/pVs_l $&Ua&.$8ɨLa '.g𾮫umatKWl5eɓ8zW]q^ (rкhf{IT\J9U3\:|0ƸuP9[ɽFcr%q[őm1ʨ饻4')l8ѫVl҉ HV8IW+fز;$z~^64^ MD|X~EmcpkS;5cJ8zw&CQ.F-J >q^=2˂5s(H??UZ5>c)-jt egy1ԋM]0sƎ1A ɦԪr]+Ǧ1WX'1zcޜc<㋭Q\xë [{=Ae: 8$tDѺ 
e>waM{^h+/d>kJ1ٝYcosGgJbuK%%Oiæ6v.J%`Q{RGNՏv%-߾@-s>wc&\JɊ#GQ8z]u|M|ЉAj~$SE:?i3F'U3^ޤRk4eVOGW$JI]@saŐ~D+~_rPM/k:{^;CΟLC+{F]D$w-*HBݬGc%Lv()#UQW!]k$yh%1/ H{&:)J;)ۘ՘,Cұ d3U9&Ik4.N)_c_cy+nA~mQ[W>Aʪ;CP\}jַ^Ox6R SG \sBD#כ;f$(_yse8E*y (wOMš8+Kmg\u$ܯLo٬␥pː 1d^jo],l$ yCgXe7sMgP Kt,O/z:*d|:"&Ei;PH!H֒?]Vhp1r*7~XO,yr^44, { ex#Kh(*Mn]4Yd;vX{Q#1Π+3ycYq,m<1,A4e+;A1 >t&ȚHcUmZMø@7 Z&|Ֆs7A&e%Cc/Te{Dkm-,B LY:ƝCԀ֌~yzXa70!5@1Hsom*-bDʐ+|cv%ҡ+KIOl4Ѱl2dH"]m3, TG4ʝ*.9j'Y;ωfr~9auP.ə:yP˜ :6R Œ= L'`;4<9C æqj HroBWPqԿ{(^b^"W;Ol|1Eۈ+/`'O̦qa9 Gэfy||*J6fcE4O]yQ؅\ *鱃2Ǵ$@ڶr|DAU3RcݩLU+&GW"cGjoN#G ze Y!VS䣳`v\<.4Daa[f8jJ4͒FA<#]R-;}#b:>sԶEUv;{dVS%t_H40հ$8Fa{T8:l9% sX_Rns |~ִR4̸?Rk̟P}:b++[|0{;9$ZȌRi6Pu{l"U +3O M:&2bިSyձFFŋRCOc@"Ig0N!n/E`>*#F<ށ.Ğ]x$0#=:M%_E\?6-"|8wOA)q+? j'wz>i<'( ;kEwS=1ڱ#J@U(K,sX?o^e#E𜆭o_ \q9=| Cf:owsA*u}ɚ$ _b(<sGpf|"koD죿( ; &g~{>+gsԩ,hhg@qAh;N2hMlf '\H|cojq]>q`ސˉ%FI/Er|aq8!SJ(5>g'#Chi !yU$Uls'Nd2DxI$gTFҪ;7츿TMB6/~Tftx@7J߲%#qo6ᙷB@ *JAo puk=PqzI}봮A:-;)|Xu6U=KC<[0i˽ᥦ3Æ?nAN4r l:@M sfΔ;1:'ofJo-Ϊ(M++{ PIi,_ޯ'QI\#@3Dʱg0GvX"wI'Gɼ֬xH:imMjZcw}_0:nK*t,yZ_IGx/ ny'yֿn^؀=9pAKY3>lR# Yķ _(*ϩST~9-/`'f3#h LL!,e,3N(liZ' ėQ窷=Hx2Qԏ.1v9Fy+o'u:"Nꏫ(9]6jG|qnq_vO>;M & ֪򉬐! 5 EDmeOcw?ECI\M#0G|&}i  8TGw 14>^T V+tIYln`u׍SzzR1;E ihXKuZAacy9mstЂ{ 2be -ssbWnp}a *=A*l@q܀ N)~$&@aGh*PZ}8Ƴ:e xKB8>l|*;3\?ul_lT7>o1%VI$>PM  <-ՖzX&f!Nl?}p>grt 5A]#$oGA 1xcd;vI nȈPD*O^AMs""Ԇ1B}w)hȧCd;>F"|h ~5pa㻳Ue_@N/#ȇ٢<1E[*jHiIz9[&OQ\,7F0DaQzq+p$ό.=r~&ROc.P£!"~ VQWEuXG- (SC\' vX<ՑyI]͗!+bWxa N%} sOm0k}>Y &A/3fǬ $M7pl"$F_2m5Ry֊=27@6,Z'׏XkGm[*㋣ _7 W]yoX ۾$hI0ubWnp_S gr;p:: 1ܫ]{|$J4 ELǠtMlLPaG0bT? {keI<$x%&}x98#!ws+f_ppU&ҽ/,.޾e>hn}FM>lCW NN56%?81*b;$3rϣB  ! \274Iuw_ -Pm+F )74՝a3K ZDgb%k5< HO̐1ĭ'Y=Y)'< ہʹd1zG`xK1 Ch:yb}-uDd1),>S)ޢaNsFNӛؑh?ό'$OMy -kAuqI5V&?h6P8O,3ET!tcZ M?'TP؍W]Ǽ\@HNN le>59ߙ1V:TɣE rW%s4J.BqsOr!?' JUa ,%\_2+P)O̔)"PԄB0u`t[y,s lqL$ 0l-qvZdV+겣>M%\sc(}/‡VrrezB `ius?z1Cq V#sfDw8:il@ MdA?H Q?Aޗg"K꤅]O >WGܦ6\h Th8lf;.| e=q5%2..41#f%1=B[;}xu#J Iz 0ݞ.P|PXoX5^$C!{$pavʅOy/5$f*c. 
<"- qGQ'R@ p]b[EW#ܬnr+ |p΋+h?79xfmD*P+kLNQugeC|>ٔl/~ۍVKK%c>;4&=ژʡD@ D -&l;IBӁp?~$,dAUBG 0oJm&C5FG3.Iq:vSԘyͰMټ Ө2=g뒶8,,u*ѽe1 &lqQC/2.yߟ}AsdpIQ4•bcs~5V|ݜbij=ѷt|ycsGO s =EJ&(qk h,2FBlA7chatX|Gi(X XqCs, .ECCbP̴ &SV`dWxla4ohnu=G=co_g{ӓ,+|954blHRf2WvDYwE>Pl b/{\׳̉IxDk29&k SG?^s^+RoS1e ~GtR;7dWOեw`}˔Cx99V->M;=:xn{|:p=wzXo~kC11AK/kZ?9e >@z8Pzw7).Hײd0 c8(.7PfaꅵAKi7:yI[^+r+!yNo8Q0B^7 YIϪ[c4L1wgyxXw H09A>,r7׾Ti8CA>Ͽ?x=W}- A_ݿ; PAFsU>6Yszd~ O3`%1!!D/7z"kЮG旣4'ؾl^݀X>v#aX}L+;a4.̓ ަ?ꅙަ{i~pGo$ E&AD?Ť ߷t/;ՔH Ā^sK %td6e 9'EsB0+rQa• [Ycj[/+bbA5D._p  N1$7q QPY<΃5h%CL:1" g˻tר<ЖêF(ϱ\@dN'[l>(腜 #.SEdϟwMxeKr8CѷE%tr @0J.n?Z-U>Ic an mİPژ,۞t-ݭ |IQ$!a%{ T>G"ߪ2?˼xҵP;c_kbrpH>!t,`N@CϭAa2}FNQ]Az{Ƽx}3ep gqG|l}f-6܏&ޘX́6.DpYaɱ$6gh'YtМO.1Jז\@K}StN_`HJӳ7sgg~<`/q+hlQ &{.ꯪM-YL¢WT7FK l3b#o2g5u2$|:bqK)@SR2a), r48R--BG ]f! 9| yטGӤ%9{#>r0c.k0S"#,/?s7U95en KnH_4~@S/QsK"=Im Sќ˘Pzc+W¶) DS; ")bVď OαſCBBB 7kT܂HV-9R"Wx$dsb0( ͩ(drJkL%^73~猗쉯W!k8= NiϬ`}M%X4@$9|{+!s6֋ر?6X;A 4+ɉ!C"0uns-/g@WTYDvC sE+pC("aGIAd4*0U?;@b hRN;{"92QƔo`4N>&7sL8*ZCRx|!U[uG5l$.&=b o<߾kBHHF u H )2LJZbL4VXe|l &ĿYk*޳}Td1SZl6Sf(K"I™`b`af)Y, )aa,dLdUo̳)5#f#,Uif#1Pj0c$eSlL 0ċ  ˉ9{^\uY=r'K{2P`w@Y+w[bNUc^*NA@\GЬM ifX7tupQ(?b/D 1 (L$܊OJs x-b&PMΟ` `JnͿ|$p׈Å2 kL'_[J|7_;)q"LYa4zGp@X'?^w<̻]:"`2'4򾩉3w>kȄy' #~l[v4tU}*ELJ"~JprN NWJ@ ]hIP$F(a +n(nt`O7zY1/ʕ&MRf׬jfbH .rځps)Ca~ 9ٴWu b`؜+Mze 6eƿhN3Rx2Msԋb,ȐEI ӿ-`XGTC lzm>^}2ޮhgh,I. إP ·v~,2*hJ$ܧ5-oNڂ"OoҬlCe[`E[4ergg3gv а9mKUN.b8R-GJ 6jߋ/\u##\y]k)}ܤƣ˄TkD1$UZb8|$H sH׌1~,B5} O\$tsځ2F@d-ʸ-]\/y,)Ч|wʚCD1B"hfc &D쉨>fdN\D|eb#z7 ~7qkH! 
1$ N@an;Dd;Oj>(ǿ١40 $C6u{hыXr5TwdPQ |t#hw״EQ!+2ƪce,/96LIx9|ZrY+;ϐ!2wK~Xbm`B8dcITwGI.2i`AN׮GƧSˀV-M^Zr,Y?~fgCV71ԇ"2 /kdMe"t :}/ӻ.d[Dlڬ*dZVF}OjVG1 \JUTRa+KXi{)[47fiN7yXkM0DMٻz[{ܼE @tD9ӈtր|ު}:kEnrͻj#B$b%uwi-qۯܱaݳ}>֛lfmT-M}gwn_}MWwu>6o׽uݽ}q{׮sonwwoɬVw4] J E C6POnn`tPhEN=޸4:KAJW:gGf[(.m}1_{P h5q'jJ :V0*zszv{ۗ>ٻo^-ܻmP( QUeAB>B& UkkS:*R@`4dF&dLCM 2&&@dA&hFLh D M44dM4hh@dєAbiT&&LSAH3IS!4i1 leOzd0ЏSh2ɤ~M2m L!Dz"D 1*OMOS~O)?FzS!ОCCOP=@4iQ4h4Ѡ SAjQMRBFA0!7Ҟj5lOI'z6S4i@OP4m@hR!4dM2i51O&&LSm'imOj~hM2= m4M6A4`5LM4h=L!驄tD X9d%IBJc0EPxG^ZT6VYmcv_q,S q&WROn H(DA@dý42,'M "U!bR"!b" "QX "$ }A&Lc#h,FTʝ4^OӦZ :G\)3xܩóiKEeR2a,M.YUFV BIY@ FdG#H'fI} ) YcX2ZlUDB1a]Kj}!+.y﬒ʣ!1+(c Hb E퐮 ĝRfXLbD>*g,L¨iS{cU&Z)SI>9Nl롱MlV*DMKދלtKyUZ4CL^:"w}Rs"^bm /ESE./_^H2WO)|^b1y@,de%N~J^/Z(0Ww.݁OEKp藢]~iןQE软;/6^N/:WȱB;⟰)JzvKڱEONw'`OgE.4̛חoO$ʓ6NE7{6lpntz_Œhˬq,%b3Gph\P )$2)" &0eDo5hT,Hm l=D^8bQ`%,u,Թd 3K{^_~ޟb_ys?xu(09Kע'T^MO,זK |zquJ:$o-h*)<n'j4̓S;w]"Ljx{-:U0~ahc Š0M2%X#A]iHUKHHO&S'ì=T)>>1>2)2"d* z<6ft`"3F^j^y>K}[^1cL|)yhn<(^œӷNi駫aN^i؝E6N]̧!uUpS}nE[4(8+jݟય%_$-\Nj]trq/.N7ziˣ~<ڋݝWekSzdj]u—!! Gncz᥃(([7Jn҈^BmvуSCIZ<@IU7{Rq #mCTz)0h0x"1ICCaAԜdSZ}O\S0oHA4*"aW2T#kn$/ rxjxAvD~%= yS>xy<[uّTGfQfE΋s AsGPyӍaHv_/-pW:7WT[PrSkrQs)nLarŢދ#@W8ҏ{ݞ /-voSPq p." _Pj/ (9EE嗗\ uKʗ[ wu'jr|N a\iW]?vdwzx˲[𗚋-]sȸw}Mqoo{<>әM" 1c 5BA$2#I++IɌ_nS.t)O1N˄2+'9)]O&R=` 1LS,L;tSd/ҖR`tDMS:x9HL,ĘS7(:* `,xt - DA *TXa1,0 U$S$0> (Q1ɆjXd>DD 0% "GSL H: H#ԏb:U*&i2Z5f6\ʔ6b#Q&SNTdhр|_hMPaJ(UZﰫ[IYqxȝʼIʃ0N+c4ZbbcL1cLa.2|n]zrF-(b IBaW<ϋ|MRH3ǭIqh醷e=sM:Cw 9og|aNJCLS]rdN9=PHx;ٳ&ÖvI)JGA0R&9f,)8/Q ^&"`bU.`dΙqpNrGo. 
Ww}#- PANW\eoqPۅmܼK B:a´a&nMym۱#:]Fښٱ6ٱ6ٱ6ٱ9 58*H# Hɶj7Fl4u뷻3Pm.]9 d*:9Ev>_##1"27lTҕs %۝-ca;qΦ+`ٓjޤ̮}TEZitԔJنSDAb aH=:aUQU]޽UUUc73da'`&h+ 5[WmpQ2Y,R,dJ^e `@RR,@RR#y2 e 0%x51USDDDDDDb# ȝпݻ9fȋU\1쇱52,csF;M,z} 4b tojSYŠwݛCkt@yiӌ5*sFOmTd`5jf(%AWpċ۶OOҝTqh?O^k:jwE3QkszsCY oiZrbQ|=Ĭ$%}$ah dBa2%(ee`ˌ[h=2Yf&\-8UY|+|XH¸J$ '% 4FU ehdo[d,C (bMH,qGS(j,kAbkP,1[h-E52&4ʤDdTĶMQ&Se&i,&QpQ Q Eږ j ,2ֱ-YT=qeXRYK*868 1XU@SB(R!8hxxу$ÄK J `a -q6&LG;fc`xFE h=edrSRbEI&d1*dDLE)!`T"YAKT{d7(?JUJKXIz0)T(GOlt}t>'тHB{?'AnO"eò]z{'zUa|hȧŽLl*>{ԗgތk{+`~2ݚK}AO[JM`HG>>fea2 Ub J)"fK *Lf (]x /~[wh^;{h`B?f$ DAz^O+bL u8|/7 3tTޅP4vd\6BZi[d; 4ƆO?u}gyU ^bdIA& AVLd)N)BI!d3bd)UFK$$!Rb F2L 3hFX $BA Q$B %%%! !%SeIA HX HRIL0dF2Dņ#1d1CD%DDTeES)eAC #,YKA R *R"0֕焪}ud'LEk!9=Y1C:\I`A!^ I2=L|.ǁ'J!XHZPm`pKǾ*% R1Ccyh$ C_*OoP>T_ ) B]f "a$ $<~鬮Oܻ2dXK7'=IAz s-# IS&H$$@&!r\ gzA!M)CADS$MB "A(BԆ`#2I*Y@"Ll'DDCO ھRd EXCΊ "1Ut!(@$4K(S،("H"R$!JRd"  A B%)DB+Y+"d!(iJX(ESè;K!|_?SNXU-*8>&d1J$TP=dPAh^>t0tZҟB? ёZd±S r֨&EJ&%, PS$1VL2", 2JR*P0a) V,W~UJl8ՊRԖ `1,:qT*ʦ@P )G,*%bQ Qb#ÄSţ~-N#`Fdt ۶bǃ9 B|mQEM]e"N-.Y‘ yPrSEFGrzz|5*h]'(4,92Q=@SDHEaLL)LLUV*AaP{8p旺 Q{M|W XJʿ WJ0=-) )褗 #JU'uzVYEO1-fCw}UHm*h f$W-: Tv4cnsb' y{תUTOAފ*i! AaAN30A&INOw燶*pM $u4С}m}Z(}_J<_s{@u+n-aJh0ĩBLRϻjP`+Ajy&de(2ԭPb! (C1a(D[ل e,zBVi=zRz+9<)qt,I‰(F _n8Q5Rn;n~/ͲͤCW$!,Qf?Mxgț) S brAH{ZGM/5]uS 2972j4h%m3sSfQ5annhYe嗁P1aaL!ܫdk( kTfpVVSjgi jm?9W}N94itHuZY.Ԝ"XY&QN# 554uNUm 2S dI Ň܌500$, l\X`@D485P2 Z2a@V`\5ES ޙTaa#'v00!FqtdIchbZ8K'QѪL2ı,"abL+$XS+"[ͣ єeS%K*›:48!L֪iiiCU1 @ C C 0 0 `a! 1*0nlnv%hUhʕKx QF\ZLabŅ4!$u%AHj ၅1ebXc 5\885PÈ"aCa0 dB"ȧdɶ72ބt2 n@`eWq5D<*YC`+b. 
: _.[iIųci,ZecO+7cQnZ\K c.7ՁyW23Zi4BCB:@Oy1۲- rd167y wPRb:R7 jh* U,iFP!@5 $kN& +"ĜJAgӶ?Gv tI1$5q4Ɵe/KaB0& %1*Ltڳjy}x\'c79s(HGAՏ7Q"[8υUwt0zW50SǛ!J@L~~IdQa I$:?FVXLNoωCZWRm8"EB@=$ddiFI( 12 , 9Í_` 踞@,!iHk>sm{#jsu:v& *i0`Ȑ \he@n2#Agh`3,% :cf;_ww #DxƘ_:_TB8ט^-^Y{gḄX^U&AhIM *zHuƇ@~ S<禑T\!H4hN%`3dyùѽ=Ny {3441E$jU ESIe_5cEE]BiS*1L b*1FVZj$eSU1*# !e&Af*%F&!bIHjдhkZDf$2+F$PH l_MGcgɍOɉl&$clLҚ%~oAYZ3ZEj?nThYT<1׿M<቗Q[Aa%@Ht(#x/䰢 ]ZדlxQWӡ$ v6m}3V/.^7 !!r|l0=m{2h~kߤ~-o1,hyܛh{kH!RRuY ,4f B KJMEffda+U/RwDZ)n:f_{;wWSu;4:% $B"KW_X2KA#V[Kb Dj7/ ;qt6 fQ>ԣq&]$ZEp( 1 $ ` ;zX+I_qy7~5$IvSB9gO \+7ByDEg}$;H(q m1=T2}V=0YVev=-%s^r˭ g~RΧ`W.-J '*d%aZy뢌D`~& )J#jLЪl-HuJ':=yk!u(@׿Ϝm(_}m޹\׻IA b4* ]r`z%+(r>GJI{ _St w?%_w:YY,1~iM2Rm5*p_ CKӑ)`hUQIEa = /KMiŅ, ,HD J +o{lNA*o|quϾ"ɹi[K+<"=&XSe԰--8XzKR|(=z*yhBe)ppi[f-.͎Ad ‘(fPξ; 0 zkz }Ka|JUNޘJ)&fOhL0! 1 @,98RLX1^#QުK N8Ӻҭ~yjuɕ씞*edS#T#zČV+Td+Ҹ{9ҹW1/۲9ѕd:R-!W~\m!{ˇ%}z:`wƎmȵ-k.#V4iۊֹ-Wκjya1a*;&h2N8+WzkuJUIh( P @9\MV)AǕ/2bԴkZu p8qꍆ{tҽ+Ҳ˧NX!9-&EBÃZ4lڪMMId4 w}+pդSd¨fPL  fa sWT:KV}(>^29v|b^vLvO8ՙֈxsg86w61X7_X}S{=At;^%ZZ]+B<n|k: TE+ aaL ^ah[iil X.ǩ:sf:Ev|}yuG<v9FQg&10q9Ppos|O/حkLbZ^k̯B,ib{uyWF8{،[hrnqz]5 qzkzgcc%aa˷{,7q[lza #FF4m6i񭍚y\ԍN ozָ-Ye|\R8br\T꞉y9mf۲hj\ 2%ج!@bS@iYiLJ.7읔$^7y0[,72H@aɼNI\]ZO6M\spq8NQ H;6hs:G잉dɓ&^7&yy'~ ]%]%]%ô2ŢNtqNsɓ'Ms|Ҽ':xkae|a-7U-Pe X:ŔY[NE/ = K^<%+V횭kؑ|m{/ox>"00!33 *$D0440¿ShԖI fW Pa YXBF# ch XBVZPIejBZD<`¬XYE`,B B!fVXePBI(id^lVKuO=4P퍂H@]on#pM'Myf=Y٤%k(`)]$voB1~23G#teKr5q*,Ys9& TO^1YY.{~rԟhr8Rm3А#;ЮȯG/ΔlbI$ Rg}U-\ڂ1P`CzVuk̤XE[I'D\_bH8ah;|c'z1c*vowR7pRe=~I"P$MiiBy/t_IUMXr)!ی?pet4: 0 0 5r̬3,Zg* Ź&..`a8\Ypce Ye^gR|@a]X{6M1ea_D_{v0Ĝ_a8 xM7IJіFdSHu }rWWBWv/kǐ*Wj2AV2.FXRl5΀gsZxWP&K#)}M4q!58 EmW y,XQ\7𗄼%.\Kq.%ĸu]i:V/*YL5;*^{ _zT7=dNVu7--5ҶzݳLJrOh> trŅxGAx |C {yG<'D{5s&2~rZ&B(6Tg ׵ojk0ϣ3?v"pIEkx(>xS4~R4^+ |Q=-_:DB3+V p}W#=Ti,|8ݺ'edD¼ ~* 2H9뜤fBQ#3^OA͇n^T:[Jg}ߙ? H  } Ku2Wj]xһ ,Ե~};"1iPA r[w 65Ype|jrh0n ,3,/Kߗ,flC?ϳ IK0X`0ÍI3hԜrJ<U63Dge!+\g 3 xM)9oͦb}m|=fD6 ciYU'̜}m3pD( ! 
rPjcF6Q(@j(X#?G"XrS%xJ O:Sk.W7<׹,vy>#kod.x*h 7PNh` t0$FɥW#㏪UXDvB!tJ&7NuG|23b]8j4 zp9r2FbƝ}<8/ #̀+`!Q`H PTZskO Ke=&Su0ƗO:u`V@MB>1OJD`x Hc1Ht|1EH5V3{#I>Vֹ#1|Qq@Ӎ1?ev뮾eLIX?h`*p`$F#&_Hܾ^Q~7<34 o ;ǯkj)ɢ ( !&{1o̥J-o-_+n鼓kSZԣĄ,JdWQV|Wb/jq[l/u1t"!BJv21q|_8 wVpxiӠ%r;8H F+=;=JW>F[GbaH7(f5koݢZ#vQ-4p@ m%=.{ y,~ŕto>Zǻ*)B@ N0 $%=K%X?k]^uY~{.:^gVaD"N"I ZѸhs06|+UreR WD(1$hj'E '8 O=uzF-GU"$CI,YıQ[͑/L orbժnW؍J":GTEVs)A  + K¸YޗQ_B)mdO_$H€ xs׫?z.zoSNop7o1V:ŁX !{fI~|TOs 4uh.Z /٘|Xa3F\K.Jz2L.u-F 7ƊX|?<{ Yem­ŷKeplKosukxRD1+G` ' D, U ѱN=)a$Ν9s}f:Q.o6ͳy֎%%%<ǖ^Iǁz32jYeYjЃf0FPmP4آf@n2' ܲ ]H㉹+wld&NΩӦc) 5 cH61bvX7ez fN3wLEгuWzX[cv5tin\ KD!CCn釘ոmŠkn CEM+e` NtMN09jSmDȄK°"W%f(F-(9ձ2fmߠr97Gt`7][;g|N;{vg٫i=KkM|s3\,0Z4 奋~w1rI^+.}խqZ.#[81ٌcz=냅GfQ▏xh ~Ms,5ܫatد&".Qk~Bri}w\!E P$a@r\ ULN3o؅m&?p8KJ8ܼZ%%'^*Wk Ŋ¾a/$ڞc8+׵܌$)4"~}i>LԔ4T,p`AU ĩ$ٕ)Sb D~\ _l?7-s7~,߼S͝H$O>}Ll/?M`ll,Y2ԭħH < s@ xXr4rK>%}X4D͂I8&L`'߼*3=rZx$ikFo#b\\W~fe(TK*1˦*d45yC1̄YIwpސȟ9W lCYcӒW{%)B{szh |}g0e?;m2eyJjed s1 9ky= 3KNuV!4 jgCUl7Oz ;_WEMc⚜ZT5rfM~ |ĊowAiNbcĺ,T7O[)O[ 16#f7Ҫm\*lI}s`DŽ&ǫ+#_)]G# nOslV#~9W4-^ 毂?c=PoNuR]g4|_A4Q+/R'}DԮ|ҺqZ2kz]1V> Vc>jOɎf--b౜8҇ƒ` H>HgSԉxao=7pPOu 9?:?Hiw|} 3]Y6o}@,)he+9};Wʐi:zt6ϭU 9O[%$Te?(mWtrmwC3FkC<qv(4ة,@h"V6I8~~6S7ЮInnX>O]F *ҧ5)ý$v]$mZW{Gq<^~ۄT[ʂQja]S~Sl WO\XW_ D}O}"~"S5 BA2c)oa3Ld<5| Lp L8B({X̔L6KO΁W趨7_n.2_?P!}e;Xj7_|ޙ{Zr2ӿeGz;-γ@!g_ "4aE@51G!"$2.&m@h.GPRxu}ˇg2IRh1:'{?f:J>3b̧-,Z/F+ /--+H°Z,pZXťČ. 
Qŋcgth\̮O:Sq[R6;}]&Ax˱x]",,?((Hn6O'hw"yјhjyGOHb ar>yUP;xnBC7ǂlA:-{|[s?x<=H?qf<(Ƃ1j7``\`p 4h\loPP44"Ofu<\]Ǹ5P4֢#x2|߷1ܯ?vg`$:%PsqV?|s$z05JYLVAJ*@ $[nun;bv7Wr_UGuy/TI$턾0aa\ؐ1CXzHע "IS p&Dhx_}mZTÀ_v|lSWj~_GGSޮno$S̴!l0vA#ȓJ/%/^ն\PM }8aph]#ip:Y `ZAc^{J6d R;QE.FQpki[;~_9ǓU߬{nRnUN:; HD4Vx4w灡)`AH Q09FQԖ5FVFEj4i35F򵙟f3KF|665j250Z3Qr ͍or93sTGAq\+8 ,2]iF)b˥ѤpD( 0pt 0X# Vʷ6[Í lung-#z86q5XB֫R8-G"[2FthX?N 0+    0+    0+    0+    0+    0U TʹjZW:ܬ+ дVX4klT) XfL*L(9Wj!FD?{*ij<: +C|{_ɺ~m/e>FZkC֫CCi'Gxlr?I\2͟|E Ȋ;H3vIEK!z9h/..QV$|-33,ʥd) ²GuBEk ڎӪ\)ГI6)ι(RT6 [%E̎4:~ڰYEU.0TкD07adIZDͣFdf5Mi+i.s2LU;GI[%~pÙllny# Ur6Bd Z+'3++sGY8,:B4gd.B?aTe9F#cc.gqyjndy*#cxR#QSJe eYRQvk뻏=޸V Y,e죑ÿpTjJħHsl&&dVFODXʤF_OK}xW4*'ݝ*;%%d,݉2w+'/A'WK$ܱu~.u@Pz!~V IV!Z9#8Gp\ø.XVűai 6[ſV˚湮kJ.Kr,.%ĸTN\53SS:Ns8Z0,6Y˿jK#zYSg. 9z=.ⱅabŋbOX d`ZUD4a,Zʊi&qUڻxz1f.ޡKkVwxaaao{YfݣIna%fn6F[{f͚6ofd4>lSpLpM5Zo 0+ {7Mw -f͘lk h57f͚6ofv6[)v8<^y*]k jA~SЩ8!;l[* uܥŐdrW%vi726NrMÿ4+҃7zJjKsa0Qi(!q P:)_*Ssn>P S8xϪ"璻Ϫwgr7k;ݥ ,fʶIiSHH>$CDtMz %~;/B"Xi_o~:Y" H3(&P=(/Qee!z 8IL4$/ڥxTR_^_?f֝Mec$Ɲ&-_@DRmVٞ,{|,8?^/gE?uyVOo ~`gSP} XtkьlLa gZ46AźSG Z~1\JP§S{Obv׸MwAK ٭0LȇXAJLJ<8 J2o]eZT™@`ĿT`m稚VkSqK\pn8Gwե]4E`!`j*VH M <'hNfRQ_vaaAYee;B!G2%+F]grgSbJ6|%Q;҉EL;=œ*rEvUV5Sksb`chf ]7a'HUr.b00FNW^,) = arNYZ6rDr\60N?>'(\k$;W2Z] K"FҀ<lX<_@}bэZdK 0ݪc 0U"CP0,ȥ"SU $//}aԗWڟfGeYYƍ.U[&`xWqv`[RB4$ x>"Z$~C{Pa|ߥ3m޾dqC((7H_lFK*/ALXVBXVan i4o6K*aF W^R% pi-FJ=bo](pQz*!fd j"!ÁPZ}UTTG|{sڔrVh菵OéTIw&cSI+7p*ǔvͩ]YFO̯ !R/licbl#L]¾ʾd~..נP>g\ަbOG^xTO(xS1F_EJ7F'edd-z%62nr[)6V : I<az-'t<+sǯy(wiv2#d~]:/ 8D_p%`0_*OH~)CU1=ztzU|*3כ!deҖ7 ^J_b>¨t+v8*bBt.#ӇƮlmb'曔Q}+lq:% l%koj](O>԰/JŋŲh-bG雸EE<XO`nC =5Zi|~*8{|Sc$W9~;($_ gO|axZvkE~r$sLSP3rBӖ‚xU2QT sxv7Vtt>ppIEEeJ,hٮ$7A\߿u ;2F%uS > \H78 N٦NUsr9d :Xm#fnSN#Jpl4tVllpl]5b]W)\gd\_k.K,6 &jM,^JDBLah%H.X50JUow޸&i_-g]ԜV*縭8~r}|PPaA *MUIܸPOˋfYNpʊ)-]W.vX.-V.` +5Jnkt{lUճܷcWZUeP|).8998U-N2syAȦ(1fqx\XNcqpa}{Ŋ? . 
mbY, mmRm|`(&qr l \,OYea%Sԭ<\{;RUa{32}?TLp ?Y-<-~E2ȼ>A{v &R3Y;b<,8v4nurR.AMNr04Gayږ%H2B!))QA*Du{0*c%6 g=8ʨ;oKx{ l{mnhՕorNmdv++)+\p,Sb{m*qdXpSu]y F B)?eyg&wdn\XﯘⓋlx[98ȇK)-RK֖E9 <Ԍ%sa ]ӑҼˋebՅTXoY-;(dS%Ʌƪ&Y(ټ;RrYRȪ"nMoj+W?NI)v, K9]7/oRAsN@ 0;-}{)P@Rde_GWԪň~AWDbn_OW\_+"Y VtB$-A]\pfc'˻(}8R-/SS{l)i}uDN~]|L6}/]}@r3[.9ȥl06EjXIňňňňŖhZ0+p.#T,A FpD$YMda0<b5\@ Vg;Omo)ZOqLflY1˒\` &`{6DlE jIBPG_]D㋻1`$[+-}|cË+,a_0jrz8}cC['jr*BEaFbŅ %^Z VȜ]mqnr:(X< PO/v&όLԂ@HI@hBʘ_*l4ԡ⷗ȒE04 @}-A|,er8)Wd||lqc)Y@@#I4qn h ?OPX-V XIXchؼoM?n!?昁YYj«bQ{TKϮ}+#$_:6&қ9jӨݯrT`1U12_1anyV Gih/0ĘCe7 ׮w"u4]׃ڢy]IS}*GM;̿$ս$ XJK)Z79ox#$,%d2i0{)%c*Ŏ *U'pd:=LvODw7L9=etWĭT2F0q)Ёp4~]ES1c gJ$TUnkV* =.5ےmvmWWf5#^{̡#Kh܉u.I퇆S?ZN|R"w@&IhBDReX H'G>'\['G=ݗ=̖Я{*i4 x@B`dE%`QU}ʕr6TZ; -k~D]iI`KǴ;62qRj#/d21 )\@}fI+SbcllMhLYŔU؍J[^2\2 8ѕHJ̠9ŞUUseкWd5Q/_ (uPWdUʉl\~'̈́LWUD'[Ս8Hf4hnj-Q/S':Ń}jbVzT:iaH{8a ܦR5{lT`"at  {r7U:|/5O6w8DBA h&.uiB@ͣ1@*X˽D;wuA-Hʩ220j~4Ư |'UrVRKz~)hy{i>Y Tp? =ՋH--fWo7x, Օ+] aܫHG;чĥ)H?`>pĄK֕<yt;̻:̝.K_~FU[}bq.%-ˆrIRli[n]Ej//)nz~W5#ŋ-~Qׯ\+2QQY j?+=-F# ;pdyH )/  *< @hRP9!""0У]G\J8-?׏SuoǺt-އ *b` L@Ī̘r:)Fu%J=Gqx ?'=6 ^4\gcj KsLO cy*ܡiaffbX1ڱaԣ6΋dd/bTT(=?|xQ*':-J Z0CEj W A0qshՍCJ`@,8ѐbm"L__W&U&_@T!>Fw)Nx ӂ*ĻSB.+HV +RQĥvث,0Lbd툞YМ|5ʱ{ W;0Yz$ڢjRtlY>XU|D<5GNj|t'^v|^H𠞖ۘA~$`ǰh;ڧ{]Z#FI(KKj2n#lX#Jj TYd* `*8#) *򗠫_lVB``D_z˦uOSN$݆0-SN_FHB/;iB NMP{e SB`Ԋ&!P ( NȐB@ (r&>g\wU XA Q~&`OES8%/20ibձmJfO^J?3+$h U;4P<<]s{V۲y.\y.&pVϰslV+t#$RbK^b@w/(w;/싲#9,̲&}ywLz24 lTDž`lSf ! 
(5XR 0츾;CjFI u;.})S^VA%޷S=Ew}ru2r@'s^\8V( +-$=ϻ뻳uÔc^L>{ ъ 0I [JDb ZD:))>XB =I dIR$~y%?;us Qϑ*5*{Ht{iXQ-+ l6Qnϟ&G*WOŹޢȎᚌEq :Q_Lm%%Xhn>v[[7|'檼(ccr~A/v2>pyBGϩUUUURDwR,uZyMҫSD,DGEY>l1)1h\d޻I#x?|ΕoYT^H`G=wjq o%aWL/:ωZxI1$xUe0ʣא)X]ո{Fz~),ռ3 *Z&;kE#*hRꀂ[VzR:t@)m4jMw^wxNJg&P "P#)JBŬ\ʘbj1&I023:dԵX4(ņA"!R" !CZZ$zCz669Jdjm\TȣZ-IM6 FlbM FJ0Ķ j2nVFj1`w%>M)I򊰫I0UZICRYMPji&K-6HԼW:x$ZIxC%0b2C Lb3lAY i`SF!n ҿgZ!e#)A^/]Uju _ʒ;q|{#ٕC).ÑG:tEэ1 >5SB_>4_FDŽcI=_TeC~84tixyA^KI7zCֺO06w޸W35O`ɓ&SJUj{ޭTiGlUɽqKJGq^ WJFf>16[g)2)ܓb&09TjvЌ=uԠJv$;(JD2"{wLY1{α4Z%76VC~nEȣZc"!̉6gWM*M?[a||_5ç0iF(e/\4y( $3G:4"z4$! $fӮyϟJeQ[ji)n@A &NwjƖ-# MVV'[I4ݍ4ll~_GƘw.]glL\ݼ p͞ri,lM hrcG'ѭܦg'cݴ9N gK]Iuʼn<1b ] 5b:uJ\rGb\SӢ#Kŋα6Ib譅N9įI^7B1?gJ“BŅŅhEm~u{pS{K"d!$V[4 <1OnrCKԖF27¼OX޹׬9*,9tA3<:Yc'#CzXS TL$軇Xz:Er6p)gR#±aaaafj]hXXݮKbN>7+ #E;zOs"bBAyC(ԩR0uI 3!^SiL i5*(]9wdyOAhi7K}VZJ(VMLB+"m5T&-F+Kl[*8peG B`h@vJۜR26Y=pܼ$kr=߶oXᒳgDk iyJ< z w<vPo}Q^/*QJRo |b줦hA؎=0+>aV%ŭW{u/ G&KJ $& |2ZEd/{{]Mښ+ FBDA BpgZoA䜜siqB`'#.$gVTH OeĠk gӛn·u1\3b6IcNĘ{:WsQ]*HĺK6EXnlԣx[ CWԓr?.b];WFbgbF%l7lZ& bұdm4,"ܴ67 FF|w,?۸jx/^gj0K)!UW(3p0nڑ -t!"K%'FLXjb0`1,s]-aHP!^NMg9FH:+zj[-ГMM&GʉXtcTY&aU7q)S?QkTuV#X9:O~\܉)`5T{UW@KtsuϗaVeNb΢&Gn`,PAc$_$/Aa0&B!,R,T P8DL@VıYRO■_00lv[:`duͪ2XZZV#Wbƈ.Cupcx5ex^RJ{0u2Gu_C.B>v\`|* |n}mq<n|%| }MY q0$l:I_q렒@$AON"d-@e )Բ wwBW͖a4?~_U2.M IU69"WO%s'PSkZ!U;Mg0qTY@ ijIcU$"$bH` T]5ns{|,{jȂ]a |@ˆti&5(ԓ|L[q 0{ AVUnFq'$Ky|WQUukGwVKBұXP1'(B!F54:A"!jۚ q-505w[c7Z|Ϥ^aPaN01 !Q=!kߓ>sZyn>Q[hM( ׯL."fRZpklUxHW@@VS^bSGkxҴ9ߞ%?',_p(w Z4rh2GGPycw]Kˉ}C"G3,kÍėd~^bdoZT37"W*::d˽/eá:o8]3%v~jpv5 {4Fk)ȭ.v0,{)1E̹R0u|Z6J0 B`'x߮cڱ\+'>\n\&c/碎(ٸuwՀer z7M@O?w|{YUIԣQ vmpƓq11d>(m,c"GFa֓ϭO=圢31#AŬ[S}g9KdC|,} %1~gDv;7oY[5񷽂d7֡H//hH"&.kN{hyNv,k :rZ'>`jm^'WzcLHq6͏5.E{Q gdO\x'9)6wn0``,3Zu\&\<\*9^Ui{k}|46X-*{8t}?)6W&6=H [ {|.2oyaP_I,`p|OUJ.F/mCLmL(t'8)Z:]auʧBwȱߘ6QU}Q[55`9|ү:+1]څ^*s̾2 3߭O׷F8|&ʏ) < xI-&9Gus`K MسFucꇌo/;Hɟ.U5R2w-d AM{1,U~~Y-Up@\q lrTڏ=_S6j |2EUm(mljO2EC{̿󏍌C~ӝk~-+}x'q"Cw(3y+ҵe͎~r3HX6u|E6M"ƛ i˧1fq k0/hv+&ݦye텳i/bP"" Q*fC3F?r$d_eW 6BF$ @TFI S.id)M)rv8z 7[2]l=^J)W\ 
¦ׄ#0$*H-zZazߘ;z7l8Ά~Z( \ ^ndOI*MHn%=/͋4x'~~SWMNYbRQ9S7=(5ya&Eޗ%qK}Q-]B[H@`$%&L37 ?ۢH{V3f Oֹ舄ښ^';0XN:`u8kD$jk~;ᆲ(c"*_:mY>s %`[ScOJ%B:+y˯`Gt.0癧бG`K@-j4z=ѶavrY\a`,5*l;yu:r*zq7m)aDDi_o5P PrԺwزvQZ}̺SH`vO{h$oh7" ޾24r<MOW'g+-sNL}.c a@I *{@W6fWj=Z"~]ݓ|j/U|H }܇[7^:"I4k ;Vy>mѫͱ7LJuh+3|*#i,u=ʞ45EVDZ16KOr\\nCW۪Ž^O^v9=_g(4liY$6WGPWL ;l62Bqcjk ә-hT^QW g0|;$k7ZIy a$yS\m4W1GW[Z/aRc+ Szy~I߰¼ g% z_YyWEOP<-ĪɴR 4XמVsJGz:Y&%s9(q9ۄ_9dž2w}4wsR<w,݃^9)A{VHOSvzT 'WykMx[+{s~ U5{4#! jRk\Lmr|q9`'ѐLa2Kq]oRǬQ}[%\s&{@OΌ\2}Uڛd7_.}n׳ w <)vdTJlkӦcvoc kUIג1<]FS%?Gf<`I^:{tҋ`֚Zna *$lS~.6,fR2W;v@>cCj.I&Y-k }8BgUfOEyϰpc9=` 5[QX94׏3ڃWA/U)}5}fa0#6tiֹ*6bS!oÚswK_Y3cA7gmpxr䤝ngcu.7{eؿ<G*NkCT_w^s -\ ĽG a)Y4i}>([GלoasOeTVF>~w8W*0iN%x]ag|^NъU>_qz dGǜ=t}C[ !]S}>0PѸ,4CɏK^byu(g%Gyèy;kI y/^^s>Tm8SIZ^_JP޻ϹUej"YS:WDqZwDyCbs9ܮbNKH|wc (*bJ»h-Vc.J<@â 㦩3kFl6,S ޟeQ-7k-[;bb}5#J5uKy3_?FXޮh>&]2Peӱpn 鏮{ [N US~m?a~E#LCC@tri2\ NmQNGS#.M0{9n|㦅d32~JSj%%=pzl EKG4 C\9ꬴRRڵ7ءجczڭ~,zp6*{MKkd=&"μЄϝEaƑrbAMmmћzf#Ml'ܾʝ%[)y%I:~Svy4>8vL#s@tGP BK_n-w7o!Gg_3}&+l;y|g LcNRv8F{KU?g_$Ok'e/O*SOze=H?&4_{^̪l'q㯨'Kq ֤ _c5?$+wK@t\96&kllow0C3@[ɕGZNīs>b})+xg;ȷlOy UPQ (nZYBIK,NM!ŪDH}sT̚Տ]uj ԶWx۩%@b27alG#@4[jݳ!+\Z}֝f.1vM8\"w`Ov{&F$y4%۲rp8VDuG$4 [Ӆ4~ɇx,gatt^8jl 3& #FQ ~xC F wkB=.@ëLGr<5i#O΍MeޣcS.8-EBlg ԑ@˫~Ctn4Mػl{Zk w#c5JmlDdMP9gY7}`lG Gi o줭S>/8-i-Mצwz0T|*Ҹ]?p2.ykcd^mkPûAz3F*hכjsvUn^WoZ;ɜoŽ4A1NԎQzxJ*7S6!1j0P0a_C]>T_ T0qµI<@41o< D^bR3F=2D&FmXm$kwr!vg"UVE"fDGz 79*4'+S͛nOm|j鴓^kA=/ݿ~ }KH"k#SdG/w y>`34쟴ڪȆ_Ό 6( fDHLwjL]V}Pl;;Ne3vѽj/ūq"Krg!D,x htznT_Fm6k>j$7u_UXj9BO.Ha ?zOg,I1"WCLA (5xCFqG1|0b4,@h8JNn&ѩg7PeH3RGKӅG ؒ @rq^üeC( <:L8N8rh^O s1 ^`{cJot<9l"lyΈbΝzśGxIE_`0_yO3zvH^8>^8F"e$'Ju,!{ޚ`pfz͗vcO]i `$yic O;~ӛN)ƦPI蛳 qc T!ZHޑIcIt2]S2zq"|IA #r*g6jnHS/ϣCoKw鯒bGſNOW+6OQ1 IfW3J"s})5ޢuY,.OKK!.{@h`C bb`jdh31䡦 (ƘR&[n) ?q[4݌mĪĩ *L#B1(0R@+(Qe)1,(XŌD ِʵjb3)̐$MI!0VG$Lrf*b&VIGvhB X{ >u-RGswQwg>4G"Kd-R6)P:ԞfCwj D #>ʉO{3}?ٻ+1؍ )ex|<#骆D'<7m%$;6 f)iD^ 蒉<-¬g#&Oah 0b蘔VR<ɔxѩpY5>յ/m)-110_lD#2Ң7r"Ts@:`wRʦړ~111112#W~pDg~/ž,/D,z6Ki~cQh]W0J thg>rB8}(47;Rp91 qF`s46`zY-\&~\ 8B<="YtD<^*t 
j-&$|P RHD_Ro7uGϞ߆kqwP p Xk:f}au1v4ME OWSe'Ë By%Izu볼CϜݵV'%eQmɜqac5% ,)*0DcT~ Dr`-!9ِ*b 0Z!/Yo30 ,B`85.\}AV`d&NA/`A=>$kΈiH֚SA* [N@G6YFb邟; 'l{T_St( wJ]o5S/NlF5`s 6M_yK ^c_~R>n~'N17ԵA}`z?Q/j_>t9tAYoPh"b  }j?Ҕg D&/~NSz8KN:*&cH>R"#Xi8i2ӂavfx'jRZD 9` |f Y\e,#U搚\jq%RF$4hpV}^M G(?!C2p1\P&G٧nu6lǤX`&XjJ ,H9ƄZI H d4)<' z$fN>[{o#'ypьDA jλ{\Y9Ӛ{DTFb;F5ժI45Q/v[/ @ind.լ+6JȧF~m_^rm7p g(¯%`4L[K?vvݮ{$ Kׯbݻ{ۗ.YeYeYenݻmmmlmsji뮹*kDdcȘ[-S!ab0JWF1] m†H+^6dMp|`C^X 0|)IbO@~M%"bUeq&'lk///M ?xU$ H}֚):c;C2a% (=`B4!6ߎ[Fi)~/\끅H\MZHDD_o°SV&p9#٩&]סE+O_R:BYpUρ{BF{D!;#WW^p A0xN62g_7}9xcT;G'؞ ~ O+ JӜ $dMe;C򯢵T(UP>{tҥ tw 4)F4(:M;r3R {W{U^zynQB Aȟ_.'޾Zw+lF Q@IMJ@ wnx A%iϩBi(IJwcP(RJ[| BA^uUI Ewހۙ;=3{/{$۹"T gc{F $RUs3m׾{ynzG[f5eO}TDR*[hR) $Uh^= ;wY@ gӐ{eJh2%U]҅>^{`=ڀr€(ݽEِ(yP6@( ǥH4A@oE%S\٣wt=o=ރm(UKmR5JT$ P PRB X©k\Vƹ]frfUлk-c޽ʱZ12 0&F Ɉ1 #Aa10LM4ɣL4ShbAD&&&0`0M44L&M4dф14Mb4h2j` I&L@'`LM&4`L 21L3#M db45M&M5LiOO$?LTOzM1Oʟbbz)iS)i#ёc%6SoJ?IOTؤJ DH 4 & SI(2)OTTG&<?T)66C@?T 4 6O5'=Mh$A@ h&ɴɐae2b02MSLj`4 L)iz4Ԟd'I`JtmXX2r0[jʅ 14 Ɍq8iCbP2> 8$)R 4𶻾 S)GHɩli=6Q"vL5648%!M"+q5nS']aiO{۴W|7xrcmZ٧q:Ze Sv+x95T eW ,e,~5?޷#+bYeُqU65M{zKe3MyȀi^`T kT}qǹ̈V^x򮇥XŻ2VVV3ZK/`i\qbm l֩ۢճe9qt%5Y_%6tU?$vVpvf2P Zs.ݣabq$ͽp&lb ذ0=sM㛑,֞$^x*2fV d-6׍m7Ow=V6mͫP}Ws ఎ:|VGJ^I; X^F&23)fc &L|X#qmh|U\.rw-n7M].jqcLa1a{щz>/>zW/vJvy}N2sV:<<HgWN2cHmg.|_./Ӿ/i6\,*wXi<ky%㼗]Gm/2\8`/37{e-x^ػ]U;׍q=\5vq<" }[|֞8 ztc0b^ۯYzNu_zG;y|}ut 98]KAcy<٘݅,T$<"`=@@4:2`f_S"$0 bbX9z"mzhunuqb-g`pifU?RVhq/Îv;Fok}*_2UХulHEՖˤ )TL \qdI@bupSn6X;ZhiߗR9/-Pі^}@+3ky'C:%*4"'W|A 䓔XƆ  pg{\cwmS,T6>/cny|ZfN3+ǗV032̙Nmƴo6𘩾}>Uu;obޮ<_[U^xvrcH&rVeɫ}o1]7ŋ>eW=\{lqy|Ct"]yVb56ߝ\]4/k^>X]xH>gJ3н׼od6VpKxz=pCpߗ{Hx̜ovֹy@}{uwzUGUy{}ʷO~{]-뇆{Ӧ|Uyڬ\\]%|G,+wG;Rf wڸsW}yǽV;fz^{]槟@<;U^{z/8tr뗈<5]}Wt*9WVbC#bGkmdae*L3RdV``f6ŏ8NLX0f,f1(UbFJL0ņCXR`qEȸ\a$nYf%-cxѦa4b`fӐs5"ZbeMRWf<+,1,,Xu;EN4ma1ł0/JElZ*JbCIeYe>Wv&=|ïBirRM |$M6u %S, S251LeMH5nOyZqVSZ`!7Ap0-dN00RHʗ-qf2eT ԏ|ņefYѻMaZag"vMc5d.l4\+x YɋӲډTOX{$m&RQ%N?ylfhмFM,aΙ+0i'<{g(r9+R}IabW`m%9LǛ65ֱNGSR!DSl^/QԑJ_2zV˸,U/%HW*p64]Pa&_ }ۤZ1f2-M x\YF:׀ceӄ&Is0Kǣ= fqfFd\ i0 
cke s' mOf}LDeR *ZR(%WA9Pͬ0mT&51mf3.^h&KR鄿,h޲ rKv'g:~s:E 8ǟTN:='βwYpz7;n>ZƯCES" 0|EYBʨO 涉Yb7YRpFA+T$Y1Sj TȲDM9MNHL+2V*2>5|JJiIV7nٻ80EA(snN:j`N*88qn09lZeU#*? E+r*@MYL[61a#,M,ffR+2̉-|{fAID6 X;R+(} 2ޜױp{vlvTf xp<%+'jfm!ҧ*)?_q|#H2IǁCGGeZPH|pԜ-Q)<6ӉC8@$C`I ;9;˟qT_x8?Hv 0d1@aQVLb5UTcS?ҿ]x]0i A#g J*HTd@=_jdi.v"1ipYCeOAWAZ}%*D9:=ra}tsTΠՊ> :19Gz(WzF ~mh  C1$Ņ"+aIbV3V05dU0&LC02V&2,2Xb ,* $,)X2̘`abɌ5i)>~ K b])WWq K@//o,|:u+k,K$o}|6Q#\X'(V Vr(M i E7($3i>*%>/٠0{䳻LI&% f/=(w/R $/M #XIl_sl>U>%rQ?s%|?MZ;XJعMR mUa7ێ%!#p^ܼcb &&@ao`53xЃ m`/πXc_d^u*Tߝ֢ƀ^=`>'ʢ3ĒC'!f&e3&11f1X33%*IRC?X"lgb?|,f}NfYY]&6m4! $3^1dao& Y3*2,X)Y`̫, 2a2,2&aff1LbfOOK_J7߳|c\KyCP!6BPjI G5 GѧdCd>s/!Sx\ZA$$Ҙ|~^/2E˜_V(K։F Uqn )L Db~+`dʬER!m1 kRȷ ʲ3ﴓUC# mzSg {U( >s.gQWkA?rde } ZdOV*a?jyd ?gfjh/?+ǗI ?}~lzށ>7a[Yu`Meٍr2ó)e04_wJ՜59E|-!,#~Z|ݾgq4N*9=~Uƫ\c]@84痕KC ̡K}a׆rI!nD1L$VY X2ahM /qg(k" b؎A:pKl~@Hoi-jgb|m T/nEK}l}_H佚,ahlT =w-ΪzU%vy$$ ؖ93}q/K͋-$;Bip>,VFƈH`kܣ}cC&L2̕RMJ}"hFe?yid#|jT01f" "D$2n`wezh9ւLlF 8:ptJECy3[2.j]SPvIZM! QBB@_y~&Z0[axӶ3͈M"ߘY`ьsDD *lwg'v~6Oe 4\]=0ֱ|WQHsN!LI3$Xk@6~4 qA&'4!Ca^9X/!uȰ.ദT2MCa4,XbŋEJjbœZXXXZ/Zh1e2OS)b071bYQ,,kfubhKubMNfԲ\-CKKTŴtn0a6.0L16[/_mNO{{^35QZicM,[.B\TMSEY iҎ jQ™S)a[SEiSp'qEld!{bƮb2jdť&LXɋ̱M)j2sZX-, KKKKKTBmidɓ&,X&,LEqbM#)Kr+ u.ae2T,X2XUdS!d/apY%ɕŅdŋ bŒ*ĿFuLUa-*p*u*-I),L ,XbLʼn q5?4$p\EKL§&L|YQ8/X[X1p4ZXbŋ,Xbŋ2eZK!ilMMMTɹss&L2dɕ,X- Ţn-`ZZFd7:Q5SLmL M2 XZXbɓ&LXbŋ@-JV#l0e52dŋ,2RŊd&&L1aLY SɪX1bŋ&M)jjbŋL qv.V,[Zɓ&SS)t4œ ʼnVB]'I&,[F$,ɵZ. -SjM.*[ŋtqkR\LMMH=Ue oaꭓwqzm0ÃxNqw0٦< ] x=I rEZѕ~?/315U|sE d )~{Uml\O:+fMZEw0c0?9GZ $$7Z/߻iU ί)Vwu ^.Ms`}*L~TuۡC>fxښfXXESK&8*6̆œS,&Z3XSTb&MMF Udj12!2,V2c11K2b11Va"cIcIwϙdq,Gɂ2/`>6_G`& lDL4 IgXsfzeXq0`/s0^SHgX}J9 ~?>{qײxAl z ./[r%r>+`" <υ^Sn%_^[귬s)7J]|Wlun%5>ǿL3[m"!WiNogfm;TtwnBݐ㦌0f-RGh!^h"!M|Hgb{ULntm 5O&TsSilrh=RxJNBDPC`cC q HX]]Kfe&+{k2P)r#q=͜m=HQ;}' ;/1V2wԬpTyWܡ)6|u yY"! 
hdo/㵋i i"boq;SRR_A5H]u*иߣ=DOЋ.)CVP}ʡxArVA9&i5[`PO{^'zfoc"%3 fyq*OO'تW={Xsr^/+טD 쵋WPuګjxB31ZY`IN$(ŞTUi,e; $`N001yo.F:u1\v9}L펖'ϑ/Hf}#[)q9}r>2:D*kr %W{'}F*xǣKh)U"&hof'%v75Ԯ{_$I 4ٗ '`0EkDwibNXNd%6".&t(1tّQ܈:oE#vIW^#unyѶjyydr6¶g66s-ǹ p*qrbdx`0^E$~67ý6xMFq yvMKska(ssIϔR_Ω1?t Z +2̖ͩ2g|nt-3IMe7]zŰ( F kpOC@hO)Mn%nڃQ?ek`]n`9<=*^ 1ٕìra_da' wҁ7Iϕ}Df PԖ'c^P8*s'n+ntnod lrn=pCD ZσSwE@1'?N#&Sfbv1ց2t2ﻶOi؇٧*vX= &x|~Լ3xyRL_8-P@ @5s-Pm+} cw_Opڏڽ7y'͠0`9'̓Pi[@!,)W"ڛ^0'|(r CaUmUR!i%@$JQև_wef.SJx(μ(U9iX@q 3VuD6T߬UP'd뱿-)LNTJˣkbF0o9 7@hD@D57NoĶDt|Xܰ3x3-C:`&EѪz|aJer8_Q?iNϫ>Od=۔wz(wH$!ohS1=bkjIhO/OI'e2BᲊU,!/ B>gOİAئ4cOO>MtIX`Ibbb,O=Jiꌕi1`ɔŒe#%"łŊd‘bL?--6,XXL &&T,,#Eɉ _JvӸJ'NoraIĦcsE~(77Wb':ⱲT . U t?p'wՠ~Ƀq#ʴu?Nkǜ͛inYBd#֕j5kԎ'r)Tv*yX<%;ڟzh0縟ПҜ}ӹ#LꚦW4CL0XS"Gk{,CSVe e,ZH0LQ/FHIWdXʖb -ٺP񸖀M,_[ﻭĘĬbcjb-,\)>IDO^ %e1%IjbL,,S ň~qSp^{W>JdToS"&tbWRMP^9;3sKL2p&j d{/lE9m4MdbɋWr[Y5676_8p>!uWJpz73蘦&N;K[[Z. &֧3k]p@!,Ʉx;1gV"W_Nvfe /1A6%^Mq95w[dbb⻮fWzpk-[[Xv݃/njjslrjpI:L2lŋf,Ze4ZSSfMdٲ eD`iVS&́0Jo2ulުe7#ri=nZQVMf u*pV,[6bwPǼOTR=y.4,F {K,.Cz7ZX2iiibppp&LgD)ɺŋ&vwc2dlmbr8Y2p54PakyʺN\^g [8)Rp7wdV2{G)ޛ 'bu8ާ}Ot7k\9/[_I:M֢4[Xp58_B>n\f N(7:MM-M,\Ne,Z76Ux K'|ŋbpjq556o7] yo7[LMMͭO7fXA%+Ckhs&.Y8dܙ6U-.dɓ'I1hbK&-N' S mdwN֧yd[Y. h&/*ŋ.⸮3Aбjdbbq17]+l[as[SdH1:VEw/Bl[dmM*+&\x)6be010p^/9i{2<ɥ֥]w)lZj~켟/wOy&&OU7LLL*b1*aabbbb~SUbbdʲbXF,#&*ɂa,Nb2&)VB- o|9ʟ!b02Jɕd=&_#}SK52.I,1LU bŊdY2b`e(,%bŋЕ{-AXRŋ&L1dLXb ]mL_iW`dYV#dC!)Y0Ǧ%!bM$&f<!GS}{Y$i |'}Ld|g6fgc P'5UƼ A0PD zA0T!2)6SmiYxe琟:Ax+[ K+p/Sb~l!z_ߑ&c'wz{?b8ϙK5-0E ci%yj-(Sd͒2V*C@Z_V\/Ch+>צ%? ko/ #U3&8;$9@%z6׻hKh=f5b0 ~ S_c(os.ב82Oc! 
7yfYpKcwUb[Sya(WgDϗBzs@stGۺ7G@T2" ضU8Ul+pSВ:}3jc^lkssU2?>4ȍ#wԐnO_#p]4`PijQŲ߅FUdaY= L@ƹl2C jو 3wd8 9C,[^addŋss&LXb,Xq*m4ad,12dť&L\,ZL--,XZZ2i54q4y&lLySjs2LKS &OS-ES&&M'οÝ;ڽUџNwYiœ?}1tw(a)e1OUs+*8Na]$ZD M6-S `Ŋaal'4ڜi0"y_ByO͟]:FxUKS \R]+C"T`u,yddyr'WM.+lSIz4S)x'*ɼ9'1w9%9-.K)ܺeO.*e5N7 ҝt؃u8)1nCM9*n]uNv.sssssssskkkj5:Nɹg3un,Sym4#aŕԇV>P LB'֠׳rDÑ,?yw٢ayvFջڣj13[XP;mmuЅyzϺ q1zu+~cqˤ!)OL%$PqqBIeq)_U@AvHBA/2%xĒI}o6?։f".&6ךl1)_A?Z0&"eU_nX B_/6/Tԛ7?әq-Rqy!!.M>j<͐wȿz .Ʌw'|L*xNg9z洼Ld^]SC62DbEA"`x8q` @S}8:u|r9Jyoa~%=x0w569[TLД@ L}tǁz:IKЍ@$ABC ~Ь۹BH}8U{].xa sQqN)G2=VGEI'ae[ȥ088ǐ"`Xt7gy@lS߮g% )g]-#~lDa0wgSs@tAs!L5Vk@$T@@abǿao\iłL Ͽju6xfȑ$->-]_r,XaTDO{bVE_D~yU7ev)S٧t?+.w}^RAGe1U=jam h T+qad[,.&<}OF#a}.(=`Z@O2r☲~^3gY:/̚2O?Ȁr.xO)pz%goor9sl3쾫E'Ms {siAhHI.HwmH(RD@ʲx;Fgb}nm/j˽KY|wz5c '|V0`l85/3._2AUCCe:U0it9rD847vN{`]o;D'і&,L/XJM&m7,OWoW{\Se.gj|Y-*J9tV) b"I4'˥g67W 6, !I ]RS2n*/j/5_@5U?MZKe6o*; q$8+ a? R^7/n%{%_-‹EPgDdvr;9& :Lqnh&ZrkhJAf]>C)iteK $CQ{n^W=םzw&D6I{mY"T/}IœO*i|dׄϰ^tť&ε^Q^! ?$Ud^ސo@)8Ȉ%]Q_g|&O-d\|+) ?~> ȿD5_Z;NWm~Ё=ldDOɺZ=Ŀ[U8_NWWOuΩ&\"'A!Sۨ~*rK$;TĨDP!J$ D #֦,3N\:VG7}C:^t~1P=ج7Zfm" DB1{f>Hjr< ' 2GSGr|Ɵ>OeN^|{?O;LU.vM=;‘{*Rc=sWtf9pra; m!%Jab/Q|hEO1*1`}E:$ܾo+Χ_1%scrr_-Qh,Gÿ-ˋ ë́qdNذ'Ƌ"%O-Ν`E&ahZtkiɺNKi~ָLbuj]iжM. e'jir7|M?Y<L0jQ^\Wxg e12o2a2, *Y ޸. kȧjjjEe1`bEԼ[SSkS'1T%.S-̛6dܻ x֦xʼxܵ<sCrAr\i񮉴lO nyM- g0i4ӱeЪw[n:J;K5OqMZ  9Il.a^8u/iiY)~VyMdnmO!O(r7/:jGS/3sdZSVo+%2" DQ` B[w߳!$rTJ+[ j#Ľ0 2D$}7k*]WrZi2p"~W/7z11L,S􉄿S][.i N5I -yV:9[o){CnD9nMo=3]U {!] grkp M$O @! Ld51bŋ&L&  &"ɔLLX1L`a[&J+Voof/ x3/<()?T}?Kx~BzgVliC;m ߂ ej5gC"/PhXVVjzAlӫpfՔ2D,x8ȵNi~?'z^~}Uvi:ikcM2c!ɏAe| ljNUjYj8qjnl\!tnA Aʜ2 x0.5I--,ZLLL1Le50ga! P@bkdSt{#s3V o MFH_|]k*IhՆ}uO;#)sy8TR3a("B ௕;Q]IlN*֣znq'koa{͇`pzϊ`D9}S=Gy)dS l-"b#Vx.T-un?="~7α<7#&26JEBmPf7a)y:'j&o/qBǹquܜ="9 d9ȺŞODl$Y^Q2b;^cΞ U^3VP󻖳8|SQk?t7z]2bOWc8mݪOPr:He])ZQFgv;U(fs:;{n44Sun  bW9YSvz ź)RN7|<RQ)g)> vzlL΀uiB=h(G:fk !*/Y(x>*TE'3]h'k_곉*217Nzt:>跜OIߴYD EU|1t[E󹴽)O@HO$Ĺhj&rm; 6o?%s.uvd`w_B 4. 
:i& ls{C>qWՓ=K_+5ZvRR7,TK4<3Ordq'?ii\<=K|ɉjҲjk͞VoD!ɶiyJ^^`ޭ|3>nͥE] R }nkn?t4|^:eAs~8KT*ɲS_U2ԳoTĩjWZ- fe7:6]IDX]U/[ 4ӕCqM&v|ɲ56=},#łs[ CF7F.[Ԗf|]_j>C{DcJt>6R;M[D\S6eF5Şan7;"g`x|wt7O:/wo؈3F$fvKs$xS gDž3vTjZ8Qؾ# A1[ĤluLg1n%!u^O%ήmGɰkwP7R5 8̮YW"1/勞V?\R+DNcb'{΋ʹ~ q<|ifik1ҽXE̛<=²)+6J2,lmͼ^?79op7?=vJnO 32qyGO\ GZ48:;T㪘u:/||hD+̳V}2'4tb ckTōlj,t>7KlM}#Q'̽j٭r{vsߙfܢM,t oܩ08v}K)eJ}Gac1*.Ȟ EH}(ӮC_h_s$U;&-b|F֜W"{|OSU_GNLdkUOXj J-~˻b??_j_l83LrKzT$화6YB8?6E.7iٻH!=q(yQZn%˶Prz&ZCr?&koI)j>q3G/u6묻umq7NJvJ0d6k]_ӎA+w;3wmOQV&Vc7sHywz`ېll'I&Y1!,0xp]HUɇ.,|U.{W{UAXpoϣ+ lS}J]keT%=r925a ..J>9I]>e}>L)EPW{u2WU I^f72!Hwa;lo LDX)b57Wi-G1K)/BQdnѯ0"=&"g~د;I QuS_'wjR'!z#rNn"*')^m_lO6fznn|~Gv5-7 Ep}ӭ~mAo@[/03RZ{?U^| .cޟmS#EapFQ5,QQS[UoFzBN^:ڕ*e0p(z}}`zR."V/+H+^wJD mGm'|Ϫ p^E"%D.]/|JdL$#G:?}$=x"w'p?glp9-1l6*}x$$@=MwCF!LLs84n;Xh]R}#;hϳt{s.&OG&v@xZ*wp܎}$$<!KOPd8anBJ0v,}TW=bOE?^ pSda2d̙?K/iC6@=οYK~^n/FRo›^^Qg/ 7hR=ڿ=)}kcBm\(Uj b]7^0$ѣgd%1%E-QrXQFD8Gul| ,+=# @:pD$GrX" Jch4@c0MwPBDL9$\ PcrѸG+G@5=bN!U2kFd$pҗRb41)T<0G 仞@ֱ0=ytݤ&{@ !< /W?}ϕG+wbo^>Yv/CӈUϬh걅\t_P_{FSY7m%x^|[^wxӉgT8:=0$[æ#!$#;\8 -舴F}kD7mZ8N ="<6B_9vN\ɓSgjMuuUźź܌[|}d?CI8C pܲ`z[>w}Ґn,0φV_L&z,^brkus}~UtU8L*u|St:Tlpibr$(DT"IB(E=<oo.G7'qU׬;Z+d+ ȭUzr.h?SfХ@/7Uyzv zi;w'?:帶[l-lvt?8PeXL2bnrb0Xb6-l[,F ,XFɵ ia2dɈn*E;F * v+d2|q'uJy Sf`kEXH5et=ȒP=|Faaaz:~~1gisg(%’%%E%X?tU WA;'0~DTQ n,,,,Au'Y9*8'6̆L2dݓ5 QUv\;];&JCOػ,m2u*?~?Nr%  y 887k6Zg T*T"83p(Vr3~-=3yr PawJ?{QyG_%NWxꌄ5~n"ov_mE7m~@BKIJѱ@ ,*[ Oc`(5D2oA!"*vMCֶ9--hR"ڦ2 K?|I8Wp>7" !cLRbHXu4}CC1x (IK%=qAC"I{ge}j&0@7>J/DϷ5(< H/]'0-3/ԛFp1VAyzPjs3(hY8u\ߍ}A/Xk*hKQn+N{KAijЪ%nΉ Ӭ>AP:ָȳ҄A Ѻx q$-2ܦgv`C&q /49Y,z}eۏ14.YS?OOyz |vLb?*~^MqYoǯǃV84[)yφB94~_S{LͲ1l@0 Wyt?'B8 k2Y3ᆪG-I&BJW@s&`z]/o̱ eBQd"’ |l5V Z"}N$gP3~SSXe\w|}'0hg]/?0L6IUA!Zp$%]Tei{:-P4A OJ[v oѶjk%.Rh=Q 3 JVFX2u!?U*9w7,'cUNĢc 8c\!%>nt.|H¦7?3k[<3eՠkKI,RT t0n=^$T"O73̳@;w کNN"1S<'V t?i59\'-oy`_늟L۷z66"&5&.2}}}S" ij"j'ߑCƦb7Ljk,*YncܺB`;,.>򁏉&w`Xy5]ZtXx+*SlGmϪفyƯwmO{3TP{Ln5IA=^b tB9y|Tum2dy~o5XiDpF,XX,രX.ŋ \Tr6bbd\ˉM'#rN)}un--dim2l&y7o7&o0!~3g.].TC*>4~6 
sBoӁ$\7Kavzme@Nqy4sUbE?)z./\b? ~?~) ,/hDq0dD%;C $C ӽmuOBxk߻F:!3}a'f<\w0+C*1 EcM,eg`];XuM+ɪjs1?Ck ?_xanV6huVj*_钋~e,_O?vװtx9AH EŴۉwZ7PsD*|Qxwhy f71c1 )?7J8ŋf1`CHpsK2MZp3IGJ `'`pg_J(.?{vw𩦍) ŒŔťdU@X%=n׎i[aqn+cI=3 ԓ s/]q=`!WM&Xu2#m^V`? #-ub,=:?3ԝ3xg&xigzuSycխޙxw ?gdo9ݯKrޯo>[-X#c<$?Hӌ_H$ A B!Aeeeeee'|]뛡SIc innݹӷ~\Mv &$!$^ MH튊(H|!#G0YmoJH; nts9%ɀN0 0ƻ}X9|*]0{F;L k*]ܭ`]!\JO o`rY"(͓86܆MUUcR ld r!gw"Y?,:%ޔ_`s[ dw.z2|&sC ڝD·^Ǧf"kpqGF"/W_; I$Sྣ'>4\$Ȋ|wuT2Ο3O>dɓ,Xdɓ&LN4ZZԍ%4Y4ZJjK(bVSIдZZXMԖ_z3KfT)Ŀ77-˙mttZn471jn[M-Z6[M'q[%22'Tɓs58ʹ%q['%8Ӓ Z 8,Tr#6N )dŢpqN SV.S4j[9\%hs1din-MN-mB+sMjb9-Ȳm8$mMM֦eCi`NkNb is6d7: ʙ+[ML0S6ɩ&uiijdU&YB$1dœLY0ds9 iq YKMNE&›  n3-MM.☲8'؋ ,XXK,,,TTKTaF^)iL" !@ .'#1dh?@ ǖJ6ZlMskksskkssfem6[-'#96:4A,ìo|U/nU'ݡȧ)T{SbLLXbɓ&LLYV hXSH`S%:%a*}IO Nt \'5MH:)L*`H6l4EJޛӬh`#.DdIԦTdM;&q,)Y2IdŅY7KETdiLL[X覗)w%Z.LJt.KadJ[-T+at.l/r\ŅpeJqv ;-v;0s$9ӕ9  w)uSMы4Tҫ%dGBRU+l #Uގ. l]h/W h/ [/82dqe2d]L2C`;ʓ|uԚ_ixpoQ.fd"a2E"~Dod8O4] b ߭ [^{5b4Ӌdɲdd836KtZ]+b\.ir\)SSy1566ypNt[\mp-..W+K费]n˕廃yssFMۮ. &*:IS*,,L']RQ5.u)!\9/,^g؋~KpKpnA^;8q2UR= b*OU02d2bhEv8`4d?ԺI9Bк r`[Sim4)al[&bl4[-&,Xs8[L\,dbm4eLM&L2152ptb^z01`u;|CټXIw']:MlTwOf,&`,X,XYLS  ,TԋGo1wSxJ´EbűlX,Z,[榦MN'Sst8Į&Ss2njneL8ɹ&MSnq2jnq58MBK഍lbضF,[--u eSd7%q+nKeŋrط-uE[-֋en[*6X,G&vIwBQֺ&#&*}i4U.crlOu-n< )L^J8 EFrinW=t^u[[\Զcg6驓i6M&9sg98ͦSi9syq98E.)ʬ9sjrS76hNi8MS*7mtT]HZ[0IpN3r9m8NSnf[3Sps4nɆ:gZ, %0`ŐŒK)dK*KŊ@ȲdʖL*ɔac 01 d &ɓ&Q1&@ɊaLL,2e#J5C&& a0Ni34@"b4ToiQU)<Se LU,dCIp1>Z:Jwf0#A#C8qdɓ9||imWkPYJ ,1Ss'`M,,##aa&,,X_QM>"P' Z$i@&SS#$$bScP2C P$OkJI$Hi=ľ/W^I{iu]?>an=W_^'Ώքjyy\ S4]\ƚi1s0iɶ9 T&ApLPHO>⪋ќdfJNFJ%z}O^\ʻE;%^IEրX`LW*zrH`fF34] iMR0)nL}z%&I!S䔶J]p dJj _ *LzKIZGtbQ+h2ONP20DlfU;dɓ&,&,Y2dɋ$ɔLXdŋ&LXXdɔ<0?z:N31 t:!˿OQ;"Y<SqۅQuQQT)8ŎDl.mjsU9%Ҝn S !ܬ,]2IWCQ̾fsCNES$=؝3B{(Gpk&TU-H OΌ/=p`**p IVzFv+:R6$m+[ dQZoYЁ6m8nX"ıq z/nfj Ou 3LJ&O?742OV/P-,X %hb6F,X ub6F[k.jXZXO~QE/%ԵM™^GJ SQHi}'|Z}Uܪxэ? 
2:fL1bŋ,XdʼnX22XbXXb,Xdɐŋ C/$_xEܪ/Ɵ-*Z.bagJ5NT؏T}z_>IH9##Aa`L ``BDu6߇{P:ǹc{cKumzigaIaRo5s޴&1C![,jM Dɰ麢de5A\!~jOoI|~ϴEFzb ]KC#R:켚B#$bd1b,uiiQbŅbŋ,XXF,Zn i,Xbŋ,ɨFO@gԱ(ܐ6K2>)RمgVb{ H儯ݥ<Sۥ5_gK#$ %/Y>Q{]Ҥ?;Wxe_GlGY7L_nX,-)ЧR蘱ae/1s+V& E**W '<9Ü3zh,6$$aioMMiOɴOql88*aL)ȲNPzդ4 )ZXRX[,Se$NŔؗ*}WT3k@$ȈŕGf6Ob-;_q̈B+($@ 7R& 4˱ϕjKi|=5;ʨ/Uxy~ dzqȼbK⹩WREi|iKX10Tbq'%U#?T;ob@JLY0q-hV5MG旨\ z.zœ 0R.уjew=ꇃ*:F7úO5V^CDz&WOg"dkibby_#hf}*ה*,85#ݫ~yhNanC7\Zcyt}ޙ|tp7 IB RQӪp{$n҇>"|˥-`GH܂T*&:,TO,+s%\v'}~T'BUPޞ0y704d؄&X$vRʜdؓLnn*s\EKs0` 3 adɊd>Xp5Aq+xX}S\OD0W .o6x?::q)ǹɉT]/i~Q}G;_665fvy)) r-6 0*%(}`b Y:CGKiE81(27*i&1m4+@ qc2R{$[Si塚=#IvԻ(rzzקUXI|CuP:ºr 5ܳٽ=ŅOlt5M .8aRS'B.fT.CAz&+pl%0XtanMRHxW rtЏ#S$!%)֘ҮrO#ab)tcbً-תʼE'ZeE C5VѳP^dvm.L/-zk'\?t+%ʃS5մR҇;6)̯V"C%1F>7$,R$+e2^'Rd}\II_7>) /R<}g{X\^ԧ>Kc{)֟jj4!]«ǯ0˴ ryoCK.lSC- LLXLF E,S4 /wͬk(6t꣎<+}j|X $$0$T 'HJRo=ųM7}4zw?-S>'W]cXryʅ`U33 &68ASޏNo"b0 #㊀QA@&@ԟ// [YO]z0n-X.5 3N*ƍSGTCG:*:ܼR}XPhjZX0e,jZYVUZ&KZ&LL LC:ǒUTN6J5Ҩ}Ҥ{b)t]ɏ}Xy L"aɢc?ٞiSSSupY-C%2K%ԵKQVC%p0jib\- Rȫihj m`[7hZasƌh0L2dɊMDe_ۯ/[ kkmmmpXM&&yQɍMyݔ[nɩ&Li&Lɩjjjjo\4\-ÅĶ\,Mio2j71p[XbœSM2dɓSy57MMMNsɓMML<+UQ;^zecJTJC}{{8vnICdh<[GM?W E`dwn|y?D0yͩCx !+8 uZ}/^4>Q/!?.B~Κ|eNU_1ĸTѺ$}mpN@p}A͹T4B?9P]׈Ѫavwgףo:j|N;\g׶n6'9Q!O}u,|#O#v_YeQuEX ǢBN " .vB0ɋ޾(T I/NKjP}aO֥'Rae}]|檹>%WAjptbcUWh+EUʪNpI\LJ֪N̥ڶUE#݇|/|^wEaeL>ƙLX^~OH=!*2u}h"z)Wq*2ҜIm ~Z%O>7KRM+!ܰ89)2 )e2 8ؚL~{#e#ԫ)R0%pfqj LiWԞjN b54#KQe2-pW2|Bu_mxd $vԭ(;/x^,U9/Ew/ƧxPc!+Ԭ/MdRչj>J2Xp_uFS}-/Ұ\s槞z3 Y1XLadX &PdRQ_;ꇾTtgnT#z ĞOk{n3oWFk3 F=+InҠ,7j؍Q_Ap{OE77LR4IG,2bLU(?oAZ.&t]$Nzrs4 .L*=^};F.z{%B xBZ#g n KWY:?Q~jT/$ˆEQbUY5B֋E/J~s*ɨuU:] wP (W }m^pƧe%2_mdcǫ<7rѿOj5i"Bm> [UIxbDDw垨=t;p},骏H}>wz2 >p $+ϊlwŲy_2@kl^p `l!Yϸ"]ml9'ryU㫕&~1>I5n4-dcvfՀx>kQ",44` h5L5EjkFϔӄĩ5R b+de?# atrR [~e?~u;-\P$D# ޡ9.d#+h2K)OXxKG_ۥX/⇵$MOQKF?i Sy-& 1VAH0A3dZ6R=yw>a.s= /a>qioJ.HKWxjmw_KD? Tr"W%\\.a0id܋&ʺJvMM84L٩/bR|i^vb.ȇ')*d^YN@ *2D2}^_=ۓo<, -˰qSJ515f&WA$EߛQhJ~ܷ朋#< %jk{Ј'ky󪪸)`bOGF=3 . 
PȗWU`W׹¾WُJ+SB(~x ce2«c+$ ʐ**_"?SLIJjXbܷŰX-ţ/+ʔ= ·kԸ}$'MѽIi.SwNC=oITGꣽ\)=BM{ц'_8.4⚚\V⸭qZ%ĸȃ zU:6 zz'Eȱccb1ۻ<'ha{Jsom6~2QQª]HL,G3Jjdŋ,a1LRS- ۣ>NS"0`%I eA붟u HBA*~^m0 ʘ?"6 K-IV E4]P7}_%Rb+W9'FFF#X}Os"i$b0ٲ) }fOr<ބf^^2܄L]pp dTCgCǕaL3 %{ E1w^{uNIʘN)d`ؐ&岩CKO)D]SEWL>A?Ea<Gf;iy\*wy'bXyçW4L*K ę [BŪpF1[ib²2Yj 2-JbKjfT&!lō!ZɂZű6IaV"XR,P©-LBxbI4I ܐӀYcg\yuߴuJu0i"S *0W ]4gR/)%G+ѪVI}tZSʜ$*6:އ\$A0!غ\ʇ>z.]܂<|J%Ɩ+eIP9[XTZ堍5xs'xn]G ԵA0m(ᜒmA?lC0TBϝ{"_s‹by%k_~_Jz\ /y< _rGvSɮ'@Gw0/3)z:"hآFn/~w8g)?x&4XOlheMo~ rQOsM)[-QmUn-5dL۴gx ~Sjqkb|/{~/X=ʿF,,Uwb}񾔧J.;_a{s8@_0H$[H#O1GdDmC w"P\/!~ -s2`UP)r8D(mLa `30cQJ 3M&?)ݦPKa]X12Z&4pM&U_ ݪ1heu MeI`#0Mp`ѪiO#aOpM~N1:)4Y!eX5GrZZKZS pA꿿gyrg~jt |WW̑,RayEa)9V D,K e_qQ_?؋LsqH[e7TмTPARӱ1? S)+IYLRT|$v W}G?b[])ӹ; LN"laN B56~7b)ߧUs rrE_O}ZK^ /Q/B)᳼S֝`mPABSbw=C2izlMZxU~'S%IjIڇ$[ M&$;^Iw7R>|R|LքV`/sjV+sqCaFAd0Tw5F6=DEk =R:֕u3(2~^1e=u&I'%T=(1ሼC苊Q8"7`?w>*LkΡO8_o4;*4=ͯ>|6À,*8a99Ch3V>Km7 TDMACm4!HR)K 2;*Ȏ+p}40uNɪ4- dG.38HDE |ިH ߦ]sOw4ԫi6ti|1<.t9Rt:uM۸qGZSJ9l[i!vvηZ見E;_^9)A=x(1?S?GU1@DXl[Gt>zsJP02cLI By-푂w E)={WHEp^Ե貂"ˈYUaNMUh#Eqyd5)4SfZc.eœ&L&,Y2dɋ ,1bɓ,2dɓ&M U(S%Vk 7ӸJTK뿪(wc3KŋDbhW-o52dzm501IFxeL>0ݕ %R(^Uľ j§WOJ7T ʨpz*v/)^E dɼٺ&V&CEzZw=Qw[ʞu,1ƍ Էq/ò<ԟWb?:-9-. %:1mQ MLx֐T?vUL&'<%ԮU.\~^%(ꌦ, LҢhZ/@0;%MFc7kDXʊ`֑Β{i{;Mv O 8$OMo~ahL+T])-!ʵ4hLF+ɋE5 -TഡiLDqL,. 
lM&CEdX*1LG m Zoߏ/{Ȇ"À,x1MLďdD`%а 1M6l)4T1nd,VO*d/QUO8*YT( rE$g3|IWxoOC8*E r 90YB`0IAYO'4s:b+%VVHy;ww*ЫEhb)nSRQȊح5I`#FXbUr/Xw.rb:̐oUSX9:^L9\mΈM1Իg #\S52dmbŅeŅ7I0XF*LZF 6XF,/nܮ8~A6SrCj7qn⼩eCFG}(µT$ʅbRS줣]]oGdseL+"S Tzw;i:%Q`# =.t)4=v$WxNdsYpFh6Q^ʝĶvF7"S]RjjI IZ}Uϫ/XՖ"ݷSIe5O9THm*NvkW>}W4C/NH)Lݎ/wJ`“eNz_zmڑC|r(l26} 0*(>UoTmEu_NQ޿l͗9k @cקMgR+ $LWk Wh:tp>>3N,~!5!ҘIG)Obdŋ,L/&MLKPŨvSS&Ldɓ&LbW(KqbTŐ%=ZS&'֧~bL)lU]y[/,QwQxUxL[,F&:}M;U , ̥?q|cY/ՕDJOh{< 7)œ~be󵦯ڬl}<RPjZ#ʥ#VRŅ .BI!%j.a~m- a<ąSrLN(z(5S|:-{BĤ,@?$2m`7̯Xp*ܢkS~^o"oV[d].xcP LA' ]}{pR '~,kC Xa >Ȅ~R-u0$2BK,S(T}gѣv^vz<(=5=l d]`B[v=0$uy" G~ZDNx)=O% aSFs ّMT<ne4Et9-@Ry@jUosۛ{fbv`c b~|zKKtnŢ͡ b e4dΙCPQ52d vXI?!BrLrM2NB"bF-<=wZ̝[?y'Py];yI/tS;Iʼ* e094⿥wޛ riqdb͖)_ "HYC  M{qf"45PJUK 8X W qoobMz,ߛ^(5Dpm3>K9z qBwl6>UݕgA>Dv("e%xr9˯CGisu&zf \yT7W 4s$s!/~[hcB+yлE Wamh-t' `Zs2ҫK?f{p\%i_zƠU`z{iWt~^K/twA|Ihi7oXF'БS 6lo\!{g\C/Cf8c/l]+@:O}:N<[gKe7J{/N]J8R-RZI$E;zz31<nNr*;/t˳$|%cw)BYK`g✺6>i)XkLA"bnK< = 7Py]U_',y p/ 6l ƀ[ Eov ')AbI^#3yd(zjߕu?W4cM iqmx9)O49W~Wߖр"RjaǫfQ`J`40 -(Nί`5 Pys^WXrc5ϖ 4iBj[rr_n8 O/as-lbgdaJ:!q]y ]% 4y}`?=Tq~ >d,T5@kqȦflў=G`s`3L88@D/aO0r u:6]r.Vvh?StӦĻ yەLdjU=Y1 Q熑G q,<,[3ӜO8?LyJC;0z-Wxj7 yu2gFo=AP;8vWh /^^ lO|;9#RGhv_,\X0 `f74:[@C- 7{]|8a!prjo-\޳OVIY]>kblB2#ҡ\ahh\۫ 7sVt{vj;#DXɰMS}cZaG(Yg_߻Ҳ|;k؍y}`6G e<ȢAL)As4r؂Cgz%~lz:$kou[2RR ::48.ͻ1U& ԛ̌i][ԋ8%$:]?>zƖGc>(&5~gFGv:>j>Gʇ+cP6c{v?-c{LNsPkUtp4_"? 
}-k'HSN& GmI1au\^_yʒ,(gGH I!}W&3/&ėtB I<+}Py;}+t+O].Jq~~Up) ̿ Oi`;ӆi#DF~º,KHzP{5Q)=8$HFE%Z@c}2rhfϴ-nQĹ%G> tpt<8(uEjU[&ypG^ޤ7M⨐H#crUνFYvȑm1jGșK|Ɔy &+2:#Ĉ6?dZ5G%u6I#ERz?]QT7&I\TT<+]N7eWR;!.tS;m;2_nH;fGۜ{k6L.Ā*= Q.u]kt_WWgr Eo>Hv|KH`0&ephQR=\@DݥW]a䯺nW(=T0Tg)ћ= m<2&~t@אHK n|xi=U|;6}[(U.7>ʹ4HI:mG]ᶈ|^s=âFc%L)%w4ǣUVm=VĻwQOyf [aZW{oD-gI[%nGՉk,n,ZJ InHѯM&&Lﶎ/Aڠ| aUOfeG~z/rKTniʾgrOjX.wAQH>_CM_?- %*\ˡ}R:~+&=UK"47<<Rds'vlz6XL?BWy-2Kᱚħ3a=Qݺ1-؞`bOv}E\d80XY/jp2ߋQl 12W,kX՜1l)'_^[ޢSy 0:\~G 09zV mvGmzۻh~nhsU'8'-}G%4B Ҵ1@hXU\4,'}0mnD63ɬxם*UOR4R`݇ځD3sR5g&mOqQ^ D DMъ ΏO ϖQ dRحbJ*?_iڋ2s)y;G!*``8HrʈDs~IЮPzai?ョJog5]k:XDE` ,:;iD} uce7~7jOY_GC?FTO X4M;D1{`QdZΊW WxCdZ !㟾r23%[ \=u@ M)z:(a_i%s]x%j7fru/CsAcZ  !P>Ż΄Us{+Ƕf̡xBg+&w8 )Bg4iiVEl9ʙ^AQfLn~WOCb{7c]U,ï3{_@c0ȬYww%3FnP RT';;(:wFDZAYb>9Ng'7]ɧChXIkuӶj9:UO+:(ы -uYiIY-=oA߬YN7vq4wʘN;C Ru2Pn>=լv5($TTƩ} KуFiE~IJfzD+͟H6-bic4-sSkchXkvBhQeUQ]Ӣl|ֻ}>q՜և!g1w3; wn;]+98jQ0rr"(;Gq>_n~S]N>?GǶf`]cal :4.^)'ws3\1 MAv{>p]P:Yծ%Q6SGCPڼAPTw6 g·7`cZR!t~F`f9j? ǛCNS~~/imr*p9+$`JGx}*?o氿M֔&FZL<D΄^ KJOn0pj>=(]YSFzK 7 C`A?1@POyK'ߪ ?!oAoә+I}{ĤQ\ FkD7 KPȐkNԠ1+ 8r/RKl4~+YLq{e7ZO[O¯OTM-"[dѶΒ]zgyUo6SB^>hGxeg]-|<n:.q!nUÔç0_x~ $?J2=Y=#ȵ2jisDcm;|;޳Tdw'cwbxh"Tkx| PzU ??bPz)z;7,1-P -)t4=_% ԟɀkqLr3Dhs­Zj%I=7 'M@3p? 
{[GI4٧vͪnQP_=wՐRYWF=~F92`E+G째6ϕRA;4''Ȋh2, 4,5}^2[Y/4#z&¢xWFj ~̌Bq}jw )[7 Mf.xe8ʠT՟=/6"i5AL*"1jO?毡ЏܱJޙ+c(ۃg][5gD U=ce KP؀RTXDo~Lialń5$N@7PTFܛ c_i^i²yw7̗\qW9߾lri 5}mLP1.mAkTWmm7lv:g `/XjC9x\W>җcWbSvHm(`ϛ;L"eh\Lcf0=}~zzX8.\S RVrO~ jHΉSMǯf;ӎjs- Y~ Wyo6ҹvaKzI|[COӽSEu9;&6?)Ly,'^7;bmϯX0CCxNBͧ=އ]T3;vΕ'F+=B|峸Uhi7o5`FKe^ob% I^׻'XM|$ueX.*Q]hJcH&G1={ _Z*zs^O2es{dQ֥fﻐ#ǖY,|QWPmyO2O䀐z'7:'4c=޹n.Qkn&"[~]4I%Ӽ!q8~l!rJ ąmqO}&Rjrv_N7sj2m%q3ŕMX[MPtqUxLd©yןj_Qhf`,\ wlI4&˥-oc[Ԃ>ZUn4rCܻ%YA>Df:; =O%NuOn8=|8DFbT˼ $!_hذR23 AX*ɑ籞tA „0vgdpS{$q;)d!ẊҊhEra;YLFI "#fisK Z?ڒAU퇆ZRjZ~_론;lݘw{' RY4r2=dD/cQ(w*ϲ곾=3̜*SAߖqT)#%]8c럖[76?2 u~WӦEZKXi636sLw#szj-{R%;*y\ug]g6_јB*AB5IIdL/Am腿q=:cmfG&=tSVrgLSxnd@qÊ;Ӎ{ +=lԧ+ֹ ѡ\@ %|m!/ev$S%FFȷJl3^@ |#h]9\ @*r94K>tQ| W塲\#:6fxݚ0ZN"L>Nw{j*3s}?.#ٯ~s׻ER@~v{φINǑCنHf$J/ hXIQ&e{X).jq|#piֈ1Q"9*~Q~$Fx DA'&q)z1/o H׹wZ-)$"0=0zUbc ]6px=ʇdmQg`.% G򥧅q`@5ꁒ]e kN^Mߘ 4dfH<͎qBLx%*uXz64d-|ёB<KSްy#rkJԨ49*W/0Tb]T4|mW5#o?9 ֖c /XD++[&܅J@{Lx7e zEJl a9xgOrXm #A!zs3f@|sty@h1KUn}aJ2uهDf&*X}OLT՛kE-O")1LZBbpƒd*T(^e"C r2cLsT; ip)W" +=DX;ڵ2Q6 o>gM3ʃ ,xKBg(WB0d8v,twɗؤ]7z {ojּ&`xϼ@bRq|_\B:g;.#J`JrȌ2>Pi%/z䘗w_ ϐ??0{Ù]nԱ9`AtruتaJ9aXՒ.MnG`J}Mh٧GuP B< >^N+NJ4NFn&gy(}:42MoR |d=kI![d#w} 9AhD1>wTU{;a(N|Z&t6i:~H˼ߒ + ?JEyri8֨w{+3X@ z {dކ{l/dFޏO/I8eeZt*N'VųB9sErLy"jsn Rxtl(w![FӕK/ g\Ȕ'N|^=RJT{gC7|РԯAkR}3KH9#$Cއayl\.ϺTK'`}q:,I~QNט(T٨TCDq84Eҧ\GҴs(5GG Vpsa ik.Pr'4Lp gz: 58uܥUE4a=]1M}Uxv%FJf3BV cUT7>E!RNGAd3pmtxq> $qڒ jap7"q 4z|F R@(a8)0ڙ'C!"GꄝÎd4> )*#)hi`B[w6\:w;kCg"zT~$>t?jK;b~T(uC;63)S~NXӊo+ʼn3,Y-<C17<FmTbZ럘z̥^VDislo"qp07Mqb}6`e!@W-W!7|o;;x  Jqjn[{xލ$jtjo=<16~_-3|4g/ GUXDR(eVzd`G[eEtKV\`8s| A%^68l< `v< ?4?;wTT LjVǥC|3|@C'RE*jW+F[>߫9gw$ը^) pn5Xhז%]I_Oj 2=G]Ά3V`N^7ɑ;q ̟WB|Sh8EAp3يl2v;E4 =||=IciOzw/08޻;ϊHa> %qCWѳy ]= xĿ>*x#!=yA\N.fJ΄`Qb !8hNqs)[|p-RQ{G,{_9/<׿|Axzgp"{EP g]г Ւ7uQ *Ot#KDw'󙩁<wjsbu1IRqQr(.f\j6夾e]I^\pn.oo][$ӌ# RP؄mv l0o)hxUb|or4,Bz[y1Sֆ_=ٗվ~slgOG+FnG[D*fm"=- ~Q- ?>!ֺ;õ/F_oj3']b116ޝRɮ:Fd_cPd5(_êb7 ;W> ;nG(vƎUN׼g")7±?E,RLDJPr .=MA8Z:{.ce"xwOPex#M+s]cѥq ~m>@$6I/( 惼GؽZZgu{jG̛6KtZEvfa8%r31@O,U*u6Ѥȶ ie!8pqii 
;C($䖗`$w#HFLj_mmkWm7$L.k/+EqpҀ1jz,ĻI>;sɵ;Ϊ(3Wodsn3a~ r琮;x Im(pMFN$~2]̆Ovj}P0ǽgw`T哓n4N8D|Yr[?=t 'f:>]+<~j{*gC?%Dךvv};!L<[]xuu&!Y1&6YN <$OiKD(}}ͩD⏱}?kGe=ŨxSsc:NK:_q@mJ1[3Y="t lWS~%7R{-`m,}ν8Kv> tzD(:Z3+(C%O)T5!HcU[>籰n&]ܬ} 5EY,xH#mB(vZVæeMc$hPڱQY~4O"YZr6{A74q<V펢bVȬ88p!tɚ-.Jwxolt(偠(} WA򾇡/Q3|~r.quc9+GS@C34WK&=biV"Dfѿ0!3n}MGst>i~`$(*hN4iZAf4<;_ޅ<"/2@huHү tpCkr+50htn㇀+E$>,d$/K5o(?u"`exL+h8v\w=Raz*|Yx"j?w6{\ o[ sK cFuZf4\h*t<Ϸt=zA6^!ThOknS )ɍ䳈_@@Fň`quʥ(A!_]͜˫K)w,b b~?kq٠#ƺ!xzx2}ț|Y|`AEF-%LULJo?FVt + VE!UM{FX^Nh?JU%yDYt aTw8Ht,(IY3)O3z8|LD*_P%8#nY ZNbܺ4l{~Ԡ3{{Z喯tu{aZk0c Xl7@-ÈHGYA Xq3* 2g/K%M킭T][L7E-7B`鹳AWrxh~Bw [4g7!&-g2QeS^9BH?fHBHˍ4bOu͈.0*:+$7V<[TMV34&H؉^ ͇ĐZL=~1]Ռ}ŷ7W{pRT2CH_َ8U!+ ݥV% BxJQGD#,W$/~AH'jPԎR@a8aioK+!Ӡ?GTGG4~yjڮ'0 W~ tqv#_(~5/UI]>NX5@V)7!#SWDL2e:0I8'A_W0[KB+H!KUR)BIEP0ȂAwMvOo6e.9!z ]V2fFȕa"  Q6I=0w=y2H3'KĂ%|'0*k<SQ" bU~+$#nK#Iҳ1*nxd,Q[3<"BG0nI$RtR" D)wTLE*A!$X;;uu?,EV|fʷwV9?- x050LK_i5,Ҧl ?/"mY֯>rϩP BYm^I`AcŐ|CIr 'ʿj''#>9G?t`t~/d.^U&UÍ|c|`]%Z7`AJIo/s$#զ[%9ooz-%T$64F/A66n *͙ d3Q24_e-55jZ? m%ˀ]?矛%ٖ^$?k lDmb%)8"$jKoĜ`NsXe}fo12E\ {SK;&K#s7-IGȰ+{. 
BUf.rt^2`Hs?M10=NgY:WBwYvd 5x)0U9uɓ1+\5W VV Ke.H9x:}_Yg,V=dXܷp 'lO% )qe5I' o>:23@ P P P P*P(P@(UJ$AT( (Q@T($([D IP243IɦSd2 =zCF` !=@2GD  2ɓ@шhi!ѣL42da A F @4d4&MF!LLFF2ɑ1F&@ &Mh@ѐ 4 12 4C&F04`4)RMzҧmLjfGOd PhښM @Mad@*z0)?F)6(='OR~C@,?*n*J*1H:D˦ɕe6a2/75|m7ܫ&SFUL)v?î;UsF8V0VNfVi#<bzjo3S([b?&**mK+M"w;=wyv2yKی &rЬ0b}4cJ%EDXP"0|>eG]cwݧU.Bp<3;4)eJGr9(n8 80鯥]M͔iJ dܒG'luG !ttcH\'_N\Mk2FO"vNXެ-XXau/{㝚ٖ_ԍ'yXr|\OwM50ZbI SGVT+s|&_N%&~g 9˓Z~F$寝hsrNY> xMg|Mj{+}ʝ/|1'TaETR>8373vFrHm0l^Srn<gefofdɶ`Ջaj7Y-嚻SQeXc˗&wc8sSfU_Nl&0cYZ5eH@C̓Ҫ蚒$'3M{*tM6be)5eqz~ Cς} $@&j>$<ߤI8p9X+Y[&bDL$Ejip4n`I!BI(CQU`"{c^=Prc0x{8zXW>k|6H O|n+ˈpe 6_9 ~4t3l{^~Vsvz}>L\Œmf Hap7w$B4I*kRRő)QaV*,ՒOr22ԒRHgV];C{dێ& LQmYll5Lď,ID@H>XV0RPDq"8H"$4XENk!ETJ$NQ,D*TT["–BU$%D J!eZ*D-Hʒt=Hē$R+dT*PAO/Ypblѣ J$a1[U*[e9%$LJ) Id*e-I adJY EBT*,$*,ITBJL*SRI1s`E(je(%9b4@Dd$[ *R"DD$8wC1ih'$ NM*Tن0ae!J 2UX;L%WV͘4H*UL)L1шry3qwӱrO+/XJT$ɔU*X6h(e*CMЬ*L *111RIXURER%*ʑE*G5*iU(T`*IZY,[K[h*UOLɹM040*-**MC$* ,*E *Hʥn20121[ 4ɧ0kQ2`JM*&*d敉$JISe UY-2a)0«rl bE*)eVY!U*eVFLwBvW*~lGSLv+a[4e,9eUx7v* 84 ;[6Ua_Yi?OaVqi[dɆw^ 56ݪoDq:Ob;*wRm!/q[)0pe{h֐I><8z*(bvNϳX40 <zq) Gq^)֎8Ȍ$I%)4iRCTvya'6nnUuSgQUEQwzL*US{wa{#fݾCLJNN&pax/y'rne28&Dx WW=SJwaw+ 0<ll+f^H{&L9yeԜv{!:ǭm;\hZz~u[jo+Ab'*HB{^ЕSeVR|^BIGqAJOI$<;I:$z\q6V^Eei[0faX?;{M%OB0LL*F L ~ QSs>',wy\ YeC2~>VyUŖdeiL6UaeiUUM4,WMi[++D-aR)`lQYeUYqhʛ|2*R:42s3 jPnnTTi'Y #bJUQJ(yNr'vJ %)J*T)JU**Y*)I'q0`#XB*TU*U0eTTUXK %UTbT)K"TR¬Q_>%g2|LSR7Ṗ?gȟ$xqJt(YuիAUUU `JS*«JX0'-*LM!MaY+ Nvvq[zGt RW`n>8ISFMR*i X2dⓋ `H<7/k DvJTzS{\WLF͞Au42,=,â<iZMtїߎ1JauDlʫ,=&I­WraQ klaM:i$NRWL4V4Nz`CܮWbj$waZaeZqC)]&͑4l`rvpreY2a,ݳpt:]&pޘi%rtr{cl'g"v*=oMHc2vpy쟍?S R07w܌D&X{Y:y[Q#Џ[驆ެ50jmRzM21( L ((TaL(+>Dhn$5I6i0p~FNgD`A%DF'76 > ITDJ*,0iGY*X&pC)%!*I"*U&C$[ʽRN(dvc&)%Y%YA^qHp"*J6UU`&wdXEN .6vWP*I9BKMJaIXnG7G'bII*}r:+N[tn9U[0nVцI4~Gw9l<^^s$Q,z (RdIuMi"R8HHLXAuMU-Mqg[EQ`kG0Bt)99GlY?C8Ȫ0saH;ԙO6)P~z^>ER0Ok1S;![|u:Lq鍝va#{:QH‘VaNe^dp O&S"nrVGR9#v ٲXvc6WSV4w*HSe]L%v0vRFw 8&[7py;:7Z6aPw#'D:֦ʪV)nv{7eӱWr+*slٳwn,6 ͦ*TRTL1aab۱^ L6WUe9؜'''<>WA)0~HwI*HbQx 
"LT<_qU-:060}ݤ~BT$H0QR "NxYV6g#)fXɒ|EhVN0dhɔ0<͘V:}3'!_N>Qwͳlbʱq x8.˙s]F=DÃ{!#,>SB)%ޥw[#cD|#$7zIҕ|q>U~O>zt|'O[Ҟ (),+J>gĒ:!^ZVVhЩ͔,+ee4 ҏblh|&FY&LʥUI)2eRɔ2J%Jȟ{bu!M׉c#SN%;M)ijh|QEF$()0ʥUUJ 1f$؃,+04H '}\qH+v@S?fv=ЏK _F]q=E<;\k|tW0G ǩ4><}.*}u~ɨ|ϳЎRumc\+*ܓ-=6=)i$櫒Qy M#(x?ƟKdª|FV͓ V66aFу*ɓYe44KCfɳOLuO֟UER3 *@IM fDHJ~w6`ReQ$pUUUUUQ $&a%&D(F0aX)J00BI}R(*JU%IE)R"RRTRJJQ0UR*QURT#T0U)*JRJJ$JFd''s`aU>E)UUUUUU}MSreYel?3Ha$0%w BQ?H}_|0?+M$ "e.$ H$0#D4Nd/ce[%V`6lݖ aXevl[#al+M2٤Vi0c,i0ݦ͘n݆df͙n&Tڝi򊒪z~(1ÁVt^ml}GRFƝ[S=ci<>I6%% )US6Jv n20TQY0`dUVaUUY&UUUL*0 2CDĄdsh2S3; PJRQTQ*I22K2Zɾ[&nZ&S<{/SDVnF+IUVSHʪr"qdMdL%%b5F"p"B$4D%'ch|4BIt{^iZ{{a0Z+ Yaĭae[#sWqWnËZEl-0)N&\ cs G 0wッ,86l˚*R*U%R )(Q$PUUO"U%P*U*)UTC )T(6D 6DVo*pK!SD!8;AXzOKnO Q4ydTY$#=""<Dޟky>(:BI6I"r$,IR81$%"yNl &7!& \8&JO$H` UV*UUUTGҒM ZB:ЍiV:eu7|x[e\ JVsM $hUI) BG:9 HMMpc}Iu!zT``;쪶 ݻ )=I $L2)Nx>' }jUUQA*SWaU1q=F}:Ӈf. E0X:*Uf0OR%VR2K#,J*0t@fI';=Jv'NI%'=B[ $\߉nN8%=VV՟wIUR(h Wi2MQ' ~O_@zg2$ed'I׺ylOAH&TIu2D{2SBטb`U*JRTf,X0co77u*l`2Q0URIbM;؟U$)~ jG/3tl&hm? G#s*IχksGSIP$8 ڪGtї 7|Z,*d!'|DD܉Hu4"m2z# 1|ȑz!n6OC7T2e=nopCN%"7N!yP)0x89pNX0Lক49xh(ՇTUDSraW4eUT0]*fl䜚dK+UD0`a"a'&0^0~.v >II;`ppRU't2M%@Ą읎Be$RM#g0&C$$MնXVЙO~ vrl4^ؔlI0ʼn:V @yg1%&dD!,0wj۩G7(s+3ivCXW!AIևUY#ʰh®yYE\1`moRYJ! 2lu=gS I$;FID$DH $X(FUuXj^*)]0lQU8o(3{l{̻F'/Q|L||e?$~)%RRRJI2y#yxD4v:&Ga0*00RL TE& TTSbNQ~~?Eu `i>=ﭖZ&iai)R ))FI40*L*0` i!TL #*Zi`M*(~~M2FYeZe0VVZeʪ,0Maai0LLіYieiL)ah* 4ViauW OU.;]u G{r}dGOƇg3 0L0v=䃂 $% \GS$&I\S.}4LHŝc#bGt>{$pRwy%ܠxj=.!"THG!肮3XIXYhm㊞R5a`RT'RI'H;W]X$1"avaWچ #祃6Y~$Bl*x2: O$YYȩe]MN;wR1ϜMGDF$:S HΔD%#ySUbr7X0줒y$&L ?#HaH}!ZqDI/:~ UF a$%D0·4LBĭ1"4gBd9\,sq:A#T)* IԜv8N laI<hvjt[VrfOyGjN*+IJC/;&];lO9;ߝIBP*%POh2Uuo'ܦ obXm2ۆ62 <3v"=JT&2|fNL0To JG&I &U"EmQ4Iؐ2% WcI'7C9̈c%ql#Chvb^Y6wyzؒȑ2ʫ-Ä,,"N̄LI1'A⒓FVU-5Ȧ.2!ąHCLI$I#,e&#Lđ)RQ"JeQ R**!HBHi XfD HM|o#S$Qi]DOħy>p~8|bo`F>G=p ,7o-J`¿V'sזz˧)  s89sϛ# hFVn'R{\YV҅\JCxS 2(4͢LQF``L0FN2T&$08KURUUUUUUUdARR'jrwR|X'>uU}+i#p#bWI:I=KO"TpIwU*q$:sP̥IAq|\>ֱ XTvMW(|:Mw'|O6 ')\UN3C-X˜bSU鴍fFETNL `X.Kb*\*HTU?]TIJld#0OD><2z4x4N)y:rHS! 
O8'1dtӹfai*Xl$FyR+69L$إu6n$sn)O ޒ| _Y $R"E't8:{NIf}و0a1k-!X`0) IGR1&.2 7lR+!$UTपP` M6H|rjs\& 6GrUQQ2S,QitaJIM02e.'3 Mʍ*Mts&%N\Q!77Cc#Ldf:&hP(CB"%FI80a9&$RN+"!퇞H≲IcCR-:Q+WBEZj`mtFt*\TЄd&y8y9HUTXO"tBQBSCI u"5DK#uO*G$/̪|d}|ް$OA_*Iе+8*FD$XN2'Gc1Շ G I;$QvOyd|[35(YE') { ꪯVS 40d3JmITYv0q•>74'mN`6nNu :=EثUŵct#|(I'7D1禐 UG&< a#7d%7(+"JP\|ݗSȫ\}#'$bew?tDAgg缜L=43^!dtgEi:{S]z=+_C\ņl|]ecd R&R` 8@baЌBi|dQ3ILUr LBuLW>xo^}b)„`brms/R/update.R0000644000176200001440000003267414433613565013051 0ustar liggesusers#' Update \pkg{brms} models #' #' This method allows to update an existing \code{brmsfit} object. #' #' @param object An object of class \code{brmsfit}. #' @param formula. Changes to the formula; for details see #' \code{\link{update.formula}} and \code{\link{brmsformula}}. #' @param newdata Optional \code{data.frame} to update the model with new data. #' Data-dependent default priors will not be updated automatically. #' @param recompile Logical, indicating whether the Stan model should #' be recompiled. If \code{NULL} (the default), \code{update} tries #' to figure out internally, if recompilation is necessary. #' Setting it to \code{FALSE} will cause all Stan code changing #' arguments to be ignored. #' @param ... Other arguments passed to \code{\link{brm}}. #' #' @details When updating a \code{brmsfit} created with the \pkg{cmdstanr} #' backend in a different \R session, a recompilation will be triggered #' because by default, \pkg{cmdstanr} writes the model executable to a #' temporary directory. To avoid that, set option #' \code{"cmdstanr_write_stan_file_dir"} to a nontemporary path of your choice #' before creating the original \code{brmsfit} (see section 'Examples' below). #' #' @examples #' \dontrun{ #' fit1 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), #' data = kidney, family = gaussian("log")) #' summary(fit1) #' #' ## remove effects of 'disease' #' fit2 <- update(fit1, formula. = ~ . 
- disease) #' summary(fit2) #' #' ## remove the group specific term of 'patient' and #' ## change the data (just take a subset in this example) #' fit3 <- update(fit1, formula. = ~ . - (1|patient), #' newdata = kidney[1:38, ]) #' summary(fit3) #' #' ## use another family and add population-level priors #' fit4 <- update(fit1, family = weibull(), init = "0", #' prior = set_prior("normal(0,5)")) #' summary(fit4) #' #' ## to avoid a recompilation when updating a 'cmdstanr'-backend fit in a fresh #' ## R session, set option 'cmdstanr_write_stan_file_dir' before creating the #' ## initial 'brmsfit' #' ## CAUTION: the following code creates some files in the current working #' ## directory: two 'model_.stan' files, one 'model_(.exe)' #' ## executable, and one 'fit_cmdstanr_.rds' file #' set.seed(7) #' fname <- paste0("fit_cmdstanr_", sample.int(.Machine$integer.max, 1)) #' options(cmdstanr_write_stan_file_dir = getwd()) #' fit_cmdstanr <- brm(rate ~ conc + state, #' data = Puromycin, #' backend = "cmdstanr", #' file = fname) #' # now restart the R session and run the following (after attaching 'brms') #' set.seed(7) #' fname <- paste0("fit_cmdstanr_", sample.int(.Machine$integer.max, 1)) #' fit_cmdstanr <- brm(rate ~ conc + state, #' data = Puromycin, #' backend = "cmdstanr", #' file = fname) #' upd_cmdstanr <- update(fit_cmdstanr, #' formula. = rate ~ conc) #' } #' #' @export update.brmsfit <- function(object, formula., newdata = NULL, recompile = NULL, ...) { dots <- list(...) 
testmode <- isTRUE(dots[["testmode"]]) dots$testmode <- NULL if ("silent" %in% names(dots)) { dots$silent <- validate_silent(dots$silent) } else { dots$silent <- object$stan_args$silent %||% 1L } silent <- dots$silent object <- restructure(object) if (isTRUE(object$version$brms < "2.0.0")) { warning2("Updating models fitted with older versions of brms may fail.") } object$file <- NULL if ("data" %in% names(dots)) { # otherwise the data name cannot be found by substitute stop2("Please use argument 'newdata' to update the data.") } if (!is.null(newdata)) { dots$data <- newdata data_name <- substitute_name(newdata) } else { dots$data <- object$data data_name <- get_data_name(object$data) } if (missing(formula.) || is.null(formula.)) { dots$formula <- object$formula if (!is.null(dots[["family"]])) { dots$formula <- bf(dots$formula, family = dots$family) } if (!is.null(dots[["autocor"]])) { dots$formula <- bf(dots$formula, autocor = dots$autocor) } } else { # TODO: restructure updating of the model formula if (is.mvbrmsformula(formula.) || is.mvbrmsformula(object$formula)) { stop2("Updating formulas of multivariate models is not yet possible.") } if (is.brmsformula(formula.)) { nl <- get_nl(formula.) } else { formula. <- as.formula(formula.) nl <- get_nl(formula(object)) } family <- get_arg("family", formula., dots, object) autocor <- get_arg("autocor", formula., dots, object) dots$formula <- bf(formula., family = family, autocor = autocor, nl = nl) if (is_nonlinear(object)) { if (length(setdiff(all.vars(dots$formula$formula), ".")) == 0L) { dots$formula <- update(object$formula, dots$formula, mode = "keep") } else { dots$formula <- update(object$formula, dots$formula, mode = "replace") if (silent < 2) { message("Argument 'formula.' 
will completely replace the ", "original formula in non-linear models.") } } } else { mvars <- all.vars(dots$formula$formula) mvars <- setdiff(mvars, c(names(object$data), ".")) if (length(mvars) && is.null(newdata)) { stop2("New variables found: ", collapse_comma(mvars), "\nPlease supply your data again via argument 'newdata'.") } dots$formula <- update(formula(object), dots$formula) } } # update response categories and ordinal thresholds dots$formula <- validate_formula(dots$formula, data = dots$data) if (is.null(dots$prior)) { dots$prior <- object$prior } else { if (!is.brmsprior(dots$prior)) { stop2("Argument 'prior' needs to be a 'brmsprior' object.") } # update existing priors manually and keep only user-specified ones # default priors are recomputed base on newdata if provided old_user_prior <- subset2(object$prior, source = "user") dots$prior <- rbind(dots$prior, old_user_prior) dupl_priors <- duplicated(dots$prior[, rcols_prior()]) dots$prior <- dots$prior[!dupl_priors, ] } # make sure potentially updated priors pass 'validate_prior' attr(dots$prior, "allow_invalid_prior") <- TRUE if (!"sample_prior" %in% names(dots)) { dots$sample_prior <- attr(object$prior, "sample_prior") if (is.null(dots$sample_prior)) { has_prior_pars <- any(grepl("^prior_", variables(object))) dots$sample_prior <- if (has_prior_pars) "yes" else "no" } } # do not use 'is.null' to allow updating arguments to NULL if (!"data2" %in% names(dots)) { dots$data2 <- object$data2 } if (!"stanvars" %in% names(dots)) { dots$stanvars <- object$stanvars } if (!"algorithm" %in% names(dots)) { dots$algorithm <- object$algorithm } if (!"backend" %in% names(dots)) { dots$backend <- object$backend } if (!"threads" %in% names(dots)) { dots$threads <- object$threads } if (!"save_pars" %in% names(dots)) { dots$save_pars <- object$save_pars } if (!"knots" %in% names(dots)) { dots$knots <- get_knots(object$data) } if (!"drop_unused_levels" %in% names(dots)) { dots$drop_unused_levels <- 
get_drop_unused_levels(object$data) } if (!"normalize" %in% names(dots)) { dots$normalize <- is_normalized(object$model) } # update arguments controlling the sampling process if (is.null(dots$iter)) { # only keep old 'warmup' if also keeping old 'iter' dots$warmup <- first_not_null(dots$warmup, object$fit@sim$warmup) } dots$iter <- first_not_null(dots$iter, object$fit@sim$iter) dots$chains <- first_not_null(dots$chains, object$fit@sim$chains) dots$thin <- first_not_null(dots$thin, object$fit@sim$thin) dots$backend <- match.arg(dots$backend, backend_choices()) same_backend <- is_equal(dots$backend, object$backend) if (same_backend) { # reusing control arguments in other backends may cause errors #1259 control <- attr(object$fit@sim$samples[[1]], "args")$control control <- control[setdiff(names(control), names(dots$control))] dots$control[names(control)] <- control # reuse backend arguments originally passed to brm #1373 names_old_stan_args <- setdiff(names(object$stan_args), names(dots)) dots[names_old_stan_args] <- object$stan_args[names_old_stan_args] } if (is.null(recompile)) { # only recompile if new and old stan code do not match new_stancode <- suppressMessages(do_call(make_stancode, dots)) # stan code may differ just because of the version number (#288) new_stancode <- sub("^[^\n]+\n", "", new_stancode) old_stancode <- stancode(object, version = FALSE) recompile <- needs_recompilation(object) || !same_backend || !is_equal(new_stancode, old_stancode) if (recompile && silent < 2) { message("The desired updates require recompiling the model") } } recompile <- as_one_logical(recompile) if (recompile) { # recompliation is necessary dots$fit <- NA if (!testmode) { object <- do_call(brm, dots) } } else { # refit the model without compiling it again if (!is.null(dots$formula)) { object$formula <- dots$formula dots$formula <- NULL } bterms <- brmsterms(object$formula) object$data2 <- validate_data2(dots$data2, bterms = bterms) object$data <- validate_data( dots$data, 
bterms = bterms, data2 = object$data2, knots = dots$knots, drop_unused_levels = dots$drop_unused_levels ) object$prior <- .validate_prior( dots$prior, bterms = bterms, data = object$data, sample_prior = dots$sample_prior ) object$family <- get_element(object$formula, "family") object$autocor <- get_element(object$formula, "autocor") object$ranef <- tidy_ranef(bterms, data = object$data) object$stanvars <- validate_stanvars(dots$stanvars) object$threads <- validate_threads(dots$threads) if ("sample_prior" %in% names(dots)) { dots$sample_prior <- validate_sample_prior(dots$sample_prior) attr(object$prior, "sample_prior") <- dots$sample_prior } object$save_pars <- validate_save_pars( save_pars = dots$save_pars, save_ranef = dots$save_ranef, save_mevars = dots$save_mevars, save_all_pars = dots$save_all_pars ) object$basis <- standata_basis(bterms, data = object$data) algorithm <- match.arg(dots$algorithm, algorithm_choices()) dots$algorithm <- object$algorithm <- algorithm # can only avoid recompilation when using the old backend dots$backend <- object$backend if (!testmode) { dots$fit <- object object <- do_call(brm, dots) } } attr(object$data, "data_name") <- data_name object } #' Update \pkg{brms} models based on multiple data sets #' #' This method allows to update an existing \code{brmsfit_multiple} object. #' #' @param object An object of class \code{brmsfit_multiple}. #' @param formula. Changes to the formula; for details see #' \code{\link{update.formula}} and \code{\link{brmsformula}}. #' @param newdata List of \code{data.frames} to update the model with new data. #' Currently required even if the original data should be used. #' @param ... Other arguments passed to \code{\link{update.brmsfit}} #' and \code{\link{brm_multiple}}. 
#'
#' @examples
#' \dontrun{
#' library(mice)
#' imp <- mice(nhanes2)
#'
#' # initially fit the model
#' fit_imp1 <- brm_multiple(bmi ~ age + hyp + chl, data = imp, chains = 1)
#' summary(fit_imp1)
#'
#' # update the model using fewer predictors
#' fit_imp2 <- update(fit_imp1, formula. = . ~ hyp + chl, newdata = imp)
#' summary(fit_imp2)
#' }
#'
#' @export
update.brmsfit_multiple <- function(object, formula., newdata = NULL, ...) {
  dots <- list(...)
  if ("data" %in% names(dots)) {
    # otherwise the data name cannot be found by substitute
    stop2("Please use argument 'newdata' to update the data.")
  }
  if (is.null(newdata)) {
    stop2("'newdata' is required when updating a 'brmsfit_multiple' object.")
  }
  data_name <- substitute_name(newdata)
  if (inherits(newdata, "mids")) {
    # multiply imputed datasets from the 'mice' package
    require_package("mice", version = "3.0.0")
    newdata <- lapply(seq_len(newdata$m), mice::complete, data = newdata)
  } else if (!(is.list(newdata) && is.vector(newdata))) {
    stop2("'newdata' must be a list of data.frames.")
  }
  # update the template model using all arguments
  if (missing(formula.)) {
    formula. <- NULL
  }
  args <- c(nlist(object, formula., newdata = newdata[[1]]), dots)
  args$file <- NULL
  # chains = 0 creates a draw-free template fit that is cheap to update
  args$chains <- 0
  fit <- do_call(update.brmsfit, args)
  # arguments later passed to brm_multiple
  args <- c(nlist(fit, data = newdata), dots)
  # update arguments controlling the sampling process
  # they cannot be accessed directly from the template model
  # as it does not contain any draws (chains = 0)
  if (is.null(args$iter)) {
    # only keep old 'warmup' if also keeping old 'iter'
    args$warmup <- first_not_null(args$warmup, object$fit@sim$warmup)
  }
  if (is.null(args$chains)) {
    # chains were combined across all submodels
    args$chains <- object$fit@sim$chains / max(NROW(object$rhats), 1)
  }
  args$iter <- first_not_null(args$iter, object$fit@sim$iter)
  args$thin <- first_not_null(args$thin, object$fit@sim$thin)
  control <- attr(object$fit@sim$samples[[1]], "args")$control
  control <- control[setdiff(names(control), names(args$control))]
  args$control[names(control)] <- control
  args$recompile <- NULL
  out <- do_call(brm_multiple, args)
  attr(out$data, "data_name") <- data_name
  out
}
brms/R/stan-helpers.R0000644000176200001440000002633214424715563014167 0ustar liggesusers# unless otherwise specified, functions return a named list
# of Stan code snippets to be pasted together later on

# define Stan functions or globally used transformed data
# TODO: refactor to not require extraction of information from all model parts
# 'expand_include_statements' removes duplicates which opens the door
# for adding Stan functions at better places rather than globally here
# @param bterms brmsterms object of the full model
# @param prior brmsprior object
# @param ranef data.frame with the group-level structure
# @param threads threading specification as returned by 'threading'
# @return a named list of Stan code snippets (element 'fun' holds includes)
stan_global_defs <- function(bterms, prior, ranef, threads) {
  families <- family_names(bterms)
  links <- family_info(bterms, "link")
  unique_combs <- !duplicated(paste0(families, ":", links))
  families <- families[unique_combs]
  links <- links[unique_combs]
  out <- list()
  # TODO: detect these links in all dpars not just in 'mu'
  # NOTE: these checks are intentionally independent 'if's (not 'else if'):
  # 'links' may contain several of these link functions at once (e.g., in
  # multivariate models) and each one requires its own Stan function;
  # duplicated includes are removed later by 'expand_include_statements'
  if (any(links == "cauchit")) {
    str_add(out$fun) <- " #include 'fun_cauchit.stan'\n"
  }
  if (any(links == "cloglog")) {
    str_add(out$fun) <- " #include 'fun_cloglog.stan'\n"
  }
  if (any(links == "softplus")) {
    str_add(out$fun) <- " #include 'fun_softplus.stan'\n"
  }
  if (any(links == "squareplus")) {
    str_add(out$fun) <- " #include 'fun_squareplus.stan'\n"
  }
  if (any(links == "softit")) {
    str_add(out$fun) <- " #include 'fun_softit.stan'\n"
  }
  if (has_special_prior(prior)) {
    str_add(out$fun) <- " #include 'fun_horseshoe.stan'\n"
    str_add(out$fun) <- " #include 'fun_r2d2.stan'\n"
  }
  if (nrow(ranef)) {
    # include the 'scale_r' variant matching each grouping term's structure
    r_funs <- NULL
    ids <- unique(ranef$id)
    for (id in ids) {
      r <- ranef[ranef$id == id, ]
      if (nrow(r) > 1L && r$cor[1]) {
        if (nzchar(r$by[1])) {
          if (nzchar(r$cov[1])) {
            c(r_funs) <- " #include 'fun_scale_r_cor_by_cov.stan'\n"
          } else {
            c(r_funs) <- " #include 'fun_scale_r_cor_by.stan'\n"
          }
        } else {
          if (nzchar(r$cov[1])) {
            c(r_funs) <- " #include 'fun_scale_r_cor_cov.stan'\n"
          } else {
            c(r_funs) <- " #include 'fun_scale_r_cor.stan'\n"
          }
        }
      }
    }
    str_add(out$fun) <- collapse(unique(r_funs))
  }
  family_files <- family_info(bterms, "include")
  if (length(family_files)) {
    str_add(out$fun) <- cglue(" #include '{family_files}'\n")
  }
  is_ordinal <- ulapply(families, is_ordinal)
  if (any(is_ordinal)) {
    # ordinal lpmfs are generated (not included) as they depend on the link
    ord_fams <- families[is_ordinal]
    ord_links <- links[is_ordinal]
    for (i in seq_along(ord_fams)) {
      if (has_extra_cat(ord_fams[i])) {
        str_add(out$fun) <- stan_hurdle_ordinal_lpmf(ord_fams[i], ord_links[i])
      } else {
        str_add(out$fun) <- stan_ordinal_lpmf(ord_fams[i], ord_links[i])
      }
    }
  }
  uni_mo <- ulapply(get_effect(bterms, "sp"), attr, "uni_mo")
  if (length(uni_mo)) {
    str_add(out$fun) <- " #include 'fun_monotonic.stan'\n"
  }
  if (length(get_effect(bterms, "gp"))) {
    # TODO: include functions selectively
    str_add(out$fun) <- " #include 'fun_gaussian_process.stan'\n"
    str_add(out$fun) <- " #include 'fun_gaussian_process_approx.stan'\n"
    str_add(out$fun) <- " #include 'fun_which_range.stan'\n"
  }
  acterms <- get_effect(bterms, "ac")
  acefs <- lapply(acterms, tidy_acef)
  if (any(ulapply(acefs, has_ac_subset, dim = "time", cov = TRUE))) {
    # autocorrelation structures with explicit time covariance matrices
    str_add(out$fun) <- glue(
      " #include 'fun_sequence.stan'\n",
      " #include 'fun_is_equal.stan'\n",
      " #include 'fun_stack_vectors.stan'\n"
    )
    if ("gaussian" %in% families) {
      str_add(out$fun) <- glue(
        " #include 'fun_normal_time.stan'\n",
        " #include 'fun_normal_time_se.stan'\n"
      )
    }
    if ("student" %in% families) {
      str_add(out$fun) <- glue(
        " #include 'fun_student_t_time.stan'\n",
        " #include 'fun_student_t_time_se.stan'\n"
      )
    }
    # TODO: include selectively once we have the 'latent' indicator
    str_add(out$fun) <- glue(
      " #include 'fun_scale_time_err.stan'\n"
    )
    if (any(ulapply(acefs, has_ac_class, "arma"))) {
      str_add(out$fun) <- glue(
        " #include 'fun_cholesky_cor_ar1.stan'\n",
        " #include 'fun_cholesky_cor_ma1.stan'\n",
        " #include 'fun_cholesky_cor_arma1.stan'\n"
      )
    }
    if (any(ulapply(acefs, has_ac_class, "cosy"))) {
      str_add(out$fun) <- glue(
        " #include 'fun_cholesky_cor_cosy.stan'\n"
      )
    }
  }
  if (any(ulapply(acefs, has_ac_class, "sar"))) {
    if ("gaussian" %in% families) {
      str_add(out$fun) <- glue(
        " #include 'fun_normal_lagsar.stan'\n",
        " #include 'fun_normal_errorsar.stan'\n"
      )
    }
    if ("student" %in% families) {
      str_add(out$fun) <- glue(
        " #include 'fun_student_t_lagsar.stan'\n",
        " #include 'fun_student_t_errorsar.stan'\n"
      )
    }
  }
  if (any(ulapply(acefs, has_ac_class, "car"))) {
    str_add(out$fun) <- glue(
      " #include 'fun_sparse_car_lpdf.stan'\n",
      " #include 'fun_sparse_icar_lpdf.stan'\n"
    )
  }
  if (any(ulapply(acefs, has_ac_class, "fcor"))) {
    str_add(out$fun) <- glue(
      " #include 'fun_normal_fcor.stan'\n",
      " #include 'fun_student_t_fcor.stan'\n"
    )
  }
  if (use_threading(threads)) {
    str_add(out$fun) <- " #include 'fun_sequence.stan'\n"
  }
  out
}

# link function in Stan language
# @param link name of the link function
# @param vectorize use vectorize version of the link function?
# @param transform actually apply the link function?
stan_link <- function(link, vectorize = TRUE, transform = TRUE) { vectorize <- as_one_logical(vectorize) transform <- as_one_logical(transform %||% FALSE) if (!transform) { # we have a Stan lpdf that applies the link automatically # or we have a non-linear parameter that has no link function return("") } if (vectorize) { # custom function cannot yet be overloaded in old Stan versions # TODO: change names once overloading is possible in rstan out <- switch( link, identity = "", log = "log", logm1 = "logm1_vector", inverse = "inv", sqrt = "sqrt", "1/mu^2" = "inv_square", logit = "logit", probit = "inv_Phi", probit_approx = "inv_Phi", cloglog = "cloglog_vector", cauchit = "cauchit_vector", tan_half = "tan_half_vector", log1p = "log1p", softplus = "log_expm1_vector", squareplus = "inv_squareplus_vector", softit = "softit_vector" ) } else { out <- switch( link, identity = "", log = "log", logm1 = "logm1", inverse = "inv", sqrt = "sqrt", "1/mu^2" = "inv_square", logit = "logit", probit = "inv_Phi", probit_approx = "inv_Phi", cloglog = "cloglog", cauchit = "cauchit", tan_half = "tan_half", log1p = "log1p", softplus = "log_expm1", squareplus = "inv_squareplus", softit = "softit" ) } out } # inverse link in Stan language # @param link name of the link function # @param vectorize use vectorize version of the inv_link function? # @param transform actually apply the inv_link function? 
stan_inv_link <- function(link, vectorize = TRUE, transform = TRUE) { vectorize <- as_one_logical(vectorize) transform <- as_one_logical(transform %||% FALSE) if (!transform) { # we have a Stan lpdf that applies the inv_link automatically # or we have a non-linear parameter that has no link function return("") } if (vectorize) { # custom function cannot yet be overloaded in old Stan versions # TODO: change names once overloading is possible in rstan out <- switch( link, identity = "", log = "exp", logm1 = "expp1_vector", inverse = "inv", sqrt = "square", "1/mu^2" = "inv_sqrt", logit = "inv_logit", probit = "Phi", probit_approx = "Phi_approx", cloglog = "inv_cloglog", cauchit = "inv_cauchit_vector", tan_half = "inv_tan_half_vector", log1p = "expm1", softplus = "log1p_exp", squareplus = "squareplus_vector", softit = "inv_softit_vector" ) } else { out <- switch( link, identity = "", log = "exp", logm1 = "expp1", inverse = "inv", sqrt = "square", "1/mu^2" = "inv_sqrt", logit = "inv_logit", probit = "Phi", probit_approx = "Phi_approx", cloglog = "inv_cloglog", cauchit = "inv_cauchit", tan_half = "inv_tan_half", log1p = "expm1", softplus = "log1p_exp", squareplus = "squareplus", softit = "inv_softit" ) } out } # define a vector in Stan language stan_vector <- function(...) 
{ paste0("transpose([", paste0(c(...), collapse = ", "), "])") } # prepare Stan code for correlations in the generated quantities block # @param cor name of the correlation vector # @param ncol number of columns of the correlation matrix stan_cor_gen_comp <- function(cor, ncol) { Cor <- paste0(toupper(substring(cor, 1, 1)), substring(cor, 2)) glue( " // extract upper diagonal of correlation matrix\n", " for (k in 1:{ncol}) {{\n", " for (j in 1:(k - 1)) {{\n", " {cor}[choose(k - 1, 2) + j] = {Cor}[j, k];\n", " }}\n", " }}\n" ) } # indicates if a family-link combination has a built in # function in Stan (such as binomial_logit) # @param family a list with elements 'family' and 'link' # ideally a (brms)family object # @param bterms brmsterms object of the univariate model stan_has_built_in_fun <- function(family, bterms) { stopifnot(all(c("family", "link") %in% names(family))) stopifnot(is.brmsterms(bterms)) cens_or_trunc <- stan_log_lik_adj(bterms$adforms, c("cens", "trunc")) link <- family[["link"]] dpar <- family[["dpar"]] if (cens_or_trunc) { # only few families have special lcdf and lccdf functions out <- has_built_in_fun(family, link, cdf = TRUE) || has_built_in_fun(bterms, link, dpar = dpar, cdf = TRUE) } else { out <- has_built_in_fun(family, link) || has_built_in_fun(bterms, link, dpar = dpar) } out } # get all variable names accepted in Stan stan_all_vars <- function(x) { x <- gsub("\\.", "+", x) all_vars(x) } # transform names to be used as variable names in Stan make_stan_names <- function(x) { gsub("\\.|_", "", make.names(x, unique = TRUE)) } # functions to handle indexing when threading stan_slice <- function(threads) { str_if(use_threading(threads), "[start:end]") } stan_nn <- function(threads) { str_if(use_threading(threads), "[nn]", "[n]") } stan_nn_def <- function(threads) { str_if(use_threading(threads), " int nn = n + start - 1;\n") } stan_nn_regex <- function() { "\\[((n)|(nn))\\]" } # clean up arguments for partial_log_lik # @param ... 
strings containing arguments of the form ', type identifier' # @return named list of two elements: # typed: types + identifiers for use in the function header # plain: identifiers only for use in the function call stan_clean_pll_args <- function(...) { args <- paste0(...) # split up header to remove duplicates typed <- unlist(strsplit(args, ", +"))[-1] typed <- unique(typed) plain <- rm_wsp(get_matches(" [^ ]+$", typed)) typed <- collapse(", ", typed) plain <- collapse(", ", plain) nlist(typed, plain) } # prepare a string to be used as comment in Stan stan_comment <- function(comment, wsp = 2) { comment <- as.character(comment) wsp <- wsp(nsp = wsp) if (!length(comment)) { return(character(0)) } ifelse(nzchar(comment), paste0(wsp, "// ", comment), "") } brms/R/posterior_smooths.R0000644000176200001440000001001214213413565015341 0ustar liggesusers#' Posterior Predictions of Smooth Terms #' #' Compute posterior predictions of smooth \code{s} and \code{t2} terms of #' models fitted with \pkg{brms}. #' #' @inheritParams posterior_epred.brmsfit #' @param smooth Name of a single smooth term for which predictions should #' be computed. #' @param newdata An optional \code{data.frame} for which to evaluate #' predictions. If \code{NULL} (default), the original data of the model is #' used. Only those variables appearing in the chosen \code{smooth} term are #' required. #' @param ... Currently ignored. #' #' @return An S x N matrix, where S is the number of #' posterior draws and N is the number of observations. #' #' @examples #' \dontrun{ #' set.seed(0) #' dat <- mgcv::gamSim(1, n = 200, scale = 2) #' fit <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) #' summary(fit) #' #' newdata <- data.frame(x2 = seq(0, 1, 10)) #' str(posterior_smooths(fit, smooth = "s(x2)", newdata = newdata)) #' } #' #' @export posterior_smooths.brmsfit <- function(object, smooth, newdata = NULL, resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, ...) 
{ resp <- validate_resp(resp, object, multiple = FALSE) bterms <- brmsterms(exclude_terms(object$formula, smooths_only = TRUE)) if (!is.null(resp)) { stopifnot(is.mvbrmsterms(bterms)) bterms <- bterms$terms[[resp]] } if (!is.null(nlpar)) { if (length(dpar)) { stop2("Cannot use 'dpar' and 'nlpar' at the same time.") } nlpar <- as_one_character(nlpar) nlpars <- names(bterms$nlpars) if (!nlpar %in% nlpars) { stop2("Invalid argument 'nlpar'. Valid non-linear ", "parameters are: ", collapse_comma(nlpars)) } bterms <- bterms$nlpars[[nlpar]] } else { dpar <- dpar %||% "mu" dpar <- as_one_character(dpar) dpars <- names(bterms$dpars) if (!dpar %in% dpars) { stop2("Invalid argument 'dpar'. Valid distributional ", "parameters are: ", collapse_comma(dpars)) } bterms <- bterms$dpars[[dpar]] } posterior_smooths( bterms, fit = object, smooth = smooth, newdata = newdata, ndraws = ndraws, draw_ids = draw_ids, ... ) } #' @export posterior_smooths.btl <- function(object, fit, smooth, newdata = NULL, ndraws = NULL, draw_ids = NULL, nsamples = NULL, subset = NULL, ...) { smooth <- rm_wsp(as_one_character(smooth)) ndraws <- use_alias(ndraws, nsamples) draw_ids <- use_alias(draw_ids, subset) smef <- tidy_smef(object, fit$data) smef$term <- rm_wsp(smef$term) smterms <- unique(smef$term) if (!smooth %in% smterms) { stop2("Term '", smooth, "' cannot be found. 
Available ", "smooth terms are: ", collapse_comma(smterms)) } # find relevant variables sub_smef <- subset2(smef, term = smooth) covars <- all_vars(sub_smef$covars[[1]]) byvars <- all_vars(sub_smef$byvars[[1]]) req_vars <- c(covars, byvars) # prepare predictions for splines sdata <- standata( fit, newdata, re_formula = NA, internal = TRUE, check_response = FALSE, req_vars = req_vars ) draw_ids <- validate_draw_ids(fit, draw_ids, ndraws) draws <- as_draws_matrix(fit) draws <- suppressMessages(subset_draws(draws, draw = draw_ids)) prep_args <- nlist(x = object, draws, sdata, data = fit$data) prep <- do_call(prepare_predictions, prep_args) # select subset of smooth parameters and design matrices i <- which(smterms %in% smooth)[1] J <- which(smef$termnum == i) scs <- unlist(attr(prep$sm$fe$Xs, "smcols")[J]) prep$sm$fe$Xs <- prep$sm$fe$Xs[, scs, drop = FALSE] prep$sm$fe$bs <- prep$sm$fe$bs[, scs, drop = FALSE] prep$sm$re <- prep$sm$re[J] prep$family <- brmsfamily("gaussian") predictor(prep, i = NULL) } #' @export posterior_smooths.btnl <- function(object, ...) { stop2("Non-linear formulas do not contain smooth terms.") } #' @rdname posterior_smooths.brmsfit #' @export posterior_smooths <- function(object, ...) { UseMethod("posterior_smooths") } brms/R/posterior_samples.R0000644000176200001440000002015614213413565015323 0ustar liggesusers#' (Deprecated) Extract Posterior Samples #' #' Extract posterior samples of specified parameters. The #' \code{posterior_samples} method is deprecated. We recommend using the more #' modern and consistent \code{\link[brms:draws-brms]{as_draws_*}} extractor #' functions of the \pkg{posterior} package instead. #' #' @param x An \code{R} object typically of class \code{brmsfit} #' @param pars Names of parameters for which posterior samples #' should be returned, as given by a character vector or regular expressions. #' By default, all posterior samples of all parameters are extracted. 
#' @param fixed Indicates whether parameter names #' should be matched exactly (\code{TRUE}) or treated as #' regular expressions (\code{FALSE}). Default is \code{FALSE}. #' @param add_chain A flag indicating if the returned \code{data.frame} #' should contain two additional columns. The \code{chain} column #' indicates the chain in which each sample was generated, the \code{iter} #' column indicates the iteration number within each chain. #' @param subset A numeric vector indicating the rows #' (i.e., posterior samples) to be returned. #' If \code{NULL} (the default), all posterior samples are returned. #' @param as.matrix Should the output be a \code{matrix} #' instead of a \code{data.frame}? Defaults to \code{FALSE}. #' @param as.array Should the output be an \code{array} #' instead of a \code{data.frame}? Defaults to \code{FALSE}. #' @param ... Arguments passed to individual methods (if applicable). #' #' @return A data.frame (matrix or array) containing the posterior samples. #' #' @seealso \code{\link[brms:draws-brms]{as_draws}}, #' \code{\link[brms:as.data.frame.brmsfit]{as.data.frame}} #' #' @examples #' \dontrun{ #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = "cumulative") #' #' # extract posterior samples of population-level effects #' samples1 <- posterior_samples(fit, pars = "^b") #' head(samples1) #' #' # extract posterior samples of group-level standard deviations #' samples2 <- posterior_samples(fit, pars = "^sd_") #' head(samples2) #' } #' #' @export posterior_samples.brmsfit <- function(x, pars = NA, fixed = FALSE, add_chain = FALSE, subset = NULL, as.matrix = FALSE, as.array = FALSE, ...) { if (as.matrix && as.array) { stop2("Cannot use 'as.matrix' and 'as.array' at the same time.") } if (add_chain && as.array) { stop2("Cannot use 'add_chain' and 'as.array' at the same time.") } contains_draws(x) pars <- extract_pars(pars, variables(x), fixed = fixed, ...) 
# get basic information on the samples iter <- x$fit@sim$iter warmup <- x$fit@sim$warmup thin <- x$fit@sim$thin chains <- x$fit@sim$chains final_iter <- ceiling((iter - warmup) / thin) samples_taken <- seq(warmup + 1, iter, thin) samples <- NULL if (length(pars)) { if (as.matrix) { samples <- as.matrix(x$fit, pars = pars) } else if (as.array) { samples <- as.array(x$fit, pars = pars) } else { samples <- as.data.frame(x$fit, pars = pars) } if (add_chain) { # name the column 'chain' not 'chains' (#32) samples <- cbind(samples, chain = factor(rep(1:chains, each = final_iter)), iter = rep(samples_taken, chains) ) } if (!is.null(subset)) { if (as.array) { samples <- samples[subset, , , drop = FALSE] } else { samples <- samples[subset, , drop = FALSE] } } } samples } #' @rdname posterior_samples.brmsfit #' @export posterior_samples <- function(x, pars = NA, ...) { warning2("Method 'posterior_samples' is deprecated. ", "Please see ?as_draws for recommended alternatives.") UseMethod("posterior_samples") } #' @export posterior_samples.default <- function(x, pars = NA, fixed = FALSE, ...) { x <- as.data.frame(x) if (!anyNA(pars)) { pars <- extract_pars(pars, all_pars = names(x), fixed = fixed, ...) x <- x[, pars, drop = FALSE] } if (!ncol(x)) { x <- NULL } x } #' Extract Parameter Names #' #' Extract all parameter names of a given model. #' #' @aliases parnames.brmsfit #' #' @param x An \R object #' @param ... Further arguments passed to or from other methods. #' #' @return A character vector containing the parameter names of the model. #' #' @export parnames <- function(x, ...) { warning2("'parnames' is deprecated. Please use 'variables' instead.") UseMethod("parnames") } #' @export parnames.default <- function(x, ...) { names(x) } #' @export parnames.brmsfit <- function(x, ...) 
{ out <- dimnames(x$fit) if (is.list(out)) { out <- out$parameters } out } # extract all valid parameter names that match pars # @param pars A character vector or regular expression # @param all_pars all parameter names of the fitted model # @param fixed should parameter names be matched exactly? # @param exact_match deprecated alias of fixed # @param na_value: what should be returned if pars is NA? # @param ... Further arguments to be passed to grepl # @return A character vector of parameter names extract_pars <- function(pars, all_pars, fixed = FALSE, exact_match = FALSE, na_value = all_pars, ...) { if (!(anyNA(pars) || is.character(pars))) { stop2("Argument 'pars' must be NA or a character vector.") } fixed <- check_deprecated_fixed(fixed, exact_match) if (!anyNA(pars)) { fixed <- as_one_logical(fixed) if (fixed) { out <- intersect(pars, all_pars) } else { out <- vector("list", length(pars)) for (i in seq_along(pars)) { out[[i]] <- all_pars[grepl(pars[i], all_pars, ...)] } out <- unique(unlist(out)) } } else { out <- na_value } out } # check deprecated alias of argument 'fixed' check_deprecated_fixed <- function(fixed, exact_match) { if (!isFALSE(exact_match)) { # deprecated as of brms 2.10.6; remove in brms 3.0 warning2("Argument 'exact_match' is deprecated. ", "Please use 'fixed' instead.") fixed <- exact_match } fixed } #' Extract posterior samples for use with the \pkg{coda} package #' #' @aliases as.mcmc #' #' @inheritParams posterior_samples.brmsfit #' @param ... currently unused #' @param combine_chains Indicates whether chains should be combined. #' @param inc_warmup Indicates if the warmup samples should be included. #' Default is \code{FALSE}. Warmup samples are used to tune the #' parameters of the sampling algorithm and should not be analyzed. #' #' @return If \code{combine_chains = TRUE} an \code{mcmc} object is returned. #' If \code{combine_chains = FALSE} an \code{mcmc.list} object is returned. 
#'
#' @method as.mcmc brmsfit
#' @export
#' @export as.mcmc
#' @importFrom coda as.mcmc
as.mcmc.brmsfit <- function(x, pars = NA, fixed = FALSE,
                            combine_chains = FALSE,
                            inc_warmup = FALSE, ...) {
  warning2("as.mcmc.brmsfit is deprecated and will eventually be removed.")
  contains_draws(x)
  pars <- extract_pars(pars, all_pars = variables(x), fixed = fixed, ...)
  combine_chains <- as_one_logical(combine_chains)
  inc_warmup <- as_one_logical(inc_warmup)
  if (combine_chains) {
    # single 'mcmc' object: all chains stacked into one draws matrix
    if (inc_warmup) {
      stop2("Cannot include warmup samples when 'combine_chains' is TRUE.")
    }
    out <- as.matrix(x$fit, pars)
    ndraws <- nrow(out)
    # 'mcpar' = c(start, end, thin) iteration metadata required by coda;
    # reconstructed here from the stanfit sampling settings
    end <- x$fit@sim$iter * x$fit@sim$chains
    thin <- x$fit@sim$thin
    start <- end - (ndraws - 1) * thin
    mcpar <- c(start, end, thin)
    attr(out, "mcpar") <- mcpar
    class(out) <- "mcmc"
  } else {
    # 'mcmc.list' object: one 'mcmc' element per chain
    thin <- x$fit@sim$thin
    if (inc_warmup && thin >= 2) {
      stop2("Cannot include warmup samples when 'thin' >= 2.")
    }
    # permuted = FALSE keeps draws in iterations x chains x parameters order
    ps <- rstan::extract(x$fit, pars, permuted = FALSE, inc_warmup = inc_warmup)
    ndraws <- dim(ps)[1]
    end <- x$fit@sim$iter
    start <- end - (ndraws - 1) * thin
    mcpar <- c(start, end, thin)
    out <- vector("list", length = dim(ps)[2])
    for (i in seq_along(out)) {
      out[[i]] <- ps[, i, ]
      attr(out[[i]], "mcpar") <- mcpar
      class(out[[i]]) <- "mcmc"
    }
    class(out) <- "mcmc.list"
  }
  out
}
brms/R/brmsfit-class.R0000644000176200001440000001016314427444030014315 0ustar liggesusers#' Class \code{brmsfit} of models fitted with the \pkg{brms} package
#'
#' Models fitted with the \code{\link[brms:brms-package]{brms}} package are
#' represented as a \code{brmsfit} object, which contains the posterior
#' draws (samples), model formula, Stan code, relevant data, and other information.
#'
#' @name brmsfit-class
#' @aliases brmsfit
#' @docType class
#'
#' @details
#' See \code{methods(class = "brmsfit")} for an overview of available methods.
#'
#' @slot formula A \code{\link{brmsformula}} object.
#' @slot data A \code{data.frame} containing all variables used in the model.
#' @slot data2 A \code{list} of data objects which cannot be passed
#'   via \code{data}.
#' @slot prior A \code{\link{brmsprior}} object containing
#'   information on the priors used in the model.
#' @slot stanvars A \code{\link{stanvars}} object.
#' @slot model The model code in \pkg{Stan} language.
#' @slot ranef A \code{data.frame} containing the group-level structure.
#' @slot exclude The names of the parameters for which draws are not saved.
#' @slot algorithm The name of the algorithm used to fit the model.
#' @slot backend The name of the backend used to fit the model.
#' @slot threads An object of class `brmsthreads` created by
#'   \code{\link{threading}}.
#' @slot opencl An object of class `brmsopencl` created by \code{\link{opencl}}.
#' @slot stan_args Named list of additional control arguments that were passed
#'   to the Stan backend directly.
#' @slot fit An object of class \code{\link[rstan:stanfit-class]{stanfit}}
#'   among others containing the posterior draws.
#' @slot basis An object that contains a small subset of the Stan data
#'   created at fitting time, which is needed to process new data correctly.
#' @slot criteria An empty \code{list} for adding model fit criteria
#'   after estimation of the model.
#' @slot file Optional name of a file in which the model object was stored in
#'   or loaded from.
#' @slot version The versions of \pkg{brms} and \pkg{rstan} with
#'   which the model was fitted.
#' @slot family (Deprecated) A \code{\link{brmsfamily}} object.
#' @slot autocor (Deprecated) An \code{\link{cor_brms}} object containing
#'   the autocorrelation structure if specified.
#' @slot cov_ranef (Deprecated) A \code{list} of customized group-level
#'   covariance matrices.
#' @slot stan_funs (Deprecated) A character string of length one or \code{NULL}.
#' @slot data.name (Deprecated) The name of \code{data} as specified by the user.
#' #' @seealso #' \code{\link{brms}}, #' \code{\link{brm}}, #' \code{\link{brmsformula}}, #' \code{\link{brmsfamily}} #' NULL # brmsfit class brmsfit <- function(formula = NULL, data = data.frame(), prior = empty_prior(), data2 = list(), stanvars = NULL, model = "", ranef = empty_ranef(), save_pars = NULL, algorithm = "sampling", backend = "rstan", threads = threading(), opencl = opencl(), stan_args = list(), fit = NULL, basis = NULL, criteria = list(), file = NULL, family = NULL, autocor = NULL, cov_ranef = NULL, stan_funs = NULL, data.name = "") { version <- list( brms = utils::packageVersion("brms"), rstan = utils::packageVersion("rstan"), stanHeaders = utils::packageVersion("StanHeaders") ) if (backend == "cmdstanr") { require_package("cmdstanr") version$cmdstanr <- utils::packageVersion("cmdstanr") version$cmdstan <- as.package_version(cmdstanr::cmdstan_version()) } x <- nlist( formula, data, prior, data2, stanvars, model, ranef, save_pars, algorithm, backend, threads, opencl, stan_args, fit, basis, criteria, file, version, family, autocor, cov_ranef, stan_funs, data.name ) class(x) <- "brmsfit" x } #' Checks if argument is a \code{brmsfit} object #' #' @param x An \R object #' #' @export is.brmsfit <- function(x) { inherits(x, "brmsfit") } #' Checks if argument is a \code{brmsfit_multiple} object #' #' @param x An \R object #' #' @export is.brmsfit_multiple <- function(x) { inherits(x, "brmsfit_multiple") } is.stanfit <- function(x) { inherits(x, "stanfit") } brms/R/distributions.R0000644000176200001440000023320214403576711014455 0ustar liggesusers#' The Student-t Distribution #' #' Density, distribution function, quantile function and random generation #' for the Student-t distribution with location \code{mu}, scale \code{sigma}, #' and degrees of freedom \code{df}. #' #' @name StudentT #' #' @param x Vector of quantiles. #' @param q Vector of quantiles. #' @param p Vector of probabilities. #' @param n Number of draws to sample from the distribution. 
#' @param mu Vector of location values.
#' @param sigma Vector of scale values.
#' @param df Vector of degrees of freedom.
#' @param log Logical; If \code{TRUE}, values are returned on the log scale.
#' @param log.p Logical; If \code{TRUE}, values are returned on the log scale.
#' @param lower.tail Logical; If \code{TRUE} (default), return P(X <= x).
#'   Else, return P(X > x).
#'
#' @details See \code{vignette("brms_families")} for details
#'   on the parameterization.
#'
#' @seealso \code{\link[stats:TDist]{TDist}}
#'
#' @export
dstudent_t <- function(x, df, mu = 0, sigma = 1, log = FALSE) {
  if (isTRUE(any(sigma < 0))) {
    stop2("sigma must be non-negative.")
  }
  # standardize once, then rescale the base t density by 1 / sigma
  z <- (x - mu) / sigma
  out <- dt(z, df = df, log = log)
  if (log) out - log(sigma) else out / sigma
}

#' @rdname StudentT
#' @export
pstudent_t <- function(q, df, mu = 0, sigma = 1,
                       lower.tail = TRUE, log.p = FALSE) {
  if (isTRUE(any(sigma < 0))) {
    stop2("sigma must be non-negative.")
  }
  # the CDF of the location-scale t is the standard t CDF of the z-score
  z <- (q - mu) / sigma
  pt(z, df = df, lower.tail = lower.tail, log.p = log.p)
}

#' @rdname StudentT
#' @export
qstudent_t <- function(p, df, mu = 0, sigma = 1,
                       lower.tail = TRUE, log.p = FALSE) {
  if (isTRUE(any(sigma < 0))) {
    stop2("sigma must be non-negative.")
  }
  # normalize probabilities to the lower-tail natural scale first
  probs <- validate_p_dist(p, lower.tail = lower.tail, log.p = log.p)
  mu + sigma * qt(probs, df = df)
}

#' @rdname StudentT
#' @export
rstudent_t <- function(n, df, mu = 0, sigma = 1) {
  if (isTRUE(any(sigma < 0))) {
    stop2("sigma must be non-negative.")
  }
  # location-scale transform of standard t draws
  mu + rt(n, df = df) * sigma
}

#' The Multivariate Normal Distribution
#'
#' Density function and random generation for the multivariate normal
#' distribution with mean vector \code{mu} and covariance matrix \code{Sigma}.
#'
#' @name MultiNormal
#'
#' @inheritParams StudentT
#' @param x Vector or matrix of quantiles. If \code{x} is a matrix,
#'   each row is taken to be a quantile.
#' @param mu Mean vector with length equal to the number of dimensions.
#' @param Sigma Covariance matrix.
#' @param check Logical; Indicates whether several input checks #' should be performed. Defaults to \code{FALSE} to improve #' efficiency. #' #' @details See the Stan user's manual \url{https://mc-stan.org/documentation/} #' for details on the parameterization #' #' @export dmulti_normal <- function(x, mu, Sigma, log = FALSE, check = FALSE) { if (is.vector(x) || length(dim(x)) == 1L) { x <- matrix(x, ncol = length(x)) } p <- ncol(x) if (check) { if (length(mu) != p) { stop2("Dimension of mu is incorrect.") } if (!all(dim(Sigma) == c(p, p))) { stop2("Dimension of Sigma is incorrect.") } if (!is_symmetric(Sigma)) { stop2("Sigma must be a symmetric matrix.") } } chol_Sigma <- chol(Sigma) rooti <- backsolve(chol_Sigma, t(x) - mu, transpose = TRUE) quads <- colSums(rooti^2) out <- -(p / 2) * log(2 * pi) - sum(log(diag(chol_Sigma))) - .5 * quads if (!log) { out <- exp(out) } out } #' @rdname MultiNormal #' @export rmulti_normal <- function(n, mu, Sigma, check = FALSE) { p <- length(mu) if (check) { if (!(is_wholenumber(n) && n > 0)) { stop2("n must be a positive integer.") } if (!all(dim(Sigma) == c(p, p))) { stop2("Dimension of Sigma is incorrect.") } if (!is_symmetric(Sigma)) { stop2("Sigma must be a symmetric matrix.") } } draws <- matrix(rnorm(n * p), nrow = n, ncol = p) mu + draws %*% chol(Sigma) } #' The Multivariate Student-t Distribution #' #' Density function and random generation for the multivariate Student-t #' distribution with location vector \code{mu}, covariance matrix \code{Sigma}, #' and degrees of freedom \code{df}. #' #' @name MultiStudentT #' #' @inheritParams StudentT #' @param x Vector or matrix of quantiles. If \code{x} is a matrix, #' each row is taken to be a quantile. #' @param mu Location vector with length equal to the number of dimensions. #' @param Sigma Covariance matrix. #' @param check Logical; Indicates whether several input checks #' should be performed. Defaults to \code{FALSE} to improve #' efficiency. 
#' #' @details See the Stan user's manual \url{https://mc-stan.org/documentation/} #' for details on the parameterization #' #' @export dmulti_student_t <- function(x, df, mu, Sigma, log = FALSE, check = FALSE) { if (is.vector(x) || length(dim(x)) == 1L) { x <- matrix(x, ncol = length(x)) } p <- ncol(x) if (check) { if (isTRUE(any(df <= 0))) { stop2("df must be greater than 0.") } if (length(mu) != p) { stop2("Dimension of mu is incorrect.") } if (!all(dim(Sigma) == c(p, p))) { stop2("Dimension of Sigma is incorrect.") } if (!is_symmetric(Sigma)) { stop2("Sigma must be a symmetric matrix.") } } chol_Sigma <- chol(Sigma) rooti <- backsolve(chol_Sigma, t(x) - mu, transpose = TRUE) quads <- colSums(rooti^2) out <- lgamma((p + df)/2) - (lgamma(df / 2) + sum(log(diag(chol_Sigma))) + p / 2 * log(pi * df)) - 0.5 * (df + p) * log1p(quads / df) if (!log) { out <- exp(out) } out } #' @rdname MultiStudentT #' @export rmulti_student_t <- function(n, df, mu, Sigma, check = FALSE) { p <- length(mu) if (isTRUE(any(df <= 0))) { stop2("df must be greater than 0.") } draws <- rmulti_normal(n, mu = rep(0, p), Sigma = Sigma, check = check) draws <- draws / sqrt(rchisq(n, df = df) / df) sweep(draws, 2, mu, "+") } #' The (Multivariate) Logistic Normal Distribution #' #' Density function and random generation for the (multivariate) logistic normal #' distribution with latent mean vector \code{mu} and covariance matrix \code{Sigma}. #' #' @name LogisticNormal #' #' @inheritParams StudentT #' @param x Vector or matrix of quantiles. If \code{x} is a matrix, #' each row is taken to be a quantile. #' @param mu Mean vector with length equal to the number of dimensions. #' @param Sigma Covariance matrix. #' @param refcat A single integer indicating the reference category. #' Defaults to \code{1}. #' @param check Logical; Indicates whether several input checks #' should be performed. Defaults to \code{FALSE} to improve #' efficiency. 
#' #' @export dlogistic_normal <- function(x, mu, Sigma, refcat = 1, log = FALSE, check = FALSE) { if (is.vector(x) || length(dim(x)) == 1L) { x <- matrix(x, ncol = length(x)) } lx <- link_categorical(x, refcat) out <- dmulti_normal(lx, mu, Sigma, log = TRUE) - rowSums(log(x)) if (!log) { out <- exp(out) } out } #' @rdname LogisticNormal #' @export rlogistic_normal <- function(n, mu, Sigma, refcat = 1, check = FALSE) { out <- rmulti_normal(n, mu, Sigma, check = check) inv_link_categorical(out, refcat = refcat) } #' The Skew-Normal Distribution #' #' Density, distribution function, and random generation for the #' skew-normal distribution with mean \code{mu}, #' standard deviation \code{sigma}, and skewness \code{alpha}. #' #' @name SkewNormal #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param mu Vector of mean values. #' @param sigma Vector of standard deviation values. #' @param alpha Vector of skewness values. #' @param xi Optional vector of location values. #' If \code{NULL} (the default), will be computed internally. #' @param omega Optional vector of scale values. #' If \code{NULL} (the default), will be computed internally. #' @param tol Tolerance of the approximation used in the #' computation of quantiles. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. 
#' #' @export dskew_normal <- function(x, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, log = FALSE) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be greater than 0.") } args <- cp2dp(mu, sigma, alpha, xi = xi, omega = omega, x = x) out <- with(args, { # do it like sn::dsn z <- (x - xi) / omega if (length(alpha) == 1L) { alpha <- rep(alpha, length(z)) } logN <- -log(sqrt(2 * pi)) - log(omega) - z^2 / 2 logS <- ifelse( abs(alpha) < Inf, pnorm(alpha * z, log.p = TRUE), log(as.numeric(sign(alpha) * z > 0)) ) out <- logN + logS - pnorm(0, log.p = TRUE) ifelse(abs(z) == Inf, -Inf, out) }) if (!log) { out <- exp(out) } out } #' @rdname SkewNormal #' @export pskew_normal <- function(q, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, lower.tail = TRUE, log.p = FALSE) { require_package("mnormt") if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } args <- cp2dp(mu, sigma, alpha, xi = xi, omega = omega, q = q) out <- with(args, { # do it like sn::psn z <- (q - xi) / omega nz <- length(z) is_alpha_inf <- abs(alpha) == Inf delta[is_alpha_inf] <- sign(alpha[is_alpha_inf]) out <- numeric(nz) for (k in seq_len(nz)) { if (is_alpha_inf[k]) { if (alpha[k] > 0) { out[k] <- 2 * (pnorm(pmax(z[k], 0)) - 0.5) } else { out[k] <- 1 - 2 * (0.5 - pnorm(pmin(z[k], 0))) } } else { S <- matrix(c(1, -delta[k], -delta[k], 1), 2, 2) out[k] <- 2 * mnormt::biv.nt.prob( 0, lower = rep(-Inf, 2), upper = c(z[k], 0), mean = c(0, 0), S = S ) } } pmin(1, pmax(0, out)) }) if (!lower.tail) { out <- 1 - out } if (log.p) { out <- log(out) } out } #' @rdname SkewNormal #' @export qskew_normal <- function(p, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, lower.tail = TRUE, log.p = FALSE, tol = 1e-8) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } p <- validate_p_dist(p, lower.tail = lower.tail, log.p = log.p) args <- cp2dp(mu, sigma, alpha, xi = xi, omega = omega, p = p) out <- with(args, { # do it like sn::qsn na <- is.na(p) | (p < 0) | (p > 1) 
zero <- (p == 0) one <- (p == 1) p <- replace(p, (na | zero | one), 0.5) cum <- skew_normal_cumulants(0, 1, alpha, n = 4) g1 <- cum[, 3] / cum[, 2]^(3 / 2) g2 <- cum[, 4] / cum[, 2]^2 x <- qnorm(p) x <- x + (x^2 - 1) * g1 / 6 + x * (x^2 - 3) * g2 / 24 - x * (2 * x^2 - 5) * g1^2 / 36 x <- cum[, 1] + sqrt(cum[, 2]) * x px <- pskew_normal(x, xi = 0, omega = 1, alpha = alpha) max_err <- 1 while (max_err > tol) { x1 <- x - (px - p) / dskew_normal(x, xi = 0, omega = 1, alpha = alpha) x <- x1 px <- pskew_normal(x, xi = 0, omega = 1, alpha = alpha) max_err <- max(abs(px - p)) if (is.na(max_err)) { warning2("Approximation in 'qskew_normal' might have failed.") } } x <- replace(x, na, NA) x <- replace(x, zero, -Inf) x <- replace(x, one, Inf) as.numeric(xi + omega * x) }) out } #' @rdname SkewNormal #' @export rskew_normal <- function(n, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } args <- cp2dp(mu, sigma, alpha, xi = xi, omega = omega) with(args, { # do it like sn::rsn z1 <- rnorm(n) z2 <- rnorm(n) id <- z2 > args$alpha * z1 z1[id] <- -z1[id] xi + omega * z1 }) } # convert skew-normal mixed-CP to DP parameterization # @return a data.frame containing all relevant parameters cp2dp <- function(mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, ...) 
{ delta <- alpha / sqrt(1 + alpha^2) if (is.null(omega)) { omega <- sigma / sqrt(1 - 2 / pi * delta^2) } if (is.null(xi)) { xi <- mu - omega * delta * sqrt(2 / pi) } expand(dots = nlist(mu, sigma, alpha, xi, omega, delta, ...)) } # helper function for qskew_normal # code basis taken from sn::sn.cumulants # uses xi and omega rather than mu and sigma skew_normal_cumulants <- function(xi = 0, omega = 1, alpha = 0, n = 4) { cumulants_half_norm <- function(n) { n <- max(n, 2) n <- as.integer(2 * ceiling(n/2)) half.n <- as.integer(n/2) m <- 0:(half.n - 1) a <- sqrt(2/pi)/(gamma(m + 1) * 2^m * (2 * m + 1)) signs <- rep(c(1, -1), half.n)[seq_len(half.n)] a <- as.vector(rbind(signs * a, rep(0, half.n))) coeff <- rep(a[1], n) for (k in 2:n) { ind <- seq_len(k - 1) coeff[k] <- a[k] - sum(ind * coeff[ind] * a[rev(ind)]/k) } kappa <- coeff * gamma(seq_len(n) + 1) kappa[2] <- 1 + kappa[2] return(kappa) } args <- expand(dots = nlist(xi, omega, alpha)) with(args, { # do it like sn::sn.cumulants delta <- alpha / sqrt(1 + alpha^2) kv <- cumulants_half_norm(n) if (length(kv) > n) { kv <- kv[-(n + 1)] } kv[2] <- kv[2] - 1 kappa <- outer(delta, 1:n, "^") * matrix(rep(kv, length(xi)), ncol = n, byrow = TRUE) kappa[, 2] <- kappa[, 2] + 1 kappa <- kappa * outer(omega, 1:n, "^") kappa[, 1] <- kappa[, 1] + xi kappa }) } # CDF of the inverse gamma function pinvgamma <- function(q, shape, rate, lower.tail = TRUE, log.p = FALSE) { pgamma(1/q, shape, rate = rate, lower.tail = !lower.tail, log.p = log.p) } #' The von Mises Distribution #' #' Density, distribution function, and random generation for the #' von Mises distribution with location \code{mu}, and precision \code{kappa}. #' #' @name VonMises #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param kappa Vector of precision values. #' @param acc Accuracy of numerical approximations. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. 
#' #' @export dvon_mises <- function(x, mu, kappa, log = FALSE) { if (isTRUE(any(kappa < 0))) { stop2("kappa must be non-negative") } # expects x in [-pi, pi] rather than [0, 2*pi] as CircStats::dvm be <- besselI(kappa, nu = 0, expon.scaled = TRUE) out <- -log(2 * pi * be) + kappa * (cos(x - mu) - 1) if (!log) { out <- exp(out) } out } #' @rdname VonMises #' @export pvon_mises <- function(q, mu, kappa, lower.tail = TRUE, log.p = FALSE, acc = 1e-20) { if (isTRUE(any(kappa < 0))) { stop2("kappa must be non-negative") } pi <- base::pi pi2 <- 2 * pi q <- (q + pi) %% pi2 mu <- (mu + pi) %% pi2 args <- expand(q = q, mu = mu, kappa = kappa) q <- args$q mu <- args$mu kappa <- args$kappa rm(args) # code basis taken from CircStats::pvm but improved # considerably with respect to speed and stability rec_sum <- function(q, kappa, acc, sum = 0, i = 1) { # compute the sum of of besselI functions recursively term <- (besselI(kappa, nu = i) * sin(i * q)) / i sum <- sum + term rd <- abs(term) >= acc if (sum(rd)) { sum[rd] <- rec_sum( q[rd], kappa[rd], acc, sum = sum[rd], i = i + 1 ) } sum } .pvon_mises <- function(q, kappa, acc) { sum <- rec_sum(q, kappa, acc) q / pi2 + sum / (pi * besselI(kappa, nu = 0)) } out <- rep(NA, length(mu)) zero_mu <- mu == 0 if (sum(zero_mu)) { out[zero_mu] <- .pvon_mises(q[zero_mu], kappa[zero_mu], acc) } lq_mu <- q <= mu if (sum(lq_mu)) { upper <- (q[lq_mu] - mu[lq_mu]) %% pi2 upper[upper == 0] <- pi2 lower <- (-mu[lq_mu]) %% pi2 out[lq_mu] <- .pvon_mises(upper, kappa[lq_mu], acc) - .pvon_mises(lower, kappa[lq_mu], acc) } uq_mu <- q > mu if (sum(uq_mu)) { upper <- q[uq_mu] - mu[uq_mu] lower <- mu[uq_mu] %% pi2 out[uq_mu] <- .pvon_mises(upper, kappa[uq_mu], acc) + .pvon_mises(lower, kappa[uq_mu], acc) } if (!lower.tail) { out <- 1 - out } if (log.p) { out <- log(out) } out } #' @rdname VonMises #' @export rvon_mises <- function(n, mu, kappa) { if (isTRUE(any(kappa < 0))) { stop2("kappa must be non-negative") } args <- expand(mu = mu, kappa = kappa, 
length = n) mu <- args$mu kappa <- args$kappa rm(args) pi <- base::pi mu <- mu + pi # code basis taken from CircStats::rvm but improved # considerably with respect to speed and stability rvon_mises_outer <- function(r, mu, kappa) { n <- length(r) U1 <- runif(n, 0, 1) z <- cos(pi * U1) f <- (1 + r * z) / (r + z) c <- kappa * (r - f) U2 <- runif(n, 0, 1) outer <- is.na(f) | is.infinite(f) | !(c * (2 - c) - U2 > 0 | log(c / U2) + 1 - c >= 0) inner <- !outer out <- rep(NA, n) if (sum(inner)) { out[inner] <- rvon_mises_inner(f[inner], mu[inner]) } if (sum(outer)) { # evaluate recursively until a valid sample is found out[outer] <- rvon_mises_outer(r[outer], mu[outer], kappa[outer]) } out } rvon_mises_inner <- function(f, mu) { n <- length(f) U3 <- runif(n, 0, 1) (sign(U3 - 0.5) * acos(f) + mu) %% (2 * pi) } a <- 1 + (1 + 4 * (kappa^2))^0.5 b <- (a - (2 * a)^0.5) / (2 * kappa) r <- (1 + b^2) / (2 * b) # indicates underflow due to kappa being close to zero is_uf <- is.na(r) | is.infinite(r) not_uf <- !is_uf out <- rep(NA, n) if (sum(is_uf)) { out[is_uf] <- runif(sum(is_uf), 0, 2 * pi) } if (sum(not_uf)) { out[not_uf] <- rvon_mises_outer(r[not_uf], mu[not_uf], kappa[not_uf]) } out - pi } #' The Exponentially Modified Gaussian Distribution #' #' Density, distribution function, and random generation #' for the exponentially modified Gaussian distribution with #' mean \code{mu} and standard deviation \code{sigma} of the gaussian #' component, as well as scale \code{beta} of the exponential #' component. #' #' @name ExGaussian #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param mu Vector of means of the combined distribution. #' @param sigma Vector of standard deviations of the gaussian component. #' @param beta Vector of scales of the exponential component. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. 
#' #' @export dexgaussian <- function(x, mu, sigma, beta, log = FALSE) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } if (isTRUE(any(beta < 0))) { stop2("beta must be non-negative.") } args <- nlist(x, mu, sigma, beta) args <- do_call(expand, args) args$mu <- with(args, mu - beta) args$z <- with(args, x - mu - sigma^2 / beta) out <- with(args, -log(beta) - (z + sigma^2 / (2 * beta)) / beta + pnorm(z / sigma, log.p = TRUE) ) if (!log) { out <- exp(out) } out } #' @rdname ExGaussian #' @export pexgaussian <- function(q, mu, sigma, beta, lower.tail = TRUE, log.p = FALSE) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } if (isTRUE(any(beta < 0))) { stop2("beta must be non-negative.") } args <- nlist(q, mu, sigma, beta) args <- do_call(expand, args) args$mu <- with(args, mu - beta) args$z <- with(args, q - mu - sigma^2 / beta) out <- with(args, pnorm((q - mu) / sigma) - pnorm(z / sigma) * exp(((mu + sigma^2 / beta)^2 - mu^2 - 2 * q * sigma^2 / beta) / (2 * sigma^2)) ) if (!lower.tail) { out <- 1 - out } if (log.p) { out <- log(out) } out } #' @rdname ExGaussian #' @export rexgaussian <- function(n, mu, sigma, beta) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } if (isTRUE(any(beta < 0))) { stop2("beta must be non-negative.") } mu <- mu - beta rnorm(n, mean = mu, sd = sigma) + rexp(n, rate = 1 / beta) } #' The Frechet Distribution #' #' Density, distribution function, quantile function and random generation #' for the Frechet distribution with location \code{loc}, scale \code{scale}, #' and shape \code{shape}. #' #' @name Frechet #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param loc Vector of locations. #' @param scale Vector of scales. #' @param shape Vector of shapes. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. 
#' #' @export dfrechet <- function(x, loc = 0, scale = 1, shape = 1, log = FALSE) { if (isTRUE(any(scale <= 0))) { stop2("Argument 'scale' must be positive.") } if (isTRUE(any(shape <= 0))) { stop2("Argument 'shape' must be positive.") } x <- (x - loc) / scale args <- nlist(x, loc, scale, shape) args <- do_call(expand, args) out <- with(args, log(shape / scale) - (1 + shape) * log(x) - x^(-shape) ) if (!log) { out <- exp(out) } out } #' @rdname Frechet #' @export pfrechet <- function(q, loc = 0, scale = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) { if (isTRUE(any(scale <= 0))) { stop2("Argument 'scale' must be positive.") } if (isTRUE(any(shape <= 0))) { stop2("Argument 'shape' must be positive.") } q <- pmax((q - loc) / scale, 0) out <- exp(-q^(-shape)) if (!lower.tail) { out <- 1 - out } if (log.p) { out <- log(out) } out } #' @rdname Frechet #' @export qfrechet <- function(p, loc = 0, scale = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) { if (isTRUE(any(scale <= 0))) { stop2("Argument 'scale' must be positive.") } if (isTRUE(any(shape <= 0))) { stop2("Argument 'shape' must be positive.") } p <- validate_p_dist(p, lower.tail = lower.tail, log.p = log.p) loc + scale * (-log(p))^(-1/shape) } #' @rdname Frechet #' @export rfrechet <- function(n, loc = 0, scale = 1, shape = 1) { if (isTRUE(any(scale <= 0))) { stop2("Argument 'scale' must be positive.") } if (isTRUE(any(shape <= 0))) { stop2("Argument 'shape' must be positive.") } loc + scale * rexp(n)^(-1 / shape) } #' The Shifted Log Normal Distribution #' #' Density, distribution function, quantile function and random generation #' for the shifted log normal distribution with mean \code{meanlog}, #' standard deviation \code{sdlog}, and shift parameter \code{shift}. #' #' @name Shifted_Lognormal #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param meanlog Vector of means. #' @param sdlog Vector of standard deviations. #' @param shift Vector of shifts. 
#' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. #' #' @export dshifted_lnorm <- function(x, meanlog = 0, sdlog = 1, shift = 0, log = FALSE) { args <- nlist(dist = "lnorm", x, shift, meanlog, sdlog, log) do_call(dshifted, args) } #' @rdname Shifted_Lognormal #' @export pshifted_lnorm <- function(q, meanlog = 0, sdlog = 1, shift = 0, lower.tail = TRUE, log.p = FALSE) { args <- nlist(dist = "lnorm", q, shift, meanlog, sdlog, lower.tail, log.p) do_call(pshifted, args) } #' @rdname Shifted_Lognormal #' @export qshifted_lnorm <- function(p, meanlog = 0, sdlog = 1, shift = 0, lower.tail = TRUE, log.p = FALSE) { args <- nlist(dist = "lnorm", p, shift, meanlog, sdlog, lower.tail, log.p) do_call(qshifted, args) } #' @rdname Shifted_Lognormal #' @export rshifted_lnorm <- function(n, meanlog = 0, sdlog = 1, shift = 0) { args <- nlist(dist = "lnorm", n, shift, meanlog, sdlog) do_call(rshifted, args) } #' The Inverse Gaussian Distribution #' #' Density, distribution function, and random generation #' for the inverse Gaussian distribution with location \code{mu}, #' and shape \code{shape}. #' #' @name InvGaussian #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param mu Vector of locations. #' @param shape Vector of shapes. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. 
#'
#' @export
dinv_gaussian <- function(x, mu = 1, shape = 1, log = FALSE) {
  if (isTRUE(any(mu <= 0))) {
    stop2("Argument 'mu' must be positive.")
  }
  if (isTRUE(any(shape <= 0))) {
    stop2("Argument 'shape' must be positive.")
  }
  args <- nlist(x, mu, shape)
  args <- do_call(expand, args)
  # log-density of the inverse Gaussian; exponentiated below unless log = TRUE
  out <- with(args,
    0.5 * log(shape / (2 * pi)) -
    1.5 * log(x) -
    0.5 * shape * (x - mu)^2 / (x * mu^2)
  )
  if (!log) {
    out <- exp(out)
  }
  out
}

#' @rdname InvGaussian
#' @export
pinv_gaussian <- function(q, mu = 1, shape = 1, lower.tail = TRUE,
                          log.p = FALSE) {
  if (isTRUE(any(mu <= 0))) {
    stop2("Argument 'mu' must be positive.")
  }
  if (isTRUE(any(shape <= 0))) {
    stop2("Argument 'shape' must be positive.")
  }
  args <- nlist(q, mu, shape)
  args <- do_call(expand, args)
  # closed-form CDF of the inverse Gaussian expressed via the normal CDF
  out <- with(args,
    pnorm(sqrt(shape / q) * (q / mu - 1)) +
      exp(2 * shape / mu) * pnorm(-sqrt(shape / q) * (q / mu + 1))
  )
  if (!lower.tail) {
    out <- 1 - out
  }
  if (log.p) {
    out <- log(out)
  }
  out
}

#' @rdname InvGaussian
#' @export
rinv_gaussian <- function(n, mu = 1, shape = 1) {
  # create random numbers for the inverse gaussian distribution
  # Args: see dinv_gaussian
  if (isTRUE(any(mu <= 0))) {
    stop2("Argument 'mu' must be positive.")
  }
  if (isTRUE(any(shape <= 0))) {
    stop2("Argument 'shape' must be positive.")
  }
  args <- nlist(mu, shape, length = n)
  args <- do_call(expand, args)
  # algorithm from wikipedia: transformation method based on a chi-square
  # draw followed by a uniform choice between the two candidate roots
  args$y <- rnorm(n)^2
  args$x <- with(args,
    mu + (mu^2 * y) / (2 * shape) - mu / (2 * shape) *
      sqrt(4 * mu * shape * y + mu^2 * y^2)
  )
  args$z <- runif(n)
  with(args, ifelse(z <= mu / (mu + x), x, mu^2 / x))
}

#' The Beta-binomial Distribution
#'
#' Cumulative density & mass functions, and random number generation for the
#' Beta-binomial distribution using the following re-parameterisation of the
#' \href{https://mc-stan.org/docs/2_29/functions-reference/beta-binomial-distribution.html}{Stan
#' Beta-binomial definition}:
#' \itemize{
#' \item{\code{mu = alpha / (alpha + beta)}} mean probability of trial success.
#' \item{\code{phi = alpha + beta}} precision or over-dispersion component.
#' }
#'
#' @name BetaBinomial
#'
#' @inheritParams StudentT
#' @param x,q Vector of quantiles.
#' @param size Vector of number of trials (zero or more).
#' @param mu Vector of means.
#' @param phi Vector of precisions.
#'
#' @export
dbeta_binomial <- function(x, size, mu, phi, log = FALSE) {
  require_package("extraDistr")
  # map (mu, phi) to the (alpha, beta) parameterization used by extraDistr
  alpha <- mu * phi
  beta <- (1 - mu) * phi
  extraDistr::dbbinom(x, size, alpha = alpha, beta = beta, log = log)
}

#' @rdname BetaBinomial
#' @export
pbeta_binomial <- function(q, size, mu, phi, lower.tail = TRUE,
                           log.p = FALSE) {
  require_package("extraDistr")
  # map (mu, phi) to the (alpha, beta) parameterization used by extraDistr
  alpha <- mu * phi
  beta <- (1 - mu) * phi
  extraDistr::pbbinom(q, size, alpha = alpha, beta = beta,
                      lower.tail = lower.tail, log.p = log.p)
}

#' @rdname BetaBinomial
#' @export
rbeta_binomial <- function(n, size, mu, phi) {
  # beta location-scale probabilities: draw per-observation success
  # probabilities from the underlying beta distribution
  probs <- rbeta(n, mu * phi, (1 - mu) * phi)
  # binomial draws
  rbinom(n, size = size, prob = probs)
}

#' The Generalized Extreme Value Distribution
#'
#' Density, distribution function, and random generation
#' for the generalized extreme value distribution with
#' location \code{mu}, scale \code{sigma} and shape \code{xi}.
#'
#' @name GenExtremeValue
#'
#' @inheritParams StudentT
#' @param x,q Vector of quantiles.
#' @param mu Vector of locations.
#' @param sigma Vector of scales.
#' @param xi Vector of shapes.
#'
#' @details See \code{vignette("brms_families")} for details
#'   on the parameterization.
#'
#' @export
dgen_extreme_value <- function(x, mu = 0, sigma = 1, xi = 0, log = FALSE) {
  if (isTRUE(any(sigma <= 0))) {
    stop2("sigma must be positive.")
  }
  x <- (x - mu) / sigma
  args <- nlist(x, mu, sigma, xi)
  args <- do_call(expand, args)
  # t = 1 + xi * z is the GEV kernel argument; the xi == 0 branch
  # is the Gumbel limit of the general expression
  args$t <- with(args, 1 + xi * x)
  out <- with(args, ifelse(
    xi == 0,
    -log(sigma) - x - exp(-x),
    -log(sigma) - (1 + 1 / xi) * log(t) - t^(-1 / xi)
  ))
  if (!log) {
    out <- exp(out)
  }
  out
}

#' @rdname GenExtremeValue
#' @export
pgen_extreme_value <- function(q, mu = 0, sigma = 1, xi = 0,
                               lower.tail = TRUE, log.p = FALSE) {
  if (isTRUE(any(sigma <= 0))) {
    stop2("sigma must be positive.")
  }
  q <- (q - mu) / sigma
  args <- nlist(q, mu, sigma, xi)
  args <- do_call(expand, args)
  # F(q) = exp(-(1 + xi * q)^(-1 / xi)); xi == 0 gives the Gumbel CDF
  out <- with(args, ifelse(
    xi == 0,
    exp(-exp(-q)),
    exp(-(1 + xi * q)^(-1 / xi))
  ))
  if (!lower.tail) {
    out <- 1 - out
  }
  if (log.p) {
    out <- log(out)
  }
  out
}

#' @rdname GenExtremeValue
#' @export
qgen_extreme_value <- function(p, mu = 0, sigma = 1, xi = 0,
                               lower.tail = TRUE, log.p = FALSE) {
  if (isTRUE(any(sigma <= 0))) {
    stop2("sigma must be positive.")
  }
  p <- validate_p_dist(p, lower.tail = lower.tail, log.p = log.p)
  args <- nlist(p, mu, sigma, xi)
  args <- do_call(expand, args)
  # inverting F(q) = exp(-(1 + xi * z)^(-1 / xi)) yields
  # z = ((-log(p))^(-xi) - 1) / xi; this matches the draws produced
  # by rgen_extreme_value below (the previous '1 - (-log(p))^xi' form
  # was not the inverse of pgen_extreme_value for xi != 0)
  out <- with(args, ifelse(
    xi == 0,
    mu - sigma * log(-log(p)),
    mu + sigma * ((-log(p))^(-xi) - 1) / xi
  ))
  out
}

#' @rdname GenExtremeValue
#' @export
rgen_extreme_value <- function(n, mu = 0, sigma = 1, xi = 0) {
  if (isTRUE(any(sigma <= 0))) {
    stop2("sigma must be positive.")
  }
  args <- nlist(mu, sigma, xi, length = n)
  args <- do_call(expand, args)
  # E ~ Exp(1) implies (E^(-xi) - 1) / xi ~ standardized GEV(xi)
  with(args, ifelse(
    xi == 0,
    mu - sigma * log(rexp(n)),
    mu + sigma * (rexp(n)^(-xi) - 1) / xi
  ))
}

#' The Asymmetric Laplace Distribution
#'
#' Density, distribution function, quantile function and random generation
#' for the asymmetric Laplace distribution with location \code{mu},
#' scale \code{sigma} and asymmetry parameter \code{quantile}.
#'
#' @name AsymLaplace
#'
#' @inheritParams StudentT
#' @param x,q Vector of quantiles.
#' @param mu Vector of locations.
#' @param sigma Vector of scales.
#' @param quantile Asymmetry parameter corresponding to quantiles
#'   in quantile regression (hence the name).
#'
#' @details See \code{vignette("brms_families")} for details
#'   on the parameterization.
#'
#' @export
dasym_laplace <- function(x, mu = 0, sigma = 1, quantile = 0.5,
                          log = FALSE) {
  # piecewise density: exponential decay on either side of mu with
  # rates governed by the asymmetry parameter
  # NOTE(review): computed on the natural scale and only logged afterwards,
  # so extreme tails may underflow to -Inf when log = TRUE — confirm acceptable
  out <- ifelse(x < mu,
    yes = (quantile * (1 - quantile) / sigma) *
      exp((1 - quantile) * (x - mu) / sigma),
    no = (quantile * (1 - quantile) / sigma) *
      exp(-quantile * (x - mu) / sigma)
  )
  if (log) {
    out <- log(out)
  }
  out
}

#' @rdname AsymLaplace
#' @export
pasym_laplace <- function(q, mu = 0, sigma = 1, quantile = 0.5,
                          lower.tail = TRUE, log.p = FALSE) {
  # closed-form CDF with separate branches below and above the location mu
  out <- ifelse(q < mu,
    yes = quantile * exp((1 - quantile) * (q - mu) / sigma),
    no = 1 - (1 - quantile) * exp(-quantile * (q - mu) / sigma)
  )
  if (!lower.tail) {
    out <- 1 - out
  }
  if (log.p) {
    out <- log(out)
  }
  out
}

#' @rdname AsymLaplace
#' @export
qasym_laplace <- function(p, mu = 0, sigma = 1, quantile = 0.5,
                          lower.tail = TRUE, log.p = FALSE) {
  p <- validate_p_dist(p, lower.tail = lower.tail, log.p = log.p)
  # recycle a scalar asymmetry parameter to the length of mu so the
  # ifelse branches below align elementwise
  if (length(quantile) == 1L) {
    quantile <- rep(quantile, length(mu))
  }
  ifelse(p < quantile,
    yes = mu + ((sigma * log(p / quantile)) / (1 - quantile)),
    no = mu - ((sigma * log((1 - p) / (1 - quantile))) / quantile)
  )
}

#' @rdname AsymLaplace
#' @export
rasym_laplace <- function(n, mu = 0, sigma = 1, quantile = 0.5) {
  # inverse-CDF sampling via the closed-form quantile function
  u <- runif(n)
  qasym_laplace(u, mu = mu, sigma = sigma, quantile = quantile)
}

# The Discrete Weibull Distribution
#
# Density, distribution function, quantile function and random generation
# for the discrete Weibull distribution with location \code{mu} and
# shape \code{shape}.
#
# @name DiscreteWeibull
#
# @inheritParams StudentT
# @param mu Location parameter in the unit interval.
# @param shape Positive shape parameter.
#
# @details See \code{vignette("brms_families")} for details
#   on the parameterization.
#
# @export
ddiscrete_weibull <- function(x, mu, shape, log = FALSE) {
  if (isTRUE(any(mu < 0 | mu > 1))) {
    stop2("mu must be between 0 and 1.")
  }
  if (isTRUE(any(shape <= 0))) {
    stop2("shape must be positive.")
  }
  x <- round(x)
  # P(X = x) = S(x) - S(x + 1) with survival-type term mu^(x^shape)
  out <- mu^x^shape - mu^(x + 1)^shape
  out[x < 0] <- 0
  if (log) {
    out <- log(out)
  }
  out
}

# @rdname DiscreteWeibull
# @export
pdiscrete_weibull <- function(x, mu, shape, lower.tail = TRUE, log.p = FALSE) {
  if (isTRUE(any(mu < 0 | mu > 1))) {
    stop2("mu must be between 0 and 1.")
  }
  if (isTRUE(any(shape <= 0))) {
    stop2("shape must be positive.")
  }
  x <- round(x)
  if (lower.tail) {
    out <- 1 - mu^(x + 1)^shape
    out[x < 0] <- 0
  } else {
    out <- mu^(x + 1)^shape
    out[x < 0] <- 1
  }
  if (log.p) {
    out <- log(out)
  }
  out
}

# @rdname DiscreteWeibull
# @export
qdiscrete_weibull <- function(p, mu, shape, lower.tail = TRUE, log.p = FALSE) {
  if (isTRUE(any(mu < 0 | mu > 1))) {
    stop2("mu must be between 0 and 1.")
  }
  if (isTRUE(any(shape <= 0))) {
    stop2("shape must be positive.")
  }
  p <- validate_p_dist(p, lower.tail = lower.tail, log.p = log.p)
  # closed-form inverse of the CDF, rounded up to the next integer
  ceiling((log(1 - p) / log(mu))^(1 / shape) - 1)
}

# @rdname DiscreteWeibull
# @export
rdiscrete_weibull <- function(n, mu, shape) {
  # inverse-CDF sampling via uniform draws
  u <- runif(n, 0, 1)
  qdiscrete_weibull(u, mu, shape)
}

# mean of the discrete weibull distribution
# @param mu location parameter
# @param shape shape parameter
# @param M maximal evaluated element of the series
# @param thres threshold for new elements at which to stop evaluation
mean_discrete_weibull <- function(mu, shape, M = 1000, thres = 0.001) {
  # smallest truncation point at which remaining terms drop below 'thres'
  opt_M <- ceiling(max((log(thres) / log(mu))^(1 / shape)))
  if (opt_M <= M) {
    M <- opt_M
  } else {
    # avoid the loop below running too slow
    warning2(
      "Approximating the mean of the 'discrete_weibull' ",
      "distribution failed and results may be inaccurate."
    )
  }
  # E(X) = sum_{y >= 1} mu^(y^shape), truncated after M terms
  out <- 0
  for (y in seq_len(M)) {
    out <- out + mu^y^shape
  }
  # approximation of the residual series (see Englehart & Li, 2011)
  # returns unreasonably large values presumably due to numerical issues
  out
}

# PDF of the COM-Poisson distribution
# com_poisson in brms uses the mode parameterization
dcom_poisson <- function(x, mu, shape, log = FALSE) {
  x <- round(x)
  log_mu <- log(mu)
  log_Z <- log_Z_com_poisson(log_mu, shape)
  out <- shape * (x * log_mu - lgamma(x + 1)) - log_Z
  if (!log) {
    out <- exp(out)
  }
  out
}

# random numbers from the COM-Poisson distribution
rcom_poisson <- function(n, mu, shape, M = 10000) {
  n <- check_n_rdist(n, mu, shape)
  M <- as.integer(as_one_numeric(M))
  log_mu <- log(mu)
  # approximating log_Z may yield too large random draws
  log_Z <- log_Z_com_poisson(log_mu, shape, approx = FALSE)
  # sequential inverse-CDF search: walk the support until the CDF
  # exceeds each uniform draw or the cap M is reached
  u <- runif(n, 0, 1)
  cdf <- exp(-log_Z)
  lfac <- 0
  y <- 0
  out <- rep(0, n)
  not_found <- cdf < u
  while (any(not_found) && y <= M) {
    y <- y + 1
    out[not_found] <- y
    lfac <- lfac + log(y)
    cdf <- cdf + exp(shape * (y * log_mu - lfac) - log_Z)
    not_found <- cdf < u
  }
  if (any(not_found)) {
    out[not_found] <- NA
    nfailed <- sum(not_found)
    warning2(
      "Drawing random numbers from the 'com_poisson' ",
      "distribution failed in ", nfailed, " cases."
    )
  }
  out
}

# CDF of the COM-Poisson distribution
pcom_poisson <- function(x, mu, shape, lower.tail = TRUE, log.p = FALSE) {
  x <- round(x)
  args <- expand(x = x, mu = mu, shape = shape)
  x <- args$x
  mu <- args$mu
  shape <- args$shape
  log_mu <- log(mu)
  log_Z <- log_Z_com_poisson(log_mu, shape)
  # accumulate the log partial sums of the unnormalized PMF
  out <- rep(0, length(x))
  dim(out) <- attributes(args)$max_dim
  # NOTE(review): shape * log_mu is full length while out[x > 0] indexes a
  # subset; this relies on recycling -- verify for mixed-length inputs
  out[x > 0] <- log1p_exp(shape * log_mu)
  k <- 2
  lfac <- 0
  while (any(x >= k)) {
    lfac <- lfac + log(k)
    term <- shape * (k * log_mu - lfac)
    out[x >= k] <- log_sum_exp(out[x >= k], term)
    k <- k + 1
  }
  out <- out - log_Z
  # guard against log-probabilities slightly above 0 from rounding
  out[out > 0] <- 0
  if (!lower.tail) {
    out <- log1m_exp(out)
  }
  if (!log.p) {
    out <- exp(out)
  }
  out
}

# log normalizing constant of the COM Poisson distribution
# @param log_mu log location parameter
# @param shape shape parameter
# @param M maximal evaluated element of the series
# @param thres threshold for new elements at which to stop evaluation
# @param approx use a closed form approximation of the mean if appropriate?
log_Z_com_poisson <- function(log_mu, shape, M = 10000, thres = 1e-16,
                              approx = TRUE) {
  if (isTRUE(any(shape <= 0))) {
    stop2("'shape' must be positive.")
  }
  if (isTRUE(any(shape == Inf))) {
    stop2("'shape' must be finite.")
  }
  approx <- as_one_logical(approx)
  args <- expand(log_mu = log_mu, shape = shape)
  log_mu <- args$log_mu
  shape <- args$shape
  out <- rep(NA, length(log_mu))
  dim(out) <- attributes(args)$max_dim
  use_poisson <- shape == 1
  if (any(use_poisson)) {
    # shape == 1 implies the poisson distribution
    out[use_poisson] <- exp(log_mu[use_poisson])
  }
  if (approx) {
    # use a closed form approximation if appropriate
    use_approx <- log_mu * shape >= log(1.5) & log_mu >= log(1.5)
    if (any(use_approx)) {
      out[use_approx] <- log_Z_com_poisson_approx(
        log_mu[use_approx], shape[use_approx]
      )
    }
  }
  use_exact <- is.na(out)
  if (any(use_exact)) {
    # direct computation of the truncated series
    M <- as.integer(as_one_numeric(M))
    thres <- as_one_numeric(thres)
    log_thres <- log(thres)
    log_mu <- log_mu[use_exact]
    shape <- shape[use_exact]
    # first 2 terms of the series
    out_exact <- log1p_exp(shape * log_mu)
    lfac <- 0
    k <- 2
    converged <- FALSE
    while (!converged && k <= M) {
      lfac <- lfac + log(k)
      term <- shape * (k * log_mu - lfac)
      out_exact <- log_sum_exp(out_exact, term)
      converged <- all(term <= log_thres)
      k <- k + 1
    }
    out[use_exact] <- out_exact
    if (!converged) {
      warning2(
        "Approximating the normalizing constant of the 'com_poisson' ",
        "distribution failed and results may be inaccurate."
      )
    }
  }
  out
}

# approximate the log normalizing constant of the COM Poisson distribution
# based on doi:10.1007/s10463-017-0629-6
log_Z_com_poisson_approx <- function(log_mu, shape) {
  shape_mu <- shape * exp(log_mu)
  shape2 <- shape^2
  # first 4 terms of the residual series
  log_sum_resid <- log(
    1 + shape_mu^(-1) * (shape2 - 1) / 24 +
      shape_mu^(-2) * (shape2 - 1) / 1152 * (shape2 + 23) +
      shape_mu^(-3) * (shape2 - 1) / 414720 *
        (5 * shape2^2 - 298 * shape2 + 11237)
  )
  shape_mu + log_sum_resid -
    ((log(2 * pi) + log_mu) * (shape - 1) / 2 + log(shape) / 2)
}

# compute the mean of the COM Poisson distribution
# (the function returns the mean itself, not its log)
# @param mu location parameter
# @param shape shape parameter
# @param M maximal evaluated element of the series
# @param thres threshold for new elements at which to stop evaluation
# @param approx use a closed form approximation of the mean if appropriate?
mean_com_poisson <- function(mu, shape, M = 10000, thres = 1e-16,
                             approx = TRUE) {
  if (isTRUE(any(shape <= 0))) {
    stop2("'shape' must be positive.")
  }
  if (isTRUE(any(shape == Inf))) {
    stop2("'shape' must be finite.")
  }
  approx <- as_one_logical(approx)
  # broadcast mu and shape to a common length
  args <- expand(mu = mu, shape = shape)
  mu <- args$mu
  shape <- args$shape
  # NA marks elements still awaiting computation; each strategy below
  # fills in the subset it can handle
  out <- rep(NA, length(mu))
  dim(out) <- attributes(args)$max_dim
  use_poisson <- shape == 1
  if (any(use_poisson)) {
    # shape == 1 implies the poisson distribution
    out[use_poisson] <- mu[use_poisson]
  }
  if (approx) {
    # use a closed form approximation if appropriate
    use_approx <- mu^shape >= 1.5 & mu >= 1.5
    if (any(use_approx)) {
      out[use_approx] <- mean_com_poisson_approx(
        mu[use_approx], shape[use_approx]
      )
    }
  }
  use_exact <- is.na(out)
  if (any(use_exact)) {
    # direct computation of the truncated series
    M <- as.integer(as_one_numeric(M))
    thres <- as_one_numeric(thres)
    log_thres <- log(thres)
    mu <- mu[use_exact]
    shape <- shape[use_exact]
    log_mu <- log(mu)
    # first 2 terms of the series
    # E(X) = sum(k * w_k) / sum(w_k); both sums are accumulated in log space
    log_num <- shape * log_mu # numerator
    log_Z <- log1p_exp(shape * log_mu) # denominator
    lfac <- 0
    k <- 2
    converged <- FALSE
    while (!converged && k <= M) {
      log_k <- log(k)
      lfac <- lfac + log_k
      # log of the k-th unnormalized PMF term
      term <- shape * (k * log_mu - lfac)
      log_num <- log_sum_exp(log_num, log_k + term)
      log_Z <- log_sum_exp(log_Z, term)
      # stop once every element's new term falls below the threshold
      converged <- all(term <= log_thres)
      k <- k + 1
    }
    if (!converged) {
      warning2(
        "Approximating the mean of the 'com_poisson' ",
        "distribution failed and results may be inaccurate."
      )
    }
    out[use_exact] <- exp(log_num - log_Z)
  }
  out
}

# approximate the mean of COM-Poisson distribution
# based on doi:10.1007/s10463-017-0629-6
mean_com_poisson_approx <- function(mu, shape) {
  # asymptotic expansion in powers of 1/mu; accurate for large mu
  term <- 1 - (shape - 1) / (2 * shape) * mu^(-1) -
    (shape^2 - 1) / (24 * shape^2) * mu^(-2) -
    (shape^2 - 1) / (24 * shape^3) * mu^(-3)
  mu * term
}

#' The Dirichlet Distribution
#'
#' Density function and random number generation for the dirichlet
#' distribution with shape parameter vector \code{alpha}.
#'
#' @name Dirichlet
#'
#' @inheritParams StudentT
#' @param x Matrix of quantiles. Each row corresponds to one probability vector.
#' @param alpha Matrix of positive shape parameters. Each row corresponds to one
#'   probability vector.
#'
#' @details See \code{vignette("brms_families")} for details on the
#'   parameterization.
#'
#' @export
ddirichlet <- function(x, alpha, log = FALSE) {
  # density of the Dirichlet distribution, evaluated row-wise
  log <- as_one_logical(log)
  if (!is.matrix(x)) {
    x <- matrix(x, nrow = 1)
  }
  if (!is.matrix(alpha)) {
    alpha <- matrix(alpha, nrow(x), length(alpha), byrow = TRUE)
  }
  # recycle whichever of x / alpha has a single row to match the other
  if (nrow(x) == 1L && nrow(alpha) > 1L) {
    x <- do_call(rbind, repl(x, nrow(alpha)))
  } else if (nrow(x) > 1L && nrow(alpha) == 1L) {
    alpha <- do_call(rbind, repl(alpha, nrow(x)))
  }
  if (isTRUE(any(x < 0))) {
    stop2("x must be non-negative.")
  }
  if (!is_equal(rowSums(x), rep(1, nrow(x)))) {
    stop2("x must sum to 1 per row.")
  }
  if (isTRUE(any(alpha <= 0))) {
    stop2("alpha must be positive.")
  }
  log_norm <- lgamma(rowSums(alpha)) - rowSums(lgamma(alpha))
  log_kernel <- rowSums((alpha - 1) * log(x))
  out <- log_norm + log_kernel
  if (!log) {
    out <- exp(out)
  }
  return(out)
}

#' @rdname Dirichlet
#' @export
rdirichlet <- function(n, alpha) {
  # draw Dirichlet vectors by normalizing independent gamma draws
  n <- as_one_numeric(n)
  if (!is.matrix(alpha)) {
    alpha <- matrix(alpha, nrow = 1)
  }
  if (prod(dim(alpha)) == 0) {
    stop2("alpha should be non-empty.")
  }
  if (isTRUE(any(alpha <= 0))) {
    stop2("alpha must be positive.")
  }
  if (n == 1) {
    n <- nrow(alpha)
  }
  if (n > nrow(alpha)) {
    # recycle alpha row-wise to obtain n parameter rows
    alpha <- matrix(alpha, nrow = n, ncol = ncol(alpha), byrow = TRUE)
  }
  draws <- matrix(rgamma(ncol(alpha) * n, alpha), ncol = ncol(alpha))
  draws / rowSums(draws)
}

#' The Wiener Diffusion Model Distribution
#'
#' Density function and random generation for the Wiener
#' diffusion model distribution with boundary separation \code{alpha},
#' non-decision time \code{tau}, bias \code{beta} and
#' drift rate \code{delta}.
#'
#' @name Wiener
#'
#' @inheritParams StudentT
#' @param alpha Boundary separation parameter.
#' @param tau Non-decision time parameter.
#' @param beta Bias parameter.
#' @param delta Drift rate parameter.
#' @param resp Response: \code{"upper"} or \code{"lower"}.
#'   If no character vector, it is coerced to logical
#'   where \code{TRUE} indicates \code{"upper"} and
#'   \code{FALSE} indicates \code{"lower"}.
#' @param types Which types of responses to return? By default,
#'   return both the response times \code{"q"} and the dichotomous
#'   responses \code{"resp"}. If either \code{"q"} or \code{"resp"},
#'   return only one of the two types.
#' @param backend Name of the package to use as backend for the computations.
#'   Either \code{"Rwiener"} (the default) or \code{"rtdists"}.
#'   Can be set globally for the current \R session via the
#'   \code{"wiener_backend"} option (see \code{\link{options}}).
#'
#' @details
#' These are wrappers around functions of the \pkg{RWiener} or \pkg{rtdists}
#' package (depending on the chosen \code{backend}). See
#' \code{vignette("brms_families")} for details on the parameterization.
#'
#' @seealso \code{\link[RWiener:wienerdist]{wienerdist}},
#'   \code{\link[rtdists:Diffusion]{Diffusion}}
#'
#' @export
dwiener <- function(x, alpha, tau, beta, delta, resp = 1, log = FALSE,
                    backend = getOption("wiener_backend", "Rwiener")) {
  alpha <- as.numeric(alpha)
  tau <- as.numeric(tau)
  beta <- as.numeric(beta)
  delta <- as.numeric(delta)
  # non-character resp is interpreted as logical: TRUE -> "upper"
  if (!is.character(resp)) {
    resp <- ifelse(resp, "upper", "lower")
  }
  log <- as_one_logical(log)
  backend <- match.arg(backend, c("Rwiener", "rtdists"))
  # dispatch to the backend-specific implementation by constructed name
  .dwiener <- paste0(".dwiener_", backend)
  args <- nlist(x, alpha, tau, beta, delta, resp)
  args <- as.list(do_call(expand, args))
  args$log <- log
  do_call(.dwiener, args)
}

# dwiener using Rwiener as backend
.dwiener_Rwiener <- function(x, alpha, tau, beta, delta, resp, log) {
  require_package("RWiener")
  # RWiener::dwiener is not vectorized over its parameters
  .dwiener <- Vectorize(
    RWiener::dwiener,
    c("q", "alpha", "tau", "beta", "delta", "resp")
  )
  args <- nlist(q = x, alpha, tau, beta, delta, resp, give_log = log)
  do_call(.dwiener, args)
}

# dwiener using rtdists as backend
.dwiener_rtdists <- function(x, alpha, tau, beta, delta, resp, log) {
  require_package("rtdists")
  # translate brms parameter names to the rtdists interface;
  # rtdists expects an absolute start point, hence z = beta * alpha
  args <- list(
    rt = x, response = resp,
    a = alpha, t0 = tau, z = beta * alpha, v = delta
  )
  out <- do_call(rtdists::ddiffusion, args)
  if (log) {
    out <- log(out)
  }
  out
}

#' @rdname Wiener
#' @export
rwiener <- function(n, alpha, tau, beta, delta, types = c("q", "resp"),
                    backend = getOption("wiener_backend", "Rwiener")) {
  n <- as_one_numeric(n)
  alpha <- as.numeric(alpha)
  tau <- as.numeric(tau)
  beta <- as.numeric(beta)
  delta <- as.numeric(delta)
  types <- match.arg(types, several.ok = TRUE)
  backend <- match.arg(backend, c("Rwiener", "rtdists"))
  # dispatch to the backend-specific implementation by constructed name
  .rwiener <- paste0(".rwiener_", backend)
  args <- nlist(n, alpha, tau, beta, delta, types)
  do_call(.rwiener, args)
}

# rwiener using Rwiener as backend
.rwiener_Rwiener <- function(n, alpha, tau, beta, delta, types) {
  require_package("RWiener")
  max_len <- max(lengths(list(alpha, tau, beta, delta)))
  if (max_len > 1L) {
    # with vectorized parameters, draw one sample per condition
    if (!n %in% c(1, max_len)) {
      stop2("Can only sample exactly once for each condition.")
    }
    n <- 1
  }
  # helper function to return a numeric vector instead
  # of a data.frame with two columns as for RWiener::rwiener
  .rwiener_num <- function(n, alpha, tau, beta, delta, types) {
    out <- RWiener::rwiener(n, alpha, tau, beta, delta)
    out$resp <- ifelse(out$resp == "upper", 1, 0)
    if (length(types) == 1L) {
      out <- out[[types]]
    }
    out
  }
  # vectorized version of .rwiener_num
  .rwiener <- function(...) {
    fun <- Vectorize(
      .rwiener_num,
      c("alpha", "tau", "beta", "delta"),
      SIMPLIFY = FALSE
    )
    do_call(rbind, fun(...))
  }
  args <- nlist(n, alpha, tau, beta, delta, types)
  do_call(.rwiener, args)
}

# rwiener using rtdists as backend
.rwiener_rtdists <- function(n, alpha, tau, beta, delta, types) {
  require_package("rtdists")
  max_len <- max(lengths(list(alpha, tau, beta, delta)))
  if (max_len > 1L) {
    # with vectorized parameters, draw one sample per condition
    if (!n %in% c(1, max_len)) {
      stop2("Can only sample exactly once for each condition.")
    }
    n <- max_len
  }
  out <- rtdists::rdiffusion(
    n, a = alpha, t0 = tau, z = beta * alpha, v = delta
  )
  # TODO: use column names of rtdists in the output?
  names(out)[names(out) == "rt"] <- "q"
  names(out)[names(out) == "response"] <- "resp"
  out$resp <- ifelse(out$resp == "upper", 1, 0)
  if (length(types) == 1L) {
    out <- out[[types]]
  }
  out
}

# density of the cox proportional hazards model
# @param x currently ignored as the information is passed
# via 'bhaz' and 'cbhaz'. Before exporting the cox distribution
# functions, this needs to be refactored so that x is actually used
# @param mu positive location parameter
# @param bhaz baseline hazard
# @param cbhaz cumulative baseline hazard
dcox <- function(x, mu, bhaz, cbhaz, log = FALSE) {
  # log-density = log hazard + log survival
  out <- hcox(x, mu, bhaz, cbhaz, log = TRUE) +
    pcox(x, mu, bhaz, cbhaz, lower.tail = FALSE, log.p = TRUE)
  if (!log) {
    out <- exp(out)
  }
  out
}

# hazard function of the cox model
hcox <- function(x, mu, bhaz, cbhaz, log = FALSE) {
  # proportional hazards: hazard = bhaz * mu (x enters only via bhaz)
  out <- log(bhaz) + log(mu)
  if (!log) {
    out <- exp(out)
  }
  out
}

# distribution function of the cox model
pcox <- function(q, mu, bhaz, cbhaz, lower.tail = TRUE, log.p = FALSE) {
  # survival function: S(q) = exp(-cbhaz * mu)
  log_surv <- -cbhaz * mu
  if (lower.tail) {
    if (log.p) {
      out <- log1m_exp(log_surv)
    } else {
      out <- 1 - exp(log_surv)
    }
  } else {
    if (log.p) {
      out <- log_surv
    } else {
      out <- exp(log_surv)
    }
  }
  out
}

#' Zero-Inflated Distributions
#'
#' Density and distribution functions for zero-inflated distributions.
#'
#' @name ZeroInflated
#'
#' @inheritParams StudentT
#' @param zi zero-inflation probability
#' @param mu,lambda location parameter
#' @param shape,shape1,shape2 shape parameter
#' @param phi precision parameter
#' @param size number of trials
#' @param prob probability of success on each trial
#'
#' @details
#' The density of a zero-inflated distribution can be specified as follows.
#' If \eqn{x = 0} set \eqn{f(x) = \theta + (1 - \theta) * g(0)}.
#' Else set \eqn{f(x) = (1 - \theta) * g(x)},
#' where \eqn{g(x)} is the density of the non-zero-inflated part.
NULL

#' @rdname ZeroInflated
#' @export
dzero_inflated_poisson <- function(x, lambda, zi, log = FALSE) {
  pars <- nlist(lambda)
  .dzero_inflated(x, "pois", zi, pars, log)
}

#' @rdname ZeroInflated
#' @export
pzero_inflated_poisson <- function(q, lambda, zi, lower.tail = TRUE,
                                   log.p = FALSE) {
  pars <- nlist(lambda)
  .pzero_inflated(q, "pois", zi, pars, lower.tail, log.p)
}

#' @rdname ZeroInflated
#' @export
dzero_inflated_negbinomial <- function(x, mu, shape, zi, log = FALSE) {
  # brms 'shape' maps to the 'size' argument of dnbinom
  pars <- nlist(mu, size = shape)
  .dzero_inflated(x, "nbinom", zi, pars, log)
}

#' @rdname ZeroInflated
#' @export
pzero_inflated_negbinomial <- function(q, mu, shape, zi, lower.tail = TRUE,
                                       log.p = FALSE) {
  pars <- nlist(mu, size = shape)
  .pzero_inflated(q, "nbinom", zi, pars, lower.tail, log.p)
}

#' @rdname ZeroInflated
#' @export
dzero_inflated_binomial <- function(x, size, prob, zi, log = FALSE) {
  pars <- nlist(size, prob)
  .dzero_inflated(x, "binom", zi, pars, log)
}

#' @rdname ZeroInflated
#' @export
pzero_inflated_binomial <- function(q, size, prob, zi, lower.tail = TRUE,
                                    log.p = FALSE) {
  pars <- nlist(size, prob)
  .pzero_inflated(q, "binom", zi, pars, lower.tail, log.p)
}

#' @rdname ZeroInflated
#' @export
dzero_inflated_beta_binomial <- function(x, size, mu, phi, zi, log = FALSE) {
  pars <- nlist(size, mu, phi)
  .dzero_inflated(x, "beta_binomial", zi, pars, log)
}

#' @rdname ZeroInflated
#' @export
pzero_inflated_beta_binomial <- function(q, size, mu, phi, zi,
                                         lower.tail = TRUE, log.p = FALSE) {
  pars <- nlist(size, mu, phi)
  .pzero_inflated(q, "beta_binomial", zi, pars, lower.tail, log.p)
}

#' @rdname ZeroInflated
#' @export
dzero_inflated_beta <- function(x, shape1, shape2, zi, log = FALSE) {
  pars <- nlist(shape1, shape2)
  # zi_beta is technically a hurdle model
  .dhurdle(x, "beta", zi, pars, log, type = "real")
}

#' @rdname ZeroInflated
#' @export
pzero_inflated_beta <- function(q, shape1, shape2, zi, lower.tail = TRUE,
                                log.p = FALSE) {
  pars <- nlist(shape1, shape2)
  # zi_beta is technically a hurdle model
  .phurdle(q, "beta", zi, pars, lower.tail, log.p, type = "real")
}

# @rdname ZeroInflated
# @export
dzero_inflated_asym_laplace <- function(x, mu, sigma, quantile, zi,
                                        log = FALSE) {
  pars <- nlist(mu, sigma, quantile)
  # zi_asym_laplace is technically a hurdle model
  .dhurdle(x, "asym_laplace", zi, pars, log, type = "real")
}

# @rdname ZeroInflated
# @export
pzero_inflated_asym_laplace <- function(q, mu, sigma, quantile, zi,
                                        lower.tail = TRUE, log.p = FALSE) {
  pars <- nlist(mu, sigma, quantile)
  # zi_asym_laplace is technically a hurdle model
  .phurdle(q, "asym_laplace", zi, pars, lower.tail, log.p,
           type = "real", lb = -Inf, ub = Inf)
}

# density of a zero-inflated distribution
# @param dist name of the distribution
# @param zi bernoulli zero-inflated parameter
# @param pars list of parameters passed to pdf
.dzero_inflated <- function(x, dist, zi, pars, log) {
  stopifnot(is.list(pars))
  dist <- as_one_character(dist)
  log <- as_one_logical(log)
  # broadcast x, zi and all distribution parameters to a common length
  args <- expand(dots = c(nlist(x, zi), pars))
  x <- args$x
  zi <- args$zi
  pars <- args[names(pars)]
  # resolve the base density function by name, e.g. "dpois"
  pdf <- paste0("d", dist)
  # f(0) = zi + (1 - zi) * g(0); f(x) = (1 - zi) * g(x) otherwise
  out <- ifelse(x == 0,
    log(zi + (1 - zi) * do_call(pdf, c(0, pars))),
    log(1 - zi) + do_call(pdf, c(list(x), pars, log = TRUE))
  )
  if (!log) {
    out <- exp(out)
  }
  out
}

# CDF of a zero-inflated distribution
# @param dist name of the distribution
# @param zi bernoulli zero-inflated parameter
# @param pars list of parameters passed to pdf
# @param lb lower bound of the conditional distribution
# @param ub upper bound of the conditional distribution
.pzero_inflated <- function(q, dist, zi, pars, lower.tail, log.p,
                            lb = 0, ub = Inf) {
  stopifnot(is.list(pars))
  dist <- as_one_character(dist)
  lower.tail <- as_one_logical(lower.tail)
  log.p <- as_one_logical(log.p)
  lb <- as_one_numeric(lb)
  ub <- as_one_numeric(ub)
  # broadcast q, zi and all distribution parameters to a common length
  args <- expand(dots = c(nlist(q, zi), pars))
  q <- args$q
  zi <- args$zi
  pars <- args[names(pars)]
  # resolve the base CDF by name, e.g. "ppois"
  cdf <- paste0("p", dist)
  # compute log CCDF values
  out <- log(1 - zi) +
    do_call(cdf, c(list(q), pars, lower.tail = FALSE, log.p = TRUE))
  # take the limits of the distribution into account
  out <- ifelse(q < lb, 0, out)
  out <- ifelse(q > ub, -Inf, out)
  if (lower.tail) {
    out <- 1 - exp(out)
    if (log.p) {
      out <- log(out)
    }
  } else {
    if (!log.p) {
      out <- exp(out)
    }
  }
  out
}

#' Hurdle Distributions
#'
#' Density and distribution functions for hurdle distributions.
#'
#' @name Hurdle
#'
#' @inheritParams StudentT
#' @param hu hurdle probability
#' @param mu,lambda location parameter
#' @param shape shape parameter
#' @param sigma,scale scale parameter
#'
#' @details
#' The density of a hurdle distribution can be specified as follows.
#' If \eqn{x = 0} set \eqn{f(x) = \theta}. Else set
#' \eqn{f(x) = (1 - \theta) * g(x) / (1 - G(0))}
#' where \eqn{g(x)} and \eqn{G(x)} are the density and distribution
#' function of the non-hurdle part, respectively.
NULL

#' @rdname Hurdle
#' @export
dhurdle_poisson <- function(x, lambda, hu, log = FALSE) {
  pars <- nlist(lambda)
  .dhurdle(x, "pois", hu, pars, log, type = "int")
}

#' @rdname Hurdle
#' @export
phurdle_poisson <- function(q, lambda, hu, lower.tail = TRUE, log.p = FALSE) {
  pars <- nlist(lambda)
  .phurdle(q, "pois", hu, pars, lower.tail, log.p, type = "int")
}

#' @rdname Hurdle
#' @export
dhurdle_negbinomial <- function(x, mu, shape, hu, log = FALSE) {
  # stats::dnbinom parameterizes 'shape' as 'size'
  pars <- nlist(mu, size = shape)
  .dhurdle(x, "nbinom", hu, pars, log, type = "int")
}

#' @rdname Hurdle
#' @export
phurdle_negbinomial <- function(q, mu, shape, hu, lower.tail = TRUE,
                                log.p = FALSE) {
  pars <- nlist(mu, size = shape)
  .phurdle(q, "nbinom", hu, pars, lower.tail, log.p, type = "int")
}

#' @rdname Hurdle
#' @export
dhurdle_gamma <- function(x, shape, scale, hu, log = FALSE) {
  pars <- nlist(shape, scale)
  .dhurdle(x, "gamma", hu, pars, log, type = "real")
}

#' @rdname Hurdle
#' @export
phurdle_gamma <- function(q, shape, scale, hu, lower.tail = TRUE,
                          log.p = FALSE) {
  pars <- nlist(shape, scale)
  .phurdle(q, "gamma", hu, pars, lower.tail, log.p, type = "real")
}

#' @rdname Hurdle
#' @export
dhurdle_lognormal <- function(x, mu, sigma, hu, log = FALSE) {
  # stats::dlnorm parameterizes mu/sigma as meanlog/sdlog
  pars <- list(meanlog = mu, sdlog = sigma)
  .dhurdle(x, "lnorm", hu, pars, log, type = "real")
}

#' @rdname Hurdle
#' @export
phurdle_lognormal <- function(q, mu, sigma, hu, lower.tail = TRUE,
                              log.p = FALSE) {
  pars <- list(meanlog = mu, sdlog = sigma)
  .phurdle(q, "lnorm", hu, pars, lower.tail, log.p, type = "real")
}

# density of a hurdle distribution
# @param x vector of quantiles
# @param dist name of the distribution
# @param hu bernoulli hurdle parameter
# @param pars list of parameters passed to pdf
# @param log return the density on the log scale?
# @param type support of distribution (int or real)
.dhurdle <- function(x, dist, hu, pars, log, type) {
  stopifnot(is.list(pars))
  dist <- as_one_character(dist)
  log <- as_one_logical(log)
  type <- match.arg(type, c("int", "real"))
  # recycle x, hu, and all distribution parameters to a common length
  args <- expand(dots = c(nlist(x, hu), pars))
  x <- args$x
  hu <- args$hu
  pars <- args[names(pars)]
  pdf <- paste0("d", dist)
  if (type == "int") {
    # integer-valued base distributions have positive mass at 0,
    # which must be truncated away from the non-hurdle part
    lccdf0 <- log(1 - do_call(pdf, c(0, pars)))
  } else {
    # continuous distributions have no point mass at 0
    lccdf0 <- 0
  }
  out <- ifelse(x == 0,
    log(hu),
    log(1 - hu) + do_call(pdf, c(list(x), pars, log = TRUE)) - lccdf0
  )
  if (!log) {
    out <- exp(out)
  }
  out
}

# CDF of a hurdle distribution
# @param q vector of quantiles
# @param dist name of the distribution
# @param hu bernoulli hurdle parameter
# @param pars list of parameters passed to pdf
# @param lower.tail return P(X <= q) instead of P(X > q)?
# @param log.p return probabilities on the log scale?
# @param type support of distribution (int or real)
# @param lb lower bound of the conditional distribution
# @param ub upper bound of the conditional distribution
.phurdle <- function(q, dist, hu, pars, lower.tail, log.p, type,
                     lb = 0, ub = Inf) {
  stopifnot(is.list(pars))
  dist <- as_one_character(dist)
  lower.tail <- as_one_logical(lower.tail)
  log.p <- as_one_logical(log.p)
  type <- match.arg(type, c("int", "real"))
  lb <- as_one_numeric(lb)
  ub <- as_one_numeric(ub)
  args <- expand(dots = c(nlist(q, hu), pars))
  q <- args$q
  hu <- args$hu
  pars <- args[names(pars)]
  cdf <- paste0("p", dist)
  # compute log CCDF values
  out <- log(1 - hu) +
    do_call(cdf, c(list(q), pars, lower.tail = FALSE, log.p = TRUE))
  if (type == "int") {
    # renormalize for the truncated point mass at 0 (see .dhurdle)
    pdf <- paste0("d", dist)
    out <- out - log(1 - do_call(pdf, c(0, pars)))
  }
  # for q < 0 the hurdle point mass at 0 still lies above q,
  # so it belongs to the CCDF; only relevant when lb = -Inf
  out <- ifelse(q < 0, log_sum_exp(out, log(hu)), out)
  # take the limits of the distribution into account
  out <- ifelse(q < lb, 0, out)
  out <- ifelse(q > ub, -Inf, out)
  if (lower.tail) {
    out <- 1 - exp(out)
    if (log.p) {
      out <- log(out)
    }
  } else {
    if (!log.p) {
      out <- exp(out)
    }
  }
  out
}

# density of the categorical distribution with the softmax transform
# @param x positive integers not greater than ncat
# @param eta the linear predictor (of length or ncol ncat)
# @param log return values on the log scale?
dcategorical <- function(x, eta, log = FALSE) {
  # coerce a plain vector of linear predictors to a single-draw matrix
  if (is.null(dim(eta))) {
    eta <- matrix(eta, nrow = 1)
  }
  if (length(dim(eta)) != 2L) {
    stop2("eta must be a numeric vector or matrix.")
  }
  # refcat = NULL: eta already contains all ncat categories
  out <- inv_link_categorical(eta, log = log, refcat = NULL)
  out[, x, drop = FALSE]
}

# generic inverse link function for the categorical family
#
# @param x Matrix (S x `ncat` or S x `ncat - 1` (depending on `refcat_obj`),
#   with S denoting the number of posterior draws and `ncat` denoting the number
#   of response categories) with values of `eta` for one observation (see
#   dcategorical()) or an array (S x N x `ncat` or S x N x `ncat - 1` (depending
#   on `refcat_obj`)) containing the same values as the matrix just described,
#   but for N observations.
# @param refcat Integer indicating the reference category to be inserted in 'x'.
#   If NULL, `x` is not modified at all.
# @param log Logical (length 1) indicating whether to log the return value.
#
# @return If `x` is a matrix, then a matrix (S x `ncat`, with S denoting the
#   number of posterior draws and `ncat` denoting the number of response
#   categories) containing the values of the inverse-link function applied to
#   `x`. If `x` is an array, then an array (S x N x `ncat`) containing the same
#   values as the matrix just described, but for N observations.
inv_link_categorical <- function(x, refcat = 1, log = FALSE) {
  if (!is.null(refcat)) {
    # add a column of zeros for the reference category
    x <- insert_refcat(x, refcat = refcat)
  }
  out <- log_softmax(x)
  if (!log) {
    out <- exp(out)
  }
  out
}

# generic link function for the categorical family
#
# @param x Matrix (S x `ncat`, with S denoting the number of posterior draws and
#   `ncat` denoting the number of response categories) of probabilities for the
#   response categories or an array (S x N x `ncat`) containing the same values
#   as the matrix just described, but for N observations.
# @param refcat Numeric (length 1) giving the index of the reference category.
# @param return_refcat Logical (length 1) indicating whether to include the
#   reference category in the return value.
#
# @return If `x` is a matrix, then a matrix (S x `ncat` or S x `ncat - 1`
#   (depending on `return_refcat`), with S denoting the number of posterior
#   draws and `ncat` denoting the number of response categories) containing the
#   values of the link function applied to `x`. If `x` is an array, then an
#   array (S x N x `ncat` or S x N x `ncat - 1` (depending on `return_refcat`))
#   containing the same values as the matrix just described, but for N
#   observations.
link_categorical <- function(x, refcat = 1, return_refcat = FALSE) {
  ndim <- length(dim(x))
  # all margins except the category margin (the last one)
  marg_noncat <- seq_along(dim(x))[-ndim]
  if (return_refcat) {
    x_tosweep <- x
  } else {
    x_tosweep <- slice(x, ndim, -refcat, drop = FALSE)
  }
  # log odds of each category relative to the reference category
  log(sweep(
    x_tosweep,
    MARGIN = marg_noncat,
    STATS = slice(x, ndim, refcat),
    FUN = "/"
  ))
}

# CDF of the categorical distribution with the softmax transform
# @param q positive integers not greater than ncat
# @param eta the linear predictor (of length or ncol ncat)
# @param log.p return values on the log scale?
pcategorical <- function(q, eta, log.p = FALSE) {
  p <- dcategorical(seq_len(max(q)), eta = eta)
  # cumulative sum of category probabilities up to each q
  out <- cblapply(q, function(j) rowSums(p[, 1:j, drop = FALSE]))
  if (log.p) {
    out <- log(out)
  }
  out
}

# density of the multinomial distribution with the softmax transform
# @param x positive integers not greater than ncat
# @param eta the linear predictor (of length or ncol ncat)
# @param log return values on the log scale?
dmultinomial <- function(x, eta, log = FALSE) {
  # coerce a plain vector of linear predictors to a single-draw matrix
  if (is.null(dim(eta))) {
    eta <- matrix(eta, nrow = 1)
  }
  if (length(dim(eta)) != 2L) {
    stop2("eta must be a numeric vector or matrix.")
  }
  log_prob <- log_softmax(eta)
  size <- sum(x)
  x <- data2draws(x, dim = dim(eta))
  # multinomial log pmf: log(size!) + sum_k [x_k * log(p_k) - log(x_k!)]
  out <- lgamma(size + 1) + rowSums(x * log_prob - lgamma(x + 1))
  if (!log) {
    out <- exp(out)
  }
  out
}

# density of the cumulative distribution
#
# @param x Integer vector containing response category indices to return the
#   "densities" (probability masses) for.
# @param eta Vector (length S, with S denoting the number of posterior draws) of
#   linear predictor draws.
# @param thres Matrix (S x `ncat - 1`, with S denoting the number of posterior
#   draws and `ncat` denoting the number of response categories) of threshold
#   draws.
# @param disc Vector (length S, with S denoting the number of posterior draws,
#   or length 1 for recycling) of discrimination parameter draws.
# @param link Character vector (length 1) giving the name of the link function.
#
# @return A matrix (S x `length(x)`) containing the values of the inverse-link
#   function applied to `disc * (thres - eta)`.
dcumulative <- function(x, eta, thres, disc = 1, link = "logit") {
  eta <- disc * (thres - eta)
  if (link == "identity") {
    out <- eta
  } else {
    out <- inv_link_cumulative(eta, link = link)
  }
  out[, x, drop = FALSE]
}

# generic inverse link function for the cumulative family
#
# @param x Matrix (S x `ncat - 1`, with S denoting the number of posterior draws
#   and `ncat` denoting the number of response categories) with values of
#   `disc * (thres - eta)` for one observation (see dcumulative()) or an array
#   (S x N x `ncat - 1`) containing the same values as the matrix just
#   described, but for N observations.
# @param link Character vector (length 1) giving the name of the link function.
#
# @return If `x` is a matrix, then a matrix (S x `ncat`, with S denoting the
#   number of posterior draws and `ncat` denoting the number of response
#   categories) containing the values of the inverse-link function applied to
#   `x`. If `x` is an array, then an array (S x N x `ncat`) containing the same
#   values as the matrix just described, but for N observations.
inv_link_cumulative <- function(x, link) {
  x <- inv_link(x, link)
  ndim <- length(dim(x))
  dim_noncat <- dim(x)[-ndim]
  ones_arr <- array(1, dim = c(dim_noncat, 1))
  zeros_arr <- array(0, dim = c(dim_noncat, 1))
  # category probabilities are first differences of the cumulative
  # probabilities, padded with 0 below and 1 above
  abind::abind(x, ones_arr) - abind::abind(zeros_arr, x)
}

# generic link function for the cumulative family
#
# @param x Matrix (S x `ncat`, with S denoting the number of posterior draws and
#   `ncat` denoting the number of response categories) of probabilities for the
#   response categories or an array (S x N x `ncat`) containing the same values
#   as the matrix just described, but for N observations.
# @param link Character string (length 1) giving the name of the link function.
#
# @return If `x` is a matrix, then a matrix (S x `ncat - 1`, with S denoting the
#   number of posterior draws and `ncat` denoting the number of response
#   categories) containing the values of the link function applied to `x`. If
#   `x` is an array, then an array (S x N x `ncat - 1`) containing the same
#   values as the matrix just described, but for N observations.
link_cumulative <- function(x, link) {
  ndim <- length(dim(x))
  ncat <- dim(x)[ndim]
  dim_noncat <- dim(x)[-ndim]
  nthres <- dim(x)[ndim] - 1
  marg_noncat <- seq_along(dim(x))[-ndim]
  dim_t <- c(nthres, dim_noncat)
  # cumulative sums over the first ncat - 1 categories; apply() puts the
  # cumulated margin first, so transpose it back afterwards via aperm()
  x <- apply(slice(x, ndim, -ncat, drop = FALSE), marg_noncat, cumsum)
  x <- aperm(array(x, dim = dim_t), perm = c(marg_noncat + 1, 1))
  link(x, link)
}

# density of the sratio distribution
#
# @param x Integer vector containing response category indices to return the
#   "densities" (probability masses) for.
# @param eta Vector (length S, with S denoting the number of posterior draws) of
#   linear predictor draws.
# @param thres Matrix (S x `ncat - 1`, with S denoting the number of posterior
#   draws and `ncat` denoting the number of response categories) of threshold
#   draws.
# @param disc Vector (length S, with S denoting the number of posterior draws,
#   or length 1 for recycling) of discrimination parameter draws.
# @param link Character vector (length 1) giving the name of the link function.
#
# @return A matrix (S x `length(x)`) containing the values of the inverse-link
#   function applied to `disc * (thres - eta)`.
dsratio <- function(x, eta, thres, disc = 1, link = "logit") {
  # distance of each threshold from the linear predictor,
  # scaled by the discrimination parameter
  adjusted <- disc * (thres - eta)
  if (link == "identity") {
    probs <- adjusted
  } else {
    probs <- inv_link_sratio(adjusted, link = link)
  }
  # keep only the requested response categories
  probs[, x, drop = FALSE]
}

# generic inverse link function for the sratio family
#
# @param x Matrix (S x `ncat - 1`, with S denoting the number of posterior draws
#   and `ncat` denoting the number of response categories) holding values of
#   `disc * (thres - eta)` for a single observation (see dsratio()), or an
#   array (S x N x `ncat - 1`) holding the same kind of values for N
#   observations.
# @param link Character vector (length 1) giving the name of the link function.
#
# @return If `x` is a matrix, a matrix (S x `ncat`) with the inverse-link
#   function applied to `x`; if `x` is an array, an array (S x N x `ncat`)
#   with the corresponding values for N observations.
inv_link_sratio <- function(x, link) {
  x <- inv_link(x, link)
  ndim <- length(dim(x))
  dim_noncat <- dim(x)[-ndim]
  nthres <- dim(x)[ndim]
  marg_noncat <- seq_along(dim(x))[-ndim]
  ones_arr <- array(1, dim = c(dim_noncat, 1))
  dim_t <- c(nthres, dim_noncat)
  # cumulative product of the "survivor" terms 1 - F_k; apply() moves the
  # cumulated margin to the front, so transpose back via aperm()
  Sx_cumprod <- aperm(
    array(apply(1 - x, marg_noncat, cumprod), dim = dim_t),
    perm = c(marg_noncat + 1, 1)
  )
  # P(Y = k) = F_k * prod_{j<k} (1 - F_j), with F_ncat := 1
  abind::abind(x, ones_arr) * abind::abind(ones_arr, Sx_cumprod)
}

# generic link function for the sratio family
#
# @param x Matrix (S x `ncat`, with S denoting the number of posterior draws and
#   `ncat` denoting the number of response categories) of probabilities for the
#   response categories or an array (S x N x `ncat`) containing the same values
#   as the matrix just described, but for N observations.
# @param link Character string (length 1) giving the name of the link function.
#
# @return If `x` is a matrix, then a matrix (S x `ncat - 1`, with S denoting the
#   number of posterior draws and `ncat` denoting the number of response
#   categories) containing the values of the link function applied to `x`. If
#   `x` is an array, then an array (S x N x `ncat - 1`) containing the same
#   values as the matrix just described, but for N observations.
link_sratio <- function(x, link) {
  ndim <- length(dim(x))
  # recursively recover F_k from the category probabilities:
  # F_k = P(Y = k) / prod_{j<k} (1 - F_j)
  .F_k <- function(k) {
    if (k == 1) {
      # base case: no previous thresholds; empty survivor product
      prev_res <- list(F_k = NULL, S_km1_prod = 1)
    } else {
      prev_res <- .F_k(k - 1)
    }
    F_k <- slice(x, ndim, k, drop = FALSE) / prev_res$S_km1_prod
    .out <- list(
      F_k = abind::abind(prev_res$F_k, F_k),
      S_km1_prod = prev_res$S_km1_prod * (1 - F_k)
    )
    return(.out)
  }
  x <- .F_k(dim(x)[ndim] - 1)$F_k
  link(x, link)
}

# density of the cratio distribution
#
# @param x Integer vector containing response category indices to return the
#   "densities" (probability masses) for.
# @param eta Vector (length S, with S denoting the number of posterior draws) of
#   linear predictor draws.
# @param thres Matrix (S x `ncat - 1`, with S denoting the number of posterior
#   draws and `ncat` denoting the number of response categories) of threshold
#   draws.
# @param disc Vector (length S, with S denoting the number of posterior draws,
#   or length 1 for recycling) of discrimination parameter draws.
# @param link Character vector (length 1) giving the name of the link function.
#
# @return A matrix (S x `length(x)`) containing the values of the inverse-link
#   function applied to `disc * (thres - eta)`.
dcratio <- function(x, eta, thres, disc = 1, link = "logit") {
  # note the reversed sign relative to dsratio: eta - thres
  eta <- disc * (eta - thres)
  if (link == "identity") {
    out <- eta
  } else {
    out <- inv_link_cratio(eta, link = link)
  }
  out[, x, drop = FALSE]
}

# generic inverse link function for the cratio family
#
# @param x Matrix (S x `ncat - 1`, with S denoting the number of posterior draws
#   and `ncat` denoting the number of response categories) with values of
#   `disc * (thres - eta)` (see dcratio()) or an array
#   (S x N x `ncat - 1`) containing the same values as the matrix just
#   described, but for N observations.
# @param link Character vector (length 1) giving the name of the link function.
#
# @return If `x` is a matrix, then a matrix (S x `ncat`, with S denoting the
#   number of posterior draws and `ncat` denoting the number of response
#   categories) containing the values of the inverse-link function applied to
#   `x`. If `x` is an array, then an array (S x N x `ncat`) containing the same
#   values as the matrix just described, but for N observations.
inv_link_cratio <- function(x, link) {
  x <- inv_link(x, link)
  ndim <- length(dim(x))
  dim_noncat <- dim(x)[-ndim]
  nthres <- dim(x)[ndim]
  marg_noncat <- seq_along(dim(x))[-ndim]
  ones_arr <- array(1, dim = c(dim_noncat, 1))
  dim_t <- c(nthres, dim_noncat)
  # cumulative product of the continuation probabilities; apply() moves the
  # cumulated margin to the front, so transpose back via aperm()
  x_cumprod <- aperm(
    array(apply(x, marg_noncat, cumprod), dim = dim_t),
    perm = c(marg_noncat + 1, 1)
  )
  # P(Y = k) = (1 - F_k) * prod_{j<k} F_j, with F_ncat := 0
  abind::abind(1 - x, ones_arr) * abind::abind(ones_arr, x_cumprod)
}

# generic link function for the cratio family
#
# @param x Matrix (S x `ncat`, with S denoting the number of posterior draws and
#   `ncat` denoting the number of response categories) of probabilities for the
#   response categories or an array (S x N x `ncat`) containing the same values
#   as the matrix just described, but for N observations.
# @param link Character string (length 1) giving the name of the link function.
#
# @return If `x` is a matrix, then a matrix (S x `ncat - 1`, with S denoting the
#   number of posterior draws and `ncat` denoting the number of response
#   categories) containing the values of the link function applied to `x`. If
#   `x` is an array, then an array (S x N x `ncat - 1`) containing the same
#   values as the matrix just described, but for N observations.
link_cratio <- function(x, link) {
  ndim <- length(dim(x))
  # recursively recover F_k from the category probabilities:
  # F_k = 1 - P(Y = k) / prod_{j<k} F_j
  .F_k <- function(k) {
    if (k == 1) {
      # base case: empty product of previous continuation probabilities
      prev_res <- list(F_k = NULL, F_km1_prod = 1)
    } else {
      prev_res <- .F_k(k - 1)
    }
    F_k <- 1 - slice(x, ndim, k, drop = FALSE) / prev_res$F_km1_prod
    .out <- list(
      F_k = abind::abind(prev_res$F_k, F_k),
      F_km1_prod = prev_res$F_km1_prod * F_k
    )
    return(.out)
  }
  x <- .F_k(dim(x)[ndim] - 1)$F_k
  link(x, link)
}

# density of the acat distribution
#
# @param x Integer vector containing response category indices to return the
#   "densities" (probability masses) for.
# @param eta Vector (length S, with S denoting the number of posterior draws) of
#   linear predictor draws.
# @param thres Matrix (S x `ncat - 1`, with S denoting the number of posterior
#   draws and `ncat` denoting the number of response categories) of threshold
#   draws.
# @param disc Vector (length S, with S denoting the number of posterior draws,
#   or length 1 for recycling) of discrimination parameter draws.
# @param link Character vector (length 1) giving the name of the link function.
#
# @return A matrix (S x `length(x)`) containing the values of the inverse-link
#   function applied to `disc * (thres - eta)`.
dacat <- function(x, eta, thres, disc = 1, link = "logit") {
  eta <- disc * (eta - thres)
  if (link == "identity") {
    out <- eta
  } else {
    out <- inv_link_acat(eta, link = link)
  }
  out[, x, drop = FALSE]
}

# generic inverse link function for the acat family
#
# @param x Matrix (S x `ncat - 1`, with S denoting the number of posterior draws
#   and `ncat` denoting the number of response categories) with values of
#   `disc * (thres - eta)` (see dacat()).
# @param link Character vector (length 1) giving the name of the link function.
#
# @return A matrix (S x `ncat`, with S denoting the number of posterior draws
#   and `ncat` denoting the number of response categories) containing the values
#   of the inverse-link function applied to `x`.
inv_link_acat <- function(x, link) {
  ndim <- length(dim(x))
  dim_noncat <- dim(x)[-ndim]
  nthres <- dim(x)[ndim]
  marg_noncat <- seq_along(dim(x))[-ndim]
  ones_arr <- array(1, dim = c(dim_noncat, 1))
  dim_t <- c(nthres, dim_noncat)
  if (link == "logit") {
    # faster evaluation in this case:
    # unnormalized P(Y = k) is prod_{j<k} exp(eta_j)
    exp_x_cumprod <- aperm(
      array(apply(exp(x), marg_noncat, cumprod), dim = dim_t),
      perm = c(marg_noncat + 1, 1)
    )
    out <- abind::abind(ones_arr, exp_x_cumprod)
  } else {
    x <- inv_link(x, link)
    # forward cumulative product of adjacent-category probabilities
    x_cumprod <- aperm(
      array(apply(x, marg_noncat, cumprod), dim = dim_t),
      perm = c(marg_noncat + 1, 1)
    )
    # reversed cumulative product of the complements: compute the cumprod
    # on the reversed category order, then reverse back
    Sx_cumprod_rev <- apply(
      1 - slice(x, ndim, rev(seq_len(nthres)), drop = FALSE),
      marg_noncat, cumprod
    )
    Sx_cumprod_rev <- aperm(
      array(Sx_cumprod_rev, dim = dim_t),
      perm = c(marg_noncat + 1, 1)
    )
    Sx_cumprod_rev <- slice(
      Sx_cumprod_rev, ndim, rev(seq_len(nthres)), drop = FALSE
    )
    out <- abind::abind(ones_arr, x_cumprod) *
      abind::abind(Sx_cumprod_rev, ones_arr)
  }
  # normalize so probabilities sum to 1 across categories
  catsum <- array(apply(out, marg_noncat, sum), dim = dim_noncat)
  sweep(out, marg_noncat, catsum, "/")
}

# generic link function for the acat family
#
# @param x Matrix (S x `ncat`, with S denoting the number of posterior draws and
#   `ncat` denoting the number of response categories) of probabilities for the
#   response categories or an array (S x N x `ncat`) containing the same values
#   as the matrix just described, but for N observations.
# @param link Character string (length 1) giving the name of the link function.
#
# @return If `x` is a matrix, then a matrix (S x `ncat - 1`, with S denoting the
#   number of posterior draws and `ncat` denoting the number of response
#   categories) containing the values of the link function applied to `x`. If
#   `x` is an array, then an array (S x N x `ncat - 1`) containing the same
#   values as the matrix just described, but for N observations.
link_acat <- function(x, link) {
  ndim <- length(dim(x))
  ncat <- dim(x)[ndim]
  # ratios of adjacent category probabilities: P(Y = k + 1) / P(Y = k)
  x <- slice(x, ndim, -1, drop = FALSE) / slice(x, ndim, -ncat, drop = FALSE)
  if (link == "logit") {
    # faster evaluation in this case
    out <- log(x)
  } else {
    x <- inv_odds(x)
    out <- link(x, link)
  }
  out
}

# CDF for ordinal distributions
# @param q positive integers not greater than ncat
# @param eta draws of the linear predictor
# @param thres draws of threshold parameters
# @param disc draws of the discrimination parameter
# @param family a character string naming the family
# @param link a character string naming the link
# @return a matrix of probabilities P(x <= q)
pordinal <- function(q, eta, thres, disc = 1, family = NULL, link = "logit") {
  family <- as_one_character(family)
  link <- as_one_character(link)
  # dispatch to the matching density function, e.g. dcumulative or dsratio
  args <- nlist(x = seq_len(max(q)), eta, thres, disc, link)
  p <- do_call(paste0("d", family), args)
  # accumulate the category probabilities up to each q
  .fun <- function(j) rowSums(as.matrix(p[, 1:j, drop = FALSE]))
  cblapply(q, .fun)
}

# helper functions to shift arbitrary distributions
dshifted <- function(dist, x, shift = 0, ...) {
  do_call(paste0("d", dist), list(x - shift, ...))
}

pshifted <- function(dist, q, shift = 0, ...) {
  do_call(paste0("p", dist), list(q - shift, ...))
}

qshifted <- function(dist, p, shift = 0, ...) {
  do_call(paste0("q", dist), list(p, ...)) + shift
}

rshifted <- function(dist, n, shift = 0, ...) {
  do_call(paste0("r", dist), list(n, ...)) + shift
}

# validate argument p in q functions
# @param p vector of probabilities, possibly on the log or upper-tail scale
# @return p transformed to lower-tail probabilities in (0,1)
validate_p_dist <- function(p, lower.tail = TRUE, log.p = FALSE) {
  if (log.p) {
    p <- exp(p)
  }
  if (!lower.tail) {
    p <- 1 - p
  }
  if (isTRUE(any(p <= 0)) || isTRUE(any(p >= 1))) {
    stop2("'p' must contain probabilities in (0,1)")
  }
  p
}

# check if 'n' in r functions is valid
# @param n number of desired random draws
# @param .. parameter vectors
# @return validated 'n'
check_n_rdist <- function(n, ...) {
  n <- as.integer(as_one_numeric(n))
  max_len <- max(lengths(list(...)))
  if (max_len > 1L) {
    # n must either equal the common parameter length or be 1 (recycled)
    if (!n %in% c(1, max_len)) {
      stop2("'n' must match the maximum length of the parameter vectors.")
    }
    n <- max_len
  }
  n
}

brms/R/loo_moment_match.R0000644000176200001440000001661014454230222015067 0ustar liggesusers#' Moment matching for efficient approximate leave-one-out cross-validation
#'
#' Moment matching for efficient approximate leave-one-out cross-validation
#' (LOO-CV). See \code{\link[loo:loo_moment_match]{loo_moment_match}}
#' for more details.
#'
#' @aliases loo_moment_match
#'
#' @inheritParams predict.brmsfit
#' @param x An object of class \code{brmsfit}.
#' @param loo An object of class \code{loo} originally created from \code{x}.
#' @param k_threshold The threshold at which Pareto \eqn{k}
#'   estimates are treated as problematic. Defaults to \code{0.7}.
#'   See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}}
#'   for more details.
#' @param check Logical; If \code{TRUE} (the default), some checks
#'   are performed if the \code{loo} object was generated
#'   from the \code{brmsfit} object passed to argument \code{x}.
#' @param recompile Logical, indicating whether the Stan model should be
#'   recompiled. This may be necessary if you are running moment matching on
#'   another machine than the one used to fit the model. No recompilation
#'   is done by default.
#' @param ... Further arguments passed to the underlying methods.
#'   Additional arguments initially passed to \code{\link{loo}},
#'   for example, \code{newdata} or \code{resp} need to be passed
#'   again to \code{loo_moment_match} in order for the latter
#'   to work correctly.
#' @return An updated object of class \code{loo}.
#'
#' @details The moment matching algorithm requires draws of all variables
#'   defined in Stan's \code{parameters} block to be saved. Otherwise
#'   \code{loo_moment_match} cannot be computed.
#'   Thus, please set
#'   \code{save_pars = save_pars(all = TRUE)} in the call to \code{\link{brm}},
#'   if you are planning to apply \code{loo_moment_match} to your models.
#'
#' @references
#' Paananen, T., Piironen, J., Buerkner, P.-C., Vehtari, A. (2021).
#' Implicitly Adaptive Importance Sampling. Statistics and Computing.
#'
#' @examples
#' \dontrun{
#' fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient),
#'             data = epilepsy, family = poisson(),
#'             save_pars = save_pars(all = TRUE))
#'
#' # throws warning about some pareto k estimates being too high
#' (loo1 <- loo(fit1))
#' (mmloo1 <- loo_moment_match(fit1, loo = loo1))
#' }
#'
#' @importFrom loo loo_moment_match
#' @export loo_moment_match
#' @export
loo_moment_match.brmsfit <- function(x, loo, k_threshold = 0.7, newdata = NULL,
                                     resp = NULL, check = TRUE,
                                     recompile = FALSE, ...) {
  stopifnot(is.loo(loo), is.brmsfit(x))
  if (is.null(newdata)) {
    newdata <- model.frame(x)
  } else {
    newdata <- as.data.frame(newdata)
  }
  check <- as_one_logical(check)
  if (check) {
    # compare response hashes to detect a 'loo' object that was
    # computed from different data than 'x'
    yhash_loo <- attr(loo, "yhash")
    yhash_fit <- hash_response(x, newdata = newdata)
    if (!is_equal(yhash_loo, yhash_fit)) {
      stop2(
        "Response values used in 'loo' and 'x' do not match. ",
        "If this is a false positive, please set 'check' to FALSE."
      )
    }
  }
  # otherwise loo_moment_match may fail in a new R session or on another machine
  x <- update_misc_env(x, recompile = recompile)
  out <- try(loo::loo_moment_match.default(
    x, loo = loo,
    post_draws = as.matrix,
    log_lik_i = .log_lik_i,
    unconstrain_pars = .unconstrain_pars,
    log_prob_upars = .log_prob_upars,
    log_lik_i_upars = .log_lik_i_upars,
    k_threshold = k_threshold,
    newdata = newdata,
    resp = resp,
    ...
  ))
  if (is_try_error(out)) {
    stop2(
      "Moment matching failed. Perhaps you did not set ",
      "'save_pars = save_pars(all = TRUE)' when fitting your model? ",
      "If you are running moment matching on another machine than the one ",
      "used to fit the model, you may need to set recompile = TRUE."
    )
  }
  out
}

# compute a vector of log-likelihood values for the ith observation
.log_lik_i <- function(x, i, newdata, ...) {
  as.vector(log_lik(x, newdata = newdata[i, , drop = FALSE], ...))
}

# transform parameters to the unconstrained space
.unconstrain_pars <- function(x, pars, ...) {
  unconstrain_pars_stanfit(x$fit, pars = pars, ...)
}

# compute log_prob for each posterior draws on the unconstrained space
.log_prob_upars <- function(x, upars, ...) {
  x <- update_misc_env(x, only_windows = TRUE)
  log_prob_upars_stanfit(x$fit, upars = upars, ...)
}

# transform parameters to the constraint space
# @param x a brmsfit object
# @param upars matrix of draws on the unconstrained space (draws x parameters)
# @return a brmsfit object whose @sim slot holds the constrained draws
.update_pars <- function(x, upars, ...) {
  # list with one element per posterior draw
  pars <- apply(upars, 1, .constrain_pars, x = x)
  # select required parameters only
  pars <- lapply(pars, "[", x$fit@sim$pars_oi_old)
  # transform draws
  ndraws <- length(pars)
  pars <- unlist(pars)
  npars <- length(pars) / ndraws
  dim(pars) <- c(npars, ndraws)
  # add dummy 'lp__' draws
  pars <- rbind(pars, rep(0, ndraws))
  # bring draws into the right structure
  new_draws <- named_list(x$fit@sim$fnames_oi_old, list(numeric(ndraws)))
  if (length(new_draws) != nrow(pars)) {
    stop2("Updating parameters in `loo_moment_match.brmsfit' failed. ",
          "Please report a bug at https://github.com/paul-buerkner/brms.")
  }
  # NOTE(review): the loop copies only the first npars rows; the appended
  # dummy 'lp__' row stays at its zero initialization from named_list above
  for (i in seq_len(npars)) {
    new_draws[[i]] <- pars[i, ]
  }
  # create new sim object to overwrite x$fit@sim
  x$fit@sim <- list(
    samples = list(new_draws),
    iter = ndraws,
    thin = 1,
    warmup = 0,
    chains = 1,
    n_save = ndraws,
    warmup2 = 0,
    permutation = list(seq_len(ndraws)),
    pars_oi = x$fit@sim$pars_oi_old,
    dims_oi = x$fit@sim$dims_oi_old,
    fnames_oi = x$fit@sim$fnames_oi_old,
    n_flatnames = length(x$fit@sim$fnames_oi_old)
  )
  x$fit@stan_args <- list(
    list(chain_id = 1, iter = ndraws, thin = 1, warmup = 0)
  )
  rename_pars(x)
}

# wrapper around rstan::constrain_pars
# ensures that the right posterior draws are excluded
.constrain_pars <- function(upars, x) {
  out <- rstan::constrain_pars(upars, object = x$fit)
  out[x$exclude] <- NULL
  out
}

# compute log_lik values based on the unconstrained parameters
.log_lik_i_upars <- function(x, upars, i, ndraws = NULL,
                             draw_ids = NULL, ...) {
  # do not pass draw_ids or ndraws further to avoid subsetting twice
  x <- update_misc_env(x, only_windows = TRUE)
  x <- .update_pars(x, upars = upars, ...)
  .log_lik_i(x, i = i, ...)
}

# -------- will be imported from rstan at some point -------
# transform parameters to the unconstraint space
unconstrain_pars_stanfit <- function(x, pars, ...) {
  skeleton <- .create_skeleton(x@sim$pars_oi, x@par_dims[x@sim$pars_oi])
  upars <- apply(pars, 1, FUN = function(theta) {
    rstan::unconstrain_pars(x, pars = .rstan_relist(theta, skeleton))
  })
  # for one parameter models
  if (is.null(dim(upars))) {
    dim(upars) <- c(1, length(upars))
  }
  t(upars)
}

# compute log_prob for each posterior draws on the unconstrained space
log_prob_upars_stanfit <- function(x, upars, ...) {
  apply(upars, 1, rstan::log_prob, object = x,
        adjust_transform = TRUE, gradient = FALSE)
}

# create a named list of draws for use with rstan methods
.rstan_relist <- function (x, skeleton) {
  out <- utils::relist(x, skeleton)
  # restore the array dimensions dropped by relist()
  for (i in seq_along(skeleton)) {
    dim(out[[i]]) <- dim(skeleton[[i]])
  }
  out
}

# rstan helper function to get dims of parameters right
.create_skeleton <- function (pars, dims) {
  out <- lapply(seq_along(pars), function(i) {
    len_dims <- length(dims[[i]])
    # scalar parameters get a plain 0 placeholder
    if (len_dims < 1) return(0)
    return(array(0, dim = dims[[i]]))
  })
  names(out) <- pars
  out
}

brms/R/loo.R0000644000176200001440000007703314364257623012350 0ustar liggesusers#' Efficient approximate leave-one-out cross-validation (LOO)
#'
#' Perform approximate leave-one-out cross-validation based
#' on the posterior likelihood using the \pkg{loo} package.
#' For more details see \code{\link[loo:loo]{loo}}.
#'
#' @aliases loo LOO LOO.brmsfit
#'
#' @param x A \code{brmsfit} object.
#' @param ... More \code{brmsfit} objects or further arguments
#'   passed to the underlying post-processing functions.
#'   In particular, see \code{\link{prepare_predictions}} for further
#'   supported arguments.
#' @param compare A flag indicating if the information criteria
#'   of the models should be compared to each other
#'   via \code{\link{loo_compare}}.
#' @param pointwise A flag indicating whether to compute the full
#'   log-likelihood matrix at once or separately for each observation.
#'   The latter approach is usually considerably slower but
#'   requires much less working memory. Accordingly, if one runs
#'   into memory issues, \code{pointwise = TRUE} is the way to go.
#' @param moment_match Logical; Indicate whether \code{\link{loo_moment_match}}
#'   should be applied on problematic observations. Defaults to \code{FALSE}.
#'   For most models, moment matching will only work if you have set
#'   \code{save_pars = save_pars(all = TRUE)} when fitting the model with
#'   \code{\link{brm}}.
See \code{\link{loo_moment_match.brmsfit}} for more #' details. #' @param reloo Logical; Indicate whether \code{\link{reloo}} #' should be applied on problematic observations. Defaults to \code{FALSE}. #' @param k_threshold The threshold at which pareto \eqn{k} #' estimates are treated as problematic. Defaults to \code{0.7}. #' Only used if argument \code{reloo} is \code{TRUE}. #' See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}} for more details. #' @param save_psis Should the \code{"psis"} object created internally be saved #' in the returned object? For more details see \code{\link[loo:loo]{loo}}. #' @param moment_match_args Optional \code{list} of additional arguments passed to #' \code{\link{loo_moment_match}}. #' @param reloo_args Optional \code{list} of additional arguments passed to #' \code{\link{reloo}}. #' @param model_names If \code{NULL} (the default) will use model names #' derived from deparsing the call. Otherwise will use the passed #' values as model names. #' @inheritParams predict.brmsfit #' #' @details See \code{\link{loo_compare}} for details on model comparisons. #' For \code{brmsfit} objects, \code{LOO} is an alias of \code{loo}. #' Use method \code{\link{add_criterion}} to store #' information criteria in the fitted model object for later usage. #' #' @return If just one object is provided, an object of class \code{loo}. #' If multiple objects are provided, an object of class \code{loolist}. #' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler) #' (loo1 <- loo(fit1)) #' #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' (loo2 <- loo(fit2)) #' #' # compare both models #' loo_compare(loo1, loo2) #' } #' #' @references #' Vehtari, A., Gelman, A., & Gabry J. (2016). Practical Bayesian model #' evaluation using leave-one-out cross-validation and WAIC. 
In Statistics #' and Computing, doi:10.1007/s11222-016-9696-4. arXiv preprint arXiv:1507.04544. #' #' Gelman, A., Hwang, J., & Vehtari, A. (2014). #' Understanding predictive information criteria for Bayesian models. #' Statistics and Computing, 24, 997-1016. #' #' Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation #' and widely applicable information criterion in singular learning theory. #' The Journal of Machine Learning Research, 11, 3571-3594. #' #' @importFrom loo loo is.loo #' @export loo #' @export loo.brmsfit <- function(x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, moment_match = FALSE, reloo = FALSE, k_threshold = 0.7, save_psis = FALSE, moment_match_args = list(), reloo_args = list(), model_names = NULL) { args <- split_dots(x, ..., model_names = model_names) c(args) <- nlist( criterion = "loo", pointwise, compare, resp, k_threshold, save_psis, moment_match, reloo, moment_match_args, reloo_args ) do_call(compute_loolist, args) } #' @export LOO.brmsfit <- function(x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, moment_match = FALSE, reloo = FALSE, k_threshold = 0.7, save_psis = FALSE, moment_match_args = list(), reloo_args = list(), model_names = NULL) { cl <- match.call() cl[[1]] <- quote(loo) eval(cl, parent.frame()) } #' @export LOO <- function(x, ...) { UseMethod("LOO") } #' Widely Applicable Information Criterion (WAIC) #' #' Compute the widely applicable information criterion (WAIC) #' based on the posterior likelihood using the \pkg{loo} package. #' For more details see \code{\link[loo:waic]{waic}}. #' #' @aliases waic WAIC WAIC.brmsfit #' #' @inheritParams loo.brmsfit #' #' @details See \code{\link{loo_compare}} for details on model comparisons. #' For \code{brmsfit} objects, \code{WAIC} is an alias of \code{waic}. #' Use method \code{\link[brms:add_criterion]{add_criterion}} to store #' information criteria in the fitted model object for later usage. 
#' #' @return If just one object is provided, an object of class \code{loo}. #' If multiple objects are provided, an object of class \code{loolist}. #' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler) #' (waic1 <- waic(fit1)) #' #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' (waic2 <- waic(fit2)) #' #' # compare both models #' loo_compare(waic1, waic2) #' } #' #' @references #' Vehtari, A., Gelman, A., & Gabry J. (2016). Practical Bayesian model #' evaluation using leave-one-out cross-validation and WAIC. In Statistics #' and Computing, doi:10.1007/s11222-016-9696-4. arXiv preprint arXiv:1507.04544. #' #' Gelman, A., Hwang, J., & Vehtari, A. (2014). #' Understanding predictive information criteria for Bayesian models. #' Statistics and Computing, 24, 997-1016. #' #' Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation #' and widely applicable information criterion in singular learning theory. #' The Journal of Machine Learning Research, 11, 3571-3594. #' #' @importFrom loo waic #' @export waic #' @export waic.brmsfit <- function(x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, model_names = NULL) { args <- split_dots(x, ..., model_names = model_names) c(args) <- nlist(criterion = "waic", pointwise, compare, resp) do_call(compute_loolist, args) } #' @export WAIC.brmsfit <- function(x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, model_names = NULL) { cl <- match.call() cl[[1]] <- quote(waic) eval(cl, parent.frame()) } #' @export WAIC <- function(x, ...) { UseMethod("WAIC") } # helper function used to create (lists of) 'loo' objects # @param models list of brmsfit objects # @param criterion name of the criterion to compute # @param use_stored use precomputed criterion objects if possible? # @param compare compare models using 'loo_compare'? 
# @param ... more arguments passed to compute_loo # @return If length(models) > 1 an object of class 'loolist' # If length(models) == 1 an object of class 'loo' compute_loolist <- function(models, criterion, use_stored = TRUE, compare = TRUE, ...) { criterion <- match.arg(criterion, loo_criteria()) args <- nlist(criterion, ...) for (i in seq_along(models)) { models[[i]] <- restructure(models[[i]]) } if (length(models) > 1L) { if (!match_nobs(models)) { stop2("Models have different number of observations.") } if (length(use_stored) == 1L) { use_stored <- rep(use_stored, length(models)) } out <- list(loos = named_list(names(models))) for (i in seq_along(models)) { args$x <- models[[i]] args$model_name <- names(models)[i] args$use_stored <- use_stored[i] out$loos[[i]] <- do_call(compute_loo, args) } compare <- as_one_logical(compare) if (compare) { out$diffs <- loo_compare(out$loos) # for backwards compatibility; remove in brms 3.0 out$ic_diffs__ <- SW(compare_ic(x = out$loos))$ic_diffs__ } class(out) <- "loolist" } else { args$x <- models[[1]] args$model_name <- names(models) args$use_stored <- use_stored out <- do_call(compute_loo, args) } out } # compute model fit criteria using the 'loo' package # @param x an object of class brmsfit # @param criterion the criterion to be computed # @param newdata optional data.frame of new data # @param resp optional names of the predicted response variables # @param model_name original variable name of object 'x' # @param use_stored use precomputed criterion objects if possible? # @param ... passed to the individual methods # @return an object of class 'loo' compute_loo <- function(x, criterion, newdata = NULL, resp = NULL, model_name = "", use_stored = TRUE, ...) { criterion <- match.arg(criterion, loo_criteria()) model_name <- as_one_character(model_name) use_stored <- as_one_logical(use_stored) out <- get_criterion(x, criterion) if (!(use_stored && is.loo(out))) { args <- nlist(x, newdata, resp, model_name, ...) 
out <- do_call(paste0(".", criterion), args) attr(out, "yhash") <- hash_response(x, newdata = newdata, resp = resp) } attr(out, "model_name") <- model_name out } # possible criteria to evaluate via the loo package loo_criteria <- function() { c("loo", "waic", "psis", "kfold", "loo_subsample") } # compute 'loo' criterion using the 'loo' package .loo <- function(x, pointwise, k_threshold, moment_match, reloo, moment_match_args, reloo_args, newdata, resp, model_name, save_psis, ...) { loo_args <- prepare_loo_args( x, newdata = newdata, resp = resp, pointwise = pointwise, save_psis = save_psis, ... ) out <- SW(do_call("loo", loo_args, pkg = "loo")) if (moment_match) { c(moment_match_args) <- nlist( x, loo = out, newdata, resp, k_threshold, check = FALSE, ... ) out <- do_call("loo_moment_match", moment_match_args) } if (reloo) { c(reloo_args) <- nlist( x, loo = out, newdata, resp, k_threshold, check = FALSE, ... ) out <- do_call("reloo", reloo_args) } recommend_loo_options(out, k_threshold, moment_match, model_name) out } # compute 'waic' criterion using the 'loo' package # @param model_name ignored but included to avoid being passed to '...' .waic <- function(x, pointwise, newdata, resp, model_name, ...) { loo_args <- prepare_loo_args( x, newdata = newdata, resp = resp, pointwise = pointwise, ... ) do_call("waic", loo_args, pkg = "loo") } # compute 'psis' criterion using the 'loo' package # @param model_name ignored but included to avoid being passed to '...' .psis <- function(x, newdata, resp, model_name, ...) { loo_args <- prepare_loo_args( x, newdata = newdata, resp = resp, pointwise = FALSE, ... ) loo_args$log_ratios <- -loo_args$x loo_args$x <- NULL do_call("psis", loo_args, pkg = "loo") } # prepare arguments passed to the methods of the `loo` package prepare_loo_args <- function(x, newdata, resp, pointwise, ...) { pointwise <- as_one_logical(pointwise) loo_args <- list(...) ll_args <- nlist(object = x, newdata, resp, pointwise, ...) 
loo_args$x <- do_call(log_lik, ll_args) if (pointwise) { loo_args$draws <- attr(loo_args$x, "draws") loo_args$data <- attr(loo_args$x, "data") } # compute pointwise relative efficiencies r_eff_args <- loo_args r_eff_args$fit <- x loo_args$r_eff <- do_call(r_eff_log_lik, r_eff_args) loo_args } #' Model comparison with the \pkg{loo} package #' #' For more details see \code{\link[loo:loo_compare]{loo_compare}}. #' #' @aliases loo_compare #' #' @inheritParams loo.brmsfit #' @param ... More \code{brmsfit} objects. #' @param criterion The name of the criterion to be extracted #' from \code{brmsfit} objects. #' #' @details All \code{brmsfit} objects should contain precomputed #' criterion objects. See \code{\link{add_criterion}} for more help. #' #' @return An object of class "\code{compare.loo}". #' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler) #' fit1 <- add_criterion(fit1, "waic") #' #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' fit2 <- add_criterion(fit2, "waic") #' #' # compare both models #' loo_compare(fit1, fit2, criterion = "waic") #' } #' #' @importFrom loo loo_compare #' @export loo_compare #' @export loo_compare.brmsfit <- function(x, ..., criterion = c("loo", "waic", "kfold"), model_names = NULL) { criterion <- match.arg(criterion) models <- split_dots(x, ..., model_names = model_names, other = FALSE) loos <- named_list(names(models)) for (i in seq_along(models)) { models[[i]] <- restructure(models[[i]]) loos[[i]] <- get_criterion(models[[i]], criterion) if (is.null(loos[[i]])) { stop2( "Model '", names(models)[i], "' does not contain a precomputed '", criterion, "' criterion. See ?loo_compare.brmsfit for help." ) } } loo_compare(loos) } #' Model averaging via stacking or pseudo-BMA weighting. 
#' #' Compute model weights for \code{brmsfit} objects via stacking #' or pseudo-BMA weighting. For more details, see #' \code{\link[loo:loo_model_weights]{loo::loo_model_weights}}. #' #' @aliases loo_model_weights #' #' @inheritParams loo.brmsfit #' #' @return A named vector of model weights. #' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler, family = "gaussian") #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = "gaussian") #' loo_model_weights(fit1, fit2) #' } #' #' @method loo_model_weights brmsfit #' @importFrom loo loo_model_weights #' @export loo_model_weights #' @export loo_model_weights.brmsfit <- function(x, ..., model_names = NULL) { args <- split_dots(x, ..., model_names = model_names) models <- args$models args$models <- NULL log_lik_list <- lapply(models, function(x) do_call(log_lik, c(list(x), args)) ) args$x <- log_lik_list args$r_eff_list <- mapply( r_eff_log_lik, log_lik_list, fit = models, SIMPLIFY = FALSE ) out <- do_call(loo::loo_model_weights, args) names(out) <- names(models) out } #' Add model fit criteria to model objects #' #' @param x An \R object typically of class \code{brmsfit}. #' @param criterion Names of model fit criteria #' to compute. Currently supported are \code{"loo"}, #' \code{"waic"}, \code{"kfold"}, \code{"loo_subsample"}, #' \code{"bayes_R2"} (Bayesian R-squared), #' \code{"loo_R2"} (LOO-adjusted R-squared), and #' \code{"marglik"} (log marginal likelihood). #' @param model_name Optional name of the model. If \code{NULL} #' (the default) the name is taken from the call to \code{x}. #' @param overwrite Logical; Indicates if already stored fit #' indices should be overwritten. Defaults to \code{FALSE}. #' @param file Either \code{NULL} or a character string. 
In the latter case, the #' fitted model object including the newly added criterion values is saved via #' \code{\link{saveRDS}} in a file named after the string supplied in #' \code{file}. The \code{.rds} extension is added automatically. If \code{x} #' was already stored in a file before, the file name will be reused #' automatically (with a message) unless overwritten by \code{file}. In any #' case, \code{file} only applies if new criteria were actually added via #' \code{add_criterion} or if \code{force_save} was set to \code{TRUE}. #' @param force_save Logical; only relevant if \code{file} is specified and #' ignored otherwise. If \code{TRUE}, the fitted model object will be saved #' regardless of whether new criteria were added via \code{add_criterion}. #' @param ... Further arguments passed to the underlying #' functions computing the model fit criteria. #' #' @return An object of the same class as \code{x}, but #' with model fit criteria added for later usage. #' #' @details Functions \code{add_loo} and \code{add_waic} are aliases of #' \code{add_criterion} with fixed values for the \code{criterion} argument. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ Trt, data = epilepsy) #' # add both LOO and WAIC at once #' fit <- add_criterion(fit, c("loo", "waic")) #' print(fit$criteria$loo) #' print(fit$criteria$waic) #' } #' #' @export add_criterion <- function(x, ...) { UseMethod("add_criterion") } #' @rdname add_criterion #' @export add_criterion.brmsfit <- function(x, criterion, model_name = NULL, overwrite = FALSE, file = NULL, force_save = FALSE, ...) { if (!is.null(model_name)) { model_name <- as_one_character(model_name) } else { model_name <- deparse0(substitute(x)) } criterion <- unique(as.character(criterion)) if (any(criterion == "R2")) { # deprecated as of version 2.10.4 warning2("Criterion 'R2' is deprecated. 
Please use 'bayes_R2' instead.") criterion[criterion == "R2"] <- "bayes_R2" } loo_options <- c("loo", "waic", "kfold", "loo_subsample") options <- c(loo_options, "bayes_R2", "loo_R2", "marglik") if (!length(criterion) || !all(criterion %in% options)) { stop2("Argument 'criterion' should be a subset of ", collapse_comma(options)) } auto_save <- FALSE if (!is.null(file)) { file <- paste0(as_one_character(file), ".rds") } else { file <- x$file if (!is.null(file)) auto_save <- TRUE } force_save <- as_one_logical(force_save) overwrite <- as_one_logical(overwrite) if (overwrite) { # recompute all criteria new_criteria <- criterion } else { # only computed criteria not already stored new_criteria <- criterion[ulapply(x$criteria[criterion], is.null)] } # remove all criteria that are to be recomputed x$criteria[new_criteria] <- NULL args <- list(x, ...) for (fun in intersect(new_criteria, loo_options)) { args$model_names <- model_name x$criteria[[fun]] <- do_call(fun, args) } if ("bayes_R2" %in% new_criteria) { args$summary <- FALSE x$criteria$bayes_R2 <- do_call(bayes_R2, args) } if ("loo_R2" %in% new_criteria) { args$summary <- FALSE x$criteria$loo_R2 <- do_call(loo_R2, args) } if ("marglik" %in% new_criteria) { x$criteria$marglik <- do_call(bridge_sampler, args) } if (!is.null(file) && (force_save || length(new_criteria))) { if (auto_save) { message("Automatically saving the model object in '", file, "'") } x$file <- file saveRDS(x, file = file) } x } # extract a recomputed model fit criterion get_criterion <- function(x, criterion) { stopifnot(is.brmsfit(x)) criterion <- as_one_character(criterion) x$criteria[[criterion]] } # create a hash based on the response of a model hash_response <- function(x, newdata = NULL, resp = NULL, ...) 
{ require_package("digest") stopifnot(is.brmsfit(x)) sdata <- standata( x, newdata = newdata, re_formula = NA, internal = TRUE, check_response = TRUE, only_response = TRUE ) add_funs <- lsp("brms", what = "exports", pattern = "^resp_") regex <- c("Y", sub("^resp_", "", add_funs)) regex <- outer(regex, escape_all(usc(resp)), FUN = paste0) regex <- paste0("(", as.vector(regex), ")", collapse = "|") regex <- paste0("^(", regex, ")(_|$)") out <- sdata[grepl(regex, names(sdata))] out <- as.matrix(as.data.frame(rmNULL(out))) out <- p(out, attr(sdata, "old_order")) # see issue #642 attributes(out) <- NULL digest::sha1(x = out, ...) } # compare the response parts of multiple brmsfit objects # @param models A list of brmsfit objects # @param ... passed to hash_response # @return TRUE if the response parts of all models match and FALSE otherwise match_response <- function(models, ...) { if (length(models) <= 1L) { out <- TRUE } else { yhash <- lapply(models, hash_response, ...) yhash_check <- ulapply(yhash, is_equal, yhash[[1]]) if (all(yhash_check)) { out <- TRUE } else { out <- FALSE } } out } # compare number of observations of multipe models # @param models A list of brmsfit objects # @param ... currently ignored # @return TRUE if the number of rows match match_nobs <- function(models, ...) 
{ if (length(models) <= 1L) { out <- TRUE } else { nobs <- lapply(models, nobs) nobs_check <- ulapply(nobs, is_equal, nobs[[1]]) if (all(nobs_check)) { out <- TRUE } else { out <- FALSE } } out } # validate models passed to loo and related methods # @param models list of fitted model objects # @param model_names names specified by the user # @param sub_names names inferred by substitute() validate_models <- function(models, model_names, sub_names) { stopifnot(is.list(models)) model_names <- as.character(model_names) if (!length(model_names)) { model_names <- as.character(sub_names) } if (length(model_names) != length(models)) { stop2("Number of model names is not equal to the number of models.") } names(models) <- model_names for (i in seq_along(models)) { if (!is.brmsfit(models[[i]])) { stop2("Object '", names(models)[i], "' is not of class 'brmsfit'.") } } models } # recommend options if approximate loo fails for some observations # @param moment_match has moment matching already been performed? recommend_loo_options <- function(loo, k_threshold, moment_match = FALSE, model_name = "") { if (isTRUE(nzchar(model_name))) { model_name <- paste0(" in model '", model_name, "'") } else { model_name <- "" } n <- length(loo::pareto_k_ids(loo, threshold = k_threshold)) if (!moment_match && n > 0) { warning2( "Found ", n, " observations with a pareto_k > ", k_threshold, model_name, ". It is recommended to set 'moment_match = TRUE' in order ", "to perform moment matching for problematic observations. " ) out <- "loo_moment_match" } else if (n > 0 && n <= 10) { warning2( "Found ", n, " observations with a pareto_k > ", k_threshold, model_name, ". It is recommended to set 'reloo = TRUE' in order to ", "calculate the ELPD without the assumption that these observations " , "are negligible. This will refit the model ", n, " times to compute ", "the ELPDs for the problematic observations directly." 
) out <- "reloo" } else if (n > 10) { warning2( "Found ", n, " observations with a pareto_k > ", k_threshold, model_name, ". With this many problematic observations, it may be more ", "appropriate to use 'kfold' with argument 'K = 10' to perform ", "10-fold cross-validation rather than LOO." ) out <- "kfold" } else { out <- "loo" } invisible(out) } # helper function to compute relative efficiences # @param x matrix of posterior draws # @param fit a brmsfit object to extract metadata from # @param allow_na allow NA values in the output? # @return a numeric vector of length NCOL(x) r_eff_helper <- function(x, chain_id, allow_na = TRUE, ...) { out <- loo::relative_eff(x, chain_id = chain_id, ...) if (!allow_na && anyNA(out)) { # avoid error in loo if some but not all r_effs are NA out <- rep(1, length(out)) warning2( "Ignoring relative efficiencies as some were NA. ", "See argument 'r_eff' in ?loo::loo for more details." ) } out } # wrapper around r_eff_helper to compute efficiency # of likelihood draws based on log-likelihood draws r_eff_log_lik <- function(x, ...) { UseMethod("r_eff_log_lik") } #' @export r_eff_log_lik.matrix <- function(x, fit, allow_na = FALSE, ...) { if (is.brmsfit_multiple(fit)) { # due to stacking of chains from multiple models # efficiency computations will likely be incorrect # assume relative efficiency of 1 for now return(rep(1, ncol(x))) } chain_id <- get_chain_id(nrow(x), fit) r_eff_helper(exp(x), chain_id = chain_id, allow_na = allow_na, ...) } #' @export r_eff_log_lik.function <- function(x, fit, draws, allow_na = FALSE, ...) { if (is.brmsfit_multiple(fit)) { # due to stacking of chains from multiple models # efficiency computations will likely be incorrect # assume relative efficiency of 1 for now return(rep(1, draws$nobs)) } lik_fun <- function(data_i, draws, ...) { exp(x(data_i, draws, ...)) } chain_id <- get_chain_id(draws$ndraws, fit) r_eff_helper( lik_fun, chain_id = chain_id, draws = draws, allow_na = allow_na, ... 
) } # get chain IDs per posterior draw get_chain_id <- function(ndraws, fit) { if (ndraws != ndraws(fit)) { # don't know the chain IDs of a subset of draws chain_id <- rep(1L, ndraws) } else { nchains <- fit$fit@sim$chains chain_id <- rep(seq_len(nchains), each = ndraws / nchains) } chain_id } # print the output of a list of loo objects #' @export print.loolist <- function(x, digits = 1, ...) { model_names <- loo::find_model_names(x$loos) for (i in seq_along(x$loos)) { cat(paste0("Output of model '", model_names[i], "':\n")) print(x$loos[[i]], digits = digits, ...) cat("\n") } if (!is.null(x$diffs)) { cat("Model comparisons:\n") print(x$diffs, digits = digits, ...) } invisible(x) } # ---------- deprecated functions ---------- #' @rdname add_ic #' @export add_loo <- function(x, model_name = NULL, ...) { warning2("'add_loo' is deprecated. Please use 'add_criterion' instead.") if (!is.null(model_name)) { model_name <- as_one_character(model_name) } else { model_name <- deparse0(substitute(x)) } add_criterion(x, criterion = "loo", model_name = model_name, ...) } #' @rdname add_ic #' @export add_waic <- function(x, model_name = NULL, ...) { warning2("'add_waic' is deprecated. Please use 'add_criterion' instead.") if (!is.null(model_name)) { model_name <- as_one_character(model_name) } else { model_name <- deparse0(substitute(x)) } add_criterion(x, criterion = "waic", model_name = model_name, ...) } #' Add model fit criteria to model objects #' #' Deprecated aliases of \code{\link{add_criterion}}. #' #' @inheritParams add_criterion #' @param ic,value Names of model fit criteria #' to compute. Currently supported are \code{"loo"}, #' \code{"waic"}, \code{"kfold"}, \code{"R2"} (R-squared), and #' \code{"marglik"} (log marginal likelihood). #' #' @return An object of the same class as \code{x}, but #' with model fit criteria added for later usage. #' Previously computed criterion objects will be overwritten. #' #' @export add_ic <- function(x, ...) 
{ UseMethod("add_ic") } #' @rdname add_ic #' @export add_ic.brmsfit <- function(x, ic = "loo", model_name = NULL, ...) { warning2("'add_ic' is deprecated. Please use 'add_criterion' instead.") if (!is.null(model_name)) { model_name <- as_one_character(model_name) } else { model_name <- deparse0(substitute(x)) } add_criterion(x, criterion = ic, model_name = model_name, ...) } #' @rdname add_ic #' @export 'add_ic<-' <- function(x, ..., value) { add_ic(x, ic = value, ...) } #' Compare Information Criteria of Different Models #' #' Compare information criteria of different models fitted #' with \code{\link{waic}} or \code{\link{loo}}. #' Deprecated and will be removed in the future. Please use #' \code{\link{loo_compare}} instead. #' #' @param ... At least two objects returned by #' \code{\link{waic}} or \code{\link{loo}}. #' Alternatively, \code{brmsfit} objects with information #' criteria precomputed via \code{\link{add_ic}} #' may be passed, as well. #' @param x A \code{list} containing the same types of objects as #' can be passed via \code{...}. #' @param ic The name of the information criterion to be extracted #' from \code{brmsfit} objects. Ignored if information #' criterion objects are only passed directly. #' #' @return An object of class \code{iclist}. #' #' @details See \code{\link{loo_compare}} for the recommended way #' of comparing models with the \pkg{loo} package. 
#' #' @seealso #' \code{\link{loo}}, #' \code{\link{loo_compare}} #' \code{\link{add_criterion}} #' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler) #' waic1 <- waic(fit1) #' #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' waic2 <- waic(fit2) #' #' # compare both models #' compare_ic(waic1, waic2) #' } #' #' @export compare_ic <- function(..., x = NULL, ic = c("loo", "waic", "kfold")) { # will be removed in brms 3.0 warning2( "'compare_ic' is deprecated and will be removed ", "in the future. Please use 'loo_compare' instead." ) ic <- match.arg(ic) if (!(is.null(x) || is.list(x))) { stop2("Argument 'x' should be a list.") } x$ic_diffs__ <- NULL x <- c(list(...), x) for (i in seq_along(x)) { # extract precomputed values from brmsfit objects if (is.brmsfit(x[[i]]) && !is.null(x[[i]][[ic]])) { x[[i]] <- x[[i]][[ic]] } } if (!all(sapply(x, inherits, "loo"))) { stop2("All inputs should have class 'loo' ", "or contain precomputed 'loo' objects.") } if (length(x) < 2L) { stop2("Expecting at least two objects.") } ics <- unname(sapply(x, function(y) rownames(y$estimates)[3])) if (!all(ics %in% ics[1])) { stop2("All inputs should be from the same criterion.") } yhash <- lapply(x, attr, which = "yhash") yhash_check <- ulapply(yhash, is_equal, yhash[[1]]) if (!all(yhash_check)) { warning2( "Model comparisons are likely invalid as the response ", "values of at least two models do not match." 
) } names(x) <- loo::find_model_names(x) n_models <- length(x) ic_diffs <- matrix(0, nrow = n_models * (n_models - 1) / 2, ncol = 2) rnames <- rep("", nrow(ic_diffs)) # pairwise comparision to get differences in ICs and their SEs n <- 1 for (i in seq_len(n_models - 1)) { for (j in (i + 1):n_models) { tmp <- SW(loo::compare(x[[j]], x[[i]])) ic_diffs[n, ] <- c(-2 * tmp[["elpd_diff"]], 2 * tmp[["se"]]) rnames[n] <- paste(names(x)[i], "-", names(x)[j]) n <- n + 1 } } rownames(ic_diffs) <- rnames colnames(ic_diffs) <- c(toupper(ics[1]), "SE") x$ic_diffs__ <- ic_diffs class(x) <- "iclist" x } # print the output of LOO and WAIC with multiple models # deprecated as of brms > 2.5.0 and will be removed in brms 3.0 #' @export print.iclist <- function(x, digits = 2, ...) { m <- x m$ic_diffs__ <- NULL if (length(m)) { ic <- rownames(m[[1]]$estimates)[3] mat <- matrix(0, nrow = length(m), ncol = 2) dimnames(mat) <- list(names(m), c(toupper(ic), "SE")) for (i in seq_along(m)) { mat[i, ] <- m[[i]]$estimates[3, ] } } else { mat <- ic <- NULL } ic_diffs <- x$ic_diffs__ if (is.matrix(attr(x, "compare"))) { # deprecated as of brms 1.4.0 ic_diffs <- attr(x, "compare") } if (is.matrix(ic_diffs)) { # models were compared using the compare_ic function mat <- rbind(mat, ic_diffs) } print(round(mat, digits = digits), na.print = "") invisible(x) } brms/R/predictive_error.R0000644000176200001440000001364614417771045015134 0ustar liggesusers#' Posterior Draws of Predictive Errors #' #' Compute posterior draws of predictive errors, that is, observed minus #' predicted responses. Can be performed for the data used to fit the model #' (posterior predictive checks) or for new data. #' #' @inheritParams posterior_predict.brmsfit #' @param method Method used to obtain predictions. Can be set to #' \code{"posterior_predict"} (the default), \code{"posterior_epred"}, #' or \code{"posterior_linpred"}. For more details, see the respective #' function documentations. 
#' #' @return An S x N \code{array} of predictive error draws, where S is the #' number of posterior draws and N is the number of observations. #' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, cores = 2) #' #' ## extract predictive errors #' pe <- predictive_error(fit) #' str(pe) #' } #' #' @aliases predictive_error #' @method predictive_error brmsfit #' @importFrom rstantools predictive_error #' @export #' @export predictive_error predictive_error.brmsfit <- function( object, newdata = NULL, re_formula = NULL, re.form = NULL, method = "posterior_predict", resp = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ... ) { cl <- match.call() if ("re.form" %in% names(cl) && !missing(re.form)) { re_formula <- re.form } .predictive_error( object, newdata = newdata, re_formula = re_formula, method = method, type = "ordinary", resp = resp, ndraws = ndraws, draw_ids = draw_ids, sort = sort, ... ) } #' Posterior Draws of Residuals/Predictive Errors #' #' This method is an alias of \code{\link{predictive_error.brmsfit}} #' with additional arguments for obtaining summaries of the computed draws. #' #' @inheritParams predictive_error.brmsfit #' @param type The type of the residuals, #' either \code{"ordinary"} or \code{"pearson"}. #' More information is provided under 'Details'. #' @param summary Should summary statistics be returned #' instead of the raw values? Default is \code{TRUE}.. #' @param robust If \code{FALSE} (the default) the mean is used as #' the measure of central tendency and the standard deviation as #' the measure of variability. If \code{TRUE}, the median and the #' median absolute deviation (MAD) are applied instead. #' Only used if \code{summary} is \code{TRUE}. #' @param probs The percentiles to be computed by the \code{quantile} #' function. Only used if \code{summary} is \code{TRUE}. #' #' @return An \code{array} of predictive error/residual draws. 
#' If
#' \code{summary = FALSE} the output resembles those of
#' \code{\link{predictive_error.brmsfit}}. If \code{summary = TRUE} the output
#' is an N x E matrix, where N is the number of observations and E denotes
#' the summary statistics computed from the draws.
#'
#' @details Residuals of type \code{'ordinary'} are of the form \eqn{R = Y -
#'   Yrep}, where \eqn{Y} is the observed and \eqn{Yrep} is the predicted response.
#'   Residuals of type \code{pearson} are of the form \eqn{R = (Y - Yrep) /
#'   SD(Yrep)}, where \eqn{SD(Yrep)} is an estimate of the standard deviation of
#'   \eqn{Yrep}.
#'
#' @examples
#' \dontrun{
#' ## fit a model
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#'            data = inhaler, cores = 2)
#'
#' ## extract residuals/predictive errors
#' res <- residuals(fit)
#' head(res)
#' }
#'
#' @export
residuals.brmsfit <- function(object, newdata = NULL, re_formula = NULL,
                              method = "posterior_predict",
                              type = c("ordinary", "pearson"),
                              resp = NULL, ndraws = NULL, draw_ids = NULL,
                              sort = FALSE, summary = TRUE, robust = FALSE,
                              probs = c(0.025, 0.975), ...) {
  summary <- as_one_logical(summary)
  # compute the raw draws of the residuals/predictive errors
  out <- .predictive_error(
    object, newdata = newdata, re_formula = re_formula,
    method = method, type = type, resp = resp, ndraws = ndraws,
    draw_ids = draw_ids, sort = sort, ...
  )
  if (summary) {
    # collapse the S x N draws into an N x E matrix of summary statistics
    out <- posterior_summary(out, probs = probs, robust = robust)
  }
  out
}

# internal function doing the work for predictive_error.brmsfit
# @param object a 'brmsfit' object
# @param type either "ordinary" (Y - Yrep) or the deprecated "pearson"
# @param nsamples,subset deprecated aliases of 'ndraws' and 'draw_ids'
# @return an S x N matrix of error draws, or an S x N x R array for
#   multivariate models (R = number of response variables)
.predictive_error <- function(object, newdata, re_formula, method,
                              type, resp, ndraws, draw_ids, sort,
                              nsamples = NULL, subset = NULL, ...) {
  contains_draws(object)
  object <- restructure(object)
  method <- validate_pp_method(method)
  type <- match.arg(type, c("ordinary", "pearson"))
  resp <- validate_resp(resp, object)
  family <- family(object, resp = resp)
  if (is_polytomous(family)) {
    stop2("Predictive errors are not defined for ordinal or categorical models.")
  }
  # map deprecated argument names onto their replacements
  ndraws <- use_alias(ndraws, nsamples)
  draw_ids <- use_alias(draw_ids, subset)
  draw_ids <- validate_draw_ids(object, draw_ids, ndraws)
  pred_args <- nlist(
    object, newdata, re_formula, resp, draw_ids,
    summary = FALSE, sort = sort, ...
  )
  # 'method' is one of the prediction generics validated above
  yrep <- do_call(method, pred_args)
  y <- get_y(object, resp, newdata = newdata, sort = sort, warn = TRUE, ...)
  if (length(dim(yrep)) == 3L) {
    # multivariate model: replicate the observed responses per draw
    # and bind them into an array matching the shape of 'yrep'
    y <- lapply(seq_cols(y), function(i) y[, i])
    y <- lapply(y, data2draws, dim = dim(yrep)[1:2])
    y <- abind(y, along = 3)
    dimnames(y)[[3]] <- dimnames(yrep)[[3]]
  } else {
    y <- data2draws(y, dim = dim(yrep))
  }
  out <- y - yrep
  # free the potentially large intermediate objects early
  remove(y, yrep)
  if (type == "pearson") {
    # deprecated as of brms 2.10.6
    warning2("Type 'pearson' is deprecated and will be removed in the future.")
    # get predicted standard deviation for each observation
    pred_args$summary <- TRUE
    pred <- do_call("predict", pred_args)
    if (length(dim(pred)) == 3L) {
      sd_pred <- array2list(pred[, 2, ])
      sd_pred <- lapply(sd_pred, data2draws, dim = dim(out)[1:2])
      sd_pred <- abind(sd_pred, along = 3)
    } else {
      sd_pred <- data2draws(pred[, 2], dim = dim(out))
    }
    out <- out / sd_pred
  }
  out
}
brms/R/zzz.R0000644000176200001440000000175613657252321012415 0ustar liggesusers# Uncomment the code below to enable unit tests for new stan functions
# new_stan_functions <- function() {
#   # copy all new stan functions into a single .stan file and compile it
#   isystem <- system.file("chunks", package = "brms")
#   chunk_filenames <- list.files(isystem, pattern = "^fun_")
#   families <- list(cumulative("probit"), sratio("logit"),
#                    cratio("cloglog"), acat("cauchit"))
#   cs <- c(rep(FALSE, 2), rep(TRUE, 2))
#   ordinal_funs <-
ulapply(seq_along(families), function(i) # stan_ordinal(families[[i]], cs = cs[i])$fun) # temp_file <- tempfile() # cat(paste0("functions { \n", # collapse(" #include '", chunk_filenames, "' \n"), # collapse(ordinal_funs), "} \nmodel {} \n"), # file = temp_file) # model <- rstan::stanc_builder(file = temp_file, isystem = isystem, # obfuscate_model_name = TRUE) # rstan::stan_model(stanc_ret = model) # } # new_stan_functions <- new_stan_functions() brms/R/formula-sp.R0000644000176200001440000004702714464720311013642 0ustar liggesusers# This file contains functions dealing with the extended # formula syntax to specify special effects terms #' Predictors with Measurement Error in \pkg{brms} Models #' #' (Soft deprecated) Specify predictors with measurement error. The function #' does not evaluate its arguments -- it exists purely to help set up a model. #' #' @param x The variable measured with error. #' @param sdx Known measurement error of \code{x} #' treated as standard deviation. #' @param gr Optional grouping factor to specify which #' values of \code{x} correspond to the same value of the #' latent variable. If \code{NULL} (the default) each #' observation will have its own value of the latent variable. #' #' @details #' For detailed documentation see \code{help(brmsformula)}. #' \code{me} terms are soft deprecated in favor of the more #' general and consistent \code{\link{mi}} terms. #' By default, latent noise-free variables are assumed #' to be correlated. To change that, add \code{set_mecor(FALSE)} #' to your model formula object (see examples). 
#'
#' @seealso
#'   \code{\link{brmsformula}}, \code{\link{brmsformula-helpers}}
#'
#' @examples
#' \dontrun{
#' # sample some data
#' N <- 100
#' dat <- data.frame(
#'   y = rnorm(N), x1 = rnorm(N),
#'   x2 = rnorm(N), sdx = abs(rnorm(N, 1))
#' )
#' # fit a simple error-in-variables model
#' fit1 <- brm(y ~ me(x1, sdx) + me(x2, sdx), data = dat,
#'             save_pars = save_pars(latent = TRUE))
#' summary(fit1)
#'
#' # turn off modeling of correlations
#' bform <- bf(y ~ me(x1, sdx) + me(x2, sdx)) + set_mecor(FALSE)
#' fit2 <- brm(bform, data = dat, save_pars = save_pars(latent = TRUE))
#' summary(fit2)
#' }
#'
#' @export
me <- function(x, sdx, gr = NULL) {
  # capture the supplied expressions unevaluated;
  # 'term' is used for consistency with other special terms
  term <- deparse0(substitute(x))
  sdx <- deparse0(substitute(sdx))
  gr_expr <- substitute(gr)
  if (is.null(gr_expr)) {
    # no grouping factor: every observation gets its own latent value
    gr <- ""
  } else {
    gr <- deparse0(gr_expr)
    stopif_illegal_group(gr)
  }
  label <- deparse0(match.call())
  structure(
    nlist(term, sdx, gr, label),
    class = c("me_term", "sp_term")
  )
}

#' Predictors with Missing Values in \pkg{brms} Models
#'
#' Specify predictor term with missing values in \pkg{brms}. The function does
#' not evaluate its arguments -- it exists purely to help set up a model.
#' For documentation on how to specify missing values in response variables,
#' see \code{\link{resp_mi}}.
#'
#' @param x The variable containing missing values.
#' @param idx An optional variable containing indices of observations in `x`
#'   that are to be used in the model. This is mostly relevant in partially
#'   subsetted models (via \code{resp_subset}) but may also have other
#'   applications that I haven't thought of.
#'
#' @details For detailed documentation see \code{help(brmsformula)}.
#'
#' @seealso \code{\link{brmsformula}}
#'
#' @examples
#' \dontrun{
#' data("nhanes", package = "mice")
#' N <- nrow(nhanes)
#'
#' # simple model with missing data
#' bform1 <- bf(bmi | mi() ~ age * mi(chl)) +
#'   bf(chl | mi() ~ age) +
#'   set_rescor(FALSE)
#'
#' fit1 <- brm(bform1, data = nhanes)
#'
#' summary(fit1)
#' plot(conditional_effects(fit1, resp = "bmi"), ask = FALSE)
#' loo(fit1, newdata = na.omit(fit1$data))
#'
#' # simulate some measurement noise
#' nhanes$se <- rexp(N, 2)
#'
#' # measurement noise can be handled within 'mi' terms
#' # with or without the presence of missing values
#' bform2 <- bf(bmi | mi() ~ age * mi(chl)) +
#'   bf(chl | mi(se) ~ age) +
#'   set_rescor(FALSE)
#'
#' fit2 <- brm(bform2, data = nhanes)
#'
#' summary(fit2)
#' plot(conditional_effects(fit2, resp = "bmi"), ask = FALSE)
#'
#' # 'mi' terms can also be used when some responses are subsetted
#' nhanes$sub <- TRUE
#' nhanes$sub[1:2] <- FALSE
#' nhanes$id <- 1:N
#' nhanes$idx <- sample(3:N, N, TRUE)
#'
#' # this requires the addition term 'index' being specified
#' # in the subsetted part of the model
#' bform3 <- bf(bmi | mi() ~ age * mi(chl, idx)) +
#'   bf(chl | mi(se) + subset(sub) + index(id) ~ age) +
#'   set_rescor(FALSE)
#'
#' fit3 <- brm(bform3, data = nhanes)
#'
#' summary(fit3)
#' plot(conditional_effects(fit3, resp = "bmi"), ask = FALSE)
#' }
#'
#' @export
mi <- function(x, idx = NA) {
  # use 'term' for consistency with other special terms;
  # arguments are captured unevaluated and stored as character strings
  term <- deparse0(substitute(x))
  term_vars <- all_vars(term)
  if (!is_equal(term, term_vars)) {
    stop2("'mi' only accepts single untransformed variables.")
  }
  idx <- deparse0(substitute(idx))
  if (idx != "NA") {
    idx_vars <- all_vars(idx)
    if (!is_equal(idx, idx_vars)) {
      # name the offending argument so users know it is 'idx',
      # not 'x', that failed validation
      stop2("Argument 'idx' of 'mi' only accepts single untransformed variables.")
    }
  }
  label <- deparse0(match.call())
  out <- nlist(term, idx, label)
  class(out) <- c("mi_term", "sp_term")
  out
}

#' Monotonic Predictors in \pkg{brms} Models
#'
#' Specify a monotonic predictor term in \pkg{brms}.
The function does not #' evaluate its arguments -- it exists purely to help set up a model. #' #' @param x An integer variable or an ordered factor to be modeled as monotonic. #' @param id Optional character string. All monotonic terms #' with the same \code{id} within one formula will be modeled as #' having the same simplex (shape) parameter vector. If all monotonic terms #' of the same predictor have the same \code{id}, the resulting #' predictions will be conditionally monotonic for all values of #' interacting covariates (Bürkner & Charpentier, 2020). #' #' @details See Bürkner and Charpentier (2020) for the underlying theory. For #' detailed documentation of the formula syntax used for monotonic terms, #' see \code{help(brmsformula)} as well as \code{vignette("brms_monotonic")}. #' #' @seealso \code{\link{brmsformula}} #' #' @references #' Bürkner P. C. & Charpentier E. (2020). Modeling Monotonic Effects of Ordinal #' Predictors in Regression Models. British Journal of Mathematical and #' Statistical Psychology. 
#' doi:10.1111/bmsp.12195
#'
#' @examples
#' \dontrun{
#' # generate some data
#' income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100")
#' income <- factor(sample(income_options, 100, TRUE),
#'                  levels = income_options, ordered = TRUE)
#' mean_ls <- c(30, 60, 70, 75)
#' ls <- mean_ls[income] + rnorm(100, sd = 7)
#' dat <- data.frame(income, ls)
#'
#' # fit a simple monotonic model
#' fit1 <- brm(ls ~ mo(income), data = dat)
#' summary(fit1)
#' plot(fit1, N = 6)
#' plot(conditional_effects(fit1), points = TRUE)
#'
#' # model interaction with other variables
#' dat$x <- sample(c("a", "b", "c"), 100, TRUE)
#' fit2 <- brm(ls ~ mo(income)*x, data = dat)
#' summary(fit2)
#' plot(conditional_effects(fit2), points = TRUE)
#'
#' # ensure conditional monotonicity
#' fit3 <- brm(ls ~ mo(income, id = "i")*x, data = dat)
#' summary(fit3)
#' plot(conditional_effects(fit3), points = TRUE)
#' }
#'
#' @export
mo <- function(x, id = NA) {
  # use 'term' for consistency with other special terms;
  # the variable expression is captured unevaluated
  term <- deparse0(substitute(x))
  id <- as_one_character(id, allow_na = TRUE)
  label <- deparse0(match.call())
  out <- nlist(term, id, label)
  class(out) <- c("mo_term", "sp_term")
  out
}

# find variable names for which to keep NAs
vars_keep_na <- function(x, ...) {
  UseMethod("vars_keep_na")
}

#' @export
vars_keep_na.mvbrmsterms <- function(x, ...) {
  # collect NA-keeping variables across all univariate submodels
  resps <- get_element(x, "respform")
  resps <- ulapply(resps, terms_resp, check_names = FALSE)
  out <- lapply(x$terms, vars_keep_na, responses = resps, ...)
  vars_mi <- unique(ulapply(out, attr, "vars_mi"))
  out <- unique(unlist(out))
  # every variable used in an mi() term must itself have an 'mi'
  # addition term in its own response model
  miss_mi <- setdiff(vars_mi, out)
  if (length(miss_mi)) {
    stop2(
      "Response models of variables in 'mi' terms require " ,
      "specification of the addition argument 'mi'. See ?mi. ",
      "Error occurred for ", collapse_comma(miss_mi), "."
    )
  }
  out
}

#' @export
vars_keep_na.brmsterms <- function(x, responses = NULL, ...) {
  out <- character(0)
  if (is.formula(x$adforms$mi)) {
    # keep NAs in the response and in variables of the 'mi' addition term
    mi_respcall <- terms_resp(x$respform, check_names = FALSE)
    mi_respvars <- all_vars(mi_respcall)
    mi_advars <- all_vars(x$adforms$mi)
    c(out) <- unique(c(mi_respcall, mi_respvars, mi_advars))
  }
  if (is.formula(x$adforms$cens)) {
    # 'y2' of interval censoring may legitimately contain NAs
    y2_expr <- get_ad_expr(x, "cens", "y2", type = "vars")
    c(out) <- all_vars(y2_expr)
  }
  uni_mi <- ulapply(get_effect(x, "sp"), attr, "uni_mi")
  if (length(uni_mi)) {
    # variables used in mi() predictor terms must be modeled as responses
    vars_mi <- ulapply(uni_mi, function(term) eval2(term)$term)
    miss_mi <- setdiff(vars_mi, responses)
    if (length(miss_mi)) {
      stop2(
        "Variables in 'mi' terms should also be specified " ,
        "as response variables in the model. See ?mi. ",
        "Error occurred for ", collapse_comma(miss_mi), "."
      )
    }
    attr(out, "vars_mi") <- vars_mi
  }
  out
}

# extract unique names of noise-free terms
get_uni_me <- function(x) {
  uni_me <- ulapply(get_effect(x, "sp"), attr, "uni_me")
  if (!length(uni_me)) {
    return(NULL)
  }
  # the same variable must not appear in differently specified me() calls
  xname <- ulapply(uni_me, function(term) eval2(term)$term)
  df <- data.frame(xname, uni_me)
  df <- df[!duplicated(df), ]
  xdupl <- df$xname[duplicated(df$xname)]
  if (length(xdupl)) {
    calls <- df$uni_me[df$xname == xdupl[1]]
    stop2(
      "Variable '", xdupl[1], "' is used in different calls to 'me'.\n",
      "Associated calls are: ", collapse_comma(calls)
    )
  }
  unique(uni_me)
}

# save all me-terms within a tidy data.frame
# @param bterms a (mv)brmsterms object
# @param data data frame containing the noisy variables
# @param old_levels optional list of pre-computed grouping factor levels
# @return a 'meef_frame' with one row per unique me() term
tidy_meef <- function(bterms, data, old_levels = NULL) {
  uni_me <- get_uni_me(bterms)
  if (!length(uni_me)) {
    return(empty_meef())
  }
  if (has_subset(bterms)) {
    # 'Xme' variables need to be the same across univariate models
    stop2("Argument 'subset' is not supported when using 'me' terms.")
  }
  out <- data.frame(
    term = uni_me, xname = "", grname = "",
    stringsAsFactors = FALSE
  )
  levels <- vector("list", nrow(out))
  for (i in seq_rows(out)) {
    # evaluate the stored me() call to recover its components
    tmp <- eval2(out$term[i])
    out$xname[i] <- tmp$term
    if (isTRUE(nzchar(tmp$gr))) {
      out$grname[i] <- tmp$gr
      if (length(old_levels)) {
        levels[[i]] <- old_levels[[tmp$gr]]
      } else {
        levels[[i]] <- extract_levels(get(tmp$gr, data))
      }
    }
  }
  out$coef <- rename(paste0("me", out$xname))
  out$cor <- isTRUE(bterms$mecor)
  names(levels) <- out$grname
  levels <- levels[lengths(levels) > 0L]
  if (length(levels)) {
    # keep one set of levels per grouping factor
    levels <- levels[!duplicated(names(levels))]
    attr(out, "levels") <- levels
  }
  structure(out, class = c("meef_frame", "data.frame"))
}

# create an empty 'meef_frame' (no me() terms present)
empty_meef <- function() {
  out <- data.frame(
    term = character(0), xname = character(0),
    grname = character(0), cor = logical(0),
    stringsAsFactors = FALSE
  )
  structure(out, class = c("meef_frame", "data.frame"))
}

is.meef_frame <- function(x) {
  inherits(x, "meef_frame")
}

# handle default of correlations between 'me' terms
default_mecor <- function(mecor = NULL) {
  # latent noise-free variables are modeled as correlated by default
  if (is.null(mecor)) TRUE else as_one_logical(mecor)
}

# find names of all variables used in a special effects type
get_sp_vars <- function(x, type) {
  sp_terms <- ulapply(get_effect(x, "sp"), all_terms)
  all_vars(str2formula(get_matches_expr(regex_sp(type), sp_terms)))
}

# gather information of special effects terms
# @param x either a formula or a list containing an element "sp"
# @param data data frame containing the monotonic variables
# @return a data.frame with one row per special term
# TODO: refactor to store in long format to avoid several list columns?
# gather information on special effects ('sp') terms in a tidy data.frame
# with one row per term and several list columns tracking the mo/me/mi
# sub-calls that each term contains
tidy_spef <- function(x, data) {
  if (is.formula(x)) {
    x <- brmsterms(x, check_response = FALSE)$dpars$mu
  }
  form <- x[["sp"]]
  if (!is.formula(form)) {
    return(empty_data_frame())
  }
  mm <- sp_model_matrix(form, data, rename = FALSE)
  out <- data.frame(term = colnames(mm), stringsAsFactors = FALSE)
  out$coef <- rename(out$term)
  # one 'calls_*' list column per special term type plus the joint call
  calls_cols <- c(paste0("calls_", all_sp_types()), "joint_call")
  list_cols <- c("vars_mi", "idx_mi", "idx2_mi", "ids_mo", "Imo")
  for (col in c(calls_cols, list_cols)) {
    out[[col]] <- vector("list", nrow(out))
  }
  # running counter of monotonic terms across all rows
  kmo <- 0
  terms_split <- strsplit(out$term, ":")
  for (i in seq_rows(out)) {
    # prepare mo terms
    take_mo <- grepl_expr(regex_sp("mo"), terms_split[[i]])
    if (sum(take_mo)) {
      out$calls_mo[[i]] <- terms_split[[i]][take_mo]
      nmo <- length(out$calls_mo[[i]])
      out$Imo[[i]] <- (kmo + 1):(kmo + nmo)
      out$ids_mo[[i]] <- rep(NA, nmo)
      kmo <- kmo + nmo
      for (j in seq_along(out$calls_mo[[i]])) {
        mo_term <- out$calls_mo[[i]][[j]]
        mo_match <- get_matches_expr(regex_sp("mo"), mo_term)
        # the mo() call must span the whole term, exactly once
        if (length(mo_match) > 1L || nchar(mo_match) < nchar(mo_term)) {
          stop2("The monotonic term '", mo_term, "' is invalid.")
        }
        out$ids_mo[[i]][j] <- eval2(mo_term)$id
      }
    }
    # prepare me terms
    take_me <- grepl_expr(regex_sp("me"), terms_split[[i]])
    if (sum(take_me)) {
      out$calls_me[[i]] <- terms_split[[i]][take_me]
      # remove 'I' (identity) function calls that
      # were used solely to separate formula terms
      out$calls_me[[i]] <- gsub("^I\\(", "(", out$calls_me[[i]])
    }
    # prepare mi terms
    take_mi <- grepl_expr(regex_sp("mi"), terms_split[[i]])
    if (sum(take_mi)) {
      mi_parts <- terms_split[[i]][take_mi]
      out$calls_mi[[i]] <- get_matches_expr(regex_sp("mi"), mi_parts)
      out$vars_mi[[i]] <- out$idx_mi[[i]] <- rep(NA, length(out$calls_mi[[i]]))
      for (j in seq_along(out$calls_mi[[i]])) {
        # evaluate the stored mi() call to recover variable and index names
        mi_term <- eval2(out$calls_mi[[i]][[j]])
        out$vars_mi[[i]][j] <- mi_term$term
        if (mi_term$idx != "NA") {
          out$idx_mi[[i]][j] <- mi_term$idx
        }
      }
      # do it like terms_resp to ensure correct matching
      out$vars_mi[[i]] <- gsub("\\.|_", "", make.names(out$vars_mi[[i]]))
    }
    has_sp_calls <- grepl_expr(regex_sp(all_sp_types()), terms_split[[i]])
    sp_calls <- sub("^I\\(", "(", terms_split[[i]][has_sp_calls])
    out$joint_call[[i]] <- paste0(sp_calls, collapse = " * ")
    # flag rows whose term includes ordinary (non-special) covariates
    out$Ic[i] <- any(!has_sp_calls)
  }
  # extract data frame to track all required index variables
  uni_mi <- unique(data.frame(
    var = unlist(out$vars_mi),
    idx = unlist(out$idx_mi),
    stringsAsFactors = FALSE
  ))
  uni_mi$idx2 <- rep(NA, nrow(uni_mi))
  for (i in seq_rows(uni_mi)) {
    # number the distinct index variables within each mi variable
    uni_mi_sub <- subset2(uni_mi, var = uni_mi$var[i])
    uni_mi$idx2[i] <- match(uni_mi$idx[i], na.omit(uni_mi_sub$idx))
  }
  attr(out, "uni_mi") <- uni_mi
  for (i in seq_rows(out)) {
    for (j in seq_along(out$idx_mi[[i]])) {
      sub <- subset2(
        uni_mi, var = out$vars_mi[[i]][j],
        idx = out$idx_mi[[i]][j]
      )
      out$idx2_mi[[i]][j] <- sub$idx2
    }
  }
  # extract information on covariates
  not_one <- apply(mm, 2, function(x) any(x != 1))
  out$Ic <- cumsum(out$Ic | not_one)
  out
}

# extract names of monotonic simplex parameters
# @param spef output of tidy_spef
# @param use_id use the 'id' argument to construct simo labels?
# @return a character vector of length nrow(spef)
get_simo_labels <- function(spef, use_id = FALSE) {
  labels <- named_list(spef$term)
  # only rows that actually contain monotonic sub-terms get labels
  mo_rows <- which(lengths(spef$Imo) > 0)
  for (row in mo_rows) {
    default_labels <- paste0(spef$coef[row], seq_along(spef$Imo[[row]]))
    # prefer the user-supplied ID as label where one is available
    labels[[row]] <- ifelse(
      use_id & !is.na(spef$ids_mo[[row]]),
      spef$ids_mo[[row]],
      default_labels
    )
  }
  unlist(labels)
}

# standard errors of variables with missing values
get_sdy <- function(x, data = NULL) {
  stopifnot(is.brmsterms(x))
  miform <- x$adforms[["mi"]]
  if (!is.formula(miform)) {
    return(NULL)
  }
  mi <- eval_rhs(miform)
  if (mi$vars$sdy == "NA") {
    # no measurement error specified in the mi() addition term
    return(NULL)
  }
  sdy <- eval2(mi$vars$sdy, data)
  if (!is.null(sdy) && !is.numeric(sdy)) {
    stop2("Measurement error should be numeric.")
  }
  if (isTRUE(any(sdy <= 0))) {
    stop2("Measurement error should be positive.")
  }
  sdy
}

# names of grouping variables used in measurement error terms
get_me_groups <- function(x) {
  # evaluate each stored me() call and keep only non-empty group names
  terms <- lapply(get_uni_me(x), eval2)
  groups <- ufrom_list(terms, "gr")
  groups[nzchar(groups)]
}

# get the design matrix of special effects terms
# @param formula a formula containing special effects terms
# @param data data.frame passed by the user
# @param types types of special terms to consider
# @param ... passed to get_model_matrix
# @details special terms will be evaluated to 1 so that columns
#   containing not only ones are those with covariates
# @return design matrix of special effects terms and their covariates
sp_model_matrix <- function(formula, data, types = all_sp_types(), ...)
{ attributes(data)$terms <- NULL terms_split <- strsplit(all_terms(formula), split = ":") terms_unique <- unique(unlist(terms_split)) regex <- regex_sp(types) terms_replace <- terms_unique[grepl_expr(regex, terms_unique)] dummies <- paste0("dummy", seq_along(terms_replace), "__") data[dummies] <- list(1) terms_comb <- rep(NA, length(terms_split)) # loop over terms and add dummy variables for (i in seq_along(terms_split)) { replace_i <- grepl_expr(regex, terms_split[[i]]) terms_i_replace <- terms_split[[i]][replace_i] dummies_i <- dummies[match(terms_i_replace, terms_replace)] terms_split[[i]][replace_i] <- dummies_i terms_comb[i] <- paste0(terms_split[[i]], collapse = ":") } new_formula <- str2formula(terms_comb) attributes(new_formula) <- attributes(formula) out <- get_model_matrix(new_formula, data, ...) # fixes issue #1504 colnames(out) <- rm_wsp(colnames(out)) # recover original column names colnames(out) <- rename(colnames(out), dummies, terms_replace) out } # formula of variables used in special effects terms sp_fake_formula <- function(...) { dots <- c(...) 
out <- vector("list", length(dots)) for (i in seq_along(dots)) { tmp <- eval2(dots[[i]]) out[[i]] <- all_vars(c(tmp$term, tmp$sdx, tmp$gr)) } str2formula(unique(unlist(out))) } # extract an me variable get_me_values <- function(term, data) { term <- get_sp_term(term) stopifnot(is.me_term(term)) x <- as.vector(eval2(term$term, data)) if (!is.numeric(x)) { stop2("Noisy variables should be numeric.") } as.array(x) } # extract the measurement error of an me term get_me_noise <- function(term, data) { term <- get_sp_term(term) stopifnot(is.me_term(term)) sdx <- as.vector(eval2(term$sdx, data)) if (length(sdx) == 0L) { stop2("Argument 'sdx' is missing in function 'me'.") } else if (length(sdx) == 1L) { sdx <- rep(sdx, nrow(data)) } if (!is.numeric(sdx)) { stop2("Measurement error should be numeric.") } if (isTRUE(any(sdx <= 0))) { stop2("Measurement error should be positive.") } as.array(sdx) } # extract the grouping variable of an me term get_me_group <- function(term, data) { term <- get_sp_term(term) stopifnot(is.me_term(term)) as.array(eval2(term$gr, data)) } # extract mo variables get_mo_values <- function(term, data) { term <- get_sp_term(term) stopifnot(is.mo_term(term)) x <- eval2(term$term, data) if (is.ordered(x)) { # counting starts at zero max_value <- length(levels(x)) - 1 x <- as.numeric(x) - 1 } else if (all(is_wholenumber(x))) { min_value <- attr(x, "min") if (is.null(min_value)) { min_value <- min(x) } x <- x - min_value max_value <- max(x) } else { stop2( "Monotonic predictors must be integers or ordered ", "factors. Error occurred for variable '", term$term, "'." 
) } x <- as.array(x) attr(x, "max") <- max_value x } # prepare 'sp_term' objects get_sp_term <- function(term) { if (!is.sp_term(term)) { term <- eval2(as_one_character(term)) } term } # all effects which fall under the 'sp' category of brms all_sp_types <- function() { c("mo", "me", "mi") } # classes used to set up special effects terms is.sp_term <- function(x) { inherits(x, "sp_term") } is.mo_term <- function(x) { inherits(x, "mo_term") } is.me_term <- function(x) { inherits(x, "me_term") } is.mi_term <- function(x) { inherits(x, "mi_term") } brms/R/posterior_predict.R0000644000176200001440000010716014361545260015314 0ustar liggesusers#' Draws from the Posterior Predictive Distribution #' #' Compute posterior draws of the posterior predictive distribution. Can be #' performed for the data used to fit the model (posterior predictive checks) or #' for new data. By definition, these draws have higher variance than draws #' of the expected value of the posterior predictive distribution computed by #' \code{\link{posterior_epred.brmsfit}}. This is because the residual error #' is incorporated in \code{posterior_predict}. However, the estimated means of #' both methods averaged across draws should be very similar. #' #' @inheritParams prepare_predictions #' @param object An object of class \code{brmsfit}. #' @param re.form Alias of \code{re_formula}. #' @param transform (Deprecated) A function or a character string naming #' a function to be applied on the predicted responses #' before summary statistics are computed. #' @param negative_rt Only relevant for Wiener diffusion models. #' A flag indicating whether response times of responses #' on the lower boundary should be returned as negative values. #' This allows to distinguish responses on the upper and #' lower boundary. Defaults to \code{FALSE}. #' @param sort Logical. Only relevant for time series models. 
#' Indicating whether to return predicted values in the original #' order (\code{FALSE}; default) or in the order of the #' time series (\code{TRUE}). #' @param ntrys Parameter used in rejection sampling #' for truncated discrete models only #' (defaults to \code{5}). See Details for more information. #' @param cores Number of cores (defaults to \code{1}). On non-Windows systems, #' this argument can be set globally via the \code{mc.cores} option. #' @param ... Further arguments passed to \code{\link{prepare_predictions}} #' that control several aspects of data validation and prediction. #' #' @return An \code{array} of draws. In univariate models, #' the output is as an S x N matrix, where S is the number of posterior #' draws and N is the number of observations. In multivariate models, an #' additional dimension is added to the output which indexes along the #' different response variables. #' #' @template details-newdata-na #' @template details-allow_new_levels #' @details For truncated discrete models only: In the absence of any general #' algorithm to sample from truncated discrete distributions, rejection #' sampling is applied in this special case. This means that values are #' sampled until a value lies within the defined truncation boundaries. In #' practice, this procedure may be rather slow (especially in \R). Thus, we #' try to do approximate rejection sampling by sampling each value #' \code{ntrys} times and then select a valid value. If all values are #' invalid, the closest boundary is used, instead. If there are more than a #' few of these pathological cases, a warning will occur suggesting to #' increase argument \code{ntrys}. 
#' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(time | cens(censored) ~ age + sex + (1 + age || patient), #' data = kidney, family = "exponential", init = "0") #' #' ## predicted responses #' pp <- posterior_predict(fit) #' str(pp) #' #' ## predicted responses excluding the group-level effect of age #' pp <- posterior_predict(fit, re_formula = ~ (1 | patient)) #' str(pp) #' #' ## predicted responses of patient 1 for new data #' newdata <- data.frame( #' sex = factor(c("male", "female")), #' age = c(20, 50), #' patient = c(1, 1) #' ) #' pp <- posterior_predict(fit, newdata = newdata) #' str(pp) #' } #' #' @aliases posterior_predict #' @method posterior_predict brmsfit #' @importFrom rstantools posterior_predict #' @export #' @export posterior_predict posterior_predict.brmsfit <- function( object, newdata = NULL, re_formula = NULL, re.form = NULL, transform = NULL, resp = NULL, negative_rt = FALSE, ndraws = NULL, draw_ids = NULL, sort = FALSE, ntrys = 5, cores = NULL, ... ) { cl <- match.call() if ("re.form" %in% names(cl) && !missing(re.form)) { re_formula <- re.form } contains_draws(object) object <- restructure(object) prep <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = FALSE, ... ) posterior_predict( prep, transform = transform, sort = sort, ntrys = ntrys, negative_rt = negative_rt, cores = cores, summary = FALSE ) } #' @export posterior_predict.mvbrmsprep <- function(object, ...) { if (length(object$mvpars$rescor)) { object$mvpars$Mu <- get_Mu(object) object$mvpars$Sigma <- get_Sigma(object) out <- posterior_predict.brmsprep(object, ...) } else { out <- lapply(object$resps, posterior_predict, ...) along <- ifelse(length(out) > 1L, 3, 2) out <- do_call(abind, c(out, along = along)) } out } #' @export posterior_predict.brmsprep <- function(object, transform = NULL, sort = FALSE, summary = FALSE, robust = FALSE, probs = c(0.025, 0.975), cores = NULL, ...) 
{ summary <- as_one_logical(summary) cores <- validate_cores_post_processing(cores) if (is.customfamily(object$family)) { # ensure that the method can be found during parallel execution object$family$posterior_predict <- custom_family_method(object$family, "posterior_predict") } for (nlp in names(object$nlpars)) { object$nlpars[[nlp]] <- get_nlpar(object, nlpar = nlp) } for (dp in names(object$dpars)) { object$dpars[[dp]] <- get_dpar(object, dpar = dp) } pp_fun <- paste0("posterior_predict_", object$family$fun) pp_fun <- get(pp_fun, asNamespace("brms")) N <- choose_N(object) out <- plapply(seq_len(N), pp_fun, cores = cores, prep = object, ...) if (grepl("_mv$", object$family$fun)) { out <- do_call(abind, c(out, along = 3)) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- names(object$resps) } else if (has_multicol(object$family)) { out <- do_call(abind, c(out, along = 3)) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- object$cats } else { out <- do_call(cbind, out) } colnames(out) <- rownames(out) <- NULL if (use_int(object$family)) { out <- check_discrete_trunc_bounds( out, lb = object$data$lb, ub = object$data$ub ) } out <- reorder_obs(out, object$old_order, sort = sort) # transform predicted response draws before summarizing them if (!is.null(transform)) { # deprecated as of brms 2.12.3 warning2("Argument 'transform' is deprecated ", "and will be removed in the future.") out <- do_call(transform, list(out)) } attr(out, "levels") <- object$cats if (summary) { # only for compatibility with the 'predict' method if (is_ordinal(object$family)) { levels <- seq_len(max(object$data$nthres) + 1) out <- posterior_table(out, levels = levels) } else if (is_categorical(object$family)) { levels <- seq_len(object$data$ncat) out <- posterior_table(out, levels = levels) } else { out <- posterior_summary(out, probs = probs, robust = robust) } } out } #' Draws from the Posterior Predictive Distribution #' #' This method is an alias of 
\code{\link{posterior_predict.brmsfit}} #' with additional arguments for obtaining summaries of the computed draws. #' #' @inheritParams posterior_predict.brmsfit #' @param summary Should summary statistics be returned #' instead of the raw values? Default is \code{TRUE}. #' @param robust If \code{FALSE} (the default) the mean is used as #' the measure of central tendency and the standard deviation as #' the measure of variability. If \code{TRUE}, the median and the #' median absolute deviation (MAD) are applied instead. #' Only used if \code{summary} is \code{TRUE}. #' @param probs The percentiles to be computed by the \code{quantile} #' function. Only used if \code{summary} is \code{TRUE}. #' #' @return An \code{array} of predicted response values. #' If \code{summary = FALSE} the output resembles those of #' \code{\link{posterior_predict.brmsfit}}. #' #' If \code{summary = TRUE} the output depends on the family: For categorical #' and ordinal families, the output is an N x C matrix, where N is the number #' of observations, C is the number of categories, and the values are #' predicted category probabilities. For all other families, the output is a N #' x E matrix where E = \code{2 + length(probs)} is the number of summary #' statistics: The \code{Estimate} column contains point estimates (either #' mean or median depending on argument \code{robust}), while the #' \code{Est.Error} column contains uncertainty estimates (either standard #' deviation or median absolute deviation depending on argument #' \code{robust}). The remaining columns starting with \code{Q} contain #' quantile estimates as specified via argument \code{probs}. 
#' #' @seealso \code{\link{posterior_predict.brmsfit}} #' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(time | cens(censored) ~ age + sex + (1 + age || patient), #' data = kidney, family = "exponential", init = "0") #' #' ## predicted responses #' pp <- predict(fit) #' head(pp) #' #' ## predicted responses excluding the group-level effect of age #' pp <- predict(fit, re_formula = ~ (1 | patient)) #' head(pp) #' #' ## predicted responses of patient 1 for new data #' newdata <- data.frame( #' sex = factor(c("male", "female")), #' age = c(20, 50), #' patient = c(1, 1) #' ) #' predict(fit, newdata = newdata) #' } #' #' @export predict.brmsfit <- function(object, newdata = NULL, re_formula = NULL, transform = NULL, resp = NULL, negative_rt = FALSE, ndraws = NULL, draw_ids = NULL, sort = FALSE, ntrys = 5, cores = NULL, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) { contains_draws(object) object <- restructure(object) prep <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = FALSE, ... ) posterior_predict( prep, transform = transform, ntrys = ntrys, negative_rt = negative_rt, sort = sort, cores = cores, summary = summary, robust = robust, probs = probs ) } #' Predictive Intervals #' #' Compute intervals from the posterior predictive distribution. #' #' @aliases predictive_interval #' #' @param object An \R object of class \code{brmsfit}. #' @param prob A number p (0 < p < 1) indicating the desired probability mass to #' include in the intervals. Defaults to \code{0.9}. #' @param ... Further arguments passed to \code{\link{posterior_predict}}. #' #' @return A matrix with 2 columns for the lower and upper bounds of the #' intervals, respectively, and as many rows as observations being predicted. 
#'
#' @examples
#' \dontrun{
#' fit <- brm(count ~ zBase, data = epilepsy, family = poisson())
#' predictive_interval(fit)
#' }
#'
#' @importFrom rstantools predictive_interval
#' @export predictive_interval
#' @export
predictive_interval.brmsfit <- function(object, prob = 0.9, ...) {
  # draw from the posterior predictive distribution and summarize
  # the draws into central intervals of probability mass 'prob'
  out <- posterior_predict(object, ...)
  predictive_interval(out, prob = prob)
}

# validate method name to obtain posterior predictions
# @param method name of the method (aliases are mapped to canonical names)
# @return validated name of the method
validate_pp_method <- function(method) {
  method <- as_one_character(method)
  if (method %in% c("posterior_predict", "predict", "pp")) {
    method <- "posterior_predict"
  } else if (method %in% c("posterior_epred", "fitted", "pp_expect")) {
    method <- "posterior_epred"
  } else if (method %in% c("posterior_linpred")) {
    method <- "posterior_linpred"
  } else if (method %in% c("predictive_error", "residuals")) {
    method <- "predictive_error"
  } else {
    # fixed grammar of the error message ("it not" -> "is not")
    stop2("Posterior predictive method '", method, "' is not supported.")
  }
  method
}

# ------------------- family specific posterior_predict methods ---------------------
# All posterior_predict_ functions have the same arguments structure
# @param i index of the observation for which to compute pp values
# @param prep A named list returned by prepare_predictions containing
#   all required data and posterior draws
# @param ... ignored arguments
# @return A vector of length prep$ndraws containing draws
#   from the posterior predictive distribution
posterior_predict_gaussian <- function(i, prep, ntrys = 5, ...) {
  mu <- get_dpar(prep, "mu", i = i)
  sigma <- get_dpar(prep, "sigma", i = i)
  sigma <- add_sigma_se(sigma, prep, i = i)
  # rejection sampling via 'ntrys' handles truncation bounds lb/ub
  rcontinuous(
    n = prep$ndraws, dist = "norm",
    mean = mu, sd = sigma,
    lb = prep$data$lb[i], ub = prep$data$ub[i],
    ntrys = ntrys
  )
}

posterior_predict_student <- function(i, prep, ntrys = 5, ...)
{ nu <- get_dpar(prep, "nu", i = i) mu <- get_dpar(prep, "mu", i = i) sigma <- get_dpar(prep, "sigma", i = i) sigma <- add_sigma_se(sigma, prep, i = i) rcontinuous( n = prep$ndraws, dist = "student_t", df = nu, mu = mu, sigma = sigma, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_lognormal <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "lnorm", meanlog = get_dpar(prep, "mu", i = i), sdlog = get_dpar(prep, "sigma", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_shifted_lognormal <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "shifted_lnorm", meanlog = get_dpar(prep, "mu", i = i), sdlog = get_dpar(prep, "sigma", i = i), shift = get_dpar(prep, "ndt", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_skew_normal <- function(i, prep, ntrys = 5, ...) { mu <- get_dpar(prep, "mu", i = i) sigma <- get_dpar(prep, "sigma", i = i) sigma <- add_sigma_se(sigma, prep, i = i) alpha <- get_dpar(prep, "alpha", i = i) rcontinuous( n = prep$ndraws, dist = "skew_normal", mu = mu, sigma = sigma, alpha = alpha, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_gaussian_mv <- function(i, prep, ...) { Mu <- get_Mu(prep, i = i) Sigma <- get_Sigma(prep, i = i) .predict <- function(s) { rmulti_normal(1, mu = Mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_student_mv <- function(i, prep, ...) { nu <- get_dpar(prep, "nu", i = i) Mu <- get_Mu(prep, i = i) Sigma <- get_Sigma(prep, i = i) .predict <- function(s) { rmulti_student_t(1, df = nu[s], mu = Mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_gaussian_time <- function(i, prep, ...) 
{ obs <- with(prep$ac, begin_tg[i]:end_tg[i]) Jtime <- prep$ac$Jtime_tg[i, ] mu <- as.matrix(get_dpar(prep, "mu", i = obs)) Sigma <- get_cov_matrix_ac(prep, obs, Jtime = Jtime) .predict <- function(s) { rmulti_normal(1, mu = mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_student_time <- function(i, prep, ...) { obs <- with(prep$ac, begin_tg[i]:end_tg[i]) Jtime <- prep$ac$Jtime_tg[i, ] nu <- as.matrix(get_dpar(prep, "nu", i = obs)) mu <- as.matrix(get_dpar(prep, "mu", i = obs)) Sigma <- get_cov_matrix_ac(prep, obs, Jtime = Jtime) .predict <- function(s) { rmulti_student_t(1, df = nu[s, ], mu = mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_gaussian_lagsar <- function(i, prep, ...) { stopifnot(i == 1) .predict <- function(s) { M_new <- with(prep, diag(nobs) - ac$lagsar[s] * ac$Msar) mu <- as.numeric(solve(M_new) %*% mu[s, ]) Sigma <- solve(crossprod(M_new)) * sigma[s]^2 rmulti_normal(1, mu = mu, Sigma = Sigma) } mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_student_lagsar <- function(i, prep, ...) { stopifnot(i == 1) .predict <- function(s) { M_new <- with(prep, diag(nobs) - ac$lagsar[s] * ac$Msar) mu <- as.numeric(solve(M_new) %*% mu[s, ]) Sigma <- solve(crossprod(M_new)) * sigma[s]^2 rmulti_student_t(1, df = nu[s], mu = mu, Sigma = Sigma) } mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") nu <- get_dpar(prep, "nu") rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_gaussian_errorsar <- function(i, prep, ...) { stopifnot(i == 1) .predict <- function(s) { M_new <- with(prep, diag(nobs) - ac$errorsar[s] * ac$Msar) Sigma <- solve(crossprod(M_new)) * sigma[s]^2 rmulti_normal(1, mu = mu[s, ], Sigma = Sigma) } mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_student_errorsar <- function(i, prep, ...) 
{ stopifnot(i == 1) .predict <- function(s) { M_new <- with(prep, diag(nobs) - ac$errorsar[s] * ac$Msar) Sigma <- solve(crossprod(M_new)) * sigma[s]^2 rmulti_student_t(1, df = nu[s], mu = mu[s, ], Sigma = Sigma) } mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") nu <- get_dpar(prep, "nu") rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_gaussian_fcor <- function(i, prep, ...) { stopifnot(i == 1) mu <- as.matrix(get_dpar(prep, "mu")) Sigma <- get_cov_matrix_ac(prep) .predict <- function(s) { rmulti_normal(1, mu = mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_student_fcor <- function(i, prep, ...) { stopifnot(i == 1) nu <- as.matrix(get_dpar(prep, "nu")) mu <- as.matrix(get_dpar(prep, "mu")) Sigma <- get_cov_matrix_ac(prep) .predict <- function(s) { rmulti_student_t(1, df = nu[s, ], mu = mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_binomial <- function(i, prep, ntrys = 5, ...) { rdiscrete( n = prep$ndraws, dist = "binom", size = prep$data$trials[i], prob = get_dpar(prep, "mu", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_beta_binomial <- function(i, prep, ntrys = 5, ...) { rdiscrete( n = prep$ndraws, dist = "beta_binomial", size = prep$data$trials[i], mu = get_dpar(prep, "mu", i = i), phi = get_dpar(prep, "phi", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_bernoulli <- function(i, prep, ...) { mu <- get_dpar(prep, "mu", i = i) rbinom(length(mu), size = 1, prob = mu) } posterior_predict_poisson <- function(i, prep, ntrys = 5, ...) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) rdiscrete( n = prep$ndraws, dist = "pois", lambda = mu, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_negbinomial <- function(i, prep, ntrys = 5, ...) 
{ mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) shape <- get_dpar(prep, "shape", i) shape <- multiply_dpar_rate_denom(shape, prep, i = i) rdiscrete( n = prep$ndraws, dist = "nbinom", mu = mu, size = shape, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_negbinomial2 <- function(i, prep, ntrys = 5, ...) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) sigma <- get_dpar(prep, "sigma", i) shape <- multiply_dpar_rate_denom(1 / sigma, prep, i = i) rdiscrete( n = prep$ndraws, dist = "nbinom", mu = mu, size = shape, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_geometric <- function(i, prep, ntrys = 5, ...) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) shape <- 1 shape <- multiply_dpar_rate_denom(shape, prep, i = i) rdiscrete( n = prep$ndraws, dist = "nbinom", mu = mu, size = shape, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_discrete_weibull <- function(i, prep, ntrys = 5, ...) { rdiscrete( n = prep$ndraws, dist = "discrete_weibull", mu = get_dpar(prep, "mu", i = i), shape = get_dpar(prep, "shape", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_com_poisson <- function(i, prep, ntrys = 5, ...) { rdiscrete( n = prep$ndraws, dist = "com_poisson", mu = get_dpar(prep, "mu", i = i), shape = get_dpar(prep, "shape", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_exponential <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "exp", rate = 1 / get_dpar(prep, "mu", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_gamma <- function(i, prep, ntrys = 5, ...) 
{ shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i = i) / shape rcontinuous( n = prep$ndraws, dist = "gamma", shape = shape, scale = scale, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_weibull <- function(i, prep, ntrys = 5, ...) { shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i = i) / gamma(1 + 1 / shape) rcontinuous( n = prep$ndraws, dist = "weibull", shape = shape, scale = scale, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_frechet <- function(i, prep, ntrys = 5, ...) { nu <- get_dpar(prep, "nu", i = i) scale <- get_dpar(prep, "mu", i = i) / gamma(1 - 1 / nu) rcontinuous( n = prep$ndraws, dist = "frechet", scale = scale, shape = nu, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_gen_extreme_value <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "gen_extreme_value", sigma = get_dpar(prep, "sigma", i = i), xi = get_dpar(prep, "xi", i = i), mu = get_dpar(prep, "mu", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_inverse.gaussian <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "inv_gaussian", mu = get_dpar(prep, "mu", i = i), shape = get_dpar(prep, "shape", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_exgaussian <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "exgaussian", mu = get_dpar(prep, "mu", i = i), sigma = get_dpar(prep, "sigma", i = i), beta = get_dpar(prep, "beta", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_wiener <- function(i, prep, negative_rt = FALSE, ntrys = 5, ...) 
{ out <- rcontinuous( n = 1, dist = "wiener", delta = get_dpar(prep, "mu", i = i), alpha = get_dpar(prep, "bs", i = i), tau = get_dpar(prep, "ndt", i = i), beta = get_dpar(prep, "bias", i = i), types = if (negative_rt) c("q", "resp") else "q", lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) if (negative_rt) { # code lower bound responses as negative RTs out <- out[["q"]] * ifelse(out[["resp"]], 1, -1) } out } posterior_predict_beta <- function(i, prep, ntrys = 5, ...) { mu <- get_dpar(prep, "mu", i = i) phi <- get_dpar(prep, "phi", i = i) rcontinuous( n = prep$ndraws, dist = "beta", shape1 = mu * phi, shape2 = (1 - mu) * phi, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_von_mises <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "von_mises", mu = get_dpar(prep, "mu", i = i), kappa = get_dpar(prep, "kappa", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_asym_laplace <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "asym_laplace", mu = get_dpar(prep, "mu", i = i), sigma = get_dpar(prep, "sigma", i = i), quantile = get_dpar(prep, "quantile", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_zero_inflated_asym_laplace <- function(i, prep, ntrys = 5, ...) { zi <- get_dpar(prep, "zi", i = i) tmp <- runif(prep$ndraws, 0, 1) ifelse( tmp < zi, 0, rcontinuous( n = prep$ndraws, dist = "asym_laplace", mu = get_dpar(prep, "mu", i = i), sigma = get_dpar(prep, "sigma", i = i), quantile = get_dpar(prep, "quantile", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) ) } posterior_predict_cox <- function(i, prep, ...) { stop2("Cannot sample from the posterior predictive ", "distribution for family 'cox'.") } posterior_predict_hurdle_poisson <- function(i, prep, ...) 
{ # hu is the bernoulli hurdle parameter hu <- get_dpar(prep, "hu", i = i) lambda <- get_dpar(prep, "mu", i = i) ndraws <- prep$ndraws # compare with hu to incorporate the hurdle process tmp <- runif(ndraws, 0, 1) # sample from a truncated poisson distribution # by adjusting lambda and adding 1 t = -log(1 - runif(ndraws) * (1 - exp(-lambda))) ifelse(tmp < hu, 0, rpois(ndraws, lambda = lambda - t) + 1) } posterior_predict_hurdle_negbinomial <- function(i, prep, ...) { hu <- get_dpar(prep, "hu", i = i) mu <- get_dpar(prep, "mu", i = i) ndraws <- prep$ndraws tmp <- runif(ndraws, 0, 1) # sample from an approximate(!) truncated negbinomial distribution # by adjusting mu and adding 1 t = -log(1 - runif(ndraws) * (1 - exp(-mu))) shape <- get_dpar(prep, "shape", i = i) ifelse(tmp < hu, 0, rnbinom(ndraws, mu = mu - t, size = shape) + 1) } posterior_predict_hurdle_gamma <- function(i, prep, ...) { hu <- get_dpar(prep, "hu", i = i) shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i = i) / shape ndraws <- prep$ndraws tmp <- runif(ndraws, 0, 1) ifelse(tmp < hu, 0, rgamma(ndraws, shape = shape, scale = scale)) } posterior_predict_hurdle_lognormal <- function(i, prep, ...) { hu <- get_dpar(prep, "hu", i = i) mu <- get_dpar(prep, "mu", i = i) sigma <- get_dpar(prep, "sigma", i = i) ndraws <- prep$ndraws tmp <- runif(ndraws, 0, 1) ifelse(tmp < hu, 0, rlnorm(ndraws, meanlog = mu, sdlog = sigma)) } posterior_predict_hurdle_cumulative <- function(i, prep, ...) { mu <- get_dpar(prep, "mu", i = i) hu <- get_dpar(prep, "hu", i = i) disc <- get_dpar(prep, "disc", i = i) thres <- subset_thres(prep) nthres <- NCOL(thres) ndraws <- prep$ndraws p <- pordinal( seq_len(nthres + 1L), eta = mu, disc = disc, thres = thres, family = "cumulative", link = prep$family$link ) tmp <- runif(ndraws, 0, 1) ifelse( tmp < hu, 0L, first_greater(p, target = runif(prep$ndraws, min = 0, max = 1)) ) } posterior_predict_zero_inflated_beta <- function(i, prep, ...) 
{ zi <- get_dpar(prep, "zi", i = i) mu <- get_dpar(prep, "mu", i = i) phi <- get_dpar(prep, "phi", i = i) tmp <- runif(prep$ndraws, 0, 1) ifelse( tmp < zi, 0, rbeta(prep$ndraws, shape1 = mu * phi, shape2 = (1 - mu) * phi) ) } posterior_predict_zero_one_inflated_beta <- function(i, prep, ...) { zoi <- get_dpar(prep, "zoi", i) coi <- get_dpar(prep, "coi", i) mu <- get_dpar(prep, "mu", i = i) phi <- get_dpar(prep, "phi", i = i) tmp <- runif(prep$ndraws, 0, 1) one_or_zero <- runif(prep$ndraws, 0, 1) ifelse(tmp < zoi, ifelse(one_or_zero < coi, 1, 0), rbeta(prep$ndraws, shape1 = mu * phi, shape2 = (1 - mu) * phi) ) } posterior_predict_zero_inflated_poisson <- function(i, prep, ...) { # zi is the bernoulli zero-inflation parameter zi <- get_dpar(prep, "zi", i = i) lambda <- get_dpar(prep, "mu", i = i) ndraws <- prep$ndraws # compare with zi to incorporate the zero-inflation process tmp <- runif(ndraws, 0, 1) ifelse(tmp < zi, 0L, rpois(ndraws, lambda = lambda)) } posterior_predict_zero_inflated_negbinomial <- function(i, prep, ...) { zi <- get_dpar(prep, "zi", i = i) mu <- get_dpar(prep, "mu", i = i) shape <- get_dpar(prep, "shape", i = i) ndraws <- prep$ndraws tmp <- runif(ndraws, 0, 1) ifelse(tmp < zi, 0L, rnbinom(ndraws, mu = mu, size = shape)) } posterior_predict_zero_inflated_binomial <- function(i, prep, ...) { zi <- get_dpar(prep, "zi", i = i) trials <- prep$data$trials[i] prob <- get_dpar(prep, "mu", i = i) ndraws <- prep$ndraws tmp <- runif(ndraws, 0, 1) ifelse(tmp < zi, 0L, rbinom(ndraws, size = trials, prob = prob)) } posterior_predict_zero_inflated_beta_binomial <- function(i, prep, ...) { zi <- get_dpar(prep, "zi", i = i) trials <- prep$data$trials[i] mu <- get_dpar(prep, "mu", i = i) phi <- get_dpar(prep, "phi", i = i) ndraws <- prep$ndraws draws <- rbeta_binomial(ndraws, size = trials, mu = mu, phi = phi) tmp <- runif(ndraws, 0, 1) draws[tmp < zi] <- 0L draws } posterior_predict_categorical <- function(i, prep, ...) 
{ eta <- get_Mu(prep, i = i) eta <- insert_refcat(eta, refcat = prep$refcat) p <- pcategorical(seq_len(prep$data$ncat), eta = eta) first_greater(p, target = runif(prep$ndraws, min = 0, max = 1)) } posterior_predict_multinomial <- function(i, prep, ...) { eta <- get_Mu(prep, i = i) eta <- insert_refcat(eta, refcat = prep$refcat) p <- dcategorical(seq_len(prep$data$ncat), eta = eta) size <- prep$data$trials[i] rblapply(seq_rows(p), function(s) t(rmultinom(1, size, p[s, ]))) } posterior_predict_dirichlet <- function(i, prep, ...) { eta <- get_Mu(prep, i = i) eta <- insert_refcat(eta, refcat = prep$refcat) phi <- get_dpar(prep, "phi", i = i) cats <- seq_len(prep$data$ncat) alpha <- dcategorical(cats, eta = eta) * phi rdirichlet(prep$ndraws, alpha = alpha) } posterior_predict_dirichlet2 <- function(i, prep, ...) { mu <- get_Mu(prep, i = i) rdirichlet(prep$ndraws, alpha = mu) } posterior_predict_logistic_normal <- function(i, prep, ...) { mu <- get_Mu(prep, i = i) Sigma <- get_Sigma(prep, i = i, cor_name = "lncor") .predict <- function(s) { rlogistic_normal(1, mu = mu[s, ], Sigma = Sigma[s, , ], refcat = prep$refcat) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_cumulative <- function(i, prep, ...) { posterior_predict_ordinal(i = i, prep = prep) } posterior_predict_sratio <- function(i, prep, ...) { posterior_predict_ordinal(i = i, prep = prep) } posterior_predict_cratio <- function(i, prep, ...) { posterior_predict_ordinal(i = i, prep = prep) } posterior_predict_acat <- function(i, prep, ...) { posterior_predict_ordinal(i = i, prep = prep) } posterior_predict_ordinal <- function(i, prep, ...) { thres <- subset_thres(prep, i) nthres <- NCOL(thres) p <- pordinal( seq_len(nthres + 1), eta = get_dpar(prep, "mu", i = i), disc = get_dpar(prep, "disc", i = i), thres = thres, family = prep$family$family, link = prep$family$link ) first_greater(p, target = runif(prep$ndraws, min = 0, max = 1)) } posterior_predict_custom <- function(i, prep, ...) 
{ custom_family_method(prep$family, "posterior_predict")(i, prep, ...) } posterior_predict_mixture <- function(i, prep, ...) { families <- family_names(prep$family) theta <- get_theta(prep, i = i) smix <- sample_mixture_ids(theta) out <- rep(NA, prep$ndraws) for (j in seq_along(families)) { draw_ids <- which(smix == j) if (length(draw_ids)) { pp_fun <- paste0("posterior_predict_", families[j]) pp_fun <- get(pp_fun, asNamespace("brms")) tmp_prep <- pseudo_prep_for_mixture(prep, j, draw_ids) out[draw_ids] <- pp_fun(i, tmp_prep, ...) } } out } # ------------ predict helper-functions ---------------------- # random numbers from (possibly truncated) continuous distributions # @param n number of random values to generate # @param dist name of a distribution for which the functions # p, q, and r are available # @param ... additional arguments passed to the distribution functions # @param ntrys number of trys in rejection sampling for truncated models # @return vector of random values prep from the distribution rcontinuous <- function(n, dist, ..., lb = NULL, ub = NULL, ntrys = 5) { args <- list(...) 
if (is.null(lb) && is.null(ub)) { # sample as usual rdist <- paste0("r", dist) out <- do_call(rdist, c(list(n), args)) } else { # sample from truncated distribution pdist <- paste0("p", dist) qdist <- paste0("q", dist) if (!exists(pdist, mode = "function") || !exists(qdist, mode = "function")) { # use rejection sampling as CDF or quantile function are not available out <- rdiscrete(n, dist, ..., lb = lb, ub = ub, ntrys = ntrys) } else { if (is.null(lb)) lb <- -Inf if (is.null(ub)) ub <- Inf plb <- do_call(pdist, c(list(lb), args)) pub <- do_call(pdist, c(list(ub), args)) out <- runif(n, min = plb, max = pub) out <- do_call(qdist, c(list(out), args)) # infinite values may be caused by numerical imprecision out[out %in% c(-Inf, Inf)] <- NA } } out } # random numbers from (possibly truncated) discrete distributions # currently rejection sampling is used for truncated distributions # @param n number of random values to generate # @param dist name of a distribution for which the functions # p, q, and r are available # @param ... additional arguments passed to the distribution functions # @param lb optional lower truncation bound # @param ub optional upper truncation bound # @param ntrys number of trys in rejection sampling for truncated models # @return a vector of random values draws from the distribution rdiscrete <- function(n, dist, ..., lb = NULL, ub = NULL, ntrys = 5) { args <- list(...) 
rdist <- paste0("r", dist) if (is.null(lb) && is.null(ub)) { # sample as usual out <- do_call(rdist, c(list(n), args)) } else { # sample from truncated distribution via rejection sampling if (is.null(lb)) lb <- -Inf if (is.null(ub)) ub <- Inf out <- vector("list", ntrys) for (i in seq_along(out)) { # loop of the trys to prevent a mismatch between 'n' # and length of the parameter vectors passed as arguments out[[i]] <- as.vector(do_call(rdist, c(list(n), args))) } out <- do_call(cbind, out) out <- apply(out, 1, extract_valid_sample, lb = lb, ub = ub) } out } # sample from the IDs of the mixture components sample_mixture_ids <- function(theta) { stopifnot(is.matrix(theta)) mix_comp <- seq_cols(theta) ulapply(seq_rows(theta), function(s) sample(mix_comp, 1, prob = theta[s, ]) ) } # extract the first valid predicted value per Stan sample per observation # @param x draws to be check against truncation boundaries # @param lb vector of lower bounds # @param ub vector of upper bound # @return a valid truncated sample or else the closest boundary extract_valid_sample <- function(x, lb, ub) { valid <- match(TRUE, x >= lb & x <= ub) if (is.na(valid)) { # no valid truncated value found # set sample to lb or ub # 1e-10 is only to identify the invalid draws later on out <- ifelse(max(x) < lb, lb - 1e-10, ub + 1e-10) } else { out <- x[valid] } out } # check for invalid predictions of truncated discrete models # @param x matrix of predicted values # @param lb optional lower truncation bound # @param ub optional upper truncation bound # @param thres threshold (in %) of invalid values at which to warn the user # @return rounded values of 'x' check_discrete_trunc_bounds <- function(x, lb = NULL, ub = NULL, thres = 0.01) { if (is.null(lb) && is.null(ub)) { return(x) } if (is.null(lb)) lb <- -Inf if (is.null(ub)) ub <- Inf thres <- as_one_numeric(thres) # ensure correct comparison with vector bounds y <- as.vector(t(x)) pct_invalid <- mean(y < lb | y > ub, na.rm = TRUE) if 
(pct_invalid >= thres) { warning2( round(pct_invalid * 100), "% of all predicted values ", "were invalid. Increasing argument 'ntrys' may help." ) } round(x) } brms/R/projpred.R0000644000176200001440000003410714464640261013402 0ustar liggesusers#' Projection Predictive Variable Selection: Get Reference Model #' #' The \code{get_refmodel.brmsfit} method can be used to create the reference #' model structure which is needed by the \pkg{projpred} package for performing #' a projection predictive variable selection. This method is called #' automatically when performing variable selection via #' \code{\link[projpred:varsel]{varsel}} or #' \code{\link[projpred:cv_varsel]{cv_varsel}}, so you will rarely need to call #' it manually yourself. #' #' @inheritParams posterior_predict.brmsfit #' @param cvfun Optional cross-validation function #' (see \code{\link[projpred:get_refmodel]{get_refmodel}} for details). #' If \code{NULL} (the default), \code{cvfun} is defined internally #' based on \code{\link{kfold.brmsfit}}. #' @param dis Passed to argument \code{dis} of #' \code{\link[projpred:init_refmodel]{init_refmodel}}, but leave this at #' \code{NULL} unless \pkg{projpred} complains about it. #' @param latent See argument \code{latent} of #' \code{\link[projpred:extend_family]{extend_family}}. Setting this to #' \code{TRUE} requires a \pkg{projpred} version >= 2.4.0. #' @param brms_seed A seed used to infer seeds for \code{\link{kfold.brmsfit}} #' and for sampling group-level effects for new levels (in multilevel models). #' If \code{NULL}, then \code{\link{set.seed}} is not called at all. If not #' \code{NULL}, then the pseudorandom number generator (PRNG) state is reset #' (to the state before calling this function) upon exiting this function. #' @param ... Further arguments passed to #' \code{\link[projpred:init_refmodel]{init_refmodel}}. 
#' #' @details Note that the \code{extract_model_data} function used internally by #' \code{get_refmodel.brmsfit} ignores arguments \code{wrhs} and \code{orhs}. #' This is relevant for #' \code{\link[projpred:predict.refmodel]{predict.refmodel}}, for example. #' #' @return A \code{refmodel} object to be used in conjunction with the #' \pkg{projpred} package. #' #' @examples #' \dontrun{ #' # fit a simple model #' fit <- brm(count ~ zAge + zBase * Trt, #' data = epilepsy, family = poisson()) #' summary(fit) #' #' # The following code requires the 'projpred' package to be installed: #' library(projpred) #' #' # perform variable selection without cross-validation #' vs <- varsel(fit) #' summary(vs) #' plot(vs) #' #' # perform variable selection with cross-validation #' cv_vs <- cv_varsel(fit) #' summary(cv_vs) #' plot(cv_vs) #' } #' @exportS3Method projpred::get_refmodel brmsfit get_refmodel.brmsfit <- function(object, newdata = NULL, resp = NULL, cvfun = NULL, dis = NULL, latent = FALSE, brms_seed = NULL, ...) 
{ require_package("projpred") object <- restructure(object) stopifnot_resp(object, resp) resp <- validate_resp(resp, object, multiple = FALSE) formula <- formula(object) if (!is.null(resp)) { formula <- formula$forms[[resp]] } # Infer "sub-seeds": if (exists(".Random.seed", envir = .GlobalEnv)) { rng_state_old <- get(".Random.seed", envir = .GlobalEnv) } if (!is.null(brms_seed)) { if (exists(".Random.seed", envir = .GlobalEnv)) { on.exit(assign(".Random.seed", rng_state_old, envir = .GlobalEnv)) } set.seed(brms_seed) } kfold_seed <- sample.int(.Machine$integer.max, 1) refprd_seed <- sample.int(.Machine$integer.max, 1) # prepare the family object for use in projpred family <- family(object, resp = resp) if (family$family == "bernoulli") { family$family <- "binomial" } else if (family$family == "gamma") { family$family <- "Gamma" } else if (family$family == "beta") { family$family <- "Beta" } aug_data <- (is_categorical(family) || is_ordinal(family)) && !latent # For the augmented-data and the latent approach, do not re-define the family # to preserve family-specific extra arguments ("extra" meaning "additionally # to `link`") like `refcat` and `thresholds` (see ?brmsfamily): if (!aug_data && !latent) { family <- get(family$family, mode = "function")(link = family$link) } # check if the model is supported by projpred bterms <- brmsterms(formula) if (length(bterms$dpars) > 1L && !conv_cats_dpars(family)) { stop2("Projpred does not support distributional models.") } if (conv_cats_dpars(family) && length(formula$pforms)) { stop2("Projpred does not support category-specific formulas.") } if (length(bterms$nlpars) > 0L) { stop2("Projpred does not support non-linear models.") } not_ok_term_types <- setdiff(all_term_types(), c("fe", "re", "offset", "sm")) if (any(not_ok_term_types %in% names(bterms$dpars$mu))) { stop2("Projpred only supports standard multilevel and smoothing terms as ", "well as offsets.") } # only use the raw formula for selection of terms formula <- 
formula$formula # LHS should only contain the response variable formula[[2]] <- bterms$respform[[2]] # projpred requires the dispersion parameter if present if (is.null(dis) && !latent) { if (family$family == "gaussian") { dis <- paste0("sigma", usc(resp)) dis <- as.data.frame(object, variable = dis)[[dis]] } else if (family$family == "Gamma") { dis <- paste0("shape", usc(resp)) dis <- as.data.frame(object, variable = dis)[[dis]] } } # allows to handle additional arguments implicitly extract_model_data <- function(object, newdata = NULL, ...) { .extract_model_data(object, newdata = newdata, resp = resp, ...) } # The default `ref_predfun` from projpred does not set `allow_new_levels`, so # use a customized `ref_predfun` which also handles some preparations for the # augmented-data projection: ref_predfun <- function(fit, newdata = NULL) { # Setting a seed is necessary for reproducible sampling of group-level # effects for new levels: if (exists(".Random.seed", envir = .GlobalEnv)) { rng_state_old <- get(".Random.seed", envir = .GlobalEnv) on.exit(assign(".Random.seed", rng_state_old, envir = .GlobalEnv)) } set.seed(refprd_seed) lprd_args <- nlist( object = fit, newdata, resp, allow_new_levels = TRUE, sample_new_levels = "gaussian" ) if (is_ordinal(family) && !latent) { c(lprd_args) <- list(incl_thres = TRUE) } out <- do_call(posterior_linpred, lprd_args) if (length(dim(out)) == 2) { out <- t(out) } out } if (utils::packageVersion("projpred") <= "2.0.2" && NROW(object$ranef)) { warning2("In projpred versions <= 2.0.2, projpred's K-fold CV results may ", "not be reproducible for multilevel brms reference models.") } # extract a list of K-fold sub-models if (is.null(cvfun)) { cvfun <- function(folds, ...) { kfold( object, K = max(folds), save_fits = TRUE, folds = folds, seed = kfold_seed, ... 
)$fits[, "fit"] } } else { if (!is.function(cvfun)) { stop2("'cvfun' should be a function.") } } cvrefbuilder <- function(cvfit) { # For `brms_seed` in fold `cvfit$projpred_k` (= k) of K, choose a new seed # which is based on the original `brms_seed`: if (is.null(brms_seed)) { brms_seed_k <- NULL } else { brms_seed_k <- brms_seed + cvfit$projpred_k } projpred::get_refmodel(cvfit, resp = resp, dis = dis, latent = latent, brms_seed = brms_seed_k, called_from_cvrefbuilder = TRUE, ...) } # prepare data passed to projpred if (!is.null(newdata)) { warning2("Argument 'newdata' of get_refmodel.brmsfit() is deprecated and ", "will be removed in the future.") } data <- current_data( object, newdata, resp = resp, check_response = TRUE, allow_new_levels = TRUE ) attr(data, "terms") <- NULL args <- nlist( object, data, formula, family, dis, ref_predfun, cvfun, extract_model_data, cvrefbuilder, latent, ... ) if (aug_data) { c(args) <- list( augdat_link = get(paste0("link_", family$family), mode = "function"), augdat_ilink = get(paste0("inv_link_", family$family), mode = "function") ) if (is_ordinal(family)) { c(args) <- list( augdat_args_link = list(link = family$link), augdat_args_ilink = list(link = family$link) ) } } else if (latent) { require_package("projpred", "2.4.0") if (family$family == "cumulative") { args$latent_ilink <- latent_ilink_cumulative( object = object, family = family, bterms = bterms, resp = resp ) } # TODO: If requested by users, add response-scale support for more families: # For response-scale support, they all need a specific `latent_ilink` # function; some families (those for which the response can be numeric) also # require specific `latent_ll_oscale` and `latent_ppd_oscale` functions. The # binomial family (and thereby also the brms::bernoulli() family) has # response-scale support implemented natively in projpred. 
} do_call(projpred::init_refmodel, args) } # auxiliary data required in predictions via projpred # @return a named list with slots 'y', 'weights', and 'offset' .extract_model_data <- function(object, newdata = NULL, resp = NULL, extract_y = TRUE, ...) { stopifnot(is.brmsfit(object)) resp <- validate_resp(resp, object, multiple = FALSE) # extract the response variable manually instead of from make_standata # so that it passes input checks of validate_newdata later on (#1314) formula <- formula(object) if (!is.null(resp)) { formula <- formula$forms[[resp]] } bterms <- brmsterms(formula) y <- NULL if (extract_y) { data <- current_data( object, newdata, resp = resp, check_response = TRUE, allow_new_levels = TRUE, req_vars = all.vars(bterms$respform) ) y <- model.response(model.frame(bterms$respform, data, na.action = na.pass)) y <- unname(y) } # extract relevant auxiliary data (offsets and weights (or numbers of trials)) # call standata to ensure the correct format of the data # For this, we use `check_response = FALSE` and only include offsets and # weights (or numbers of trials) in `req_vars` because of issue #1457 (note # that all.vars(NULL) gives character(0), as desired). 
req_vars <- unlist(lapply(bterms$dpars, function(x) all.vars(x[["offset"]]))) req_vars <- unique(req_vars) c(req_vars) <- all.vars(bterms$adforms$weights) c(req_vars) <- all.vars(bterms$adforms$trials) args <- nlist( object, newdata, resp, allow_new_levels = TRUE, check_response = FALSE, internal = TRUE, req_vars = req_vars ) # NOTE: Missing weights don't cause an error here (see #1459) sdata <- do_call(standata, args) usc_resp <- usc(resp) N <- sdata[[paste0("N", usc_resp)]] weights <- as.vector(sdata[[paste0("weights", usc_resp)]]) trials <- as.vector(sdata[[paste0("trials", usc_resp)]]) if (is_binary(formula)) { trials <- rep(1, N) } if (!is.null(trials)) { if (!is.null(weights)) { stop2("Projpred cannot handle 'trials' and 'weights' at the same time.") } weights <- trials } if (is.null(weights)) { weights <- rep(1, N) } offset <- as.vector(sdata[[paste0("offsets", usc_resp)]]) if (is.null(offset)) { offset <- rep(0, N) } nlist(y, weights, offset) } # Construct the inverse-link function required for the latent projection in case # of the cumulative family. # # @param object See argument `object` of get_refmodel.brmsfit(), but here, the # `object` as modified inside of get_refmodel.brmsfit() is required. # @param family The `family` object corresponding to `object` (taking `resp` # into account). Could be re-inferred from `object` and `resp`, but for # computational efficiency, this is avoided. # @param bterms The `brmsterms` object corresponding to `object` (or rather # `object`'s `formula`, taking `resp` into account). Could be re-inferred from # `object` and `resp`, but for computational efficiency, this is avoided. # @param resp See argument `resp` of get_refmodel.brmsfit(), but here, the # `resp` as modified inside of get_refmodel.brmsfit() is required. # # @return A function to be supplied to projpred::extend_family()'s argument # `latent_ilink`. 
latent_ilink_cumulative <- function(object, family, bterms, resp) {
  stopifnot(!is.null(family$cats))
  draws_mat <- as_draws_matrix(object)
  # threshold draws: intercept parameters of the (possibly prefixed) response
  thres_regex <- paste0("^b", usc(combine_prefix(bterms)), "_Intercept\\[")
  thres_draws <- prepare_draws(draws_mat, variable = thres_regex, regex = TRUE)
  if (ncol(thres_draws) > length(family$cats) - 1L) {
    # more thresholds than ncats - 1 implies group-specific thresholds
    stop2("Currently, projpred does not support group-specific thresholds ",
          "(argument `gr` of resp_thres()).")
  }
  # Note: Currently, `disc` should always be constantly 1 because
  # distributional models are not allowed here.
  disc_regex <- paste0("^", "disc", resp, "$")
  disc_draws <- prepare_draws(draws_mat, variable = disc_regex, regex = TRUE)
  out <- function(lpreds, cl_ref, wdraws_ref = rep(1, length(cl_ref))) {
    # aggregate draws within projpred's clusters of posterior draws
    thres_agg <- projpred::cl_agg(thres_draws, cl = cl_ref, wdraws = wdraws_ref)
    disc_agg <- projpred::cl_agg(disc_draws, cl = cl_ref, wdraws = wdraws_ref)
    disc_agg <- as.vector(disc_agg)
    lpreds_thres <- apply(thres_agg, 2, function(thres_agg_c) {
      # Notes on dimensionalities (with S_agg = `nrow(lpreds)`):
      # * `disc_agg` is a vector of length S_agg (because `disc` is not
      #   predicted here),
      # * `thres_agg` is S_agg x C_lat (with C_lat = `ncats - 1L` =
      #   `nthres`) and thus `thres_agg_c` is a vector of length S_agg,
      # * `lpreds` is S_agg x N (with N denoting the number of (possibly
      #   new) observations (not necessarily the original number of
      #   observations)).
      disc_agg * (thres_agg_c - lpreds)
    }, simplify = FALSE)
    # Coerce to an S_agg x N x C_lat array:
    lpreds_thres <- do.call(abind, c(lpreds_thres, rev.along = 0))
    # Transform to response space, yielding an S_agg x N x C_cat array:
    return(inv_link_cumulative(lpreds_thres, link = family$link))
  }
  # Free up some memory (keeping `draws_mat` would lead to unnecessary memory
  # usage because `draws_mat` would continue to live in the environment of the
  # returned function):
  rm(draws_mat)
  out
}
brms/R/launch_shinystan.R0000644000176200001440000000365114224021465015120 0ustar liggesusers#' Interface to \pkg{shinystan}
#'
#' Provide an interface to \pkg{shinystan} for models fitted with \pkg{brms}
#'
#' @aliases launch_shinystan
#'
#' @param object A fitted model object typically of class \code{brmsfit}.
#' @param rstudio Only relevant for RStudio users.
#' The default (\code{rstudio=FALSE}) is to launch the app
#' in the default web browser rather than RStudio's pop-up Viewer.
#' Users can change the default to \code{TRUE}
#' by setting the global option \cr \code{options(shinystan.rstudio = TRUE)}.
#' @param ... Optional arguments to pass to \code{\link[shiny:runApp]{runApp}}
#'
#' @return An S4 shinystan object
#'
#' @examples
#' \dontrun{
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#'            data = inhaler, family = "gaussian")
#' launch_shinystan(fit)
#' }
#'
#' @seealso \code{\link[shinystan:launch_shinystan]{launch_shinystan}}
#'
#' @method launch_shinystan brmsfit
#' @importFrom shinystan launch_shinystan
#' @export launch_shinystan
#' @export
launch_shinystan.brmsfit <- function(
  object,
  rstudio = getOption("shinystan.rstudio"),
  ...
) { contains_draws(object) if (object$algorithm != "sampling") { return(shinystan::launch_shinystan(object$fit, rstudio = rstudio, ...)) } inc_warmup <- isTRUE(object$fit@sim$n_save[1] > niterations(object)) draws <- as.array(object, inc_warmup = inc_warmup) warmup <- if (inc_warmup) nwarmup(object) else 0 sampler_params <- rstan::get_sampler_params(object$fit, inc_warmup = inc_warmup) control <- object$fit@stan_args[[1]]$control if (is.null(control)) { max_td <- 10 } else { max_td <- control$max_treedepth if (is.null(max_td)) { max_td <- 10 } } sso <- shinystan::as.shinystan( X = draws, model_name = object$fit@model_name, warmup = warmup, sampler_params = sampler_params, max_treedepth = max_td, algorithm = "NUTS" ) shinystan::launch_shinystan(sso, rstudio = rstudio, ...) } brms/R/stan-predictor.R0000644000176200001440000023534314477021361014516 0ustar liggesusers# unless otherwise specified, functions return a named list # of Stan code snippets to be pasted together later on # generate stan code for predictor terms stan_predictor <- function(x, ...) { UseMethod("stan_predictor") } # combine effects for the predictors of a single (non-linear) parameter # @param ... arguments passed to the underlying effect-specific functions #' @export stan_predictor.btl <- function(x, ...) { out <- collapse_lists( stan_fe(x, ...), stan_thres(x, ...), stan_sp(x, ...), stan_cs(x, ...), stan_sm(x, ...), stan_gp(x, ...), stan_ac(x, ...), stan_offset(x, ...), stan_bhaz(x, ...) ) out <- stan_special_prior(x, out = out, ...) out <- stan_eta_combine(x, out = out, ...) out } # prepare Stan code for non-linear terms #' @export stan_predictor.btnl <- function(x, ...) { collapse_lists( stan_nl(x, ...), stan_thres(x, ...), stan_bhaz(x, ...), stan_ac(x, ...) ) } #' @export stan_predictor.brmsterms <- function(x, data, prior, normalize, ...) 
{
  px <- check_prefix(x)
  resp <- usc(combine_prefix(px))
  data <- subset_data(data, x)
  out <- list()
  str_add_list(out) <- stan_response(x, data = data, normalize = normalize)
  valid_dpars <- valid_dpars(x)
  args <- nlist(data, prior, normalize, nlpars = names(x$nlpars), ...)
  args$primitive <- use_glm_primitive(x)
  # generate code for all non-linear parameters first
  for (nlp in names(x$nlpars)) {
    nlp_args <- list(x$nlpars[[nlp]])
    str_add_list(out) <- do_call(stan_predictor, c(nlp_args, args))
  }
  # then handle each distributional parameter of the family
  for (dp in valid_dpars) {
    dp_terms <- x$dpars[[dp]]
    dp_comment <- stan_dpar_comments(dp, family = x$family)
    if (is.btl(dp_terms) || is.btnl(dp_terms)) {
      # distributional parameter is predicted
      str_add_list(out) <- do_call(stan_predictor, c(list(dp_terms), args))
    } else if (is.numeric(x$fdpars[[dp]]$value)) {
      # distributional parameter is fixed to constant
      if (is_mix_proportion(dp, family = x$family)) {
        # mixture proportions are handled in 'stan_mixture'
        next
      }
      dp_value <- x$fdpars[[dp]]$value
      dp_comment <- stan_comment(dp_comment)
      str_add(out$tpar_def) <- glue(
        "  real {dp}{resp} = {dp_value};{dp_comment}\n"
      )
      str_add(out$pll_args) <- glue(", real {dp}{resp}")
    } else if (is.character(x$fdpars[[dp]]$value)) {
      # distributional parameter is fixed to another distributional parameter
      if (!x$fdpars[[dp]]$value %in% valid_dpars) {
        stop2("Parameter '", x$fdpars[[dp]]$value, "' cannot be found.")
      }
      if (is_mix_proportion(dp, family = x$family)) {
        stop2("Cannot set mixture proportions to be equal.")
      }
      dp_value <- x$fdpars[[dp]]$value
      dp_comment <- stan_comment(dp_comment)
      str_add(out$tpar_def) <- glue(
        "  real {dp}{resp};{dp_comment}\n"
      )
      str_add(out$tpar_comp) <- glue(
        "  {dp}{resp} = {dp_value}{resp};\n"
      )
      str_add(out$pll_args) <- glue(", real {dp}{resp}")
    } else {
      # distributional parameter is estimated as a scalar
      if (is_mix_proportion(dp, family = x$family)) {
        # mixture proportions are handled in 'stan_mixture'
        next
      }
      prefix <- ""
      if (dp %in% valid_dpars(x, type = "tmp")) {
        # some parameters are fully computed only after the model is run
        prefix <- "tmp_"
        dp_comment <- paste0(dp_comment, " (temporary)")
      }
      str_add_list(out) <- stan_prior(
        prior, dp, prefix = prefix, suffix = resp,
        header_type = "real", px = px,
        comment = dp_comment, normalize = normalize
      )
    }
  }
  str_add_list(out) <- stan_dpar_transform(
    x, prior = prior, normalize = normalize, ...
  )
  str_add_list(out) <- stan_mixture(
    x, data = data, prior = prior, normalize = normalize, ...
  )
  out$model_log_lik <- stan_log_lik(x, data = data, normalize = normalize, ...)
  list(out)
}

#' @export
stan_predictor.mvbrmsterms <- function(x, prior, threads, normalize, ...) {
  # generate code of each univariate submodel first
  out <- lapply(x$terms, stan_predictor, prior = prior, threads = threads,
                normalize = normalize, ...)
  out <- unlist(out, recursive = FALSE)
  if (!x$rescor) {
    # without residual correlations, submodels remain independent
    return(out)
  }
  resp_type <- out[[1]]$resp_type
  out <- collapse_lists(ls = out)
  out$resp_type <- "vector"
  adforms <- from_list(x$terms, "adforms")
  adnames <- unique(ulapply(adforms, names))
  adallowed <- c("se", "weights", "mi")
  if (!all(adnames %in% adallowed)) {
    stop2("Only ", collapse_comma(adallowed), " are supported ",
          "addition arguments when 'rescor' is estimated.")
  }
  # we already know at this point that all families are identical
  family <- family_names(x)[1]
  stopifnot(family %in% c("gaussian", "student"))
  resp <- x$responses
  nresp <- length(resp)
  str_add(out$model_def) <- glue(
    "  // multivariate predictor array\n",
    "  array[N] vector[nresp] Mu;\n"
  )
  str_add(out$model_comp_mvjoin) <- glue(
    "    Mu[n] = {stan_vector(glue('mu_{resp}[n]'))};\n"
  )
  str_add(out$data) <- glue(
    "  int nresp;  // number of responses\n",
    "  int nrescor;  // number of residual correlations\n"
  )
  str_add(out$pll_args) <- glue(", data int nresp")
  str_add(out$tdata_def) <- glue(
    "  array[N] vector[nresp] Y;  // response array\n"
  )
  str_add(out$tdata_comp) <- glue(
    "  for (n in 1:N) {{\n",
    "    Y[n] = {stan_vector(glue('Y_{resp}[n]'))};\n",
    "  }}\n"
  )
  str_add(out$pll_args) <- ", data vector[] Y"
  if (any(adnames %in% "weights")) {
    str_add(out$tdata_def) <- glue(
      "  // weights of the pointwise log-likelihood\n",
      "  vector[N] weights = weights_{resp[1]};\n"
    )
    str_add(out$pll_args) <- glue(", data vector weights")
  }
  miforms <- rmNULL(from_list(adforms, "mi"))
  if (length(miforms)) {
    str_add(out$model_no_pll_def) <- "  vector[nresp] Yl[N] = Y;\n"
    str_add(out$pll_args) <- ", vector[] Yl"
    for (i in seq_along(miforms)) {
      j <- match(names(miforms)[i], resp)
      # needs to happen outside of reduce_sum
      # to maintain consistency of indexing Yl
      str_add(out$model_no_pll_comp_mvjoin) <- glue(
        "    Yl[n][{j}] = Yl_{resp[j]}[n];\n"
      )
    }
  }
  str_add_list(out) <- stan_prior(
    prior, class = "Lrescor",
    type = "cholesky_factor_corr[nresp]", header_type = "matrix",
    comment = "parameters for multivariate linear models",
    normalize = normalize
  )
  if (family == "student") {
    str_add_list(out) <- stan_prior(
      prior, class = "nu", header_type = "real",
      normalize = normalize
    )
  }
  sigma <- ulapply(x$terms, stan_sigma_transform, threads = threads)
  if (any(grepl(stan_nn_regex(), sigma))) {
    # observation-specific sigmas require per-observation covariance factors
    str_add(out$model_def) <- "  array[N] vector[nresp] sigma;\n"
    str_add(out$model_comp_mvjoin) <- glue(
      "    sigma[n] = {stan_vector(sigma)};\n"
    )
    if (family == "gaussian") {
      str_add(out$model_def) <- glue(
        "  // cholesky factor of residual covariance matrix\n",
        "  array[N] matrix[nresp, nresp] LSigma;\n"
      )
      str_add(out$model_comp_mvjoin) <- glue(
        "    LSigma[n] = diag_pre_multiply(sigma[n], Lrescor);\n"
      )
    } else if (family == "student") {
      str_add(out$model_def) <- glue(
        "  // residual covariance matrix\n",
        "  array[N] matrix[nresp, nresp] Sigma;\n"
      )
      str_add(out$model_comp_mvjoin) <- glue(
        "    Sigma[n] = multiply_lower_tri_self_transpose(",
        "diag_pre_multiply(sigma[n], Lrescor));\n"
      )
    }
  } else {
    str_add(out$model_def) <- glue(
      "  vector[nresp] sigma = {stan_vector(sigma)};\n"
    )
    if (family == "gaussian") {
      str_add(out$model_def) <- glue(
        "  // cholesky factor of residual covariance matrix\n",
        "  matrix[nresp, nresp] LSigma = ",
        "diag_pre_multiply(sigma, Lrescor);\n"
      )
    } else if (family == "student") {
      str_add(out$model_def) <- glue(
        "  // residual covariance matrix\n",
        "  matrix[nresp, nresp] Sigma = ",
        "multiply_lower_tri_self_transpose(",
        "diag_pre_multiply(sigma, Lrescor));\n"
      )
    }
  }
  str_add(out$gen_def) <- glue(
    "  // residual correlations\n",
    "  corr_matrix[nresp] Rescor",
    " = multiply_lower_tri_self_transpose(Lrescor);\n",
    "  vector[nrescor] rescor;\n"
  )
  str_add(out$gen_comp) <- stan_cor_gen_comp("rescor", "nresp")
  out$model_comp_mvjoin <- paste0(
    "  // combine univariate parameters\n",
    "  for (n in 1:N) {\n",
    stan_nn_def(threads),
    out$model_comp_mvjoin,
    "  }\n"
  )
  if (isTRUE(nzchar(out$model_no_pll_comp_mvjoin))) {
    out$model_no_pll_comp_mvjoin <- paste0(
      "  // combine univariate parameters\n",
      "  for (n in 1:N) {\n",
      out$model_no_pll_comp_mvjoin,
      "  }\n"
    )
  }
  out$model_log_lik <- stan_log_lik(
    x, threads = threads, normalize = normalize, ...
  )
  list(out)
}

# Stan code for population-level effects
stan_fe <- function(bterms, data, prior, stanvars, threads, primitive,
                    normalize, ...) {
  out <- list()
  family <- bterms$family
  fixef <- colnames(data_fe(bterms, data)$X)
  sparse <- is_sparse(bterms$fe)
  decomp <- get_decomp(bterms$fe)
  if (length(fixef) < 2L) {
    # decompositions require at least two predictors
    decomp <- "none"
  }
  center_X <- stan_center_X(bterms)
  ct <- str_if(center_X, "c")
  # remove the intercept from the design matrix?
  if (center_X) {
    fixef <- setdiff(fixef, "Intercept")
  }
  px <- check_prefix(bterms)
  p <- usc(combine_prefix(px))
  resp <- usc(px$resp)
  lpdf <- stan_lpdf_name(normalize)
  if (length(fixef)) {
    str_add(out$data) <- glue(
      "  int K{p};",
      "  // number of population-level effects\n",
      "  matrix[N{resp}, K{p}] X{p};",
      "  // population-level design matrix\n"
    )
    if (decomp == "none") {
      str_add(out$pll_args) <- glue(", data matrix X{ct}{p}")
    }
    if (sparse) {
      if (decomp != "none") {
        stop2("Cannot use ", decomp, " decomposition for sparse matrices.")
      }
      if (use_threading(threads)) {
        stop2("Cannot use threading and sparse matrices at the same time.")
      }
      # store the design matrix in compressed sparse row (CSR) format
      str_add(out$tdata_def) <- glue(
        "  // sparse matrix representation of X{p}\n",
        "  vector[rows(csr_extract_w(X{p}))] wX{p}",
        " = csr_extract_w(X{p});\n",
        "  int vX{p}[size(csr_extract_v(X{p}))]",
        " = csr_extract_v(X{p});\n",
        "  int uX{p}[size(csr_extract_u(X{p}))]",
        " = csr_extract_u(X{p});\n"
      )
    }
    # prepare population-level coefficients
    b_type <- glue("vector[K{ct}{p}]")
    has_special_prior <- has_special_prior(prior, bterms, class = "b")
    if (decomp == "none") {
      if (has_special_prior) {
        str_add_list(out) <- stan_prior_non_centered(
          suffix = p, suffix_K = ct, normalize = normalize
        )
      } else {
        str_add_list(out) <- stan_prior(
          prior, class = "b", coef = fixef, type = b_type,
          px = px, suffix = p, header_type = "vector",
          comment = "regression coefficients", normalize = normalize
        )
      }
    } else {
      stopifnot(decomp == "QR")
      stopif_prior_bound(prior, class = "b", ls = px)
      if (has_special_prior) {
        str_add_list(out) <- stan_prior_non_centered(
          suffix = p, suffix_class = "Q", suffix_K = ct,
          normalize = normalize
        )
      } else {
        str_add_list(out) <- stan_prior(
          prior, class = "b", coef = fixef, type = b_type, px = px,
          suffix = glue("Q{p}"), header_type = "vector",
          comment = "regression coefficients on QR scale",
          normalize = normalize
        )
      }
      # map the QR-scale coefficients back to the original scale
      str_add(out$gen_def) <- glue(
        "  // obtain the actual coefficients\n",
        "  vector[K{ct}{p}] b{p} = XR{p}_inv * bQ{p};\n"
      )
    }
  }
  order_intercepts <- order_intercepts(bterms)
  if (order_intercepts && !center_X) {
    stop2(
      "Identifying mixture components via ordering requires ",
      "population-level intercepts to be present.\n",
      "Try setting order = 'none' in function 'mixture'."
    )
  }
  if (center_X) {
    # centering the design matrix improves convergence
    sub_X_means <- ""
    if (length(fixef)) {
      str_add(out$data) <- glue(
        "  int Kc{p};",
        "  // number of population-level effects after centering\n"
      )
      sub_X_means <- glue(" - dot_product(means_X{p}, b{p})")
      if (is_ordinal(family)) {
        # ordinal models keep all columns; intercepts live in the thresholds
        str_add(out$tdata_def) <- glue(
          "  matrix[N{resp}, Kc{p}] Xc{p};",
          "  // centered version of X{p}\n",
          "  vector[Kc{p}] means_X{p};",
          "  // column means of X{p} before centering\n"
        )
        str_add(out$tdata_comp) <- glue(
          "  for (i in 1:K{p}) {{\n",
          "    means_X{p}[i] = mean(X{p}[, i]);\n",
          "    Xc{p}[, i] = X{p}[, i] - means_X{p}[i];\n",
          "  }}\n"
        )
      } else {
        # skip column 1 (the intercept) when centering
        str_add(out$tdata_def) <- glue(
          "  matrix[N{resp}, Kc{p}] Xc{p};",
          "  // centered version of X{p} without an intercept\n",
          "  vector[Kc{p}] means_X{p};",
          "  // column means of X{p} before centering\n"
        )
        str_add(out$tdata_comp) <- glue(
          "  for (i in 2:K{p}) {{\n",
          "    means_X{p}[i - 1] = mean(X{p}[, i]);\n",
          "    Xc{p}[, i - 1] = X{p}[, i] - means_X{p}[i - 1];\n",
          "  }}\n"
        )
      }
    }
    if (!is_ordinal(family)) {
      # intercepts of ordinal models are handled in 'stan_thres'
      intercept_type <- "real"
      if (order_intercepts) {
        # identify mixtures via ordering of the intercepts
        dp_id <- dpar_id(px$dpar)
        str_add(out$tpar_def) <- glue(
          "  // identify mixtures via ordering of the intercepts\n",
          "  real Intercept{p} = ordered_Intercept{resp}[{dp_id}];\n"
        )
        str_add(out$pll_args) <- glue(", real Intercept{p}")
        # intercept parameter needs to be defined outside of 'stan_prior'
        intercept_type <- ""
      }
      str_add(out$eta) <- glue(" + Intercept{p}")
      str_add(out$gen_def) <- glue(
        "  // actual population-level intercept\n",
        "  real b{p}_Intercept = Intercept{p}{sub_X_means};\n"
      )
      str_add_list(out) <- stan_prior(
        prior, class = "Intercept", type = intercept_type,
        suffix = p, px = px, header_type = "real",
        comment = "temporary intercept for centered predictors",
        normalize = normalize
      )
    }
  }
  if (decomp == "QR") {
    str_add(out$tdata_def) <- glue(
      "  // matrices for QR decomposition\n",
      "  matrix[N{resp}, K{ct}{p}] XQ{p};\n",
      "  matrix[K{ct}{p}, K{ct}{p}] XR{p};\n",
      "  matrix[K{ct}{p}, K{ct}{p}] XR{p}_inv;\n"
    )
    str_add(out$tdata_comp) <- glue(
      "  // compute and scale QR decomposition\n",
      "  XQ{p} = qr_thin_Q(X{ct}{p}) * sqrt(N{resp} - 1);\n",
      "  XR{p} = qr_thin_R(X{ct}{p}) / sqrt(N{resp} - 1);\n",
      "  XR{p}_inv = inverse(XR{p});\n"
    )
    str_add(out$pll_args) <- glue(", data matrix XQ{p}")
  }
  str_add(out$eta) <- stan_eta_fe(fixef, bterms, threads, primitive)
  out
}

# Stan code for group-level effects
stan_re <- function(ranef, prior, normalize, ...) {
  lpdf <- ifelse(normalize, "lpdf", "lupdf")
  IDs <- unique(ranef$id)
  out <- list()
  # special handling of student-t group effects as their 'df' parameters
  # are defined on a per-group basis instead of a per-ID basis
  tranef <- get_dist_groups(ranef, "student")
  if (has_rows(tranef)) {
    str_add(out$par) <-
      "  // parameters for student-t distributed group-level effects\n"
    for (i in seq_rows(tranef)) {
      g <- usc(tranef$ggn[i])
      id <- tranef$id[i]
      str_add_list(out) <- stan_prior(
        prior, class = "df", group = tranef$group[i],
        suffix = g, normalize = normalize
      )
      str_add(out$par) <- glue(
        "  vector[N_{id}] udf{g};\n"
      )
      str_add(out$model_prior) <- glue(
        "  target += inv_chi_square_{lpdf}(udf{g} | df{g});\n"
      )
      # separate definition from computation to support fixed parameters
      str_add(out$tpar_def) <- glue(
        "  vector[N_{id}] dfm{g};\n"
      )
      str_add(out$tpar_comp) <- glue(
        "  dfm{g} = sqrt(df{g} * udf{g});\n"
      )
    }
  }
  # the ID syntax requires group-level effects to be evaluated separately
  tmp <- lapply(IDs, .stan_re, ranef = ranef, prior = prior,
                normalize = normalize, ...)
  out <- collapse_lists(ls = c(list(out), tmp))
  out
}

# Stan code for group-level effects per ID
# @param id the ID of the grouping factor
# @param ranef output of tidy_ranef
# @param prior object of class brmsprior
.stan_re <- function(id, ranef, prior, threads, normalize, ...) {
  lpdf <- ifelse(normalize, "lpdf", "lupdf")
  out <- list()
  r <- subset2(ranef, id = id)
  has_cov <- nzchar(r$cov[1])
  has_by <- nzchar(r$by[[1]])
  Nby <- seq_along(r$bylevels[[1]])
  ng <- seq_along(r$gcall[[1]]$groups)
  px <- check_prefix(r)
  uresp <- usc(unique(px$resp))
  idp <- paste0(r$id, usc(combine_prefix(px)))
  # define data needed for group-level effects
  str_add(out$data) <- glue(
    "  // data for group-level effects of ID {id}\n",
    "  int N_{id};  // number of grouping levels\n",
    "  int M_{id};  // number of coefficients per level\n"
  )
  if (r$gtype[1] == "mm") {
    # multi-membership terms need indicators and weights per member
    for (res in uresp) {
      str_add(out$data) <- cglue(
        "  array[N{res}] int J_{id}{res}_{ng};",
        "  // grouping indicator per observation\n",
        "  array[N{res}] real W_{id}{res}_{ng};",
        "  // multi-membership weights\n"
      )
      str_add(out$pll_args) <- cglue(
        ", data int[] J_{id}{res}_{ng}, data real[] W_{id}{res}_{ng}"
      )
    }
  } else {
    str_add(out$data) <- cglue(
      "  array[N{uresp}] int J_{id}{uresp};",
      "  // grouping indicator per observation\n"
    )
    str_add(out$pll_args) <- cglue(
      ", data int[] J_{id}{uresp}"
    )
  }
  if (has_by) {
    str_add(out$data) <- glue(
      "  int Nby_{id};  // number of by-factor levels\n",
      "  array[N_{id}] int Jby_{id};",
      "  // by-factor indicator per observation\n"
    )
  }
  if (has_cov) {
    str_add(out$data) <- glue(
      "  matrix[N_{id}, N_{id}] Lcov_{id};",
      "  // cholesky factor of known covariance matrix\n"
    )
  }
  J <- seq_rows(r)
  # special effects ('sp') terms carry their own predictor values
  reqZ <- !r$type %in% "sp"
  if (any(reqZ)) {
    str_add(out$data) <- "  // group-level predictor values\n"
    if (r$gtype[1] == "mm") {
      for (i in which(reqZ)) {
        str_add(out$data) <- cglue(
          "  vector[N{usc(r$resp[i])}] Z_{idp[i]}_{r$cn[i]}_{ng};\n"
        )
        str_add(out$pll_args) <- cglue(
          ", data vector Z_{idp[i]}_{r$cn[i]}_{ng}"
        )
      }
    } else {
      str_add(out$data) <- cglue(
        "  vector[N{usc(r$resp[reqZ])}] Z_{idp[reqZ]}_{r$cn[reqZ]};\n"
      )
      str_add(out$pll_args) <- cglue(
        ", data vector Z_{idp[reqZ]}_{r$cn[reqZ]}"
      )
    }
  }
  # define standard deviation parameters
  has_special_prior <- has_special_prior(prior, px, class = "sd")
  if (has_by) {
    if (has_special_prior) {
      stop2("Special priors on class 'sd' are not yet compatible ",
            "with the 'by' argument.")
    }
    str_add_list(out) <- stan_prior(
      prior, class = "sd", group = r$group[1], coef = r$coef,
      type = glue("matrix[M_{id}, Nby_{id}]"),
      coef_type = glue("row_vector[Nby_{id}]"),
      suffix = glue("_{id}"), px = px, broadcast = "matrix",
      comment = "group-level standard deviations",
      normalize = normalize
    )
  } else {
    if (has_special_prior) {
      if (stan_has_multiple_base_priors(px)) {
        stop2("Special priors on class 'sd' are not yet compatible with ",
              "group-level coefficients correlated across formulas.")
      }
      str_add(out$tpar_def) <- glue(
        "  vector[M_{id}] sd_{id};  // group-level standard deviations\n"
      )
    } else {
      str_add_list(out) <- stan_prior(
        prior, class = "sd", group = r$group[1], coef = r$coef,
        type = glue("vector[M_{id}]"), suffix = glue("_{id}"), px = px,
        comment = "group-level standard deviations",
        normalize = normalize
      )
    }
  }
  # define group-level coefficients
  dfm <- ""
  tr <- get_dist_groups(r, "student")
  if (nrow(r) > 1L && r$cor[1]) {
    # multiple correlated group-level effects
    str_add(out$data) <- glue(
      "  int NC_{id};  // number of group-level correlations\n"
    )
    str_add(out$par) <- glue(
      "  matrix[M_{id}, N_{id}] z_{id};",
      "  // standardized group-level effects\n"
    )
    str_add(out$model_prior) <- glue(
      "  target += std_normal_{lpdf}(to_vector(z_{id}));\n"
    )
    if (has_rows(tr)) {
      dfm <- glue("rep_matrix(dfm_{tr$ggn[1]}, M_{id}) .* ")
    }
    if (has_by) {
      # one correlation (cholesky) matrix per by-factor level
      str_add_list(out) <- stan_prior(
        prior, class = "L", group = r$group[1], coef = Nby,
        type = glue("cholesky_factor_corr[M_{id}]"),
        coef_type = glue("cholesky_factor_corr[M_{id}]"),
        suffix = glue("_{id}"), dim = glue("[Nby_{id}]"),
        comment = "cholesky factor of correlation matrix",
        normalize = normalize
      )
      # separate definition from computation to support fixed parameters
      str_add(out$tpar_def) <- glue(
        "  matrix[N_{id}, M_{id}] r_{id};  // actual group-level effects\n"
      )
      if (has_cov) {
        rdef <- glue(
          "scale_r_cor_by_cov(z_{id}, sd_{id}, L_{id}, Jby_{id}, Lcov_{id})"
        )
      } else {
        rdef <- glue("scale_r_cor_by(z_{id}, sd_{id}, L_{id}, Jby_{id})")
      }
      str_add(out$tpar_comp) <- glue(
        "  // compute actual group-level effects\n",
        "  r_{id} = {dfm}{rdef};\n"
      )
      str_add(out$gen_def) <- cglue(
        "  // compute group-level correlations\n",
        "  corr_matrix[M_{id}] Cor_{id}_{Nby}",
        " = multiply_lower_tri_self_transpose(L_{id}[{Nby}]);\n",
        "  vector[NC_{id}] cor_{id}_{Nby};\n"
      )
      str_add(out$gen_comp) <- stan_cor_gen_comp(
        glue("cor_{id}_{Nby}"), glue("M_{id}")
      )
    } else {
      str_add_list(out) <- stan_prior(
        prior, class = "L", group = r$group[1], suffix = usc(id),
        type = glue("cholesky_factor_corr[M_{id}]"),
        comment = "cholesky factor of correlation matrix",
        normalize = normalize
      )
      if (has_cov) {
        rdef <- glue("scale_r_cor_cov(z_{id}, sd_{id}, L_{id}, Lcov_{id})")
      } else {
        rdef <- glue("scale_r_cor(z_{id}, sd_{id}, L_{id})")
      }
      # separate definition from computation to support fixed parameters
      str_add(out$tpar_def) <- glue(
        "  matrix[N_{id}, M_{id}] r_{id};  // actual group-level effects\n"
      )
      str_add(out$tpar_comp) <- glue(
        "  // compute actual group-level effects\n",
        "  r_{id} = {dfm}{rdef};\n"
      )
      str_add(out$gen_def) <- glue(
        "  // compute group-level correlations\n",
        "  corr_matrix[M_{id}] Cor_{id}",
        " = multiply_lower_tri_self_transpose(L_{id});\n",
        "  vector[NC_{id}] cor_{id};\n"
      )
      str_add(out$gen_comp) <- stan_cor_gen_comp(
        cor = glue("cor_{id}"), ncol = glue("M_{id}")
      )
    }
    # separate definition from computation to support fixed parameters
    str_add(out$tpar_def) <-
      "  // using vectors speeds up indexing in loops\n"
    str_add(out$tpar_def) <- cglue(
      "  vector[N_{id}] r_{idp}_{r$cn};\n"
    )
    str_add(out$tpar_comp) <- cglue(
      "  r_{idp}_{r$cn} = r_{id}[, {J}];\n"
    )
    str_add(out$pll_args) <- cglue(
      ", vector r_{idp}_{r$cn}"
    )
  } else {
    # single or uncorrelated group-level effects
    str_add(out$par) <- glue(
      "  array[M_{id}] vector[N_{id}] z_{id};",
      "  // standardized group-level effects\n"
    )
    str_add(out$model_prior) <- cglue(
      "  target += std_normal_{lpdf}(z_{id}[{seq_rows(r)}]);\n"
    )
    Lcov <- str_if(has_cov, glue("Lcov_{id} * "))
    if (has_rows(tr)) {
      dfm <- glue("dfm_{tr$ggn[1]} .* ")
    }
    if (has_by) {
      # separate definition from computation to support fixed parameters
      str_add(out$tpar_def) <- cglue(
        "  vector[N_{id}] r_{idp}_{r$cn};  // actual group-level effects\n"
      )
      str_add(out$tpar_comp) <- cglue(
        "  r_{idp}_{r$cn} = {dfm}(transpose(sd_{id}[{J}, Jby_{id}])",
        " .* ({Lcov}z_{id}[{J}]));\n"
      )
    } else {
      # separate definition from computation to support fixed parameters
      str_add(out$tpar_def) <- cglue(
        "  vector[N_{id}] r_{idp}_{r$cn};  // actual group-level effects\n"
      )
      str_add(out$tpar_comp) <- cglue(
        "  r_{idp}_{r$cn} = {dfm}(sd_{id}[{J}] * ({Lcov}z_{id}[{J}]));\n"
      )
    }
    str_add(out$pll_args) <- cglue(
      ", vector r_{idp}_{r$cn}"
    )
  }
  out
}

# Stan code of smooth terms
stan_sm <- function(bterms, data, prior, threads, normalize, ...)
{
  lpdf <- ifelse(normalize, "lpdf", "lupdf")
  out <- list()
  smef <- tidy_smef(bterms, data)
  if (!NROW(smef)) {
    return(out)
  }
  px <- check_prefix(bterms)
  p <- usc(combine_prefix(px))
  resp <- usc(px$resp)
  slice <- stan_slice(threads)
  Xs_names <- attr(smef, "Xs_names")
  if (length(Xs_names)) {
    # unpenalized (fixed) part of the spline basis
    str_add(out$data) <- glue(
      "  // data for splines\n",
      "  int Ks{p};  // number of linear effects\n",
      "  matrix[N{resp}, Ks{p}] Xs{p};",
      "  // design matrix for the linear effects\n"
    )
    str_add(out$pll_args) <- glue(", data matrix Xs{p}")
    if (has_special_prior(prior, px, class = "b")) {
      str_add_list(out) <- stan_prior_non_centered(
        suffix = glue("s{p}"), normalize = normalize
      )
    } else {
      str_add_list(out) <- stan_prior(
        prior, class = "b", coef = Xs_names,
        type = glue("vector[Ks{p}]"), suffix = glue("s{p}"),
        header_type = "vector", px = px,
        comment = "unpenalized spline coefficients",
        normalize = normalize
      )
    }
    str_add(out$eta) <- glue(" + Xs{p}{slice} * bs{p}")
  }
  for (i in seq_rows(smef)) {
    if (smef$nbases[[i]] == 0) {
      next  # no penalized spline components present
    }
    pi <- glue("{p}_{i}")
    nb <- seq_len(smef$nbases[[i]])
    str_add(out$data) <- glue(
      "  // data for spline {i}\n",
      "  int nb{pi};  // number of bases\n",
      "  array[nb{pi}] int knots{pi};  // number of knots\n"
    )
    str_add(out$data) <- "  // basis function matrices\n"
    str_add(out$data) <- cglue(
      "  matrix[N{resp}, knots{pi}[{nb}]] Zs{pi}_{nb};\n"
    )
    str_add(out$pll_args) <- cglue(", data matrix Zs{pi}_{nb}")
    str_add(out$par) <- glue(
      "  // parameters for spline {i}\n"
    )
    str_add(out$par) <- cglue(
      "  // standardized penalized spline coefficients\n",
      "  vector[knots{pi}[{nb}]] zs{pi}_{nb};\n"
    )
    if (has_special_prior(prior, px, class = "sds")) {
      str_add(out$tpar_def) <- glue(
        "  // SDs of penalized spline coefficients\n",
        "  vector[nb{pi}] sds{pi};\n"
      )
      str_add(out$prior_global_scales) <- glue(" sds{pi}")
      str_add(out$prior_global_lengths) <- glue(" nb{pi}")
    } else {
      str_add_list(out) <- stan_prior(
        prior, class = "sds", coef = smef$term[i], suffix = pi, px = px,
        type = glue("vector[nb{pi}]"), coef_type = glue("vector[nb{pi}]"),
        comment = "SDs of penalized spline coefficients",
        normalize = normalize
      )
    }
    # separate definition from computation to support fixed parameters
    str_add(out$tpar_def) <- cglue(
      "  // penalized spline coefficients\n",
      "  vector[knots{pi}[{nb}]] s{pi}_{nb};\n"
    )
    str_add(out$tpar_special_prior) <- cglue(
      "  // compute penalized spline coefficients\n",
      "  s{pi}_{nb} = sds{pi}[{nb}] * zs{pi}_{nb};\n"
    )
    str_add(out$pll_args) <- cglue(", vector s{pi}_{nb}")
    str_add(out$model_prior) <- cglue(
      "  target += std_normal_{lpdf}(zs{pi}_{nb});\n"
    )
    str_add(out$eta) <- cglue(
      " + Zs{pi}_{nb}{slice} * s{pi}_{nb}"
    )
  }
  out
}

# Stan code for category specific effects
# @note not implemented for non-linear models
stan_cs <- function(bterms, data, prior, ranef, threads, normalize, ...) {
  out <- list()
  csef <- colnames(get_model_matrix(bterms$cs, data))
  px <- check_prefix(bterms)
  p <- usc(combine_prefix(px))
  resp <- usc(bterms$resp)
  slice <- stan_slice(threads)
  ranef <- subset2(ranef, type = "cs", ls = px)
  if (length(csef)) {
    # population-level category specific effects
    str_add(out$data) <- glue(
      "  int Kcs{p};  // number of category specific effects\n",
      "  matrix[N{resp}, Kcs{p}] Xcs{p};  // category specific design matrix\n"
    )
    str_add(out$pll_args) <- glue(", data matrix Xcs{p}")
    str_add_list(out) <- stan_prior(
      prior, class = "b", coef = csef,
      type = glue("matrix[Kcs{p}, nthres{resp}]"),
      coef_type = glue("row_vector[nthres{resp}]"),
      suffix = glue("cs{p}"), px = px, broadcast = "matrix",
      header_type = "matrix",
      comment = "category specific effects",
      normalize = normalize
    )
    str_add(out$model_def) <- glue(
      "  // linear predictor for category specific effects\n",
      "  matrix[N{resp}, nthres{resp}] mucs{p} = Xcs{p}{slice} * bcs{p};\n"
    )
  }
  if (nrow(ranef)) {
    if (!length(csef)) {
      # only group-level category specific effects present
      str_add(out$model_def) <- glue(
        "  // linear predictor for category specific effects\n",
        "  matrix[N{resp}, nthres{resp}] mucs{p}",
        " = rep_matrix(0, N{resp}, nthres{resp});\n"
      )
    }
    n <- stan_nn(threads)
    # extract the threshold index from coefficient names like 'x[2]'
    thres_regex <- "(?<=\\[)[[:digit:]]+(?=\\]$)"
    thres <- get_matches(thres_regex, ranef$coef, perl = TRUE)
    nthres <- max(as.numeric(thres))
    # build one line of the observation loop per threshold
    mucs_loop <- ""
    for (i in seq_len(nthres)) {
      r_cat <- ranef[grepl(glue("\\[{i}\\]$"), ranef$coef), ]
      str_add(mucs_loop) <- glue(
        "    mucs{p}[n, {i}] = mucs{p}[n, {i}]"
      )
      for (id in unique(r_cat$id)) {
        r <- r_cat[r_cat$id == id, ]
        rpx <- check_prefix(r)
        idp <- paste0(r$id, usc(combine_prefix(rpx)))
        idresp <- paste0(r$id, usc(rpx$resp))
        str_add(mucs_loop) <- cglue(
          " + r_{idp}_{r$cn}[J_{idresp}{n}] * Z_{idp}_{r$cn}{n}"
        )
      }
      str_add(mucs_loop) <- ";\n"
    }
    str_add(out$model_comp_eta_loop) <- glue(
      "  for (n in 1:N{resp}) {{\n",
      stan_nn_def(threads), mucs_loop,
      "  }\n"
    )
  }
  out
}

# Stan code for special effects
stan_sp <- function(bterms, data, prior, stanvars, ranef, meef, threads,
                    normalize, ...) {
  out <- list()
  spef <- tidy_spef(bterms, data)
  if (!nrow(spef)) {
    return(out)
  }
  px <- check_prefix(bterms)
  p <- usc(combine_prefix(px))
  resp <- usc(px$resp)
  lpdf <- stan_lpdf_name(normalize)
  n <- stan_nn(threads)
  ranef <- subset2(ranef, type = "sp", ls = px)
  spef_coef <- rename(spef$term)
  invalid_coef <- setdiff(ranef$coef, spef_coef)
  if (length(invalid_coef)) {
    stop2(
      "Special group-level terms require corresponding ",
      "population-level terms:\nOccured for ",
      collapse_comma(invalid_coef)
    )
  }
  # prepare Stan code of the linear predictor component
  for (i in seq_rows(spef)) {
    eta <- spef$joint_call[[i]]
    if (!is.null(spef$calls_mo[[i]])) {
      # replace mo() calls by their Stan implementation
      new_mo <- glue("mo(simo{p}_{spef$Imo[[i]]}, Xmo{p}_{spef$Imo[[i]]}{n})")
      eta <- rename(eta, spef$calls_mo[[i]], new_mo)
    }
    if (!is.null(spef$calls_me[[i]])) {
      # replace me() terms by the corresponding latent variables
      Kme <- seq_along(meef$term)
      Ime <- match(meef$grname, unique(meef$grname))
      nme <- ifelse(nzchar(meef$grname), glue("[Jme_{Ime}{n}]"), n)
      new_me <- glue("Xme_{Kme}{nme}")
      eta <- rename(eta, meef$term, new_me)
    }
    if (!is.null(spef$calls_mi[[i]])) {
      # replace mi() terms by the latent response values
      is_na_idx <- is.na(spef$idx2_mi[[i]])
      idx_mi <- glue("[idxl{p}_{spef$vars_mi[[i]]}_{spef$idx2_mi[[i]]}{n}]")
      idx_mi <- ifelse(is_na_idx, n, idx_mi)
      new_mi <- glue("Yl_{spef$vars_mi[[i]]}{idx_mi}")
      eta <- rename(eta, spef$calls_mi[[i]], new_mi)
      str_add(out$pll_args) <- glue(", vector Yl_{spef$vars_mi[[i]]}")
    }
    if (spef$Ic[i] > 0) {
      # multiply with an ordinary covariate if present
      str_add(eta) <- glue(" * Csp{p}_{spef$Ic[i]}{n}")
    }
    r <- subset2(ranef, coef = spef_coef[i])
    rpars <- str_if(nrow(r), cglue(" + {stan_eta_rsp(r)}"))
    str_add(out$loopeta) <- glue(" + (bsp{p}[{i}]{rpars}) * {eta}")
  }
  # prepare general Stan code
  ncovars <- max(spef$Ic)
  str_add(out$data) <- glue(
    "  int Ksp{p};  // number of special effects terms\n"
  )
  if (ncovars > 0L) {
    str_add(out$data) <- "  // covariates of special effects terms\n"
    str_add(out$data) <- cglue(
      "  vector[N{resp}] Csp{p}_{seq_len(ncovars)};\n"
    )
    str_add(out$pll_args) <- cglue(", data vector Csp{p}_{seq_len(ncovars)}")
  }
  # include special Stan code for monotonic effects
  which_Imo <- which(lengths(spef$Imo) > 0)
  if (any(which_Imo)) {
    str_add(out$data) <- glue(
      "  int Imo{p};  // number of monotonic variables\n",
      "  array[Imo{p}] int Jmo{p};  // length of simplexes\n"
    )
    ids <- unlist(spef$ids_mo)
    lpdf <- stan_lpdf_name(normalize)
    for (i in which_Imo) {
      for (k in seq_along(spef$Imo[[i]])) {
        j <- spef$Imo[[i]][[k]]
        id <- spef$ids_mo[[i]][[k]]
        # index of first ID appearance
        j_id <- match(id, ids)
        str_add(out$data) <- glue(
          "  array[N{resp}] int Xmo{p}_{j};  // monotonic variable\n"
        )
        str_add(out$pll_args) <- glue(
          ", int[] Xmo{p}_{j}, vector simo{p}_{j}"
        )
        if (is.na(id) || j_id == j) {
          # no ID or first appearance of the ID
          str_add(out$data) <- glue(
            "  vector[Jmo{p}[{j}]] con_simo{p}_{j};",
            "  // prior concentration of monotonic simplex\n"
          )
          str_add(out$par) <- glue(
            "  simplex[Jmo{p}[{j}]] simo{p}_{j};  // monotonic simplex\n"
          )
          str_add(out$tpar_prior) <- glue(
            "  lprior += dirichlet_{lpdf}(simo{p}_{j} | con_simo{p}_{j});\n"
          )
        } else {
          # use the simplex shared across all terms of the same ID
          str_add(out$tpar_def) <- glue(
            "  simplex[Jmo{p}[{j}]] simo{p}_{j} = simo{p}_{j_id};\n"
          )
        }
      }
    }
  }
  # include special Stan code for missing value terms
  uni_mi <- na.omit(attr(spef, "uni_mi"))
  for (j in seq_rows(uni_mi)) {
    idxl <- glue("idxl{p}_{uni_mi$var[j]}_{uni_mi$idx2[j]}")
    str_add(out$data) <- glue(
      "  array[N{resp}] int {idxl};  // matching indices\n"
    )
    str_add(out$pll_args) <- glue(", data int[] {idxl}")
  }
  # prepare special effects coefficients
  if (has_special_prior(prior, bterms, class = "b")) {
    stopif_prior_bound(prior, class = "b", ls = px)
    str_add_list(out) <- stan_prior_non_centered(
      suffix = glue("sp{p}"), normalize = normalize
    )
  } else {
    str_add_list(out) <- stan_prior(
      prior, class = "b", coef = spef$coef,
      type = glue("vector[Ksp{p}]"), px = px,
      suffix = glue("sp{p}"), header_type = "vector",
      comment = "special effects coefficients", normalize = normalize
    )
  }
  out
}

# Stan code for latent gaussian processes
stan_gp <- function(bterms, data, prior, threads, normalize, ...) {
  lpdf <- stan_lpdf_name(normalize)
  out <- list()
  px <- check_prefix(bterms)
  p <- usc(combine_prefix(px))
  resp <- usc(px$resp)
  slice <- stan_slice(threads)
  gpef <- tidy_gpef(bterms, data)
  # kernel methods cannot simply be split up into partial sums
  for (i in seq_rows(gpef)) {
    pi <- glue("{p}_{i}")
    byvar <- gpef$byvars[[i]]
    cons <- gpef$cons[[i]]
    byfac <- length(cons) > 0L
    bynum <- !is.null(byvar) && !byfac
    k <- gpef$k[i]
    is_approx <- !isNA(k)
    iso <- gpef$iso[i]
    gr <- gpef$gr[i]
    sfx1 <- gpef$sfx1[[i]]
    sfx2 <- gpef$sfx2[[i]]
    str_add(out$data) <- glue(
      "  // data related to GPs\n",
      "  int Kgp{pi};",
      "  // number of sub-GPs (equal to 1 unless 'by' was used)\n",
      "  int Dgp{pi};  // GP dimension\n"
    )
    if (is_approx) {
      str_add(out$data) <- glue(
        "  // number of basis functions of an approximate GP\n",
        "  int NBgp{pi};\n"
      )
    }
    if (has_special_prior(prior, px, class = "sdgp")) {
      str_add(out$tpar_def) <- glue(
        "  vector[Kgp{pi}] sdgp{pi};  // GP standard deviation parameters\n"
      )
      str_add(out$prior_global_scales) <- glue(" sdgp{pi}")
      str_add(out$prior_global_lengths) <- glue(" Kgp{pi}")
    } else {
      str_add_list(out) <- stan_prior(
        prior, class = "sdgp", coef = sfx1, px = px, suffix = pi,
        type = glue("vector[Kgp{pi}]"), coef_type = glue("vector[Kgp{pi}]"),
        comment = "GP standard deviation parameters",
        normalize = normalize
      )
    }
    if (gpef$iso[i]) {
      # isotropic GP: one shared length-scale across dimensions
      lscale_type <- "vector[1]"
      lscale_dim <- glue("[Kgp{pi}]")
      lscale_comment <- "GP length-scale parameters"
    } else {
      # non-isotropic GP: one length-scale per dimension
      lscale_type <- glue("vector[Dgp{pi}]")
      lscale_dim <- glue("[Kgp{pi}]")
      lscale_comment <- "GP length-scale parameters"
    }
    if (byfac) {
      J <- seq_along(cons)
      Ngp <- glue("Ngp{pi}")
      Nsubgp <- glue("N", str_if(gr, "sub"), glue("gp{pi}"))
      Igp <- glue("Igp{pi}_{J}")
      str_add(out$data) <- glue(
        "  // number of observations relevant for a certain sub-GP\n",
        "  array[Kgp{pi}] int {Ngp};\n"
      )
      str_add(out$data) <-
        "  // indices and contrasts of sub-GPs per observation\n"
      str_add(out$data) <- cglue(
        "  array[{Ngp}[{J}]] int {Igp};\n",
        "  vector[{Ngp}[{J}]] Cgp{pi}_{J};\n"
      )
      str_add(out$pll_args) <- cglue(
        ", data int[] {Igp}, data vector Cgp{pi}_{J}"
      )
      str_add_list(out) <- stan_prior(
        prior, class = "lscale", coef = sfx2,
        type = lscale_type, dim = lscale_dim, suffix = glue("{pi}"),
        px = px, comment = lscale_comment, normalize = normalize
      )
      if (gr) {
        str_add(out$data) <- glue(
          "  // number of latent GP groups\n",
          "  array[Kgp{pi}] int Nsubgp{pi};\n"
        )
        str_add(out$data) <- cglue(
          "  // indices of latent GP groups per observation\n",
          "  array[{Ngp}[{J}]] int Jgp{pi}_{J};\n"
        )
        str_add(out$pll_args) <- cglue(", data int[] Jgp{pi}_{J}")
      }
      if (is_approx) {
        str_add(out$data) <-
          "  // approximate GP basis matrices and eigenvalues\n"
        str_add(out$data) <- cglue(
          "  matrix[{Nsubgp}[{J}], NBgp{pi}] Xgp{pi}_{J};\n",
          "  array[NBgp{pi}] vector[Dgp{pi}] slambda{pi}_{J};\n"
        )
        str_add(out$par) <- "  // latent variables of the GP\n"
        str_add(out$par) <- cglue(
          "  vector[NBgp{pi}] zgp{pi}_{J};\n"
        )
        str_add(out$model_no_pll_def) <-
          "  // scale latent variables of the GP\n"
str_add(out$model_no_pll_def) <- cglue( " vector[NBgp{pi}] rgp{pi}_{J} = sqrt(spd_cov_exp_quad(", "slambda{pi}_{J}, sdgp{pi}[{J}], lscale{pi}[{J}])) .* zgp{pi}_{J};\n" ) gp_call <- glue("Xgp{pi}_{J} * rgp{pi}_{J}") } else { # exact GPs str_add(out$data) <- " // covariates of the GP\n" str_add(out$data) <- cglue( " array[{Nsubgp}[{J}]] vector[Dgp{pi}] Xgp{pi}_{J};\n" ) str_add(out$par) <- " // latent variables of the GP\n" str_add(out$par) <- cglue( " vector[{Nsubgp}[{J}]] zgp{pi}_{J};\n" ) gp_call <- glue( "gp(Xgp{pi}_{J}, sdgp{pi}[{J}], lscale{pi}[{J}], zgp{pi}_{J})" ) } slice2 <- "" Igp_sub <- Igp if (use_threading(threads)) { str_add(out$model_comp_basic) <- cglue( " int which_gp{pi}_{J}[size_range({Igp}, start, end)] =", " which_range({Igp}, start, end);\n" ) slice2 <- glue("[which_gp{pi}_{J}]") Igp_sub <- glue("start_at_one({Igp}{slice2}, start)") } # TODO: add all GP elements to 'eta' at the same time? eta <- combine_prefix(px, keep_mu = TRUE, nlp = TRUE) eta <- glue("{eta}[{Igp_sub}]") str_add(out$model_no_pll_def) <- cglue( " vector[{Nsubgp}[{J}]] gp_pred{pi}_{J} = {gp_call};\n" ) str_add(out$pll_args) <- cglue(", vector gp_pred{pi}_{J}") Cgp <- glue("Cgp{pi}_{J}{slice2} .* ") Jgp <- str_if(gr, glue("[Jgp{pi}_{J}{slice2}]"), slice) str_add(out$model_comp_basic) <- cglue( " {eta} += {Cgp}gp_pred{pi}_{J}{Jgp};\n" ) str_add(out$model_prior) <- cglue( "{tp()}std_normal_{lpdf}(zgp{pi}_{J});\n" ) } else { # no by-factor variable str_add_list(out) <- stan_prior( prior, class = "lscale", coef = sfx2, type = lscale_type, dim = lscale_dim, suffix = glue("{pi}"), px = px, comment = lscale_comment, normalize = normalize ) Nsubgp <- glue("N{resp}") if (gr) { Nsubgp <- glue("Nsubgp{pi}") str_add(out$data) <- glue( " // number of latent GP groups\n", " int {Nsubgp};\n", " // indices of latent GP groups per observation\n", " array[N{resp}] int Jgp{pi};\n" ) str_add(out$pll_args) <- glue(", data int[] Jgp{pi}") } Cgp <- "" if (bynum) { str_add(out$data) <- glue( " // 
numeric by-variable of the GP\n", " vector[N{resp}] Cgp{pi};\n" ) str_add(out$pll_args) <- glue(", data vector Cgp{pi}") Cgp <- glue("Cgp{pi}{slice} .* ") } if (is_approx) { str_add(out$data) <- glue( " // approximate GP basis matrices\n", " matrix[{Nsubgp}, NBgp{pi}] Xgp{pi};\n", " // approximate GP eigenvalues\n", " array[NBgp{pi}] vector[Dgp{pi}] slambda{pi};\n" ) str_add(out$par) <- glue( " vector[NBgp{pi}] zgp{pi}; // latent variables of the GP\n" ) str_add(out$model_no_pll_def) <- glue( " // scale latent variables of the GP\n", " vector[NBgp{pi}] rgp{pi} = sqrt(spd_cov_exp_quad(", "slambda{pi}, sdgp{pi}[1], lscale{pi}[1])) .* zgp{pi};\n" ) if (gr) { # grouping prevents GPs to be computed efficiently inside reduce_sum str_add(out$model_no_pll_def) <- glue( " vector[{Nsubgp}] gp_pred{pi} = Xgp{pi} * rgp{pi};\n" ) str_add(out$eta) <- glue(" + {Cgp}gp_pred{pi}[Jgp{pi}{slice}]") str_add(out$pll_args) <- glue(", vector gp_pred{pi}") } else { # efficient computation of approx GPs inside reduce_sum is possible str_add(out$model_def) <- glue( " vector[N{resp}] gp_pred{pi} = Xgp{pi}{slice} * rgp{pi};\n" ) str_add(out$eta) <- glue(" + {Cgp}gp_pred{pi}") str_add(out$pll_args) <- glue(", data matrix Xgp{pi}, vector rgp{pi}") } } else { # exact GPs str_add(out$data) <- glue( " array[{Nsubgp}] vector[Dgp{pi}] Xgp{pi}; // covariates of the GP\n" ) str_add(out$par) <- glue( " vector[{Nsubgp}] zgp{pi}; // latent variables of the GP\n" ) gp_call <- glue("gp(Xgp{pi}, sdgp{pi}[1], lscale{pi}[1], zgp{pi})") # exact GPs are kernel based methods which # need to be computed outside of reduce_sum str_add(out$model_no_pll_def) <- glue( " vector[{Nsubgp}] gp_pred{pi} = {gp_call};\n" ) Jgp <- str_if(gr, glue("[Jgp{pi}{slice}]"), slice) str_add(out$eta) <- glue(" + {Cgp}gp_pred{pi}{Jgp}") str_add(out$pll_args) <- glue(", vector gp_pred{pi}") } str_add(out$model_prior) <- glue( "{tp()}std_normal_{lpdf}(zgp{pi});\n" ) } } out } # Stan code for the linear predictor of autocorrelation terms 
stan_ac <- function(bterms, data, prior, threads, normalize, ...) { lpdf <- stan_lpdf_name(normalize) out <- list() px <- check_prefix(bterms) p <- usc(combine_prefix(px)) resp <- usc(px$resp) n <- stan_nn(threads) slice <- stan_slice(threads) has_natural_residuals <- has_natural_residuals(bterms) has_ac_latent_residuals <- has_ac_latent_residuals(bterms) acef <- tidy_acef(bterms) if (has_ac_latent_residuals) { # families that do not have natural residuals require latent # residuals for residual-based autocor structures err_msg <- "Latent residuals are not implemented" if (is.btnl(bterms)) { stop2(err_msg, " for non-linear models.") } str_add(out$par) <- glue( " vector[N{resp}] zerr{p}; // unscaled residuals\n" ) if (has_special_prior(prior, px, class = "sderr")) { str_add(out$tpar_def) <- glue( " real sderr{p}; // SD of residuals\n" ) str_add(out$prior_global_scales) <- glue(" sderr{p}") str_add(out$prior_global_lengths) <- glue(" 1") } else { str_add_list(out) <- stan_prior( prior, class = "sderr", px = px, suffix = p, comment = "SD of residuals", normalize = normalize ) } str_add(out$tpar_def) <- glue( " vector[N{resp}] err{p}; // actual residuals\n" ) str_add(out$pll_args) <- glue(", vector err{p}") str_add(out$model_prior) <- glue( " target += std_normal_{lpdf}(zerr{p});\n" ) str_add(out$eta) <- glue(" + err{p}{slice}") } # validity of the autocor terms has already been checked in 'tidy_acef' acef_arma <- subset2(acef, class = "arma") if (NROW(acef_arma)) { if (use_threading(threads) && (!acef_arma$cov || has_natural_residuals)) { stop2("Threading is not supported for this ARMA model.") } str_add(out$data) <- glue( " // data needed for ARMA correlations\n", " int Kar{p}; // AR order\n", " int Kma{p}; // MA order\n" ) str_add(out$tdata_def) <- glue( " int max_lag{p} = max(Kar{p}, Kma{p});\n" ) if (!acef_arma$cov) { err_msg <- "Please set cov = TRUE in ARMA structures" if (is.formula(bterms$adforms$se)) { stop2(err_msg, " when including known standard errors.") 
} str_add(out$data) <- glue( " // number of lags per observation\n", " array[N{resp}] int J_lag{p};\n" ) str_add(out$model_def) <- glue( " // matrix storing lagged residuals\n", " matrix[N{resp}, max_lag{p}] Err{p}", " = rep_matrix(0, N{resp}, max_lag{p});\n" ) if (has_natural_residuals) { str_add(out$model_def) <- glue( " vector[N{resp}] err{p}; // actual residuals\n" ) Y <- str_if(is.formula(bterms$adforms$mi), "Yl", "Y") comp_err <- glue(" err{p}[n] = {Y}{p}[n] - mu{p}[n];\n") } else { if (acef_arma$q > 0) { # AR and MA structures cannot be distinguished when # using a single vector of latent residuals stop2("Please set cov = TRUE when modeling MA structures ", "for this family.") } str_add(out$tpar_comp) <- glue( " // compute ctime-series residuals\n", " err{p} = sderr{p} * zerr{p};\n" ) comp_err <- "" } add_ar <- str_if(acef_arma$p > 0, glue(" mu{p}[n] += Err{p}[n, 1:Kar{p}] * ar{p};\n") ) add_ma <- str_if(acef_arma$q > 0, glue(" mu{p}[n] += Err{p}[n, 1:Kma{p}] * ma{p};\n") ) str_add(out$model_comp_arma) <- glue( " // include ARMA terms\n", " for (n in 1:N{resp}) {{\n", add_ma, comp_err, " for (i in 1:J_lag{p}[n]) {{\n", " Err{p}[n + 1, i] = err{p}[n + 1 - i];\n", " }}\n", add_ar, " }}\n" ) } if (acef_arma$p > 0) { if (has_special_prior(prior, px, class = "ar")) { if (acef_arma$cov) { stop2("Cannot use shrinkage priors on 'ar' if cov = TRUE.") } str_add_list(out) <- stan_prior_non_centered( class = "ar", suffix = p, suffix_K = "ar" ) } else { str_add_list(out) <- stan_prior( prior, class = "ar", px = px, suffix = p, coef = seq_along(acef_arma$p), type = glue("vector[Kar{p}]"), header_type = "vector", comment = "autoregressive coefficients", normalize = normalize ) } } if (acef_arma$q > 0) { if (has_special_prior(prior, px, class = "ma")) { if (acef_arma$cov) { stop2("Cannot use shrinkage priors on 'ma' if cov = TRUE.") } str_add_list(out) <- stan_prior_non_centered( class = "ma", suffix = p, suffix_K = "ma" ) } else { str_add_list(out) <- stan_prior( prior, 
class = "ma", px = px, suffix = p, coef = seq_along(acef_arma$q), type = glue("vector[Kma{p}]"), header_type = "vector", comment = "moving-average coefficients", normalize = normalize ) } } } acef_cosy <- subset2(acef, class = "cosy") if (NROW(acef_cosy)) { # compound symmetry correlation structure # most code is shared with ARMA covariance models str_add_list(out) <- stan_prior( prior, class = "cosy", px = px, suffix = p, comment = "compound symmetry correlation", normalize = normalize ) } acef_unstr <- subset2(acef, class = "unstr") if (NROW(acef_unstr)) { # unstructured correlation matrix # most code is shared with ARMA covariance models # define prior on the Cholesky scale to consistency across # autocorrelation structures str_add_list(out) <- stan_prior( prior, class = "Lcortime", px = px, suffix = p, type = glue("cholesky_factor_corr[n_unique_t{p}]"), header_type = "matrix", comment = "cholesky factor of unstructured autocorrelation matrix", normalize = normalize ) } acef_time_cov <- subset2(acef, dim = "time", cov = TRUE) if (NROW(acef_time_cov)) { # use correlation structures in covariance matrix parameterization # optional for ARMA models and obligatory for COSY and UNSTR models # can only model one covariance structure at a time stopifnot(NROW(acef_time_cov) == 1) if (use_threading(threads)) { stop2("Threading is not supported for covariance-based autocorrelation models.") } str_add(out$data) <- glue( " // see the functions block for details\n", " int N_tg{p};\n", " array[N_tg{p}] int begin_tg{p};\n", " array[N_tg{p}] int end_tg{p};\n", " array[N_tg{p}] int nobs_tg{p};\n" ) str_add(out$pll_args) <- glue( ", int[] begin_tg{p}, int[] end_tg{p}, int[] nobs_tg{p}" ) str_add(out$tdata_def) <- glue( " int max_nobs_tg{p} = max(nobs_tg{p});", " // maximum dimension of the autocorrelation matrix\n" ) if (acef_time_cov$class == "unstr") { # unstructured time-covariances require additional data and cannot # be represented directly via Cholesky factors due to 
potentially # different time subsets str_add(out$data) <- glue( " array[N_tg{p}, max(nobs_tg{p})] int Jtime_tg{p};\n", " int n_unique_t{p}; // total number of unique time points\n", " int n_unique_cortime{p}; // number of unique correlations\n" ) str_add(out$pll_args) <- glue(", int[,] Jtime_tg{p}") if (has_ac_latent_residuals) { str_add(out$tpar_comp) <- glue( " // compute correlated time-series residuals\n", " err{p} = scale_time_err_flex(", "zerr{p}, sderr{p}, Lcortime{p}, nobs_tg{p}, begin_tg{p}, end_tg{p}, Jtime_tg{p});\n" ) } str_add(out$gen_def) <- glue( " // compute group-level correlations\n", " corr_matrix[n_unique_t{p}] Cortime{p}", " = multiply_lower_tri_self_transpose(Lcortime{p});\n", " vector[n_unique_cortime{p}] cortime{p};\n" ) str_add(out$gen_comp) <- stan_cor_gen_comp( glue("cortime{p}"), glue("n_unique_t{p}") ) } else { # all other time-covariance structures can be represented directly # through Cholesky factors of the correlation matrix if (acef_time_cov$class == "arma") { if (acef_time_cov$p > 0 && acef_time_cov$q == 0) { cor_fun <- "ar1" cor_args <- glue("ar{p}[1]") } else if (acef_time_cov$p == 0 && acef_time_cov$q > 0) { cor_fun <- "ma1" cor_args <- glue("ma{p}[1]") } else { cor_fun <- "arma1" cor_args <- glue("ar{p}[1], ma{p}[1]") } } else if (acef_time_cov$class == "cosy") { cor_fun <- "cosy" cor_args <- glue("cosy{p}") } str_add(out$tpar_def) <- glue( " // cholesky factor of the autocorrelation matrix\n", " matrix[max_nobs_tg{p}, max_nobs_tg{p}] Lcortime{p};\n" ) str_add(out$pll_args) <- glue(", matrix Lcortime{p}") str_add(out$tpar_comp) <- glue( " // compute residual covariance matrix\n", " Lcortime{p} = cholesky_cor_{cor_fun}({cor_args}, max_nobs_tg{p});\n" ) if (has_ac_latent_residuals) { str_add(out$tpar_comp) <- glue( " // compute correlated time-series residuals\n", " err{p} = scale_time_err(", "zerr{p}, sderr{p}, Lcortime{p}, nobs_tg{p}, begin_tg{p}, end_tg{p});\n" ) } } } acef_sar <- subset2(acef, class = "sar") if 
(NROW(acef_sar)) { if (!has_natural_residuals) { stop2("SAR terms are not implemented for this family.") } if (use_threading(threads)) { stop2("Threading is not supported for SAR models.") } str_add(out$data) <- glue( " matrix[N{resp}, N{resp}] Msar{p}; // spatial weight matrix\n", " vector[N{resp}] eigenMsar{p}; // eigenvalues of Msar{p}\n" ) str_add(out$tdata_def) <- glue( " // the eigenvalues define the boundaries of the SAR correlation\n", " real min_eigenMsar{p} = min(eigenMsar{p});\n", " real max_eigenMsar{p} = max(eigenMsar{p});\n" ) if (acef_sar$type == "lag") { str_add_list(out) <- stan_prior( prior, class = "lagsar", px = px, suffix = p, comment = "lag-SAR correlation parameter", normalize = normalize ) } else if (acef_sar$type == "error") { str_add_list(out) <- stan_prior( prior, class = "errorsar", px = px, suffix = p, comment = "error-SAR correlation parameter", normalize = normalize ) } } acef_car <- subset2(acef, class = "car") if (NROW(acef_car)) { if (is.btnl(bterms)) { stop2("CAR terms are not implemented for non-linear models.") } str_add(out$data) <- glue( " // data for the CAR structure\n", " int Nloc{p};\n", " array[N{resp}] int Jloc{p};\n", " int Nedges{p};\n", " array[Nedges{p}] int edges1{p};\n", " array[Nedges{p}] int edges2{p};\n" ) if (has_special_prior(prior, px, class = "sdcar")) { str_add(out$tpar_def) <- glue( " real sdcar{p}; // SD of the CAR structure\n" ) str_add(out$prior_global_scales) <- glue(" sdcar{p}") str_add(out$prior_global_lengths) <- glue(" 1") } else { str_add_list(out) <- stan_prior( prior, class = "sdcar", px = px, suffix = p, comment = "SD of the CAR structure", normalize = normalize ) } str_add(out$pll_args) <- glue(", vector rcar{p}, data int[] Jloc{p}") str_add(out$loopeta) <- glue(" + rcar{p}[Jloc{p}{n}]") if (acef_car$type %in% c("escar", "esicar")) { str_add(out$data) <- glue( " vector[Nloc{p}] Nneigh{p};\n", " vector[Nloc{p}] eigenMcar{p};\n" ) } if (acef_car$type == "escar") { str_add(out$par) <- glue( " 
vector[Nloc{p}] rcar{p};\n" ) str_add_list(out) <- stan_prior( prior, class = "car", px = px, suffix = p, normalize = normalize ) car_args <- c( "car", "sdcar", "Nloc", "Nedges", "Nneigh", "eigenMcar", "edges1", "edges2" ) car_args <- paste0(car_args, p, collapse = ", ") str_add(out$model_prior) <- glue( " target += sparse_car_lpdf(\n", " rcar{p} | {car_args}\n", " );\n" ) } else if (acef_car$type == "esicar") { str_add(out$par) <- glue( " vector[Nloc{p} - 1] zcar{p};\n" ) str_add(out$tpar_def) <- glue( " vector[Nloc{p}] rcar{p};\n" ) str_add(out$tpar_comp) <- glue( " // sum-to-zero constraint\n", " rcar[1:(Nloc{p} - 1)] = zcar{p};\n", " rcar[Nloc{p}] = - sum(zcar{p});\n" ) car_args <- c( "sdcar", "Nloc", "Nedges", "Nneigh", "eigenMcar", "edges1", "edges2" ) car_args <- paste0(car_args, p, collapse = ", ") str_add(out$model_prior) <- glue( " target += sparse_icar_lpdf(\n", " rcar{p} | {car_args}\n", " );\n" ) } else if (acef_car$type %in% "icar") { # intrinsic car based on the case study of Mitzi Morris # http://mc-stan.org/users/documentation/case-studies/icar_stan.html str_add(out$par) <- glue( " // parameters for the ICAR structure\n", " vector[Nloc{p}] zcar{p};\n" ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- glue( " // scaled parameters for the ICAR structure\n", " vector[Nloc{p}] rcar{p};\n" ) str_add(out$tpar_comp) <- glue( " // compute scaled parameters for the ICAR structure\n", " rcar{p} = zcar{p} * sdcar{p};\n" ) str_add(out$model_prior) <- glue( " // improper prior on the spatial CAR component\n", " target += -0.5 * dot_self(zcar{p}[edges1{p}] - zcar{p}[edges2{p}]);\n", " // soft sum-to-zero constraint\n", " target += normal_{lpdf}(sum(zcar{p}) | 0, 0.001 * Nloc{p});\n" ) } else if (acef_car$type == "bym2") { # BYM2 car based on the case study of Mitzi Morris # http://mc-stan.org/users/documentation/case-studies/icar_stan.html str_add(out$data) <- glue( " // scaling factor of the spatial CAR component\n", 
" real car_scale{p};\n" ) str_add(out$par) <- glue( " // parameters for the BYM2 structure\n", " vector[Nloc{p}] zcar{p}; // spatial part\n", " vector[Nloc{p}] nszcar{p}; // non-spatial part\n", " // proportion of variance in the spatial part\n" ) str_add_list(out) <- stan_prior( prior, class = "rhocar", px = px, suffix = p, normalize = normalize ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- glue( " // scaled parameters for the BYM2 structure\n", " vector[Nloc{p}] rcar{p};\n" ) str_add(out$tpar_comp) <- glue( " // join the spatial and the non-spatial CAR component\n", " rcar{p} = (sqrt(1 - rhocar{p}) * nszcar{p}", " + sqrt(rhocar{p} * inv(car_scale{p})) * zcar{p}) * sdcar{p};\n" ) str_add(out$model_prior) <- glue( " // improper prior on the spatial BYM2 component\n", " target += -0.5 * dot_self(zcar{p}[edges1{p}] - zcar{p}[edges2{p}]);\n", " // soft sum-to-zero constraint\n", " target += normal_{lpdf}(sum(zcar{p}) | 0, 0.001 * Nloc{p});\n", " // proper prior on the non-spatial BYM2 component\n", " target += std_normal_{lpdf}(nszcar{p});\n" ) } } acef_fcor <- subset2(acef, class = "fcor") if (NROW(acef_fcor)) { if (!has_natural_residuals) { stop2("FCOR terms are not implemented for this family.") } if (use_threading(threads)) { stop2("Threading is not supported for FCOR models.") } str_add(out$data) <- glue( " matrix[N{resp}, N{resp}] Mfcor{p}; // known residual covariance matrix\n" ) str_add(out$tdata_def) <- glue( " matrix[N{resp}, N{resp}] Lfcor{p} = cholesky_decompose(Mfcor{p});\n" ) } out } # stan code for offsets stan_offset <- function(bterms, threads, ...) 
{ out <- list() if (is.formula(bterms$offset)) { p <- usc(combine_prefix(bterms)) resp <- usc(bterms$resp) slice <- stan_slice(threads) # use 'offsets' as 'offset' will be reserved in stanc3 str_add(out$data) <- glue( " vector[N{resp}] offsets{p};\n") str_add(out$pll_args) <- glue(", data vector offsets{p}") str_add(out$eta) <- glue(" + offsets{p}{slice}") } out } # Stan code for non-linear predictor terms # @param nlpars names of the non-linear parameters stan_nl <- function(bterms, data, nlpars, threads, ...) { out <- list() resp <- usc(bterms$resp) par <- combine_prefix(bterms, keep_mu = TRUE, nlp = TRUE) # prepare non-linear model n <- paste0(str_if(bterms$loop, "[n]"), " ") new_nlpars <- glue(" nlp{resp}_{nlpars}{n}") # covariates in the non-linear model covars <- all.vars(bterms$covars) new_covars <- NULL if (length(covars)) { p <- usc(combine_prefix(bterms)) new_covars <- rep(NA, length(covars)) data_cnl <- data_cnl(bterms, data) if (bterms$loop) { slice <- stan_nn(threads) } else { slice <- stan_slice(threads) } slice <- paste0(slice, " ") str_add(out$data) <- " // covariates for non-linear functions\n" for (i in seq_along(covars)) { cname <- glue("C{p}_{i}") is_integer <- is.integer(data_cnl[[cname]]) is_matrix <- is.matrix(data_cnl[[cname]]) dim2 <- dim(data_cnl[[cname]])[2] if (is_integer) { if (is_matrix) { str_add(out$data) <- glue( " array[N{resp}, {dim2}] int C{p}_{i};\n" ) str_add(out$pll_args) <- glue(", data int[,] C{p}_{i}") } else { str_add(out$data) <- glue( " array[N{resp}] int C{p}_{i};\n" ) str_add(out$pll_args) <- glue(", data int[] C{p}_{i}") } } else { if (is_matrix) { str_add(out$data) <- glue( " matrix[N{resp}, {dim2}] C{p}_{i};\n" ) str_add(out$pll_args) <- glue(", data matrix C{p}_{i}") } else { str_add(out$data) <- glue( " vector[N{resp}] C{p}_{i};\n" ) str_add(out$pll_args) <- glue(", data vector C{p}_{i}") } } new_covars[i] <- glue(" C{p}_{i}{slice}") } } # add white spaces to be able to replace parameters and covariates syms <- c( 
"+", "-", "*", "/", "%", "^", ".*", "./", "'", ")", "(", ",", "==", "!=", "<=", ">=", "<", ">", "!", "&&", "||" ) regex <- glue("(? Nme_{i}; // number of latent values\n", " array[N] int Jme_{i}; // group index per observation\n" ) str_add(out$pll_args) <- glue(", data int[] Jme_{i}") } else { Nme <- "N" } str_add(out$data) <- glue( " int Mme_{i}; // number of groups\n" ) str_add(out$data) <- cglue( " vector[{Nme}] Xn_{K}; // noisy values\n", " vector[{Nme}] noise_{K}; // measurement noise\n" ) str_add_list(out) <- stan_prior( prior, "meanme", coef = coefs[K], suffix = usc(i), type = glue("vector[Mme_{i}]"), comment = "latent means", normalize = normalize ) str_add_list(out) <- stan_prior( prior, "sdme", coef = coefs[K], suffix = usc(i), type = glue("vector[Mme_{i}]"), comment = "latent SDs", normalize = normalize ) str_add(out$model_prior) <- cglue( " target += normal_{lpdf}(Xn_{K} | Xme_{K}, noise_{K});\n" ) if (meef$cor[K[1]] && length(K) > 1L) { str_add(out$data) <- glue( " int NCme_{i}; // number of latent correlations\n" ) str_add(out$par) <- glue( " matrix[Mme_{i}, {Nme}] zme_{i}; // standardized latent values\n" ) str_add_list(out) <- stan_prior( prior, "Lme", group = g, suffix = usc(i), type = glue("cholesky_factor_corr[Mme_{i}]"), comment = "cholesky factor of the latent correlation matrix", normalize = normalize ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- glue( " matrix[{Nme}, Mme_{i}] Xme{i}; // actual latent values\n" ) str_add(out$tpar_comp) <- glue( " // compute actual latent values\n", " Xme{i} = rep_matrix(transpose(meanme_{i}), {Nme})", " + transpose(diag_pre_multiply(sdme_{i}, Lme_{i}) * zme_{i});\n" ) str_add(out$tpar_def) <- cglue( " // using separate vectors increases efficiency\n", " vector[{Nme}] Xme_{K};\n" ) str_add(out$tpar_comp) <- cglue( " Xme_{K} = Xme{i}[, {J}];\n" ) str_add(out$pll_args) <- cglue(", vector Xme_{K}") str_add(out$model_prior) <- glue( " target += 
std_normal_{lpdf}(to_vector(zme_{i}));\n" ) str_add(out$gen_def) <- cglue( " // obtain latent correlation matrix\n", " corr_matrix[Mme_{i}] Corme_{i}", " = multiply_lower_tri_self_transpose(Lme_{i});\n", " vector[NCme_{i}] corme_{i};\n" ) str_add(out$gen_comp) <- stan_cor_gen_comp( cor = glue("corme_{i}"), ncol = glue("Mme_{i}") ) } else { str_add(out$par) <- cglue( " vector[{Nme}] zme_{K}; // standardized latent values\n" ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- cglue( " vector[{Nme}] Xme_{K}; // actual latent values\n" ) str_add(out$tpar_comp) <- cglue( " // compute actual latent values\n", " Xme_{K} = meanme_{i}[{J}] + sdme_{i}[{J}] * zme_{K};\n" ) str_add(out$pll_args) <- cglue(", vector Xme_{K}") str_add(out$model_prior) <- cglue( " target += std_normal_{lpdf}(zme_{K});\n" ) } } out } # initialize and compute a linear predictor term in Stan language # @param out list of character strings containing Stan code # @param bterms btl object # @param ranef output of tidy_ranef # @param primitive use Stan's GLM likelihood primitives? # @param ... currently unused # @return list of character strings containing Stan code stan_eta_combine <- function(bterms, out, ranef, threads, primitive, ...) 
{ stopifnot(is.btl(bterms), is.list(out)) if (primitive && !has_special_terms(bterms)) { # only overall effects and perhaps an intercept are present # which will be evaluated directly in the GLM primitive likelihood return(out) } px <- check_prefix(bterms) resp <- usc(bterms$resp) eta <- combine_prefix(px, keep_mu = TRUE, nlp = TRUE) out$eta <- sub("^[ \t\r\n]+\\+", "", out$eta, perl = TRUE) str_add(out$model_def) <- glue( " // initialize linear predictor term\n", " vector[N{resp}] {eta} = rep_vector(0.0, N{resp});\n" ) if (nzchar(out$eta)) { str_add(out$model_comp_eta) <- glue(" {eta} +={out$eta};\n") } out$eta <- NULL str_add(out$loopeta) <- stan_eta_re(ranef, threads = threads, px = px) if (nzchar(out$loopeta)) { # parts of eta are computed in a loop over observations out$loopeta <- sub("^[ \t\r\n]+\\+", "", out$loopeta, perl = TRUE) str_add(out$model_comp_eta_loop) <- glue( " for (n in 1:N{resp}) {{\n", " // add more terms to the linear predictor\n", stan_nn_def(threads), " {eta}[n] +={out$loopeta};\n", " }}\n" ) } out$loopeta <- NULL # possibly transform eta before it is passed to the likelihood inv_link <- stan_inv_link( bterms$family$link, vectorize = TRUE, transform = bterms$transform ) if (nzchar(inv_link)) { str_add(out$model_comp_dpar_link) <- glue( " {eta} = {inv_link}({eta});\n" ) } out } # define Stan code to compute the fixef part of eta # @param fixef names of the population-level effects # @param bterms object of class 'btl' # @param primitive use Stan's GLM likelihood primitives? 
# @return a single character string stan_eta_fe <- function(fixef, bterms, threads, primitive) { if (!length(fixef) || primitive) { return("") } p <- usc(combine_prefix(bterms)) center_X <- stan_center_X(bterms) decomp <- get_decomp(bterms$fe) sparse <- is_sparse(bterms$fe) if (sparse) { stopifnot(!center_X && decomp == "none") csr_args <- sargs( paste0(c("rows", "cols"), "(X", p, ")"), paste0(c("wX", "vX", "uX", "b"), p) ) eta_fe <- glue(" + csr_matrix_times_vector({csr_args})") } else { sfx_X <- sfx_b <- "" if (decomp == "QR") { sfx_X <- sfx_b <- "Q" } else if (center_X) { sfx_X <- "c" } slice <- stan_slice(threads) eta_fe <- glue(" + X{sfx_X}{p}{slice} * b{sfx_b}{p}") } eta_fe } # write the group-level part of the linear predictor # @return a single character string stan_eta_re <- function(ranef, threads, px = list()) { eta_re <- "" n <- stan_nn(threads) ranef <- subset2(ranef, type = c("", "mmc"), ls = px) for (id in unique(ranef$id)) { r <- subset2(ranef, id = id) rpx <- check_prefix(r) idp <- paste0(r$id, usc(combine_prefix(rpx))) idresp <- paste0(r$id, usc(rpx$resp)) if (r$gtype[1] == "mm") { ng <- seq_along(r$gcall[[1]]$groups) for (i in seq_rows(r)) { str_add(eta_re) <- cglue( " + W_{idresp[i]}_{ng}{n}", " * r_{idp[i]}_{r$cn[i]}[J_{idresp[i]}_{ng}{n}]", " * Z_{idp[i]}_{r$cn[i]}_{ng}{n}" ) } } else { str_add(eta_re) <- cglue( " + r_{idp}_{r$cn}[J_{idresp}{n}] * Z_{idp}_{r$cn}{n}" ) } } eta_re } # Stan code for group-level parameters in special predictor terms # @param r data.frame created by tidy_ranef # @return a character vector: one element per row of 'r' stan_eta_rsp <- function(r) { stopifnot(nrow(r) > 0L, length(unique(r$gtype)) == 1L) rpx <- check_prefix(r) idp <- paste0(r$id, usc(combine_prefix(rpx))) idresp <- paste0(r$id, usc(rpx$resp)) if (r$gtype[1] == "mm") { ng <- seq_along(r$gcall[[1]]$groups) out <- rep("", nrow(r)) for (i in seq_along(out)) { out[i] <- glue( "W_{idresp[i]}_{ng}[n] * r_{idp[i]}_{r$cn[i]}[J_{idresp[i]}_{ng}[n]]", collapse = " 
+ " ) } } else { out <- glue("r_{idp}_{r$cn}[J_{idresp}[n]]") } out }
# does eta need to be transformed manually using the inv_link function
stan_eta_transform <- function(family, bterms) {
  # identity links (and families with a joint link that has a built-in
  # Stan implementation) need no manual transformation
  no_transform <- family$link == "identity" ||
    has_joint_link(family) && !is.customfamily(family)
  !no_transform && !stan_has_built_in_fun(family, bterms)
}

# indicate if the population-level design matrix should be centered
# implies a temporary shift in the intercept of the model
stan_center_X <- function(x) {
  is.btl(x) && !no_center(x$fe) && has_intercept(x$fe) &&
    !fix_intercepts(x) && !is_sparse(x$fe) && !has_sum_to_zero_thres(x)
}

# comment string describing a distributional parameter in the Stan code
# returns "" for parameter classes without a canned description
stan_dpar_comments <- function(dpar, family) {
  dpar_class <- dpar_class(dpar, family)
  out <- switch(dpar_class, "",
    sigma = "dispersion parameter",
    shape = "shape parameter",
    nu = "degrees of freedom or shape",
    phi = "precision parameter",
    kappa = "precision parameter",
    beta = "scale parameter",
    zi = "zero-inflation probability",
    hu = "hurdle probability",
    zoi = "zero-one-inflation probability",
    coi = "conditional one-inflation probability",
    bs = "boundary separation parameter",
    ndt = "non-decision time parameter",
    bias = "initial bias parameter",
    disc = "discrimination parameters",
    quantile = "quantile parameter",
    xi = "shape parameter",
    alpha = "skewness parameter"
  )
  out
}

# Stan code for transformations of distributional parameters
# TODO: refactor into family-specific functions
# TODO: add gamma and related families here to compute rate = shape / mean
stan_dpar_transform <- function(bterms, prior, threads, normalize, ...) {
  stopifnot(is.brmsterms(bterms))
  out <- list()
  families <- family_names(bterms)
  px <- check_prefix(bterms)
  p <- usc(combine_prefix(px))
  resp <- usc(bterms$resp)
  # categorical-like families: join per-category mu dpars into one vector
  if (any(conv_cats_dpars(families))) {
    stopifnot(length(families) == 1L)
    is_logistic_normal <- any(is_logistic_normal(families))
    len_mu <- glue("ncat{p}{str_if(is_logistic_normal, '-1')}")
    str_add(out$model_def) <- glue(
      " // linear predictor matrix\n",
      " array[N{resp}] vector[{len_mu}] mu{p};\n"
    )
    mu_dpars <- make_stan_names(glue("mu{bterms$family$cats}"))
    mu_dpars <- glue("{mu_dpars}{p}[n]")
    iref <- get_refcat(bterms$family, int = TRUE)
    if (is_logistic_normal) {
      mu_dpars <- mu_dpars[-iref]
    } else {
      # the reference category's predictor is fixed to zero
      mu_dpars[iref] <- "0"
    }
    str_add(out$model_comp_catjoin) <- glue(
      " for (n in 1:N{resp}) {{\n",
      " mu{p}[n] = {stan_vector(mu_dpars)};\n",
      " }}\n"
    )
  }
  if (any(families %in% "skew_normal")) {
    # as suggested by Stephen Martin use sigma and mu of CP
    # but the skewness parameter alpha of DP
    dp_names <- names(bterms$dpars)
    for (i in which(families %in% "skew_normal")) {
      # id distinguishes mixture components; empty for a single family
      id <- str_if(length(families) == 1L, "", i)
      sigma <- stan_sigma_transform(bterms, id = id, threads = threads)
      ns <- str_if(grepl(stan_nn_regex(), sigma), "[n]")
      na <- str_if(glue("alpha{id}") %in% dp_names, "[n]")
      type_delta <- str_if(nzchar(na), glue("vector[N{resp}]"), "real")
      no <- str_if(any(nzchar(c(ns, na))), "[n]", "")
      type_omega <- str_if(nzchar(no), glue("vector[N{resp}]"), "real")
      str_add(out$model_def) <- glue(
        " // parameters used to transform the skew-normal distribution\n",
        " {type_delta} delta{id}{p}; // transformed alpha parameter\n",
        " {type_omega} omega{id}{p}; // scale parameter\n"
      )
      alpha <- glue("alpha{id}{p}{na}")
      delta <- glue("delta{id}{p}{na}")
      omega <- glue("omega{id}{p}{no}")
      comp_delta <- glue(
        " {delta} = {alpha} / sqrt(1 + {alpha}^2);\n"
      )
      comp_omega <- glue(
        " {omega} = {sigma} / sqrt(1 - sqrt(2 / pi())^2 * {delta}^2);\n"
      )
      # scalar transforms go outside the loop, observation-wise ones inside
      str_add(out$model_comp_dpar_trans) <- glue(
        " // use efficient skew-normal parameterization\n",
        str_if(!nzchar(na), comp_delta),
        str_if(!nzchar(no), comp_omega),
        " for (n in 1:N{resp}) {{\n",
        stan_nn_def(threads),
        str_if(nzchar(na), glue(" ", comp_delta)),
        str_if(nzchar(no), glue(" ", comp_omega)),
        " mu{id}{p}[n] = mu{id}{p}[n]",
        " - {omega} * {delta} * sqrt(2 / pi());\n",
        " }}\n"
      )
    }
  }
  if (any(families %in% "gen_extreme_value")) {
    dp_names <- c(names(bterms$dpars), names(bterms$fdpars))
    for (i in which(families %in% "gen_extreme_value")) {
      id <- str_if(length(families) == 1L, "", i)
      xi <- glue("xi{id}")
      # only rescale xi if it is neither predicted nor fixed by the user
      if (!xi %in% dp_names) {
        str_add(out$model_def) <- glue(
          " real {xi}{p}; // scaled shape parameter\n"
        )
        sigma <- glue("sigma{id}")
        sfx <- str_if(sigma %in% names(bterms$dpars), "_vector")
        args <- sargs(
          glue("tmp_{xi}{p}"), glue("Y{p}"),
          glue("mu{id}{p}"), glue("{sigma}{p}")
        )
        str_add(out$model_comp_dpar_trans) <- glue(
          " {xi}{p} = scale_xi{sfx}({args});\n"
        )
      }
    }
  }
  if (any(families %in% "logistic_normal")) {
    stopifnot(length(families) == 1L)
    predcats <- make_stan_names(get_predcats(bterms$family))
    sigma_dpars <- glue("sigma{predcats}")
    reqn <- sigma_dpars %in% names(bterms$dpars)
    n <- ifelse(reqn, "[n]", "")
    sigma_dpars <- glue("{sigma_dpars}{p}{n}")
    ncatm1 <- glue("ncat{p}-1")
    if (any(reqn)) {
      # some of the sigmas are predicted
      str_add(out$model_def) <- glue(
        " // sigma parameter matrix\n",
        " array[N{resp}] vector[{ncatm1}] sigma{p};\n"
      )
      str_add(out$model_comp_catjoin) <- glue(
        " for (n in 1:N{resp}) {{\n",
        " sigma{p}[n] = {stan_vector(sigma_dpars)};\n",
        " }}\n"
      )
    } else {
      # none of the sigmas is predicted
      str_add(out$model_def) <- glue(
        " // sigma parameter vector\n",
        " vector[{ncatm1}] sigma{p} = {stan_vector(sigma_dpars)};\n"
      )
    }
    # handle the latent correlation matrix 'lncor'
    str_add(out$tdata_def) <- glue(
      " // number of logistic normal correlations\n",
      " int nlncor{p} = choose({ncatm1}, 2);\n"
    )
    str_add_list(out) <- stan_prior(
      prior, "Llncor", suffix = p, px = px,
      type = glue("cholesky_factor_corr[{ncatm1}]"),
      header_type = "matrix",
      comment = "logistic-normal Cholesky correlation matrix",
      normalize = normalize
    )
    str_add(out$gen_def) <- glue(
      " // logistic normal correlations\n",
      " corr_matrix[{ncatm1}] Lncor",
      " = multiply_lower_tri_self_transpose(Llncor);\n",
      " vector[nlncor] lncor;\n"
    )
    str_add(out$gen_comp) <- stan_cor_gen_comp("lncor", ncatm1)
  }
  out
}

# Stan code for sigma to incorporate addition argument 'se'
stan_sigma_transform <- function(bterms, id = "", threads = NULL) {
  if (nzchar(id)) {
    # find the right family in mixture models
    family <- family_names(bterms)[as.integer(id)]
  } else {
    family <- bterms$family$family
    stopifnot(!isTRUE(family == "mixture"))
  }
  p <- usc(combine_prefix(bterms))
  ns <- str_if(glue("sigma{id}") %in% names(bterms$dpars), "[n]")
  has_sigma <- has_sigma(family) && !no_sigma(bterms)
  sigma <- str_if(has_sigma, glue("sigma{id}{p}{ns}"))
  if (is.formula(bterms$adforms$se)) {
    # combine residual sigma (if any) with known standard errors 'se'
    nse <- stan_nn(threads)
    sigma <- str_if(nzchar(sigma),
      glue("sqrt(square({sigma}) + se2{p}{nse})"),
      glue("se{p}{nse}")
    )
  }
  sigma
}
brms/R/priors.R0000644000176200001440000024244714471053102013071 0ustar liggesusers#' Prior Definitions for \pkg{brms} Models
#'
#' Define priors for specific parameters or classes of parameters.
#'
#' @aliases brmsprior brmsprior-class
#'
#' @param prior A character string defining a distribution in \pkg{Stan} language
#' @param class The parameter class. Defaults to \code{"b"}
#'   (i.e. population-level effects).
#'   See 'Details' for other valid parameter classes.
#' @param coef Name of the coefficient within the parameter class.
#' @param group Grouping factor for group-level parameters.
#' @param resp Name of the response variable.
#'   Only used in multivariate models.
#' @param dpar Name of a distributional parameter.
#'   Only used in distributional models.
#' @param nlpar Name of a non-linear parameter.
#'   Only used in non-linear models.
#' @param lb Lower bound for parameter restriction. Currently only allowed
#'   for classes \code{"b"}.
Defaults to \code{NULL}, that is no restriction. #' @param ub Upper bound for parameter restriction. Currently only allowed #' for classes \code{"b"}. Defaults to \code{NULL}, that is no restriction. #' @param check Logical; Indicates whether priors #' should be checked for validity (as far as possible). #' Defaults to \code{TRUE}. If \code{FALSE}, \code{prior} is passed #' to the Stan code as is, and all other arguments are ignored. #' @param ... Arguments passed to \code{set_prior}. #' #' @return An object of class \code{brmsprior} to be used in the \code{prior} #' argument of \code{\link{brm}}. #' #' @details #' \code{set_prior} is used to define prior distributions for parameters #' in \pkg{brms} models. The functions \code{prior}, \code{prior_}, and #' \code{prior_string} are aliases of \code{set_prior} each allowing #' for a different kind of argument specification. #' \code{prior} allows specifying arguments as expression without #' quotation marks using non-standard evaluation. #' \code{prior_} allows specifying arguments as one-sided formulas #' or wrapped in \code{quote}. #' \code{prior_string} allows specifying arguments as strings just #' as \code{set_prior} itself. #' #' Below, we explain its usage and list some common #' prior distributions for parameters. #' A complete overview on possible prior distributions is given #' in the Stan Reference Manual available at \url{https://mc-stan.org/}. #' #' To combine multiple priors, use \code{c(...)} or the \code{+} operator #' (see 'Examples'). \pkg{brms} does not check if the priors are written #' in correct \pkg{Stan} language. Instead, \pkg{Stan} will check their #' syntactical correctness when the model is parsed to \code{C++} and #' returns an error if they are not. #' This, however, does not imply that priors are always meaningful if they are #' accepted by \pkg{Stan}. 
#' Although \pkg{brms} tries to find common problems
#' (e.g., setting bounded priors on unbounded parameters), there is no guarantee
#' that the defined priors are reasonable for the model.
#' Below, we list the types of parameters in \pkg{brms} models,
#' for which the user can specify prior distributions.
#'
#' Below, we provide details for the individual parameter classes that you can
#' set priors on. Often, it may not be immediately clear, which parameters are
#' present in the model. To get a full list of parameters and parameter
#' classes for which priors can be specified (depending on the model) use
#' function \code{\link{get_prior}}.
#'
#' 1. Population-level ('fixed') effects
#'
#' Every Population-level effect has its own regression parameter.
#' These parameters are internally named as \code{b_<coef>}, where \code{<coef>}
#' represents the name of the corresponding population-level effect.
#' Suppose, for instance, that \code{y} is predicted by \code{x1} and \code{x2}
#' (i.e., \code{y ~ x1 + x2} in formula syntax).
#' Then, \code{x1} and \code{x2} have regression parameters
#' \code{b_x1} and \code{b_x2} respectively.
#' The default prior for population-level effects (including monotonic and
#' category specific effects) is an improper flat prior over the reals.
#' Other common options are normal priors or student-t priors.
#' If we want to have a normal prior with mean 0 and
#' standard deviation 5 for \code{x1}, and a unit student-t prior with 10
#' degrees of freedom for \code{x2}, we can specify this via
#' \code{set_prior("normal(0,5)", class = "b", coef = "x1")} and \cr
#' \code{set_prior("student_t(10, 0, 1)", class = "b", coef = "x2")}.
#' To put the same prior on all population-level effects at once,
#' we may write as a shortcut \code{set_prior("<prior>", class = "b")}.
#' This also leads to faster sampling, because priors can be vectorized in this case.
#' Both ways of defining priors can be combined using for instance
#' \code{set_prior("normal(0, 2)", class = "b")} and \cr
#' \code{set_prior("normal(0, 10)", class = "b", coef = "x1")}
#' at the same time. This will set a \code{normal(0, 10)} prior on
#' the effect of \code{x1} and a \code{normal(0, 2)} prior
#' on all other population-level effects.
#' However, this will break vectorization and
#' may slow down the sampling procedure a bit.
#'
#' In case of the default intercept parameterization
#' (discussed in the 'Details' section of \code{\link{brmsformula}}),
#' general priors on class \code{"b"} will \emph{not} affect
#' the intercept. Instead, the intercept has its own parameter class
#' named \code{"Intercept"} and priors can thus be
#' specified via \code{set_prior("<prior>", class = "Intercept")}.
#' Setting a prior on the intercept will not break vectorization
#' of the other population-level effects.
#' Note that technically, this prior is set on an intercept that
#' results when internally centering all population-level predictors
#' around zero to improve sampling efficiency. On this centered
#' intercept, specifying a prior is actually much easier and
#' intuitive than on the original intercept, since the former
#' represents the expected response value when all predictors
#' are at their means. To treat the intercept as an ordinary
#' population-level effect and avoid the centering parameterization,
#' use \code{0 + Intercept} on the right-hand side of the model formula.
#'
#' In non-linear models, population-level effects are defined separately
#' for each non-linear parameter. Accordingly, it is necessary to specify
#' the non-linear parameter in \code{set_prior} so that priors
#' can be assigned correctly.
#' If, for instance, \code{alpha} is the parameter and \code{x} the predictor
#' for which we want to define the prior, we can write
#' \code{set_prior("<prior>", coef = "x", nlpar = "alpha")}.
#' As a shortcut we can use \code{set_prior("", nlpar = "alpha")} #' to set the same prior on all population-level effects of \code{alpha} at once. #' #' The same goes for specifying priors for specific distributional #' parameters in the context of distributional regression, for example, #' \code{set_prior("", coef = "x", dpar = "sigma")}. #' For most other parameter classes (see below), you need to indicate #' non-linear and distributional parameters in the same way as shown here. #' #' If desired, population-level effects can be restricted to fall only #' within a certain interval using the \code{lb} and \code{ub} arguments #' of \code{set_prior}. This is often required when defining priors #' that are not defined everywhere on the real line, such as uniform #' or gamma priors. When defining a \code{uniform(2,4)} prior, #' you should write \code{set_prior("uniform(2,4)", lb = 2, ub = 4)}. #' When using a prior that is defined on the positive reals only #' (such as a gamma prior) set \code{lb = 0}. #' In most situations, it is not useful to restrict population-level #' parameters through bounded priors #' (non-linear models are an important exception), #' but if you really want to this is the way to go. #' #' 2. Group-level ('random') effects #' #' Each group-level effect of each grouping factor has a standard deviation named #' \code{sd__}. Consider, for instance, the formula #' \code{y ~ x1 + x2 + (1 + x1 | g)}. #' We see that the intercept as well as \code{x1} are group-level effects #' nested in the grouping factor \code{g}. #' The corresponding standard deviation parameters are named as #' \code{sd_g_Intercept} and \code{sd_g_x1} respectively. #' These parameters are restricted to be non-negative and, by default, #' have a half student-t prior with 3 degrees of freedom and a #' scale parameter that depends on the standard deviation of the response #' after applying the link function. Minimally, the scale parameter is 2.5. 
#' This prior is used (a) to be only weakly informative in order to influence
#' results as little as possible, while (b) providing at least some regularization
#' to considerably improve convergence and sampling efficiency.
#' To define a prior distribution only for standard deviations
#' of a specific grouping factor,
#' use \cr \code{set_prior("<prior>", class = "sd", group = "<group>")}.
#' To define a prior distribution only for a specific standard deviation
#' of a specific grouping factor, you may write \cr
#' \code{set_prior("<prior>", class = "sd", group = "<group>", coef = "<coef>")}.
#'
#' If there is more than one group-level effect per grouping factor,
#' the correlations between those effects have to be estimated.
#' The prior \code{lkj_corr_cholesky(eta)} or in short
#' \code{lkj(eta)} with \code{eta > 0}
#' is essentially the only prior for (Cholesky factors) of correlation matrices.
#' If \code{eta = 1} (the default) all correlation matrices
#' are equally likely a priori. If \code{eta > 1}, extreme correlations
#' become less likely, whereas \code{0 < eta < 1} results in
#' higher probabilities for extreme correlations.
#' Correlation matrix parameters in \code{brms} models are named as
#' \code{cor_<group>}, (e.g., \code{cor_g} if \code{g} is the grouping factor).
#' To set the same prior on every correlation matrix,
#' use for instance \code{set_prior("lkj(2)", class = "cor")}.
#' Internally, the priors are transformed to be put on the Cholesky factors
#' of the correlation matrices to improve efficiency and numerical stability.
#' The corresponding parameter class of the Cholesky factors is \code{L},
#' but it is not recommended to specify priors for this parameter class directly.
#'
#' 4. Smoothing Splines
#'
#' Smoothing splines are implemented in \pkg{brms} using the 'random effects'
#' formulation as explained in \code{\link[mgcv:gamm]{gamm}}. Thus, each
#' spline has its corresponding standard deviations modeling the variability
#' within this term.
In \pkg{brms}, this parameter class is called \code{sds} #' and priors can be specified via #' \code{set_prior("", class = "sds", coef = "")}. #' The default prior is the same as for standard deviations of group-level effects. #' #' 5. Gaussian processes #' #' Gaussian processes as currently implemented in \pkg{brms} have two #' parameters, the standard deviation parameter \code{sdgp}, and #' characteristic length-scale parameter \code{lscale} (see \code{\link{gp}} #' for more details). The default prior of \code{sdgp} is the same as for #' standard deviations of group-level effects. The default prior of #' \code{lscale} is an informative inverse-gamma prior specifically tuned to #' the covariates of the Gaussian process (for more details see #' \url{https://betanalpha.github.io/assets/case_studies/gp_part3/part3.html}). #' This tuned prior may be overly informative in some cases, so please #' consider other priors as well to make sure inference is robust to the prior #' specification. If tuning fails, a half-normal prior is used instead. #' #' 6. Autocorrelation parameters #' #' The autocorrelation parameters currently implemented are named \code{ar} #' (autoregression), \code{ma} (moving average), \code{sderr} (standard #' deviation of latent residuals in latent ARMA models), \code{cosy} (compound #' symmetry correlation), \code{car} (spatial conditional autoregression), as #' well as \code{lagsar} and \code{errorsar} (spatial simultaneous #' autoregression). #' #' Priors can be defined by \code{set_prior("", class = "ar")} for #' \code{ar} and similar for other autocorrelation parameters. By default, #' \code{ar} and \code{ma} are bounded between \code{-1} and \code{1}; #' \code{cosy}, \code{car}, \code{lagsar}, and \code{errorsar} are bounded #' between \code{0} and \code{1}. The default priors are flat over the #' respective definition areas. #' #' 7. 
Parameters of measurement error terms #' #' Latent variables induced via measurement error \code{\link{me}} terms #' require both mean and standard deviation parameters, whose prior classes #' are named \code{"meanme"} and \code{"sdme"}, respectively. If multiple #' latent variables are induced this way, their correlation matrix will #' be modeled as well and corresponding priors can be specified via the #' \code{"corme"} class. All of the above parameters have flat priors over #' their respective definition spaces by default. #' #' 8. Distance parameters of monotonic effects #' #' As explained in the details section of \code{\link{brm}}, #' monotonic effects make use of a special parameter vector to #' estimate the 'normalized distances' between consecutive predictor #' categories. This is realized in \pkg{Stan} using the \code{simplex} #' parameter type. This class is named \code{"simo"} (short for #' simplex monotonic) in \pkg{brms}. #' The only valid prior for simplex parameters is the #' dirichlet prior, which accepts a vector of length \code{K - 1} #' (K = number of predictor categories) as input defining the #' 'concentration' of the distribution. Explaining the dirichlet prior #' is beyond the scope of this documentation, but we want to describe #' how to define this prior syntactically correct. #' If a predictor \code{x} with \code{K} categories is modeled as monotonic, #' we can define a prior on its corresponding simplex via \cr #' \code{prior(dirichlet(), class = simo, coef = mox1)}. #' The \code{1} in the end of \code{coef} indicates that this is the first #' simplex in this term. If interactions between multiple monotonic #' variables are modeled, multiple simplexes per term are required. #' For \code{}, we can put in any \code{R} expression #' defining a vector of length \code{K - 1}. The default is a uniform #' prior (i.e. \code{ = rep(1, K-1)}) over all simplexes #' of the respective dimension. #' #' 9. 
Parameters for specific families #' #' Some families need additional parameters to be estimated. #' Families \code{gaussian}, \code{student}, \code{skew_normal}, #' \code{lognormal}, and \code{gen_extreme_value} need the parameter #' \code{sigma} to account for the residual standard deviation. #' By default, \code{sigma} has a half student-t prior that scales #' in the same way as the group-level standard deviations. #' Further, family \code{student} needs the parameter #' \code{nu} representing the degrees of freedom of students-t distribution. #' By default, \code{nu} has prior \code{gamma(2, 0.1)} #' and a fixed lower bound of \code{1}. #' Families \code{gamma}, \code{weibull}, \code{inverse.gaussian}, and #' \code{negbinomial} need a \code{shape} parameter that has a #' \code{gamma(0.01, 0.01)} prior by default. #' For families \code{cumulative}, \code{cratio}, \code{sratio}, #' and \code{acat}, and only if \code{threshold = "equidistant"}, #' the parameter \code{delta} is used to model the distance between #' two adjacent thresholds. #' By default, \code{delta} has an improper flat prior over the reals. #' The \code{von_mises} family needs the parameter \code{kappa}, representing #' the concentration parameter. By default, \code{kappa} has prior #' \code{gamma(2, 0.01)}. #' #' Every family specific parameter has its own prior class, so that #' \code{set_prior("", class = "")} is the right way to go. #' All of these priors are chosen to be weakly informative, #' having only minimal influence on the estimations, #' while improving convergence and sampling efficiency. #' #' 10. Shrinkage priors #' #' To reduce the danger of overfitting in models with many predictor terms fit #' on comparably sparse data, brms supports special shrinkage priors, namely #' the (regularized) \code{\link{horseshoe}} and the \code{\link{R2D2}} prior. 
#' These priors can be applied on many parameter classes, either directly on #' the coefficient classes (e.g., class \code{b}), if directly setting priors #' on them is supported, or on the corresponding standard deviation #' hyperparameters (e.g., class \code{sd}) otherwise. Currently, the following #' classes support shrinkage priors: \code{b} (overall regression #' coefficients), \code{sds} (SDs of smoothing splines), \code{sdgp} (SDs of #' Gaussian processes), \code{ar} (autoregressive coefficients), \code{ma} #' (moving average coefficients), \code{sderr} (SD of latent residuals), #' \code{sdcar} (SD of spatial CAR structures), \code{sd} (SD of varying #' coefficients). #' #' 11. Fixing parameters to constants #' #' Fixing parameters to constants is possible by using the \code{constant} #' function, for example, \code{constant(1)} to fix a parameter to 1. #' Broadcasting to vectors and matrices is done automatically. #' #' @seealso \code{\link{get_prior}} #' #' @examples #' ## use alias functions #' (prior1 <- prior(cauchy(0, 1), class = sd)) #' (prior2 <- prior_(~cauchy(0, 1), class = ~sd)) #' (prior3 <- prior_string("cauchy(0, 1)", class = "sd")) #' identical(prior1, prior2) #' identical(prior1, prior3) #' #' # check which parameters can have priors #' get_prior(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = cumulative()) #' #' # define some priors #' bprior <- c(prior_string("normal(0,10)", class = "b"), #' prior(normal(1,2), class = b, coef = treat), #' prior_(~cauchy(0,2), class = ~sd, #' group = ~subject, coef = ~Intercept)) #' #' # verify that the priors indeed found their way into Stan's model code #' make_stancode(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = cumulative(), #' prior = bprior) #' #' # use the horseshoe prior to model sparsity in regression coefficients #' make_stancode(count ~ zAge + zBase * Trt, #' data = epilepsy, family = poisson(), #' prior = set_prior("horseshoe(3)")) #' #' # 
#' # fix certain priors to constants
#' bprior <- prior(constant(1), class = "b") +
#'   prior(constant(2), class = "b", coef = "zBase") +
#'   prior(constant(0.5), class = "sd")
#' make_stancode(count ~ zAge + zBase + (1 | patient),
#'               data = epilepsy, prior = bprior)
#'
#' # pass priors to Stan without checking
#' prior <- prior_string("target += normal_lpdf(b[1] | 0, 1)", check = FALSE)
#' make_stancode(count ~ Trt, data = epilepsy, prior = prior)
#'
#' # define priors in a vectorized manner
#' # useful in particular for categorical or multivariate models
#' set_prior("normal(0, 2)", dpar = c("muX", "muY", "muZ"))
#'
#' @export
set_prior <- function(prior, class = "b", coef = "", group = "",
                      resp = "", dpar = "", nlpar = "", lb = NA,
                      ub = NA, check = TRUE) {
  # vectorized over all arguments: expanding them into a data.frame
  # yields one row per prior, each converted to a single 'brmsprior'
  input <- nlist(prior, class, coef, group, resp, dpar, nlpar, lb, ub, check)
  input <- try(as.data.frame(input), silent = TRUE)
  if (is_try_error(input)) {
    stop2("Processing arguments of 'set_prior' has failed:\n", input)
  }
  out <- vector("list", nrow(input))
  for (i in seq_along(out)) {
    out[[i]] <- do_call(.set_prior, input[i, ])
  }
  # combine all single-row priors into one 'brmsprior' object
  Reduce("+", out)
}

# validate arguments passed to 'set_prior'
# @return a single 'brmsprior' object
.set_prior <- function(prior, class, coef, group, resp, dpar,
                       nlpar, lb, ub, check) {
  # coerce every argument to a scalar of the expected type
  prior <- as_one_character(prior)
  class <- as_one_character(class)
  group <- as_one_character(group)
  coef <- as_one_character(coef)
  resp <- as_one_character(resp)
  dpar <- as_one_character(dpar)
  nlpar <- as_one_character(nlpar)
  check <- as_one_logical(check)
  lb <- as_one_character(lb, allow_na = TRUE)
  ub <- as_one_character(ub, allow_na = TRUE)
  if (dpar == "mu") {
    # distributional parameter 'mu' is currently implicit #1368
    dpar <- ""
  }
  if (!check) {
    # prior will be added to the log-posterior as is
    class <- coef <- group <- resp <- dpar <- nlpar <- lb <- ub <- ""
  }
  source <- "user"
  out <- nlist(prior, source, class, coef, group, resp, dpar, nlpar, lb, ub)
  do_call(brmsprior, out)
}

#' @describeIn set_prior Alias of \code{set_prior} allowing to
#'   specify arguments as expressions without quotation marks.
#' @export
prior <- function(prior, ...) {
  # capture unevaluated arguments for non-standard evaluation
  call <- as.list(match.call()[-1])
  seval <- rmNULL(call[prior_seval_args()])
  call[prior_seval_args()] <- NULL
  call <- lapply(call, deparse_no_string)
  do_call(set_prior, c(call, seval))
}

#' @describeIn set_prior Alias of \code{set_prior} allowing to specify
#'   arguments as one-sided formulas or wrapped in \code{quote}.
#' @export
prior_ <- function(prior, ...) {
  call <- nlist(prior, ...)
  seval <- rmNULL(call[prior_seval_args()])
  call[prior_seval_args()] <- NULL
  # deparse a single argument given as formula, call, name, or constant
  as_string <- function(x) {
    if (is.formula(x) && length(x) == 2) {
      deparse_no_string(x[[2]])
    } else if (is.call(x) || is.name(x) || is.atomic(x)) {
      deparse_no_string(x)
    } else {
      stop2("Arguments must be one-sided formula, call, name, or constant.")
    }
  }
  call <- lapply(call, as_string)
  do_call(set_prior, c(call, seval))
}

# arguments for which to use standard evaluation
prior_seval_args <- function() {
  c("check")
}

#' @describeIn set_prior Alias of \code{set_prior} allowing to
#'   specify arguments as strings.
#' @export
prior_string <- function(prior, ...) {
  set_prior(prior, ...)
}

#' Overview on Priors for \pkg{brms} Models
#'
#' Get information on all parameters (and parameter classes) for which priors
#' may be specified including default priors.
#'
#' @inheritParams brm
#' @param ... Other arguments for internal usage only.
#'
#' @return A data.frame with columns \code{prior}, \code{class}, \code{coef},
#'   and \code{group} and several rows, each providing information on a
#'   parameter (or parameter class) on which priors can be specified. The prior
#'   column is empty except for internal default priors.
#'
#' @seealso \code{\link{set_prior}}
#'
#' @examples
#' ## get all parameters and parameter classes to define priors on
#' (prior <- get_prior(count ~ zAge + zBase * Trt + (1|patient) + (1|obs),
#'                     data = epilepsy, family = poisson()))
#'
#' ## define a prior on all population-level effects at once
#' prior$prior[1] <- "normal(0,10)"
#'
#' ## define a specific prior on the population-level effect of Trt
#' prior$prior[5] <- "student_t(10, 0, 5)"
#'
#' ## verify that the priors indeed found their way into Stan's model code
#' make_stancode(count ~ zAge + zBase * Trt + (1|patient) + (1|obs),
#'               data = epilepsy, family = poisson(),
#'               prior = prior)
#'
#' @export
get_prior <- function(formula, data, family = gaussian(), autocor = NULL,
                      data2 = NULL, knots = NULL, drop_unused_levels = TRUE,
                      sparse = NULL, ...) {
  if (is.brmsfit(formula)) {
    stop2("Use 'prior_summary' to extract priors from 'brmsfit' objects.")
  }
  # validate the full model specification before collecting priors
  formula <- validate_formula(
    formula, data = data, family = family,
    autocor = autocor, sparse = sparse
  )
  bterms <- brmsterms(formula)
  data2 <- validate_data2(
    data2, bterms = bterms,
    get_data2_autocor(formula)
  )
  data <- validate_data(
    data, bterms = bterms, data2 = data2,
    knots = knots, drop_unused_levels = drop_unused_levels
  )
  .get_prior(bterms, data, ...)
}

# internal work function of 'get_prior'
# @param internal return priors for internal use?
# @return a brmsprior object
.get_prior <- function(bterms, data, internal = FALSE, ...) {
  ranef <- tidy_ranef(bterms, data)
  meef <- tidy_meef(bterms, data)
  # initialize output
  prior <- empty_prior()
  # priors for distributional parameters
  prior <- prior + prior_predictor(
    bterms, data = data, internal = internal
  )
  # priors of group-level parameters
  def_scale_prior <- def_scale_prior(bterms, data)
  prior <- prior + prior_re(
    ranef, def_scale_prior = def_scale_prior,
    internal = internal
  )
  # priors for noise-free variables
  prior <- prior + prior_Xme(meef, internal = internal)
  # explicitly label default priors as such
  prior$source <- "default"
  # apply 'unique' as the same prior may have been included multiple times
  to_order <- with(prior, order(resp, dpar, nlpar, class, group, coef))
  prior <- unique(prior[to_order, , drop = FALSE])
  rownames(prior) <- NULL
  class(prior) <- c("brmsprior", "data.frame")
  prior
}

# generate priors for predictor terms
# @return a 'brmsprior' object
prior_predictor <- function(x, ...) {
  UseMethod("prior_predictor")
}

#' @export
prior_predictor.default <- function(x, ...) {
  empty_prior()
}

# combine priors of all univariate submodels of a multivariate model
prior_predictor.mvbrmsterms <- function(x, internal = FALSE, ...) {
  prior <- empty_prior()
  for (i in seq_along(x$terms)) {
    prior <- prior + prior_predictor(x$terms[[i]], internal = internal, ...)
  }
  for (cl in c("b", "Intercept")) {
    # deprecated; see warning in 'validate_special_prior'
    if (any(with(prior, class == cl & coef == ""))) {
      prior <- prior + brmsprior(class = cl)
    }
  }
  if (x$rescor) {
    # priors on residual correlations between the response variables
    if (internal) {
      prior <- prior +
        brmsprior(class = "Lrescor", prior = "lkj_corr_cholesky(1)")
    } else {
      prior <- prior + brmsprior(class = "rescor", prior = "lkj(1)")
    }
    if (family_names(x)[1] %in% "student") {
      prior <- prior + brmsprior(class = "nu", prior = "gamma(2, 0.1)", lb = "1")
    }
  }
  prior
}

prior_predictor.brmsterms <- function(x, data, internal = FALSE, ...)
{
  data <- subset_data(data, x)
  def_scale_prior <- def_scale_prior(x, data)
  valid_dpars <- valid_dpars(x)
  prior <- empty_prior()
  # priors for mixture models
  if (is.mixfamily(x$family)) {
    if (has_joint_theta(x)) {
      # individual theta parameters should not have a prior in this case
      theta_dpars <- str_subset(valid_dpars, "^theta[[:digit:]]+")
      valid_dpars <- setdiff(valid_dpars, theta_dpars)
      prior <- prior +
        brmsprior(prior = "dirichlet(1)", class = "theta", resp = x$resp)
    }
    if (fix_intercepts(x)) {
      # fixing thresholds across mixture components
      # requires a single set of priors at the top level
      stopifnot(is_ordinal(x))
      prior <- prior + prior_thres(x, def_scale_prior = def_scale_prior)
    }
  }
  # priors for distributional parameters
  for (dp in valid_dpars) {
    def_dpar_prior <- def_dpar_prior(x, dp, data = data)
    if (!is.null(x$dpars[[dp]])) {
      # parameter is predicted
      dp_prior <- prior_predictor(
        x$dpars[[dp]], data = data,
        def_scale_prior = def_scale_prior,
        def_dpar_prior = def_dpar_prior,
        internal = internal
      )
    } else if (!is.null(x$fdpars[[dp]])) {
      # parameter is fixed
      dp_prior <- empty_prior()
    } else {
      # parameter is estimated
      dp_bound <- dpar_bounds(dp, suffix = x$resp, family = x$family)
      dp_prior <- brmsprior(
        def_dpar_prior, class = dp, resp = x$resp,
        lb = dp_bound$lb, ub = dp_bound$ub
      )
    }
    prior <- prior + dp_prior
  }
  # priors for non-linear parameters
  for (nlp in names(x$nlpars)) {
    nlp_prior <- prior_predictor(
      x$nlpars[[nlp]], data = data,
      def_scale_prior = def_scale_prior,
      internal = internal
    )
    prior <- prior + nlp_prior
  }
  if (is_logistic_normal(x$family)) {
    # latent correlations of the logistic-normal family
    if (internal) {
      prior <- prior +
        brmsprior("lkj_corr_cholesky(1)", class = "Llncor", resp = x$resp)
    } else {
      prior <- prior + brmsprior("lkj(1)", class = "lncor", resp = x$resp)
    }
  }
  prior
}

# prior for linear predictor terms
#' @export
prior_predictor.btl <- function(x, ...) {
  prior_fe(x, ...) +
    prior_thres(x, ...) +
    prior_sp(x, ...) +
    prior_cs(x, ...) +
    prior_sm(x, ...) +
    prior_gp(x, ...) +
    prior_ac(x, ...) +
    prior_bhaz(x, ...)
}

# priors for non-linear predictor terms
#' @export
prior_predictor.btnl <- function(x, ...) {
  # thresholds are required even in non-linear ordinal models
  prior_thres(x, ...) +
    prior_ac(x, ...) +
    prior_bhaz(x, ...)
}

# priors for population-level parameters
prior_fe <- function(bterms, data, def_dpar_prior = "", ...) {
  prior <- empty_prior()
  fixef <- colnames(data_fe(bterms, data)$X)
  px <- check_prefix(bterms)
  center_X <- stan_center_X(bterms)
  if (center_X && !is_ordinal(bterms)) {
    # priors for ordinal thresholds are provided in 'prior_thres'
    prior <- prior + brmsprior(def_dpar_prior, class = "Intercept", ls = px)
    fixef <- setdiff(fixef, "Intercept")
  }
  if (length(fixef)) {
    prior <- prior + brmsprior(class = "b", coef = c("", fixef), ls = px)
  }
  prior
}

# priors for thresholds of ordinal models
prior_thres <- function(bterms, def_scale_prior = "", ...) {
  prior <- empty_prior()
  if (!is_ordinal(bterms)) {
    # thresholds only exist in ordinal models
    return(prior)
  }
  if (fix_intercepts(bterms) && !is.mixfamily(bterms$family)) {
    # fixed thresholds cannot have separate priors
    return(prior)
  }
  # create priors for threshold per group
  .prior_thres <- function(thres, thres_prior = "", group = "") {
    prior <- empty_prior()
    if (has_equidistant_thres(bterms)) {
      # prior for the delta parameter for equidistant thresholds
      thres <- character(0)
      lb <- str_if(has_ordered_thres(bterms), "0")
      prior <- prior + brmsprior(
        class = "delta", group = group, lb = lb, ls = px
      )
    }
    prior <- prior + brmsprior(
      prior = c(thres_prior, rep("", length(thres))),
      class = "Intercept", coef = c("", thres),
      group = group, ls = px
    )
  }
  px <- check_prefix(bterms)
  groups <- get_thres_groups(bterms)
  if (any(nzchar(groups))) {
    # for models with multiple threshold vectors
    prior <- prior + .prior_thres(character(0), def_scale_prior)
    for (g in groups) {
      prior <- prior + .prior_thres(get_thres(bterms, group = g), group = g)
    }
  } else {
    # for models with a single threshold vector
    prior <- prior +
.prior_thres(get_thres(bterms), def_scale_prior) } prior } # priors for coefficients of baseline hazards in the Cox model prior_bhaz <- function(bterms, ...) { prior <- empty_prior() if (!is_cox(bterms$family)) { return(prior) } px <- check_prefix(bterms) # the scale of sbhaz is not identified when an intercept is part of mu # thus a sum-to-one constraint ensures identification prior <- prior + brmsprior("dirichlet(1)", class = "sbhaz", ls = px) prior } # priors for special effects parameters prior_sp <- function(bterms, data, ...) { prior <- empty_prior() spef <- tidy_spef(bterms, data) if (nrow(spef)) { px <- check_prefix(bterms) prior <- prior + brmsprior( class = "b", coef = c("", spef$coef), ls = px ) simo_coef <- get_simo_labels(spef, use_id = TRUE) if (length(simo_coef)) { prior <- prior + brmsprior( prior = "dirichlet(1)", class = "simo", coef = simo_coef, ls = px ) } } prior } # priors for category spcific effects parameters prior_cs <- function(bterms, data, ...) { prior <- empty_prior() csef <- colnames(get_model_matrix(bterms$cs, data = data)) if (length(csef)) { px <- check_prefix(bterms) prior <- prior + brmsprior(class = "b", coef = c("", csef), ls = px) } prior } # default priors for hyper-parameters of noise-free variables prior_Xme <- function(meef, internal = FALSE, ...) 
{ stopifnot(is.meef_frame(meef)) prior <- empty_prior() if (nrow(meef)) { prior <- prior + brmsprior(class = "meanme") + brmsprior(class = "meanme", coef = meef$coef) + brmsprior(class = "sdme", lb = "0") + brmsprior(class = "sdme", coef = meef$coef) # priors for correlation parameters groups <- unique(meef$grname) for (i in seq_along(groups)) { g <- groups[i] K <- which(meef$grname %in% g) if (meef$cor[K[1]] && length(K) > 1L) { if (internal) { prior <- prior + brmsprior("lkj_corr_cholesky(1)", class = "Lme") if (nzchar(g)) { prior <- prior + brmsprior(class = "Lme", group = g) } } else { prior <- prior + brmsprior("lkj(1)", class = "corme") if (nzchar(g)) { prior <- prior + brmsprior(class = "corme", group = g) } } } } } prior } # default priors of gaussian processes # @param def_scale_prior: a character string defining # the default prior SD parameters prior_gp <- function(bterms, data, def_scale_prior, ...) { prior <- empty_prior() gpef <- tidy_gpef(bterms, data) if (nrow(gpef)) { px <- check_prefix(bterms) lscale_prior <- def_lscale_prior(bterms, data) prior <- prior + brmsprior(class = "sdgp", prior = def_scale_prior, ls = px, lb = "0") + brmsprior(class = "sdgp", coef = unlist(gpef$sfx1), ls = px) + brmsprior(class = "lscale", ls = px, lb = "0") + brmsprior(class = "lscale", prior = lscale_prior, coef = names(lscale_prior), ls = px) } prior } # default priors for length-scale parameters of GPs # see https://betanalpha.github.io/assets/case_studies/gp_part3/part3.html # @param plb prior probability of being lower than minimum length-scale # @param pub prior probability of being higher than maximum length-scale def_lscale_prior <- function(bterms, data, plb = 0.01, pub = 0.01) { .opt_fun <- function(x, lb, ub) { # optimize parameters on the log-scale to make them positive only x <- exp(x) y1 <- pinvgamma(lb, x[1], x[2], log.p = TRUE) y2 <- pinvgamma(ub, x[1], x[2], lower.tail = FALSE, log.p = TRUE) c(y1 - log(plb), y2 - log(pub)) } .def_lscale_prior <- 
function(X) { dq <- diff_quad(X) ub <- sqrt(max(dq)) lb <- sqrt(min(dq[dq > 0])) # prevent extreme priors lb <- max(lb, 0.01 * ub) opt_res <- nleqslv::nleqslv( c(0, 0), .opt_fun, lb = lb, ub = ub, control = list(allowSingular = TRUE) ) prior <- "normal(0, 0.5)" if (opt_res$termcd %in% 1:2) { # use the inverse-gamma prior only in case of convergence pars <- exp(opt_res$x) prior <- paste0("inv_gamma(", sargs(round(pars, 6)), ")") } return(prior) } p <- usc(combine_prefix(bterms)) gpef <- tidy_gpef(bterms, data) data_gp <- data_gp(bterms, data, internal = TRUE) out <- vector("list", NROW(gpef)) for (i in seq_along(out)) { pi <- paste0(p, "_", i) iso <- gpef$iso[i] cons <- gpef$cons[[i]] if (length(cons) > 0L) { for (j in seq_along(cons)) { Xgp <- data_gp[[paste0("Xgp_prior", pi, "_", j)]] if (iso) { c(out[[i]]) <- .def_lscale_prior(Xgp) } else { c(out[[i]]) <- apply(Xgp, 2, .def_lscale_prior) } } } else { Xgp <- data_gp[[paste0("Xgp_prior", pi)]] if (iso) { out[[i]] <- .def_lscale_prior(Xgp) } else { out[[i]] <- apply(Xgp, 2, .def_lscale_prior) } } # transpose so that by-levels vary last names(out[[i]]) <- as.vector(t(gpef$sfx2[[i]])) } unlist(out) } # priors for varying effects parameters # @param ranef: a list returned by tidy_ranef # @param def_scale_prior a character string defining # the default prior for SD parameters # @param internal: see 'get_prior' prior_re <- function(ranef, def_scale_prior, internal = FALSE, ...) 
{ prior <- empty_prior() if (!nrow(ranef)) { return(prior) } # global sd class px <- check_prefix(ranef) upx <- unique(px) if (length(def_scale_prior) > 1L) { def_scale_prior <- def_scale_prior[px$resp] } global_sd_prior <- brmsprior( class = "sd", prior = def_scale_prior, lb = "0", ls = px ) prior <- prior + global_sd_prior for (id in unique(ranef$id)) { r <- subset2(ranef, id = id) group <- r$group[1] rpx <- check_prefix(r) urpx <- unique(rpx) # include group-level standard deviations prior <- prior + # don't specify lb as we already have it above brmsprior(class = "sd", group = group, ls = urpx) + brmsprior(class = "sd", coef = r$coef, group = group, ls = rpx) # detect duplicated group-level effects J <- with(prior, class == "sd" & nzchar(coef)) dupli <- duplicated(prior[J, ]) if (any(dupli)) { stop2("Duplicated group-level effects detected for group ", group) } # include correlation parameters if (isTRUE(r$cor[1]) && nrow(r) > 1L) { if (internal) { prior <- prior + brmsprior( class = "L", group = c("", group), prior = c("lkj_corr_cholesky(1)", "") ) } else { prior <- prior + brmsprior( class = "cor", group = c("", group), prior = c("lkj(1)", "") ) } } } tranef <- get_dist_groups(ranef, "student") if (isTRUE(nrow(tranef) > 0L)) { prior <- prior + brmsprior("gamma(2, 0.1)", class = "df", group = tranef$group, lb = "1") } prior } # priors for smooth terms prior_sm <- function(bterms, data, def_scale_prior, ...) 
{ prior <- empty_prior() smef <- tidy_smef(bterms, data) if (NROW(smef)) { px <- check_prefix(bterms) # prior for the FE coefficients Xs_names <- attr(smef, "Xs_names") if (length(Xs_names)) { prior <- prior + brmsprior( class = "b", coef = c("", Xs_names), ls = px ) } # prior for SD parameters of the RE coefficients smterms <- unique(smef$term) prior <- prior + brmsprior(prior = def_scale_prior, class = "sds", lb = "0", ls = px) + brmsprior(class = "sds", coef = smterms, ls = px) } prior } # priors for autocor parameters prior_ac <- function(bterms, def_scale_prior, internal = FALSE, ...) { prior <- empty_prior() acef <- tidy_acef(bterms) if (!NROW(acef)) { return(prior) } px <- check_prefix(bterms) p <- combine_prefix(px) has_ac_latent_residuals <- has_ac_latent_residuals(bterms) if (has_ac_class(acef, "arma")) { acef_arma <- subset2(acef, class = "arma") # no boundaries are required in the conditional formulation # when natural residuals automatically define the scale need_arma_bound <- acef_arma$cov || has_ac_latent_residuals arma_lb <- str_if(need_arma_bound, "-1") arma_ub <- str_if(need_arma_bound, "1") if (acef_arma$p > 0) { prior <- prior + brmsprior(class = "ar", ls = px, lb = arma_lb, ub = arma_ub) } if (acef_arma$q > 0) { prior <- prior + brmsprior(class = "ma", ls = px, lb = arma_lb, ub = arma_ub) } } if (has_ac_class(acef, "cosy")) { # cosy correlations may be negative in theory but # this causes problems with divergent transitions (#878) prior <- prior + brmsprior(class = "cosy", ls = px, lb = "0", ub = "1") } if (has_ac_class(acef, "unstr")) { if (internal) { prior <- prior + brmsprior("lkj_corr_cholesky(1)", class = "Lcortime", ls = px) } else { prior <- prior + brmsprior("lkj(1)", class = "cortime", ls = px) } } if (has_ac_latent_residuals(bterms)) { prior <- prior + brmsprior(def_scale_prior, class = "sderr", ls = px, lb = "0") } if (has_ac_class(acef, "sar")) { acef_sar <- subset2(acef, class = "sar") sar_lb <- glue("min_eigenMsar{p}") sar_ub <- 
glue("max_eigenMsar{p}") if (acef_sar$type == "lag") { prior <- prior + brmsprior(class = "lagsar", lb = sar_lb, ub = sar_ub, ls = px) } if (acef_sar$type == "error") { prior <- prior + brmsprior(class = "errorsar", lb = sar_lb, ub = sar_ub, ls = px) } } if (has_ac_class(acef, "car")) { acef_car <- subset2(acef, class = "car") prior <- prior + brmsprior(def_scale_prior, class = "sdcar", lb = "0", ls = px) if (acef_car$type %in% "escar") { prior <- prior + brmsprior(class = "car", lb = "0", ub = "1", ls = px) } else if (acef_car$type %in% "bym2") { prior <- prior + brmsprior("beta(1, 1)", class = "rhocar", lb = "0", ub = "1", ls = px) } } prior } # default priors for distributional parameters def_dpar_prior <- function(x, dpar, data = NULL) { stopifnot(is.brmsterms(x)) dpar <- as_one_character(dpar) resp <- usc(x$resp) dpar_class <- dpar_class(dpar, family = x) link <- x$dpars[[dpar]]$family$link if (is.null(link)) { link <- "identity" } # ensures reasonable scaling in def_scale_prior x$family$link <- link if (link == "identity") { # dpar is estimated or predicted on the linear scale out <- switch(dpar_class, "", mu = def_scale_prior(x, data, center = FALSE, dpar = dpar), sigma = def_scale_prior(x, data), shape = "gamma(0.01, 0.01)", nu = "gamma(2, 0.1)", phi = "gamma(0.01, 0.01)", kappa = "gamma(2, 0.01)", beta = "gamma(1, 0.1)", zi = "beta(1, 1)", hu = "beta(1, 1)", zoi = "beta(1, 1)", coi = "beta(1, 1)", bs = "gamma(1, 1)", ndt = glue("uniform(0, min_Y{resp})"), bias = "beta(1, 1)", quantile = "beta(1, 1)", xi = "normal(0, 2.5)", alpha = "normal(0, 4)", disc = "lognormal(0, 1)", theta = "logistic(0, 1)" ) } else { # except for 'mu' all parameters only support one link other than identity out <- switch(dpar_class, "", mu = def_scale_prior(x, data, center = FALSE, dpar = dpar), sigma = def_scale_prior(x, data), shape = "student_t(3, 0, 2.5)", nu = "normal(2.7, 0.8)", phi = "student_t(3, 0, 2.5)", kappa = "normal(5.0, 0.8)", beta = "normal(1.7, 1.3)", zi = 
"logistic(0, 1)", hu = "logistic(0, 1)", zoi = "logistic(0, 1)", coi = "logistic(0, 1)", bs = "normal(-0.6, 1.3)", bias = "logistic(0, 1)", quantile = "logistic(0, 1)", xi = "normal(0, 4)", alpha = "normal(0, 4)", disc = "normal(0, 1)" ) } out } # default priors for scale/SD parameters def_scale_prior <- function(x, data, ...) { UseMethod("def_scale_prior") } #' @export def_scale_prior.mvbrmsterms <- function(x, data, ...) { out <- ulapply(x$terms, def_scale_prior, data = data, ...) names(out) <- x$responses out } # @param center Should the prior be centered around zero? # If FALSE, the prior location is computed based on Y. #' @export def_scale_prior.brmsterms <- function(x, data, center = TRUE, df = 3, location = 0, scale = 2.5, dpar = NULL, ...) { y <- unname(model.response(model.frame(x$respform, data))) link <- x$family$link if (has_logscale(x$family)) { link <- "log" } tlinks <- c("identity", "log", "inverse", "sqrt", "1/mu^2") if (link %in% tlinks && !is_like_factor(y) && !conv_cats_dpars(x)) { if (link %in% c("log", "inverse", "1/mu^2")) { # avoid Inf in link(y) y <- ifelse(y == 0, y + 0.1, y) } y_link <- SW(link(y, link = link)) scale_y <- round(mad(y_link), 1) if (is.finite(scale_y)) { scale <- max(scale, scale_y) } if (!center) { location_y <- round(median(y_link), 1) if (is.finite(location_y)) { location <- location_y } # offsets may render default intercept priors not sensible dpar <- as_one_character(dpar) offset <- unname(unlist(data_offset(x$dpars[[dpar]], data))) if (length(offset)) { mean_offset <- mean(offset) if (is.finite(mean_offset)) { location <- location - mean_offset } } } } paste0("student_t(", sargs(df, location, scale), ")") } #' Validate Prior for \pkg{brms} Models #' #' Validate priors supplied by the user. Return a complete #' set of priors for the given model, including default priors. #' #' @inheritParams get_prior #' @inheritParams brm #' #' @return An object of class \code{brmsprior}. 
#' #' @seealso \code{\link{get_prior}}, \code{\link{set_prior}}. #' #' @examples #' prior1 <- prior(normal(0,10), class = b) + #' prior(cauchy(0,2), class = sd) #' validate_prior(prior1, count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson()) #' #' @export validate_prior <- function(prior, formula, data, family = gaussian(), sample_prior = "no", data2 = NULL, knots = NULL, drop_unused_levels = TRUE, ...) { formula <- validate_formula(formula, data = data, family = family) bterms <- brmsterms(formula) data2 <- validate_data2(data2, bterms = bterms) data <- validate_data( data, bterms = bterms, data2 = data2, knots = knots, drop_unused_levels = drop_unused_levels ) .validate_prior( prior, bterms = bterms, data = data, sample_prior = sample_prior, ... ) } # internal work function of 'validate_prior' .validate_prior <- function(prior, bterms, data, sample_prior, ...) { sample_prior <- validate_sample_prior(sample_prior) all_priors <- .get_prior(bterms, data, internal = TRUE) if (is.null(prior)) { prior <- all_priors } else if (!is.brmsprior(prior)) { stop2("Argument 'prior' must be a 'brmsprior' object.") } # when updating existing priors, invalid priors should be allowed allow_invalid_prior <- isTRUE(attr(prior, "allow_invalid_prior")) # temporarily exclude priors that should not be checked no_checks <- !nzchar(prior$class) prior_no_checks <- prior[no_checks, ] prior <- prior[!no_checks, ] # check for duplicated priors prior$class <- rename( prior$class, c("^cor$", "^rescor$", "^corme$", "^lncor$", "^cortime$"), c("L", "Lrescor", "Lme", "Llncor", "Lcortime"), fixed = FALSE ) if (any(duplicated(prior))) { stop2("Duplicated prior specifications are not allowed.") } # check for invalid priors # it is good to let the user know beforehand that some of their priors # were invalid in the model to avoid unnecessary refits if (nrow(prior)) { valid_ids <- which(duplicated(rbind(all_priors, prior))) invalid <- !seq_rows(prior) %in% (valid_ids - 
nrow(all_priors)) if (any(invalid) && !allow_invalid_prior) { stop2( "The following priors do not correspond ", "to any model parameter: \n", collapse(.print_prior(prior[invalid, ]), "\n"), "Function 'get_prior' might be helpful to you." ) } prior <- prior[!invalid, ] } prior$prior <- sub("^(lkj|lkj_corr)\\(", "lkj_corr_cholesky(", prior$prior) # include default parameter bounds; only new priors need bounds which_needs_lb <- which(is.na(prior$lb) & !nzchar(prior$coef)) for (i in which_needs_lb) { if (!is.na(prior$ub[i]) && nzchar(prior$ub[i])) { # if ub is specified lb should be specified in the same line as well prior$lb[i] <- stan_base_prior(all_priors, "lb", sel_prior = prior[i, ]) } else { # take the corresponding lb from the default prior prior_sub_i <- rbind(prior[i, ], all_priors) prior_sub_i <- prior_sub_i[duplicated(prior_sub_i), ] stopifnot(NROW(prior_sub_i) == 1L) prior$lb[i] <- prior_sub_i$lb } } which_needs_ub <- which(is.na(prior$ub) & !nzchar(prior$coef)) for (i in which_needs_ub) { if (!is.na(prior$lb[i]) && nzchar(prior$lb[i])) { # if lb is specified ub should be specified in the same line as well prior$ub[i] <- stan_base_prior(all_priors, "ub", sel_prior = prior[i, ]) } else { # take the corresponding lb from the default prior prior_sub_i <- rbind(prior[i, ], all_priors) prior_sub_i <- prior_sub_i[duplicated(prior_sub_i), ] stopifnot(NROW(prior_sub_i) == 1L) prior$ub[i] <- prior_sub_i$ub } } # the remaining NAs are in coef priors which cannot have bounds yet prior$lb[is.na(prior$lb)] <- prior$ub[is.na(prior$ub)] <- "" # boundaries on individual coefficients are not yet supported # TODO: enable bounds for coefficients as well? 
if (any((nzchar(prior$lb) | nzchar(prior$ub)) & nzchar(prior$coef))) { stop2("Prior argument 'coef' may not be specified when using boundaries.") } # merge user-specified priors with default priors prior$new <- rep(TRUE, nrow(prior)) all_priors$new <- rep(FALSE, nrow(all_priors)) prior <- c(all_priors, prior, replace = TRUE) check_prior_content(prior) prior <- validate_special_prior(prior, bterms = bterms, data = data, ...) prior <- prior[with(prior, order(class, group, resp, dpar, nlpar, coef)), ] # check and warn valid but unused priors for (i in which(nzchar(prior$prior) & !nzchar(prior$coef))) { ls <- prior[i, c("class", "group", "resp", "dpar", "nlpar")] class(ls) <- "data.frame" prior_sub_coef <- subset2(prior, ls = ls) prior_sub_coef <- prior_sub_coef[nzchar(prior_sub_coef$coef), ] if (nrow(prior_sub_coef) && all(nzchar(prior_sub_coef$prior))) { warning2( "The global prior '", prior$prior[i], "' of class '", prior$class[i], "' will not be used in the model as all related coefficients have ", "individual priors already. If you did not set those ", "priors yourself, then maybe brms has assigned default priors. ", "See ?set_prior and ?get_prior for more details." 
) } } prior <- prior + prior_no_checks rownames(prior) <- NULL attr(prior, "sample_prior") <- sample_prior if (is_verbose()) { # show remaining default priors added to the model def_prior <- prepare_print_prior(prior) def_prior <- subset2(def_prior, source = "default") if (nrow(def_prior)) { message("The following priors were automatically added to the model:") print(def_prior, show_df = TRUE) } } prior } # try to check if prior distributions are reasonable # @param prior A brmsprior object check_prior_content <- function(prior) { if (!is.brmsprior(prior) || !NROW(prior)) { return(invisible(TRUE)) } lb_priors <- c( "lognormal", "chi_square", "inv_chi_square", "scaled_inv_chi_square", "exponential", "gamma", "inv_gamma", "weibull", "frechet", "rayleigh", "pareto", "pareto_type_2" ) lb_priors_regex <- paste0("^(", paste0(lb_priors, collapse = "|"), ")") ulb_priors <- c("beta", "uniform", "von_mises", "beta_proportion") ulb_priors_regex <- paste0("^(", paste0(ulb_priors, collapse = "|"), ")") cormat_pars <- c( "cor", "L", "rescor", "Lrescor", "corme", "Lme", "lncor", "Llncor", "cortime", "Lcortime" ) cormat_regex <- "^((lkj)|(constant))" simplex_pars <- c("simo", "theta", "sbhaz") simplex_regex <- "^((dirichlet)|(constant))\\(" lb_warning <- ub_warning <- "" for (i in seq_rows(prior)) { if (!nzchar(prior$prior[i]) || !prior$new[i]) { next } msg_prior <- .print_prior(prior[i, ]) has_lb_prior <- grepl(lb_priors_regex, prior$prior[i]) has_ulb_prior <- grepl(ulb_priors_regex, prior$prior[i]) base_bounds <- stan_base_prior(prior, c("lb", "ub"), sel_prior = prior[i, ]) has_lb <- nzchar(base_bounds[, "lb"]) has_ub <- nzchar(base_bounds[, "ub"]) if ((has_lb_prior || has_ulb_prior) && !has_lb) { lb_warning <- paste0(lb_warning, msg_prior, "\n") } if (has_ulb_prior && !has_ub) { ub_warning <- paste0(ub_warning, msg_prior, "\n") } if (prior$class[i] %in% cormat_pars && !grepl(cormat_regex, prior$prior[i])) { stop2( "The only supported prior for correlation matrices is ", "the 
'lkj' prior. See help(set_prior) for more details." ) } if (prior$class[i] %in% simplex_pars && !grepl(simplex_regex, prior$prior[i])) { stop2( "Currently 'dirichlet' is the only valid prior for ", "simplex parameters. See help(set_prior) for more details." ) } } if (nzchar(lb_warning)) { warning2( "It appears as if you have specified a lower bounded ", "prior on a parameter that has no natural lower bound.", "\nIf this is really what you want, please specify ", "argument 'lb' of 'set_prior' appropriately.", "\nWarning occurred for prior \n", lb_warning ) } if (nzchar(ub_warning)) { warning2( "It appears as if you have specified an upper bounded ", "prior on a parameter that has no natural upper bound.", "\nIf this is really what you want, please specify ", "argument 'ub' of 'set_prior' appropriately.", "\nWarning occurred for prior \n", ub_warning ) } invisible(TRUE) } # prepare special priors for use in Stan # required for priors that are not natively supported by Stan validate_special_prior <- function(x, ...) { UseMethod("validate_special_prior") } #' @export validate_special_prior.default <- function(x, prior = empty_prior(), ...) { prior } #' @export validate_special_prior.brmsprior <- function(x, bterms, ...) { if (!NROW(x)) { return(x) } if (is.null(x$new)) { x$new <- TRUE } x$remove <- FALSE x <- validate_special_prior(bterms, prior = x, ...) x <- x[!x$remove, ] x$new <- x$remove <- NULL x } #' @export validate_special_prior.mvbrmsterms <- function(x, prior = NULL, ...) { for (i in seq_along(x$terms)) { prior <- validate_special_prior(x$terms[[i]], prior = prior, ...) } prior } #' @export validate_special_prior.brmsterms <- function(x, data, prior = NULL, ...) { data <- subset_data(data, x) if (is.null(prior)) { prior <- empty_prior() } simple_sigma <- simple_sigma(x) for (dp in names(x$dpars)) { allow_autoscale <- dp == "mu" && simple_sigma prior <- validate_special_prior( x$dpars[[dp]], prior = prior, data = data, allow_autoscale = allow_autoscale, ... 
) } for (nlp in names(x$nlpars)) { prior <- validate_special_prior( x$nlpars[[nlp]], prior = prior, data = data, allow_autoscale = simple_sigma, ... ) } prior } #' @export validate_special_prior.btnl <- function(x, prior, ...) { prior } # prepare special priors that cannot be passed to Stan as is # @param allow_autoscale allow autoscaling by parameter sigma? # @return a possibly updated brmsprior object with additional attributes #' @export validate_special_prior.btl <- function(x, prior, data, allow_autoscale = TRUE, ...) { allow_autoscale <- as_one_logical(allow_autoscale) px <- check_prefix(x) # prepare special priors such as horseshoe special <- list() # the order of the classes doesn't matter but for consistency # it is still the same as the order in the Stan code special_classes <- c("b", "sds", "sdgp", "ar", "ma", "sderr", "sdcar", "sd") for (sc in special_classes) { index <- which(find_rows(prior, class = sc, coef = "", group = "", ls = px)) if (!length(index)) { next } stopifnot(length(index) <= 1L) sub_prior <- prior$prior[index] if (any(is_special_prior(sub_prior))) { # shrinkage priors have been specified if (sc %in% c("b", "ar", "ma")) { if (any(nzchar(prior[index, "lb"]) | nzchar(prior[index, "ub"]))) { stop2("Setting boundaries on coefficients is not ", "allowed when using special priors.") } # TODO: allow special priors also for 'cs' coefficients if (is.formula(x[["cs"]])) { stop2("Special priors are not yet allowed ", "in models with category-specific effects.") } } if (sc %in% c("sds", "sdgp", "sderr", "sdcar", "sd")) { if (any(prior[index, "lb"] != "0" | nzchar(prior[index, "ub"]))) { stop2("Setting custom boundaries on SD parameters is not ", "allowed when using special priors.") } } coef_indices <- which( find_rows(prior, class = sc, ls = px) & !find_rows(prior, class = sc, group = "", coef = "") ) if (any(nzchar(prior$prior[coef_indices]))) { stop2( "Defining separate priors for single coefficients or groups is not ", "allowed when using 
special priors for the whole class."
        )
      }
      tmp <- attributes(eval2(sub_prior))
      # autoscaling by 'sigma' is only applied where sensible
      tmp$autoscale <- isTRUE(tmp$autoscale) && allow_autoscale
      special[[sc]] <- tmp
    }
  }
  special_names <- unique(ufrom_list(special, "name"))
  if (length(special_names) > 1L) {
    stop2("Currently only one special prior per formula is allowed.")
  }
  prefix <- combine_prefix(px, keep_mu = TRUE)
  attributes(prior)$special[[prefix]] <- special
  prior
}

# validate argument 'sample_prior'
# @param sample_prior NULL, a logical, or one of "no", "yes", "only"
# @return one of "no", "yes", "only"
validate_sample_prior <- function(sample_prior) {
  options <- c("no", "yes", "only")
  if (is.null(sample_prior)) {
    sample_prior <- "no"
  }
  if (!is.character(sample_prior)) {
    # coerce logical input: TRUE -> "yes", FALSE -> "no"
    sample_prior <- as_one_logical(sample_prior)
    sample_prior <- if (sample_prior) "yes" else "no"
  }
  match.arg(sample_prior, options)
}

# get stored 'sample_prior' argument
get_sample_prior <- function(prior) {
  validate_sample_prior(attr(prior, "sample_prior", TRUE))
}

# create data.frames containing prior information
# @param ls optional named list overriding any of the standard columns;
#   its names must be a subset of all_cols_prior()
# @return a data.frame of class 'brmsprior'
brmsprior <- function(prior = "", class = "", coef = "", group = "",
                      resp = "", dpar = "", nlpar = "", lb = "", ub = "",
                      source = "", ls = list()) {
  if (length(ls)) {
    if (is.null(names(ls))) {
      # use stop2 for consistency with error handling elsewhere in the file
      stop2("Argument 'ls' must be named.")
    }
    names <- all_cols_prior()
    if (!all(names(ls) %in% names)) {
      # fixed message typo: "must some of" -> "must be some of"
      stop2("Names of 'ls' must be some of ", collapse_comma(names))
    }
    # promote the list entries to local variables used below
    for (v in names(ls)) {
      assign(v, ls[[v]])
    }
  }
  out <- data.frame(
    prior, class, coef, group, resp, dpar, nlpar, lb, ub, source,
    stringsAsFactors = FALSE
  )
  class(out) <- c("brmsprior", "data.frame")
  out
}

#' @describeIn set_prior Create an empty \code{brmsprior} object.
#' @export
empty_prior <- function() {
  # zero-row brmsprior: every column is a length-zero character vector
  char0 <- character(0)
  brmsprior(
    prior = char0, source = char0, class = char0,
    coef = char0, group = char0, resp = char0,
    dpar = char0, nlpar = char0, lb = char0, ub = char0
  )
}

# natural upper and lower bounds for priors
# @param prior name of a prior distribution (without its arguments)
# @return a named list with numeric elements 'lb' and 'ub';
#   unrecognized distributions are treated as unbounded
prior_bounds <- function(prior) {
  switch(prior,
    lognormal = list(lb = 0, ub = Inf),
    chi_square = list(lb = 0, ub = Inf),
    inv_chi_square = list(lb = 0, ub = Inf),
    scaled_inv_chi_square = list(lb = 0, ub = Inf),
    exponential = list(lb = 0, ub = Inf),
    gamma = list(lb = 0, ub = Inf),
    inv_gamma = list(lb = 0, ub = Inf),
    weibull = list(lb = 0, ub = Inf),
    frechet = list(lb = 0, ub = Inf),
    rayleigh = list(lb = 0, ub = Inf),
    pareto = list(lb = 0, ub = Inf),
    pareto_type_2 = list(lb = 0, ub = Inf),
    beta = list(lb = 0, ub = 1),
    von_mises = list(lb = -pi, ub = pi),
    # default: unbounded
    list(lb = -Inf, ub = Inf)
  )
}

# all columns of brmsprior objects
all_cols_prior <- function() {
  c("prior", "class", "coef", "group", "resp", "dpar", "nlpar",
    "lb", "ub", "source")
}

# relevant columns for duplication checks in brmsprior objects
rcols_prior <- function() {
  c("class", "coef", "group", "resp", "dpar", "nlpar")
}

# default Stan definitions for distributional parameters
# @param dpar name of a distributional parameter
# @param suffix optional suffix of the parameter name
# @param family optional brmsfamily object
# @return a named list with character elements 'lb' and 'ub'
#   in Stan syntax; "" means no bound on that side
dpar_bounds <- function(dpar, suffix = "", family = NULL) {
  dpar <- as_one_character(dpar)
  suffix <- usc(as_one_character(suffix))
  if (is.mixfamily(family)) {
    if (dpar_class(dpar) == "theta") {
      # mixture proportions are handled on the unconstrained scale
      return(list(lb = -Inf, ub = Inf))
    }
    # look up the bounds within the respective mixture component
    family <- family$mix[[as.numeric(dpar_id(dpar))]]
  }
  dpar_class <- dpar_class(dpar, family)
  if (is.customfamily(family)) {
    # custom families carry user-defined bounds
    lb <- family$lb[[dpar_class]]
    ub <- family$ub[[dpar_class]]
    return(nlist(lb, ub))
  }
  # 'ndt' is bounded above by the minimum response value in Stan
  min_Y <- glue("min_Y{suffix}")
  out <- switch(dpar_class,
    sigma = list(lb = "0", ub = ""),
    shape = list(lb = "0", ub = ""),
    nu = list(lb = "1", ub = ""),
    phi = list(lb = "0", ub = ""),
    kappa = list(lb = "0", ub = ""),
    beta = list(lb = "0", ub = ""),
    zi = list(lb = "0", ub = "1"),
    hu = list(lb = "0", ub = "1"),
    zoi = list(lb = "0", ub = "1"),
    coi = list(lb = "0", ub = "1"),
    bs = list(lb = "0", ub = ""),
    ndt = list(lb = "0", ub = min_Y),
    bias = list(lb = "0", ub = "1"),
    disc = list(lb = "0", ub = ""),
    quantile = list(lb = "0", ub = "1"),
    xi = list(lb = "", ub = ""),
    alpha = list(lb = "", ub = "")
  )
  out
}

# convert parameter bounds to Stan syntax
# vectorized over both 'lb' and 'ub' vectors
# @param bounds a named list with elements 'lb' and 'ub'
# @param default default output if no proper bounds are specified
convert_bounds2stan <- function(bounds, default = "") {
  lb <- as.character(bounds$lb)
  ub <- as.character(bounds$ub)
  stopifnot(length(lb) == length(ub))
  default <- as_one_character(default, allow_na = TRUE)
  if (any(lb %in% "Inf")) {
    stop2("Lower boundaries cannot be positive infinite.")
  }
  if (any(ub %in% "-Inf")) {
    stop2("Upper boundaries cannot be negative infinite.")
  }
  # empty, NA, or infinite bounds translate to "no bound"
  lb <- ifelse(
    !is.na(lb) & !lb %in% c("NA", "-Inf", ""),
    paste0("lower=", lb), ""
  )
  ub <- ifelse(
    !is.na(ub) & !ub %in% c("NA", "Inf", ""),
    paste0("upper=", ub), ""
  )
  out <- ifelse(
    nzchar(lb) & nzchar(ub), glue("<{lb},{ub}>"),
    ifelse(
      nzchar(lb) & !nzchar(ub), glue("<{lb}>"),
      ifelse(
        !nzchar(lb) & nzchar(ub), glue("<{ub}>"),
        default
      )
    )
  )
  out
}

# convert parameter bounds in Stan syntax
# TODO: vectorize over a character vector of bounds?
# complicated because of a mix of character and numeric values
# to a named list with elements 'lb' and 'ub'
convert_stan2bounds <- function(bound, default = c(-Inf, Inf)) {
  bound <- as_one_character(bound)
  stopifnot(length(default) == 2L)
  out <- list(lb = default[[1]], ub = default[[2]])
  if (!is.na(bound) && isTRUE(nzchar(bound))) {
    # the match includes the leading '<' or ',' plus 'lower=',
    # so the actual value starts at character 8
    lb <- get_matches("(<|,)lower=[^,>]+", bound)
    if (isTRUE(nzchar(lb))) {
      lb <- substr(lb, 8, nchar(lb))
      # keep as character if not parseable as a number
      lb_num <- SW(as.numeric(lb))
      if (!is.na(lb_num)) {
        lb <- lb_num
      }
      out$lb <- lb
    }
    ub <- get_matches("(<|,)upper=[^,>]+", bound)
    if (isTRUE(nzchar(ub))) {
      ub <- substr(ub, 8, nchar(ub))
      ub_num <- SW(as.numeric(ub))
      if (!is.na(ub_num)) {
        ub <- ub_num
      }
      out$ub <- ub
    }
  }
  out
}

#' Checks if argument is a \code{brmsprior} object
#'
#' @param x An \R object
#'
#' @export
is.brmsprior <- function(x) {
  inherits(x, "brmsprior")
}

#' Print method for \code{brmsprior} objects
#'
#' @param x An object of class \code{brmsprior}.
#' @param show_df Logical; Print priors as a single
#'   \code{data.frame} (\code{TRUE}) or as a sequence of
#'   sampling statements (\code{FALSE})?
#' @param ... Currently ignored.
#'
#' @export
print.brmsprior <- function(x, show_df = NULL, ...) {
  if (is.null(show_df)) {
    # default to data.frame display when printing more than one prior
    show_df <- nrow(x) > 1L
  }
  show_df <- as_one_logical(show_df)
  y <- prepare_print_prior(x)
  if (show_df) {
    print.data.frame(y, row.names = FALSE, ...)
  } else {
    cat(collapse(.print_prior(y), "\n"))
  }
  invisible(x)
}

# prepare pretty printing of brmsprior objects
prepare_print_prior <- function(x) {
  stopifnot(is.brmsprior(x))
  if (is.null(x$source)) {
    x$source <- ""
  }
  x$source[!nzchar(x$source)] <- "(unknown)"
  # vectorize priors and bounds for pretty printing
  # TODO: improve efficiency of adding vectorization tags
  for (i in which(!nzchar(x$prior))) {
    base_prior <- stan_base_prior(x, sel_prior = x[i, ])
    if (nzchar(base_prior)) {
      x$prior[i] <- base_prior
      x$source[i] <- "(vectorized)"
    } else {
      x$prior[i] <- "(flat)"
    }
  }
  # inherit missing bounds from the corresponding base prior
  for (i in which(!nzchar(x$lb) & !nzchar(x$ub))) {
    base_bounds <- stan_base_prior(x, c("lb", "ub"), sel_prior = x[i, ])
    x$lb[i] <- base_bounds[, "lb"]
    x$ub[i] <- base_bounds[, "ub"]
  }
  x
}

# prepare text for print.brmsprior
# @return a character vector of sampling statements, one per prior row
.print_prior <- function(x) {
  group <- usc(x$group)
  resp <- usc(x$resp)
  dpar <- usc(x$dpar)
  nlpar <- usc(x$nlpar)
  coef <- usc(x$coef)
  if (any(nzchar(c(resp, dpar, nlpar, coef)))) {
    group <- usc(group, "suffix")
  }
  bound <- convert_bounds2stan(x[c("lb", "ub")])
  bound <- ifelse(nzchar(bound), paste0(bound, " "), "")
  tilde <- ifelse(nzchar(x$class) | nzchar(group) | nzchar(coef), " ~ ", "")
  prior <- ifelse(nzchar(x$prior), x$prior, "(flat)")
  paste0(bound, x$class, group, resp, dpar, nlpar, coef, tilde, prior)
}

# combine multiple brmsprior objects into one brmsprior
#' @export
c.brmsprior <- function(x, ..., replace = FALSE) {
  dots <- list(...)
  if (all(sapply(dots, is.brmsprior))) {
    replace <- as_one_logical(replace)
    # don't use 'c()' here to avoid creating a recursion
    out <- do_call(rbind, list(x, ...))
    if (replace) {
      # update duplicated priors: later specifications win
      out <- unique(out, fromLast = TRUE)
    }
  } else {
    if (length(dots)) {
      stop2("Cannot add '", class(dots[[1]])[1], "' objects to the prior.")
    }
    out <- c(as.data.frame(x))
  }
  out
}

#' @export
"+.brmsprior" <- function(e1, e2) {
  # adding NULL is a no-op
  if (is.null(e2)) {
    return(e1)
  }
  if (!is.brmsprior(e2)) {
    stop2("Cannot add '", class(e2)[1], "' objects to the prior.")
  }
  c(e1, e2)
}

#' Transform into a brmsprior object
#'
#' Try to transform an object into a \code{brmsprior} object.
#'
#' @param x An object to be transformed.
#' @return A \code{brmsprior} object if the transformation was possible.
#'
#' @export
as.brmsprior <- function(x) {
  if (is.brmsprior(x)) {
    return(x)
  }
  x <- as.data.frame(x)
  if (!"prior" %in% names(x)) {
    stop2("Column 'prior' is required.")
  }
  x$prior <- as.character(x$prior)
  # fill in missing columns with their default values
  defaults <- c(
    class = "b", coef = "", group = "", resp = "",
    dpar = "", nlpar = "", lb = NA, ub = NA
  )
  for (v in names(defaults)) {
    if (!v %in% names(x)) {
      x[[v]] <- defaults[v]
    }
    x[[v]] <- as.character(x[[v]])
  }
  x$source <- "user"
  all_vars <- c("prior", names(defaults), "source")
  x <- x[, all_vars, drop = FALSE]
  class(x) <- c("brmsprior", "data.frame")
  x
}

#' @export
duplicated.brmsprior <- function(x, incomparables = FALSE, ...) {
  # compare only specific columns of the brmsprior object
  duplicated.data.frame(x[, rcols_prior()], incomparables, ...)
}

# evaluate the dirichlet prior of simplex parameters
# avoid name clashing with the dirichlet family
# @param prior a character vector of the form 'dirichlet(...)'
# @param len desired length of the prior concentration vector
# @param env environment in which to search for data
# @return a numeric vector of prior concentration values
eval_dirichlet <- function(prior, len = NULL, env = NULL) {
  dirichlet <- function(...)
{ out <- try(as.numeric(c(...))) if (is_try_error(out)) { stop2("Something went wrong. Did you forget to store ", "auxiliary data in the 'data2' argument?") } if (anyNA(out) || any(out <= 0)) { stop2("The dirichlet prior expects positive values.") } if (!is.null(len)) { if (length(out) == 1L) { out <- rep(out, len) } if (length(out) != len) { stop2("Invalid Dirichlet prior. Expected input of length ", len, ".") } } return(out) } prior <- as_one_character(prior) if (!nzchar(prior)) { prior <- "dirichlet(1)" } eval2(prior, envir = env, enclos = environment()) } #' Regularized horseshoe priors in \pkg{brms} #' #' Function used to set up regularized horseshoe priors and related #' hierarchical shrinkage priors for population-level effects in \pkg{brms}. The #' function does not evaluate its arguments -- it exists purely to help set up #' the model. #' #' @param df Degrees of freedom of student-t prior of the #' local shrinkage parameters. Defaults to \code{1}. #' @param scale_global Scale of the student-t prior of the global shrinkage #' parameter. Defaults to \code{1}. #' In linear models, \code{scale_global} will internally be #' multiplied by the residual standard deviation parameter \code{sigma}. #' @param df_global Degrees of freedom of student-t prior of the #' global shrinkage parameter. Defaults to \code{1}. If \code{df_global} #' is greater \code{1}, the shape of the prior will no longer resemble #' a horseshoe and it may be more appropriately called an hierarchical #' shrinkage prior in this case. #' @param scale_slab Scale of the Student-t slab. Defaults to \code{2}. The #' original unregularized horseshoe prior is obtained by setting #' \code{scale_slab} to infinite, which we can approximate in practice by #' setting it to a very large real value. #' @param df_slab Degrees of freedom of the student-t slab. #' Defaults to \code{4}. #' @param par_ratio Ratio of the expected number of non-zero coefficients #' to the expected number of zero coefficients. 
If specified,
#'   \code{scale_global} is ignored and internally computed as
#'   \code{par_ratio / sqrt(N)}, where \code{N} is the total number
#'   of observations in the data.
#' @param autoscale Logical; indicating whether the horseshoe
#'   prior should be scaled using the residual standard deviation
#'   \code{sigma} if possible and sensible (defaults to \code{TRUE}).
#'   Autoscaling is not applied for distributional parameters or
#'   when the model does not contain the parameter \code{sigma}.
#' @param main Logical (defaults to \code{FALSE}); only relevant if the horseshoe
#'   prior spans multiple parameter classes. In this case, only arguments given
#'   in the single instance where \code{main} is \code{TRUE} will be used.
#'   Arguments given in other instances of the prior will be ignored.
#'   See the Examples section below.
#'
#' @return A character string obtained by \code{match.call()} with
#'   additional arguments.
#'
#' @details
#' The horseshoe prior is a special shrinkage prior initially proposed by
#' Carvalho et al. (2009).
#' It is symmetric around zero with fat tails and an infinitely large spike
#' at zero. This makes it ideal for sparse models that have
#' many regression coefficients, although only a minority of them are non-zero.
#' The horseshoe prior can be applied on all population-level effects at once
#' (excluding the intercept) by using \code{set_prior("horseshoe(1)")}.
#' The \code{1} implies that the student-t prior of the local shrinkage
#' parameters has 1 degree of freedom. This may, however, lead to an
#' increased number of divergent transitions in \pkg{Stan}.
#' Accordingly, increasing the degrees of freedom to slightly higher values
#' (e.g., \code{3}) may often be a better option, although the prior
#' no longer resembles a horseshoe in this case.
#' Further, the scale of the global shrinkage parameter plays an important role
#' in the amount of shrinkage applied.
#' It defaults to \code{1},
#' but this may result in too little shrinkage (Piironen & Vehtari, 2016).
#' It is thus possible to change the scale using argument \code{scale_global}
#' of the horseshoe prior, for instance \code{horseshoe(1, scale_global = 0.5)}.
#' In linear models, \code{scale_global} will internally be multiplied by the
#' residual standard deviation parameter \code{sigma}. See Piironen and
#' Vehtari (2016) for recommendations on how to properly set the global scale.
#' The degrees of freedom of the global shrinkage prior may also be
#' adjusted via argument \code{df_global}.
#' Piironen and Vehtari (2017) recommend specifying the ratio of the
#' expected number of non-zero coefficients to the expected number of zero
#' coefficients (\code{par_ratio}) rather than \code{scale_global} directly.
#' As proposed by Piironen and Vehtari (2017), an additional regularization
#' is applied that only affects non-zero coefficients. The amount of
#' regularization can be controlled via \code{scale_slab} and \code{df_slab}.
#' To make sure that shrinkage can equally affect all coefficients,
#' predictors should be on the same scale.
#' Generally, models with horseshoe priors are more likely than other models
#' to have divergent transitions so that increasing \code{adapt_delta}
#' from \code{0.8} to values closer to \code{1} will often be necessary.
#' See the documentation of \code{\link{brm}} for instructions
#' on how to increase \code{adapt_delta}.
#'
#' Currently, the following classes support the horseshoe prior: \code{b}
#' (overall regression coefficients), \code{sds} (SDs of smoothing splines),
#' \code{sdgp} (SDs of Gaussian processes), \code{ar} (autoregressive
#' coefficients), \code{ma} (moving average coefficients), \code{sderr} (SD of
#' latent residuals), \code{sdcar} (SD of spatial CAR structures), \code{sd}
#' (SD of varying coefficients).
#'
#' @references
#' Carvalho, C. M., Polson, N. G., & Scott, J. G. (2009).
Handling sparsity via #' the horseshoe. Artificial Intelligence and Statistics. #' \url{http://proceedings.mlr.press/v5/carvalho09a} #' #' Piironen J. & Vehtari A. (2017). On the Hyperprior Choice for the Global #' Shrinkage Parameter in the Horseshoe Prior. Artificial Intelligence and #' Statistics. \url{https://arxiv.org/pdf/1610.05559v1.pdf} #' #' Piironen, J., and Vehtari, A. (2017). Sparsity information and regularization #' in the horseshoe and other shrinkage priors. Electronic Journal of #' Statistics. \url{https://arxiv.org/abs/1707.01694} #' #' @seealso \code{\link{set_prior}} #' #' @examples #' set_prior(horseshoe(df = 3, par_ratio = 0.1)) #' #' # specify the horseshoe prior across multiple parameter classes #' set_prior(horseshoe(df = 3, par_ratio = 0.1, main = TRUE), class = "b") + #' set_prior(horseshoe(), class = "sd") #' #' @export horseshoe <- function(df = 1, scale_global = 1, df_global = 1, scale_slab = 2, df_slab = 4, par_ratio = NULL, autoscale = TRUE, main = FALSE) { out <- deparse0(match.call()) name <- "horseshoe" df <- as.numeric(df) df_global <- as.numeric(df_global) df_slab <- as.numeric(df_slab) scale_global <- as.numeric(scale_global) scale_slab <- as.numeric(scale_slab) main <- as_one_logical(main) if (!isTRUE(df > 0)) { stop2("Invalid horseshoe prior: Degrees of freedom of ", "the local priors must be a single positive number.") } if (!isTRUE(df_global > 0)) { stop2("Invalid horseshoe prior: Degrees of freedom of ", "the global prior must be a single positive number.") } if (!isTRUE(scale_global > 0)) { stop2("Invalid horseshoe prior: Scale of the global ", "prior must be a single positive number.") } if (!isTRUE(df_slab > 0)) { stop2("Invalid horseshoe prior: Degrees of freedom of ", "the slab part must be a single positive number.") } if (!isTRUE(scale_slab > 0)) { stop2("Invalid horseshoe prior: Scale of the slab ", "part must be a single positive number.") } if (!is.null(par_ratio)) { par_ratio <- as.numeric(par_ratio) if 
(!isTRUE(par_ratio > 0)) { stop2("Argument 'par_ratio' must be greater 0.") } } autoscale <- as_one_logical(autoscale) att <- nlist( name, df, df_global, df_slab, scale_global, scale_slab, par_ratio, autoscale, main ) attributes(out)[names(att)] <- att out } #' R2D2 Priors in \pkg{brms} #' #' Function used to set up R2D2 priors for population-level effects in #' \pkg{brms}. The function does not evaluate its arguments -- it exists purely #' to help set up the model. #' #' @param mean_R2 Mean of the Beta prior on the coefficient of determination R^2. #' @param prec_R2 Precision of the Beta prior on the coefficient of determination R^2. #' @param cons_D2 Concentration vector of the Dirichlet prior on the variance #' decomposition parameters. Lower values imply more shrinkage. #' @param autoscale Logical; indicating whether the R2D2 #' prior should be scaled using the residual standard deviation #' \code{sigma} if possible and sensible (defaults to \code{TRUE}). #' Autoscaling is not applied for distributional parameters or #' when the model does not contain the parameter \code{sigma}. #' @param main Logical (defaults to \code{FALSE}); only relevant if the R2D2 #' prior spans multiple parameter classes. In this case, only arguments given #' in the single instance where \code{main} is \code{TRUE} will be used. #' Arguments given in other instances of the prior will be ignored. #' See the Examples section below. #' #' @details #' Currently, the following classes support the R2D2 prior: \code{b} #' (overall regression coefficients), \code{sds} (SDs of smoothing splines), #' \code{sdgp} (SDs of Gaussian processes), \code{ar} (autoregressive #' coefficients), \code{ma} (moving average coefficients), \code{sderr} (SD of #' latent residuals), \code{sdcar} (SD of spatial CAR structures), \code{sd} #' (SD of varying coefficients). 
#' #' Even when the R2D2 prior is applied to multiple parameter classes at once, #' the concentration vector (argument \code{cons_D2}) has to be provided #' jointly in the the one instance of the prior where \code{main = TRUE}. The #' order in which the elements of concentration vector correspond to the #' classes' coefficients is the same as the order of the classes provided #' above. #' #' @references #' Zhang, Y. D., Naughton, B. P., Bondell, H. D., & Reich, B. J. (2020). #' Bayesian regression using a prior on the model fit: The R2-D2 shrinkage #' prior. Journal of the American Statistical Association. #' \url{https://arxiv.org/pdf/1609.00046.pdf} #' #' Aguilar J. E. & Bürkner P. C. (2022). Intuitive Joint Priors for Bayesian #' Linear Multilevel Models: The R2D2M2 prior. ArXiv preprint. #' \url{https://arxiv.org/pdf/2208.07132.pdf} #' #' @seealso \code{\link{set_prior}} #' #' @examples #' set_prior(R2D2(mean_R2 = 0.8, prec_R2 = 10)) #' #' # specify the R2D2 prior across multiple parameter classes #' set_prior(R2D2(mean_R2 = 0.8, prec_R2 = 10, main = TRUE), class = "b") + #' set_prior(R2D2(), class = "sd") #' #' @export R2D2 <- function(mean_R2 = 0.5, prec_R2 = 2, cons_D2 = 0.5, autoscale = TRUE, main = FALSE) { out <- deparse0(match.call()) name <- "R2D2" mean_R2 <- as_one_numeric(mean_R2) prec_R2 <- as_one_numeric(prec_R2) cons_D2 <- as.numeric(cons_D2) main <- as_one_logical(main) if (!(mean_R2 > 0 && mean_R2 < 1)) { stop2("Invalid R2D2 prior: Mean of the R2 prior ", "must be a single number in (0, 1).") } if (prec_R2 <= 0) { stop2("Invalid R2D2 prior: Precision of the R2 prior ", "must be a single positive number.") } if (any(cons_D2 <= 0)) { stop2("Invalid R2D2 prior: Concentration of the D2 prior ", "must be a vector of positive numbers.") } autoscale <- as_one_logical(autoscale) att <- nlist(name, mean_R2, prec_R2, cons_D2, autoscale, main) attributes(out)[names(att)] <- att out } #' (Defunct) Set up a lasso prior in \pkg{brms} #' #' This functionality 
is no longer supported as of brms version 2.19.2. Please
#' use the \code{\link{horseshoe}} or \code{\link{R2D2}} shrinkage priors instead.
#'
#' @param df Degrees of freedom of the chi-square prior of the inverse tuning
#'   parameter. Defaults to \code{1}.
#' @param scale Scale of the lasso prior. Defaults to \code{1}.
#'
#' @return An error indicating that the lasso prior is no longer supported.
#'
#' @references
#' Park, T., & Casella, G. (2008). The Bayesian Lasso. Journal of the American
#' Statistical Association, 103(482), 681-686.
#'
#' @seealso \code{\link{set_prior}}, \code{\link{horseshoe}}, \code{\link{R2D2}}
#'
#' @export
lasso <- function(df = 1, scale = 1) {
  # defunct as of 2.19.2: unconditionally errors to redirect users
  stop2("The lasso prior is no longer supported as of brms version 2.19.2. ",
        "Please use the horseshoe or R2D2 shrinkage priors instead.")
}

# check for the usage of special priors
# @param prior a character vector of priors
# @param target optional special priors to search for;
#   if NULL search for all special priors
# @return a logical vector equal to the length of 'prior'
is_special_prior <- function(prior, target = NULL) {
  stopifnot(is.character(prior))
  if (is.null(target)) {
    # 'lasso' is defunct but still detected so that old priors error clearly
    target <- c("horseshoe", "R2D2", "lasso")
  }
  # match e.g. "horseshoe(" at the very beginning of the prior string
  regex <- paste0("^", regex_or(target), "\\(")
  grepl(regex, prior)
}

# extract special prior information
# @param prior a brmsprior object
# @param px object from which the prefix can be extracted
# @param class parameter class to be checked. the default ensures that
#   the presence of any special prior is always detected
# @param main if TRUE, return the single instance of the special prior
#   marked with 'main = TRUE' (the one whose arguments are used)
get_special_prior <- function(prior, px, class = NULL, main = FALSE) {
  out <- attr(prior, "special")
  prefix <- combine_prefix(px, keep_mu = TRUE)
  out <- out[[prefix]]
  if (!length(out)) {
    return(NULL)
  }
  if (main) {
    # get the main special prior to extract arguments from
    if (length(out) == 1L) {
      # only one class present which must then be main
      out <- out[[1]]
    } else {
      main <- which(ufrom_list(out, "main"))
      if (length(main) != 1L) {
        stop2("If special priors for multiple classes are given, ",
              "exactly one of them must be marked with 'main = TRUE'.")
      }
      out <- out[[main]]
    }
  } else if (!is.null(class)) {
    out <- out[[class]]
  } else {
    # just extract info on any class for example the first
    out <- out[[1]]
  }
  out
}

# is special prior information present?
# @param prior a brmsprior object
# @param px optional object (or data.frame of such objects, as occurs for
#   group-level parameters) from which the parameter prefix can be extracted
# @param class optional parameter class to restrict the check to
has_special_prior <- function(prior, px = NULL, class = NULL) {
  if (is.null(px)) {
    # is any special prior present?
    return(length(rmNULL(attr(prior, "special"))) > 0L)
  }
  .has_special_prior <- function(px) {
    !is.null(get_special_prior(prior, px = px, class = class))
  }
  if (is.data.frame(px)) {
    # this case occurs for group-level parameters
    out <- FALSE
    for (i in seq_rows(px)) {
      out <- out || .has_special_prior(px[i, ])
    }
  } else {
    out <- .has_special_prior(px)
  }
  out
}

# check if parameters should be sampled only from the prior
is_prior_only <- function(prior) {
  is_equal(get_sample_prior(prior), "only")
}
brms/R/kfold.R0000644000176200001440000003746414361545260012661 0ustar liggesusers#' K-Fold Cross-Validation
#'
#' Perform exact K-fold cross-validation by refitting the model \eqn{K}
#' times each leaving out one-\eqn{K}th of the original data.
#' Folds can be run in parallel using the \pkg{future} package.
#' #' @aliases kfold #' #' @inheritParams loo.brmsfit #' @param K The number of subsets of equal (if possible) size #' into which the data will be partitioned for performing #' \eqn{K}-fold cross-validation. The model is refit \code{K} times, each time #' leaving out one of the \code{K} subsets. If \code{K} is equal to the total #' number of observations in the data then \eqn{K}-fold cross-validation is #' equivalent to exact leave-one-out cross-validation. #' @param Ksub Optional number of subsets (of those subsets defined by \code{K}) #' to be evaluated. If \code{NULL} (the default), \eqn{K}-fold cross-validation #' will be performed on all subsets. If \code{Ksub} is a single integer, #' \code{Ksub} subsets (out of all \code{K}) subsets will be randomly chosen. #' If \code{Ksub} consists of multiple integers or a one-dimensional array #' (created via \code{as.array}) potentially of length one, the corresponding #' subsets will be used. This argument is primarily useful, if evaluation of #' all subsets is infeasible for some reason. #' @param folds Determines how the subsets are being constructed. #' Possible values are \code{NULL} (the default), \code{"stratified"}, #' \code{"grouped"}, or \code{"loo"}. May also be a vector of length #' equal to the number of observations in the data. Alters the way #' \code{group} is handled. More information is provided in the 'Details' #' section. #' @param group Optional name of a grouping variable or factor in the model. #' What exactly is done with this variable depends on argument \code{folds}. #' More information is provided in the 'Details' section. #' @param exact_loo Deprecated! Please use \code{folds = "loo"} instead. #' @param save_fits If \code{TRUE}, a component \code{fits} is added to #' the returned object to store the cross-validated \code{brmsfit} #' objects and the indices of the omitted observations for each fold. #' Defaults to \code{FALSE}. 
#' @param recompile Logical, indicating whether the Stan model should be
#'   recompiled. This may be necessary if you are running \code{kfold} on
#'   another machine than the one used to fit the model.
#' @param future_args A list of further arguments passed to
#'   \code{\link[future:future]{future}} for additional control over parallel
#'   execution if activated.
#' @param ... Further arguments passed to \code{\link{brm}}.
#'
#' @return \code{kfold} returns an object that has a similar structure to the
#'   objects returned by the \code{loo} and \code{waic} methods and
#'   can be used with the same post-processing functions.
#'
#' @details The \code{kfold} function performs exact \eqn{K}-fold
#'   cross-validation. First the data are partitioned into \eqn{K} folds
#'   (i.e. subsets) of equal (or as close to equal as possible) size by default.
#'   Then the model is refit \eqn{K} times, each time leaving out one of the
#'   \code{K} subsets. If \eqn{K} is equal to the total number of observations
#'   in the data then \eqn{K}-fold cross-validation is equivalent to exact
#'   leave-one-out cross-validation (to which \code{loo} is an efficient
#'   approximation). The \code{compare_ic} function is also compatible with
#'   the objects returned by \code{kfold}.
#'
#'   The subsets can be constructed in multiple different ways:
#'   \itemize{
#'   \item If both \code{folds} and \code{group} are \code{NULL}, the subsets
#'   are randomly chosen so that they have equal (or as close to equal as
#'   possible) size.
#'   \item If \code{folds} is \code{NULL} but \code{group} is specified, the
#'   data is split up into subsets, each time omitting all observations of one
#'   of the factor levels, while ignoring argument \code{K}.
#'   \item If \code{folds = "stratified"} the subsets are stratified after
#'   \code{group} using \code{\link[loo:kfold-helpers]{loo::kfold_split_stratified}}.
#' \item If \code{folds = "grouped"} the subsets are split by #' \code{group} using \code{\link[loo:kfold-helpers]{loo::kfold_split_grouped}}. #' \item If \code{folds = "loo"} exact leave-one-out cross-validation #' will be performed and \code{K} will be ignored. Further, if \code{group} #' is specified, all observations corresponding to the factor level of the #' currently predicted single value are omitted. Thus, in this case, the #' predicted values are only a subset of the omitted ones. #' \item If \code{folds} is a numeric vector, it must contain one element per #' observation in the data. Each element of the vector is an integer in #' \code{1:K} indicating to which of the \code{K} folds the corresponding #' observation belongs. There are some convenience functions available in #' the \pkg{loo} package that create integer vectors to use for this purpose #' (see the Examples section below and also the #' \link[loo:kfold-helpers]{kfold-helpers} page). #' } #' #' When running \code{kfold} on a \code{brmsfit} created with the #' \pkg{cmdstanr} backend in a different \R session, several recompilations #' will be triggered because by default, \pkg{cmdstanr} writes the model #' executable to a temporary directory. To avoid that, set option #' \code{"cmdstanr_write_stan_file_dir"} to a nontemporary path of your choice #' before creating the original \code{brmsfit} (see section 'Examples' below). 
#' #' @examples #' \dontrun{ #' fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), #' data = epilepsy, family = poisson()) #' # throws warning about some pareto k estimates being too high #' (loo1 <- loo(fit1)) #' # perform 10-fold cross validation #' (kfold1 <- kfold(fit1, chains = 1)) #' #' # use the future package for parallelization #' library(future) #' plan(multiprocess) #' kfold(fit1, chains = 1) #' #' ## to avoid recompilations when running kfold() on a 'cmdstanr'-backend fit #' ## in a fresh R session, set option 'cmdstanr_write_stan_file_dir' before #' ## creating the initial 'brmsfit' #' ## CAUTION: the following code creates some files in the current working #' ## directory: two 'model_.stan' files, one 'model_(.exe)' #' ## executable, and one 'fit_cmdstanr_.rds' file #' set.seed(7) #' fname <- paste0("fit_cmdstanr_", sample.int(.Machine$integer.max, 1)) #' options(cmdstanr_write_stan_file_dir = getwd()) #' fit_cmdstanr <- brm(rate ~ conc + state, #' data = Puromycin, #' backend = "cmdstanr", #' file = fname) #' # now restart the R session and run the following (after attaching 'brms') #' set.seed(7) #' fname <- paste0("fit_cmdstanr_", sample.int(.Machine$integer.max, 1)) #' fit_cmdstanr <- brm(rate ~ conc + state, #' data = Puromycin, #' backend = "cmdstanr", #' file = fname) #' kfold_cmdstanr <- kfold(fit_cmdstanr, K = 2) #' } #' #' @seealso \code{\link{loo}}, \code{\link{reloo}} #' #' @importFrom loo kfold #' @export kfold #' @export kfold.brmsfit <- function(x, ..., K = 10, Ksub = NULL, folds = NULL, group = NULL, exact_loo = NULL, compare = TRUE, resp = NULL, model_names = NULL, save_fits = FALSE, recompile = NULL, future_args = list()) { args <- split_dots(x, ..., model_names = model_names) use_stored <- ulapply(args$models, function(x) is_equal(x$kfold$K, K)) if (!is.null(exact_loo) && as_one_logical(exact_loo)) { warning2("'exact_loo' is deprecated. 
Please use folds = 'loo' instead.") folds <- "loo" } c(args) <- nlist( criterion = "kfold", K, Ksub, folds, group, compare, resp, save_fits, recompile, future_args, use_stored ) do_call(compute_loolist, args) } # helper function to perform k-fold cross-validation # @inheritParams kfold.brmsfit # @param model_name ignored but included to avoid being passed to '...' .kfold <- function(x, K, Ksub, folds, group, save_fits, newdata, resp, model_name, recompile = NULL, future_args = list(), newdata2 = NULL, ...) { stopifnot(is.brmsfit(x), is.list(future_args)) if (is.brmsfit_multiple(x)) { warn_brmsfit_multiple(x) class(x) <- "brmsfit" } if (is.null(newdata)) { newdata <- x$data } else { newdata <- as.data.frame(newdata) } if (is.null(newdata2)) { newdata2 <- x$data2 } else { bterms <- brmsterms(x$formula) newdata2 <- validate_data2(newdata2, bterms) } N <- nrow(newdata) # validate argument 'group' if (!is.null(group)) { valid_groups <- get_cat_vars(x) if (length(group) != 1L || !group %in% valid_groups) { stop2("Group '", group, "' is not a valid grouping factor. 
", "Valid groups are: \n", collapse_comma(valid_groups)) } gvar <- factor(get(group, newdata)) } # validate argument 'folds' if (is.null(folds)) { if (is.null(group)) { fold_type <- "random" folds <- loo::kfold_split_random(K, N) } else { fold_type <- "group" folds <- as.numeric(gvar) K <- length(levels(gvar)) message("Setting 'K' to the number of levels of '", group, "' (", K, ")") } } else if (is.character(folds) && length(folds) == 1L) { opts <- c("loo", "stratified", "grouped") fold_type <- match.arg(folds, opts) req_group_opts <- c("stratified", "grouped") if (fold_type %in% req_group_opts && is.null(group)) { stop2("Argument 'group' is required for fold type '", fold_type, "'.") } if (fold_type == "loo") { folds <- seq_len(N) K <- N message("Setting 'K' to the number of observations (", K, ")") } else if (fold_type == "stratified") { folds <- loo::kfold_split_stratified(K, gvar) } else if (fold_type == "grouped") { folds <- loo::kfold_split_grouped(K, gvar) } } else { fold_type <- "custom" folds <- as.numeric(factor(folds)) if (length(folds) != N) { stop2("If 'folds' is a vector, it must be of length N.") } K <- max(folds) message("Setting 'K' to the number of folds (", K, ")") } # validate argument 'Ksub' if (is.null(Ksub)) { Ksub <- seq_len(K) } else { # see issue #441 for reasons to check for arrays is_array_Ksub <- is.array(Ksub) Ksub <- as.integer(Ksub) if (any(Ksub <= 0 | Ksub > K)) { stop2("'Ksub' must contain positive integers not larger than 'K'.") } if (length(Ksub) == 1L && !is_array_Ksub) { Ksub <- sample(seq_len(K), Ksub) } else { Ksub <- unique(Ksub) } Ksub <- sort(Ksub) } # split dots for use in log_lik and update dots <- list(...) 
ll_arg_names <- arg_names("log_lik") ll_args <- dots[intersect(names(dots), ll_arg_names)] ll_args$allow_new_levels <- TRUE ll_args$resp <- resp ll_args$combine <- TRUE up_args <- dots[setdiff(names(dots), ll_arg_names)] up_args$refresh <- 0 # function to be run inside future::future .kfold_k <- function(k) { if (fold_type == "loo" && !is.null(group)) { omitted <- which(folds == folds[k]) predicted <- k } else { omitted <- predicted <- which(folds == k) } newdata_omitted <- newdata[-omitted, , drop = FALSE] fit <- x up_args$object <- fit up_args$newdata <- newdata_omitted up_args$data2 <- subset_data2(newdata2, -omitted) fit <- SW(do_call(update, up_args)) ll_args$object <- fit ll_args$newdata <- newdata[predicted, , drop = FALSE] ll_args$newdata2 <- subset_data2(newdata2, predicted) lppds <- do_call(log_lik, ll_args) out <- nlist(lppds, omitted, predicted) if (save_fits) out$fit <- fit return(out) } futures <- vector("list", length(Ksub)) lppds <- obs_order <- vector("list", length(Ksub)) if (save_fits) { fits <- array(list(), dim = c(length(Ksub), 3)) dimnames(fits) <- list(NULL, c("fit", "omitted", "predicted")) } x <- recompile_model(x, recompile = recompile) future_args$FUN <- .kfold_k future_args$seed <- TRUE for (k in Ksub) { ks <- match(k, Ksub) message("Fitting model ", k, " out of ", K) future_args$args <- list(k) futures[[ks]] <- do_call("futureCall", future_args, pkg = "future") } for (k in Ksub) { ks <- match(k, Ksub) tmp <- future::value(futures[[ks]]) if (save_fits) { fits[ks, ] <- tmp[c("fit", "omitted", "predicted")] } obs_order[[ks]] <- tmp$predicted lppds[[ks]] <- tmp$lppds } lppds <- do_call(cbind, lppds) elpds <- apply(lppds, 2, log_mean_exp) # make sure elpds are put back in the right order obs_order <- unlist(obs_order) elpds <- elpds[order(obs_order)] # compute effective number of parameters ll_args$object <- x ll_args$newdata <- newdata ll_args$newdata2 <- newdata2 ll_full <- do_call(log_lik, ll_args) lpds <- apply(ll_full, 2, log_mean_exp) 
ps <- lpds - elpds # put everything together in a loo object pointwise <- cbind(elpd_kfold = elpds, p_kfold = ps, kfoldic = -2 * elpds) est <- colSums(pointwise) se_est <- sqrt(nrow(pointwise) * apply(pointwise, 2, var)) estimates <- cbind(Estimate = est, SE = se_est) rownames(estimates) <- colnames(pointwise) out <- nlist(estimates, pointwise) atts <- nlist(K, Ksub, group, folds, fold_type) attributes(out)[names(atts)] <- atts if (save_fits) { out$fits <- fits out$data <- newdata } structure(out, class = c("kfold", "loo")) } #' Predictions from K-Fold Cross-Validation #' #' Compute and evaluate predictions after performing K-fold #' cross-validation via \code{\link{kfold}}. #' #' @param x Object of class \code{'kfold'} computed by \code{\link{kfold}}. #' For \code{kfold_predict} to work, the fitted model objects need to have #' been stored via argument \code{save_fits} of \code{\link{kfold}}. #' @param method The method used to make predictions. Either \code{"predict"} #' or \code{"fitted"}. See \code{\link{predict.brmsfit}} for details. #' @inheritParams predict.brmsfit #' #' @return A \code{list} with two slots named \code{'y'} and \code{'yrep'}. #' Slot \code{y} contains the vector of observed responses. #' Slot \code{yrep} contains the matrix of predicted responses, #' with rows being posterior draws and columns being observations. #' #' @seealso \code{\link{kfold}} #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zBase * Trt + (1|patient), #' data = epilepsy, family = poisson()) #' #' # perform k-fold cross validation #' (kf <- kfold(fit, save_fits = TRUE, chains = 1)) #' #' # define a loss function #' rmse <- function(y, yrep) { #' yrep_mean <- colMeans(yrep) #' sqrt(mean((yrep_mean - y)^2)) #' } #' #' # predict responses and evaluate the loss #' kfp <- kfold_predict(kf) #' rmse(y = kfp$y, yrep = kfp$yrep) #' } #' #' @export kfold_predict <- function(x, method = c("predict", "fitted"), resp = NULL, ...) 
{ if (!inherits(x, "kfold")) { stop2("'x' must be a 'kfold' object.") } if (!all(c("fits", "data") %in% names(x))) { stop2( "Slots 'fits' and 'data' are required. ", "Please run kfold with 'save_fits = TRUE'." ) } method <- get(match.arg(method), mode = "function") resp <- validate_resp(resp, x$fits[[1, "fit"]], multiple = FALSE) all_predicted <- as.character(sort(unlist(x$fits[, "predicted"]))) npredicted <- length(all_predicted) ndraws <- ndraws(x$fits[[1, "fit"]]) y <- rep(NA, npredicted) yrep <- matrix(NA, nrow = ndraws, ncol = npredicted) names(y) <- colnames(yrep) <- all_predicted for (k in seq_rows(x$fits)) { fit_k <- x$fits[[k, "fit"]] predicted_k <- x$fits[[k, "predicted"]] obs_names <- as.character(predicted_k) newdata <- x$data[predicted_k, , drop = FALSE] y[obs_names] <- get_y(fit_k, resp, newdata = newdata, ...) yrep[, obs_names] <- method( fit_k, newdata = newdata, resp = resp, allow_new_levels = TRUE, summary = FALSE, ... ) } nlist(y, yrep) } brms/R/numeric-helpers.R0000644000176200001440000000766714430671711014667 0ustar liggesusers# Most of the functions below have equivalents in Stan. Defining them in R is # necessary to evaluate non-linear formulas containing these functions. logit <- function(p) { log(p) - log1p(-p) } inv_logit <- function(x) { 1 / (1 + exp(-x)) } cloglog <- function(x) { log(-log1p(-x)) } inv_cloglog <- function(x) { 1 - exp(-exp(x)) } Phi <- function(x) { pnorm(x) } # incomplete gamma funcion incgamma <- function(a, x) { pgamma(x, shape = a) * gamma(a) } square <- function(x) { x^2 } cbrt <- function(x) { x^(1/3) } exp2 <- function(x) { 2^x } pow <- function(x, y) { x^y } inv <- function(x) { 1/x } inv_sqrt <- function(x) { 1/sqrt(x) } inv_square <- function(x) { 1/x^2 } hypot <- function(x, y) { stopifnot(all(x >= 0)) stopifnot(all(y >= 0)) sqrt(x^2 + y^2) } log1m <- function(x) { log(1 - x) } step <- function(x) { ifelse(x > 0, 1, 0) } #' Logarithm with a minus one offset. #' #' Computes \code{log(x - 1)}. 
#'
#' @param x A numeric or complex vector.
#' @param base A positive or complex number: the base with respect to which
#'   logarithms are computed. Defaults to \emph{e} = \code{exp(1)}.
#'
#' @export
logm1 <- function(x, base = exp(1)) {
  log(x - 1, base = base)
}

#' Exponential function plus one.
#'
#' Computes \code{exp(x) + 1}.
#'
#' @param x A numeric or complex vector.
#'
#' @export
expp1 <- function(x) {
  exp(x) + 1
}

#' Scaled logit-link
#'
#' Computes \code{logit((x - lb) / (ub - lb))}
#'
#' @param x A numeric or complex vector.
#' @param lb Lower bound defaulting to \code{0}.
#' @param ub Upper bound defaulting to \code{1}.
#'
#' @return A numeric or complex vector.
#'
#' @export
logit_scaled <- function(x, lb = 0, ub = 1) {
  logit((x - lb) / (ub - lb))
}

#' Scaled inverse logit-link
#'
#' Computes \code{inv_logit(x) * (ub - lb) + lb}
#'
#' @param x A numeric or complex vector.
#' @param lb Lower bound defaulting to \code{0}.
#' @param ub Upper bound defaulting to \code{1}.
#'
#' @return A numeric or complex vector between \code{lb} and \code{ub}.
#'
#' @export
inv_logit_scaled <- function(x, lb = 0, ub = 1) {
  inv_logit(x) * (ub - lb) + lb
}

# x * log(y), defined to be 0 when both x and y are 0 (as in Stan)
multiply_log <- function(x, y) {
  ifelse(x == y & x == 0, 0, x * log(y))
}

# numerically stable log(1 + exp(x))
log1p_exp <- function(x) {
  # approaches identity(x) for x -> Inf
  out <- log1p(exp(x))
  ifelse(out < Inf, out, x)
}

# log(1 - exp(x)) for x < 0; NaN otherwise
log1m_exp <- function(x) {
  ifelse(x < 0, log1p(-exp(x)), NaN)
}

# log(exp(x) - exp(y)) for x > y; NaN otherwise
log_diff_exp <- function(x, y) {
  stopifnot(length(x) == length(y))
  ifelse(x > y, log(exp(x) - exp(y)), NaN)
}

# numerically stable log(exp(x) + exp(y)) via the max trick
log_sum_exp <- function(x, y) {
  max <- pmax(x, y)
  max + log(exp(x - max) + exp(y - max))
}

# numerically stable log(mean(exp(x))) via the max trick
log_mean_exp <- function(x) {
  max_x <- max(x)
  max_x + log(sum(exp(x - max_x))) - log(length(x))
}

# numerically stable log(exp(x) - 1)
log_expm1 <- function(x) {
  # approaches identity(x) for x -> Inf
  out <- log(expm1(x))
  ifelse(out < Inf, out, x)
}

# log of the inverse logit function
log_inv_logit <- function(x) {
  log(inv_logit(x))
}

# log of one minus the inverse logit function
log1m_inv_logit <- function(x) {
  log(1 - inv_logit(x))
}

# linearly rescale x to the unit interval [0, 1]
scale_unit <- function(x, lb = min(x), ub = max(x)) {
  (x - lb) / (ub - lb)
}

# Stan-compatible alias of abs()
fabs <- function(x) {
  abs(x)
}

# log softmax computed along the last dimension of x
log_softmax <- function(x) {
  ndim <- length(dim(x))
  if (ndim <= 1) {
    # treat vectors as single-row matrices
    x <- matrix(x, nrow = 1)
    ndim <- length(dim(x))
  }
  dim_noncat <- dim(x)[-ndim]
  marg_noncat <- seq_along(dim(x))[-ndim]
  # log normalizing constant per slice of the non-category margins
  catsum <- log(array(apply(exp(x), marg_noncat, sum), dim = dim_noncat))
  sweep(x, marg_noncat, catsum, "-")
}

# softmax computed along the last dimension of x
softmax <- function(x) {
  # log_softmax is more numerically stable #1401
  exp(log_softmax(x))
}

# convert odds x to probabilities x / (1 + x)
inv_odds <- function(x) {
  x / (1 + x)
}

# inspired by logit but with softplus instead of log
softit <- function(x) {
  log_expm1(x / (1 - x))
}

# inspired by inv_logit but with softplus instead of exp
inv_softit <- function(x) {
  y <- log1p_exp(x)
  y / (1 + y)
}

# inspired by inv_logit but with softplus instead of exp
log_inv_softit <- function(x) {
  y <- log1p_exp(x)
  log(y) - log1p(y)
}

# inspired by inv_logit but with softplus instead of exp
log1m_inv_softit <- function(x) {
  y <- log1p_exp(x)
  -log1p(y)
}
brms/R/restructure.R0000644000176200001440000006062114431360310014130 0ustar liggesusers#' Restructure Old \code{brmsfit}
Objects #' #' Restructure old \code{brmsfit} objects to work with #' the latest \pkg{brms} version. This function is called #' internally when applying post-processing methods. #' However, in order to avoid unnecessary run time caused #' by the restructuring, I recommend explicitly calling #' \code{restructure} once per model after updating \pkg{brms}. #' #' @param x An object of class \code{brmsfit}. #' @param ... Currently ignored. #' #' @details #' If you are restructuring an old spline model (fitted with brms < 2.19.3) to #' avoid prediction inconsistencies between machines (see GitHub issue #1465), #' please make sure to \code{restructure} your model on the machine on which it #' was originally fitted. #' #' @return A \code{brmsfit} object compatible with the latest version #' of \pkg{brms}. #' #' @export restructure <- function(x, ...) { stopifnot(is.brmsfit(x)) if (is.null(x$version)) { # this is the latest version without saving the version number x$version <- list(brms = package_version("0.9.1")) } else if (is.package_version(x$version)) { # also added the rstan version in brms 1.5.0 x$version <- list(brms = x$version) } current_version <- utils::packageVersion("brms") restr_version <- restructure_version(x) if (restr_version >= current_version) { # object is up to date with the current brms version return(x) } if (restr_version < "2.0.0") { x <- restructure_v1(x) } if (restr_version < "3.0.0") { x <- restructure_v2(x) } # remember the version with which the object was restructured x$version$restructure <- current_version # remove unused attribute attr(x, "restructured") <- NULL x } restructure_v2 <- function(x) { # restructure models fitted with brms 2.x x$formula <- update_old_family(x$formula) bterms <- SW(brmsterms(x$formula)) pars <- variables(x) version <- restructure_version(x) if (version < "2.1.2") { x <- do_renaming(x, rename_old_bsp(pars)) } if (version < "2.1.3") { if ("weibull" %in% family_names(x)) { stop_parameterization_changed("weibull", 
"2.1.3") } } if (version < "2.1.8") { if ("exgaussian" %in% family_names(x)) { stop_parameterization_changed("exgaussian", "2.1.8") } } if (version < "2.1.9") { # reworked 'me' terms (#372) meef <- tidy_meef(bterms, model.frame(x)) if (isTRUE(nrow(meef) > 0)) { warning2( "Measurement error ('me') terms have been reworked ", "in version 2.1.9. I strongly recommend refitting your ", "model with the latest version of brms." ) } } if (version < "2.2.4") { # added 'dist' argument to grouping terms x$ranef <- tidy_ranef(bterms, model.frame(x)) } if (version < "2.3.7") { check_old_nl_dpars(bterms) } if (version < "2.8.3") { # argument 'sparse' is now specified within 'formula' sparse <- if (grepl("sparse matrix", stancode(x))) TRUE x$formula <- SW(validate_formula(x$formula, data = x$data, sparse = sparse)) } if (version < "2.8.4") { x <- rescale_old_mo(x) } if (version < "2.8.5") { if (any(grepl("^arr(\\[|_|$)", pars))) { warning2("ARR structures are no longer supported.") } } if (version < "2.8.6") { # internal handling of special effects terms has changed # this requires updating the 'terms' attribute of the data x$data <- rm_attr(x$data, c("brmsframe", "terms")) x$data <- validate_data(x$data, bterms) } if (version < "2.8.9") { if (any(grepl("^loclev(\\[|_|$)", pars))) { warning2("BSTS structures are no longer supported.") } } if (version < "2.10.4") { # model fit criteria have been moved to x$criteria criterion_names <- c("loo", "waic", "kfold", "R2", "marglik") criteria <- x[intersect(criterion_names, names(x))] x[criterion_names] <- NULL # rename 'R2' to 'bayes_R2' according to #793 names(criteria)[names(criteria) == "R2"] <- "bayes_R2" x$criteria <- criteria } if (version < "2.10.5") { # new slot 'thres' stored inside ordinal families if (is_ordinal(x$formula)) { x$formula <- SW(validate_formula(x$formula, data = x$data)) } } if (version < "2.11.2") { # 'autocor' was integrated into the formula interface x$formula <- SW(validate_formula(x$formula)) x$data2 <- 
validate_data2( data2 = list(), bterms = bterms, get_data2_autocor(x$formula) ) } if (version < "2.11.3") { # ordering after IDs matches the order of the posterior draws # if multiple IDs are used for the same grouping factor (#835) x$ranef <- x$ranef[order(x$ranef$id), , drop = FALSE] } if (version < "2.11.5") { # 'cats' is stored inside ordinal families again if (is_ordinal(x$formula)) { x$formula <- SW(validate_formula(x$formula, data = x$data)) } } if (version < "2.12.5") { # 'cov_ranef' was integrated into the formula interface if (length(x$cov_ranef)) { x$formula <- SW(validate_formula(x$formula, cov_ranef = x$cov_ranef)) cov_ranef <- get_data2_cov_ranef(x$formula) x$data2[names(cov_ranef)] <- cov_ranef } } if (version < "2.12.6") { # minor structural changes as part of internal interface improvements attr(x$data, "data_name") <- x$data.name x$stanvars <- SW(validate_stanvars(x$stanvars, stan_funs = x$stan_funs)) } if (version < "2.12.11") { # argument 'position' was added to stanvars for (i in seq_along(x$stanvars)) { x$stanvars[[i]]$position <- "start" } } if (version < "2.13.2") { # added support for 'cmdstanr' as additional backend x$backend <- "rstan" } if (version < "2.13.5") { # see issue #962 for discussion if ("cox" %in% family_names(x)) { stop_parameterization_changed("cox", "2.13.5") } } if (version < "2.13.8") { x$prior$source <- "" # ensure correct ordering of columns cols_prior <- intersect(all_cols_prior(), names(x$prior)) x$prior <- x$prior[, cols_prior] } if (version < "2.13.10") { # added support for threading x$threads <- threading() } if (version < "2.13.12") { # added more control over which parameters to save save_ranef <- isTRUE(attr(x$exclude, "save_ranef")) save_mevars <- isTRUE(attr(x$exclude, "save_mevars")) save_all_pars <- isTRUE(attr(x$exclude, "save_all_pars")) x$save_pars <- SW(validate_save_pars( save_pars(), save_ranef = save_ranef, save_mevars = save_mevars, save_all_pars = save_all_pars )) x$exclude <- NULL } if (version < 
"2.15.6") { # added support for OpenCL x$opencl <- opencl() } if (version < "2.16.1") { # problems with rstan::read_stan_csv as well as # non-unique variable names became apparent (#1218) x$fit <- repair_stanfit(x$fit) } if (version < "2.16.12") { # added full user control over parameter boundaries (#1324) # explicit bounds need to be added to old priors as a result x$prior$ub <- x$prior$lb <- NA for (i in which(nzchar(x$prior$bound))) { bounds <- convert_stan2bounds(x$prior$bound[i], default = c("", "")) x$prior[i, c("lb", "ub")] <- bounds } x$prior$bound <- NULL all_priors <- get_prior(x$formula, x$data, data2 = x$data2, internal = TRUE) # checking for lb is sufficient because both bounds are NA at the same time which_needs_bounds <- which(is.na(x$prior$lb) & !nzchar(x$prior$coef)) for (i in which_needs_bounds) { # take the corresponding bounds from the default prior prior_sub_i <- rbind(x$prior[i, ], all_priors) prior_sub_i <- prior_sub_i[duplicated(prior_sub_i), ] # should always have exactly one row but still check whether it has # any rows at all to prevent things from breaking accidentally if (NROW(prior_sub_i)) { x$prior[i, c("lb", "ub")] <- prior_sub_i[1, c("lb", "ub")] } else { x$prior[i, c("lb", "ub")] <- "" } } x$prior$lb[is.na(x$prior$lb)] <- x$prior$ub[is.na(x$prior$ub)] <- "" x$prior <- move2end(x$prior, "source") } if (version < "2.17.6") { # a slot was added that stores additional control arguments # that are directly passed to the Stan backends for later reuse (#1373) x$stan_args <- list() } if (version < "2.19.3") { # a slot was added to store parts of the Stan data computed at fitting time. 
# storing this is strictly required only for spline models but there it is # critical due to the machine-specific output of SVD (#1465) bterms <- brmsterms(x$formula) x$basis <- standata_basis(bterms, data = x$data) } x } # restructure models fitted with brms 1.x restructure_v1 <- function(x) { version <- restructure_version(x) if (version < "1.0.0") { warning2( "Models fitted with brms < 1.0 are no longer offically ", "supported and post-processing them may fail. I recommend ", "refitting the model with the latest version of brms." ) } x$formula <- restructure_formula_v1(formula(x), x$nonlinear) x$formula <- SW(validate_formula( formula(x), data = model.frame(x), family = family(x), autocor = x$autocor, threshold = x$threshold )) x$nonlinear <- x$partial <- x$threshold <- NULL bterms <- brmsterms(formula(x)) x$data <- rm_attr(x$data, "brmsframe") x$data <- validate_data(x$data, bterms) x$ranef <- tidy_ranef(bterms, model.frame(x)) if ("prior_frame" %in% class(x$prior)) { class(x$prior) <- c("brmsprior", "data.frame") } if (is(x$autocor, "cov_fixed")) { # deprecated as of brms 1.4.0 class(x$autocor) <- "cor_fixed" } if (version < "0.10.1") { if (length(bterms$dpars$mu$nlpars)) { # nlpar and group have changed positions change <- rename_old_re(x$ranef, variables(x), x$fit@sim$dims_oi) x <- do_renaming(x, change) } } if (version < "1.0.0") { # double underscores were added to group-level parameters change <- rename_old_re2(x$ranef, variables(x), x$fit@sim$dims_oi) x <- do_renaming(x, change) } if (version < "1.0.1.1") { # names of spline parameters had to be changed after # allowing for multiple covariates in one spline term change <- rename_old_sm( bterms, model.frame(x), variables(x), x$fit@sim$dims_oi ) x <- do_renaming(x, change) } if (version < "1.8.0.1") { att <- attributes(x$exclude) if (is.null(att$save_ranef)) { attr(x$exclude, "save_ranef") <- any(grepl("^r_", variables(x))) || !nrow(x$ranef) } if (is.null(att$save_mevars)) { attr(x$exclude, "save_mevars") 
<- any(grepl("^Xme_", variables(x))) } } if (version < "1.8.0.2") { x$prior$resp <- x$prior$dpar <- "" # ensure correct ordering of columns cols_prior <- intersect(all_cols_prior(), names(x$prior)) x$prior <- x$prior[, cols_prior] } if (version < "1.9.0.4") { # names of monotonic parameters had to be changed after # allowing for interactions in monotonic terms change <- rename_old_mo(bterms, x$data, pars = variables(x)) x <- do_renaming(x, change) } if (version >= "1.0.0" && version < "2.0.0") { change <- rename_old_categorical(bterms, x$data, pars = variables(x)) x <- do_renaming(x, change) } x } # get version with which a brmsfit object was restructured restructure_version <- function(x) { stopifnot(is.brmsfit(x)) out <- x$version$restructure if (!is.package_version(out)) { # models restructured with brms 2.11.1 store it as an attribute out <- attr(x, "restructured", exact = TRUE) } if (!is.package_version(out)) { out <- x$version$brms } out } # convert old model formulas to brmsformula objects restructure_formula_v1 <- function(formula, nonlinear = NULL) { if (is.brmsformula(formula) && is.formula(formula)) { # convert deprecated brmsformula objects back to formula class(formula) <- "formula" } if (is.brmsformula(formula)) { # already up to date return(formula) } old_nonlinear <- attr(formula, "nonlinear") nl <- length(nonlinear) > 0 if (is.logical(old_nonlinear)) { nl <- nl || old_nonlinear } else if (length(old_nonlinear)) { nonlinear <- c(nonlinear, old_nonlinear) nl <- TRUE } out <- structure(nlist(formula), class = "brmsformula") old_forms <- rmNULL(attributes(formula)[old_dpars()]) old_forms <- c(old_forms, nonlinear) out$pforms[names(old_forms)] <- old_forms bf(out, nl = nl) } # parameters to be restructured in old brmsformula objects old_dpars <- function() { c("mu", "sigma", "shape", "nu", "phi", "kappa", "beta", "xi", "zi", "hu", "zoi", "coi", "disc", "bs", "ndt", "bias", "quantile", "alpha", "theta") } # interchanges group and nlpar in names of 
group-level parameters # required for brms <= 0.10.0.9000 # @param ranef output of tidy_ranef # @param pars names of all parameters in the model # @param dims dimension of parameters # @return a list whose elements can be interpreted by do_renaming rename_old_re <- function(ranef, pars, dims) { out <- list() for (id in unique(ranef$id)) { r <- subset2(ranef, id = id) g <- r$group[1] nlpar <- r$nlpar[1] stopifnot(nzchar(nlpar)) # rename sd-parameters old_sd_names <- paste0("sd_", nlpar, "_", g, "_", r$coef) new_sd_names <- paste0("sd_", g, "_", nlpar, "_", r$coef) for (i in seq_along(old_sd_names)) { lc(out) <- rename_simple( old_sd_names[i], new_sd_names[i], pars, dims ) } # rename cor-parameters new_cor_names <- get_cornames( paste0(nlpar, "_", r$coef), type = paste0("cor_", g), brackets = FALSE, sep = "_" ) old_cor_names <- get_cornames( r$coef, brackets = FALSE, sep = "_", type = paste0("cor_", nlpar, "_", g) ) for (i in seq_along(old_cor_names)) { lc(out) <- rename_simple( old_cor_names[i], new_cor_names[i], pars, dims ) } # rename r-parameters old_r_name <- paste0("r_", nlpar, "_", g) new_r_name <- paste0("r_", g, "_", nlpar) levels <- gsub("[ \t\r\n]", ".", attr(ranef, "levels")[[g]]) index_names <- make_index_names(levels, r$coef, dim = 2) new_r_names <- paste0(new_r_name, index_names) lc(out) <- rename_simple( old_r_name, new_r_names, pars, dims, pnames = new_r_name ) } out } # add double underscore in group-level parameters # required for brms < 1.0.0 # @note assumes that group and nlpar are correctly ordered already # @param ranef output of tidy_ranef # @param pars names of all parameters in the model # @param dims dimension of parameters # @return a list whose elements can be interpreted by do_renaming rename_old_re2 <- function(ranef, pars, dims) { out <- list() for (id in unique(ranef$id)) { r <- subset2(ranef, id = id) g <- r$group[1] nlpars_usc <- usc(r$nlpar, "suffix") # rename sd-parameters old_sd_names <- paste0("sd_", g, "_", nlpars_usc, r$coef) 
new_sd_names <- paste0("sd_", g, "__", nlpars_usc, r$coef) for (i in seq_along(old_sd_names)) { lc(out) <- rename_simple(old_sd_names[i], new_sd_names[i], pars, dims) } # rename cor-parameters new_cor_names <- get_cornames( paste0(nlpars_usc, r$coef), type = paste0("cor_", g), brackets = FALSE ) old_cor_names <- get_cornames( paste0(nlpars_usc, r$coef), type = paste0("cor_", g), brackets = FALSE, sep = "_" ) for (i in seq_along(old_cor_names)) { lc(out) <- rename_simple(old_cor_names[i], new_cor_names[i], pars, dims) } # rename r-parameters for (nlpar in unique(r$nlpar)) { sub_r <- r[r$nlpar == nlpar, ] old_r_name <- paste0("r_", g, usc(nlpar)) new_r_name <- paste0("r_", g, usc(usc(nlpar))) levels <- gsub("[ \t\r\n]", ".", attr(ranef, "levels")[[g]]) index_names <- make_index_names(levels, sub_r$coef, dim = 2) new_r_names <- paste0(new_r_name, index_names) lc(out) <- rename_simple( old_r_name, new_r_names, pars, dims, pnames = new_r_name ) } } out } # change names of spline parameters fitted with brms <= 1.0.1 # this became necessary after allowing smooths with multiple covariates rename_old_sm <- function(bterms, data, pars, dims) { .rename_old_sm <- function(bt) { out <- list() smef <- tidy_smef(bt, data) if (nrow(smef)) { p <- usc(combine_prefix(bt), "suffix") old_smooths <- rename(paste0(p, smef$term)) new_smooths <- rename(paste0(p, smef$label)) old_sds_pars <- paste0("sds_", old_smooths) new_sds_pars <- paste0("sds_", new_smooths, "_1") old_s_pars <- paste0("s_", old_smooths) new_s_pars <- paste0("s_", new_smooths, "_1") for (i in seq_along(old_smooths)) { lc(out) <- rename_simple(old_sds_pars[i], new_sds_pars[i], pars, dims) dim_s <- dims[[old_s_pars[i]]] if (!is.null(dim_s)) { new_s_par_indices <- paste0(new_s_pars[i], "[", seq_len(dim_s), "]") lc(out) <- rename_simple( old_s_pars[i], new_s_par_indices, pars, dims, pnames = new_s_pars[i] ) } } } return(out) } out <- list() if (is.mvbrmsterms(bterms)) { for (r in bterms$responses) { c(out) <- 
.rename_old_sm(bterms$terms[[r]]$dpars$mu) } } else if (is.brmsterms(bterms)) { for (dp in names(bterms$dpars)) { bt <- bterms$dpars[[dp]] if (length(bt$nlpars)) { for (nlp in names(bt$nlpars)) { c(out) <- .rename_old_sm(bt$nlpars[[nlp]]) } } else { c(out) <- .rename_old_sm(bt) } } } out } # change names of monotonic effects fitted with brms <= 1.9.0 # this became necessary after implementing monotonic interactions rename_old_mo <- function(bterms, data, pars) { .rename_old_mo <- function(bt) { out <- list() spef <- tidy_spef(bt, data) has_mo <- lengths(spef$calls_mo) > 0 if (!any(has_mo)) { return(out) } spef <- spef[has_mo, ] p <- usc(combine_prefix(bt)) bmo_prefix <- paste0("bmo", p, "_") bmo_regex <- paste0("^", bmo_prefix, "[^_]+$") bmo_old <- pars[grepl(bmo_regex, pars)] bmo_new <- paste0(bmo_prefix, spef$coef) if (length(bmo_old) != length(bmo_new)) { stop2("Restructuring failed. Please refit your ", "model with the latest version of brms.") } for (i in seq_along(bmo_old)) { pos <- grepl(paste0("^", bmo_old[i]), pars) lc(out) <- rlist(pos, fnames = bmo_new[i]) } simo_regex <- paste0("^simplex", p, "_[^_]+$") simo_old_all <- pars[grepl(simo_regex, pars)] simo_index <- get_matches("\\[[[:digit:]]+\\]$", simo_old_all) simo_old <- unique(sub("\\[[[:digit:]]+\\]$", "", simo_old_all)) simo_coef <- get_simo_labels(spef) for (i in seq_along(simo_old)) { regex_pos <- paste0("^", simo_old[i]) pos <- grepl(regex_pos, pars) simo_new <- paste0("simo", p, "_", simo_coef[i]) simo_index_part <- simo_index[grepl(regex_pos, simo_old_all)] simo_new <- paste0(simo_new, simo_index_part) lc(out) <- rlist(pos, fnames = simo_new) } return(out) } out <- list() if (is.mvbrmsterms(bterms)) { for (r in bterms$responses) { c(out) <- .rename_old_mo(bterms$terms[[r]]$dpars$mu) } } else if (is.brmsterms(bterms)) { for (dp in names(bterms$dpars)) { bt <- bterms$dpars[[dp]] if (length(bt$nlpars)) { for (nlp in names(bt$nlpars)) { c(out) <- .rename_old_mo(bt$nlpars[[nlp]]) } } else { c(out) 
<- .rename_old_mo(bt) } } } out } # between version 1.0 and 2.0 categorical models used # the internal multivariate interface rename_old_categorical <- function(bterms, data, pars) { stopifnot(is.brmsterms(bterms)) if (!is_categorical(bterms$family)) { return(list()) } # compute the old category names respform <- bterms$respform old_dpars <- model.response(model.frame(respform, data = data)) old_dpars <- levels(factor(old_dpars)) old_dpars <- make.names(old_dpars[-1], unique = TRUE) old_dpars <- rename(old_dpars, ".", "x") new_dpars <- bterms$family$dpars stopifnot(length(old_dpars) == length(new_dpars)) pos <- rep(FALSE, length(pars)) new_pars <- pars for (i in seq_along(old_dpars)) { # not perfectly save but hopefully mostly correct regex <- paste0("(?<=_)", old_dpars[i], "(?=_|\\[)") pos <- pos | grepl(regex, pars, perl = TRUE) new_pars <- gsub(regex, new_dpars[i], new_pars, perl = TRUE) } list(nlist(pos, fnames = new_pars[pos])) } # as of brms 2.2 'mo' and 'me' terms are handled together rename_old_bsp <- function(pars) { pos <- grepl("^(bmo|bme)_", pars) if (!any(pos)) return(list()) fnames <- gsub("^(bmo|bme)_", "bsp_", pars[pos]) list(nlist(pos, fnames)) } # prepare for renaming of parameters in old models rename_simple <- function(oldname, fnames, pars, dims, pnames = fnames) { pos <- grepl(paste0("^", oldname), pars) if (any(pos)) { out <- nlist(pos, oldname, pnames, fnames, dims = dims[[oldname]]) class(out) <- c("rlist", "list") } else { out <- NULL } out } # rescale old 'b' coefficients of monotonic effects # to represent average instead of total differences rescale_old_mo <- function(x, ...) { UseMethod("rescale_old_mo") } #' @export rescale_old_mo.brmsfit <- function(x, ...) { bterms <- brmsterms(x$formula) rescale_old_mo(bterms, fit = x, ...) } #' @export rescale_old_mo.mvbrmsterms <- function(x, fit, ...) { for (resp in x$responses) { fit <- rescale_old_mo(x$terms[[resp]], fit = fit, ...) 
} fit } #' @export rescale_old_mo.brmsterms <- function(x, fit, ...) { for (dp in names(x$dpars)) { fit <- rescale_old_mo(x$dpars[[dp]], fit = fit, ...) } for (nlp in names(x$nlpars)) { fit <- rescale_old_mo(x$nlpars[[nlp]], fit = fit, ...) } fit } #' @export rescale_old_mo.btnl <- function(x, fit, ...) { fit } #' @export rescale_old_mo.btl <- function(x, fit, ...) { spef <- tidy_spef(x, fit$data) has_mo <- lengths(spef$Imo) > 0L if (!any(has_mo)) { return(fit) } warning2( "The parameterization of monotonic effects has changed in brms 2.8.4 ", "so that corresponding 'b' coefficients now represent average instead ", "of total differences between categories. See vignette('brms_monotonic') ", "for more details. Parameters of old models are adjusted automatically." ) p <- combine_prefix(x) all_pars <- variables(fit) chains <- fit$fit@sim$chains for (i in which(has_mo)) { bsp_par <- paste0("bsp", p, "_", spef$coef[i]) simo_regex <- paste0(spef$coef[i], seq_along(spef$Imo[[i]])) simo_regex <- paste0("simo", p, "_", simo_regex, "[") simo_regex <- paste0("^", escape_all(simo_regex)) # scaling factor by which to divide the old 'b' coefficients D <- prod(ulapply(simo_regex, function(r) sum(grepl(r, all_pars)))) for (j in seq_len(chains)) { fit$fit@sim$samples[[j]][[bsp_par]] <- fit$fit@sim$samples[[j]][[bsp_par]] / D } } fit } # update old families to work with the latest brms version update_old_family <- function(x, ...) { UseMethod("update_old_family") } #' @export update_old_family.default <- function(x, ...) { validate_family(x) } #' @export update_old_family.brmsfamily <- function(x, ...) { # new specials may have been added in new brms versions family_info <- get(paste0(".family_", x$family))() x$specials <- family_info$specials x } #' @export update_old_family.customfamily <- function(x, ...) 
{ if (!is.null(x$predict)) { x$posterior_predict <- x$predict x$predict <- NULL } if (!is.null(x$fitted)) { x$posterior_epred <- x$fitted x$fitted <- NULL } x } #' @export update_old_family.mixfamily <- function(x, ...) { x$mix <- lapply(x$mix, update_old_family, ...) x } #' @export update_old_family.brmsformula <- function(x, ...) { x$family <- update_old_family(x$family, ...) x } #' @export update_old_family.mvbrmsformula <- function(x, ...) { x$forms <- lapply(x$forms, update_old_family, ...) x } stop_parameterization_changed <- function(family, version) { stop2( "The parameterization of '", family, "' models has changed in brms ", version, ". Please refit your model with the current version of brms." ) } check_old_nl_dpars <- function(bterms) { .check_nl_dpars <- function(x) { stopifnot(is.brmsterms(x)) non_mu_dpars <- x$dpars[names(x$dpars) != "mu"] if (any(ulapply(non_mu_dpars, is.btnl))) { stop2( "Non-linear parameters are global within univariate models ", "as of version 2.3.7. Please refit your model with the ", "latest version of brms." ) } return(TRUE) } if (is.mvbrmsterms(bterms)) { lapply(bterms$terms, .check_nl_dpars) } else { .check_nl_dpars(bterms) } TRUE } brms/R/brms-package.R0000644000176200001440000000763014213413565014107 0ustar liggesusers#' Bayesian Regression Models using 'Stan' #' #' @docType package #' @name brms-package #' @aliases brms #' #' @description #' \if{html}{ #' \figure{stanlogo.png}{options: width="50" alt="https://mc-stan.org/about/logo/"} #' \emph{Stan Development Team} #' } #' #' The \pkg{brms} package provides an interface to fit Bayesian generalized #' multivariate (non-)linear multilevel models using \pkg{Stan}, which is a C++ #' package for obtaining full Bayesian inference (see #' \url{https://mc-stan.org/}). The formula syntax is an extended version of the #' syntax applied in the \pkg{lme4} package to provide a familiar and simple #' interface for performing regression analyses. 
#' #' @details #' The main function of \pkg{brms} is \code{\link{brm}}, which uses #' formula syntax to specify a wide range of complex Bayesian models #' (see \code{\link{brmsformula}} for details). Based on the supplied #' formulas, data, and additional information, it writes the Stan code #' on the fly via \code{\link{make_stancode}}, prepares the data via #' \code{\link{make_standata}}, and fits the model using #' \pkg{\link[rstan:rstan]{Stan}}. #' #' Subsequently, a large number of post-processing methods can be applied: #' To get an overview on the estimated parameters, #' \code{\link[brms:summary.brmsfit]{summary}} or #' \code{\link[brms:conditional_effects.brmsfit]{conditional_effects}} #' are perfectly suited. Detailed visual analyses can be performed by applying #' the \code{\link{pp_check}} and \code{\link{stanplot}} methods, which both #' rely on the \pkg{\link[bayesplot:bayesplot-package]{bayesplot}} package. #' Model comparisons can be done via \code{\link{loo}} and \code{\link{waic}}, #' which make use of the \pkg{\link[loo:loo-package]{loo}} package as well as #' via \code{\link{bayes_factor}} which relies on the \pkg{bridgesampling} package. #' For a full list of methods to apply, type \code{methods(class = "brmsfit")}. #' #' Because \pkg{brms} is based on \pkg{Stan}, a C++ compiler is required. The #' program Rtools (available on #' \url{https://cran.r-project.org/bin/windows/Rtools/}) comes with a C++ #' compiler for Windows. On Mac, you should use Xcode. For further instructions #' on how to get the compilers running, see the prerequisites section at the #' \href{https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started}{RStan-Getting-Started} #' page. #' #' When comparing other packages fitting multilevel models to \pkg{brms}, keep #' in mind that the latter needs to compile models before actually fitting them, #' which will require between 20 and 40 seconds depending on your machine, #' operating system and overall model complexity. 
#' #' Thus, fitting smaller models may be relatively slow as compilation time makes #' up the majority of the whole running time. For larger / more complex #' models however, fitting my take several minutes or even hours, so that the #' compilation time won't make much of a difference for these models. #' #' See \code{vignette("brms_overview")} and \code{vignette("brms_multilevel")} #' for a general introduction and overview of \pkg{brms}. For a full list of #' available vignettes, type \code{vignette(package = "brms")}. #' #' @references #' Paul-Christian Buerkner (2017). brms: An R Package for Bayesian Multilevel #' Models Using Stan. \emph{Journal of Statistical Software}, 80(1), 1-28. #' \code{doi:10.18637/jss.v080.i01} #' #' Paul-Christian Buerkner (2018). Advanced Bayesian Multilevel Modeling #' with the R Package brms. \emph{The R Journal}. 10(1), 395–411. #' \code{doi:10.32614/RJ-2018-017} #' #' The Stan Development Team. \emph{Stan Modeling Language User's Guide and #' Reference Manual}. \url{https://mc-stan.org/users/documentation/}. #' #' Stan Development Team (2020). RStan: the R interface to Stan. R package #' version 2.21.2. \url{https://mc-stan.org/} #' #' @seealso #' \code{\link{brm}}, #' \code{\link{brmsformula}}, #' \code{\link{brmsfamily}}, #' \code{\link{brmsfit}} #' NULL brms/R/ggplot-themes.R0000644000176200001440000000660614213413565014334 0ustar liggesusers#' (Deprecated) Black Theme for \pkg{ggplot2} Graphics #' #' A black theme for ggplot graphics inspired by a blog post of Jon Lefcheck #' (\url{https://jonlefcheck.net/2013/03/11/black-theme-for-ggplot2-2/}). #' #' @param base_size base font size #' @param base_family base font family #' #' @return A \code{theme} object used in \pkg{ggplot2} graphics. #' #' @details When using \code{theme_black} in plots powered by the #' \pkg{bayesplot} package such as \code{pp_check} or \code{stanplot}, #' I recommend using the \code{"viridisC"} color scheme (see examples). 
#' #' @examples #' \dontrun{ #' # change default ggplot theme #' ggplot2::theme_set(theme_black()) #' #' # change default bayesplot color scheme #' bayesplot::color_scheme_set("viridisC") #' #' # fit a simple model #' fit <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson(), chains = 2) #' summary(fit) #' #' # create various plots #' plot(marginal_effects(fit), ask = FALSE) #' pp_check(fit) #' mcmc_plot(fit, type = "hex", variable = c("b_Intercept", "b_Trt1")) #' } #' #' @export theme_black = function(base_size = 12, base_family = "") { warning2("'theme_black' is deprecated. Please use the 'ggdark' package ", "for dark ggplot themes.") theme_grey(base_size = base_size, base_family = base_family) %+replace% theme( # axis options axis.line = element_blank(), axis.text.x = element_text( size = base_size * 0.8, color = "white", lineheight = 0.9 ), axis.text.y = element_text( size = base_size * 0.8, color = "white", lineheight = 0.9 ), axis.ticks = element_line(color = "white", size = 0.2), axis.title.x = element_text( size = base_size, color = "white", margin = margin(10, 0, 0, 0) ), axis.title.y = element_text( size = base_size, color = "white", angle = 90, margin = margin(0, 10, 0, 0) ), axis.ticks.length = unit(0.3, "lines"), # legend options legend.background = element_rect(color = NA, fill = "black"), legend.key = element_rect(color = "white", fill = "black"), legend.key.size = unit(1.2, "lines"), legend.key.height = NULL, legend.key.width = NULL, legend.text = element_text(size = base_size * 0.8, color = "white"), legend.title = element_text( size = base_size * 0.8, face = "bold", hjust = 0, color = "white" ), legend.position = "right", legend.text.align = NULL, legend.title.align = NULL, legend.direction = "vertical", legend.box = NULL, # panel options panel.background = element_rect(fill = "black", color = NA), panel.border = element_rect(fill = NA, color = "white"), panel.grid.major = element_line(color = "grey35"), 
panel.grid.minor = element_line(color = "grey20"), panel.spacing = unit(0.5, "lines"), # facetting options strip.background = element_rect(fill = "grey30", color = "grey10"), strip.text.x = element_text( size = base_size * 0.8, color = "white", margin = margin(3, 0, 4, 0) ), strip.text.y = element_text( size = base_size * 0.8, color = "white", angle = -90 ), # plot options plot.background = element_rect(color = "black", fill = "black"), plot.title = element_text(size = base_size * 1.2, color = "white"), plot.margin = unit(rep(1, 4), "lines") ) } brms/R/make_stancode.R0000644000176200001440000003755114424715563014364 0ustar liggesusers#' Stan Code for \pkg{brms} Models #' #' Generate Stan code for \pkg{brms} models #' #' @inheritParams brm #' @param ... Other arguments for internal usage only. #' #' @return A character string containing the fully commented \pkg{Stan} code #' to fit a \pkg{brms} model. #' #' @examples #' make_stancode(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = "cumulative") #' #' make_stancode(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = "poisson") #' #' @export make_stancode <- function(formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sparse = NULL, sample_prior = "no", stanvars = NULL, stan_funs = NULL, knots = NULL, drop_unused_levels = TRUE, threads = getOption("brms.threads", NULL), normalize = getOption("brms.normalize", TRUE), save_model = NULL, ...) 
{ if (is.brmsfit(formula)) { stop2("Use 'stancode' to extract Stan code from 'brmsfit' objects.") } formula <- validate_formula( formula, data = data, family = family, autocor = autocor, sparse = sparse, cov_ranef = cov_ranef ) bterms <- brmsterms(formula) data2 <- validate_data2( data2, bterms = bterms, get_data2_autocor(formula), get_data2_cov_ranef(formula) ) data <- validate_data( data, bterms = bterms, data2 = data2, knots = knots, drop_unused_levels = drop_unused_levels ) prior <- .validate_prior( prior, bterms = bterms, data = data, sample_prior = sample_prior ) stanvars <- validate_stanvars(stanvars, stan_funs = stan_funs) threads <- validate_threads(threads) .make_stancode( bterms, data = data, prior = prior, stanvars = stanvars, threads = threads, normalize = normalize, save_model = save_model, ... ) } # internal work function of 'make_stancode' # @param parse parse the Stan model for automatic syntax checking # @param backend name of the backend used for parsing # @param silent silence parsing messages .make_stancode <- function(bterms, data, prior, stanvars, threads = threading(), normalize = getOption("brms.normalize", TRUE), parse = getOption("brms.parse_stancode", FALSE), backend = getOption("brms.backend", "rstan"), silent = TRUE, save_model = NULL, ...) 
{ normalize <- as_one_logical(normalize) parse <- as_one_logical(parse) backend <- match.arg(backend, backend_choices()) silent <- as_one_logical(silent) ranef <- tidy_ranef(bterms, data = data) meef <- tidy_meef(bterms, data = data) scode_predictor <- stan_predictor( bterms, data = data, prior = prior, normalize = normalize, ranef = ranef, meef = meef, stanvars = stanvars, threads = threads ) scode_ranef <- stan_re( ranef, prior = prior, threads = threads, normalize = normalize ) scode_Xme <- stan_Xme( meef, prior = prior, threads = threads, normalize = normalize ) scode_global_defs <- stan_global_defs( bterms, prior = prior, ranef = ranef, threads = threads ) # extend Stan's likelihood part if (use_threading(threads)) { # threading is activated for (i in seq_along(scode_predictor)) { resp <- usc(names(scode_predictor)[i]) pll_args <- stan_clean_pll_args( scode_predictor[[i]][["pll_args"]], scode_ranef[["pll_args"]], scode_Xme[["pll_args"]], collapse_stanvars_pll_args(stanvars) ) partial_log_lik <- paste0( scode_predictor[[i]][["pll_def"]], scode_predictor[[i]][["model_def"]], collapse_stanvars(stanvars, "likelihood", "start"), scode_predictor[[i]][["model_comp_basic"]], scode_predictor[[i]][["model_comp_eta"]], scode_predictor[[i]][["model_comp_eta_loop"]], scode_predictor[[i]][["model_comp_dpar_link"]], scode_predictor[[i]][["model_comp_dpar_trans"]], scode_predictor[[i]][["model_comp_mix"]], scode_predictor[[i]][["model_comp_arma"]], scode_predictor[[i]][["model_comp_catjoin"]], scode_predictor[[i]][["model_comp_mvjoin"]], scode_predictor[[i]][["model_log_lik"]], collapse_stanvars(stanvars, "likelihood", "end") ) partial_log_lik <- gsub(" target \\+=", " ptarget +=", partial_log_lik) partial_log_lik <- paste0( "// compute partial sums of the log-likelihood\n", "real partial_log_lik", resp, "_lpmf(int[] seq", resp, ", int start, int end", pll_args$typed, ") {\n", " real ptarget = 0;\n", " int N = end - start + 1;\n", partial_log_lik, " return ptarget;\n", "}\n" 
) partial_log_lik <- wsp_per_line(partial_log_lik, 2) scode_predictor[[i]][["partial_log_lik"]] <- partial_log_lik static <- str_if(threads$static, "_static") scode_predictor[[i]][["model_lik"]] <- paste0( " target += reduce_sum", static, "(partial_log_lik", resp, "_lpmf", ", seq", resp, ", grainsize", pll_args$plain, ");\n" ) str_add(scode_predictor[[i]][["tdata_def"]]) <- glue( " int seq{resp}[N{resp}] = sequence(1, N{resp});\n" ) } scode_predictor <- collapse_lists(ls = scode_predictor) scode_predictor[["model_lik"]] <- paste0( scode_predictor[["model_no_pll_def"]], scode_predictor[["model_no_pll_comp_basic"]], scode_predictor[["model_no_pll_comp_mvjoin"]], scode_predictor[["model_lik"]] ) str_add(scode_predictor[["data"]]) <- " int grainsize; // grainsize for threading\n" } else { # threading is not activated scode_predictor <- collapse_lists(ls = scode_predictor) scode_predictor[["model_lik"]] <- paste0( scode_predictor[["model_no_pll_def"]], scode_predictor[["model_def"]], collapse_stanvars(stanvars, "likelihood", "start"), scode_predictor[["model_no_pll_comp_basic"]], scode_predictor[["model_comp_basic"]], scode_predictor[["model_comp_eta"]], scode_predictor[["model_comp_eta_loop"]], scode_predictor[["model_comp_dpar_link"]], scode_predictor[["model_comp_dpar_trans"]], scode_predictor[["model_comp_mix"]], scode_predictor[["model_comp_arma"]], scode_predictor[["model_comp_catjoin"]], scode_predictor[["model_no_pll_comp_mvjoin"]], scode_predictor[["model_comp_mvjoin"]], scode_predictor[["model_log_lik"]], collapse_stanvars(stanvars, "likelihood", "end") ) } scode_predictor[["model_lik"]] <- wsp_per_line(scode_predictor[["model_lik"]], 2) # get all priors added to 'lprior' scode_tpar_prior <- paste0( scode_predictor[["tpar_prior"]], scode_ranef[["tpar_prior"]], scode_Xme[["tpar_prior"]] ) # generate functions block scode_functions <- paste0( "// generated with brms ", utils::packageVersion("brms"), "\n", "functions {\n", scode_global_defs[["fun"]], 
collapse_stanvars(stanvars, "functions"), scode_predictor[["partial_log_lik"]], "}\n" ) # generate data block scode_data <- paste0( "data {\n", " int N; // total number of observations\n", scode_predictor[["data"]], scode_ranef[["data"]], scode_Xme[["data"]], " int prior_only; // should the likelihood be ignored?\n", collapse_stanvars(stanvars, "data"), "}\n" ) # generate transformed parameters block scode_transformed_data <- paste0( "transformed data {\n", scode_global_defs[["tdata_def"]], scode_predictor[["tdata_def"]], collapse_stanvars(stanvars, "tdata", "start"), scode_predictor[["tdata_comp"]], collapse_stanvars(stanvars, "tdata", "end"), "}\n" ) # generate parameters block scode_parameters <- paste0( scode_predictor[["par"]], scode_ranef[["par"]], scode_Xme[["par"]] ) # prepare additional sampling from priors scode_rngprior <- stan_rngprior( tpar_prior = scode_tpar_prior, par_declars = scode_parameters, gen_quantities = scode_predictor[["gen_def"]], special_prior = attr(prior, "special"), sample_prior = get_sample_prior(prior) ) scode_parameters <- paste0( "parameters {\n", scode_parameters, scode_rngprior[["par"]], collapse_stanvars(stanvars, "parameters"), "}\n" ) # generate transformed parameters block scode_lprior_def <- " real lprior = 0; // prior contributions to the log posterior\n" scode_transformed_parameters <- paste0( "transformed parameters {\n", scode_predictor[["tpar_def"]], scode_ranef[["tpar_def"]], scode_Xme[["tpar_def"]], str_if(normalize, scode_lprior_def), collapse_stanvars(stanvars, "tparameters", "start"), scode_predictor[["tpar_prior_const"]], scode_ranef[["tpar_prior_const"]], scode_Xme[["tpar_prior_const"]], scode_predictor[["tpar_comp"]], scode_predictor[["tpar_special_prior"]], scode_ranef[["tpar_comp"]], scode_Xme[["tpar_comp"]], # lprior cannot contain _lupdf functions in transformed parameters # as discussed on github.com/stan-dev/stan/issues/3094 str_if(normalize, scode_tpar_prior), collapse_stanvars(stanvars, "tparameters", 
"end"), "}\n" ) # combine likelihood with prior part not_const <- str_if(!normalize, " not") scode_model <- paste0( "model {\n", str_if(!normalize, scode_lprior_def), collapse_stanvars(stanvars, "model", "start"), " // likelihood", not_const, " including constants\n", " if (!prior_only) {\n", scode_predictor[["model_lik"]], " }\n", " // priors", not_const, " including constants\n", str_if(!normalize, scode_tpar_prior), " target += lprior;\n", scode_predictor[["model_prior"]], scode_ranef[["model_prior"]], scode_Xme[["model_prior"]], stan_unchecked_prior(prior), collapse_stanvars(stanvars, "model", "end"), "}\n" ) # generate generated quantities block scode_generated_quantities <- paste0( "generated quantities {\n", scode_predictor[["gen_def"]], scode_ranef[["gen_def"]], scode_Xme[["gen_def"]], scode_rngprior[["gen_def"]], collapse_stanvars(stanvars, "genquant", "start"), scode_predictor[["gen_comp"]], scode_ranef[["gen_comp"]], scode_rngprior[["gen_comp"]], scode_Xme[["gen_comp"]], collapse_stanvars(stanvars, "genquant", "end"), "}\n" ) # combine all elements into a complete Stan model scode <- paste0( scode_functions, scode_data, scode_transformed_data, scode_parameters, scode_transformed_parameters, scode_model, scode_generated_quantities ) scode <- expand_include_statements(scode) if (parse) { scode <- parse_model(scode, backend, silent = silent) } if (backend == "cmdstanr") { if (requireNamespace("cmdstanr", quietly = TRUE) && cmdstanr::cmdstan_version() >= "2.29.0") { tmp_file <- cmdstanr::write_stan_file(scode) scode <- .canonicalize_stan_model(tmp_file, overwrite_file = FALSE) } } if (is.character(save_model)) { cat(scode, file = save_model) } class(scode) <- c("character", "brmsmodel") scode } #' @export print.brmsmodel <- function(x, ...) { cat(x) invisible(x) } #' Extract Stan model code #' #' Extract Stan code that was used to specify the model. #' #' @aliases stancode.brmsfit #' #' @param object An object of class \code{brmsfit}. 
#' @param version Logical; indicates if the first line containing #' the \pkg{brms} version number should be included. #' Defaults to \code{TRUE}. #' @param regenerate Logical; indicates if the Stan code should #' be regenerated with the current \pkg{brms} version. #' By default, \code{regenerate} will be \code{FALSE} unless required #' to be \code{TRUE} by other arguments. #' @param threads Controls whether the Stan code should be threaded. #' See \code{\link{threading}} for details. #' @param backend Controls the Stan backend. See \code{\link{brm}} for details. #' @param ... Further arguments passed to \code{\link{make_stancode}} if the #' Stan code is regenerated. #' #' @return Stan model code for further processing. #' #' @export stancode.brmsfit <- function(object, version = TRUE, regenerate = NULL, threads = NULL, backend = NULL, ...) { if (is.null(regenerate)) { # determine whether regenerating the Stan code is required regenerate <- FALSE cl <- match.call() if ("threads" %in% names(cl)) { threads <- validate_threads(threads) if (use_threading(threads) && !use_threading(object$threads) || !use_threading(threads) && use_threading(object$threads)) { # threading changed; regenerated Stan code regenerate <- TRUE } object$threads <- threads } if ("backend" %in% names(cl)) { backend <- match.arg(backend, backend_choices()) # older Stan versions do not support array syntax if (require_old_stan_syntax(object, backend, "2.29.0")) { regenerate <- TRUE } object$backend <- backend } } regenerate <- as_one_logical(regenerate) if (regenerate) { object <- restructure(object) out <- make_stancode( formula = object$formula, data = object$data, prior = object$prior, data2 = object$data2, stanvars = object$stanvars, sample_prior = get_sample_prior(object$prior), threads = object$threads, backend = object$backend, ... 
) } else { # extract Stan code unaltered out <- object$model } if (!version) { out <- sub("^[^\n]+[[:digit:]]\\.[^\n]+\n", "", out) } out } #' @rdname stancode.brmsfit #' @export stancode <- function(object, ...) { UseMethod("stancode") } # expand '#include' statements # This could also be done automatically by Stan at compilation time # but would result in Stan code that is not self-contained until compilation # @param model Stan code potentially including '#include' statements # @return Stan code with '#include' statements expanded expand_include_statements <- function(model) { path <- system.file("chunks", package = "brms") includes <- get_matches("#include '[^']+'", model) # removal of duplicates could make code generation easier in the future includes <- unique(includes) files <- gsub("(#include )|(')", "", includes) for (i in seq_along(includes)) { code <- readLines(paste0(path, "/", files[i])) code <- paste0(code, collapse = "\n") pattern <- paste0(" *", escape_all(includes[i])) model <- sub(pattern, code, model) } model } # check if Stan code includes normalization constants is_normalized <- function(stancode) { !grepl("_lup(d|m)f\\(", stancode) } # Normalizes Stan code to avoid triggering refit after whitespace and # comment changes in the generated code. # In some distant future, StanC3 may provide its own normalizing functions, # until then this is a set of regex hacks. 
# @param x a string containing the Stan code normalize_stancode <- function(x) { x <- as_one_character(x) # Remove single-line comments x <- gsub("//[^\n\r]*[\n\r]", " ", x) x <- gsub("//[^\n\r]*$", " ", x) # Remove multi-line comments x <- gsub("/\\*([^*]*(\\*[^/])?)*\\*/", " ", x) # Standardize whitespace (including newlines) x <- gsub("[[:space:]]+"," ", x) trimws(x) } # check if the currently installed Stan version requires older syntax # than the Stan version with which the model was initially fitted require_old_stan_syntax <- function(object, backend, version) { stopifnot(is.brmsfit(object)) isTRUE( (object$backend == "rstan" && object$version$rstan >= version || object$backend == "cmdstanr" && object$version$cmdstan >= version) && (backend == "rstan" && utils::packageVersion("rstan") < version || backend == "cmdstanr" && cmdstanr::cmdstan_version() < version) ) } brms/R/families.R0000644000176200001440000017414614453716004013353 0ustar liggesusers#' Special Family Functions for \pkg{brms} Models #' #' Family objects provide a convenient way to specify the details of the models #' used by many model fitting functions. The family functions presented here are #' for use with \pkg{brms} only and will **not** work with other model #' fitting functions such as \code{glm} or \code{glmer}. #' However, the standard family functions as described in #' \code{\link[stats:family]{family}} will work with \pkg{brms}. #' You can also specify custom families for use in \pkg{brms} with #' the \code{\link{custom_family}} function. #' #' @param family A character string naming the distribution family of the response #' variable to be used in the model. 
Currently, the following families are #' supported: \code{gaussian}, \code{student}, \code{binomial}, #' \code{bernoulli}, \code{beta-binomial}, \code{poisson}, \code{negbinomial}, #' \code{geometric}, \code{Gamma}, \code{skew_normal}, \code{lognormal}, #' \code{shifted_lognormal}, \code{exgaussian}, \code{wiener}, #' \code{inverse.gaussian}, \code{exponential}, \code{weibull}, #' \code{frechet}, \code{Beta}, \code{dirichlet}, \code{von_mises}, #' \code{asym_laplace}, \code{gen_extreme_value}, \code{categorical}, #' \code{multinomial}, \code{cumulative}, \code{cratio}, \code{sratio}, #' \code{acat}, \code{hurdle_poisson}, \code{hurdle_negbinomial}, #' \code{hurdle_gamma}, \code{hurdle_lognormal}, \code{hurdle_cumulative}, #' \code{zero_inflated_binomial}, \code{zero_inflated_beta_binomial}, #' \code{zero_inflated_beta}, \code{zero_inflated_negbinomial}, #' \code{zero_inflated_poisson}, and \code{zero_one_inflated_beta}. #' @param link A specification for the model link function. This can be a #' name/expression or character string. See the 'Details' section for more #' information on link functions supported by each family. #' @param link_sigma Link of auxiliary parameter \code{sigma} if being predicted. #' @param link_shape Link of auxiliary parameter \code{shape} if being predicted. #' @param link_nu Link of auxiliary parameter \code{nu} if being predicted. #' @param link_phi Link of auxiliary parameter \code{phi} if being predicted. #' @param link_kappa Link of auxiliary parameter \code{kappa} if being predicted. #' @param link_beta Link of auxiliary parameter \code{beta} if being predicted. #' @param link_zi Link of auxiliary parameter \code{zi} if being predicted. #' @param link_hu Link of auxiliary parameter \code{hu} if being predicted. #' @param link_zoi Link of auxiliary parameter \code{zoi} if being predicted. #' @param link_coi Link of auxiliary parameter \code{coi} if being predicted. 
#' @param link_disc Link of auxiliary parameter \code{disc} if being predicted. #' @param link_bs Link of auxiliary parameter \code{bs} if being predicted. #' @param link_ndt Link of auxiliary parameter \code{ndt} if being predicted. #' @param link_bias Link of auxiliary parameter \code{bias} if being predicted. #' @param link_alpha Link of auxiliary parameter \code{alpha} if being predicted. #' @param link_quantile Link of auxiliary parameter \code{quantile} if being predicted. #' @param link_xi Link of auxiliary parameter \code{xi} if being predicted. #' @param threshold A character string indicating the type #' of thresholds (i.e. intercepts) used in an ordinal model. #' \code{"flexible"} provides the standard unstructured thresholds, #' \code{"equidistant"} restricts the distance between #' consecutive thresholds to the same value, and #' \code{"sum_to_zero"} ensures the thresholds sum to zero. #' @param refcat Optional name of the reference response category used in #' \code{categorical}, \code{multinomial}, \code{dirichlet} and #' \code{logistic_normal} models. If \code{NULL} (the default), the first #' category is used as the reference. If \code{NA}, all categories will be #' predicted, which requires strong priors or carefully specified predictor #' terms in order to lead to an identified model. #' @param bhaz Currently for experimental purposes only. #' #' @details #' Below, we list common use cases for the different families. #' This list is not ment to be exhaustive. 
#' \itemize{ #' \item{Family \code{gaussian} can be used for linear regression.} #' #' \item{Family \code{student} can be used for robust linear regression #' that is less influenced by outliers.} #' #' \item{Family \code{skew_normal} can handle skewed responses in linear #' regression.} #' #' \item{Families \code{poisson}, \code{negbinomial}, and \code{geometric} #' can be used for regression of unbounded count data.} #' #' \item{Families \code{bernoulli}, \code{binomial}, and \code{beta_binomial} #' can be used for binary regression (i.e., most commonly logistic #' regression).} #' #' \item{Families \code{categorical} and \code{multinomial} can be used for #' multi-logistic regression when there are more than two possible outcomes.} #' #' \item{Families \code{cumulative}, \code{cratio} ('continuation ratio'), #' \code{sratio} ('stopping ratio'), and \code{acat} ('adjacent category') #' leads to ordinal regression.} #' #' \item{Families \code{Gamma}, \code{weibull}, \code{exponential}, #' \code{lognormal}, \code{frechet}, \code{inverse.gaussian}, and \code{cox} #' (Cox proportional hazards model) can be used (among others) for #' time-to-event regression also known as survival regression.} #' #' \item{Families \code{weibull}, \code{frechet}, and \code{gen_extreme_value} #' ('generalized extreme value') allow for modeling extremes.} #' #' \item{Families \code{beta}, \code{dirichlet}, and \code{logistic_normal} #' can be used to model responses representing rates or probabilities.} #' #' \item{Family \code{asym_laplace} allows for quantile regression when fixing #' the auxiliary \code{quantile} parameter to the quantile of interest.} #' #' \item{Family \code{exgaussian} ('exponentially modified Gaussian') and #' \code{shifted_lognormal} are especially suited to model reaction times.} #' #' \item{Family \code{wiener} provides an implementation of the Wiener #' diffusion model. 
For this family, the main formula predicts the drift #' parameter 'delta' and all other parameters are modeled as auxiliary parameters #' (see \code{\link{brmsformula}} for details).} #' #' \item{Families \code{hurdle_poisson}, \code{hurdle_negbinomial}, #' \code{hurdle_gamma}, \code{hurdle_lognormal}, \code{zero_inflated_poisson}, #' \code{zero_inflated_negbinomial}, \code{zero_inflated_binomial}, #' \code{zero_inflated_beta_binomial}, \code{zero_inflated_beta}, #' \code{zero_one_inflated_beta}, and \code{hurdle_cumulative} allow to estimate #' zero-inflated and hurdle models. These models can be very helpful when there #' are many zeros in the data (or ones in case of one-inflated models) #' that cannot be explained by the primary distribution of the response.} #' } #' #' Below, we list all possible links for each family. #' The first link mentioned for each family is the default. #' \itemize{ #' \item{Families \code{gaussian}, \code{student}, \code{skew_normal}, #' \code{exgaussian}, \code{asym_laplace}, and \code{gen_extreme_value} #' support the links (as names) \code{identity}, \code{log}, \code{inverse}, #' and \code{softplus}.} #' #' \item{Families \code{poisson}, \code{negbinomial}, \code{geometric}, #' \code{zero_inflated_poisson}, \code{zero_inflated_negbinomial}, #' \code{hurdle_poisson}, and \code{hurdle_negbinomial} support #' \code{log}, \code{identity}, \code{sqrt}, and \code{softplus}.} #' #' \item{Families \code{binomial}, \code{bernoulli}, \code{beta_binomial}, #' \code{zero_inflated_binomial}, \code{zero_inflated_beta_binomial}, #' \code{Beta}, \code{zero_inflated_beta}, and \code{zero_one_inflated_beta} #' support \code{logit}, \code{probit}, \code{probit_approx}, \code{cloglog}, #' \code{cauchit}, \code{identity}, and \code{log}.} #' #' \item{Families \code{cumulative}, \code{cratio}, \code{sratio}, #' \code{acat}, and \code{hurdle_cumulative} support \code{logit}, #' \code{probit}, \code{probit_approx}, \code{cloglog}, and \code{cauchit}.} #' 
#' \item{Families \code{categorical}, \code{multinomial}, and \code{dirichlet} #' support \code{logit}.} #' #' \item{Families \code{Gamma}, \code{weibull}, \code{exponential}, #' \code{frechet}, and \code{hurdle_gamma} support #' \code{log}, \code{identity}, \code{inverse}, and \code{softplus}.} #' #' \item{Families \code{lognormal} and \code{hurdle_lognormal} #' support \code{identity} and \code{inverse}.} #' #' \item{Family \code{logistic_normal} supports \code{identity}.} #' #' \item{Family \code{inverse.gaussian} supports \code{1/mu^2}, #' \code{inverse}, \code{identity}, \code{log}, and \code{softplus}.} #' #' \item{Family \code{von_mises} supports \code{tan_half} and #' \code{identity}.} #' #' \item{Family \code{cox} supports \code{log}, \code{identity}, #' and \code{softplus} for the proportional hazards parameter.} #' #' \item{Family \code{wiener} supports \code{identity}, \code{log}, #' and \code{softplus} for the main parameter which represents the #' drift rate.} #' } #' #' Please note that when calling the \code{\link[stats:family]{Gamma}} family #' function of the \pkg{stats} package, the default link will be #' \code{inverse} instead of \code{log} although the latter is the default in #' \pkg{brms}. Also, when using the family functions \code{gaussian}, #' \code{binomial}, \code{poisson}, and \code{Gamma} of the \pkg{stats} #' package (see \code{\link[stats:family]{family}}), special link functions #' such as \code{softplus} or \code{cauchit} won't work. In this case, you #' have to use \code{brmsfamily} to specify the family with corresponding link #' function. 
#' #' @seealso \code{\link[brms:brm]{brm}}, #' \code{\link[stats:family]{family}}, #' \code{\link{customfamily}} #' #' @examples #' # create a family object #' (fam1 <- student("log")) #' # alternatively use the brmsfamily function #' (fam2 <- brmsfamily("student", "log")) #' # both leads to the same object #' identical(fam1, fam2) #' #' @export brmsfamily <- function(family, link = NULL, link_sigma = "log", link_shape = "log", link_nu = "logm1", link_phi = "log", link_kappa = "log", link_beta = "log", link_zi = "logit", link_hu = "logit", link_zoi = "logit", link_coi = "logit", link_disc = "log", link_bs = "log", link_ndt = "log", link_bias = "logit", link_xi = "log1p", link_alpha = "identity", link_quantile = "logit", threshold = "flexible", refcat = NULL, bhaz = NULL) { slink <- substitute(link) .brmsfamily( family, link = link, slink = slink, link_sigma = link_sigma, link_shape = link_shape, link_nu = link_nu, link_phi = link_phi, link_kappa = link_kappa, link_beta = link_beta, link_zi = link_zi, link_hu = link_hu, link_zoi = link_zoi, link_coi = link_coi, link_disc = link_disc, link_bs = link_bs, link_ndt = link_ndt, link_bias = link_bias, link_alpha = link_alpha, link_xi = link_xi, link_quantile = link_quantile, threshold = threshold, refcat = refcat, bhaz = bhaz ) } # helper function to prepare brmsfamily objects # @param family character string naming the model family # @param link character string naming the link function # @param slink can be used with substitute(link) for # non-standard evaluation of the link function # @param threshold threshold type for ordinal models # @param ... link functions (as character strings) of parameters # @return an object of 'brmsfamily' which inherits from 'family' .brmsfamily <- function(family, link = NULL, slink = link, threshold = "flexible", refcat = NULL, bhaz = NULL, ...) { family <- tolower(as_one_character(family)) aux_links <- list(...) 
pattern <- c("^normal$", "^zi_", "^hu_") replacement <- c("gaussian", "zero_inflated_", "hurdle_") family <- rename(family, pattern, replacement, fixed = FALSE) ok_families <- lsp("brms", pattern = "^\\.family_") ok_families <- sub("^\\.family_", "", ok_families) if (!family %in% ok_families) { stop2(family, " is not a supported family. Supported ", "families are:\n", collapse_comma(ok_families)) } family_info <- get(paste0(".family_", family))() ok_links <- family_info$links family_info$links <- NULL # non-standard evaluation of link if (!is.character(slink)) { slink <- deparse0(slink) } if (!slink %in% ok_links) { if (is.character(link)) { slink <- link } else if (!length(link) || identical(link, NA)) { slink <- NA } } if (length(slink) != 1L) { stop2("Argument 'link' must be of length 1.") } if (is.na(slink)) { slink <- ok_links[1] } if (!slink %in% ok_links) { stop2("'", slink, "' is not a supported link ", "for family '", family, "'.\nSupported links are: ", collapse_comma(ok_links)) } out <- list( family = family, link = slink, linkfun = function(mu) link(mu, link = slink), linkinv = function(eta) inv_link(eta, link = slink) ) out[names(family_info)] <- family_info class(out) <- c("brmsfamily", "family") all_valid_dpars <- c(valid_dpars(out), valid_dpars(out, type = "multi")) for (dp in all_valid_dpars) { alink <- as.character(aux_links[[paste0("link_", dp)]]) if (length(alink)) { alink <- as_one_character(alink) valid_links <- links_dpars(dp) if (!alink %in% valid_links) { stop2( "'", alink, "' is not a supported link ", "for parameter '", dp, "'.\nSupported links are: ", collapse_comma(valid_links) ) } out[[paste0("link_", dp)]] <- alink } } if (is_ordinal(out$family)) { # TODO: move specification of 'threshold' to the 'resp_thres' function? 
thres_options <- c("flexible", "equidistant", "sum_to_zero") out$threshold <- match.arg(threshold, thres_options) } if (conv_cats_dpars(out$family)) { if (!has_joint_link(out$family)) { out$refcat <- NA } else if (!is.null(refcat)) { allow_na_ref <- !is_logistic_normal(out$family) out$refcat <- as_one_character(refcat, allow_na = allow_na_ref) } } if (is_cox(out$family)) { if (!is.null(bhaz)) { if (!is.list(bhaz)) { stop2("'bhaz' should be a list.") } out$bhaz <- bhaz } else { out$bhaz <- list() } # set default arguments if (is.null(out$bhaz$df)) { out$bhaz$df <- 5L } if (is.null(out$bhaz$intercept)) { out$bhaz$intercept <- TRUE } } out } # checks and corrects validity of the model family # @param family Either a function, an object of class 'family' # or a character string of length one or two # @param link an optional character string naming the link function # ignored if family is a function or a family object # @param threshold optional character string specifying the threshold # type in ordinal models validate_family <- function(family, link = NULL, threshold = NULL) { if (is.function(family)) { family <- family() } if (!is(family, "brmsfamily")) { if (is.family(family)) { link <- family$link family <- family$family } if (is.character(family)) { if (is.null(link)) { link <- family[2] } family <- .brmsfamily(family[1], link = link) } else { stop2("Argument 'family' is invalid.") } } if (is_ordinal(family) && !is.null(threshold)) { # slot 'threshold' deprecated as of brms > 1.7.0 threshold <- match.arg(threshold, c("flexible", "equidistant")) family$threshold <- threshold } family } # extract special information of families # @param x object from which to extract # @param y name of the component to extract family_info <- function(x, y, ...) { UseMethod("family_info") } #' @export family_info.default <- function(x, y, ...) { x <- as.character(x) ulapply(x, .family_info, y = y, ...) } .family_info <- function(x, y, ...) 
{ x <- as_one_character(x) y <- as_one_character(y) if (y == "family") { return(x) } if (!nzchar(x)) { return(NULL) } info <- get(paste0(".family_", x))() if (y == "link") { out <- info$links[1] # default link } else { info$links <- NULL out <- info[[y]] } out } family_info.NULL <- function(x, y, ...) { NULL } #' @export family_info.list <- function(x, y, ...) { ulapply(x, family_info, y = y, ...) } #' @export family_info.family <- function(x, y, ...) { family_info(x$family, y = y, ...) } #' @export family_info.brmsfamily <- function(x, y, ...) { y <- as_one_character(y) out <- x[[y]] if (is.null(out)) { # required for models fitted with brms 2.2 or earlier out <- family_info(x$family, y = y, ...) } out } #' @export family_info.mixfamily <- function(x, y, ...) { out <- lapply(x$mix, family_info, y = y, ...) combine_family_info(out, y = y) } #' @export family_info.brmsformula <- function(x, y, ...) { family_info(x$family, y = y, ...) } #' @export family_info.mvbrmsformula <- function(x, y, ...) { out <- lapply(x$forms, family_info, y = y, ...) combine_family_info(out, y = y) } #' @export family_info.brmsterms <- function(x, y, ...) { family_info(x$family, y = y, ...) } #' @export family_info.mvbrmsterms <- function(x, y, ...) { out <- lapply(x$terms, family_info, y = y, ...) combine_family_info(out, y = y) } #' @export family_info.btl <- function(x, y, ...) { family_info(x$family, y = y, ...) } #' @export family_info.btnl <- function(x, y, ...) { family_info(x$family, y = y, ...) } #' @export family_info.brmsfit <- function(x, y, ...) { family_info(x$formula, y = y, ...) } # combine information from multiple families # provides special handling for certain elements combine_family_info <- function(x, y, ...) 
{ y <- as_one_character(y) unite <- c( "dpars", "type", "specials", "include", "const", "cats", "ad", "normalized" ) if (y %in% c("family", "link")) { x <- unlist(x) } else if (y %in% unite) { x <- Reduce("union", x) } else if (y == "ybounds") { x <- do_call(rbind, x) x <- c(max(x[, 1]), min(x[, 2])) } else if (y == "closed") { # closed only if no bounds are open x <- do_call(rbind, x) clb <- !any(ulapply(x[, 1], isFALSE)) cub <- !any(ulapply(x[, 2], isFALSE)) x <- c(clb, cub) } else if (y == "thres") { # thresholds are the same across mixture components x <- x[[1]] } x } #' @rdname brmsfamily #' @export student <- function(link = "identity", link_sigma = "log", link_nu = "logm1") { slink <- substitute(link) .brmsfamily("student", link = link, slink = slink, link_sigma = link_sigma, link_nu = link_nu) } #' @rdname brmsfamily #' @export bernoulli <- function(link = "logit") { slink <- substitute(link) .brmsfamily("bernoulli", link = link, slink = slink) } #' @rdname brmsfamily #' @export beta_binomial <- function(link = "logit", link_phi = "log") { slink <- substitute(link) .brmsfamily("beta_binomial", link = link, slink = slink, link_phi = link_phi) } #' @rdname brmsfamily #' @export negbinomial <- function(link = "log", link_shape = "log") { slink <- substitute(link) .brmsfamily("negbinomial", link = link, slink = slink, link_shape = link_shape) } # not yet officially supported # @rdname brmsfamily # @export negbinomial2 <- function(link = "log", link_sigma = "log") { slink <- substitute(link) .brmsfamily("negbinomial2", link = link, slink = slink, link_sigma = link_sigma) } #' @rdname brmsfamily #' @export geometric <- function(link = "log") { slink <- substitute(link) .brmsfamily("geometric", link = link, slink = slink) } # do not export yet! 
# Family constructor wrappers, all documented together under ?brmsfamily.
# Each one captures 'link' both by value and as an unevaluated expression
# (so users may pass the link as a bare symbol) and delegates to the
# internal .brmsfamily() factory.

# @rdname brmsfamily
# @export
discrete_weibull <- function(link = "logit", link_shape = "log") {
  link_expr <- substitute(link)
  .brmsfamily("discrete_weibull", link = link, slink = link_expr,
              link_shape = link_shape)
}

# do not export yet!
# @rdname brmsfamily
# @export
com_poisson <- function(link = "log", link_shape = "log") {
  link_expr <- substitute(link)
  .brmsfamily("com_poisson", link = link, slink = link_expr,
              link_shape = link_shape)
}

#' @rdname brmsfamily
#' @export
lognormal <- function(link = "identity", link_sigma = "log") {
  link_expr <- substitute(link)
  .brmsfamily("lognormal", link = link, slink = link_expr,
              link_sigma = link_sigma)
}

#' @rdname brmsfamily
#' @export
shifted_lognormal <- function(link = "identity", link_sigma = "log",
                              link_ndt = "log") {
  link_expr <- substitute(link)
  .brmsfamily("shifted_lognormal", link = link, slink = link_expr,
              link_sigma = link_sigma, link_ndt = link_ndt)
}

#' @rdname brmsfamily
#' @export
skew_normal <- function(link = "identity", link_sigma = "log",
                        link_alpha = "identity") {
  link_expr <- substitute(link)
  .brmsfamily("skew_normal", link = link, slink = link_expr,
              link_sigma = link_sigma, link_alpha = link_alpha)
}

#' @rdname brmsfamily
#' @export
exponential <- function(link = "log") {
  link_expr <- substitute(link)
  .brmsfamily("exponential", link = link, slink = link_expr)
}

#' @rdname brmsfamily
#' @export
weibull <- function(link = "log", link_shape = "log") {
  link_expr <- substitute(link)
  .brmsfamily("weibull", link = link, slink = link_expr,
              link_shape = link_shape)
}

#' @rdname brmsfamily
#' @export
frechet <- function(link = "log", link_nu = "logm1") {
  link_expr <- substitute(link)
  .brmsfamily("frechet", link = link, slink = link_expr,
              link_nu = link_nu)
}

#' @rdname brmsfamily
#' @export
gen_extreme_value <- function(link = "identity", link_sigma = "log",
                              link_xi = "log1p") {
  link_expr <- substitute(link)
  .brmsfamily("gen_extreme_value", link = link, slink = link_expr,
              link_sigma = link_sigma, link_xi = link_xi)
}

#' @rdname brmsfamily
#' @export
exgaussian <- function(link = "identity", link_sigma = "log",
                       link_beta = "log") {
  link_expr <- substitute(link)
  .brmsfamily("exgaussian", link = link, slink = link_expr,
              link_sigma = link_sigma, link_beta = link_beta)
}

#' @rdname brmsfamily
#' @export
wiener <- function(link = "identity", link_bs = "log",
                   link_ndt = "log", link_bias = "logit") {
  link_expr <- substitute(link)
  .brmsfamily("wiener", link = link, slink = link_expr,
              link_bs = link_bs, link_ndt = link_ndt,
              link_bias = link_bias)
}

#' @rdname brmsfamily
#' @export
Beta <- function(link = "logit", link_phi = "log") {
  link_expr <- substitute(link)
  .brmsfamily("beta", link = link, slink = link_expr,
              link_phi = link_phi)
}

#' @rdname brmsfamily
#' @export
dirichlet <- function(link = "logit", link_phi = "log", refcat = NULL) {
  link_expr <- substitute(link)
  .brmsfamily("dirichlet", link = link, slink = link_expr,
              link_phi = link_phi, refcat = refcat)
}

# not yet exported
# @rdname brmsfamily
# @export
dirichlet2 <- function(link = "log") {
  link_expr <- substitute(link)
  .brmsfamily("dirichlet2", link = link, slink = link_expr, refcat = NA)
}

#' @rdname brmsfamily
#' @export
logistic_normal <- function(link = "identity", link_sigma = "log",
                            refcat = NULL) {
  link_expr <- substitute(link)
  .brmsfamily("logistic_normal", link = link, slink = link_expr,
              link_sigma = link_sigma, refcat = refcat)
}

#' @rdname brmsfamily
#' @export
von_mises <- function(link = "tan_half", link_kappa = "log") {
  link_expr <- substitute(link)
  .brmsfamily("von_mises", link = link, slink = link_expr,
              link_kappa = link_kappa)
}

#' @rdname brmsfamily
#' @export
asym_laplace <- function(link = "identity", link_sigma = "log",
                         link_quantile = "logit") {
  link_expr <- substitute(link)
  .brmsfamily("asym_laplace", link = link, slink = link_expr,
              link_sigma = link_sigma, link_quantile = link_quantile)
}

# do not export yet!
# Family constructor wrappers (continued); see ?brmsfamily.
# Each captures 'link' as an unevaluated expression via substitute() so the
# link may be given as a bare symbol, then delegates to .brmsfamily().

# @rdname brmsfamily
# @export
zero_inflated_asym_laplace <- function(link = "identity", link_sigma = "log",
                                       link_quantile = "logit",
                                       link_zi = "logit") {
  slink <- substitute(link)
  .brmsfamily("zero_inflated_asym_laplace", link = link, slink = slink,
              link_sigma = link_sigma, link_quantile = link_quantile,
              link_zi = link_zi)
}

#' @rdname brmsfamily
#' @export
cox <- function(link = "log", bhaz = NULL) {
  slink <- substitute(link)
  # FIX: 'slink' was previously computed but not passed on, making cox() the
  # only family constructor without symbol-based 'link' specification;
  # forward it for consistency with all sibling constructors.
  .brmsfamily("cox", link = link, slink = slink, bhaz = bhaz)
}

#' @rdname brmsfamily
#' @export
hurdle_poisson <- function(link = "log", link_hu = "logit") {
  slink <- substitute(link)
  .brmsfamily("hurdle_poisson", link = link, slink = slink,
              link_hu = link_hu)
}

#' @rdname brmsfamily
#' @export
hurdle_negbinomial <- function(link = "log", link_shape = "log",
                               link_hu = "logit") {
  slink <- substitute(link)
  .brmsfamily("hurdle_negbinomial", link = link, slink = slink,
              link_shape = link_shape, link_hu = link_hu)
}

#' @rdname brmsfamily
#' @export
hurdle_gamma <- function(link = "log", link_shape = "log",
                         link_hu = "logit") {
  slink <- substitute(link)
  .brmsfamily("hurdle_gamma", link = link, slink = slink,
              link_shape = link_shape, link_hu = link_hu)
}

#' @rdname brmsfamily
#' @export
hurdle_lognormal <- function(link = "identity", link_sigma = "log",
                             link_hu = "logit") {
  slink <- substitute(link)
  .brmsfamily("hurdle_lognormal", link = link, slink = slink,
              link_sigma = link_sigma, link_hu = link_hu)
}

#' @rdname brmsfamily
#' @export
hurdle_cumulative <- function(link = "logit", link_hu = "logit",
                              link_disc = "log", threshold = "flexible") {
  slink <- substitute(link)
  .brmsfamily("hurdle_cumulative", link = link, slink = slink,
              link_hu = link_hu, link_disc = link_disc,
              threshold = threshold)
}

#' @rdname brmsfamily
#' @export
zero_inflated_beta <- function(link = "logit", link_phi = "log",
                               link_zi = "logit") {
  slink <- substitute(link)
  .brmsfamily("zero_inflated_beta", link = link, slink = slink,
              link_phi = link_phi, link_zi = link_zi)
}

#' @rdname brmsfamily
#' @export
zero_one_inflated_beta <- function(link = "logit", link_phi = "log",
                                   link_zoi = "logit", link_coi = "logit") {
  slink <- substitute(link)
  .brmsfamily("zero_one_inflated_beta", link = link, slink = slink,
              link_phi = link_phi, link_zoi = link_zoi,
              link_coi = link_coi)
}

#' @rdname brmsfamily
#' @export
zero_inflated_poisson <- function(link = "log", link_zi = "logit") {
  slink <- substitute(link)
  .brmsfamily("zero_inflated_poisson", link = link, slink = slink,
              link_zi = link_zi)
}

#' @rdname brmsfamily
#' @export
zero_inflated_negbinomial <- function(link = "log", link_shape = "log",
                                      link_zi = "logit") {
  slink <- substitute(link)
  .brmsfamily("zero_inflated_negbinomial", link = link, slink = slink,
              link_shape = link_shape, link_zi = link_zi)
}

#' @rdname brmsfamily
#' @export
zero_inflated_binomial <- function(link = "logit", link_zi = "logit") {
  slink <- substitute(link)
  .brmsfamily("zero_inflated_binomial", link = link, slink = slink,
              link_zi = link_zi)
}

#' @rdname brmsfamily
#' @export
zero_inflated_beta_binomial <- function(link = "logit", link_phi = "log",
                                        link_zi = "logit") {
  slink <- substitute(link)
  .brmsfamily("zero_inflated_beta_binomial", link = link, slink = slink,
              link_phi = link_phi, link_zi = link_zi)
}

#' @rdname brmsfamily
#' @export
categorical <- function(link = "logit", refcat = NULL) {
  slink <- substitute(link)
  .brmsfamily("categorical", link = link, slink = slink, refcat = refcat)
}

#' @rdname brmsfamily
#' @export
multinomial <- function(link = "logit", refcat = NULL) {
  slink <- substitute(link)
  .brmsfamily("multinomial", link = link, slink = slink, refcat = refcat)
}

#' @rdname brmsfamily
#' @export
cumulative <- function(link = "logit", link_disc = "log",
                       threshold = "flexible") {
  slink <- substitute(link)
  .brmsfamily("cumulative", link = link, slink = slink,
              link_disc = link_disc, threshold = threshold)
}

#' @rdname brmsfamily
#' @export
sratio <- function(link = "logit", link_disc = "log",
                   threshold = "flexible") {
  slink <- substitute(link)
  .brmsfamily("sratio", link = link, slink = slink,
              link_disc = link_disc, threshold = threshold)
}

#' @rdname brmsfamily
#' @export
cratio <- function(link = "logit", link_disc = "log",
                   threshold = "flexible") {
  slink <- substitute(link)
  .brmsfamily("cratio", link = link, slink = slink,
              link_disc = link_disc, threshold = threshold)
}

#' @rdname brmsfamily
#' @export
acat <- function(link = "logit", link_disc = "log",
                 threshold = "flexible") {
  slink <- substitute(link)
  .brmsfamily("acat", link = link, slink = slink,
              link_disc = link_disc, threshold = threshold)
}

#' Finite Mixture Families in \pkg{brms}
#'
#' Set up a finite mixture family for use in \pkg{brms}.
#'
#' @param ... One or more objects providing a description of the
#'   response distributions to be combined in the mixture model.
#'   These can be family functions, calls to family functions or
#'   character strings naming the families. For details of supported
#'   families see \code{\link{brmsfamily}}.
#' @param flist Optional list of objects, which are treated in the
#'   same way as objects passed via the \code{...} argument.
#' @param nmix Optional numeric vector specifying the number of times
#'   each family is repeated. If specified, it must have the same length
#'   as the number of families passed via \code{...} and \code{flist}.
#' @param order Ordering constraint to identify mixture components.
#'   If \code{'mu'} or \code{TRUE}, population-level intercepts
#'   of the mean parameters are ordered in non-ordinal models
#'   and fixed to the same value in ordinal models (see details).
#'   If \code{'none'} or \code{FALSE}, no ordering constraint is applied.
#'   If \code{NULL} (the default), \code{order} is set to \code{'mu'}
#'   if all families are the same and \code{'none'} otherwise.
#'   Other ordering constraints may be implemented in the future.
#'
#' @return An object of class \code{mixfamily}.
#'
#' @details
#'
#' Most families supported by \pkg{brms} can be used to form mixtures. The
#' response variable has to be valid for all components of the mixture family.
#' Currently, the number of mixture components has to be specified by the user.
#' It is not yet possible to estimate the number of mixture components from the
#' data.
#'
#' Ordering intercepts in mixtures of ordinal families is not possible as each
#' family has itself a set of vector of intercepts (i.e. ordinal thresholds).
#' Instead, \pkg{brms} will fix the vector of intercepts across components in
#' ordinal mixtures, if desired, so that users can try to identify the mixture
#' model via selective inclusion of predictors.
#'
#' For most mixture models, you may want to specify priors on the
#' population-level intercepts via \code{\link{set_prior}} to improve
#' convergence. In addition, it is sometimes necessary to set \code{init = 0}
#' in the call to \code{\link{brm}} to allow chains to initialize properly.
#'
#' For more details on the specification of mixture
#' models, see \code{\link{brmsformula}}.
#'
#' @examples
#' \dontrun{
#' ## simulate some data
#' set.seed(1234)
#' dat <- data.frame(
#'   y = c(rnorm(200), rnorm(100, 6)),
#'   x = rnorm(300),
#'   z = sample(0:1, 300, TRUE)
#' )
#'
#' ## fit a simple normal mixture model
#' mix <- mixture(gaussian, gaussian)
#' prior <- c(
#'   prior(normal(0, 7), Intercept, dpar = mu1),
#'   prior(normal(5, 7), Intercept, dpar = mu2)
#' )
#' fit1 <- brm(bf(y ~ x + z), dat, family = mix,
#'             prior = prior, chains = 2)
#' summary(fit1)
#' pp_check(fit1)
#'
#' ## use different predictors for the components
#' fit2 <- brm(bf(y ~ 1, mu1 ~ x, mu2 ~ z), dat, family = mix,
#'             prior = prior, chains = 2)
#' summary(fit2)
#'
#' ## fix the mixing proportions
#' fit3 <- brm(bf(y ~ x + z, theta1 = 1, theta2 = 2),
#'             dat, family = mix, prior = prior,
#'             init = 0, chains = 2)
#' summary(fit3)
#' pp_check(fit3)
#'
#' ## predict the mixing proportions
#' fit4 <- brm(bf(y ~ x + z, theta2 ~ x),
#'             dat, family = mix, prior = prior,
#'             init = 0, chains = 2)
#' summary(fit4)
#' pp_check(fit4)
#'
#' ## compare model fit
#' LOO(fit1, fit2, fit3, fit4)
#' }
#'
#' @export
mixture <- function(..., flist = NULL, nmix = 1, order = NULL) {
  # collect mixture components passed via ... and/or 'flist'
  comps <- c(list(...), flist)
  if (length(nmix) == 1L) {
    nmix <- rep(nmix, length(comps))
  }
  if (length(comps) != length(nmix)) {
    stop2("The length of 'nmix' should be the same ",
          "as the number of mixture components.")
  }
  # replicate each component as often as requested via 'nmix'
  comps <- comps[rep(seq_along(comps), nmix)]
  family <- list(
    family = "mixture",
    link = "identity",
    mix = lapply(comps, validate_family)
  )
  class(family) <- c("mixfamily", "brmsfamily", "family")
  # validity checks
  if (length(family$mix) < 2L) {
    stop2("Expecting at least 2 mixture components.")
  }
  if (use_real(family) && use_int(family)) {
    stop2("Cannot mix families with real and integer support.")
  }
  ordinal_flags <- ulapply(family$mix, is_ordinal)
  if (any(ordinal_flags) && any(!ordinal_flags)) {
    stop2("Cannot mix ordinal and non-ordinal families.")
  }
  forbidden <- ulapply(family$mix, no_mixture)
  if (any(forbidden)) {
    stop2("Some of the families are not allowed in mixture models.")
  }
  for (comp in family$mix) {
    if (is.customfamily(comp) && "theta" %in% comp$dpars) {
      stop2("Parameter name 'theta' is reserved in mixture models.")
    }
  }
  if (is.null(order)) {
    # choose a sensible default ordering constraint
    if (any(ordinal_flags)) {
      family$order <- "none"
      message("Setting order = 'none' for mixtures of ordinal families.")
    } else if (length(unique(family_names(family))) == 1L) {
      family$order <- "mu"
      message("Setting order = 'mu' for mixtures of the same family.")
    } else {
      family$order <- "none"
      message("Setting order = 'none' for mixtures of different families.")
    }
  } else {
    # validate a user-supplied ordering constraint
    if (length(order) != 1L) {
      stop2("Argument 'order' must be of length 1.")
    }
    if (is.character(order)) {
      valid_order <- c("none", "mu")
      if (!order %in% valid_order) {
        stop2("Argument 'order' is invalid. Valid options are: ",
              collapse_comma(valid_order))
      }
      family$order <- order
    } else {
      family$order <- ifelse(as.logical(order), "mu", "none")
    }
  }
  family
}

#' Custom Families in \pkg{brms} Models
#'
#' Define custom families (i.e. response distribution) for use in
#' \pkg{brms} models. It allows users to benefit from the modeling
#' flexibility of \pkg{brms}, while applying their self-defined likelihood
#' functions. All of the post-processing methods for \code{brmsfit}
#' objects can be made compatible with custom families.
#' See \code{vignette("brms_customfamilies")} for more details.
#' For a list of built-in families see \code{\link{brmsfamily}}.
#'
#' @aliases customfamily
#'
#' @param name Name of the custom family.
#' @param dpars Names of the distributional parameters of
#'   the family. One parameter must be named \code{"mu"} and
#'   the main formula of the model will correspond to that
#'   parameter.
#' @param links Names of the link functions of the
#'   distributional parameters.
#' @param type Indicates if the response distribution is
#'   continuous (\code{"real"}) or discrete (\code{"int"}). This controls
#'   if the corresponding density function will be named with
#'   \code{_lpdf} or \code{_lpmf}.
#' @param lb Vector of lower bounds of the distributional
#'   parameters. Defaults to \code{NA} that is no lower bound.
#' @param ub Vector of upper bounds of the distributional
#'   parameters. Defaults to \code{NA} that is no upper bound.
#' @param vars Names of variables that are part of the likelihood function
#'   without being distributional parameters. That is, \code{vars} can be used
#'   to pass data to the likelihood. Such arguments will be added to the list of
#'   function arguments at the end, after the distributional parameters. See
#'   \code{\link{stanvar}} for details about adding self-defined data to the
#'   generated \pkg{Stan} model. Addition arguments \code{vreal} and \code{vint}
#'   may be used for this purpose as well (see Examples below). See also
#'   \code{\link{brmsformula}} and \code{\link{addition-terms}} for more
#'   details.
#' @param loop Logical; Should the likelihood be evaluated via a loop
#'   (\code{TRUE}; the default) over observations in Stan?
#'   If \code{FALSE}, the Stan code will be written in a vectorized
#'   manner over observations if possible.
#' @param specials A character vector of special options to enable
#'   for this custom family. Currently for internal use only.
#' @param threshold Optional threshold type for custom ordinal families.
#'   Ignored for non-ordinal families.
#' @param log_lik Optional function to compute log-likelihood values of
#'   the model in \R. This is only relevant if one wants to ensure
#'   compatibility with method \code{\link[brms:log_lik.brmsfit]{log_lik}}.
#' @param posterior_predict Optional function to compute posterior prediction of
#'   the model in \R. This is only relevant if one wants to ensure compatibility
#'   with method \code{\link[brms:posterior_predict.brmsfit]{posterior_predict}}.
#' @param posterior_epred Optional function to compute expected values of the
#'   posterior predictive distribution of the model in \R. This is only relevant
#'   if one wants to ensure compatibility with method
#'   \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}}.
#' @param predict Deprecated alias of `posterior_predict`.
#' @param fitted Deprecated alias of `posterior_epred`.
#' @param env An \code{\link{environment}} in which certain post-processing
#'   functions related to the custom family can be found, if there were not
#'   directly passed to \code{custom_family}. This is only
#'   relevant if one wants to ensure compatibility with the methods
#'   \code{\link[brms:log_lik.brmsfit]{log_lik}},
#'   \code{\link[brms:posterior_predict.brmsfit]{posterior_predict}}, or
#'   \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}}.
#'   By default, \code{env} is the environment from which
#'   \code{custom_family} is called.
#'
#' @details The corresponding probability density or mass \code{Stan}
#'   functions need to have the same name as the custom family.
#'   That is if a family is called \code{myfamily}, then the
#'   \pkg{Stan} functions should be called \code{myfamily_lpdf} or
#'   \code{myfamily_lpmf} depending on whether it defines a
#'   continuous or discrete distribution.
#'
#' @return An object of class \code{customfamily} inheriting
#'   from class \code{\link{brmsfamily}}.
#'
#' @seealso \code{\link{brmsfamily}}, \code{\link{brmsformula}},
#'   \code{\link{stanvar}}
#'
#' @examples
#' \dontrun{
#' ## demonstrate how to fit a beta-binomial model
#' ## generate some fake data
#' phi <- 0.7
#' n <- 300
#' z <- rnorm(n, sd = 0.2)
#' ntrials <- sample(1:10, n, replace = TRUE)
#' eta <- 1 + z
#' mu <- exp(eta) / (1 + exp(eta))
#' a <- mu * phi
#' b <- (1 - mu) * phi
#' p <- rbeta(n, a, b)
#' y <- rbinom(n, ntrials, p)
#' dat <- data.frame(y, z, ntrials)
#'
#' # define a custom family
#' beta_binomial2 <- custom_family(
#'   "beta_binomial2", dpars = c("mu", "phi"),
#'   links = c("logit", "log"), lb = c(NA, 0),
#'   type = "int", vars = "vint1[n]"
#' )
#'
#' # define the corresponding Stan density function
#' stan_density <- "
#' real beta_binomial2_lpmf(int y, real mu, real phi, int N) {
#'   return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi);
#' }
#' "
#' stanvars <- stanvar(scode = stan_density, block = "functions")
#'
#' # fit the model
#' fit <- brm(y | vint(ntrials) ~ z, data = dat,
#'            family = beta_binomial2, stanvars = stanvars)
#' summary(fit)
#'
#'
#' # define a *vectorized* custom family (no loop over observations)
#' # notice also that 'vint' no longer has an observation index
#' beta_binomial2_vec <- custom_family(
#'   "beta_binomial2", dpars = c("mu", "phi"),
#'   links = c("logit", "log"), lb = c(NA, 0),
#'   type = "int", vars = "vint1", loop = FALSE
#' )
#'
#' # define the corresponding Stan density function
#' stan_density_vec <- "
#' real beta_binomial2_lpmf(int[] y, vector mu, real phi, int[] N) {
#'   return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi);
#' }
#' "
#' stanvars_vec <- stanvar(scode = stan_density_vec, block = "functions")
#'
#' # fit the model
#' fit_vec <- brm(y | vint(ntrials) ~ z, data = dat,
#'                family = beta_binomial2_vec,
#'                stanvars = stanvars_vec)
#' summary(fit_vec)
#' }
#'
#' @export
custom_family <- function(name, dpars = "mu", links = "identity",
                          type = c("real", "int"), lb = NA, ub = NA,
                          vars = NULL, loop = TRUE, specials = NULL,
                          threshold = "flexible", log_lik = NULL,
                          posterior_predict = NULL, posterior_epred = NULL,
                          predict = NULL, fitted = NULL,
                          env = parent.frame()) {
  # coerce and validate all inputs before building the family object
  name <- as_one_character(name)
  dpars <- as.character(dpars)
  links <- as.character(links)
  type <- match.arg(type)
  lb <- as.character(lb)
  ub <- as.character(ub)
  vars <- as.character(vars)
  loop <- as_one_logical(loop)
  specials <- as.character(specials)
  env <- as.environment(env)
  # support the deprecated 'predict'/'fitted' argument names
  posterior_predict <- use_alias(posterior_predict, predict)
  posterior_epred <- use_alias(posterior_epred, fitted)
  if (any(duplicated(dpars))) {
    stop2("Duplicated 'dpars' are not allowed.")
  }
  if (!"mu" %in% dpars) {
    stop2("All families must have a 'mu' parameter.")
  }
  if (any(grepl("_|\\.", dpars))) {
    stop2("Dots or underscores are not allowed in 'dpars'.")
  }
  # trailing digits are reserved for indexing mixture components
  if (any(grepl("[[:digit:]]+$", dpars))) {
    stop2("'dpars' should not end with a number.")
  }
  # recycle scalar 'links', 'lb', and 'ub' to the length of 'dpars'
  # (get/assign update the local variables of those names in-place)
  for (arg in c("links", "lb", "ub")) {
    obj <- get(arg)
    if (length(obj) == 1L) {
      obj <- rep(obj, length(dpars))
      assign(arg, obj)
    }
    if (length(dpars) != length(obj)) {
      stop2("'", arg, "' must be of the same length as 'dpars'.")
    }
  }
  # validate the signatures of the optional post-processing functions
  if (!is.null(log_lik)) {
    log_lik <- as.function(log_lik)
    args <- names(formals(log_lik))
    if (!is_equal(args[1:2], c("i", "prep"))) {
      stop2("The first two arguments of 'log_lik' ",
            "should be 'i' and 'prep'.")
    }
  }
  if (!is.null(posterior_predict)) {
    posterior_predict <- as.function(posterior_predict)
    args <- names(formals(posterior_predict))
    if (!is_equal(args[1:3], c("i", "prep", "..."))) {
      stop2("The first three arguments of 'posterior_predict' ",
            "should be 'i', 'prep', and '...'.")
    }
  }
  if (!is.null(posterior_epred)) {
    posterior_epred <- as.function(posterior_epred)
    args <- names(formals(posterior_epred))
    if (!is_equal(args[1], "prep")) {
      stop2("The first argument of 'posterior_epred' should be 'prep'.")
    }
  }
  lb <- named_list(dpars, lb)
  ub <- named_list(dpars, ub)
  is_mu <- "mu" == dpars
  # the link of 'mu' becomes the family's main link function
  link <- links[is_mu]
  normalized <- ""
  out <- nlist(
    family = "custom", link, name, dpars, lb, ub, type, vars, loop,
    specials, log_lik, posterior_predict, posterior_epred, env, normalized
  )
  if (length(dpars) > 1L) {
    # store non-mu links as 'link_<dpar>' elements, matching built-in families
    out[paste0("link_", dpars[!is_mu])] <- links[!is_mu]
  }
  class(out) <- c("customfamily", "brmsfamily", "family")
  if (is_ordinal(out)) {
    threshold <- match.arg(threshold)
    out$threshold <- threshold
  }
  out
}

# get post-processing methods for custom families
# @param family a customfamily object (others return NULL)
# @param name name of the method, e.g. "log_lik"
# @return the stored function, or one named '<name>_<family>' found in the
#   family's environment
custom_family_method <- function(family, name) {
  if (!is.customfamily(family)) {
    return(NULL)
  }
  out <- family[[name]]
  if (!is.function(out)) {
    out <- paste0(name, "_", family$name)
    out <- get(out, family$env)
  }
  out
}

# get valid distributional parameters for a family
valid_dpars <- function(family, ...) {
  UseMethod("valid_dpars")
}

#' @export
valid_dpars.default <- function(family, type = NULL, ...) {
  # an empty family implies a single 'mu' parameter (or none for special types)
  if (!length(family)) {
    if (is.null(type)) {
      return("mu")
    } else {
      return(NULL)
    }
  }
  family <- validate_family(family)
  info <- paste0(usc(type, "suffix"), "dpars")
  family_info(family, info, ...)
}

#' @export
valid_dpars.mixfamily <- function(family, type = NULL, ...) {
  out <- lapply(family$mix, valid_dpars, type = type, ...)
  # append the component index to each parameter name, e.g. 'mu1', 'sigma2'
  for (i in seq_along(out)) {
    if (length(out[[i]])) {
      out[[i]] <- paste0(out[[i]], i)
    }
  }
  out <- unlist(out)
  if (is.null(type)) {
    # add mixing proportions; 'c<-' is a brms-internal replacement helper
    c(out) <- paste0("theta", seq_along(family$mix))
  }
  out
}

#' @export
valid_dpars.brmsformula <- function(family, ...) {
  valid_dpars(family$family, ...)
}

#' @export
valid_dpars.mvbrmsformula <- function(family, ...) {
  ulapply(family$forms, valid_dpars, ...)
}

#' @export
valid_dpars.brmsterms <- function(family, ...) {
  valid_dpars(family$family, ...)
}

#' @export
valid_dpars.mvbrmsterms <- function(family, ...) {
  ulapply(family$terms, valid_dpars, ...)
}

#' @export
valid_dpars.brmsfit <- function(family, ...) {
  valid_dpars(family$formula, ...)
}

# class of a distributional parameter
# strips trailing digits, e.g. 'sigma2' -> 'sigma'
dpar_class <- function(dpar, family = NULL) {
  out <- sub("[[:digit:]]*$", "", dpar)
  if (!is.null(family)) {
    # TODO: avoid these special cases by changing naming conventions
    # perhaps add a protected "C" before category names
    # and a protected "M" for mixture components
    if (conv_cats_dpars(family)) {
      # categorical-like models have non-integer suffixes
      # that will not be caught by the standard procedure
      multi_dpars <- valid_dpars(family, type = "multi")
      for (dp in multi_dpars) {
        sel <- grepl(paste0("^", dp), out)
        out[sel] <- dp
      }
    }
  }
  out
}

# id of a distributional parameter
# extracts the trailing digits, e.g. 'mu2' -> "2"; "" if there are none
dpar_id <- function(dpar) {
  out <- get_matches("[[:digit:]]+$", dpar, simplify = FALSE)
  ulapply(out, function(x) ifelse(length(x), x, ""))
}

# link functions for distributional parameters
# @return the supported links for the given dpar class; the first entry
#   is the default
links_dpars <- function(dpar) {
  if (!length(dpar)) dpar <- ""
  switch(dpar,
    character(0),
    mu = "identity",  # not actually used
    sigma = c("log", "identity", "softplus", "squareplus"),
    shape = c("log", "identity", "softplus", "squareplus"),
    nu = c("logm1", "identity"),
    phi = c("log", "identity", "softplus", "squareplus"),
    kappa = c("log", "identity", "softplus", "squareplus"),
    beta = c("log", "identity", "softplus", "squareplus"),
    zi = c("logit", "identity"),
    hu = c("logit", "identity"),
    zoi = c("logit", "identity"),
    coi = c("logit", "identity"),
    disc = c("log", "identity", "softplus", "squareplus"),
    bs = c("log", "identity", "softplus", "squareplus"),
    ndt = c("log", "identity", "softplus", "squareplus"),
    bias = c("logit", "identity"),
    quantile = c("logit", "identity"),
    xi = c("log1p", "identity"),
    alpha = c("identity", "log", "softplus", "squareplus"),
    theta = c("identity")
  )
}

# is a distributional parameter a mixture proportion?
is_mix_proportion <- function(dpar, family) {
  dpar_class <- dpar_class(dpar, family)
  dpar_class %in% "theta" & is.mixfamily(family)
}

# generate a family object of a distributional parameter
dpar_family <- function(family, dpar, ...) {
  UseMethod("dpar_family")
}

#' @export
dpar_family.default <- function(family, dpar, ...) {
  dp_class <- dpar_class(dpar, family)
  if (dp_class == "mu") {
    if (conv_cats_dpars(family)) {
      link <- NULL
      if (!has_joint_link(family)) {
        link <- family$link
      }
      # joint links are applied directly in the likelihood function
      # so link is treated as 'identity'
      out <- .dpar_family(dpar, link)
    } else {
      # standard single mu parameters just store the original family
      out <- family
    }
  } else {
    # link_ is always defined for non-mu parameters
    link <- family[[paste0("link_", dp_class)]]
    out <- .dpar_family(dpar, link)
  }
  out
}

#' @export
dpar_family.mixfamily <- function(family, dpar, ...) {
  # dispatch to the family of the mixture component the dpar belongs to
  dp_id <- as.numeric(dpar_id(dpar))
  if (!(length(dp_id) == 1L && is.numeric(dp_id))) {
    stop2("Parameter '", dpar, "' is not a valid mixture parameter.")
  }
  out <- dpar_family(family$mix[[dp_id]], dpar, ...)
  out$order <- family$order
  out
}

# set up special family objects for distributional parameters
# @param dpar name of the distributional parameter
# @param link optional link function of the parameter
.dpar_family <- function(dpar = NULL, link = NULL) {
  links <- links_dpars(dpar_class(dpar))
  if (!length(link)) {
    # fall back to the default link of the dpar class
    if (!length(links)) {
      link <- "identity"
    } else {
      link <- links[1]
    }
  }
  link <- as_one_character(link)
  structure(
    nlist(family = "", link, dpar),
    class = c("brmsfamily", "family")
  )
}

#' @export
print.brmsfamily <- function(x, links = FALSE, newline = TRUE, ...) {
  cat("\nFamily:", x$family, "\n")
  cat("Link function:", x$link, "\n")
  if (!is.null(x$threshold)) {
    cat("Threshold:", x$threshold, "\n")
  }
  if (isTRUE(links) || is.character(links)) {
    # also print the links of (selected) distributional parameters
    dp_links <- x[grepl("^link_", names(x))]
    names(dp_links) <- sub("^link_", "", names(dp_links))
    if (is.character(links)) {
      dp_links <- rmNULL(dp_links[links])
    }
    for (dp in names(dp_links)) {
      cat(paste0(
        "Link function of '", dp, "' (if predicted): ", dp_links[[dp]], "\n"
      ))
    }
  }
  if (newline) {
    cat("\n")
  }
  invisible(x)
}

#' @export
print.mixfamily <- function(x, newline = TRUE, ...) {
  cat("\nMixture\n")
  for (i in seq_along(x$mix)) {
    print(x$mix[[i]], newline = FALSE, ...)
  }
  if (newline) {
    cat("\n")
  }
  invisible(x)
}

#' @export
print.customfamily <- function(x, links = FALSE, newline = TRUE, ...) {
  cat("\nCustom family:", x$name, "\n")
  cat("Link function:", x$link, "\n")
  cat("Parameters:", paste0(x$dpars, collapse = ", "), "\n")
  if (isTRUE(links) || is.character(links)) {
    # also print the links of (selected) distributional parameters
    dp_links <- x[grepl("^link_", names(x))]
    names(dp_links) <- sub("^link_", "", names(dp_links))
    if (is.character(links)) {
      dp_links <- rmNULL(dp_links[links])
    }
    for (dp in names(dp_links)) {
      cat(paste0(
        "Link function of '", dp, "' (if predicted): ", dp_links[[dp]], "\n"
      ))
    }
  }
  if (newline) {
    cat("\n")
  }
  invisible(x)
}

#' @method summary family
#' @export
summary.family <- function(object, link = TRUE, ...) {
  # one-line summary, e.g. "gaussian(identity)"
  out <- object$family
  if (link) {
    out <- paste0(out, "(", object$link, ")")
  }
  out
}

#' @method summary mixfamily
#' @export
summary.mixfamily <- function(object, link = FALSE, ...) {
  families <- ulapply(object$mix, summary, link = link, ...)
  paste0("mixture(", paste0(families, collapse = ", "), ")")
}

#' @method summary customfamily
#' @export
summary.customfamily <- function(object, link = TRUE, ...) {
  object$family <- object$name
  summary.family(object, link = link, ...)
}

summarise_families <- function(x) {
  # summary of families used in summary.brmsfit
  UseMethod("summarise_families")
}

#' @export
summarise_families.mvbrmsformula <- function(x, ...) {
  out <- ulapply(x$forms, summarise_families, ...)
  paste0("MV(", paste0(out, collapse = ", "), ")")
}

#' @export
summarise_families.brmsformula <- function(x, ...) {
  summary(x$family, link = FALSE, ...)
}

summarise_links <- function(x, ...) {
  # summary of link functions used in summary.brmsfit
  UseMethod("summarise_links")
}

#' @export
summarise_links.mvbrmsformula <- function(x, wsp = 0, ...) {
  # 'wsp' controls the indentation of continuation lines
  str_wsp <- collapse(rep(" ", wsp))
  links <- ulapply(x$forms, summarise_links, mv = TRUE, ...)
  paste0(links, collapse = paste0("\n", str_wsp))
}

#' @export
summarise_links.brmsformula <- function(x, mv = FALSE, ...) {
  x <- brmsterms(x)
  dpars <- valid_dpars(x)
  links <- setNames(rep("identity", length(dpars)), dpars)
  links_pred <- ulapply(x$dpars, function(x) x$family$link)
  links[names(links_pred)] <- links_pred
  if (conv_cats_dpars(x)) {
    links[grepl("^mu", names(links))] <- x$family$link
  }
  # in multivariate models, suffix names with the response
  resp <- if (mv) usc(combine_prefix(x))
  names(links) <- paste0(names(links), resp)
  paste0(names(links), " = ", links, collapse = "; ")
}

# S3 class testers
is.family <- function(x) {
  inherits(x, "family")
}

is.brmsfamily <- function(x) {
  inherits(x, "brmsfamily")
}

is.mixfamily <- function(x) {
  inherits(x, "mixfamily")
}

is.customfamily <- function(x) {
  inherits(x, "customfamily")
}

family_names <- function(x) {
  family_info(x, "family")
}

# indicate if family uses real responses
use_real <- function(family) {
  "real" %in% family_info(family, "type")
}

# indicate if family uses integer responses
use_int <- function(family) {
  "int" %in% family_info(family, "type")
}

is_binary <- function(family) {
  "binary" %in% family_info(family, "specials")
}

is_categorical <- function(family) {
  "categorical" %in% family_info(family, "specials")
}

is_ordinal <- function(family) {
  "ordinal" %in% family_info(family, "specials")
}

is_multinomial <- function(family) {
  "multinomial" %in% family_info(family, "specials")
}

is_logistic_normal <- function(family) {
  "logistic_normal" %in% family_info(family, "specials")
}

is_simplex <- function(family) {
  "simplex" %in% family_info(family, "specials")
}

is_polytomous <- function(family) {
  is_categorical(family) || is_ordinal(family) ||
    is_multinomial(family) || is_simplex(family)
}

is_cox <- function(family) {
  "cox" %in% family_info(family, "specials")
}

# has joint link function over multiple inputs
has_joint_link <- function(family) {
  "joint_link" %in% family_info(family, "specials")
}

allow_factors <- function(family) {
  specials <- c("binary", "categorical", "ordinal")
  any(specials %in% family_info(family, "specials"))
}

# check if the family has natural residuals
has_natural_residuals <- function(family) {
  "residuals" %in% family_info(family, "specials")
}

# check if the family allows for residual correlations
has_rescor <- function(family) {
  "rescor" %in% family_info(family, "specials")
}

# check if category specific effects are allowed
allow_cs <- function(family) {
  any(c("cs", "ocs") %in% family_info(family, "specials"))
}

# check if category specific effects should be ordered
needs_ordered_cs <- function(family) {
  "ocs" %in% family_info(family, "specials")
}

# choose dpar names based on categories?
conv_cats_dpars <- function(family) {
  is_categorical(family) || is_multinomial(family) || is_simplex(family)
}

# check if mixtures of the given families are allowed
no_mixture <- function(family) {
  is_categorical(family) || is_multinomial(family) || is_simplex(family)
}

# indicate if the response should consist of multiple columns
has_multicol <- function(family) {
  is_multinomial(family) || is_simplex(family)
}

# indicate if the response is modeled on the log-scale
# even if formally the link function is not 'log'
has_logscale <- function(family) {
  is.element("logscale", family_info(family, "specials"))
}

# indicate if family makes use of argument trials
has_trials <- function(family) {
  uses_trials <- is.element("trials", family_info(family, "ad"))
  uses_trials && !is.element("custom", family_names(family))
}

# indicate if family has more than two response categories
has_cat <- function(family) {
  is_categorical(family) || is_multinomial(family) || is_simplex(family)
}

# indicate if family has thresholds
has_thres <- function(family) {
  is_ordinal(family)
}

# indicate if family has equidistant thresholds
has_equidistant_thres <- function(family) {
  is.element("equidistant", family_info(family, "threshold"))
}

# indicate if family has sum-to-zero thresholds
has_sum_to_zero_thres <- function(family) {
  is.element("sum_to_zero", family_info(family, "threshold"))
}

# indicate if family has ordered thresholds
has_ordered_thres <- function(family) {
  is.element("ordered_thres", family_info(family, "specials"))
}

# compute threshold - eta in the likelihood
has_thres_minus_eta <- function(family) {
  is.element("thres_minus_eta", family_info(family, "specials"))
}

# compute eta - threshold in the likelihood
has_eta_minus_thres <- function(family) {
  is.element("eta_minus_thres", family_info(family, "specials"))
}

# has an extra category that is not part of the ordinal scale (#1429)
has_extra_cat <- function(family) {
  is.element("extra_cat", family_info(family, "specials"))
}

# get names of response categories
get_cats <- function(family) {
  family_info(family, "cats")
}

# get reference category categorical-like models
get_refcat <- function(family, int = FALSE) {
  out <- family_info(family, "refcat")
  if (int) {
    # return the category's integer position instead of its name
    out <- match(out, family_info(family, "cats"))
  }
  out
}

# get names of predicted categories categorical-like models
get_predcats <- function(family) {
  all_cats <- family_info(family, "cats")
  setdiff(all_cats, family_info(family, "refcat"))
}

# get names of ordinal thresholds for prior specification
# @param group name of a group for which to extract categories
get_thres <- function(family, group = "") {
  group <- as_one_character(group)
  all_thres <- family_info(family, "thres")
  subset2(all_thres, group = group)$thres
}

# get group names of ordinal thresholds
get_thres_groups <- function(family) {
  unique(family_info(family, "thres")$group)
}

# has the model group specific thresholds?
has_thres_groups <- function(family) {
  any(nzchar(get_thres_groups(family)))
}

has_ndt <- function(family) {
  is.element("ndt", dpar_class(family_info(family, "dpars")))
}

has_sigma <- function(family) {
  is.element("sigma", dpar_class(family_info(family, "dpars")))
}

# check if sigma should be explicitly set to 0
no_sigma <- function(bterms) {
  stopifnot(is.brmsterms(bterms))
  if (!is.formula(bterms$adforms$se)) {
    return(FALSE)
  }
  se <- eval_rhs(bterms$adforms$se)
  se_only <- isFALSE(se$flags$sigma)
  if (se_only && use_ac_cov_time(bterms)) {
    stop2("Please set argument 'sigma' of function 'se' ",
          "to TRUE when modeling time-series covariance matrices.")
  }
  se_only
}

# has the model a non-predicted but estimated sigma parameter?
simple_sigma <- function(bterms) {
  stopifnot(is.brmsterms(bterms))
  has_sigma(bterms) && !no_sigma(bterms) && !pred_sigma(bterms)
}

# has the model a predicted sigma parameter?
pred_sigma <- function(bterms) {
  stopifnot(is.brmsterms(bterms))
  is.element("sigma", dpar_class(names(bterms$dpars)))
}

# do not include a 'nu' parameter in a univariate model?
no_nu <- function(bterms) {
  # the multi_student_t family only has a single 'nu' parameter
  isTRUE(bterms$rescor) && "student" %in% family_names(bterms)
}

# does the family-link combination have a built-in Stan function?
# @param link link name; defaults to the family's own link
# @param dpar optional distributional parameter name used in the special's tag
# @param cdf look for a CDF version of the built-in function?
has_built_in_fun <- function(family, link = NULL, dpar = NULL, cdf = FALSE) {
  link <- link %||% family$link
  glm_special <- paste0("sbi", usc(dpar), "_", link, str_if(cdf, "_cdf"))
  all(glm_special %in% family_info(family, "specials"))
}

# suffixes of Stan lpdfs or lpmfs for which only a normalized version exists
always_normalized <- function(family) {
  family_info(family, "normalized")
}

# prepare for calling family specific post-processing functions
# stores the name of the post-processing function to call in family$fun,
# depending on the autocorrelation structure of the model
prepare_family <- function(x) {
  stopifnot(is.brmsformula(x) || is.brmsterms(x))
  family <- x$family
  acef <- tidy_acef(x)
  if (use_ac_cov_time(acef) && has_natural_residuals(x)) {
    family$fun <- paste0(family$family, "_time")
  } else if (has_ac_class(acef, "sar")) {
    acef_sar <- subset2(acef, class = "sar")
    if (has_ac_subset(acef_sar, type = "lag")) {
      family$fun <- paste0(family$family, "_lagsar")
    } else if (has_ac_subset(acef_sar, type = "error")) {
      family$fun <- paste0(family$family, "_errorsar")
    }
  } else if (has_ac_class(acef, "fcor")) {
    family$fun <- paste0(family$family, "_fcor")
  } else {
    family$fun <- family$family
  }
  family
}

# order intercepts to help identifying mixture components?
# does not work in ordinal models as they have vectors of intercepts
order_intercepts <- function(bterms) {
  dpar <- dpar_class(bterms[["dpar"]])
  if (!length(dpar)) dpar <- "mu"
  isTRUE(!is_ordinal(bterms) && dpar %in% bterms$family[["order"]])
}

# fix intercepts to help identifying mixture components?
# currently enabled only in ordinal models
fix_intercepts <- function(bterms) {
  dpar <- dpar_class(bterms[["dpar"]])
  if (!length(dpar)) dpar <- "mu"
  isTRUE(is_ordinal(bterms) && dpar %in% bterms$family[["order"]])
}

# does the mixture have a joint parameter vector 'theta'
has_joint_theta <- function(bterms) {
  stopifnot(is.brmsterms(bterms))
  is.mixfamily(bterms$family) &&
    !"theta" %in% dpar_class(names(c(bterms$dpars, bterms$fdpars)))
}

# extract family boundaries
family_bounds <- function(x, ...) {
  UseMethod("family_bounds")
}

# @return a named list with one element per response variable
#' @export
family_bounds.mvbrmsterms <- function(x, ...) {
  lapply(x$terms, family_bounds, ...)
}

# bounds of likelihood families
# @return a list with elements 'lb' and 'ub'
#' @export
family_bounds.brmsterms <- function(x, ...) {
  family <- x$family$family
  if (is.null(family)) {
    return(list(lb = -Inf, ub = Inf))
  }
  resp <- usc(x$resp)
  # TODO: define in family-lists.R
  pos_families <- c(
    "poisson", "negbinomial", "negbinomial2", "geometric", "gamma",
    "weibull", "exponential", "lognormal", "frechet", "inverse.gaussian",
    "hurdle_poisson", "hurdle_negbinomial", "hurdle_gamma",
    "hurdle_lognormal", "zero_inflated_poisson",
    "zero_inflated_negbinomial"
  )
  beta_families <- c("beta", "zero_inflated_beta", "zero_one_inflated_beta")
  ordinal_families <- c("cumulative", "cratio", "sratio", "acat")
  if (family %in% pos_families) {
    out <- list(lb = 0, ub = Inf)
  } else if (family %in% c("bernoulli", beta_families)) {
    out <- list(lb = 0, ub = 1)
  } else if (family %in% c("categorical", ordinal_families)) {
    out <- list(lb = 1, ub = paste0("ncat", resp))
  } else if (family %in% c("binomial", "zero_inflated_binomial",
                           "beta_binomial", "zero_inflated_beta_binomial")) {
    out <- list(lb = 0, ub = paste0("trials", resp))
  } else if (family %in% "von_mises") {
    out <- list(lb = -pi, ub = pi)
  } else if (family %in% c("wiener", "shifted_lognormal")) {
    out <- list(lb = paste0("min_Y", resp), ub = Inf)
  } else if (family %in% c("hurdle_cumulative")) {
    out <- list(lb = 0, ub = paste0("ncat", resp))
  } else {
    out <- list(lb = -Inf, ub = Inf)
  }
  out
}

# NOTE(review): the following line is tar archive member header residue
# (start of brms/R/brmsfit-helpers.R) fused into the text stream; it is
# not R code and is preserved byte-for-byte.
brms/R/brmsfit-helpers.R0000644000176200001440000010055714465420231014660 0ustar liggesuserscontains_draws <- function(x) {
  if (!(is.brmsfit(x) && length(x$fit@sim))) {
    stop2("The model does not contain posterior draws.")
  }
  invisible(TRUE)
}

# is the model multivariate?
is_mv <- function(x) {
  stopifnot(is.brmsfit(x))
  is.mvbrmsformula(x$formula)
}

# stop if 'resp' is not a single name in a multivariate model
stopifnot_resp <- function(x, resp = NULL) {
  # TODO: merge into validate_resp?
  if (is_mv(x) && length(resp) != 1L) {
    stop2("Argument 'resp' must be a single variable name ",
          "when applying this method to a multivariate model.")
  }
  invisible(NULL)
}

# apply a link function
# @param x an array of arbitrary dimension
# @param link character string defining the link
link <- function(x, link) {
  switch(link,
    identity = x,
    log = log(x),
    logm1 = logm1(x),
    log1p = log1p(x),
    inverse = 1 / x,
    sqrt = sqrt(x),
    "1/mu^2" = 1 / x^2,
    tan_half = tan(x / 2),
    logit = logit(x),
    probit = qnorm(x),
    cauchit = qcauchy(x),
    cloglog = cloglog(x),
    probit_approx = qnorm(x),
    softplus = log_expm1(x),
    squareplus = (x^2 - 1) / x,
    softit = softit(x),
    stop2("Link '", link, "' is not supported.")
  )
}

# apply an inverse link function
# @param x an array of arbitrary dimension
# @param link a character string defining the link
inv_link <- function(x, link) {
  switch(link,
    identity = x,
    log = exp(x),
    logm1 = expp1(x),
    log1p = expm1(x),
    inverse = 1 / x,
    sqrt = x^2,
    "1/mu^2" = 1 / sqrt(x),
    tan_half = 2 * atan(x),
    logit = inv_logit(x),
    probit = pnorm(x),
    cauchit = pcauchy(x),
    cloglog = inv_cloglog(x),
    probit_approx = pnorm(x),
    softplus = log1p_exp(x),
    squareplus = (x + sqrt(x^2 + 4)) / 2,
    softit = inv_softit(x),
    stop2("Link '", link, "' is not supported.")
  )
}

# log CDF for unit interval link functions
# @param x an array of arbitrary dimension
# @param link a character string defining the link
log_cdf <- function(x, link) {
  switch(link,
    logit = log_inv_logit(x),
    probit = pnorm(x, log.p = TRUE),
    cauchit = pcauchy(x, log.p = TRUE),
    cloglog = log1m_exp(-exp(x)),
    probit_approx = pnorm(x, log.p = TRUE),
    softit = log_inv_softit(x),
    stop2("Link '", link, "' is not supported.")
  )
}

# log CCDF for unit interval link functions
# @param x an array of arbitrary dimension
# @param link a character string defining the link
log_ccdf <- function(x, link) {
  switch(link,
    logit = log1m_inv_logit(x),
    probit = pnorm(x, log.p = TRUE, lower.tail = FALSE),
    cauchit = pcauchy(x, log.p = TRUE, lower.tail = FALSE),
    cloglog = -exp(x),
    probit_approx = pnorm(x, log.p = TRUE, lower.tail = FALSE),
    softit = log1m_inv_softit(x),
    stop2("Link '", link, "' is not supported.")
  )
}

# validate integers indicating which draws to subset
# @param draw_ids optional vector of draw indices to validate
# @param ndraws optional number of draws to sample at random
validate_draw_ids <- function(x, draw_ids = NULL, ndraws = NULL) {
  ndraws_total <- ndraws(x)
  if (is.null(draw_ids) && !is.null(ndraws)) {
    ndraws <- as_one_integer(ndraws)
    if (ndraws < 1 || ndraws > ndraws_total) {
      stop2("Argument 'ndraws' should be between 1 and ",
            "the maximum number of draws (", ndraws_total, ").")
    }
    draw_ids <- sample(seq_len(ndraws_total), ndraws)
  }
  if (!is.null(draw_ids)) {
    draw_ids <- as.integer(draw_ids)
    if (any(draw_ids < 1L) || any(draw_ids > ndraws_total)) {
      stop2("Some 'draw_ids' indices are out of range.")
    }
  }
  draw_ids
}

# get correlation names as combinations of variable names
# @param names the variable names
# @param type character string to be put in front of the returned strings
# @param brackets should the correlation names contain brackets
# or underscores as separators?
# @param sep character string to separate names; only used if !brackets
# @return a vector of character strings
get_cornames <- function(names, type = "cor", brackets = TRUE, sep = "__") {
  cornames <- NULL
  if (length(names) > 1) {
    for (i in seq_along(names)[-1]) {
      for (j in seq_len(i - 1)) {
        if (brackets) {
          c(cornames) <- paste0(type, "(", names[j], ",", names[i], ")")
        } else {
          c(cornames) <- paste0(type, sep, names[j], sep, names[i])
        }
      }
    }
  }
  cornames
}

# extract names of categorical variables in the model
get_cat_vars <- function(x) {
  stopifnot(is.brmsfit(x))
  like_factor <- sapply(model.frame(x), is_like_factor)
  valid_groups <- c(
    names(model.frame(x))[like_factor],
    get_group_vars(x)
  )
  unique(valid_groups[nzchar(valid_groups)])
}

# covariance matrices based on correlation and SD draws
# @param sd matrix of draws of standard deviations
# @param cor matrix of draws of correlations
get_cov_matrix <- function(sd, cor = NULL) {
  sd <- as.matrix(sd)
  stopifnot(all(sd >= 0))
  ndraws <- nrow(sd)
  size <- ncol(sd)
  out <- array(diag(1, size), dim = c(size, size, ndraws))
  out <- aperm(out, perm = c(3, 1, 2))
  for (i in seq_len(size)) {
    out[, i, i] <- sd[, i]^2
  }
  if (length(cor)) {
    cor <- as.matrix(cor)
    stopifnot(nrow(sd) == nrow(cor))
    stopifnot(min(cor) >= -1, max(cor) <= 1)
    stopifnot(ncol(cor) == size * (size - 1) / 2)
    k <- 0
    # fill the off-diagonals in lower-triangular column order
    for (i in seq_len(size)[-1]) {
      for (j in seq_len(i - 1)) {
        k = k + 1
        out[, j, i] <- out[, i, j] <- cor[, k] * sd[, i] * sd[, j]
      }
    }
  }
  out
}

# correlation matrices based on correlation draws
# @param cor draws of correlations
# @param size optional size of the desired correlation matrix;
#   ignored if 'cor' is specified
# @param ndraws optional number of posterior draws;
#   ignored if 'cor' is specified
get_cor_matrix <- function(cor, size = NULL, ndraws = NULL) {
  if (length(cor)) {
    cor <- as.matrix(cor)
    # invert size * (size - 1) / 2 = ncol(cor) for size
    size <- -1 / 2 + sqrt(1 / 4 + 2 * ncol(cor)) + 1
    ndraws <- nrow(cor)
  }
  size <- as_one_numeric(size)
  ndraws <- as_one_numeric(ndraws)
  stopifnot(is_wholenumber(size) && size > 0)
  stopifnot(is_wholenumber(ndraws) && ndraws > 0)
  out <- array(diag(1, size), dim = c(size, size, ndraws))
  out <- aperm(out, perm = c(3, 1, 2))
  if (length(cor)) {
    k <- 0
    for (i in seq_len(size)[-1]) {
      for (j in seq_len(i - 1)) {
        k = k + 1
        out[, j, i] <- out[, i, j] <- cor[, k]
      }
    }
  }
  out
}

# compute covariance matrices of autocor structures
# @param prep a brmsprep object
# @param obs observations for which to compute the covariance matrix
# @param Jtime vector indicating to which time points obs belong
# @param latent compute covariance matrix for latent residuals?
get_cov_matrix_ac <- function(prep, obs = NULL, Jtime = NULL, latent = FALSE) {
  if (is.null(obs)) {
    obs <- seq_len(prep$nobs)
  }
  nobs <- length(obs)
  ndraws <- prep$ndraws
  acef <- prep$ac$acef
  # prepare correlations
  if (has_ac_class(acef, "arma")) {
    ar <- as.numeric(prep$ac$ar)
    ma <- as.numeric(prep$ac$ma)
    if (length(ar) && !length(ma)) {
      cor <- get_cor_matrix_ar1(ar, nobs)
    } else if (!length(ar) && length(ma)) {
      cor <- get_cor_matrix_ma1(ma, nobs)
    } else if (length(ar) && length(ma)) {
      cor <- get_cor_matrix_arma1(ar, ma, nobs)
    } else {
      stop2("Neither 'ar' nor 'ma' were supplied. Please report a bug.")
    }
  } else if (has_ac_class(acef, "cosy")) {
    cosy <- as.numeric(prep$ac$cosy)
    cor <- get_cor_matrix_cosy(cosy, nobs)
  } else if (has_ac_class(acef, "unstr")) {
    cortime <- prep$ac$cortime
    cor <- get_cor_matrix_unstr(cortime, Jtime)
  } else if (has_ac_class(acef, "fcor")) {
    cor <- get_cor_matrix_fcor(prep$ac$Mfcor, ndraws)
  } else {
    cor <- get_cor_matrix_ident(ndraws, nobs)
  }
  # prepare known standard errors
  if (!is.null(prep$data$se)) {
    se2 <- prep$data$se[obs]^2
    se2 <- array(diag(se2, nobs), dim = c(nobs, nobs, ndraws))
    se2 <- aperm(se2, perm = c(3, 1, 2))
    # make sure not to add 'se' twice
    prep$data$se <- NULL
  } else {
    se2 <- rep(0, nobs)
  }
  # prepare residual standard deviations
  if (latent) {
    sigma2 <- as.numeric(prep$ac$sderr)^2
  } else {
    sigma <- get_dpar(prep, "sigma", i = obs)
    if (NCOL(sigma) > 1L) {
      # sigma varies across observations
      sigma2 <- array(dim = c(ndraws, nobs, nobs))
      for (s in seq_rows(sigma2)) {
        sigma2[s, , ] <- outer(sigma[s, ], sigma[s, ])
      }
    } else {
      sigma2 <- as.numeric(sigma)^2
    }
  }
  sigma2 * cor + se2
}

# compute AR1 correlation matrices
# @param ar AR1 autocorrelation draws
# @param nobs number of rows of the covariance matrix
# @return a numeric 'ndraws' x 'nobs' x 'nobs' array
get_cor_matrix_ar1 <- function(ar, nobs) {
  out <- array(0, dim = c(NROW(ar), nobs, nobs))
  fac <- 1 / (1 - ar^2)
  pow_ar <- as.list(rep(1, nobs + 1))
  for (i in seq_len(nobs)) {
    pow_ar[[i + 1]] <- ar^i
    out[, i, i] <- fac
    for (j in seq_len(i - 1)) {
      out[, i, j] <- fac * pow_ar[[i - j + 1]]
      out[, j, i] <- out[, i, j]
    }
  }
  out
}

# compute MA1 correlation matrices
# @param ma MA1 autocorrelation draws
# @param nobs number of rows of the covariance matrix
# @return a numeric 'ndraws' x 'nobs' x 'nobs' array
get_cor_matrix_ma1 <- function(ma, nobs) {
  out <- array(0, dim = c(NROW(ma), nobs, nobs))
  gamma0 <- 1 + ma^2
  for (i in seq_len(nobs)) {
    out[, i, i] <- gamma0
    if (i > 1) {
      out[, i, i - 1] <- ma
    }
    if (i < nobs) {
      out[, i, i + 1] <- ma
    }
  }
  out
}

# compute ARMA1 correlation matrices
# @param ar AR1 autocorrelation draws
# @param ma MA1 autocorrelation draws
# @param nobs number of rows of the covariance matrix
# @return a numeric 'ndraws' x 'nobs' x 'nobs' array
get_cor_matrix_arma1 <- function(ar, ma, nobs) {
  out <- array(0, dim = c(NROW(ar), nobs, nobs))
  fac <- 1 / (1 - ar^2)
  gamma0 <- 1 + ma^2 + 2 * ar * ma
  gamma <- as.list(rep(NA, nobs))
  gamma[[1]] <- (1 + ar * ma) * (ar + ma)
  for (i in seq_len(nobs)) {
    out[, i, i] <- fac * gamma0
    gamma[[i]] <- gamma[[1]] * ar^(i - 1)
    for (j in seq_len(i - 1)) {
      out[, i, j] <- fac * gamma[[i - j]]
      out[, j, i] <- out[, i, j]
    }
  }
  out
}

# compute compound symmetry correlation matrices
# @param cosy compound symmetry correlation draws
# @param nobs number of rows of the covariance matrix
# @return a numeric 'ndraws' x 'nobs' x 'nobs' array
get_cor_matrix_cosy <- function(cosy, nobs) {
  out <- array(0, dim = c(NROW(cosy), nobs, nobs))
  for (i in seq_len(nobs)) {
    out[, i, i] <- 1
    for (j in seq_len(i - 1)) {
      out[, i, j] <- cosy
      out[, j, i] <- out[, i, j]
    }
  }
  out
}

# compute unstructured time correlation matrices
# @param cortime time correlation draws
# @param Jtime indicator of rows/cols to consider in cortime
# @return a numeric 'ndraws' x 'nobs' x 'nobs' array
#   where nobs = length(Jtime[Jtime > 0])
get_cor_matrix_unstr <- function(cortime, Jtime) {
  stopifnot(length(Jtime) > 0L)
  Jtime <- Jtime[Jtime > 0]
  get_cor_matrix(cortime)[, Jtime, Jtime, drop = FALSE]
}

# prepare a fixed correlation matrix
# @param Mfcor correlation matrix to be prepared
# @param ndraws number of posterior draws
# @return a numeric 'ndraws' x 'nobs' x 'nobs' array
get_cor_matrix_fcor <- function(Mfcor, ndraws) {
  out <- array(Mfcor, dim = c(dim(Mfcor), ndraws))
  aperm(out, c(3, 1, 2))
}

# compute an identity correlation matrix
# @param ndraws number of posterior draws
# @param nobs number of rows of the covariance matrix
# @return a numeric 'ndraws' x 'nobs' x 'nobs' array
get_cor_matrix_ident <- function(ndraws, nobs) {
out <- array(0, dim = c(ndraws, nobs, nobs))
  for (i in seq_len(nobs)) {
    out[, i, i] <- 1
  }
  out
}

#' Draws of a Distributional Parameter
#'
#' Get draws of a distributional parameter from a \code{brmsprep} or
#' \code{mvbrmsprep} object. This function is primarily useful when developing
#' custom families or packages depending on \pkg{brms}.
#' This function lets callers easily handle both the case when the
#' distributional parameter is predicted directly, via a (non-)linear
#' predictor or fixed to a constant. See the vignette
#' \code{vignette("brms_customfamilies")} for an example use case.
#'
#' @param prep A 'brmsprep' or 'mvbrmsprep' object created by
#'   \code{\link[brms:prepare_predictions.brmsfit]{prepare_predictions}}.
#' @param dpar Name of the distributional parameter.
#' @param i The observation numbers for which predictions shall be extracted.
#'   If \code{NULL} (the default), all observations will be extracted.
#'   Ignored if \code{dpar} is not predicted.
#' @param inv_link Should the inverse link function be applied?
#'   If \code{NULL} (the default), the value is chosen internally.
#'   In particular, \code{inv_link} is \code{TRUE} by default for custom
#'   families.
#' @return
#'   If the parameter is predicted and \code{i} is \code{NULL} or
#'   \code{length(i) > 1}, an \code{S x N} matrix. If the parameter is not
#'   predicted or \code{length(i) == 1}, a vector of length \code{S}. Here
#'   \code{S} is the number of draws and \code{N} is the number of
#'   observations or length of \code{i} if specified.
#'
#' @examples
#' \dontrun{
#' posterior_predict_my_dist <- function(i, prep, ...) {
#'   mu <- brms::get_dpar(prep, "mu", i = i)
#'   mypar <- brms::get_dpar(prep, "mypar", i = i)
#'   my_rng(mu, mypar)
#' }
#' }
#'
#' @export
get_dpar <- function(prep, dpar, i = NULL, inv_link = NULL) {
  stopifnot(is.brmsprep(prep) || is.mvbrmsprep(prep))
  dpar <- as_one_character(dpar)
  x <- prep$dpars[[dpar]]
  stopifnot(!is.null(x))
  if (is.list(x)) {
    # compute draws of a predicted parameter
    out <- predictor(x, i = i, fprep = prep)
    if (is.null(inv_link)) {
      inv_link <- apply_dpar_inv_link(dpar, family = prep$family)
    } else {
      inv_link <- as_one_logical(inv_link)
    }
    if (inv_link) {
      out <- inv_link(out, x$family$link)
    }
    if (length(i) == 1L) {
      out <- slice_col(out, 1)
    }
  } else if (!is.null(i) && !is.null(dim(x))) {
    out <- slice_col(x, i)
  } else {
    out <- x
  }
  out
}

# get draws of a non-linear parameter
# @param x object to extract posterior draws from
# @param nlpar name of the non-linear parameter
# @param i the current observation number
# @return
#   If i is NULL or length(i) > 1: an S x N matrix
#   If length(i) == 1: a vector of length S
get_nlpar <- function(prep, nlpar, i = NULL) {
  stopifnot(is.brmsprep(prep) || is.mvbrmsprep(prep))
  x <- prep$nlpars[[nlpar]]
  stopifnot(!is.null(x))
  if (is.list(x)) {
    # compute draws of a predicted parameter
    out <- predictor(x, i = i, fprep = prep)
    if (length(i) == 1L) {
      out <- slice_col(out, 1)
    }
  } else if (!is.null(i) && !is.null(dim(x))) {
    out <- slice_col(x, i)
  } else {
    out <- x
  }
  out
}

# get the mixing proportions of mixture models
get_theta <- function(prep, i = NULL) {
  stopifnot(is.brmsprep(prep))
  if ("theta" %in% names(prep$dpars)) {
    # theta was not predicted; no need to call get_dpar
    theta <- prep$dpars$theta
  } else {
    # theta was predicted; apply softmax
    mix_family <- prep$family
    families <- family_names(mix_family)
    theta <- vector("list", length(families))
    for (j in seq_along(families)) {
      prep$family <- mix_family$mix[[j]]
      theta[[j]] <- as.matrix(get_dpar(prep, paste0("theta", j), i = i))
    }
    theta <- abind(theta, along = 3)
    for (n in seq_len(dim(theta)[2])) {
      theta[, n, ] <- softmax(slice(theta, 2, n))
    }
    if (length(i) == 1L) {
      dim(theta) <- dim(theta)[c(1, 3)]
    }
  }
  theta
}

# get posterior draws of multivariate mean vectors
# only used in multivariate models with 'rescor'
# and in univariate models with multiple 'mu' pars such as logistic_normal
get_Mu <- function(prep, i = NULL) {
  is_mv <- is.mvbrmsprep(prep)
  if (is_mv) {
    Mu <- prep$mvpars$Mu
  } else {
    stopifnot(is.brmsprep(prep))
    Mu <- prep$dpars$Mu
  }
  if (!is.null(Mu)) {
    # Mu was already computed before
    stopifnot(!is.null(i))
    Mu <- slice_col(Mu, i)
    return(Mu)
  }
  if (is_mv) {
    Mu <- lapply(prep$resps, get_dpar, "mu", i = i)
  } else {
    mu_dpars <- str_subset(names(prep$dpars), "^mu")
    Mu <- lapply(mu_dpars, get_dpar, prep = prep, i = i)
  }
  if (length(i) == 1L) {
    Mu <- do_call(cbind, Mu)
  } else {
    # keep correct dimension even if data has only 1 row
    Mu <- lapply(Mu, as.matrix)
    Mu <- abind::abind(Mu, along = 3)
  }
  Mu
}

# get posterior draws of residual covariance matrices
# only used in multivariate models with 'rescor'
# and in univariate models with multiple 'mu' pars such as logistic_normal
get_Sigma <- function(prep, i = NULL, cor_name = NULL) {
  is_mv <- is.mvbrmsprep(prep)
  if (is_mv) {
    cor_name <- "rescor"
    Sigma <- prep$mvpars$Sigma
  } else {
    stopifnot(is.brmsprep(prep))
    cor_name <- as_one_character(cor_name)
    Sigma <- prep$dpars$Sigma
  }
  if (!is.null(Sigma)) {
    # already computed before
    stopifnot(!is.null(i))
    ldim <- length(dim(Sigma))
    stopifnot(ldim %in% 3:4)
    if (ldim == 4L) {
      Sigma <- slice_col(Sigma, i)
    }
    return(Sigma)
  }
  if (is_mv) {
    cors <- prep$mvpars[[cor_name]]
    sigma <- named_list(names(prep$resps))
    for (j in seq_along(sigma)) {
      sigma[[j]] <- get_dpar(prep$resps[[j]], "sigma", i = i)
      sigma[[j]] <- add_sigma_se(sigma[[j]], prep$resps[[j]], i = i)
    }
  } else {
    cors <- prep$dpars[[cor_name]]
    sigma_names <- str_subset(names(prep$dpars), "^sigma")
    sigma <- named_list(sigma_names)
    for (j in seq_along(sigma)) {
      sigma[[j]] <- get_dpar(prep, sigma_names[j], i = i)
      sigma[[j]] <- add_sigma_se(sigma[[j]], prep, i = i)
    }
  }
  is_matrix <- ulapply(sigma, is.matrix)
  if (!any(is_matrix)) {
    # happens if length(i) == 1 or if no sigma was predicted
    sigma <- do_call(cbind, sigma)
    Sigma <- get_cov_matrix(sigma, cors)
  } else {
    for (j in seq_along(sigma)) {
      # bring all sigmas to the same dimension
      if (!is_matrix[j]) {
        sigma[[j]] <- array(sigma[[j]], dim = dim_mu(prep))
      }
    }
    nsigma <- length(sigma)
    sigma <- abind(sigma, along = 3)
    Sigma <- array(dim = c(dim_mu(prep), nsigma, nsigma))
    for (n in seq_len(ncol(Sigma))) {
      Sigma[, n, , ] <- get_cov_matrix(slice(sigma, 2, n), cors)
    }
  }
  Sigma
}

# extract user-defined standard errors
get_se <- function(prep, i = NULL) {
  stopifnot(is.brmsprep(prep))
  se <- as.vector(prep$data[["se"]])
  if (!is.null(se)) {
    if (!is.null(i)) {
      se <- se[i]
    }
    if (length(se) > 1L) {
      dim <- c(prep$ndraws, length(se))
      se <- data2draws(se, dim = dim)
    }
  } else {
    se <- 0
  }
  se
}

# add user defined standard errors to 'sigma'
# @param sigma draws of the 'sigma' parameter
add_sigma_se <- function(sigma, prep, i = NULL) {
  if ("se" %in% names(prep$data)) {
    se <- get_se(prep, i = i)
    sigma <- sqrt(se^2 + sigma^2)
  }
  sigma
}

# extract user-defined rate denominators
get_rate_denom <- function(prep, i = NULL) {
  stopifnot(is.brmsprep(prep))
  denom <- as.vector(prep$data[["denom"]])
  if (!is.null(denom)) {
    if (!is.null(i)) {
      denom <- denom[i]
    }
    if (length(denom) > 1L) {
      dim <- c(prep$ndraws, length(denom))
      denom <- data2draws(denom, dim = dim)
    }
  } else {
    denom <- 1
  }
  denom
}

# multiply a parameter with the 'rate' denominator
# @param dpar draws of the distributional parameter
multiply_dpar_rate_denom <- function(dpar, prep, i = NULL) {
  if ("denom" %in% names(prep$data)) {
    denom <- get_rate_denom(prep, i = i)
    dpar <- dpar * denom
  }
  dpar
}

# return draws of ordinal thresholds for observation i
# @param prep a bprepl or bprepnl object
# @param i observation number
subset_thres <- function(prep, i) {
  thres <- prep$thres$thres
  Jthres <- prep$thres$Jthres
  if (!is.null(Jthres)) {
    thres <- thres[, Jthres[i, 1]:Jthres[i, 2], drop = FALSE]
  }
  thres
}

# helper function of 'get_dpar' to decide if
# the link function should be applied directly
apply_dpar_inv_link <- function(dpar, family) {
  !(has_joint_link(family) && dpar_class(dpar, family) == "mu")
}

# insert zeros for the predictor term of the reference category
# in categorical-like models using the softmax response function
insert_refcat <- function(eta, refcat = 1) {
  stopifnot(is.array(eta))
  refcat <- as_one_integer(refcat, allow_na = TRUE)
  if (isNA(refcat)) {
    # no reference category used
    return(eta)
  }
  # need to add zeros for the reference category
  ndim <- length(dim(eta))
  dim_noncat <- dim(eta)[-ndim]
  zeros_arr <- array(0, dim = c(dim_noncat, 1))
  before <- seq_len(refcat - 1)
  after <- setdiff(seq_dim(eta, ndim), before)
  abind::abind(
    slice(eta, ndim, before, drop = FALSE),
    zeros_arr,
    slice(eta, ndim, after, drop = FALSE)
  )
}

# validate the 'resp' argument of 'predict' and related methods
# @param resp response names to be validated
# @param x valid response names or brmsfit object to extract names from
# @param multiple allow multiple response variables?
# @return names of validated response variables
validate_resp <- function(resp, x, multiple = TRUE) {
  if (is.brmsfit(x)) {
    x <- brmsterms(x$formula)$responses
  }
  x <- as.character(x)
  if (!length(x)) {
    # resp is unused in univariate models
    return(NULL)
  }
  if (length(resp)) {
    resp <- as.character(resp)
    if (!all(resp %in% x)) {
      stop2("Invalid argument 'resp'. Valid response ",
            "variables are: ", collapse_comma(x))
    }
    if (!multiple) {
      resp <- as_one_character(resp)
    }
  } else {
    resp <- x
  }
  resp
}

# split '...' into a list of model objects and other arguments
# takes its argument names from parent.frame()
# @param ... objects to split into model and non-model objects
# @param x object treated in the same way as '...'. Adding it is
#   necessary for substitute() to catch the name of the first
#   argument passed to S3 methods.
# @param model_names optional names of the model objects
# @param other allow non-model arguments in '...'?
# @return
#   A list of arguments. All brmsfit objects are stored
#   as a list in element 'models' unless 'other' is FALSE.
#   In the latter case just returns a list of models
split_dots <- function(x, ..., model_names = NULL, other = TRUE) {
  other <- as_one_logical(other)
  dots <- list(x, ...)
  names <- substitute(list(x, ...), env = parent.frame())[-1]
  names <- ulapply(names, deparse0)
  if (length(names)) {
    if (!length(names(dots))) {
      names(dots) <- names
    } else {
      # only fill in names for arguments not already named by the user
      has_no_name <- !nzchar(names(dots))
      names(dots)[has_no_name] <- names[has_no_name]
    }
  }
  is_brmsfit <- unlist(lapply(dots, is.brmsfit))
  models <- dots[is_brmsfit]
  models <- validate_models(models, model_names, names(models))
  out <- dots[!is_brmsfit]
  if (other) {
    out$models <- models
  } else {
    if (length(out)) {
      stop2("Only model objects can be passed to '...' for this method.")
    }
    out <- models
  }
  out
}

# reorder observations to be in the initial user-defined order
# currently only relevant for autocorrelation models
# @param eta 'ndraws' x 'nobs' matrix or array
# @param old_order optional vector to retrieve the initial data order
# @param sort keep the new order as defined by the time-series?
# @return the 'eta' matrix with possibly reordered columns
reorder_obs <- function(eta, old_order = NULL, sort = FALSE) {
  stopifnot(length(dim(eta)) %in% c(2L, 3L))
  if (!length(old_order) || sort) {
    return(eta)
  }
  stopifnot(length(old_order) == NCOL(eta))
  p(eta, old_order, row = FALSE)
}

# update .MISC environment of the stanfit object
# allows to call log_prob and other C++ using methods
# on objects not created in the current R session
# or objects created via another backend
update_misc_env <- function(x, recompile = FALSE, only_windows = FALSE) {
  stopifnot(is.brmsfit(x))
  recompile <- as_one_logical(recompile)
  only_windows <- as_one_logical(only_windows)
  if (recompile || !has_rstan_model(x)) {
    x <- add_rstan_model(x, overwrite = TRUE)
  } else if (os_is_windows() || !only_windows) {
    # TODO: detect when updating .MISC is not required
    # TODO: find a more efficient way to update .MISC
    old_backend <- x$backend
    x$backend <- "rstan"
    x$fit@.MISC <- suppressMessages(brm(fit = x, chains = 0))$fit@.MISC
    x$backend <- old_backend
  }
  x
}

#' Add compiled \pkg{rstan} models to \code{brmsfit} objects
#'
#' Compile a \code{\link[rstan:stanmodel-class]{stanmodel}} and add
#' it to a \code{brmsfit} object. This enables some advanced functionality
#' of \pkg{rstan}, most notably \code{\link[rstan:log_prob]{log_prob}}
#' and friends, to be used with brms models fitted with other Stan backends.
#'
#' @param x A \code{brmsfit} object to be updated.
#' @param overwrite Logical. If \code{TRUE}, overwrite any existing
#'   \code{\link[rstan:stanmodel-class]{stanmodel}}. Defaults to \code{FALSE}.
#'
#' @return A (possibly updated) \code{brmsfit} object.
#'
#' @export
add_rstan_model <- function(x, overwrite = FALSE) {
  stopifnot(is.brmsfit(x))
  overwrite <- as_one_logical(overwrite)
  if (!has_rstan_model(x) || overwrite) {
    message("Recompiling the model with 'rstan'")
    # threading is not yet supported by rstan and needs to be deactivated
    stanfit <- suppressMessages(rstan::stan(
      model_code = stancode(x, threads = threading(), backend = "rstan"),
      data = standata(x),
      chains = 0
    ))
    x$fit@stanmodel <- stanfit@stanmodel
    x$fit@.MISC <- stanfit@.MISC
    message("Recompilation done")
  }
  x
}

# does the model have a non-empty rstan 'stanmodel'
# that can be used for 'log_prob' and friends?
has_rstan_model <- function(x) {
  stopifnot(is.brmsfit(x))
  isTRUE(nzchar(x$fit@stanmodel@model_cpp$model_cppname)) &&
    length(ls(pos = x$fit@.MISC)) > 0
}

# extract argument names of a post-processing method
arg_names <- function(method) {
  opts <- c("posterior_predict", "posterior_epred", "log_lik")
  method <- match.arg(method, opts)
  out <- names(formals(paste0(method, ".brmsfit")))
  c(out) <- names(formals(prepare_predictions.brmsfit))
  c(out) <- names(formals(validate_newdata))
  out <- unique(out)
  out <- setdiff(out, c("object", "x", "..."))
  out
}

# validate 'cores' argument for use in post-processing functions
validate_cores_post_processing <- function(cores) {
  if (is.null(cores)) {
    if (os_is_windows()) {
      # multi cores often leads to a slowdown on windows
      # in post-processing functions as discussed in #1129
      cores <- 1L
    } else {
      cores <- getOption("mc.cores", 1L)
    }
  }
  cores <- as_one_integer(cores)
  if (cores < 1L) {
    cores <- 1L
  }
  cores
}

#' Check if cached fit can be used.
#'
#' Checks whether a given cached fit can be used without refitting when
#' \code{file_refit = "on_change"} is used.
#' This function is internal and exposed only to facilitate debugging problems
#' with cached fits. The function may change or be removed in future versions
#' and scripts should not use it.
#'
#' @param fit Old \code{brmsfit} object (e.g., loaded from file).
#' @param sdata New Stan data (result of a call to \code{\link{make_standata}}).
#'   Pass \code{NULL} to avoid this data check.
#' @param scode New Stan code (result of a call to \code{\link{make_stancode}}).
#'   Pass \code{NULL} to avoid this code check.
#' @param data New data to check consistency of factor level names.
#'   Pass \code{NULL} to avoid this data check.
#' @param algorithm New algorithm. Pass \code{NULL} to avoid algorithm check.
#' @param silent Logical. If \code{TRUE}, no messages will be given.
#' @param verbose Logical. If \code{TRUE} detailed report of the differences
#'   is printed to the console.
#' @return A boolean indicating whether a refit is needed.
#'
#' @details
#' Use with \code{verbose = TRUE} to get additional info on how the stored
#' fit differs from the given data and code.
#'
#' @export
#' @keywords internal
brmsfit_needs_refit <- function(fit, sdata = NULL, scode = NULL, data = NULL,
                                algorithm = NULL, silent = FALSE,
                                verbose = FALSE) {
  stopifnot(is.brmsfit(fit))
  silent <- as_one_logical(silent)
  verbose <- as_one_logical(verbose)
  if (!is.null(scode)) {
    scode <- as_one_character(scode)
    cached_scode <- stancode(fit)
  }
  if (!is.null(sdata)) {
    stopifnot(is.list(sdata))
    cached_sdata <- standata(fit)
  }
  if (!is.null(data)) {
    stopifnot(is.data.frame(data))
    cached_data <- fit$data
  }
  if (!is.null(algorithm)) {
    algorithm <- as_one_character(algorithm)
    stopifnot(!is.null(fit$algorithm))
  }
  refit <- FALSE
  if (!is.null(scode)) {
    # compare Stan code ignoring whitespace and comments
    if (normalize_stancode(scode) != normalize_stancode(cached_scode)) {
      if (!silent) {
        message("Stan code has changed beyond whitespace/comments.")
        if (verbose) {
          require_package("diffobj")
          print(diffobj::diffChr(scode, cached_scode, format = "ansi8"))
        }
      }
      refit <- TRUE
    }
  }
  if (!is.null(sdata)) {
    sdata_equality <- all.equal(sdata, cached_sdata, check.attributes = FALSE)
    if (!isTRUE(sdata_equality)) {
      if (!silent) {
        message("The processed data for Stan has changed.")
        if (verbose) {
          print(sdata_equality)
        }
      }
      refit <- TRUE
    }
  }
  if (!is.null(data)) {
    # check consistency of factor names
    # as they are only stored as attributes in sdata (#1128)
    factor_level_message <- FALSE
    for (var in names(cached_data)) {
      if (is_like_factor(cached_data[[var]])) {
        cached_levels <- levels(factor(cached_data[[var]]))
        new_levels <- levels(factor(data[[var]]))
        if (!is_equal(cached_levels, new_levels)) {
          if (!silent) {
            factor_level_message <- TRUE
            if (verbose) {
              cat(paste0(
                "Names of factor levels have changed for variable '", var, "' ",
                "with cached levels (", collapse_comma(cached_levels), ") ",
                "but new levels (", collapse_comma(new_levels), ").\n"
              ))
            }
          }
          refit <- TRUE
          if (!verbose) {
            # no need to check all variables if we trigger a refit anyway
            break
          }
        }
      }
    }
    if (factor_level_message) {
      message("Names of factor levels have changed.")
    }
  }
  if (!is.null(algorithm)) {
    if (algorithm != fit$algorithm) {
      if (!silent) {
        message("Algorithm has changed from '", fit$algorithm, "' to '",
                algorithm, "'.\n")
      }
      refit <- TRUE
    }
  }
  refit
}

# read a brmsfit object from a file
# @param file path to an rds file
# @return a brmsfit object or NULL
read_brmsfit <- function(file) {
  file <- check_brmsfit_file(file)
  dir <- dirname(file)
  if (!dir.exists(dir)) {
    stop2(
      "The directory '", dir, "' does not exist. Please choose an ",
      "existing directory where the model can be saved after fitting."
    )
  }
  x <- suppressWarnings(try(readRDS(file), silent = TRUE))
  if (!is_try_error(x)) {
    if (!is.brmsfit(x)) {
      stop2("Object loaded via 'file' is not of class 'brmsfit'.")
    }
    x$file <- file
  } else {
    x <- NULL
  }
  x
}

# write a brmsfit object to a file
# @param x a brmsfit object
# @param file path to an rds file
# @param compress compression format supported by saveRDS
# @return NULL
write_brmsfit <- function(x, file, compress = TRUE) {
  stopifnot(is.brmsfit(x))
  file <- check_brmsfit_file(file)
  x$file <- file
  saveRDS(x, file = file, compress = compress)
  invisible(x)
}

# check validity of file name to store a brmsfit object in
# appends the '.rds' extension if it is missing
check_brmsfit_file <- function(file) {
  file <- as_one_character(file)
  file_ending <- tolower(get_matches("\\.[^\\.]+$", file))
  if (!isTRUE(file_ending == ".rds")) {
    file <- paste0(file, ".rds")
  }
  file
}

# check if a function requires an old default setting
# only used to ensure backwards compatibility
# @param version brms version in which the change to the default was made
# @return TRUE or FALSE
require_old_default <- function(version) {
  version <- as.package_version(version)
  brmsfit_version <- getOption(".brmsfit_version")
  isTRUE(brmsfit_version < version)
}

# add dummy draws to a brmsfit object for use in unit tests
# @param x a brmsfit object
# @param newpar name of the new parameter to add
# @param dim dimension of the new parameter
# @param dist name of the distribution from which to sample
# @param ... further arguments passed to the random number generator
# @return a brmsfit object including dummy draws of the new parameter
add_dummy_draws <- function(x, newpar, dim = numeric(0), dist = "norm", ...)
{ stopifnot(is.brmsfit(x)) stopifnot(identical(dim, numeric(0))) newpar <- as_one_character(newpar) for (i in seq_along(x$fit@sim$samples)) { x$fit@sim$samples[[i]][[newpar]] <- do_call(paste0("r", dist), list(x$fit@sim$iter, ...)) } x$fit@sim$fnames_oi <- c(x$fit@sim$fnames_oi, newpar) x$fit@sim$dims_oi[[newpar]] <- dim x$fit@sim$pars_oi <- names(x$fit@sim$dims_oi) x } brms/R/data-predictor.R0000644000176200001440000010617314504263650014460 0ustar liggesusers#' Prepare Predictor Data #' #' Prepare data related to predictor variables in \pkg{brms}. #' Only exported for use in package development. #' #' @param x An \R object. #' @param ... Further arguments passed to or from other methods. #' #' @return A named list of data related to predictor variables. #' #' @keywords internal #' @export data_predictor <- function(x, ...) { UseMethod("data_predictor") } #' @export data_predictor.mvbrmsterms <- function(x, data, sdata = NULL, basis = NULL, ...) { out <- list(N = nrow(data)) for (r in names(x$terms)) { bs <- basis$resps[[r]] c(out) <- data_predictor(x$terms[[r]], data = data, sdata = sdata, basis = bs, ...) } out } #' @export data_predictor.brmsterms <- function(x, data, data2, prior, ranef, sdata = NULL, basis = NULL, ...) { out <- list() data <- subset_data(data, x) resp <- usc(combine_prefix(x)) args_eff <- nlist(data, data2, ranef, prior, sdata, ...) 
for (dp in names(x$dpars)) { args_eff_spec <- list(x = x$dpars[[dp]], basis = basis$dpars[[dp]]) c(out) <- do_call(data_predictor, c(args_eff_spec, args_eff)) } for (dp in names(x$fdpars)) { if (is.numeric(x$fdpars[[dp]]$value)) { out[[paste0(dp, resp)]] <- x$fdpars[[dp]]$value } } for (nlp in names(x$nlpars)) { args_eff_spec <- list(x = x$nlpars[[nlp]], basis = basis$nlpars[[nlp]]) c(out) <- do_call(data_predictor, c(args_eff_spec, args_eff)) } c(out) <- data_gr_local(x, data = data, ranef = ranef) c(out) <- data_mixture(x, data2 = data2, prior = prior) out } # prepare data for all types of effects for use in Stan # @param data the data passed by the user # @param ranef object retuend by 'tidy_ranef' # @param prior an object of class brmsprior # @param basis information from original Stan data used to correctly # predict from new data. See 'standata_basis' for details. # @param ... currently ignored # @return a named list of data to be passed to Stan #' @export data_predictor.btl <- function(x, data, data2 = list(), ranef = empty_ranef(), prior = brmsprior(), sdata = NULL, index = NULL, basis = NULL, ...) { out <- c( data_fe(x, data), data_sp(x, data, data2 = data2, prior = prior, index = index, basis = basis$sp), data_re(x, data, ranef = ranef), data_cs(x, data), data_sm(x, data, basis = basis$sm), data_gp(x, data, basis = basis$gp), data_ac(x, data, data2 = data2, basis = basis$ac), data_offset(x, data), data_bhaz(x, data, data2 = data2, prior = prior, basis = basis$bhaz) ) c(out) <- data_special_prior( x, data, prior = prior, ranef = ranef, sdata = c(sdata, out) ) out } # prepare data for non-linear parameters for use in Stan #' @export data_predictor.btnl <- function(x, data, data2 = list(), prior = brmsprior(), basis = NULL, ...) 
{ out <- list() c(out) <- data_cnl(x, data) c(out) <- data_ac(x, data, data2 = data2, basis = basis$ac) c(out) <- data_bhaz(x, data, data2 = data2, prior = prior, basis = basis$bhaz) out } # prepare data of fixed effects data_fe <- function(bterms, data) { out <- list() p <- usc(combine_prefix(bterms)) # the intercept is removed inside the Stan code for non-ordinal models is_ord <- is_ordinal(bterms) cols2remove <- if (is_ord) "(Intercept)" X <- get_model_matrix(rhs(bterms$fe), data, cols2remove = cols2remove) avoid_dpars(colnames(X), bterms = bterms) out[[paste0("K", p)]] <- ncol(X) if (stan_center_X(bterms)) { # relevant if the intercept is treated separately to enable centering out[[paste0("Kc", p)]] <- ncol(X) - ifelse(is_ord, 0, 1) } out[[paste0("X", p)]] <- X out } # data preparation for splines data_sm <- function(bterms, data, basis = NULL) { out <- list() smterms <- all_terms(bterms[["sm"]]) if (!length(smterms)) { return(out) } p <- usc(combine_prefix(bterms)) new <- length(basis) > 0L knots <- get_knots(data) diagonal.penalty <- !require_old_default("2.8.7") bylevels <- named_list(smterms) ns <- 0 lXs <- list() for (i in seq_along(smterms)) { if (new) { sm <- basis[[i]]$sm } else { sm <- smoothCon( eval2(smterms[i]), data = data, knots = knots, absorb.cons = TRUE, diagonal.penalty = diagonal.penalty ) } # may contain multiple terms when 'by' is a factor for (j in seq_along(sm)) { ns <- ns + 1 if (length(sm[[j]]$by.level)) { bylevels[[i]][j] <- sm[[j]]$by.level } if (new) { # prepare smooths for use with new data # mgcv smooths are based on machine-specific SVD (#1465) re <- s2rPred(sm[[j]], re = basis[[i]]$re[[j]], data = data) } else { re <- mgcv::smooth2random(sm[[j]], names(data), type = 2) } lXs[[ns]] <- re$Xf if (NCOL(lXs[[ns]])) { colnames(lXs[[ns]]) <- paste0(sm[[j]]$label, "_", seq_cols(lXs[[ns]])) } Zs <- re$rand sfx <- paste0(p, "_", ns) out[[paste0("nb", sfx)]] <- length(Zs) if (length(Zs)) { names(Zs) <- paste0("Zs", sfx, "_", seq_along(Zs)) 
c(out) <- Zs out[[paste0("knots", sfx)]] <- as.array(ulapply(Zs, ncol)) } else { out[[paste0("knots", sfx)]] <- integer(0) } } } Xs <- do_call(cbind, lXs) avoid_dpars(colnames(Xs), bterms = bterms) smcols <- lapply(lXs, function(x) which(colnames(Xs) %in% colnames(x))) Xs <- structure(Xs, smcols = smcols, bylevels = bylevels) colnames(Xs) <- rename(colnames(Xs)) out[[paste0("Ks", p)]] <- ncol(Xs) out[[paste0("Xs", p)]] <- Xs out } # prepare data for group-level effects for use in Stan data_re <- function(bterms, data, ranef) { out <- list() px <- check_prefix(bterms) take <- find_rows(ranef, ls = px) & !find_rows(ranef, type = "sp") ranef <- ranef[take, ] if (!nrow(ranef)) { return(out) } gn <- unique(ranef$gn) for (i in seq_along(gn)) { r <- subset2(ranef, gn = gn[i]) Z <- get_model_matrix(r$form[[1]], data = data, rename = FALSE) idp <- paste0(r$id[1], usc(combine_prefix(px))) Znames <- paste0("Z_", idp, "_", r$cn) if (r$gtype[1] == "mm") { ng <- length(r$gcall[[1]]$groups) if (r$type[1] == "cs") { stop2("'cs' is not supported in multi-membership terms.") } if (r$type[1] == "mmc") { # see issue #353 for the general idea mmc_expr <- "^mmc\\([^:]*\\)" mmc_terms <- get_matches_expr(mmc_expr, colnames(Z)) for (t in mmc_terms) { pos <- which(grepl_expr(escape_all(t), colnames(Z))) if (length(pos) != ng) { stop2("Invalid term '", t, "': Expected ", ng, " coefficients but found ", length(pos), ".") } for (j in seq_along(Znames)) { for (k in seq_len(ng)) { out[[paste0(Znames[j], "_", k)]] <- as.array(Z[, pos[k]]) } } } } else { for (j in seq_along(Znames)) { out[paste0(Znames[j], "_", seq_len(ng))] <- list(as.array(Z[, j])) } } } else { if (r$type[1] == "cs") { ncatM1 <- nrow(r) / ncol(Z) Z_temp <- vector("list", ncol(Z)) for (k in seq_along(Z_temp)) { Z_temp[[k]] <- replicate(ncatM1, Z[, k], simplify = FALSE) } Z <- do_call(cbind, unlist(Z_temp, recursive = FALSE)) } if (r$type[1] == "mmc") { stop2("'mmc' is only supported in multi-membership terms.") } for (j in 
seq_cols(Z)) { out[[Znames[j]]] <- as.array(Z[, j]) } } } out } # compute data for each group-level-ID per univariate model data_gr_local <- function(bterms, data, ranef) { stopifnot(is.brmsterms(bterms)) out <- list() ranef <- subset2(ranef, resp = bterms$resp) resp <- usc(bterms$resp) for (id in unique(ranef$id)) { id_ranef <- subset2(ranef, id = id) idresp <- paste0(id, resp) nranef <- nrow(id_ranef) group <- id_ranef$group[1] levels <- attr(ranef, "levels")[[group]] if (id_ranef$gtype[1] == "mm") { # multi-membership grouping term gs <- id_ranef$gcall[[1]]$groups ngs <- length(gs) weights <- id_ranef$gcall[[1]]$weights if (is.formula(weights)) { scale <- isTRUE(attr(weights, "scale")) weights <- as.matrix(eval_rhs(weights, data)) if (!identical(dim(weights), c(nrow(data), ngs))) { stop2( "Grouping structure 'mm' expects 'weights' to be ", "a matrix with as many columns as grouping factors." ) } if (scale) { if (isTRUE(any(weights < 0))) { stop2("Cannot scale negative weights.") } weights <- sweep(weights, 1, rowSums(weights), "/") } } else { # all members get equal weights by default weights <- matrix(1 / ngs, nrow = nrow(data), ncol = ngs) } for (i in seq_along(gs)) { gdata <- get(gs[i], data) J <- match(gdata, levels) if (anyNA(J)) { # occurs for new levels only new_gdata <- gdata[!gdata %in% levels] new_levels <- unique(new_gdata) J[is.na(J)] <- match(new_gdata, new_levels) + length(levels) } out[[paste0("J_", idresp, "_", i)]] <- as.array(J) out[[paste0("W_", idresp, "_", i)]] <- as.array(weights[, i]) } } else { # ordinary grouping term g <- id_ranef$gcall[[1]]$groups gdata <- get(g, data) J <- match(gdata, levels) if (anyNA(J)) { # occurs for new levels only new_gdata <- gdata[!gdata %in% levels] new_levels <- unique(new_gdata) J[is.na(J)] <- match(new_gdata, new_levels) + length(levels) } out[[paste0("J_", idresp)]] <- as.array(J) } } out } # prepare global data for each group-level-ID data_gr_global <- function(ranef, data2) { out <- list() for (id in 
unique(ranef$id)) { tmp <- list() id_ranef <- subset2(ranef, id = id) nranef <- nrow(id_ranef) group <- id_ranef$group[1] levels <- attr(ranef, "levels")[[group]] tmp$N <- length(levels) tmp$M <- nranef tmp$NC <- as.integer(nranef * (nranef - 1) / 2) # prepare number of levels of an optional 'by' variable if (nzchar(id_ranef$by[1])) { stopifnot(!nzchar(id_ranef$type[1])) bylevels <- id_ranef$bylevels[[1]] Jby <- match(attr(levels, "by"), bylevels) tmp$Nby <- length(bylevels) tmp$Jby <- as.array(Jby) } # prepare within-group covariance matrices cov <- id_ranef$cov[1] if (nzchar(cov)) { # validation is only necessary here for compatibility with 'cov_ranef' cov_mat <- validate_recov_matrix(data2[[cov]]) found_levels <- rownames(cov_mat) found <- levels %in% found_levels if (any(!found)) { stop2("Levels of the within-group covariance matrix for '", group, "' do not match names of the grouping levels.") } cov_mat <- cov_mat[levels, levels, drop = FALSE] tmp$Lcov <- t(chol(cov_mat)) } names(tmp) <- paste0(names(tmp), "_", id) c(out) <- tmp } out } # prepare data for special effects for use in Stan data_sp <- function(bterms, data, data2, prior, index = NULL, basis = NULL) { out <- list() spef <- tidy_spef(bterms, data) if (!nrow(spef)) return(out) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) # prepare general data out[[paste0("Ksp", p)]] <- nrow(spef) Csp <- sp_model_matrix(bterms$sp, data) avoid_dpars(colnames(Csp), bterms = bterms) Csp <- Csp[, spef$Ic > 0, drop = FALSE] Csp <- lapply(seq_cols(Csp), function(i) as.array(Csp[, i])) if (length(Csp)) { Csp_names <- paste0("Csp", p, "_", seq_along(Csp)) out <- c(out, setNames(Csp, Csp_names)) } if (any(lengths(spef$Imo) > 0)) { # prepare data specific to monotonic effects out[[paste0("Imo", p)]] <- max(unlist(spef$Imo)) Xmo <- lapply(unlist(spef$calls_mo), get_mo_values, data = data) Xmo_names <- paste0("Xmo", p, "_", seq_along(Xmo)) c(out) <- setNames(Xmo, Xmo_names) if (!is.null(basis$Jmo)) { # take 
information from original data Jmo <- basis$Jmo } else { Jmo <- as.array(ulapply(Xmo, attr, "max")) } out[[paste0("Jmo", p)]] <- Jmo # prepare prior concentration of simplex parameters simo_coef <- get_simo_labels(spef, use_id = TRUE) ids <- unlist(spef$ids_mo) for (j in seq_along(simo_coef)) { # index of first ID appearance j_id <- match(ids[j], ids) if (is.na(ids[j]) || j_id == j) { # only evaluate priors without ID or first appearance of the ID # all other parameters will be copied over in the Stan code simo_prior <- subset2(prior, class = "simo", coef = simo_coef[j], ls = px ) con_simo <- eval_dirichlet(simo_prior$prior, Jmo[j], data2) out[[paste0("con_simo", p, "_", j)]] <- as.array(con_simo) } } } uni_mi <- attr(spef, "uni_mi") for (j in seq_rows(uni_mi)) { if (!is.na(uni_mi$idx[j])) { idxl <- get(uni_mi$idx[j], data) if (is.null(index[[uni_mi$var[j]]])) { # the 'idx' argument needs to be mapped against 'index' addition terms stop2("Response '", uni_mi$var[j], "' needs to have an 'index' addition ", "term to compare with 'idx'. 
See ?mi for examples.") } idxl <- match(idxl, index[[uni_mi$var[j]]]) if (anyNA(idxl)) { stop2("Could not match all indices in response '", uni_mi$var[j], "'.") } idxl_name <- paste0("idxl", p, "_", uni_mi$var[j], "_", uni_mi$idx2[j]) out[[idxl_name]] <- as.array(idxl) } else if (isTRUE(attr(index[[uni_mi$var[j]]], "subset"))) { # cross-formula referencing is required for subsetted variables stop2("mi() terms of subsetted variables require ", "the 'idx' argument to be specified.") } } out } # prepare data for category specific effects data_cs <- function(bterms, data) { out <- list() if (length(all_terms(bterms[["cs"]]))) { p <- usc(combine_prefix(bterms)) Xcs <- get_model_matrix(bterms$cs, data) avoid_dpars(colnames(Xcs), bterms = bterms) out <- c(out, list(Kcs = ncol(Xcs), Xcs = Xcs)) out <- setNames(out, paste0(names(out), p)) } out } # prepare global data for noise free variables data_Xme <- function(meef, data) { stopifnot(is.meef_frame(meef)) out <- list() groups <- unique(meef$grname) for (i in seq_along(groups)) { g <- groups[i] K <- which(meef$grname %in% g) Mme <- length(K) out[[paste0("Mme_", i)]] <- Mme out[[paste0("NCme_", i)]] <- Mme * (Mme - 1) / 2 if (nzchar(g)) { levels <- get_levels(meef)[[g]] gr <- get_me_group(meef$term[K[1]], data) Jme <- match(gr, levels) if (anyNA(Jme)) { # occurs for new levels only # replace NAs with unique values; fixes issue #706 gr[is.na(gr)] <- paste0("new_", seq_len(sum(is.na(gr))), "__") new_gr <- gr[!gr %in% levels] new_levels <- unique(new_gr) Jme[is.na(Jme)] <- length(levels) + match(new_gr, new_levels) } ilevels <- unique(Jme) out[[paste0("Nme_", i)]] <- length(ilevels) out[[paste0("Jme_", i)]] <- Jme } for (k in K) { Xn <- get_me_values(meef$term[k], data) noise <- get_me_noise(meef$term[k], data) if (nzchar(g)) { for (l in ilevels) { # validate values of the same level take <- Jme %in% l if (length(unique(Xn[take])) > 1L || length(unique(noise[take])) > 1L) { stop2( "Measured values and measurement error should 
be ", "unique for each group. Occured for level '", levels[l], "' of group '", g, "'." ) } } Xn <- get_one_value_per_group(Xn, Jme) noise <- get_one_value_per_group(noise, Jme) } out[[paste0("Xn_", k)]] <- as.array(Xn) out[[paste0("noise_", k)]] <- as.array(noise) } } out } # prepare data for Gaussian process terms # @param internal store some intermediate data for internal post-processing? # @param ... passed to '.data_gp' data_gp <- function(bterms, data, internal = FALSE, basis = NULL, ...) { out <- list() internal <- as_one_logical(internal) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) gpef <- tidy_gpef(bterms, data) for (i in seq_rows(gpef)) { pi <- paste0(p, "_", i) Xgp <- lapply(gpef$covars[[i]], eval2, data) D <- length(Xgp) out[[paste0("Dgp", pi)]] <- D invalid <- ulapply(Xgp, function(x) !is.numeric(x) || isTRUE(length(dim(x)) > 1L) ) if (any(invalid)) { stop2("Predictors of Gaussian processes should be numeric vectors.") } Xgp <- do_call(cbind, Xgp) cmc <- gpef$cmc[i] scale <- gpef$scale[i] gr <- gpef$gr[i] k <- gpef$k[i] c <- gpef$c[[i]] if (!isNA(k)) { out[[paste0("NBgp", pi)]] <- k ^ D Ks <- as.matrix(do_call(expand.grid, repl(seq_len(k), D))) } byvar <- gpef$byvars[[i]] byfac <- length(gpef$cons[[i]]) > 0L bynum <- !is.null(byvar) && !byfac if (byfac) { # for categorical 'by' variables prepare one GP per level # as.factor will keep unused levels needed for new data byval <- as.factor(get(byvar, data)) byform <- str2formula(c(ifelse(cmc, "0", "1"), "byval")) con_mat <- model.matrix(byform) cons <- colnames(con_mat) out[[paste0("Kgp", pi)]] <- length(cons) Ngp <- Nsubgp <- vector("list", length(cons)) for (j in seq_along(cons)) { # loop along contrasts of 'by' Cgp <- con_mat[, j] sfx <- paste0(pi, "_", j) tmp <- .data_gp( Xgp, k = k, gr = gr, sfx = sfx, Cgp = Cgp, c = c, scale = scale, internal = internal, basis = basis, ... 
) Ngp[[j]] <- attributes(tmp)[["Ngp"]] Nsubgp[[j]] <- attributes(tmp)[["Nsubgp"]] c(out) <- tmp } out[[paste0("Ngp", pi)]] <- unlist(Ngp) if (gr) { out[[paste0("Nsubgp", pi)]] <- unlist(Nsubgp) } } else { out[[paste0("Kgp", pi)]] <- 1L c(out) <- .data_gp( Xgp, k = k, gr = gr, sfx = pi, c = c, scale = scale, internal = internal, basis = basis, ... ) if (bynum) { Cgp <- as.numeric(get(byvar, data)) out[[paste0("Cgp", pi)]] <- as.array(Cgp) } } } if (length(basis)) { # original covariate values are required in new GP prediction Xgp_old <- basis[grepl("^Xgp", names(basis))] names(Xgp_old) <- paste0(names(Xgp_old), "_old") out[names(Xgp_old)] <- Xgp_old } out } # helper function to preparae GP related data # @inheritParams data_gp # @param Xgp matrix of covariate values # @param k, gr, c see 'tidy_gpef' # @param sfx suffix to put at the end of data names # @param Cgp optional vector of values belonging to # a certain contrast of a factor 'by' variable .data_gp <- function(Xgp, k, gr, sfx, Cgp = NULL, c = NULL, scale = TRUE, internal = FALSE, basis = NULL) { out <- list() if (!is.null(Cgp)) { Cgp <- unname(Cgp) Igp <- which(Cgp != 0) Xgp <- Xgp[Igp, , drop = FALSE] out[[paste0("Igp", sfx)]] <- as.array(Igp) out[[paste0("Cgp", sfx)]] <- as.array(Cgp[Igp]) attr(out, "Ngp") <- length(Igp) } if (gr) { groups <- factor(match_rows(Xgp, Xgp)) ilevels <- levels(groups) Jgp <- match(groups, ilevels) Nsubgp <- length(ilevels) if (!is.null(Cgp)) { attr(out, "Nsubgp") <- Nsubgp } else { out[[paste0("Nsubgp", sfx)]] <- Nsubgp } out[[paste0("Jgp", sfx)]] <- as.array(Jgp) not_dupl_Jgp <- !duplicated(Jgp) Xgp <- Xgp[not_dupl_Jgp, , drop = FALSE] } if (scale) { # scale predictor for easier specification of priors if (length(basis)) { # scale Xgp based on the original data dmax <- basis[[paste0("dmax", sfx)]] } else { dmax <- sqrt(max(diff_quad(Xgp))) } if (!isTRUE(dmax > 0)) { stop2("Could not scale GP covariates. 
Please set 'scale' to FALSE in 'gp'.") } if (internal) { # required for scaling of GPs with new data out[[paste0("dmax", sfx)]] <- dmax } Xgp <- Xgp / dmax } if (length(basis)) { # center Xgp based on the original data cmeans <- basis[[paste0("cmeans", sfx)]] } else { cmeans <- colMeans(Xgp) } if (internal) { # required for centering of approximate GPs with new data out[[paste0("cmeans", sfx)]] <- cmeans # required to compute inverse-gamma priors for length-scales out[[paste0("Xgp_prior", sfx)]] <- Xgp } if (!isNA(k)) { # basis function approach requires centered variables Xgp <- sweep(Xgp, 2, cmeans) D <- NCOL(Xgp) L <- choose_L(Xgp, c = c) Ks <- as.matrix(do_call(expand.grid, repl(seq_len(k), D))) XgpL <- matrix(nrow = NROW(Xgp), ncol = NROW(Ks)) slambda <- matrix(nrow = NROW(Ks), ncol = D) for (m in seq_rows(Ks)) { XgpL[, m] <- eigen_fun_cov_exp_quad(Xgp, m = Ks[m, ], L = L) slambda[m, ] <- sqrt(eigen_val_cov_exp_quad(m = Ks[m, ], L = L)) } out[[paste0("Xgp", sfx)]] <- XgpL out[[paste0("slambda", sfx)]] <- slambda } else { out[[paste0("Xgp", sfx)]] <- as.array(Xgp) } out } # data for autocorrelation variables data_ac <- function(bterms, data, data2, basis = NULL, ...) 
{ out <- list() N <- nrow(data) acef <- tidy_acef(bterms) if (has_ac_subset(bterms, dim = "time")) { gr <- get_ac_vars(acef, "gr", dim = "time") if (isTRUE(nzchar(gr))) { tgroup <- as.numeric(factor(data[[gr]])) } else { tgroup <- rep(1, N) } } if (has_ac_class(acef, "arma")) { # ARMA correlations acef_arma <- subset2(acef, class = "arma") out$Kar <- acef_arma$p out$Kma <- acef_arma$q if (!use_ac_cov_time(acef_arma)) { # data for the 'predictor' version of ARMA max_lag <- max(out$Kar, out$Kma) out$J_lag <- as.array(rep(0, N)) for (n in seq_len(N)[-N]) { ind <- n:max(1, n + 1 - max_lag) # indexes errors to be used in the n+1th prediction out$J_lag[n] <- sum(tgroup[ind] %in% tgroup[n + 1]) } } } if (use_ac_cov_time(acef)) { # data for the 'covariance' versions of time-series structures # TODO: change begin[i]:end[i] notation to slice[i]:(slice[i+1] - 1) # see comment on PR #1435 out$N_tg <- length(unique(tgroup)) out$begin_tg <- as.array(ulapply(unique(tgroup), match, tgroup)) out$nobs_tg <- as.array(with(out, c(if (N_tg > 1L) begin_tg[2:N_tg], N + 1) - begin_tg )) out$end_tg <- with(out, begin_tg + nobs_tg - 1) if (has_ac_class(acef, "unstr")) { time <- get_ac_vars(bterms, "time", dim = "time") time_data <- get(time, data) new_times <- extract_levels(time_data) if (length(basis)) { times <- basis$times # unstr estimates correlations only for given time points invalid_times <- setdiff(new_times, times) if (length(invalid_times)) { stop2("Cannot handle new time points in UNSTR models.") } } else { times <- new_times } out$n_unique_t <- length(times) out$n_unique_cortime <- out$n_unique_t * (out$n_unique_t - 1) / 2 Jtime <- match(time_data, times) out$Jtime_tg <- matrix(0L, out$N_tg, max(out$nobs_tg)) for (i in seq_len(out$N_tg)) { out$Jtime_tg[i, seq_len(out$nobs_tg[i])] <- Jtime[out$begin_tg[i]:out$end_tg[i]] } } } if (has_ac_class(acef, "sar")) { acef_sar <- subset2(acef, class = "sar") M <- data2[[acef_sar$M]] rmd_rows <- attr(data, "na.action") if 
(!is.null(rmd_rows)) { class(rmd_rows) <- NULL M <- M[-rmd_rows, -rmd_rows, drop = FALSE] } if (!is_equal(dim(M), rep(N, 2))) { stop2("Dimensions of 'M' for SAR terms must be equal to ", "the number of observations.") } out$Msar <- as.matrix(M) out$eigenMsar <- eigen(M)$values # simplifies code of choose_N out$N_tg <- 1 } if (has_ac_class(acef, "car")) { acef_car <- subset2(acef, class = "car") locations <- NULL if (length(basis)) { locations <- basis$locations } M <- data2[[acef_car$M]] if (acef_car$gr != "NA") { loc_data <- get(acef_car$gr, data) new_locations <- extract_levels(loc_data) if (is.null(locations)) { locations <- new_locations } else { invalid_locations <- setdiff(new_locations, locations) if (length(invalid_locations)) { stop2("Cannot handle new locations in CAR models.") } } Nloc <- length(locations) Jloc <- as.array(match(loc_data, locations)) if (is.null(rownames(M))) { stop2("Row names are required for 'M' in CAR terms.") } found <- locations %in% rownames(M) if (any(!found)) { stop2("Row names of 'M' for CAR terms do not match ", "the names of the grouping levels.") } M <- M[locations, locations, drop = FALSE] } else { warning2( "Using CAR terms without a grouping factor is deprecated. ", "Please use argument 'gr' even if each observation ", "represents its own location." 
) Nloc <- N Jloc <- as.array(seq_len(Nloc)) if (!is_equal(dim(M), rep(Nloc, 2))) { if (length(basis)) { stop2("Cannot handle new data in CAR terms ", "without a grouping factor.") } else { stop2("Dimensions of 'M' for CAR terms must be equal ", "to the number of observations.") } } } edges_rows <- (Matrix::tril(M)@i + 1) edges_cols <- sort(Matrix::triu(M)@i + 1) ## sort to make consistent with rows edges <- cbind("rows" = edges_rows, "cols" = edges_cols) c(out) <- nlist( Nloc, Jloc, Nedges = length(edges_rows), edges1 = as.array(edges_rows), edges2 = as.array(edges_cols) ) if (acef_car$type %in% c("escar", "esicar")) { Nneigh <- Matrix::colSums(M) if (any(Nneigh == 0) && !length(basis)) { stop2( "For exact sparse CAR, all locations should have at ", "least one neighbor within the provided data set. ", "Consider using type = 'icar' instead." ) } inv_sqrt_D <- diag(1 / sqrt(Nneigh)) eigenMcar <- t(inv_sqrt_D) %*% M %*% inv_sqrt_D eigenMcar <- eigen(eigenMcar, TRUE, only.values = TRUE)$values c(out) <- nlist(Nneigh, eigenMcar) } else if (acef_car$type %in% "bym2") { c(out) <- list(car_scale = .car_scale(edges, Nloc)) } } if (has_ac_class(acef, "fcor")) { acef_fcor <- subset2(acef, class = "fcor") M <- data2[[acef_fcor$M]] rmd_rows <- attr(data, "na.action") if (!is.null(rmd_rows)) { class(rmd_rows) <- NULL M <- M[-rmd_rows, -rmd_rows, drop = FALSE] } if (nrow(M) != N) { stop2("Dimensions of 'M' for FCOR terms must be equal ", "to the number of observations.") } out$Mfcor <- M # simplifies code of choose_N out$N_tg <- 1 } if (length(out)) { resp <- usc(combine_prefix(bterms)) out <- setNames(out, paste0(names(out), resp)) } out } # prepare data of offsets for use in Stan data_offset <- function(bterms, data) { out <- list() px <- check_prefix(bterms) if (is.formula(bterms$offset)) { p <- usc(combine_prefix(px)) mf <- rm_attr(data, "terms") mf <- model.frame(bterms$offset, mf, na.action = na.pass) offset <- model.offset(mf) if (length(offset) == 1L) { offset <- 
rep(offset, nrow(data)) } # use 'offsets' as 'offset' will be reserved in stanc3 out[[paste0("offsets", p)]] <- as.array(offset) } out } # data for covariates in non-linear models # @param x a btnl object # @return a named list of data passed to Stan data_cnl <- function(bterms, data) { stopifnot(is.btnl(bterms)) out <- list() covars <- all.vars(bterms$covars) if (!length(covars)) { return(out) } p <- usc(combine_prefix(bterms)) for (i in seq_along(covars)) { cvalues <- get(covars[i], data) if (is_like_factor(cvalues)) { # need to apply factor contrasts cform <- str2formula(covars[i]) cvalues <- get_model_matrix(cform, data, cols2remove = "(Intercept)") if (NCOL(cvalues) == 1L) { dim(cvalues) <- NULL } } if (isTRUE(dim(cvalues) > 2L)) { stop2("Non-linear covariates should be vectors or matrices.") } out[[paste0("C", p, "_", i)]] <- as.array(cvalues) } out } # compute the spatial scaling factor of CAR models # @param edges matrix with two columns defining the adjacency of the locations # @param Nloc number of locations # @return a scalar scaling factor .car_scale <- function(edges, Nloc) { # amended from Imad Ali's code of CAR models in rstanarm stopifnot(is.matrix(edges), NCOL(edges) == 2) # Build the adjacency matrix adj_matrix <- Matrix::sparseMatrix( i = edges[, 1], j = edges[, 2], x = 1, symmetric = TRUE ) # The ICAR precision matrix (which is singular) Q <- Matrix::Diagonal(Nloc, Matrix::rowSums(adj_matrix)) - adj_matrix # Add a small jitter to the diagonal for numerical stability Q_pert <- Q + Matrix::Diagonal(Nloc) * max(Matrix::diag(Q)) * sqrt(.Machine$double.eps) # Compute the diagonal elements of the covariance matrix subject to the # constraint that the entries of the ICAR sum to zero. 
.Q_inv <- function(Q) { Sigma <- Matrix::solve(Q) A <- matrix(1, 1, NROW(Sigma)) W <- Sigma %*% t(A) Sigma <- Sigma - W %*% solve(A %*% W) %*% Matrix::t(W) return(Sigma) } Q_inv <- .Q_inv(Q_pert) # Compute the geometric mean of the variances (diagonal of Q_inv) exp(mean(log(Matrix::diag(Q_inv)))) } # data for special priors such as horseshoe and R2D2 data_special_prior <- function(bterms, data, prior, ranef, sdata = NULL) { out <- list() px <- check_prefix(bterms) p <- usc(combine_prefix(px)) if (!has_special_prior(prior, px)) { return(out) } # number of coefficients affected by the shrinkage prior # fully compute this here to avoid having to pass the prior around # to all the individual data preparation functions # the order of adding things to Kscales doesn't matter but for consistency # it is still the same as the order in the Stan code Kscales <- 0 if (has_special_prior(prior, px, class = "b")) { Kscales <- Kscales + sdata[[paste0("Kc", p)]] %||% sdata[[paste0("K", p)]] %||% 0 + sdata[[paste0("Ksp", p)]] %||% 0 + sdata[[paste0("Ks", p)]] %||% 0 } if (has_special_prior(prior, px, class = "sds")) { take <- grepl(paste0("^nb", p, "_"), names(sdata)) Kscales <- Kscales + sum(unlist(sdata[take])) } if (has_special_prior(prior, px, class = "sdgp")) { take <- grepl(paste0("^Kgp", p, "_"), names(sdata)) Kscales <- Kscales + sum(unlist(sdata[take])) } if (has_special_prior(prior, px, class = "ar")) { Kscales <- Kscales + sdata[[paste0("Kar", p)]] } if (has_special_prior(prior, px, class = "ma")) { Kscales <- Kscales + sdata[[paste0("Kma", p)]] } if (has_special_prior(prior, px, class = "sderr")) { Kscales <- Kscales + 1 } if (has_special_prior(prior, px, class = "sdcar")) { Kscales <- Kscales + 1 } if (has_special_prior(prior, px, class = "sd")) { ids <- unique(subset2(ranef, ls = px)$id) Kscales <- Kscales + sum(unlist(sdata[paste0("M_", ids)])) } out[[paste0("Kscales", p)]] <- Kscales special <- get_special_prior(prior, px, main = TRUE) if (special$name == 
"horseshoe") { # data for the horseshoe prior hs_names <- c("df", "df_global", "df_slab", "scale_global", "scale_slab") hs_data <- special[hs_names] if (!is.null(special$par_ratio)) { hs_data$scale_global <- special$par_ratio / sqrt(nrow(data)) } names(hs_data) <- paste0("hs_", hs_names, p) c(out) <- hs_data } else if (special$name == "R2D2") { # data for the R2D2 prior R2D2_names <- c("mean_R2", "prec_R2", "cons_D2") R2D2_data <- special[R2D2_names] if (length(R2D2_data$cons_D2) == 1L) { R2D2_data$cons_D2 <- rep(R2D2_data$cons_D2, Kscales) } if (length(R2D2_data$cons_D2) != Kscales) { stop2("Argument 'cons_D2' of the R2D2 prior must be of length 1 or ", Kscales) } R2D2_data$cons_D2 <- as.array(R2D2_data$cons_D2) names(R2D2_data) <- paste0("R2D2_", R2D2_names, p) c(out) <- R2D2_data } out } # Construct design matrices for brms models # @param formula a formula object # @param data A data frame created with model.frame. # If another sort of object, model.frame is called first. # @param cols2remove names of the columns to remove from # the model matrix; mainly used for intercepts # @param rename rename column names via rename()? # @param ... passed to stats::model.matrix # @return # The design matrix for the given formula and data. # For details see ?stats::model.matrix get_model_matrix <- function(formula, data = environment(formula), cols2remove = NULL, rename = TRUE, ...) { stopifnot(is_atomic_or_null(cols2remove)) terms <- validate_terms(formula) if (is.null(terms)) { return(NULL) } if (no_int(terms)) { cols2remove <- union(cols2remove, "(Intercept)") } X <- stats::model.matrix(terms, data, ...) cols2remove <- which(colnames(X) %in% cols2remove) if (length(cols2remove)) { X <- X[, -cols2remove, drop = FALSE] } if (rename) { colnames(X) <- rename(colnames(X), check_dup = TRUE) } X } # convenient wrapper around mgcv::PredictMat PredictMat <- function(object, data, ...) { data <- rm_attr(data, "terms") out <- mgcv::PredictMat(object, data = data, ...) 
if (length(dim(out)) < 2L) { # fixes issue #494 out <- matrix(out, nrow = 1) } out } # convenient wrapper around mgcv::smoothCon smoothCon <- function(object, data, ...) { data <- rm_attr(data, "terms") vars <- setdiff(c(object$term, object$by), "NA") for (v in vars) { if (is_like_factor(data[[v]])) { # allow factor-like variables #562 data[[v]] <- as.factor(data[[v]]) } else if (inherits(data[[v]], "difftime")) { # mgcv cannot handle 'difftime' variables data[[v]] <- as.numeric(data[[v]]) } } mgcv::smoothCon(object, data = data, ...) } # Aid prediction from smooths represented as 'type = 2' # code obtained from the doc of ?mgcv::smooth2random # @param sm output of mgcv::smoothCon # @param re output of mgcv::smooth2random # @param data new data supplied for prediction # @return A list of the same structure as returned by mgcv::smooth2random s2rPred <- function(sm, re, data) { # prediction matrix for new data X <- PredictMat(sm, data) # transform to RE parameterization if (!is.null(re$trans.U)) { X <- X %*% re$trans.U } if (is.null(re$trans.D)) { # regression spline without penalization out <- list(Xf = X) } else { X <- t(t(X) * re$trans.D) # re-order columns according to random effect re-ordering X[, re$rind] <- X[, re$pen.ind != 0] # re-order penalization index in same way pen.ind <- re$pen.ind pen.ind[re$rind] <- pen.ind[pen.ind > 0] # start returning the object Xf <- X[, which(re$pen.ind == 0), drop = FALSE] out <- list(rand = list(), Xf = Xf) for (i in seq_along(re$rand)) { # loop over random effect matrices out$rand[[i]] <- X[, which(pen.ind == i), drop = FALSE] attr(out$rand[[i]], "s.label") <- attr(re$rand[[i]], "s.label") } names(out$rand) <- names(re$rand) } out } brms/R/formula-gp.R0000644000176200001440000002647614454230211013625 0ustar liggesusers# R helper functions for Gaussian Processes #' Set up Gaussian process terms in \pkg{brms} #' #' Set up a Gaussian process (GP) term in \pkg{brms}. 
The function does not #' evaluate its arguments -- it exists purely to help set up a model with #' GP terms. #' #' @param ... One or more predictors for the GP. #' @param by A numeric or factor variable of the same length as #' each predictor. In the numeric vector case, the elements multiply #' the values returned by the GP. In the factor variable #' case, a separate GP is fitted for each factor level. #' @param k Optional number of basis functions for computing approximate #' GPs. If \code{NA} (the default), exact GPs are computed. #' @param cov Name of the covariance kernel. By default, #' the exponentiated-quadratic kernel \code{"exp_quad"} is used. #' @param iso A flag to indicate whether an isotropic (\code{TRUE}; the #' default) or a non-isotropic GP should be used. #' In the former case, the same amount of smoothing is applied to all #' predictors. In the latter case, predictors may have different smoothing. #' Ignored if only a single predictor is supplied. #' @param gr Logical; Indicates if auto-grouping should be used (defaults #' to \code{TRUE}). If enabled, observations sharing the same #' predictor values will be represented by the same latent variable #' in the GP. This will improve sampling efficiency #' drastically if the number of unique predictor combinations is small #' relative to the number of observations. #' @param cmc Logical; Only relevant if \code{by} is a factor. If \code{TRUE} #' (the default), cell-mean coding is used for the \code{by}-factor, that is #' one GP per level is estimated. If \code{FALSE}, contrast GPs are estimated #' according to the contrasts set for the \code{by}-factor. #' @param scale Logical; If \code{TRUE} (the default), predictors are #' scaled so that the maximum Euclidean distance between two points #' is 1. This often improves sampling speed and convergence. 
#' Scaling also affects the estimated length-scale parameters #' in that they resemble those of scaled predictors (not of the original #' predictors) if \code{scale} is \code{TRUE}. #' @param c Numeric value only used in approximate GPs. Defines the #' multiplicative constant of the predictors' range over which #' predictions should be computed. A good default could be \code{c = 5/4} #' but we are still working on providing better recommendations. #' #' @details A GP is a stochastic process, which #' describes the relation between one or more predictors #' \eqn{x = (x_1, ..., x_d)} and a response \eqn{f(x)}, where #' \eqn{d} is the number of predictors. A GP is the #' generalization of the multivariate normal distribution #' to an infinite number of dimensions. Thus, it can be #' interpreted as a prior over functions. The values of \eqn{f( )} #' at any finite set of locations are jointly multivariate #' normal, with a covariance matrix defined by the covariance #' kernel \eqn{k_p(x_i, x_j)}, where \eqn{p} is the vector of parameters #' of the GP: #' \deqn{(f(x_1), \ldots f(x_n) \sim MVN(0, (k_p(x_i, x_j))_{i,j=1}^n) .} #' The smoothness and general behavior of the function \eqn{f} #' depends only on the choice of covariance kernel. #' For a more detailed introduction to Gaussian processes, #' see \url{https://en.wikipedia.org/wiki/Gaussian_process}. #' #' Below, we describe the currently supported covariance kernels: #' \itemize{ #' \item{"exp_quad": }{The exponentiated-quadratic kernel is defined as #' \eqn{k(x_i, x_j) = sdgp^2 \exp(- || x_i - x_j ||^2 / (2 lscale^2))}, #' where \eqn{|| . ||} is the Euclidean norm, \eqn{sdgp} is a #' standard deviation parameter, and \eqn{lscale} is characteristic #' length-scale parameter. The latter practically measures how close two #' points \eqn{x_i} and \eqn{x_j} have to be to influence each other #' substantially.} #' } #' #' In the current implementation, \code{"exp_quad"} is the only supported #' covariance kernel. 
More options will follow in the future. #' #' @return An object of class \code{'gp_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @examples #' \dontrun{ #' # simulate data using the mgcv package #' dat <- mgcv::gamSim(1, n = 30, scale = 2) #' #' # fit a simple GP model #' fit1 <- brm(y ~ gp(x2), dat, chains = 2) #' summary(fit1) #' me1 <- conditional_effects(fit1, ndraws = 200, spaghetti = TRUE) #' plot(me1, ask = FALSE, points = TRUE) #' #' # fit a more complicated GP model #' fit2 <- brm(y ~ gp(x0) + x1 + gp(x2) + x3, dat, chains = 2) #' summary(fit2) #' me2 <- conditional_effects(fit2, ndraws = 200, spaghetti = TRUE) #' plot(me2, ask = FALSE, points = TRUE) #' #' # fit a multivariate GP model #' fit3 <- brm(y ~ gp(x1, x2), dat, chains = 2) #' summary(fit3) #' me3 <- conditional_effects(fit3, ndraws = 200, spaghetti = TRUE) #' plot(me3, ask = FALSE, points = TRUE) #' #' # compare model fit #' LOO(fit1, fit2, fit3) #' #' # simulate data with a factor covariate #' dat2 <- mgcv::gamSim(4, n = 90, scale = 2) #' #' # fit separate gaussian processes for different levels of 'fac' #' fit4 <- brm(y ~ gp(x2, by = fac), dat2, chains = 2) #' summary(fit4) #' plot(conditional_effects(fit4), points = TRUE) #' } #' #' @seealso \code{\link{brmsformula}} #' @export gp <- function(..., by = NA, k = NA, cov = "exp_quad", iso = TRUE, gr = TRUE, cmc = TRUE, scale = TRUE, c = NULL) { cov <- match.arg(cov, choices = c("exp_quad")) call <- match.call() label <- deparse0(call) vars <- as.list(substitute(list(...)))[-1] by <- deparse0(substitute(by)) cmc <- as_one_logical(cmc) if (is.null(call[["gr"]]) && require_old_default("2.12.8")) { # the default of 'gr' has changed in version 2.12.8 gr <- FALSE } else { gr <- as_one_logical(gr) } if (length(vars) > 1L) { iso <- as_one_logical(iso) } else { iso <- TRUE } if (!isNA(k)) { k <- as.integer(as_one_numeric(k)) if (k < 1L) { stop2("'k' must be positive.") } if (is.null(c)) 
{ stop2( "'c' must be specified for approximate GPs. ", "A good default could be c = 5/4 but we are still ", "working on providing better recommendations." ) } c <- as.numeric(c) if (length(c) == 1L) { c <- rep(c, length(vars)) } if (length(c) != length(vars)) { stop2("'c' must be of the same length as the number of covariates.") } if (any(c <= 0)) { stop2("'c' must be positive.") } } else { c <- NA } scale <- as_one_logical(scale) term <- ulapply(vars, deparse0, backtick = TRUE, width.cutoff = 500L) out <- nlist(term, label, by, cov, k, iso, gr, cmc, scale, c) structure(out, class = "gp_term") } # get labels of gaussian process terms # @param x either a formula or a list containing an element "gp" # @param data data frame containing the covariates # @return a data.frame with one row per GP term tidy_gpef <- function(x, data) { if (is.formula(x)) { x <- brmsterms(x, check_response = FALSE)$dpars$mu } form <- x[["gp"]] if (!is.formula(form)) { return(empty_data_frame()) } out <- data.frame(term = all_terms(form), stringsAsFactors = FALSE) nterms <- nrow(out) out$cons <- out$byvars <- out$covars <- out$sfx1 <- out$sfx2 <- out$c <- vector("list", nterms) for (i in seq_len(nterms)) { gp <- eval2(out$term[i]) out$label[i] <- paste0("gp", rename(collapse(gp$term))) out$cov[i] <- gp$cov out$k[i] <- gp$k out$c[[i]] <- gp$c out$iso[i] <- gp$iso out$cmc[i] <- gp$cmc out$gr[i] <- gp$gr out$scale[i] <- gp$scale out$covars[[i]] <- gp$term if (gp$by != "NA") { out$byvars[[i]] <- gp$by str_add(out$label[i]) <- rename(gp$by) byval <- get(gp$by, data) if (is_like_factor(byval)) { byval <- unique(as.factor(byval)) byform <- str2formula(c(ifelse(gp$cmc, "0", "1"), "byval")) cons <- rename(colnames(model.matrix(byform))) out$cons[[i]] <- rm_wsp(sub("^byval", "", cons)) } } # sfx1 is for sdgp and sfx2 is for lscale out$sfx1[[i]] <- paste0(out$label[i], out$cons[[i]]) if (out$iso[i]) { out$sfx2[[i]] <- matrix(out$sfx1[[i]]) } else { out$sfx2[[i]] <- outer(out$sfx1[[i]], out$covars[[i]], 
paste0) } } out } # exponential-quadratic covariance matrix # not vectorized over parameter values cov_exp_quad <- function(x, x_new = NULL, sdgp = 1, lscale = 1) { sdgp <- as.numeric(sdgp) lscale <- as.numeric(lscale) Dls <- length(lscale) if (Dls == 1L) { # one dimensional or isotropic GP diff_quad <- diff_quad(x = x, x_new = x_new) out <- sdgp^2 * exp(-diff_quad / (2 * lscale^2)) } else { # multi-dimensional non-isotropic GP diff_quad <- diff_quad(x = x[, 1], x_new = x_new[, 1]) out <- sdgp^2 * exp(-diff_quad / (2 * lscale[1]^2)) for (d in seq_len(Dls)[-1]) { diff_quad <- diff_quad(x = x[, d], x_new = x_new[, d]) out <- out * exp(-diff_quad / (2 * lscale[d]^2)) } } out } # compute squared differences # @param x vector or matrix # @param x_new optional vector of matrix with the same ncol as x # @return an nrow(x) times nrow(x_new) matrix # @details if matrices are passed results are summed over the columns diff_quad <- function(x, x_new = NULL) { x <- as.matrix(x) if (is.null(x_new)) { x_new <- x } else { x_new <- as.matrix(x_new) } .diff_quad <- function(x1, x2) (x1 - x2)^2 out <- 0 for (i in seq_cols(x)) { out <- out + outer(x[, i], x_new[, i], .diff_quad) } out } # spectral density function # vectorized over parameter values spd_cov_exp_quad <- function(x, sdgp = 1, lscale = 1) { NB <- NROW(x) D <- NCOL(x) Dls <- NCOL(lscale) out <- matrix(nrow = length(sdgp), ncol = NB) if (Dls == 1L) { # one dimensional or isotropic GP constant <- sdgp^2 * (sqrt(2 * pi) * lscale)^D neg_half_lscale2 <- -0.5 * lscale^2 for (m in seq_len(NB)) { out[, m] <- constant * exp(neg_half_lscale2 * sum(x[m, ]^2)) } } else { # multi-dimensional non-isotropic GP constant <- sdgp^2 * sqrt(2 * pi)^D * matrixStats::rowProds(lscale) neg_half_lscale2 = -0.5 * lscale^2 for (m in seq_len(NB)) { x2 <- data2draws(x[m, ]^2, dim = dim(lscale)) out[, m] <- constant * exp(rowSums(neg_half_lscale2 * x2)) } } out } # compute the mth eigen value of an approximate GP eigen_val_cov_exp_quad <- function(m, 
L) { ((m * pi) / (2 * L))^2 } # compute the mth eigen function of an approximate GP eigen_fun_cov_exp_quad <- function(x, m, L) { x <- as.matrix(x) D <- ncol(x) stopifnot(length(m) == D, length(L) == D) out <- vector("list", D) for (i in seq_cols(x)) { out[[i]] <- 1 / sqrt(L[i]) * sin((m[i] * pi) / (2 * L[i]) * (x[, i] + L[i])) } Reduce("*", out) } # extended range of input data for which predictions should be made choose_L <- function(x, c) { if (!length(x)) { range <- 1 } else { range <- max(1, max(x, na.rm = TRUE) - min(x, na.rm = TRUE)) } c * range } # try to evaluate a GP term and # return an informative error message if it fails try_nug <- function(expr, nug) { out <- try(expr, silent = TRUE) if (is_try_error(out)) { stop2("The Gaussian process covariance matrix is not positive ", "definite.\nThis occurs for numerical reasons. Setting ", "'nug' above ", nug, " may help.") } out } brms/R/prepare_predictions.R0000644000176200001440000013442414464712657015632 0ustar liggesusers#' @export #' @rdname prepare_predictions prepare_predictions.brmsfit <- function( x, newdata = NULL, re_formula = NULL, allow_new_levels = FALSE, sample_new_levels = "uncertainty", incl_autocor = TRUE, oos = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, nsamples = NULL, subset = NULL, nug = NULL, smooths_only = FALSE, offset = TRUE, newdata2 = NULL, new_objects = NULL, point_estimate = NULL, ndraws_point_estimate = 1, ... 
) { x <- restructure(x) # allows functions to fall back to old default behavior # which was used when originally fitting the model options(.brmsfit_version = x$version$brms) on.exit(options(.brmsfit_version = NULL)) snl_options <- c("uncertainty", "gaussian", "old_levels") sample_new_levels <- match.arg(sample_new_levels, snl_options) ndraws <- use_alias(ndraws, nsamples) draw_ids <- use_alias(draw_ids, subset) warn_brmsfit_multiple(x, newdata = newdata) newdata2 <- use_alias(newdata2, new_objects) x <- exclude_terms( x, incl_autocor = incl_autocor, offset = offset, smooths_only = smooths_only ) resp <- validate_resp(resp, x) draw_ids <- validate_draw_ids(x, draw_ids, ndraws) draws <- as_draws_matrix(x) draws <- suppressMessages(subset_draws(draws, draw = draw_ids)) draws <- point_draws(draws, point_estimate, ndraws_point_estimate) new_formula <- update_re_terms(x$formula, re_formula) bterms <- brmsterms(new_formula) ranef <- tidy_ranef(bterms, x$data) meef <- tidy_meef(bterms, x$data) new <- !is.null(newdata) sdata <- standata( x, newdata = newdata, re_formula = re_formula, newdata2 = newdata2, resp = resp, allow_new_levels = allow_new_levels, internal = TRUE, ... ) prep_ranef <- prepare_predictions_ranef( ranef = ranef, draws = draws, sdata = sdata, resp = resp, old_ranef = x$ranef, sample_new_levels = sample_new_levels, ) prepare_predictions( bterms, draws = draws, sdata = sdata, data = x$data, prep_ranef = prep_ranef, meef = meef, resp = resp, sample_new_levels = sample_new_levels, nug = nug, new = new, oos = oos, stanvars = x$stanvars ) } #' @export prepare_predictions.mvbrmsterms <- function(x, draws, sdata, resp = NULL, ...) 
{ resp <- validate_resp(resp, x$responses) if (length(resp) > 1) { if (has_subset(x)) { stop2("Argument 'resp' must be a single variable name ", "for models using addition argument 'subset'.") } out <- list(ndraws = nrow(draws), nobs = sdata$N) out$resps <- named_list(resp) out$old_order <- attr(sdata, "old_order") for (r in resp) { out$resps[[r]] <- prepare_predictions( x$terms[[r]], draws = draws, sdata = sdata, ... ) } if (x$rescor) { out$family <- out$resps[[1]]$family out$family$fun <- paste0(out$family$family, "_mv") rescor <- get_cornames(resp, type = "rescor", brackets = FALSE) out$mvpars$rescor <- prepare_draws(draws, rescor) if (out$family$family == "student") { # store in out$dpars so that get_dpar can be called on nu out$dpars$nu <- as.vector(prepare_draws(draws, "nu")) } out$data$N <- out$resps[[1]]$data$N out$data$weights <- out$resps[[1]]$data$weights Y <- lapply(out$resps, function(x) x$data$Y) out$data$Y <- do_call(cbind, Y) } out <- structure(out, class = "mvbrmsprep") } else { out <- prepare_predictions( x$terms[[resp]], draws = draws, sdata = sdata, ... ) } out } #' @export prepare_predictions.brmsterms <- function(x, draws, sdata, data, ...) { data <- subset_data(data, x) ndraws <- nrow(draws) nobs <- sdata[[paste0("N", usc(x$resp))]] resp <- usc(combine_prefix(x)) out <- nlist(ndraws, nobs, resp = x$resp) out$family <- prepare_family(x) out$old_order <- attr(sdata, "old_order") if (has_subset(x) && !is.null(out$old_order)) { # old_order has length equal to the full number of observations # which is inappropriate for subsetted responses (#1483) out$old_order <- as.numeric(factor(out$old_order[attr(data, "subset")])) } valid_dpars <- valid_dpars(x) out$dpars <- named_list(valid_dpars) for (dp in valid_dpars) { dp_regex <- paste0("^", dp, resp, "$") if (is.btl(x$dpars[[dp]]) || is.btnl(x$dpars[[dp]])) { out$dpars[[dp]] <- prepare_predictions( x$dpars[[dp]], draws = draws, sdata = sdata, data = data, ... 
) } else if (any(grepl(dp_regex, colnames(draws)))) { out$dpars[[dp]] <- as.vector(prepare_draws(draws, dp_regex, regex = TRUE)) } else if (is.numeric(x$fdpars[[dp]]$value)) { # fixed dpars are stored as regular draws as of brms 2.12.9 # so this manual extraction is only required for older models out$dpars[[dp]] <- x$fdpars[[dp]]$value } } out$nlpars <- named_list(names(x$nlpars)) for (nlp in names(x$nlpars)) { out$nlpars[[nlp]] <- prepare_predictions( x$nlpars[[nlp]], draws = draws, sdata = sdata, data = data, ... ) } if (is.mixfamily(x$family)) { families <- family_names(x$family) thetas <- paste0("theta", seq_along(families)) if (any(ulapply(out$dpars[thetas], is.list))) { # theta was predicted missing_id <- which(ulapply(out$dpars[thetas], is.null)) out$dpars[[paste0("theta", missing_id)]] <- structure( data2draws(0, c(ndraws, nobs)), predicted = TRUE ) } else { # theta was not predicted out$dpars$theta <- do_call(cbind, out$dpars[thetas]) out$dpars[thetas] <- NULL if (nrow(out$dpars$theta) == 1L) { dim <- c(nrow(draws), ncol(out$dpars$theta)) out$dpars$theta <- data2draws(out$dpars$theta, dim = dim) } } } if (is_ordinal(x$family)) { # it is better to handle ordinal thresholds outside the # main predictor term in particular for use in custom families if (is.mixfamily(x$family)) { mu_pars <- str_subset(names(x$dpars), "^mu[[:digit:]]+") for (mu in mu_pars) { out$thres[[mu]] <- prepare_predictions_thres(x$dpars[[mu]], draws, sdata, ...) } } else { out$thres <- prepare_predictions_thres(x$dpars$mu, draws, sdata, ...) } } if (is_logistic_normal(x$family)) { out$dpars$lncor <- prepare_draws(draws, "^lncor__", regex = TRUE) } if (is_cox(x$family)) { # prepare baseline hazard functions for the Cox model if (is.mixfamily(x$family)) { mu_pars <- str_subset(names(x$dpars), "^mu[[:digit:]]+") for (mu in mu_pars) { out$bhaz[[mu]] <- prepare_predictions_bhaz( x$dpars[[mu]], draws, sdata, ... ) } } else { out$bhaz <- prepare_predictions_bhaz(x$dpars$mu, draws, sdata, ...) 
} } # response category names for categorical and ordinal models out$cats <- get_cats(x) # reference category for categorical models out$refcat <- get_refcat(x, int = TRUE) # only include those autocor draws on the top-level # of the output which imply covariance matrices on natural residuals out$ac <- prepare_predictions_ac(x$dpars$mu, draws, sdata, nat_cov = TRUE, ...) out$data <- prepare_predictions_data(x, sdata = sdata, data = data, ...) structure(out, class = "brmsprep") } #' @export prepare_predictions.btnl <- function(x, draws, sdata, ...) { out <- list( family = x$family, nlform = x$formula[[2]], ndraws = nrow(draws), nobs = sdata[[paste0("N", usc(x$resp))]], used_nlpars = x$used_nlpars, loop = x$loop ) class(out) <- "bprepnl" p <- usc(combine_prefix(x)) covars <- all.vars(x$covars) for (i in seq_along(covars)) { cvalues <- sdata[[paste0("C", p, "_", i)]] cdim <- c(out$ndraws, out$nobs) if (is.matrix(cvalues)) { c(cdim) <- dim(cvalues)[2] } out$C[[covars[i]]] <- data2draws(cvalues, dim = cdim) } out } #' @export prepare_predictions.btl <- function(x, draws, sdata, ...) { ndraws <- nrow(draws) nobs <- sdata[[paste0("N", usc(x$resp))]] out <- nlist(family = x$family, ndraws, nobs) class(out) <- "bprepl" out$fe <- prepare_predictions_fe(x, draws, sdata, ...) out$sp <- prepare_predictions_sp(x, draws, sdata, ...) out$cs <- prepare_predictions_cs(x, draws, sdata, ...) out$sm <- prepare_predictions_sm(x, draws, sdata, ...) out$gp <- prepare_predictions_gp(x, draws, sdata, ...) out$re <- prepare_predictions_re(x, sdata, ...) out$ac <- prepare_predictions_ac(x, draws, sdata, nat_cov = FALSE, ...) out$offset <- prepare_predictions_offset(x, sdata, ...) out } # prepare predictions of ordinary population-level effects prepare_predictions_fe <- function(bterms, draws, sdata, ...) 
{ out <- list() if (is.null(bterms[["fe"]])) { return(out) } p <- usc(combine_prefix(bterms)) X <- sdata[[paste0("X", p)]] fixef <- colnames(X) if (length(fixef)) { out$X <- X b_pars <- paste0("b", p, "_", fixef) out$b <- prepare_draws(draws, b_pars) } out } # prepare predictions of special effects terms prepare_predictions_sp <- function(bterms, draws, sdata, data, meef = empty_meef(), new = FALSE, ...) { out <- list() spef <- tidy_spef(bterms, data) if (!nrow(spef)) { return(out) } p <- usc(combine_prefix(bterms)) resp <- usc(bterms$resp) # prepare calls evaluated in sp_predictor out$calls <- vector("list", nrow(spef)) for (i in seq_along(out$calls)) { call <- spef$joint_call[[i]] if (!is.null(spef$calls_mo[[i]])) { new_mo <- paste0(".mo(simo_", spef$Imo[[i]], ", Xmo_", spef$Imo[[i]], ")") call <- rename(call, spef$calls_mo[[i]], new_mo) } if (!is.null(spef$calls_me[[i]])) { new_me <- paste0("Xme_", seq_along(meef$term)) call <- rename(call, meef$term, new_me) } if (!is.null(spef$calls_mi[[i]])) { is_na_idx <- is.na(spef$idx2_mi[[i]]) idx_mi <- paste0("idxl", p, "_", spef$vars_mi[[i]], "_", spef$idx2_mi[[i]]) idx_mi <- ifelse(is_na_idx, "", paste0("[, ", idx_mi, "]")) new_mi <- paste0("Yl_", spef$vars_mi[[i]], idx_mi) call <- rename(call, spef$calls_mi[[i]], new_mi) } if (spef$Ic[i] > 0) { str_add(call) <- paste0(" * Csp_", spef$Ic[i]) } out$calls[[i]] <- parse(text = paste0(call)) } # extract general data and parameters for special effects bsp_pars <- paste0("bsp", p, "_", spef$coef) out$bsp <- prepare_draws(draws, bsp_pars) colnames(out$bsp) <- spef$coef # prepare predictions specific to monotonic effects simo_coef <- get_simo_labels(spef) Jmo <- sdata[[paste0("Jmo", p)]] out$simo <- out$Xmo <- named_list(simo_coef) for (i in seq_along(simo_coef)) { J <- seq_len(Jmo[i]) simo_par <- paste0("simo", p, "_", simo_coef[i], "[", J, "]") out$simo[[i]] <- prepare_draws(draws, simo_par) out$Xmo[[i]] <- sdata[[paste0("Xmo", p, "_", i)]] } # prepare predictions specific 
to noise-free effects warn_me <- FALSE if (nrow(meef)) { save_mevars <- any(grepl("^Xme_", colnames(draws))) warn_me <- warn_me || !new && !save_mevars out$Xme <- named_list(meef$coef) Xme_regex <- paste0("^Xme_", escape_all(meef$coef), "\\[") Xn <- sdata[paste0("Xn_", seq_rows(meef))] noise <- sdata[paste0("noise_", seq_rows(meef))] groups <- unique(meef$grname) for (i in seq_along(groups)) { g <- groups[i] K <- which(meef$grname %in% g) if (nzchar(g)) { Jme <- sdata[[paste0("Jme_", i)]] } if (!new && save_mevars) { # extract original draws of latent variables for (k in K) { out$Xme[[k]] <- prepare_draws(draws, Xme_regex[k], regex = TRUE) } } else { # sample new values of latent variables if (nzchar(g)) { # TODO: reuse existing levels in predictions? # represent all indices between 1 and length(unique(Jme)) Jme <- as.numeric(factor(Jme)) me_dim <- c(nrow(out$bsp), max(Jme)) } else { me_dim <- c(nrow(out$bsp), sdata$N) } for (k in K) { dXn <- data2draws(Xn[[k]], me_dim) dnoise <- data2draws(noise[[k]], me_dim) out$Xme[[k]] <- array(rnorm(prod(me_dim), dXn, dnoise), me_dim) remove(dXn, dnoise) } } if (nzchar(g)) { for (k in K) { out$Xme[[k]] <- out$Xme[[k]][, Jme, drop = FALSE] } } } } # prepare predictions specific to missing value variables dim <- c(nrow(out$bsp), sdata[[paste0("N", resp)]]) vars_mi <- unique(unlist(spef$vars_mi)) if (length(vars_mi)) { # we know at this point that the model is multivariate Yl_names <- paste0("Yl_", vars_mi) out$Yl <- named_list(Yl_names) for (i in seq_along(out$Yl)) { vmi <- vars_mi[i] dim_y <- c(nrow(out$bsp), sdata[[paste0("N_", vmi)]]) Y <- data2draws(sdata[[paste0("Y_", vmi)]], dim_y) sdy <- sdata[[paste0("noise_", vmi)]] if (is.null(sdy)) { # missings only out$Yl[[i]] <- Y if (!new) { Ymi_regex <- paste0("^Ymi_", escape_all(vmi), "\\[") Ymi <- prepare_draws(draws, Ymi_regex, regex = TRUE) Jmi <- sdata[[paste0("Jmi_", vmi)]] out$Yl[[i]][, Jmi] <- Ymi } } else { # measurement-error in the response save_mevars <- 
any(grepl("^Yl_", colnames(draws))) if (save_mevars && !new) { Ymi_regex <- paste0("^Yl_", escape_all(vmi), "\\[") out$Yl[[i]] <- prepare_draws(draws, Ymi_regex, regex = TRUE) } else { warn_me <- warn_me || !new sdy <- data2draws(sdy, dim) out$Yl[[i]] <- rcontinuous( n = prod(dim), dist = "norm", mean = Y, sd = sdy, lb = sdata[[paste0("lbmi_", vmi)]], ub = sdata[[paste0("ubmi_", vmi)]] ) out$Yl[[i]] <- array(out$Yl[[i]], dim_y) } } } # extract index variables belonging to mi terms uni_mi <- na.omit(attr(spef, "uni_mi")) idxl_vars <- paste0("idxl", p, "_", uni_mi$var, "_", uni_mi$idx2) out$idxl <- sdata[idxl_vars] } if (warn_me) { warning2( "Noise-free latent variables were not saved. ", "You can control saving those variables via 'save_pars()'. ", "Treating original data as if it was new data as a workaround." ) } # prepare covariates ncovars <- max(spef$Ic) out$Csp <- vector("list", ncovars) for (i in seq_len(ncovars)) { out$Csp[[i]] <- sdata[[paste0("Csp", p, "_", i)]] out$Csp[[i]] <- data2draws(out$Csp[[i]], dim = dim) } out } # prepare predictions of category specific effects prepare_predictions_cs <- function(bterms, draws, sdata, data, ...) { out <- list() if (!is_ordinal(bterms$family)) { return(out) } resp <- usc(bterms$resp) out$nthres <- sdata[[paste0("nthres", resp)]] csef <- colnames(get_model_matrix(bterms$cs, data)) if (length(csef)) { p <- usc(combine_prefix(bterms)) cs_pars <- paste0("^bcs", p, "_", escape_all(csef), "\\[") out$bcs <- prepare_draws(draws, cs_pars, regex = TRUE) out$Xcs <- sdata[[paste0("Xcs", p)]] } out } # prepare predictions of smooth terms prepare_predictions_sm <- function(bterms, draws, sdata, data, ...) 
{ out <- list() smef <- tidy_smef(bterms, data) if (!NROW(smef)) { return(out) } p <- usc(combine_prefix(bterms)) Xs_names <- attr(smef, "Xs_names") if (length(Xs_names)) { out$fe$Xs <- sdata[[paste0("Xs", p)]] # allow for "b_" prefix for compatibility with version <= 2.5.0 bspars <- paste0("^bs?", p, "_", escape_all(Xs_names), "$") out$fe$bs <- prepare_draws(draws, bspars, regex = TRUE) } out$re <- named_list(smef$label) for (i in seq_rows(smef)) { sm <- list() for (j in seq_len(smef$nbases[i])) { sm$Zs[[j]] <- sdata[[paste0("Zs", p, "_", i, "_", j)]] spars <- paste0("^s", p, "_", smef$label[i], "_", j, "\\[") sm$s[[j]] <- prepare_draws(draws, spars, regex = TRUE) } out$re[[i]] <- sm } out } # prepare predictions for Gaussian processes # @param new is new data used? # @param nug small numeric value to avoid numerical problems in GPs prepare_predictions_gp <- function(bterms, draws, sdata, data, new = FALSE, nug = NULL, ...) { gpef <- tidy_gpef(bterms, data) if (!nrow(gpef)) { return(list()) } p <- usc(combine_prefix(bterms)) if (is.null(nug)) { # nug for old data must be the same as in the Stan code as even tiny # differences (e.g., 1e-12 vs. 
1e-11) will matter for larger lscales nug <- ifelse(new, 1e-8, 1e-12) } out <- named_list(gpef$label) for (i in seq_along(out)) { cons <- gpef$cons[[i]] if (length(cons)) { gp <- named_list(cons) for (j in seq_along(cons)) { gp[[j]] <- .prepare_predictions_gp( gpef, draws = draws, sdata = sdata, nug = nug, new = new, byj = j, p = p, i = i ) } attr(gp, "byfac") <- TRUE } else { gp <- .prepare_predictions_gp( gpef, draws = draws, sdata = sdata, nug = nug, new = new, p = p, i = i ) } out[[i]] <- gp } out } # prepare predictions for Gaussian processes # @param gpef output of tidy_gpef # @param p prefix created by combine_prefix() # @param i indiex of the Gaussian process # @param byj index for the contrast of a categorical 'by' variable # @return a list to be evaluated by .predictor_gp() .prepare_predictions_gp <- function(gpef, draws, sdata, nug, new, p, i, byj = NULL) { sfx1 <- escape_all(gpef$sfx1[[i]]) sfx2 <- escape_all(gpef$sfx2[[i]]) if (is.null(byj)) { lvl <- "" } else { lvl <- gpef$bylevels[[i]][byj] sfx1 <- sfx1[byj] sfx2 <- sfx2[byj, ] } j <- usc(byj) pi <- paste0(p, "_", i) gp <- list() sdgp <- paste0("^sdgp", p, "_", sfx1, "$") gp$sdgp <- as.vector(prepare_draws(draws, sdgp, regex = TRUE)) lscale <- paste0("^lscale", p, "_", sfx2, "$") gp$lscale <- prepare_draws(draws, lscale, regex = TRUE) zgp_regex <- paste0("^zgp", p, "_", sfx1, "\\[") gp$zgp <- prepare_draws(draws, zgp_regex, regex = TRUE) Xgp_name <- paste0("Xgp", pi, j) Igp_name <- paste0("Igp", pi, j) Jgp_name <- paste0("Jgp", pi, j) if (new && isNA(gpef$k[i])) { # in exact GPs old covariate values are required for predictions gp$x <- sdata[[paste0(Xgp_name, "_old")]] # nug for old data must be the same as in the Stan code as even tiny # differences (e.g., 1e-12 vs. 
1e-11) will matter for larger lscales gp$nug <- 1e-12 # computing GPs for new data requires the old GP terms gp$yL <- .predictor_gp(gp) gp$x_new <- sdata[[Xgp_name]] gp$Igp <- sdata[[Igp_name]] } else { gp$x <- sdata[[Xgp_name]] gp$Igp <- sdata[[Igp_name]] if (!isNA(gpef$k[i])) { gp$slambda <- sdata[[paste0("slambda", pi, j)]] } } gp$Jgp <- sdata[[Jgp_name]] # possible factor from 'by' variable gp$Cgp <- sdata[[paste0("Cgp", pi, j)]] gp$nug <- nug gp } # prepare predictions for all group level effects # needs to be separate from 'prepare_predictions_re' to take correlations # across responses and distributional parameters into account (#779) # @param ranef output of 'tidy_ranef' based on the new formula and old data # @param old_ranef same as 'ranef' but based on the original formula # @return a named list with one element per group containing posterior draws # of levels used in the data as well as additional meta-data prepare_predictions_ranef <- function(ranef, draws, sdata, old_ranef, resp = NULL, sample_new_levels = "uncertainty", ...) { if (!nrow(ranef)) { return(list()) } # ensures subsetting 'ranef' by 'resp' works correctly resp <- resp %||% "" groups <- unique(ranef$group) out <- named_list(groups, list()) for (g in groups) { # prepare general variables related to group g ranef_g <- subset2(ranef, group = g) old_ranef_g <- subset2(old_ranef, group = g) used_levels <- attr(sdata, "levels")[[g]] old_levels <- attr(old_ranef, "levels")[[g]] nlevels <- length(old_levels) nranef <- nrow(ranef_g) # prepare draws of group-level effects rpars <- paste0("^r_", g, "(__.+)?\\[") rdraws <- prepare_draws(draws, rpars, regex = TRUE) if (!length(rdraws)) { stop2( "Group-level coefficients of group '", g, "' not found. ", "You can control saving those coefficients via 'save_pars()'." 
) } # only prepare predictions of effects specified in the new formula cols_match <- c("coef", "resp", "dpar", "nlpar") used_rpars <- which(find_rows(old_ranef_g, ls = ranef_g[cols_match])) used_rpars <- outer(seq_len(nlevels), (used_rpars - 1) * nlevels, "+") used_rpars <- as.vector(used_rpars) rdraws <- rdraws[, used_rpars, drop = FALSE] rdraws <- column_to_row_major_order(rdraws, nranef) # prepare data required for indexing parameters gtype <- ranef_g$gtype[1] resp_g <- intersect(ranef_g$resp, resp)[1] # any valid ID works here as J and W are independent of the ID id <- subset2(ranef_g, resp = resp)$id[1] idresp <- paste0(id, usc(resp_g)) if (gtype == "mm") { ngf <- length(ranef_g$gcall[[1]]$groups) gf <- sdata[paste0("J_", idresp, "_", seq_len(ngf))] weights <- sdata[paste0("W_", idresp, "_", seq_len(ngf))] } else { gf <- sdata[paste0("J_", idresp)] weights <- list(rep(1, length(gf[[1]]))) } # generate draws for new levels args_new_rdraws <- nlist( ranef = ranef_g, gf, used_levels, old_levels, rdraws = rdraws, draws, sample_new_levels ) new_rdraws <- do_call(get_new_rdraws, args_new_rdraws) max_level <- attr(new_rdraws, "max_level") gf <- attr(new_rdraws, "gf") rdraws <- cbind(rdraws, new_rdraws) # keep only those levels actually used in the current data levels <- unique(unlist(gf)) rdraws <- subset_levels(rdraws, levels, nranef) # store all information required in 'prepare_predictions_re' out[[g]]$ranef <- ranef_g out[[g]]$rdraws <- rdraws out[[g]]$levels <- levels out[[g]]$nranef <- nranef out[[g]]$max_level <- max_level out[[g]]$gf <- gf out[[g]]$weights <- weights } out } # prepare predictions of group-level effects # @param prep_ranef a named list with one element per group containing # posterior draws of levels as well as additional meta-data prepare_predictions_re <- function(bterms, sdata, prep_ranef = list(), sample_new_levels = "uncertainty", ...) 
{
  out <- list()
  if (!length(prep_ranef)) {
    return(out)
  }
  px <- check_prefix(bterms)
  p <- usc(combine_prefix(px))
  # stack the per-group ranef frames and keep only rows of this prefix
  ranef_px <- from_list(prep_ranef, "ranef")
  ranef_px <- do_call(rbind, ranef_px)
  ranef_px <- subset2(ranef_px, ls = px)
  if (!NROW(ranef_px)) {
    return(out)
  }
  groups <- unique(ranef_px$group)
  # assigning S4 objects requires initialisation of list elements
  out[c("Z", "Zsp", "Zcs")] <- list(named_list(groups))
  for (g in groups) {
    # extract variables specific to group 'g'
    ranef_g <- prep_ranef[[g]]$ranef
    ranef_g_px <- subset2(ranef_g, ls = px)
    rdraws <- prep_ranef[[g]]$rdraws
    nranef <- prep_ranef[[g]]$nranef
    levels <- prep_ranef[[g]]$levels
    max_level <- prep_ranef[[g]]$max_level
    gf <- prep_ranef[[g]]$gf
    weights <- prep_ranef[[g]]$weights
    # TODO: define 'select' according to parameter names not by position
    # store draws and corresponding data in the output
    # special group-level terms (mo, me, mi)
    ranef_g_px_sp <- subset2(ranef_g_px, type = "sp")
    if (nrow(ranef_g_px_sp)) {
      Z <- matrix(1, length(gf[[1]]))
      out[["Zsp"]][[g]] <- prepare_Z(Z, gf, max_level, weights)
      for (co in ranef_g_px_sp$coef) {
        # select from all varying effects of that group
        select <- find_rows(ranef_g, ls = px) &
          ranef_g$coef == co & ranef_g$type == "sp"
        select <- which(select)
        select <- select + nranef * (seq_along(levels) - 1)
        out[["rsp"]][[co]][[g]] <- rdraws[, select, drop = FALSE]
      }
    }
    # category specific group-level terms
    ranef_g_px_cs <- subset2(ranef_g_px, type = "cs")
    if (nrow(ranef_g_px_cs)) {
      # all categories share the same Z matrix
      ranef_g_px_cs_1 <- ranef_g_px_cs[grepl("\\[1\\]$", ranef_g_px_cs$coef), ]
      Znames <- paste0("Z_", ranef_g_px_cs_1$id, p, "_", ranef_g_px_cs_1$cn)
      Z <- do_call(cbind, sdata[Znames])
      out[["Zcs"]][[g]] <- prepare_Z(Z, gf, max_level, weights)
      for (i in seq_len(sdata$nthres)) {
        # match the coefficients belonging to threshold/category i
        index <- paste0("\\[", i, "\\]$")
        # select from all varying effects of that group
        select <- find_rows(ranef_g, ls = px) &
          grepl(index, ranef_g$coef) & ranef_g$type == "cs"
        select <- which(select)
        select <- as.vector(outer(select, nranef * (seq_along(levels) - 1), "+"))
        out[["rcs"]][[g]][[i]] <- rdraws[, select, drop = FALSE]
      }
    }
    # basic group-level terms
    ranef_g_px_basic <- subset2(ranef_g_px, type = c("", "mmc"))
    if (nrow(ranef_g_px_basic)) {
      Znames <- paste0("Z_", ranef_g_px_basic$id, p, "_", ranef_g_px_basic$cn)
      if (ranef_g_px_basic$gtype[1] == "mm") {
        # multi-membership: one Z matrix per member
        ng <- length(ranef_g_px_basic$gcall[[1]]$groups)
        Z <- vector("list", ng)
        for (k in seq_len(ng)) {
          Z[[k]] <- do_call(cbind, sdata[paste0(Znames, "_", k)])
        }
      } else {
        Z <- do_call(cbind, sdata[Znames])
      }
      out[["Z"]][[g]] <- prepare_Z(Z, gf, max_level, weights)
      # select from all varying effects of that group
      select <- find_rows(ranef_g, ls = px) & ranef_g$type %in% c("", "mmc")
      select <- which(select)
      select <- as.vector(outer(select, nranef * (seq_along(levels) - 1), "+"))
      out[["r"]][[g]] <- rdraws[, select, drop = FALSE]
    }
  }
  out
}

# prepare predictions of autocorrelation parameters
# @param nat_cov extract terms for covariance matrices of natural residuals?
prepare_predictions_ac <- function(bterms, draws, sdata, oos = NULL,
                                   nat_cov = FALSE, new = FALSE, ...)
{
  out <- list()
  nat_cov <- as_one_logical(nat_cov)
  acef <- tidy_acef(bterms)
  acef <- subset2(acef, nat_cov = nat_cov)
  if (!NROW(acef)) {
    return(out)
  }
  out$acef <- acef
  p <- usc(combine_prefix(bterms))
  out$N_tg <- sdata[[paste0("N_tg", p)]]
  if (has_ac_class(acef, "arma")) {
    acef_arma <- subset2(acef, class = "arma")
    out$Y <- sdata[[paste0("Y", p)]]
    if (!is.null(oos)) {
      if (any(oos > length(out$Y))) {
        stop2("'oos' should not contain integers larger than N.")
      }
      # .predictor_arma has special behavior for NA responses
      out$Y[oos] <- NA
    }
    out$J_lag <- sdata[[paste0("J_lag", p)]]
    if (acef_arma$p > 0) {
      # autoregressive coefficients
      ar_regex <- paste0("^ar", p, "\\[")
      out$ar <- prepare_draws(draws, ar_regex, regex = TRUE)
    }
    if (acef_arma$q > 0) {
      # moving-average coefficients
      ma_regex <- paste0("^ma", p, "\\[")
      out$ma <- prepare_draws(draws, ma_regex, regex = TRUE)
    }
  }
  if (has_ac_class(acef, "cosy")) {
    cosy_regex <- paste0("^cosy", p, "$")
    out$cosy <- prepare_draws(draws, cosy_regex, regex = TRUE)
  }
  if (has_ac_class(acef, "unstr")) {
    cortime_regex <- paste0("^cortime", p, "__")
    out$cortime <- prepare_draws(draws, cortime_regex, regex = TRUE)
    out$Jtime_tg <- sdata[[paste0("Jtime_tg", p)]]
  }
  if (use_ac_cov_time(acef)) {
    # prepare predictions for the covariance structures of time-series models
    out$begin_tg <- sdata[[paste0("begin_tg", p)]]
    out$end_tg <- sdata[[paste0("end_tg", p)]]
  }
  if (has_ac_latent_residuals(bterms)) {
    err_regex <- paste0("^err", p, "\\[")
    has_err <- any(grepl(err_regex, colnames(draws)))
    if (has_err && !new) {
      out$err <- prepare_draws(draws, err_regex, regex = TRUE)
    } else {
      if (!use_ac_cov_time(acef)) {
        stop2("Cannot predict new latent residuals ",
              "when using cov = FALSE in autocor terms.")
      }
      # need to sample correlated residuals
      out$err <- matrix(nrow = nrow(draws), ncol = length(out$Y))
      sderr_regex <- paste0("^sderr", p, "$")
      out$sderr <- prepare_draws(draws, sderr_regex, regex = TRUE)
      for (i in seq_len(out$N_tg)) {
        # draw residuals per time-series group from the implied covariance
        obs <- with(out, begin_tg[i]:end_tg[i])
        Jtime <- out$Jtime_tg[i, ]
        cov <- get_cov_matrix_ac(list(ac = out), obs, Jtime = Jtime, latent = TRUE)
        zeros <- rep(0, length(obs))
        .err <- function(s) rmulti_normal(1, zeros, Sigma = cov[s, , ])
        out$err[, obs] <- rblapply(seq_rows(draws), .err)
      }
    }
  }
  if (has_ac_class(acef, "sar")) {
    lagsar_regex <- paste0("^lagsar", p, "$")
    errorsar_regex <- paste0("^errorsar", p, "$")
    out$lagsar <- prepare_draws(draws, lagsar_regex, regex = TRUE)
    out$errorsar <- prepare_draws(draws, errorsar_regex, regex = TRUE)
    out$Msar <- sdata[[paste0("Msar", p)]]
  }
  if (has_ac_class(acef, "car")) {
    acef_car <- subset2(acef, class = "car")
    if (new && acef_car$gr == "NA") {
      stop2("Without a grouping factor, CAR models cannot handle newdata.")
    }
    gcar <- sdata[[paste0("Jloc", p)]]
    Zcar <- matrix(rep(1, length(gcar)))
    out$Zcar <- prepare_Z(Zcar, list(gcar))
    rcar_regex <- paste0("^rcar", p, "\\[")
    rcar <- prepare_draws(draws, rcar_regex, regex = TRUE)
    rcar <- rcar[, unique(gcar), drop = FALSE]
    out$rcar <- rcar
  }
  if (has_ac_class(acef, "fcor")) {
    out$Mfcor <- sdata[[paste0("Mfcor", p)]]
  }
  out
}

# extract the offset vector (if any) of the current prefix from 'sdata'
prepare_predictions_offset <- function(bterms, sdata, ...) {
  p <- usc(combine_prefix(bterms))
  sdata[[paste0("offsets", p)]]
}

# prepare predictions of ordinal thresholds
prepare_predictions_thres <- function(bterms, draws, sdata, ...) {
  out <- list()
  if (!is_ordinal(bterms$family)) {
    return(out)
  }
  resp <- usc(bterms$resp)
  out$nthres <- sdata[[paste0("nthres", resp)]]
  out$Jthres <- sdata[[paste0("Jthres", resp)]]
  p <- usc(combine_prefix(bterms))
  # ordinal thresholds are stored as intercept parameters
  thres_regex <- paste0("^b", p, "_Intercept\\[")
  out$thres <- prepare_draws(draws, thres_regex, regex = TRUE)
  out
}

# prepare predictions of baseline functions for the cox model
prepare_predictions_bhaz <- function(bterms, draws, sdata, ...)
{ if (!is_cox(bterms$family)) { return(NULL) } out <- list() p <- usc(combine_prefix(bterms)) sbhaz_regex <- paste0("^sbhaz", p) sbhaz <- prepare_draws(draws, sbhaz_regex, regex = TRUE) Zbhaz <- sdata[[paste0("Zbhaz", p)]] out$bhaz <- tcrossprod(sbhaz, Zbhaz) Zcbhaz <- sdata[[paste0("Zcbhaz", p)]] out$cbhaz <- tcrossprod(sbhaz, Zcbhaz) out } # extract data mainly related to the response variable prepare_predictions_data <- function(bterms, sdata, data, stanvars = NULL, ...) { resp <- usc(combine_prefix(bterms)) vars <- c( "Y", "trials", "ncat", "nthres", "se", "weights", "denom", "dec", "cens", "rcens", "lb", "ub" ) vars <- paste0(vars, resp) vars <- intersect(vars, names(sdata)) # variables of variable length need to be handled via regular expression escaped_resp <- escape_all(resp) vl_vars <- c("vreal", "vint") vl_vars <- regex_or(vl_vars) vl_vars <- paste0("^", vl_vars, "[[:digit:]]+", escaped_resp, "$") vl_vars <- str_subset(names(sdata), vl_vars) vars <- union(vars, vl_vars) out <- sdata[vars] # remove resp suffix from names to simplify post-processing names(out) <- sub(paste0(escaped_resp, "$"), "", names(out)) if (length(stanvars)) { stopifnot(is.stanvars(stanvars)) out[names(stanvars)] <- sdata[names(stanvars)] } out } # choose number of observations to be used in post-processing methods choose_N <- function(prep) { stopifnot(is.brmsprep(prep) || is.mvbrmsprep(prep)) if (!is.null(prep$ac$N_tg)) prep$ac$N_tg else prep$nobs } # create pseudo brmsprep objects for components of mixture models # @param comp the mixture component number # @param draw_ids see predict_mixture pseudo_prep_for_mixture <- function(prep, comp, draw_ids = NULL) { stopifnot(is.brmsprep(prep), is.mixfamily(prep$family)) if (!is.null(draw_ids)) { ndraws <- length(draw_ids) } else { ndraws <- prep$ndraws } out <- list( family = prep$family$mix[[comp]], ndraws = ndraws, nobs = prep$nobs, data = prep$data ) out$family$fun <- out$family$family for (dp in valid_dpars(out$family)) { 
out$dpars[[dp]] <- prep$dpars[[paste0(dp, comp)]] if (length(draw_ids) && length(out$dpars[[dp]]) > 1L) { out$dpars[[dp]] <- p(out$dpars[[dp]], draw_ids, row = TRUE) } } if (is_ordinal(out$family)) { out$thres <- prep$thres[[paste0("mu", comp)]] } if (is_cox(out$family)) { out$bhaz <- prep$bhaz[[paste0("mu", comp)]] } # weighting should happen after computing the mixture out$data$weights <- NULL structure(out, class = "brmsprep") } # take relevant cols of a matrix of group-level terms # if only a subset of levels is provided (for newdata) # @param x a matrix typically draws of r or Z design matrices # draws need to be stored in row major order # @param levels grouping factor levels to keep # @param nranef number of group-level effects subset_levels <- function(x, levels, nranef) { take_levels <- ulapply(levels, function(l) ((l - 1) * nranef + 1):(l * nranef) ) x[, take_levels, drop = FALSE] } # transform x from column to row major order # rows represent levels and columns represent effects # @param x a matrix of draws of group-level parameters # @param nranef number of group-level effects column_to_row_major_order <- function(x, nranef) { nlevels <- ncol(x) / nranef sort_levels <- ulapply(seq_len(nlevels), function(l) seq(l, ncol(x), by = nlevels) ) x[, sort_levels, drop = FALSE] } # prepare group-level design matrices for use in 'predictor' # @param Z (list of) matrices to be prepared # @param gf (list of) vectors containing grouping factor values # @param weights optional (list of) weights of the same length as gf # @param max_level maximal level of 'gf' # @return a sparse matrix representation of Z prepare_Z <- function(Z, gf, max_level = NULL, weights = NULL) { if (!is.list(Z)) { Z <- list(Z) } if (!is.list(gf)) { gf <- list(gf) } if (is.null(weights)) { weights <- rep(1, length(gf[[1]])) } if (!is.list(weights)) { weights <- list(weights) } if (is.null(max_level)) { max_level <- max(unlist(gf)) } levels <- unique(unlist(gf)) nranef <- ncol(Z[[1]]) Z <- mapply( 
expand_matrix, A = Z, x = gf, weights = weights, MoreArgs = nlist(max_level) ) Z <- Reduce("+", Z) subset_levels(Z, levels, nranef) } # expand a matrix into a sparse matrix of higher dimension # @param A matrix to be expanded # @param x levels to expand the matrix # @param max_level maximal number of levels that x can take on # @param weights weights to apply to rows of A before expanding # @param a sparse matrix of dimension nrow(A) x (ncol(A) * max_level) expand_matrix <- function(A, x, max_level = max(x), weights = 1) { stopifnot(is.matrix(A)) stopifnot(length(x) == nrow(A)) stopifnot(all(is_wholenumber(x) & x > 0)) stopifnot(length(weights) %in% c(1, nrow(A), prod(dim(A)))) A <- A * as.vector(weights) K <- ncol(A) i <- rep(seq_along(x), each = K) make_j <- function(n, K, x) K * (x[n] - 1) + 1:K j <- ulapply(seq_along(x), make_j, K = K, x = x) Matrix::sparseMatrix( i = i, j = j, x = as.vector(t(A)), dims = c(nrow(A), ncol(A) * max_level) ) } # generate draws for new group levels # @param ranef 'ranef_frame' object of only a single grouping variable # @param gf list of vectors of level indices in the current data # @param rdraws matrix of group-level draws in row major order # @param used_levels names of levels used in the current data # @param old_levels names of levels used in the original data # @param sample_new_levels specifies the way in which new draws are generated # @param draws optional matrix of draws from all model parameters # @return a matrix of draws for new group levels get_new_rdraws <- function(ranef, gf, rdraws, used_levels, old_levels, sample_new_levels, draws = NULL) { snl_options <- c("uncertainty", "gaussian", "old_levels") sample_new_levels <- match.arg(sample_new_levels, snl_options) g <- unique(ranef$group) stopifnot(length(g) == 1L) stopifnot(is.list(gf)) used_by_per_level <- attr(used_levels, "by") old_by_per_level <- attr(old_levels, "by") new_levels <- setdiff(used_levels, old_levels) nranef <- nrow(ranef) nlevels <- 
length(old_levels)
  max_level <- nlevels
  out <- vector("list", length(gf))
  for (i in seq_along(gf)) {
    # indices greater than 'nlevels' denote levels unseen during fitting
    has_new_levels <- any(gf[[i]] > nlevels)
    if (has_new_levels) {
      new_indices <- sort(setdiff(gf[[i]], seq_len(nlevels)))
      out[[i]] <- matrix(NA, nrow(rdraws), nranef * length(new_indices))
      if (sample_new_levels == "uncertainty") {
        for (j in seq_along(new_indices)) {
          # selected levels need to be the same for all varying effects
          # to correctly take their correlations into account
          if (length(old_by_per_level)) {
            # select from all levels matching the 'by' variable
            new_by <- used_by_per_level[used_levels == new_levels[j]]
            possible_levels <- old_levels[old_by_per_level == new_by]
            possible_levels <- which(old_levels %in% possible_levels)
            sel_levels <- sample(possible_levels, NROW(rdraws), TRUE)
          } else {
            # select from all levels
            sel_levels <- sample(seq_len(nlevels), NROW(rdraws), TRUE)
          }
          for (k in seq_len(nranef)) {
            for (s in seq_rows(rdraws)) {
              sel <- (sel_levels[s] - 1) * nranef + k
              out[[i]][s, (j - 1) * nranef + k] <- rdraws[s, sel]
            }
          }
        }
      } else if (sample_new_levels == "old_levels") {
        for (j in seq_along(new_indices)) {
          # choose an existing person to take the parameters from
          if (length(old_by_per_level)) {
            # select from all levels matching the 'by' variable
            new_by <- used_by_per_level[used_levels == new_levels[j]]
            possible_levels <- old_levels[old_by_per_level == new_by]
            possible_levels <- which(old_levels %in% possible_levels)
            sel_level <- sample(possible_levels, 1)
          } else {
            # select from all levels
            sel_level <- sample(seq_len(nlevels), 1)
          }
          for (k in seq_len(nranef)) {
            sel <- (sel_level - 1) * nranef + k
            out[[i]][, (j - 1) * nranef + k] <- rdraws[, sel]
          }
        }
      } else if (sample_new_levels == "gaussian") {
        if (any(!ranef$dist %in% "gaussian")) {
          stop2("Option sample_new_levels = 'gaussian' is not ",
                "available for non-gaussian group-level effects.")
        }
        for (j in seq_along(new_indices)) {
          # extract hyperparameters used to compute the covariance matrix
          if (length(old_by_per_level)) {
            new_by <- used_by_per_level[used_levels == new_levels[j]]
            rnames <- as.vector(get_rnames(ranef, bylevels = new_by))
          } else {
            rnames <- get_rnames(ranef)
          }
          sd_pars <- paste0("sd_", g, "__", rnames)
          sd_draws <- prepare_draws(draws, sd_pars)
          cor_type <- paste0("cor_", g)
          cor_pars <- get_cornames(rnames, cor_type, brackets = FALSE)
          # correlations not present in 'draws' are treated as zero
          cor_draws <- matrix(0, nrow(sd_draws), length(cor_pars))
          for (k in seq_along(cor_pars)) {
            if (cor_pars[k] %in% colnames(draws)) {
              cor_draws[, k] <- prepare_draws(draws, cor_pars[k])
            }
          }
          cov_matrix <- get_cov_matrix(sd_draws, cor_draws)
          # sample new levels from the normal distribution
          # implied by the covariance matrix
          indices <- ((j - 1) * nranef + 1):(j * nranef)
          out[[i]][, indices] <- t(apply(
            cov_matrix, 1, rmulti_normal,
            n = 1, mu = rep(0, length(sd_pars))
          ))
        }
      }
      max_level <- max_level + length(new_indices)
    } else {
      out[[i]] <- matrix(nrow = nrow(rdraws), ncol = 0)
    }
  }
  out <- do_call(cbind, out)
  structure(out, gf = gf, max_level = max_level)
}

# prepare draws of selected variables
prepare_draws <- function(x, variable, ...) {
  x <- subset_draws(x, variable = variable, ...)
# brms still assumes standard dropping behavior in many places # and so keeping the posterior format is dangerous at the moment unclass_draws(x) } # compute point estimates of posterior draws # currently used primarily for 'loo_subsample' # @param draws matrix of posterior draws # @param point_estimate optional name of the point estimate to be computed # @param ndraws_point_estimate number of repetitions of the point estimate's # value in the form of pseudo draws # @return a draws_matrix with one row point_draws <- function(draws, point_estimate = NULL, ndraws_point_estimate = 1) { if (is.null(point_estimate)) { return(draws) } point_estimate <- match.arg(point_estimate, c("mean", "median")) ndraws_point_estimate <- as_one_integer(ndraws_point_estimate) stopifnot(ndraws_point_estimate > 0) variables <- colnames(draws) if (point_estimate == "mean") { draws <- matrixStats::colMeans2(draws) } else if (point_estimate == "median") { draws <- matrixStats::colMedians(draws) } draws <- t(draws) draws <- matrix( draws, nrow = ndraws_point_estimate, ncol = ncol(draws), byrow = TRUE ) colnames(draws) <- variables as_draws_matrix(draws) } is.brmsprep <- function(x) { inherits(x, "brmsprep") } is.mvbrmsprep <- function(x) { inherits(x, "mvbrmsprep") } is.bprepl <- function(x) { inherits(x, "bprepl") } is.bprepnl <- function(x) { inherits(x, "bprepnl") } #' Prepare Predictions #' #' This method helps in preparing \pkg{brms} models for certin post-processing #' tasks most notably various forms of predictions. Unless you are a package #' developer, you will rarely need to call \code{prepare_predictions} directly. #' #' @name prepare_predictions #' @aliases prepare_predictions.brmsfit extract_draws #' #' @param x An \R object typically of class \code{'brmsfit'}. #' @param newdata An optional data.frame for which to evaluate predictions. If #' \code{NULL} (default), the original data of the model is used. 
#' \code{NA} values within factors are interpreted as if all dummy
#' variables of this factor are zero. This allows, for instance, to make
#' predictions of the grand mean when using sum coding.
#' @param re_formula formula containing group-level effects to be considered in
#' the prediction. If \code{NULL} (default), include all group-level effects;
#' if \code{NA}, include no group-level effects.
#' @param allow_new_levels A flag indicating if new levels of group-level
#' effects are allowed (defaults to \code{FALSE}). Only relevant if
#' \code{newdata} is provided.
#' @param sample_new_levels Indicates how to sample new levels for grouping
#' factors specified in \code{re_formula}. This argument is only relevant if
#' \code{newdata} is provided and \code{allow_new_levels} is set to
#' \code{TRUE}. If \code{"uncertainty"} (default), each posterior sample for a
#' new level is drawn from the posterior draws of a randomly chosen existing
#' level. Each posterior sample for a new level may be drawn from a different
#' existing level such that the resulting set of new posterior draws
#' represents the variation across existing levels. If \code{"gaussian"},
#' sample new levels from the (multivariate) normal distribution implied by the
#' group-level standard deviations and correlations. This option may be useful
#' for conducting Bayesian power analysis or predicting new levels in
#' situations where relatively few levels were observed in the original data.
#' If \code{"old_levels"}, directly sample new levels from the existing levels,
#' where a new level is assigned all of the posterior draws of the same
#' (randomly chosen) existing level.
#' @param newdata2 A named \code{list} of objects containing new data, which
#' cannot be passed via argument \code{newdata}. Required for some objects
#' used in autocorrelation structures, or \code{\link{stanvars}}.
#' @param new_objects Deprecated alias of \code{newdata2}.
#' @param incl_autocor A flag indicating if correlation structures originally
#' specified via \code{autocor} should be included in the predictions.
#' Defaults to \code{TRUE}.
#' @param offset Logical; Indicates if offsets should be included in the
#' predictions. Defaults to \code{TRUE}.
#' @param oos Optional indices of observations for which to compute
#' out-of-sample rather than in-sample predictions. Only required in models
#' that make use of response values to make predictions, that is, currently
#' only ARMA models.
#' @param smooths_only Logical; If \code{TRUE} only predictions related to the
#' smooth terms will be computed. Defaults to \code{FALSE}.
#' @param resp Optional names of response variables. If specified, predictions
#' are performed only for the specified response variables.
#' @param ndraws Positive integer indicating how many posterior draws should
#' be used. If \code{NULL} (the default) all draws are used. Ignored if
#' \code{draw_ids} is not \code{NULL}.
#' @param draw_ids An integer vector specifying the posterior draws to be used.
#' If \code{NULL} (the default), all draws are used.
#' @param nsamples Deprecated alias of \code{ndraws}.
#' @param subset Deprecated alias of \code{draw_ids}.
#' @param nug Small positive number for Gaussian process terms only. For
#' numerical reasons, the covariance matrix of a Gaussian process might not be
#' positive definite. Adding a very small number to the matrix's diagonal
#' often solves this problem. If \code{NULL} (the default), \code{nug} is
#' chosen internally.
#' @param point_estimate Shall the returned object contain only point estimates
#' of the parameters instead of their posterior draws? Defaults to
#' \code{NULL} in which case no point estimate is computed. Alternatively, may
#' be set to \code{"mean"} or \code{"median"}. This argument is primarily
#' implemented to ensure compatibility with the \code{\link{loo_subsample}}
#' method.
#' @param ndraws_point_estimate Only used if \code{point_estimate} is not
#' \code{NULL}.
How often shall the point estimate's value be repeated? #' Defaults to \code{1}. #' @param ... Further arguments passed to \code{\link{validate_newdata}}. #' #' @return An object of class \code{'brmsprep'} or \code{'mvbrmsprep'}, #' depending on whether a univariate or multivariate model is passed. #' #' @export prepare_predictions <- function(x, ...) { UseMethod("prepare_predictions") } #' @export prepare_predictions.default <- function(x, ...) { NULL } # the name 'extract_draws' is deprecated as of brms 2.12.6 # remove it eventually in brms 3.0 #' @export extract_draws <- function(x, ...) { warning2("Method 'extract_draws' is deprecated. ", "Please use 'prepare_predictions' instead.") UseMethod("prepare_predictions") } brms/R/stanvars.R0000644000176200001440000002133514430677466013427 0ustar liggesusers#' User-defined variables passed to Stan #' #' Prepare user-defined variables to be passed to one of Stan's #' program blocks. This is primarily useful for defining more complex #' priors, for refitting models without recompilation despite #' changing priors, or for defining custom Stan functions. #' #' @aliases stanvars #' #' @param x An \R object containing data to be passed to Stan. #' Only required if \code{block = 'data'} and ignored otherwise. #' @param name Optional character string providing the desired variable #' name of the object in \code{x}. If \code{NULL} (the default) #' the variable name is directly inferred from \code{x}. #' @param scode Line of Stan code to define the variable #' in Stan language. If \code{block = 'data'}, the #' Stan code is inferred based on the class of \code{x} by default. #' @param block Name of one of Stan's program blocks in #' which the variable should be defined. 
#' Can be \code{'data'},
#' \code{'tdata'} (transformed data), \code{'parameters'},
#' \code{'tparameters'} (transformed parameters), \code{'model'},
#' \code{'likelihood'} (part of the model block where the likelihood is given),
#' \code{'genquant'} (generated quantities) or \code{'functions'}.
#' @param position Name of the position within the block where the
#' Stan code should be placed. Currently allowed are \code{'start'}
#' (the default) and \code{'end'} of the block.
#' @param pll_args Optional Stan code to be put into the header
#' of \code{partial_log_lik} functions. This ensures that the variables
#' specified in \code{scode} can be used in the likelihood even when
#' within-chain parallelization is activated via \code{\link{threading}}.
#'
#' @return An object of class \code{stanvars}.
#'
#' @details
#' The \code{stanvar} function is not vectorized. Instead, multiple
#' \code{stanvars} objects can be added together via \code{+} (see Examples).
#'
#' @examples
#' bprior <- prior(normal(mean_intercept, 10), class = "Intercept")
#' stanvars <- stanvar(5, name = "mean_intercept")
#' make_stancode(count ~ Trt, epilepsy, prior = bprior,
#' stanvars = stanvars)
#'
#' # define a multi-normal prior with known covariance matrix
#' bprior <- prior(multi_normal(M, V), class = "b")
#' stanvars <- stanvar(rep(0, 2), "M", scode = " vector[K] M;") +
#' stanvar(diag(2), "V", scode = " matrix[K, K] V;")
#' make_stancode(count ~ Trt + zBase, epilepsy,
#' prior = bprior, stanvars = stanvars)
#'
#' # define a hierarchical prior on the regression coefficients
#' bprior <- set_prior("normal(0, tau)", class = "b") +
#' set_prior("target += normal_lpdf(tau | 0, 10)", check = FALSE)
#' stanvars <- stanvar(scode = "real tau;",
#' block = "parameters")
#' make_stancode(count ~ Trt + zBase, epilepsy,
#' prior = bprior, stanvars = stanvars)
#'
#' # ensure that 'tau' is passed to the likelihood of a threaded model
#' # not necessary for this example but may be necessary in other cases
#'
#' stanvars <- stanvar(scode = "real tau;",
#' block = "parameters", pll_args = "real tau")
#' make_stancode(count ~ Trt + zBase, epilepsy,
#' stanvars = stanvars, threads = threading(2))
#'
#' @export
stanvar <- function(x = NULL, name = NULL, scode = NULL,
                    block = "data", position = "start", pll_args = NULL) {
  vblocks <- c(
    "data", "tdata", "parameters", "tparameters",
    "model", "genquant", "functions", "likelihood"
  )
  block <- match.arg(block, vblocks)
  vpositions <- c("start", "end")
  position <- match.arg(position, vpositions)
  if (block == "data") {
    if (is.null(x)) {
      stop2("Argument 'x' is required if block = 'data'.")
    }
    if (is.null(name)) {
      # infer the Stan variable name from the passed expression
      name <- deparse0(substitute(x))
    }
    name <- as_one_character(name)
    if (!is_equal(name, make.names(name)) || grepl("\\.", name)) {
      stop2("'", limit_chars(name, 30), "' is not ",
            "a valid variable name in Stan.")
    }
    if (is.null(scode)) {
      # infer scode from x
      if (is.integer(x)) {
        if (length(x) == 1L) {
          scode <- paste0("int ", name)
        } else {
          scode <- paste0("int ", name, "[", length(x), "]")
        }
      } else if (is.vector(x)) {
        if (length(x) == 1L) {
          scode <- paste0("real ", name)
        } else {
          scode <- paste0("vector[", length(x), "] ", name)
        }
      } else if (is.array(x)) {
        if (length(dim(x)) == 1L) {
          scode <- paste0("vector[", length(x), "] ", name)
        } else if (is.matrix(x)) {
          scode <- paste0("matrix[", nrow(x), ", ", ncol(x), "] ", name)
        }
      }
      if (is.null(scode)) {
        stop2(
          "'stanvar' could not infer the Stan code for an object ",
          "of class '", class(x), "'. Please specify the Stan code ",
          "manually via argument 'scode'."
        )
      }
      scode <- paste0(scode, ";")
    }
    if (is.null(pll_args)) {
      # infer pll_args from x
      pll_type <- str_if(block %in% c("data", "tdata"), "data ")
      if (is.integer(x)) {
        if (length(x) == 1L) {
          pll_type <- paste0(pll_type, "int")
        } else {
          pll_type <- paste0(pll_type, "int[]")
        }
      } else if (is.vector(x)) {
        if (length(x) == 1L) {
          pll_type <- paste0(pll_type, "real")
        } else {
          pll_type <- paste0(pll_type, "vector")
        }
      } else if (is.array(x)) {
        if (length(dim(x)) == 1L) {
          pll_type <- paste0(pll_type, "vector")
        } else if (is.matrix(x)) {
          pll_type <- paste0(pll_type, "matrix")
        }
      }
      if (!is.null(pll_type)) {
        pll_args <- paste0(pll_type, " ", name)
      } else {
        # don't throw an error because most people will not use threading
        pll_args <- character(0)
      }
    }
  } else {
    # non-data blocks require user-supplied Stan code
    x <- NULL
    if (is.null(name)) {
      name <- ""
    }
    name <- as_one_character(name)
    if (is.null(scode)) {
      stop2("Argument 'scode' is required if block is not 'data'.")
    }
    scode <- as.character(scode)
    pll_args <- as.character(pll_args)
  }
  if (position == "end" && block %in% c("functions", "data")) {
    stop2("Position '", position, "' is not sensible for block '", block, "'.")
  }
  out <- nlist(name, sdata = x, scode, block, position, pll_args)
  structure(setNames(list(out), name), class = "stanvars")
}

# take a subset of a stanvars object
# @param x a stanvars object
# @param ... conditions defining the desired subset
subset_stanvars <- function(x, ...)
{
  # subset a validated 'stanvars' object by matching elements via '...'
  x <- validate_stanvars(x)
  structure_not_null(x[find_elements(x, ...)], class = "stanvars")
}

# collapse Stan code provided in a stanvars object
# @param x a 'stanvars' object
# @param block optional name of the Stan block to restrict to
# @param position optional position ("start"/"end") to restrict to
# @return a single string of Stan code (or character(0) if 'x' is empty)
collapse_stanvars <- function(x, block = NULL, position = NULL) {
  x <- validate_stanvars(x)
  if (!length(x)) {
    return(character(0))
  }
  if (!is.null(block)) {
    x <- subset_stanvars(x, block = block)
  }
  if (!is.null(position)) {
    x <- subset_stanvars(x, position = position)
  }
  if (!length(x)) {
    return("")
  }
  collapse(wsp(nsp = 2), ufrom_list(x, "scode"), "\n")
}

# collapse partial log-lik code provided in a stanvars object
# @return the 'pll_args' entries joined by ", " (or character(0)/"")
collapse_stanvars_pll_args <- function(x) {
  x <- validate_stanvars(x)
  if (!length(x)) {
    return(character(0))
  }
  args <- ufrom_list(x, "pll_args")
  if (!length(args)) {
    return("")
  }
  collapse(", ", args)
}

# validate 'stanvars' objects
# @param x a 'stanvars' object or NULL (treated as empty)
# @param stan_funs deprecated; appended to the 'functions' block if given
validate_stanvars <- function(x, stan_funs = NULL) {
  if (is.null(x)) {
    x <- empty_stanvars()
  }
  if (!is.stanvars(x)) {
    stop2("Argument 'stanvars' is invalid. See ?stanvar for help.")
  }
  if (length(stan_funs) > 0) {
    warning2("Argument 'stan_funs' is deprecated. Please use argument ",
             "'stanvars' instead. See ?stanvar for more help.")
    stan_funs <- as_one_character(stan_funs)
    x <- x + stanvar(scode = stan_funs, block = "functions")
  }
  x
}

# add new data to stanvars
# @param x a 'stanvars' object
# @param newdata2 a list with new 'data2' objects
# @return a 'stanvars' object
add_newdata_stanvars <- function(x, newdata2) {
  stopifnot(is.stanvars(x))
  stanvars_data <- subset_stanvars(x, block = "data")
  for (name in names(stanvars_data)) {
    if (name %in% names(newdata2)) {
      x[[name]]$sdata <- newdata2[[name]]
    }
  }
  x
}

#' @export
c.stanvars <- function(x, ...)
{
  # combine multiple 'stanvars' objects into one, validating each part
  more <- lapply(list(...), validate_stanvars)
  class(x) <- "list"
  out <- unlist(c(list(x), more), recursive = FALSE)
  nm <- names(out)[nzchar(names(out))]
  if (any(duplicated(nm))) {
    stop2("Duplicated names in 'stanvars' are not allowed.")
  }
  structure(out, class = "stanvars")
}

#' @export
"+.stanvars" <- function(e1, e2) {
  c(e1, e2)
}

# is 'x' a 'stanvars' object?
is.stanvars <- function(x) {
  inherits(x, "stanvars")
}

# create an empty 'stanvars' object
empty_stanvars <- function() {
  structure(list(), class = "stanvars")
}
brms/R/diagnostics.R0000644000176200001440000000676214361545260014071 0ustar liggesusers
#' Extract Diagnostic Quantities of \pkg{brms} Models
#'
#' Extract quantities that can be used to diagnose sampling behavior
#' of the algorithms applied by \pkg{Stan} at the back-end of \pkg{brms}.
#'
#' @name diagnostic-quantities
#' @aliases log_posterior nuts_params rhat neff_ratio
#'
#' @param object,x A \code{brmsfit} object.
#' @param pars An optional character vector of parameter names.
#'   For \code{nuts_params} these will be NUTS sampler parameter
#'   names rather than model parameters. If pars is omitted
#'   all parameters are included.
#' @param ... Arguments passed to individual methods.
#'
#' @return The exact form of the output depends on the method.
#'
#' @details For more details see
#'   \code{\link[bayesplot:bayesplot-extractors]{bayesplot-extractors}}.
#'
#' @examples
#' \dontrun{
#' fit <- brm(time ~ age * sex, data = kidney)
#'
#' lp <- log_posterior(fit)
#' head(lp)
#'
#' np <- nuts_params(fit)
#' str(np)
#' # extract the number of divergence transitions
#' sum(subset(np, Parameter == "divergent__")$Value)
#'
#' head(rhat(fit))
#' head(neff_ratio(fit))
#' }
NULL

#' @rdname diagnostic-quantities
#' @importFrom bayesplot log_posterior
#' @export log_posterior
#' @export
log_posterior.brmsfit <- function(object, ...) {
  # delegate to bayesplot after ensuring draws are present
  contains_draws(object)
  bayesplot::log_posterior(object$fit, ...)
}

#' @rdname diagnostic-quantities
#' @importFrom bayesplot nuts_params
#' @export nuts_params
#' @export
nuts_params.brmsfit <- function(object, pars = NULL, ...) {
  contains_draws(object)
  bayesplot::nuts_params(object$fit, pars = pars, ...)
}

#' @rdname diagnostic-quantities
#' @importFrom posterior rhat
#' @export rhat
#' @export
rhat.brmsfit <- function(x, pars = NULL, ...) {
  contains_draws(x)
  # bayesplot uses outdated rhat code from rstan
  # bayesplot::rhat(object$fit, pars = pars, ...)
  draws <- as_draws_array(x, variable = pars, ...)
  summ <- posterior::summarise_draws(draws, rhat = posterior::rhat)
  out <- summ$rhat
  names(out) <- summ$variable
  out
}

#' @rdname diagnostic-quantities
#' @importFrom bayesplot neff_ratio
#' @export neff_ratio
#' @export
neff_ratio.brmsfit <- function(object, pars = NULL, ...) {
  contains_draws(object)
  # bayesplot uses outdated ess code from rstan
  # bayesplot::neff_ratio(object$fit, pars = pars, ...)
  draws <- as_draws_array(object, variable = pars, ...)
  summ <- posterior::summarise_draws(
    draws,
    ess_bulk = posterior::ess_bulk,
    ess_tail = posterior::ess_tail
  )
  # min of ess_bulk and ess_tail mimics definition of posterior::rhat.default
  ess <- matrixStats::rowMins(cbind(summ$ess_bulk, summ$ess_tail))
  names(ess) <- summ$variable
  ess / ndraws(draws)
}

#' Extract Control Parameters of the NUTS Sampler
#'
#' Extract control parameters of the NUTS sampler such as
#' \code{adapt_delta} or \code{max_treedepth}.
#'
#' @param x An \R object
#' @param pars Optional names of the control parameters to be returned.
#'   If \code{NULL} (the default) all control parameters are returned.
#'   See \code{\link[rstan:stan]{stan}} for more details.
#' @param ... Currently ignored.
#'
#' @return A named \code{list} with control parameter values.
#'
#' @export
control_params <- function(x, ...) {
  UseMethod("control_params")
}

#' @rdname control_params
#' @export
control_params.brmsfit <- function(x, pars = NULL, ...)
{
  # read sampler control parameters from the stored backend fit object
  contains_draws(x)
  if (is_equal(x$backend, "cmdstanr")) {
    out <- attr(x$fit, "metadata")$metadata
  } else {
    out <- attr(x$fit@sim$samples[[1]], "args")$control
  }
  if (!is.null(pars)) {
    out <- out[pars]
  }
  out
}
brms/R/family-lists.R0000644000176200001440000004304414403561357014173 0ustar liggesusers
# This file contains a list for every native family.
# These lists may contain the following elements:
#   links: possible link function (first is default)
#   dpars: distributional parameters of the family
#   type: either real or int (i.e. continuous or discrete)
#   ybounds: area of definition of the response values
#   closed: is the interval closed or open?
#   ad: supported addition arguments
#   include: names of user-defined Stan functions
#     to be included in the Stan code
#   normalized: suffixes of Stan lpdfs or lpmfs which only exist as normalized
#     versions; can also be "" in which case the family is always normalized
#   specials: character vector specialties of some families

.family_gaussian <- function() {
  list(
    links = c("identity", "log", "inverse", "softplus", "squareplus",
              "logit", "probit", "probit_approx", "cloglog", "cauchit", "softit"),
    dpars = c("mu", "sigma"),
    type = "real",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "se", "cens", "trunc", "mi", "index"),
    normalized = c("_time_hom", "_time_het", "_lagsar", "_errorsar", "_fcor"),
    specials = c("residuals", "rescor")
  )
}

.family_student <- function() {
  list(
    links = c("identity", "log", "inverse", "softplus", "squareplus",
              "logit", "probit", "probit_approx", "cloglog", "cauchit", "softit"),
    dpars = c("mu", "sigma", "nu"),
    type = "real",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "se", "cens", "trunc", "mi", "index"),
    include = "fun_logm1.stan",
    normalized = c("_time_hom", "_time_het", "_lagsar", "_errorsar", "_fcor"),
    specials = c("residuals", "rescor")
  )
}

.family_skew_normal <- function() {
  list(
    links = c("identity", "log", "inverse", "softplus", "squareplus",
              "logit", "probit", "probit_approx", "cloglog", "cauchit", "softit"),
    dpars = c("mu", "sigma", "alpha"),
    type = "real",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "se", "cens", "trunc", "mi", "index")
  )
}

.family_binomial <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit", "identity", "log"),
    dpars = c("mu"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "trials", "cens", "trunc", "index"),
    specials = "sbi_logit"
  )
}

.family_beta_binomial <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit", "identity"),
    dpars = c("mu", "phi"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "trials", "cens", "trunc", "index")
  )
}

.family_bernoulli <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit", "identity", "log"),
    dpars = c("mu"),
    type = "int",
    ybounds = c(0, 1),
    closed = c(TRUE, TRUE),
    ad = c("weights", "subset", "index"),
    specials = c("binary", "sbi_logit")
  )
}

.family_categorical <- function() {
  list(
    links = "logit",
    dpars = NULL,
    multi_dpars = "mu",  # size determined by the data
    type = "int",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "index"),
    specials = c("categorical", "joint_link", "sbi_logit")
  )
}

.family_multinomial <- function() {
  list(
    links = "logit",
    dpars = NULL,
    multi_dpars = "mu",  # size determined by the data
    type = "int",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "trials", "index"),
    specials = c("multinomial", "joint_link"),
    include = "fun_multinomial_logit.stan",
    normalized = ""
  )
}

.family_beta <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit", "identity", "log"),
    dpars = c("mu", "phi"),
    type = "real",
    ybounds = c(0, 1),
    closed = c(FALSE, FALSE),
    ad = c("weights", "subset", "cens", "trunc", "mi", "index")
  )
}

.family_dirichlet <- function() {
  list(
    links = "logit",
    dpars = "phi",
    multi_dpars = "mu",  # size determined by the data
    type = "real",
    ybounds = c(0, 1),
    closed = c(FALSE, FALSE),
    ad = c("weights", "subset", "index"),
    specials = c("simplex", "joint_link"),
    include = "fun_dirichlet_logit.stan",
    normalized = ""
  )
}

.family_dirichlet2 <- function() {
  list(
    links = c("log", "softplus", "squareplus", "identity", "logm1"),
    dpars = NULL,
    multi_dpars = "mu",  # size determined by the data
    type = "real",
    ybounds = c(0, 1),
    closed = c(FALSE, FALSE),
    ad = c("weights", "subset", "index"),
    specials = c("simplex"),
    include = "fun_logm1.stan",
    normalized = ""
  )
}

.family_logistic_normal <- function() {
  list(
    links = "identity",
    dpars = NULL,
    multi_dpars = c("mu", "sigma"),  # size determined by the data
    type = "real",
    ybounds = c(0, 1),
    closed = c(FALSE, FALSE),
    ad = c("weights", "subset", "index"),
    specials = c("simplex", "logistic_normal", "joint_link"),
    include = "fun_logistic_normal.stan",
    normalized = ""
  )
}

.family_poisson <- function() {
  list(
    links = c("log", "identity", "sqrt", "softplus", "squareplus"),
    dpars = c("mu"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "rate", "index"),
    specials = "sbi_log"
  )
}

.family_negbinomial <- function() {
  list(
    links = c("log", "identity", "sqrt", "softplus", "squareplus"),
    dpars = c("mu", "shape"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "rate", "index"),
    specials = "sbi_log"
  )
}

# as negbinomial but with sigma = 1 / shape parameterization
.family_negbinomial2 <- function() {
  list(
    links = c("log", "identity", "sqrt", "softplus", "squareplus"),
    dpars = c("mu", "sigma"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "rate", "index"),
    specials = "sbi_log"
  )
}

.family_geometric <- function() {
  list(
    links = c("log", "identity", "sqrt", "softplus", "squareplus"),
    dpars = c("mu"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "rate", "index"),
    specials = "sbi_log"
  )
}

.family_discrete_weibull <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit", "identity"),
    dpars = c("mu", "shape"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    include = "fun_discrete_weibull.stan"
  )
}

.family_com_poisson <- function() {
  list(
    links = c("log", "identity", "sqrt", "softplus", "squareplus"),
    dpars = c("mu", "shape"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    include = "fun_com_poisson.stan",
    specials = "sbi_log"
  )
}

.family_gamma <- function() {
  list(
    links = c("log", "identity", "inverse", "softplus", "squareplus"),
    dpars = c("mu", "shape"),
    type = "real",
    ybounds = c(0, Inf),
    closed = c(FALSE, NA),
    ad = c("weights", "subset", "cens", "trunc", "mi", "index")
  )
}

.family_weibull <- function() {
  list(
    links = c("log", "identity", "inverse", "softplus", "squareplus"),
    dpars = c("mu", "shape"),
    type = "real",
    ybounds = c(0, Inf),
    closed = c(FALSE, NA),
    ad = c("weights", "subset", "cens", "trunc", "mi", "index")
  )
}

.family_exponential <- function() {
  list(
    links = c("log", "identity", "inverse", "softplus", "squareplus"),
    dpars = "mu",
    type = "real",
    ybounds = c(0, Inf),
    closed = c(FALSE, NA),
    ad = c("weights", "subset", "cens", "trunc", "mi", "index")
  )
}

.family_frechet <- function() {
  list(
    links = c("log", "identity", "inverse", "softplus", "squareplus"),
    dpars = c("mu", "nu"),
    type = "real",
    ybounds = c(0, Inf),
    closed = c(FALSE, NA),
    ad = c("weights", "subset", "cens", "trunc", "mi", "index"),
    include = "fun_logm1.stan"
  )
}

.family_inverse.gaussian <- function() {
  list(
    links = c("1/mu^2", "inverse", "identity", "log", "softplus", "squareplus"),
    dpars = c("mu", "shape"),
    type = "real",
    ybounds = c(0, Inf),
    closed = c(FALSE, NA),
    ad = c("weights", "subset", "cens", "trunc", "mi", "index"),
    include = "fun_inv_gaussian.stan"
  )
}

.family_lognormal <- function() {
  list(
    links = c("identity", "inverse"),
    dpars = c("mu", "sigma"),
    type = "real",
    ybounds = c(0, Inf),
    closed = c(FALSE, NA),
    ad = c("weights", "subset", "cens", "trunc", "mi", "index"),
    specials = "logscale"
  )
}

.family_shifted_lognormal <- function() {
  list(
    links = c("identity", "inverse"),
    dpars = c("mu", "sigma", "ndt"),
    type = "real",
    ybounds = c(0, Inf),
    closed = c(FALSE, NA),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    specials = "logscale"
  )
}

.family_exgaussian <- function() {
  list(
    links = c("identity", "log", "inverse", "softplus", "squareplus"),
    dpars = c("mu", "sigma", "beta"),
    type = "real",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "cens", "trunc", "mi", "index")
  )
}

.family_wiener <- function() {
  list(
    links = c("identity", "log", "softplus", "squareplus"),
    dpars = c("mu", "bs", "ndt", "bias"),
    type = "real",
    ybounds = c(0, Inf),
    closed = c(FALSE, NA),
    ad = c("weights", "subset", "dec", "index"),
    include = "fun_wiener_diffusion.stan",
    normalized = ""
  )
}

.family_gen_extreme_value <- function() {
  list(
    links = c("identity", "log", "inverse", "softplus", "squareplus"),
    dpars = c("mu", "sigma", "xi"),
    tmp_dpars = "xi",
    type = "real",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "cens", "trunc", "mi", "index"),
    include = c("fun_gen_extreme_value.stan", "fun_scale_xi.stan"),
    normalized = ""
  )
}

.family_von_mises <- function() {
  list(
    links = c("tan_half", "identity"),
    dpars = c("mu", "kappa"),
    type = "real",
    ybounds = c(-pi, pi),
    closed = c(TRUE, TRUE),
    ad = c("weights", "subset", "cens", "trunc", "mi", "index"),
    include = c("fun_tan_half.stan", "fun_von_mises.stan"),
    normalized = ""
  )
}

.family_asym_laplace <- function() {
  list(
    links = c("identity", "log", "inverse", "softplus", "squareplus"),
    dpars = c("mu", "sigma", "quantile"),
    type = "real",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "cens", "trunc", "mi", "index"),
    include = "fun_asym_laplace.stan",
    normalized = ""
  )
}

.family_zero_inflated_asym_laplace <- function() {
  list(
    links = c("identity", "log", "inverse", "softplus", "squareplus"),
    dpars = c("mu", "sigma", "quantile", "zi"),
    type = "real",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    include = c("fun_asym_laplace.stan",
                "fun_zero_inflated_asym_laplace.stan")
  )
}

.family_cox <- function() {
  list(
    links = c("log", "identity", "softplus", "squareplus"),
    dpars = c("mu"),
    type = "real",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    include = "fun_cox.stan",
    specials = c("cox", "sbi_log", "sbi_log_cdf"),
    normalized = ""
  )
}

.family_cumulative <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit"),
    dpars = c("mu", "disc"),
    type = "int",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "thres", "cat", "index"),
    specials = c("ordinal", "ordered_thres", "thres_minus_eta",
                 "joint_link", "ocs", "sbi_logit"),
    normalized = ""
  )
}

.family_sratio <- function() {
  list(
    links = c("logit", "probit", "probit_approx",
              "cloglog", "cauchit"  # , "softit"
    ),
    dpars = c("mu", "disc"),
    type = "int",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "thres", "cat", "index"),
    specials = c("ordinal", "cs", "thres_minus_eta", "joint_link"),
    normalized = ""
  )
}

.family_cratio <- function() {
  list(
    links = c("logit", "probit", "probit_approx",
              "cloglog", "cauchit"  # , "softit"
    ),
    dpars = c("mu", "disc"),
    type = "int",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "thres", "cat", "index"),
    specials = c("ordinal", "cs", "eta_minus_thres", "joint_link"),
    normalized = ""
  )
}

.family_acat <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit"),
    dpars = c("mu", "disc"),
    type = "int",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "thres", "cat", "index"),
    specials = c("ordinal", "cs", "eta_minus_thres", "joint_link"),
    normalized = ""
  )
}

.family_hurdle_poisson <- function() {
  list(
    links = c("log", "identity", "sqrt", "softplus", "squareplus"),
    dpars = c("mu", "hu"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    include = "fun_hurdle_poisson.stan",
    specials = c("sbi_log", "sbi_hu_logit"),
    normalized = ""
  )
}

.family_hurdle_negbinomial <- function() {
  list(
    links = c("log", "identity", "sqrt", "softplus", "squareplus"),
    dpars = c("mu", "shape", "hu"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    include = "fun_hurdle_negbinomial.stan",
    specials = c("sbi_log", "sbi_hu_logit"),
    normalized = ""
  )
}

.family_hurdle_gamma <- function() {
  list(
    links = c("log", "identity", "inverse", "softplus", "squareplus"),
    dpars = c("mu", "shape", "hu"),
    type = "real",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    include = "fun_hurdle_gamma.stan",
    specials = "sbi_hu_logit",
    normalized = ""
  )
}

.family_hurdle_lognormal <- function() {
  list(
    links = c("identity", "inverse"),
    dpars = c("mu", "sigma", "hu"),
    type = "real",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    include = "fun_hurdle_lognormal.stan",
    specials = c("logscale", "sbi_hu_logit"),
    normalized = ""
  )
}

.family_hurdle_cumulative <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit"),
    dpars = c("mu", "hu", "disc"),
    type = "int",
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA),
    ad = c("weights", "subset", "thres", "cat", "index"),
    specials = c("ordinal", "ordered_thres", "thres_minus_eta",
                 "joint_link", "ocs", "sbi_logit", "extra_cat"),
    normalized = ""
  )
}

.family_zero_inflated_poisson <- function() {
  list(
    links = c("log", "identity", "sqrt", "softplus", "squareplus"),
    dpars = c("mu", "zi"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    include = "fun_zero_inflated_poisson.stan",
    specials = c("sbi_log", "sbi_zi_logit"),
    normalized = ""
  )
}

.family_zero_inflated_negbinomial <- function() {
  list(
    links = c("log", "identity", "sqrt", "softplus", "squareplus"),
    dpars = c("mu", "shape", "zi"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    include = "fun_zero_inflated_negbinomial.stan",
    specials = c("sbi_log", "sbi_zi_logit"),
    normalized = ""
  )
}

.family_zero_inflated_binomial <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit", "identity", "log"),
    dpars = c("mu", "zi"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "trials", "cens", "trunc", "index"),
    include = "fun_zero_inflated_binomial.stan",
    specials = c("sbi_logit", "sbi_zi_logit"),
    normalized = ""
  )
}

.family_zero_inflated_beta_binomial <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit", "identity", "log"),
    dpars = c("mu", "phi", "zi"),
    type = "int",
    ybounds = c(0, Inf),
    closed = c(TRUE, NA),
    ad = c("weights", "subset", "trials", "cens", "trunc", "index"),
    include = "fun_zero_inflated_beta_binomial.stan",
    specials = c("sbi_zi_logit"),
    normalized = ""
  )
}

.family_zero_inflated_beta <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit", "identity", "log"),
    dpars = c("mu", "phi", "zi"),
    type = "real",
    ybounds = c(0, 1),
    closed = c(TRUE, FALSE),
    ad = c("weights", "subset", "cens", "trunc", "index"),
    include = "fun_zero_inflated_beta.stan",
    specials = "sbi_zi_logit",
    normalized = ""
  )
}
.family_zero_one_inflated_beta <- function() {
  list(
    links = c("logit", "probit", "probit_approx", "cloglog",
              "cauchit", "softit", "identity", "log"),
    dpars = c("mu", "phi", "zoi", "coi"),
    type = "real",
    ybounds = c(0, 1),
    closed = c(TRUE, TRUE),
    ad = c("weights", "subset", "index"),
    include = "fun_zero_one_inflated_beta.stan",
    specials = "sbi_zi_logit",
    normalized = ""
  )
}

.family_custom <- function() {
  list(
    ad = c("weights", "subset", "se", "cens", "trunc", "trials",
           "thres", "cat", "dec", "mi", "index", "vreal", "vint"),
    ybounds = c(-Inf, Inf),
    closed = c(NA, NA)
  )
}
brms/R/formula-cs.R0000644000176200001440000000167314213413565013624 0ustar liggesusers
#' Category Specific Predictors in \pkg{brms} Models
#'
#' @aliases cse
#'
#' @param expr Expression containing predictors,
#'   for which category specific effects should be estimated.
#'   For evaluation, \R formula syntax is applied.
#'
#' @details For detailed documentation see \code{help(brmsformula)}
#'   as well as \code{vignette("brms_overview")}.
#'
#'   This function is almost solely useful when
#'   called in formulas passed to the \pkg{brms} package.
#'
#' @seealso \code{\link{brmsformula}}
#'
#' @examples
#' \dontrun{
#' fit <- brm(rating ~ period + carry + cs(treat),
#'            data = inhaler, family = sratio("cloglog"),
#'            prior = set_prior("normal(0,5)"), chains = 2)
#' summary(fit)
#' plot(fit, ask = FALSE)
#' }
#'
#' @export
cs <- function(expr) {
  deparse_no_string(substitute(expr))
}

# alias of function 'cs' used in the JSS paper of brms
#' @export
cse <- function(expr) {
  deparse_no_string(substitute(expr))
}
brms/R/data-response.R0000644000176200001440000005160314466704275014322 0ustar liggesusers
#' Extract response values
#'
#' Extract response values from a \code{\link{brmsfit}} object.
#'
#' @param x A \code{\link{brmsfit}} object.
#' @param resp Optional names of response variables for which to extract values.
#' @param warn For internal use only.
#' @param ... Further arguments passed to \code{\link{standata}}.
#' @inheritParams posterior_predict.brmsfit
#'
#' @return Returns a vector of response values for univariate models and a
#'   matrix of response values with one column per response variable for
#'   multivariate models.
#'
#' @keywords internal
#' @export
get_y <- function(x, resp = NULL, sort = FALSE, warn = FALSE, ...) {
  stopifnot(is.brmsfit(x))
  resp <- validate_resp(resp, x)
  sort <- as_one_logical(sort)
  warn <- as_one_logical(warn)
  # build the Stan data containing only the response part
  args <- list(x, resp = resp, ...)
  args$re_formula <- NA
  args$check_response <- TRUE
  args$only_response <- TRUE
  args$internal <- TRUE
  sdata <- do_call(standata, args)
  if (warn) {
    if (any(paste0("cens", usc(resp)) %in% names(sdata))) {
      warning2("Results may not be meaningful for censored models.")
    }
  }
  Ynames <- paste0("Y", usc(resp))
  if (length(Ynames) > 1L) {
    # multivariate model: one column per response variable
    out <- do_call(cbind, sdata[Ynames])
    colnames(out) <- resp
  } else {
    out <- sdata[[Ynames]]
  }
  # restore the original row order unless 'sort' is requested
  old_order <- attr(sdata, "old_order")
  if (!is.null(old_order) && !sort) {
    stopifnot(length(old_order) == NROW(out))
    out <- p(out, old_order)
  }
  out
}

#' Prepare Response Data
#'
#' Prepare data related to response variables in \pkg{brms}.
#' Only exported for use in package development.
#'
#' @param x An \R object.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A named list of data related to response variables.
#'
#' @keywords internal
#' @export
data_response <- function(x, ...) {
  UseMethod("data_response")
}

#' @export
data_response.mvbrmsterms <- function(x, basis = NULL, ...) {
  # collect response data of all univariate submodels
  out <- list()
  for (i in seq_along(x$terms)) {
    bs <- basis$resps[[x$responses[i]]]
    c(out) <- data_response(x$terms[[i]], basis = bs, ...)
  }
  if (x$rescor) {
    out$nresp <- length(x$responses)
    out$nrescor <- out$nresp * (out$nresp - 1) / 2
  }
  out
}

#' @export
data_response.brmsterms <- function(x, data, check_response = TRUE,
                                    internal = FALSE, basis = NULL, ...)
{ data <- subset_data(data, x) N <- nrow(data) # TODO: rename 'Y' to 'y' Y <- model.response(model.frame(x$respform, data, na.action = na.pass)) out <- list(N = N, Y = unname(Y)) if (is_binary(x$family)) { bin_levels <- basis$resp_levels if (is.null(bin_levels)) { bin_levels <- levels(as.factor(out$Y)) } # fixes issues #1298 and #1511 if (is.numeric(out$Y) && length(bin_levels) == 1L) { if (0 %in% bin_levels) { # 1 as default event level bin_levels <- c(0, 1) } else { # 0 as default non-event level bin_levels <- c(0, bin_levels) } } out$Y <- as.integer(as_factor(out$Y, levels = bin_levels)) - 1 } if (is_categorical(x$family)) { out$Y <- as.integer(as_factor(out$Y, levels = basis$resp_levels)) } if (is_ordinal(x$family) && is.ordered(out$Y)) { diff <- ifelse(has_extra_cat(x$family), 1L, 0L) out$Y <- as.integer(out$Y) - diff } if (check_response) { family4error <- family_names(x$family) if (is.mixfamily(x$family)) { family4error <- paste0(family4error, collapse = ", ") family4error <- paste0("mixture(", family4error, ")") } if (!allow_factors(x$family) && !is.numeric(out$Y)) { stop2("Family '", family4error, "' requires numeric responses.") } if (is_binary(x$family)) { if (any(!out$Y %in% c(0, 1))) { stop2("Family '", family4error, "' requires responses ", "to contain only two different values.") } } if (is_ordinal(x$family)) { extra_cat <- has_extra_cat(x$family) min_int <- ifelse(extra_cat, 0L, 1L) msg <- ifelse(extra_cat, "non-negative", "positive") if (any(!is_wholenumber(out$Y)) || any(out$Y < min_int)) { stop2("Family '", family4error, "' requires either ", msg, " integers or ordered factors as responses.") } } if (use_int(x$family)) { if (!all(is_wholenumber(out$Y))) { stop2("Family '", family4error, "' requires integer responses.") } } if (has_multicol(x$family)) { if (!is.matrix(out$Y)) { stop2("This model requires a response matrix.") } } if (is_simplex(x$family)) { if (!is_equal(rowSums(out$Y), rep(1, nrow(out$Y)))) { stop2("Response values in simplex 
models must sum to 1.") } } ybounds <- family_info(x$family, "ybounds") closed <- family_info(x$family, "closed") if (is.finite(ybounds[1])) { y_min <- min(out$Y, na.rm = TRUE) if (closed[1] && y_min < ybounds[1]) { stop2("Family '", family4error, "' requires response greater ", "than or equal to ", ybounds[1], ".") } else if (!closed[1] && y_min <= ybounds[1]) { stop2("Family '", family4error, "' requires response greater ", "than ", round(ybounds[1], 2), ".") } } if (is.finite(ybounds[2])) { y_max <- max(out$Y, na.rm = TRUE) if (closed[2] && y_max > ybounds[2]) { stop2("Family '", family4error, "' requires response smaller ", "than or equal to ", ybounds[2], ".") } else if (!closed[2] && y_max >= ybounds[2]) { stop2("Family '", family4error, "' requires response smaller ", "than ", round(ybounds[2], 2), ".") } } out$Y <- as.array(out$Y) } # data for addition arguments of the response if (has_trials(x$family) || is.formula(x$adforms$trials)) { if (!length(x$adforms$trials)) { stop2("Specifying 'trials' is required for this model.") } if (!is.formula(x$adforms$trials)) { stop2("Argument 'trials' is misspecified.") } trials <- get_ad_values(x, "trials", "trials", data) if (!is.numeric(trials)) { stop2("Number of trials must be numeric.") } if (any(!is_wholenumber(trials) | trials < 0)) { stop2("Number of trials must be non-negative integers.") } if (length(trials) == 1L) { trials <- rep(trials, nrow(data)) } if (check_response) { if (is_multinomial(x$family)) { if (!is_equal(rowSums(out$Y), trials)) { stop2("Number of trials does not match the number of events.") } } else if (has_trials(x$family)) { if (max(trials) == 1L && !internal) { message("Only 2 levels detected so that family 'bernoulli' ", "might be a more efficient choice.") } if (any(out$Y > trials)) { stop2("Number of trials is smaller than the number of events.") } } } out$trials <- as.array(trials) } if (has_cat(x$family)) { ncat <- length(get_cats(x$family)) if (min(ncat) < 2L) { stop2("At least two 
response categories are required.") } if (!has_multicol(x$family)) { if (ncat == 2L && !internal) { message("Only 2 levels detected so that family 'bernoulli' ", "might be a more efficient choice.") } if (check_response && any(out$Y > ncat)) { stop2("Number of categories is smaller than the response ", "variable would suggest.") } } out$ncat <- ncat } if (has_thres(x$family)) { thres <- family_info(x, "thres") if (has_thres_groups(x$family)) { groups <- get_thres_groups(x) out$ngrthres <- length(groups) grthres <- get_ad_values(x, "thres", "gr", data) grthres <- factor(rename(grthres), levels = groups) # create an matrix of threshold indices per observation Jgrthres <- match(grthres, groups) nthres <- as.array(rep(NA, length(groups))) for (i in seq_along(groups)) { nthres[i] <- max(subset2(thres, group = groups[i])$thres) } if (check_response && any(out$Y > nthres[Jgrthres] + 1)) { stop2("Number of thresholds is smaller than required by the response.") } Kthres_cumsum <- cumsum(nthres) Kthres_start <- c(1, Kthres_cumsum[-length(nthres)] + 1) Kthres_end <- Kthres_cumsum Jthres <- cbind(Kthres_start, Kthres_end)[Jgrthres, , drop = FALSE] out$Jthres <- Jthres } else { nthres <- max(thres$thres) if (check_response && any(out$Y > nthres + 1)) { stop2("Number of thresholds is smaller than required by the response.") } } if (max(nthres) == 1L && !internal) { message("Only 2 levels detected so that family 'bernoulli' ", "might be a more efficient choice.") } out$nthres <- nthres } if (is.formula(x$adforms$cat)) { warning2("Addition argument 'cat' is deprecated. Use 'thres' instead. 
", "See ?brmsformula for more details.") } if (is.formula(x$adforms$se)) { se <- get_ad_values(x, "se", "se", data) if (!is.numeric(se)) { stop2("Standard errors must be numeric.") } if (min(se) < 0) { stop2("Standard errors must be non-negative.") } out$se <- as.array(se) } if (is.formula(x$adforms$weights)) { weights <- get_ad_values(x, "weights", "weights", data) if (!is.numeric(weights)) { stop2("Weights must be numeric.") } if (min(weights) < 0) { stop2("Weights must be non-negative.") } if (get_ad_flag(x, "weights", "scale")) { weights <- weights / sum(weights) * length(weights) } out$weights <- as.array(weights) } if (is.formula(x$adforms$dec)) { dec <- get_ad_values(x, "dec", "dec", data) if (is.character(dec) || is.factor(dec)) { if (!all(unique(dec) %in% c("lower", "upper"))) { stop2("Decisions should be 'lower' or 'upper' ", "when supplied as characters or factors.") } dec <- ifelse(dec == "lower", 0, 1) } else { dec <- as.numeric(as.logical(dec)) } out$dec <- as.array(dec) } if (is.formula(x$adforms$rate)) { denom <- get_ad_values(x, "rate", "denom", data) if (!is.numeric(denom)) { stop2("Rate denomiators should be numeric.") } if (isTRUE(any(denom <= 0))) { stop2("Rate denomiators should be positive.") } out$denom <- as.array(denom) } if (is.formula(x$adforms$cens) && check_response) { cens <- get_ad_values(x, "cens", "cens", data) cens <- prepare_cens(cens) if (!all(is_wholenumber(cens) & cens %in% -1:2)) { stop2( "Invalid censoring data. Accepted values are ", "'left', 'none', 'right', and 'interval'\n", "(abbreviations are allowed) or -1, 0, 1, and 2.\n", "TRUE and FALSE are also accepted ", "and refer to 'right' and 'none' respectively." 
) } out$cens <- as.array(cens) icens <- cens %in% 2 y2_expr <- get_ad_expr(x, "cens", "y2") if (any(icens) || !is.null(y2_expr)) { # interval censoring is required # check for 'y2' above as well to prevent issue #1367 y2 <- unname(get_ad_values(x, "cens", "y2", data)) if (is.null(y2)) { stop2("Argument 'y2' is required for interval censored data.") } if (anyNA(y2[icens])) { stop2("'y2' should not be NA for interval censored observations.") } if (any(out$Y[icens] >= y2[icens])) { stop2("Left censor points must be smaller than right ", "censor points for interval censored data.") } y2[!icens] <- 0 # not used in Stan out$rcens <- as.array(y2) } } if (is.formula(x$adforms$trunc)) { lb <- as.numeric(get_ad_values(x, "trunc", "lb", data)) ub <- as.numeric(get_ad_values(x, "trunc", "ub", data)) if (any(lb >= ub)) { stop2("Truncation bounds are invalid: lb >= ub") } if (length(lb) == 1L) { lb <- rep(lb, N) } if (length(ub) == 1L) { ub <- rep(ub, N) } if (length(lb) != N || length(ub) != N) { stop2("Invalid truncation bounds.") } inv_bounds <- out$Y < lb | out$Y > ub if (check_response && isTRUE(any(inv_bounds))) { stop2("Some responses are outside of the truncation bounds.") } out$lb <- lb out$ub <- ub } if (is.formula(x$adforms$mi)) { sdy <- get_sdy(x, data) if (is.null(sdy)) { # missings only which_mi <- which(is.na(out$Y)) out$Jmi <- as.array(which_mi) out$Nmi <- length(out$Jmi) } else { # measurement error in the response if (length(sdy) == 1L) { sdy <- rep(sdy, length(out$Y)) } if (length(sdy) != length(out$Y)) { stop2("'sdy' must have the same length as the response.") } # all observations will have a latent score which_mi <- which(is.na(out$Y) | is.infinite(sdy)) out$Jme <- as.array(setdiff(seq_along(out$Y), which_mi)) out$Nme <- length(out$Jme) out$noise <- as.array(sdy) if (!internal) { out$noise[which_mi] <- Inf } } # bounds are required for predicting new missing values # not required in Stan right now as bounds are hard-coded there tbounds <- trunc_bounds(x, 
data, incl_family = TRUE) out$lbmi <- tbounds$lb out$ubmi <- tbounds$ub if (!internal) { # Stan does not allow NAs in data # use Inf to that min(Y) is not affected out$Y[which_mi] <- Inf } } if (is.formula(x$adforms$vreal)) { # vectors of real values for use in custom families vreal <- eval_rhs(x$adforms$vreal) vreal <- lapply(vreal$vars, eval2, data) names(vreal) <- paste0("vreal", seq_along(vreal)) for (i in seq_along(vreal)) { if (length(vreal[[i]]) == 1L) { vreal[[i]] <- rep(vreal[[i]], N) } vreal[[i]] <- as.array(as.numeric(vreal[[i]])) } c(out) <- vreal } if (is.formula(x$adforms$vint)) { # vectors of integer values for use in custom families vint <- eval_rhs(x$adforms$vint) vint <- lapply(vint$vars, eval2, data) names(vint) <- paste0("vint", seq_along(vint)) for (i in seq_along(vint)) { if (length(vint[[i]]) == 1L) { vint[[i]] <- rep(vint[[i]], N) } if (!all(is_wholenumber(vint[[i]]))) { stop2("'vint' requires whole numbers as input.") } vint[[i]] <- as.array(vint[[i]]) } c(out) <- vint } if (length(out)) { resp <- usc(combine_prefix(x)) out <- setNames(out, paste0(names(out), resp)) } out } # data specific for mixture models data_mixture <- function(bterms, data2, prior) { stopifnot(is.brmsterms(bterms)) out <- list() if (is.mixfamily(bterms$family)) { families <- family_names(bterms$family) dp_classes <- dpar_class(names(c(bterms$dpars, bterms$fdpars))) if (!any(dp_classes %in% "theta")) { # estimate mixture probabilities directly take <- find_rows(prior, class = "theta", resp = bterms$resp) theta_prior <- prior$prior[take] con_theta <- eval_dirichlet(theta_prior, length(families), data2) out$con_theta <- as.array(con_theta) p <- usc(combine_prefix(bterms)) names(out) <- paste0(names(out), p) } } out } # data for the baseline functions of Cox models data_bhaz <- function(bterms, data, data2, prior, basis = NULL) { out <- list() if (!is_cox(bterms$family)) { return(out) } y <- model.response(model.frame(bterms$respform, data, na.action = na.pass)) args <- 
bterms$family$bhaz
  bs <- basis$basis_matrix
  # basis matrix of the baseline hazard and its integrated (cumulative) version
  out$Zbhaz <- bhaz_basis_matrix(y, args, basis = bs)
  out$Zcbhaz <- bhaz_basis_matrix(y, args, integrate = TRUE, basis = bs)
  out$Kbhaz <- NCOL(out$Zbhaz)
  sbhaz_prior <- subset2(prior, class = "sbhaz", resp = bterms$resp)
  con_sbhaz <- eval_dirichlet(sbhaz_prior$prior, out$Kbhaz, data2)
  out$con_sbhaz <- as.array(con_sbhaz)
  out
}

# Basis matrices for baseline hazard functions of the Cox model
# @param y vector of response values
# @param args arguments passed to the spline generating functions
# @param integrate compute the I-spline instead of the M-spline basis?
# @param basis optional precomputed basis matrix
# @return the design matrix of the baseline hazard function
bhaz_basis_matrix <- function(y, args = list(), integrate = FALSE,
                              basis = NULL) {
  require_package("splines2")
  if (!is.null(basis)) {
    # perform predictions based on an existing basis matrix
    stopifnot(inherits(basis, "mSpline"))
    if (integrate) {
      # for predictions just the attributes are required,
      # which are the same for M-splines and I-splines
      class(basis) <- c("matrix", "iSpline")
    }
    return(predict(basis, y))
  }
  stopifnot(is.list(args))
  args$x <- y
  if (!is.null(args$intercept)) {
    args$intercept <- as_one_logical(args$intercept)
  }
  if (is.null(args$Boundary.knots)) {
    # avoid 'knots' outside 'Boundary.knots' error (#1143)
    # we also need a smaller lower boundary knot to avoid lp = -Inf
    # the below choices are ad-hoc and may need further thought
    min_y <- min(y, na.rm = TRUE)
    max_y <- max(y, na.rm = TRUE)
    diff_y <- max_y - min_y
    lower_knot <- max(min_y - diff_y / 50, 0)
    upper_knot <- max_y + diff_y / 50
    args$Boundary.knots <- c(lower_knot, upper_knot)
  }
  if (integrate) {
    out <- do_call(splines2::iSpline, args)
  } else {
    out <- do_call(splines2::mSpline, args)
  }
  out
}

# extract names of response categories
# @param x a brmsterms object or one that can be coerced to it
# @param data user specified data
# @return a vector of category names
extract_cat_names <- function(x,
data) {
  stopifnot(is.brmsformula(x) || is.brmsterms(x))
  respform <- validate_resp_formula(x$formula)
  mr <- model.response(model.frame(respform, data))
  if (has_multicol(x)) {
    # categories are stored as the columns of a response matrix
    mr <- as.matrix(mr)
    out <- as.character(colnames(mr))
    if (!length(out)) {
      # unnamed columns: fall back to column indices as names
      out <- as.character(seq_cols(mr))
    }
  } else {
    out <- levels(factor(mr))
  }
  out
}

# extract names of ordinal thresholds
# @param x a brmsformula or brmsterms object
# @param data user specified data
# @return a data.frame with columns 'thres' and 'group'
extract_thres_names <- function(x, data) {
  stopifnot(is.brmsformula(x) || is.brmsterms(x), has_thres(x))
  if (is.null(x$adforms)) {
    x$adforms <- terms_ad(x$formula, x$family)
  }
  nthres <- get_ad_values(x, "thres", "thres", data)
  if (any(!is_wholenumber(nthres) | nthres < 1L)) {
    stop2("Number of thresholds must be a positive integer.")
  }
  # has an extra category that is not part of the ordinal scale? (#1429)
  extra_cat <- has_extra_cat(x$family)
  grthres <- get_ad_values(x, "thres", "gr", data)
  if (!is.null(grthres)) {
    # grouping variable was specified
    if (!is_like_factor(grthres)) {
      stop2("Variable 'gr' in 'thres' needs to be factor-like.")
    }
    grthres <- factor(grthres)
    group <- levels(grthres)
    if (!length(nthres)) {
      # extract number of thresholds from the response values per group
      nthres <- rep(NA, length(group))
      for (i in seq_along(group)) {
        take <- grthres %in% group[i]
        nthres[i] <- extract_nthres(
          x$formula, data[take, , drop = FALSE],
          extra_cat = extra_cat
        )
      }
    } else if (length(nthres) == 1L) {
      # replicate number of thresholds across groups
      nthres <- rep(nthres, length(group))
    } else {
      # number of thresholds is a variable in the data
      for (i in seq_along(group)) {
        # validate values of the same level
        take <- grthres %in% group[i]
        if (length(unique(nthres[take])) > 1L) {
          stop2("Number of thresholds should be unique for each group.")
        }
      }
      nthres <- get_one_value_per_group(nthres, grthres)
    }
    group <- rep(rename(group), nthres)
    thres <- ulapply(unname(nthres), seq_len)
  } else {
    # no grouping variable was specified
    group <- ""
    if (!length(nthres)) {
      # extract number of thresholds from the response values
      nthres <- extract_nthres(x$formula, data, extra_cat = extra_cat)
    }
    if (length(nthres) > 1L) {
      stop2("Number of thresholds needs to be a single value.")
    }
    thres <- seq_len(nthres)
  }
  data.frame(thres, group, stringsAsFactors = FALSE)
}

# extract the number of thresholds from the response values
# @param formula formula with the response on the LHS
# @param data a data.frame from which to extract responses
# @param extra_cat is the first category an extra (hurdle) category?
# @return a single value for the number of thresholds
extract_nthres <- function(formula, data, extra_cat = FALSE) {
  extra_cat <- as_one_logical(extra_cat)
  respform <- validate_resp_formula(formula)
  mr <- model.response(model.frame(respform, data))
  if (is_like_factor(mr)) {
    # the first factor level is the extra category
    diff <- ifelse(extra_cat, 2L, 1L)
    out <- length(levels(factor(mr))) - diff
  } else {
    # 0 is the extra category which does not affect max
    out <- max(mr) - 1L
  }
  if (out < 1L) {
    stop2("Could not extract the number of thresholds. Use ordered factors ",
          "or positive integers as your ordinal response and ensure that ",
          "more than one response category is present.")
  }
  out
}
brms/R/data-helpers.R0000644000176200001440000005536114504264071014127 0ustar liggesusers# update data for use in brms functions
# @param data the data passed by the user
# @param bterms object of class brmsterms
# @param na_action function defining how to treat NAs
# @param drop_unused_levels should unused factor levels be removed?
# @param attr_terms a list of attributes of the terms object of # the original model.frame; only used with newdata; # this ensures that (1) calls to 'poly' work correctly # and (2) that the number of variables matches the number # of variable names; fixes issue #73 # @param knots: a list of knot values for GAMMs # @return model.frame for use in brms functions validate_data <- function(data, bterms, data2 = list(), knots = NULL, na_action = na_omit, drop_unused_levels = TRUE, attr_terms = NULL) { if (missing(data)) { stop2("Data must be specified using the 'data' argument.") } if (is.null(knots)) { knots <- get_knots(data) } data <- try(as.data.frame(data), silent = TRUE) if (is_try_error(data)) { stop2("Argument 'data' must be coercible to a data.frame.") } if (!isTRUE(nrow(data) > 0L)) { stop2("Argument 'data' does not contain observations.") } data <- data_rsv_intercept(data, bterms = bterms) all_vars_formula <- bterms$allvars missing_vars <- setdiff(all_vars(all_vars_formula), names(data)) if (length(missing_vars)) { missing_vars2 <- setdiff(missing_vars, names(data2)) if (length(missing_vars2)) { stop2("The following variables can neither be found in ", "'data' nor in 'data2':\n", collapse_comma(missing_vars2)) } # all initially missing variables can be found in 'data2' # they are not necessarily of the length required for 'data' # so need to be excluded from the evaluation of 'model.frame' missing_vars_formula <- paste0(". ~ . 
", collapse(" - ", missing_vars)) all_vars_formula <- update(all_vars_formula, missing_vars_formula) } all_vars_terms <- terms(all_vars_formula) # ensure that 'data2' comes first in the search path # during the evaluation of model.frame terms_env <- environment(all_vars_terms) environment(all_vars_terms) <- as.environment(as.list(data2)) parent.env(environment(all_vars_terms)) <- terms_env attributes(all_vars_terms)[names(attr_terms)] <- attr_terms # 'terms' prevents correct validation in 'model.frame' attr(data, "terms") <- NULL data <- model.frame( all_vars_terms, data, na.action = na.pass, drop.unused.levels = drop_unused_levels ) data <- na_action(data, bterms = bterms) if (any(grepl("__|_$", colnames(data)))) { stop2("Variable names may not contain double underscores ", "or underscores at the end.") } if (!isTRUE(nrow(data) > 0L)) { stop2("All observations in the data were removed ", "presumably because of NA values.") } groups <- get_group_vars(bterms) data <- combine_groups(data, groups) data <- fix_factor_contrasts(data, ignore = groups) attr(data, "knots") <- knots attr(data, "drop_unused_levels") <- drop_unused_levels data } # validate the 'data2' argument # @param data2 a named list of data objects # @param bterms object returned by 'brmsterms' # @param ... more named list to pass objects to data2 from other sources # only required for backwards compatibility with deprecated arguments # @return a validated named list of data objects validate_data2 <- function(data2, bterms, ...) { # TODO: specify spline-related matrices in 'data2' # this requires adding another parser layer with bterms and data as input if (is.null(data2)) { data2 <- list() } if (!is.list(data2)) { stop2("'data2' must be a list.") } if (length(data2) && !is_named(data2)) { stop2("All elements of 'data2' must be named.") } dots <- list(...) 
for (i in seq_along(dots)) { if (length(dots[[i]])) { stopifnot(is.list(dots[[i]]), is_named(dots[[i]])) data2[names(dots[[i]])] <- dots[[i]] } } # validate autocorrelation matrices acef <- tidy_acef(bterms) sar_M_names <- get_ac_vars(acef, "M", class = "sar") for (M in sar_M_names) { data2[[M]] <- validate_sar_matrix(get_from_data2(M, data2)) attr(data2[[M]], "obs_based_matrix") <- TRUE } car_M_names <- get_ac_vars(acef, "M", class = "car") for (M in car_M_names) { data2[[M]] <- validate_car_matrix(get_from_data2(M, data2)) # observation based CAR matrices are deprecated and # there is no need to label them as observation based } fcor_M_names <- get_ac_vars(acef, "M", class = "fcor") for (M in fcor_M_names) { data2[[M]] <- validate_fcor_matrix(get_from_data2(M, data2)) attr(data2[[M]], "obs_based_matrix") <- TRUE } # validate within-group covariance matrices cov_names <- ufrom_list(get_re(bterms)$gcall, "cov") cov_names <- cov_names[nzchar(cov_names)] for (cov in cov_names) { data2[[cov]] <- validate_recov_matrix(get_from_data2(cov, data2)) } data2 } # get an object from the 'data2' argument get_from_data2 <- function(x, data2) { if (!x %in% names(data2)) { stop2("Object '", x, "' was not found in 'data2'.") } get(x, data2) } # index observation based elements in 'data2' # @param data2 a named list of objects # @param i observation based indices # @return data2 with potentially indexed elements subset_data2 <- function(data2, i) { if (!length(data2)) { return(data2) } stopifnot(is.list(data2), is_named(data2)) for (var in names(data2)) { if (isTRUE(attr(data2[[var]], "obs_based_matrix"))) { # matrices with dimensions equal to the number of observations data2[[var]] <- data2[[var]][i, i, drop = FALSE] attr(data2[[var]], "obs_based_matrix") <- TRUE } } data2 } # add the reserved intercept variables to the data data_rsv_intercept <- function(data, bterms) { fe_forms <- get_effect(bterms, "fe") if (any(ulapply(fe_forms, no_int))) { if ("intercept" %in% 
ulapply(fe_forms, all_vars)) { warning2("Reserved variable name 'intercept' is deprecated. ", "Please use 'Intercept' instead.") } if (any(data[["intercept"]] != 1)) { stop2("Variable name 'intercept' is reserved in models ", "without a population-level intercept.") } if (any(data[["Intercept"]] != 1)) { stop2("Variable name 'Intercept' is reserved in models ", "without a population-level intercept.") } data$intercept <- data$Intercept <- rep(1, length(data[[1]])) } data } # combine grouping factors to form new variables # @param data data.frame to be updated # @param ... the grouping factors to be combined # @return 'data' including the new combined grouping factors combine_groups <- function(data, ...) { group <- c(...) for (i in seq_along(group)) { sgroup <- unlist(strsplit(group[[i]], ":")) if (length(sgroup) > 1L && !group[[i]] %in% names(data)) { new_var <- get(sgroup[1], data) for (j in 2:length(sgroup)) { new_var <- paste0(new_var, "_", get(sgroup[j], data)) } data[[group[[i]]]] <- new_var } } data } # hard code factor contrasts to be independent of the global "contrasts" option # @param data data.frame to be updated # @param olddata: optional data.frame from which contrasts are taken if present # @param ignore: names of variables for which not to fix contrasts # @return 'data' with amended contrasts attributes fix_factor_contrasts <- function(data, olddata = NULL, ignore = NULL) { stopifnot(is(data, "data.frame")) stopifnot(is.null(olddata) || is.list(olddata)) olddata <- as.data.frame(olddata) # fixes issue #105 for (i in seq_along(data)) { needs_contrast <- is.factor(data[[i]]) && !names(data)[i] %in% ignore if (needs_contrast && is.null(attr(data[[i]], "contrasts"))) { old_contrasts <- attr(olddata[[names(data)[i]]], "contrasts") if (!is.null(old_contrasts)) { # take contrasts from olddata contrasts(data[[i]]) <- old_contrasts } else if (length(unique(data[[i]])) > 1L) { # avoid error when supplying only a single level # hard code current global 
"contrasts" option contrasts(data[[i]]) <- contrasts(data[[i]]) } } } data } # order data for use in time-series models # @param data data.frame to be ordered # @param bterms brmsterms of mvbrmsterms object # @return 'data' potentially ordered differently order_data <- function(data, bterms) { # ordering does only matter for time-series models time <- get_ac_vars(bterms, "time", dim = "time") gr <- get_ac_vars(bterms, "gr", dim = "time") if (length(time) > 1L || length(gr) > 1L) { stop2("All time-series structures must have the same ", "'time' and 'gr' variables.") } if (length(time) || length(gr)) { if (length(gr)) { gv <- data[[gr]] } else { gv <- rep(1L, nrow(data)) } if (length(time)) { tv <- data[[time]] } else { tv <- seq_rows(data) } if (any(duplicated(data.frame(gv, tv)))) { stop2("Time points within groups must be unique.") } new_order <- do_call(order, list(gv, tv)) data <- data[new_order, , drop = FALSE] # old_order will allow to retrieve the initial order of the data attr(data, "old_order") <- order(new_order) } data } # subset data according to addition argument 'subset' subset_data <- function(data, bterms) { if (has_subset(bterms)) { # only evaluate a subset of the data subset <- as.logical(get_ad_values(bterms, "subset", "subset", data)) if (length(subset) != nrow(data)) { stop2("Length of 'subset' does not match the rows of 'data'.") } if (anyNA(subset)) { stop2("Subset variables may not contain NAs.") } # cross-formula indexing is no longer trivial for subsetted models check_cross_formula_indexing(bterms) data <- data[subset, , drop = FALSE] attr(data, "subset") <- subset } if (!NROW(data)) { stop2( "All rows of 'data' were removed via 'subset'. ", "Please make sure that variables do not contain NAs ", "for observations in which they are supposed to be used. ", "Please also make sure that each subset variable is ", "TRUE for at least one observation." 
) } data } # like stats:::na.omit.data.frame but allows to certain NA values na_omit <- function(object, bterms, ...) { stopifnot(is.data.frame(object)) nobs <- nrow(object) if (is.mvbrmsterms(bterms)) { responses <- names(bterms$terms) subsets <- lapply(bterms$terms, get_ad_values, "subset", "subset", object) vars_sub <- lapply(bterms$terms, function(x) all_vars(x$allvars)) } vars_keep_na <- vars_keep_na(bterms) omit <- logical(nobs) for (v in names(object)) { x <- object[[v]] vars_v <- all_vars(v) keep_all_na <- all(vars_v %in% vars_keep_na) if (!is.atomic(x) || keep_all_na) { next } if (!is.mvbrmsterms(bterms)) { # remove all NAs in this variable keep_na <- rep(FALSE, nobs) } else { # allow to retain NAs in subsetted variables keep_na <- rep(TRUE, nobs) for (r in responses) { if (any(vars_v %in% vars_sub[[r]])) { if (!is.null(subsets[[r]])) { # keep NAs ignored because of 'subset' keep_na <- keep_na & !subsets[[r]] } else { # remove all NAs in this variable keep_na <- keep_na & FALSE } } } } is_na <- is.na(x) d <- dim(is_na) if (is.null(d) || length(d) != 2L) { omit <- omit | (is_na & !keep_na) } else { for (ii in seq_len(d[2L])) { omit <- omit | (is_na[, ii] & !keep_na) } } } if (any(omit > 0L)) { out <- object[!omit, , drop = FALSE] temp <- setNames(seq(omit)[omit], attr(object, "row.names")[omit]) attr(temp, "class") <- "omit" attr(out, "na.action") <- temp warning2("Rows containing NAs were excluded from the model.") } else { out <- object } out } # get a single value per group # @param x vector of values to extract one value per group # @param gr vector of grouping values # @return a vector of the same length as unique(group) get_one_value_per_group <- function(x, gr) { stopifnot(length(x) == length(gr)) not_dupl_gr <- !duplicated(gr) gr_unique <- gr[not_dupl_gr] to_order <- order(gr_unique) gr_unique <- gr_unique[to_order] out <- x[not_dupl_gr][to_order] names(out) <- gr_unique out } # extract knots values for use in spline terms get_knots <- 
function(data) { attr(data, "knots", TRUE) } get_drop_unused_levels <- function(data) { out <- attr(data, "drop_unused_levels", TRUE) %||% TRUE } # extract name of the data as originally passed by the user get_data_name <- function(data) { out <- attr(data, "data_name", TRUE) if (is.null(out)) { out <- "NULL" } out } #' Validate New Data #' #' Validate new data passed to post-processing methods of \pkg{brms}. Unless you #' are a package developer, you will rarely need to call \code{validate_newdata} #' directly. #' #' @inheritParams prepare_predictions #' @param newdata A \code{data.frame} containing new data to be validated. #' @param object A \code{brmsfit} object. #' @param check_response Logical; Indicates if response variables should #' be checked as well. Defaults to \code{TRUE}. #' @param group_vars Optional names of grouping variables to be validated. #' Defaults to all grouping variables in the model. #' @param req_vars Optional names of variables required in \code{newdata}. #' If \code{NULL} (the default), all variables in the original data #' are required (unless ignored for some other reason). #' @param ... Currently ignored. #' #' @return A validated \code{'data.frame'} based on \code{newdata}. #' #' @export validate_newdata <- function( newdata, object, re_formula = NULL, allow_new_levels = FALSE, newdata2 = NULL, resp = NULL, check_response = TRUE, incl_autocor = TRUE, group_vars = NULL, req_vars = NULL, ... 
) { newdata <- try(as.data.frame(newdata), silent = TRUE) if (is_try_error(newdata)) { stop2("Argument 'newdata' must be coercible to a data.frame.") } object <- restructure(object) object <- exclude_terms(object, incl_autocor = incl_autocor) resp <- validate_resp(resp, object) new_formula <- update_re_terms(formula(object), re_formula) bterms <- brmsterms(new_formula, resp_rhs_all = FALSE) # fill values of not required variables all_vars <- all.vars(bterms$allvars) if (is.null(req_vars)) { req_vars <- all_vars } else { req_vars <- as.character(req_vars) req_vars <- intersect(req_vars, all_vars) } if (is.mvbrmsterms(bterms) && !is.null(resp)) { # variables not used in the included model parts # do not need to be specified in newdata resp <- validate_resp(resp, bterms$responses) form_req_vars <- from_list(bterms$terms[resp], "allvars") form_req_vars <- allvars_formula(form_req_vars) req_vars <- intersect(req_vars, all.vars(form_req_vars)) } not_req_vars <- setdiff(all_vars, req_vars) not_req_vars <- setdiff(not_req_vars, names(newdata)) newdata <- fill_newdata(newdata, not_req_vars, object$data) # check response and addition variables only_resp <- all.vars(bterms$respform) only_resp <- setdiff(only_resp, all.vars(rhs(bterms$allvars))) # always require 'dec' variables to be specified dec_vars <- get_ad_vars(bterms, "dec") missing_resp <- setdiff(c(only_resp, dec_vars), names(newdata)) if (length(missing_resp)) { if (check_response) { stop2("Response variables must be specified in 'newdata'.\n", "Missing variables: ", collapse_comma(missing_resp)) } else { newdata <- fill_newdata(newdata, missing_resp) } } # censoring and weighting vars are unused in post-processing methods cens_vars <- get_ad_vars(bterms, "cens") for (v in setdiff(cens_vars, names(newdata))) { newdata[[v]] <- 0 } weights_vars <- get_ad_vars(bterms, "weights") for (v in setdiff(weights_vars, names(newdata))) { newdata[[v]] <- 1 } mf <- model.frame(object) for (i in seq_along(mf)) { if 
(is_like_factor(mf[[i]])) { mf[[i]] <- as.factor(mf[[i]]) } } # fixes issue #279 newdata <- data_rsv_intercept(newdata, bterms) new_group_vars <- get_group_vars(bterms) if (allow_new_levels && length(new_group_vars)) { # grouping factors do not need to be specified # by the user if new levels are allowed mis_group_vars <- new_group_vars[!grepl(":", new_group_vars)] mis_group_vars <- setdiff(mis_group_vars, names(newdata)) newdata <- fill_newdata(newdata, mis_group_vars) } newdata <- combine_groups(newdata, new_group_vars) # validate factor levels in newdata if (is.null(group_vars)) { group_vars <- get_group_vars(object) } do_check <- union(get_pred_vars(bterms), get_int_vars(bterms)) # do not check variables from the 'unused' argument #1238 unused_arg_vars <- get_unused_arg_vars(bterms) dont_check <- unique(c(group_vars, cens_vars, unused_arg_vars)) dont_check <- setdiff(dont_check, do_check) dont_check <- names(mf) %in% dont_check is_factor <- ulapply(mf, is.factor) factors <- mf[is_factor & !dont_check] if (length(factors)) { factor_names <- names(factors) for (i in seq_along(factors)) { new_factor <- newdata[[factor_names[i]]] if (!is.null(new_factor)) { if (!is.factor(new_factor)) { new_factor <- factor(new_factor) } old_levels <- levels(factors[[i]]) if (length(old_levels) <= 1L) { # contrasts are not defined for factors with 1 or fewer levels next } new_levels <- levels(new_factor) old_contrasts <- contrasts(factors[[i]]) old_ordered <- is.ordered(factors[[i]]) to_zero <- is.na(new_factor) | new_factor %in% "zero__" # don't add the 'zero__' level to response variables is_resp <- factor_names[i] %in% all.vars(bterms$respform) if (!is_resp && any(to_zero)) { levels(new_factor) <- c(new_levels, "zero__") new_factor[to_zero] <- "zero__" old_levels <- c(old_levels, "zero__") old_contrasts <- rbind(old_contrasts, zero__ = 0) } if (any(!new_levels %in% old_levels)) { stop2( "New factor levels are not allowed.", "\nLevels allowed: ", collapse_comma(old_levels), 
"\nLevels found: ", collapse_comma(new_levels) ) } newdata[[factor_names[i]]] <- factor(new_factor, old_levels, ordered = old_ordered) # don't use contrasts(.) here to avoid dimension checks attr(newdata[[factor_names[i]]], "contrasts") <- old_contrasts } } } # check if originally numeric variables are still numeric num_names <- names(mf)[!is_factor] num_names <- setdiff(num_names, group_vars) for (nm in intersect(num_names, names(newdata))) { if (!anyNA(newdata[[nm]]) && !is.numeric(newdata[[nm]])) { stop2("Variable '", nm, "' was originally ", "numeric but is not in 'newdata'.") } } # validate monotonic variables mo_vars <- get_sp_vars(bterms, "mo") if (length(mo_vars)) { # factors have already been checked num_mo_vars <- names(mf)[!is_factor & names(mf) %in% mo_vars] for (v in num_mo_vars) { new_values <- get(v, newdata) min_value <- min(mf[[v]]) invalid <- new_values < min_value | new_values > max(mf[[v]]) invalid <- invalid | !is_wholenumber(new_values) if (sum(invalid)) { stop2("Invalid values in variable '", v, "': ", collapse_comma(new_values[invalid])) } attr(newdata[[v]], "min") <- min_value } } # update_data expects all original variables to be present used_vars <- c(names(newdata), all.vars(bterms$allvars)) used_vars <- union(used_vars, rsv_vars(bterms)) all_vars <- all.vars(str2formula(names(mf))) unused_vars <- setdiff(all_vars, used_vars) newdata <- fill_newdata(newdata, unused_vars) # validate grouping factors new_ranef <- tidy_ranef(bterms, data = mf) new_meef <- tidy_meef(bterms, data = mf) old_levels <- get_levels(new_ranef, new_meef) if (!allow_new_levels) { new_levels <- get_levels( tidy_ranef(bterms, data = newdata), tidy_meef(bterms, data = newdata) ) for (g in names(old_levels)) { unknown_levels <- setdiff(new_levels[[g]], old_levels[[g]]) if (length(unknown_levels)) { unknown_levels <- collapse_comma(unknown_levels) stop2( "Levels ", unknown_levels, " of grouping factor '", g, "' ", "cannot be found in the fitted model. 
", "Consider setting argument 'allow_new_levels' to TRUE." ) } } } # ensure correct handling of functions like 'poly' or 'scale' old_terms <- attr(object$data, "terms") attr_terms <- c("variables", "predvars") attr_terms <- attributes(old_terms)[attr_terms] newdata <- validate_data( newdata, bterms = bterms, na_action = na.pass, drop_unused_levels = FALSE, attr_terms = attr_terms, data2 = current_data2(object, newdata2), knots = get_knots(object$data) ) newdata } # fill newdata with values for not required variables # @param newdata data.frame to be filled # @param vars character vector of not required variables # @param olddata optional data.frame to take values from # @param n row number of olddata to extract values from fill_newdata <- function(newdata, vars, olddata = NULL, n = 1L) { stopifnot(is.data.frame(newdata), is.character(vars)) vars <- setdiff(vars, names(newdata)) if (is.null(olddata)) { if (length(vars)) { newdata[, vars] <- NA } return(newdata) } stopifnot(is.data.frame(olddata), length(n) == 1L) for (v in vars) { # using NA for variables is not safe in all cases # for example when processing splines using mgcv # hence it is safer to use existing data values cval <- olddata[n, v] %||% NA if (length(dim(cval)) == 2L) { # matrix columns don't have automatic broadcasting apparently cval <- matrix(cval, nrow(newdata), ncol(cval), byrow = TRUE) } newdata[[v]] <- cval } newdata } # validate new data2 validate_newdata2 <- function(newdata2, object, ...) { stopifnot(is.brmsfit(object)) bterms <- brmsterms(object$formula) validate_data2(newdata2, bterms = bterms, ...) } # extract the current data current_data <- function(object, newdata = NULL, skip_validation = FALSE, ...) { stopifnot(is.brmsfit(object)) skip_validation <- as_one_logical(skip_validation) if (is.null(newdata)) { data <- object$data } else if (skip_validation) { data <- newdata } else { data <- validate_newdata(newdata, object = object, ...) 
} data } # extract the current data2 current_data2 <- function(object, newdata2 = NULL, skip_validation = FALSE, ...) { stopifnot(is.brmsfit(object)) skip_validation <- as_one_logical(skip_validation) if (is.null(newdata2)) { data2 <- object$data2 } else if (skip_validation) { data2 <- newdata2 } else { data2 <- validate_newdata2(newdata2, object = object, ...) } data2 } brms/R/brm.R0000644000176200001440000007316114427444030012333 0ustar liggesusers#' Fit Bayesian Generalized (Non-)Linear Multivariate Multilevel Models #' #' Fit Bayesian generalized (non-)linear multivariate multilevel models #' using Stan for full Bayesian inference. A wide range of distributions #' and link functions are supported, allowing users to fit -- among others -- #' linear, robust linear, count data, survival, response times, ordinal, #' zero-inflated, hurdle, and even self-defined mixture models all in a #' multilevel context. Further modeling options include non-linear and #' smooth terms, auto-correlation structures, censored data, meta-analytic #' standard errors, and quite a few more. In addition, all parameters of the #' response distributions can be predicted in order to perform distributional #' regression. Prior specifications are flexible and explicitly encourage #' users to apply prior distributions that actually reflect their beliefs. #' In addition, model fit can easily be assessed and compared with #' posterior predictive checks and leave-one-out cross-validation. #' #' @param formula An object of class \code{\link[stats:formula]{formula}}, #' \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can #' be coerced to that classes): A symbolic description of the model to be #' fitted. The details of model specification are explained in #' \code{\link{brmsformula}}. #' @param data An object of class \code{data.frame} (or one that can be coerced #' to that class) containing data of all variables used in the model. 
#' @param family A description of the response distribution and link function to #' be used in the model. This can be a family function, a call to a family #' function or a character string naming the family. Every family function has #' a \code{link} argument allowing to specify the link function to be applied #' on the response variable. If not specified, default links are used. For #' details of supported families see \code{\link{brmsfamily}}. By default, a #' linear \code{gaussian} model is applied. In multivariate models, #' \code{family} might also be a list of families. #' @param prior One or more \code{brmsprior} objects created by #' \code{\link{set_prior}} or related functions and combined using the #' \code{c} method or the \code{+} operator. See also \code{\link{get_prior}} #' for more help. #' @param data2 A named \code{list} of objects containing data, which #' cannot be passed via argument \code{data}. Required for some objects #' used in autocorrelation structures to specify dependency structures #' as well as for within-group covariance matrices. #' @param autocor (Deprecated) An optional \code{\link{cor_brms}} object #' describing the correlation structure within the response variable (i.e., #' the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for #' a description of the available correlation structures. Defaults to #' \code{NULL}, corresponding to no correlations. In multivariate models, #' \code{autocor} might also be a list of autocorrelation structures. #' It is now recommend to specify autocorrelation terms directly #' within \code{formula}. See \code{\link{brmsformula}} for more details. #' @param sparse (Deprecated) Logical; indicates whether the population-level #' design matrices should be treated as sparse (defaults to \code{FALSE}). For #' design matrices with many zeros, this can considerably reduce required #' memory. Sampling speed is currently not improved or even slightly #' decreased. 
It is now recommended to use the \code{sparse} argument of #' \code{\link{brmsformula}} and related functions. #' @param cov_ranef (Deprecated) A list of matrices that are proportional to the #' (within) covariance structure of the group-level effects. The names of the #' matrices should correspond to columns in \code{data} that are used as #' grouping factors. All levels of the grouping factor should appear as #' rownames of the corresponding matrix. This argument can be used, among #' others to model pedigrees and phylogenetic effects. #' It is now recommended to specify those matrices in the formula #' interface using the \code{\link{gr}} and related functions. See #' \code{vignette("brms_phylogenetics")} for more details. #' @param save_pars An object generated by \code{\link{save_pars}} controlling #' which parameters should be saved in the model. The argument has no #' impact on the model fitting itself. #' @param save_ranef (Deprecated) A flag to indicate if group-level effects for #' each level of the grouping factor(s) should be saved (default is #' \code{TRUE}). Set to \code{FALSE} to save memory. The argument has no #' impact on the model fitting itself. #' @param save_mevars (Deprecated) A flag to indicate if draws of latent #' noise-free variables obtained by using \code{me} and \code{mi} terms should #' be saved (default is \code{FALSE}). Saving these draws allows to better #' use methods such as \code{predict} with the latent variables but leads to #' very large \R objects even for models of moderate size and complexity. #' @param save_all_pars (Deprecated) A flag to indicate if draws from all #' variables defined in Stan's \code{parameters} block should be saved #' (default is \code{FALSE}). Saving these draws is required in order to #' apply the methods \code{bridge_sampler}, \code{bayes_factor}, and #' \code{post_prob}. #' @param sample_prior Indicate if draws from priors should be drawn #' additionally to the posterior draws. 
Options are \code{"no"} (the #' default), \code{"yes"}, and \code{"only"}. Among others, these draws can #' be used to calculate Bayes factors for point hypotheses via #' \code{\link{hypothesis}}. Please note that improper priors are not sampled, #' including the default improper priors used by \code{brm}. See #' \code{\link{set_prior}} on how to set (proper) priors. Please also note #' that prior draws for the overall intercept are not obtained by default #' for technical reasons. See \code{\link{brmsformula}} how to obtain prior #' draws for the intercept. If \code{sample_prior} is set to \code{"only"}, #' draws are drawn solely from the priors ignoring the likelihood, which #' allows among others to generate draws from the prior predictive #' distribution. In this case, all parameters must have proper priors. #' @param knots Optional list containing user specified knot values to be used #' for basis construction of smoothing terms. See #' \code{\link[mgcv:gamm]{gamm}} for more details. #' @param drop_unused_levels Should unused factors levels in the data be #' dropped? Defaults to \code{TRUE}. #' @param stanvars An optional \code{stanvars} object generated by function #' \code{\link{stanvar}} to define additional variables for use in #' \pkg{Stan}'s program blocks. #' @param stan_funs (Deprecated) An optional character string containing #' self-defined \pkg{Stan} functions, which will be included in the functions #' block of the generated \pkg{Stan} code. It is now recommended to use the #' \code{stanvars} argument for this purpose instead. #' @param fit An instance of S3 class \code{brmsfit} derived from a previous #' fit; defaults to \code{NA}. If \code{fit} is of class \code{brmsfit}, the #' compiled model associated with the fitted result is re-used and all #' arguments modifying the model code or data are ignored. It is not #' recommended to use this argument directly, but to call the #' \code{\link[brms:update.brmsfit]{update}} method, instead. 
#' @param init Initial values for the sampler. If \code{NULL} (the default) or #' \code{"random"}, Stan will randomly generate initial values for parameters #' in a reasonable range. If \code{0}, all parameters are initialized to zero #' on the unconstrained space. This option is sometimes useful for certain #' families, as it happens that default random initial values cause draws to #' be essentially constant. Generally, setting \code{init = 0} is worth a try, #' if chains do not initialize or behave well. Alternatively, \code{init} can #' be a list of lists containing the initial values, or a function (or #' function name) generating initial values. The latter options are mainly #' implemented for internal testing but are available to users if necessary. #' If specifying initial values using a list or a function then currently the #' parameter names must correspond to the names used in the generated Stan #' code (not the names used in \R). For more details on specifying initial #' values you can consult the documentation of the selected \code{backend}. #' @param inits (Deprecated) Alias of \code{init}. #' @param chains Number of Markov chains (defaults to 4). #' @param iter Number of total iterations per chain (including warmup; defaults #' to 2000). #' @param warmup A positive integer specifying number of warmup (aka burnin) #' iterations. This also specifies the number of iterations used for stepsize #' adaptation, so warmup draws should not be used for inference. The number #' of warmup should not be larger than \code{iter} and the default is #' \code{iter/2}. #' @param thin Thinning rate. Must be a positive integer. Set \code{thin > 1} to #' save memory and computation time if \code{iter} is large. #' @param cores Number of cores to use when executing the chains in parallel, #' which defaults to 1 but we recommend setting the \code{mc.cores} option to #' be as many processors as the hardware and RAM allow (up to the number of #' chains). 
For non-Windows OS in non-interactive \R sessions, forking is used #' instead of PSOCK clusters. #' @param threads Number of threads to use in within-chain parallelization. For #' more control over the threading process, \code{threads} may also be a #' \code{brmsthreads} object created by \code{\link{threading}}. Within-chain #' parallelization is experimental! We recommend its use only if you are #' experienced with Stan's \code{reduce_sum} function and have a slow running #' model that cannot be sped up by any other means. Can be set globally for #' the current \R session via the \code{"brms.threads"} option (see #' \code{\link{options}}). #' @param opencl The platform and device IDs of the OpenCL device to use for #' fitting using GPU support. If you don't know the IDs of your OpenCL device, #' \code{c(0,0)} is most likely what you need. For more details, see #' \code{\link{opencl}}. Can be set globally for the current \R session via #' the \code{"brms.opencl"} option #' @param normalize Logical. Indicates whether normalization constants should #' be included in the Stan code (defaults to \code{TRUE}). Setting it #' to \code{FALSE} requires Stan version >= 2.25 to work. If \code{FALSE}, #' sampling efficiency may be increased but some post processing functions #' such as \code{\link{bridge_sampler}} will not be available. Can be #' controlled globally for the current \R session via the `brms.normalize` #' option. #' @param algorithm Character string naming the estimation approach to use. #' Options are \code{"sampling"} for MCMC (the default), \code{"meanfield"} for #' variational inference with independent normal distributions, #' \code{"fullrank"} for variational inference with a multivariate normal #' distribution, or \code{"fixed_param"} for sampling from fixed parameter #' values. Can be set globally for the current \R session via the #' \code{"brms.algorithm"} option (see \code{\link{options}}). 
#' @param backend Character string naming the package to use as the backend for #' fitting the Stan model. Options are \code{"rstan"} (the default) or #' \code{"cmdstanr"}. Can be set globally for the current \R session via the #' \code{"brms.backend"} option (see \code{\link{options}}). Details on the #' \pkg{rstan} and \pkg{cmdstanr} packages are available at #' \url{https://mc-stan.org/rstan/} and \url{https://mc-stan.org/cmdstanr/}, #' respectively. Additionally a \code{"mock"} backend is available to make #' testing \pkg{brms} and packages that depend on it easier. #' The \code{"mock"} backend does not actually do any fitting, it only checks #' the generated Stan code for correctness and then returns whatever is passed #' in an additional \code{mock_fit} argument as the result of the fit. #' @param control A named \code{list} of parameters to control the sampler's #' behavior. It defaults to \code{NULL} so all the default values are used. #' The most important control parameters are discussed in the 'Details' #' section below. For a comprehensive overview see #' \code{\link[rstan:stan]{stan}}. #' @param future Logical; If \code{TRUE}, the \pkg{\link[future:future]{future}} #' package is used for parallel execution of the chains and argument #' \code{cores} will be ignored. Can be set globally for the current \R #' session via the \code{"future"} option. The execution type is controlled via #' \code{\link[future:plan]{plan}} (see the examples section below). #' @param silent Verbosity level between \code{0} and \code{2}. #' If \code{1} (the default), most of the #' informational messages of compiler and sampler are suppressed. #' If \code{2}, even more messages are suppressed. The actual #' sampling progress is still printed. Set \code{refresh = 0} to turn this off #' as well. If using \code{backend = "rstan"} you can also set #' \code{open_progress = FALSE} to prevent opening additional progress bars. 
#' @param seed The seed for random number generation to make results #' reproducible. If \code{NA} (the default), \pkg{Stan} will set the seed #' randomly. #' @param save_model Either \code{NULL} or a character string. In the latter #' case, the model's Stan code is saved via \code{\link{cat}} in a text file #' named after the string supplied in \code{save_model}. #' @param file Either \code{NULL} or a character string. In the latter case, the #' fitted model object is saved via \code{\link{saveRDS}} in a file named #' after the string supplied in \code{file}. The \code{.rds} extension is #' added automatically. If the file already exists, \code{brm} will load and #' return the saved model object instead of refitting the model. #' Unless you specify the \code{file_refit} argument as well, the existing #' files won't be overwritten, you have to manually remove the file in order #' to refit and save the model under an existing file name. The file name #' is stored in the \code{brmsfit} object for later usage. #' @param file_compress Logical or a character string, specifying one of the #' compression algorithms supported by \code{\link{saveRDS}}. If the #' \code{file} argument is provided, this compression will be used when saving #' the fitted model object. #' @param file_refit Modifies when the fit stored via the \code{file} argument #' is re-used. Can be set globally for the current \R session via the #' \code{"brms.file_refit"} option (see \code{\link{options}}). #' For \code{"never"} (default) the fit is always loaded if it #' exists and fitting is skipped. For \code{"always"} the model is always #' refitted. If set to \code{"on_change"}, brms will #' refit the model if model, data or algorithm as passed to Stan differ from #' what is stored in the file. This also covers changes in priors, #' \code{sample_prior}, \code{stanvars}, covariance structure, etc. 
If you #' believe there was a false positive, you can use #' \code{\link{brmsfit_needs_refit}} to see why refit is deemed necessary. #' Refit will not be triggered for changes in additional parameters of the fit #' (e.g., initial values, number of iterations, control arguments, ...). A #' known limitation is that a refit will be triggered if within-chain #' parallelization is switched on/off. #' @param empty Logical. If \code{TRUE}, the Stan model is not created #' and compiled and the corresponding \code{'fit'} slot of the \code{brmsfit} #' object will be empty. This is useful if you have estimated a brms-created #' Stan model outside of \pkg{brms} and want to feed it back into the package. #' @param rename For internal use only. #' @param stan_model_args A \code{list} of further arguments passed to #' \code{\link[rstan:stan_model]{rstan::stan_model}} for \code{backend = #' "rstan"} or to \code{cmdstanr::cmdstan_model} for \code{backend = #' "cmdstanr"}, which allows to change how models are compiled. #' @param ... Further arguments passed to Stan. #' For \code{backend = "rstan"} the arguments are passed to #' \code{\link[rstan]{sampling}} or \code{\link[rstan]{vb}}. #' For \code{backend = "cmdstanr"} the arguments are passed to the #' \code{cmdstanr::sample} or \code{cmdstanr::variational} method. #' #' @return An object of class \code{brmsfit}, which contains the posterior #' draws along with many other useful information about the model. Use #' \code{methods(class = "brmsfit")} for an overview on available methods. #' #' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com} #' #' @details Fit a generalized (non-)linear multivariate multilevel model via #' full Bayesian inference using Stan. A general overview is provided in the #' vignettes \code{vignette("brms_overview")} and #' \code{vignette("brms_multilevel")}. For a full list of available vignettes #' see \code{vignette(package = "brms")}. 
#' #' \bold{Formula syntax of brms models} #' #' Details of the formula syntax applied in \pkg{brms} can be found in #' \code{\link{brmsformula}}. #' #' \bold{Families and link functions} #' #' Details of families supported by \pkg{brms} can be found in #' \code{\link{brmsfamily}}. #' #' \bold{Prior distributions} #' #' Priors should be specified using the #' \code{\link[brms:set_prior]{set_prior}} function. Its documentation #' contains detailed information on how to correctly specify priors. To find #' out on which parameters or parameter classes priors can be defined, use #' \code{\link[brms:get_prior]{get_prior}}. Default priors are chosen to be #' non or very weakly informative so that their influence on the results will #' be negligible and you usually don't have to worry about them. However, #' after getting more familiar with Bayesian statistics, I recommend you to #' start thinking about reasonable informative priors for your model #' parameters: Nearly always, there is at least some prior information #' available that can be used to improve your inference. #' #' \bold{Adjusting the sampling behavior of \pkg{Stan}} #' #' In addition to choosing the number of iterations, warmup draws, and #' chains, users can control the behavior of the NUTS sampler, by using the #' \code{control} argument. The most important reason to use \code{control} is #' to decrease (or eliminate at best) the number of divergent transitions that #' cause a bias in the obtained posterior draws. Whenever you see the #' warning "There were x divergent transitions after warmup." you should #' really think about increasing \code{adapt_delta}. To do this, write #' \code{control = list(adapt_delta = )}, where \code{} should usually #' be value between \code{0.8} (current default) and \code{1}. Increasing #' \code{adapt_delta} will slow down the sampler but will decrease the number #' of divergent transitions threatening the validity of your posterior #' draws. 
#' #' Another problem arises when the depth of the tree being evaluated in each #' iteration is exceeded. This is less common than having divergent #' transitions, but may also bias the posterior draws. When it happens, #' \pkg{Stan} will throw out a warning suggesting to increase #' \code{max_treedepth}, which can be accomplished by writing \code{control = #' list(max_treedepth = )} with a positive integer \code{} that should #' usually be larger than the current default of \code{10}. For more details #' on the \code{control} argument see \code{\link[rstan:stan]{stan}}. #' #' @references #' Paul-Christian Buerkner (2017). brms: An R Package for Bayesian Multilevel #' Models Using Stan. \emph{Journal of Statistical Software}, 80(1), 1-28. #' \code{doi:10.18637/jss.v080.i01} #' #' Paul-Christian Buerkner (2018). Advanced Bayesian Multilevel Modeling #' with the R Package brms. \emph{The R Journal}. 10(1), 395–411. #' \code{doi:10.32614/RJ-2018-017} #' #' @seealso \code{\link{brms}}, \code{\link{brmsformula}}, #' \code{\link{brmsfamily}}, \code{\link{brmsfit}} #' #' @examples #' \dontrun{ #' # Poisson regression for the number of seizures in epileptic patients #' # using normal priors for population-level effects #' # and half-cauchy priors for standard deviations of group-level effects #' prior1 <- prior(normal(0,10), class = b) + #' prior(cauchy(0,2), class = sd) #' fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson(), prior = prior1) #' #' # generate a summary of the results #' summary(fit1) #' #' # plot the MCMC chains as well as the posterior distributions #' plot(fit1, ask = FALSE) #' #' # predict responses based on the fitted model #' head(predict(fit1)) #' #' # plot conditional effects for each predictor #' plot(conditional_effects(fit1), ask = FALSE) #' #' # investigate model fit #' loo(fit1) #' pp_check(fit1) #' #' #' # Ordinal regression modeling patient's rating of inhaler instructions #' # category specific effects 
are estimated for variable 'treat' #' fit2 <- brm(rating ~ period + carry + cs(treat), #' data = inhaler, family = sratio("logit"), #' prior = set_prior("normal(0,5)"), chains = 2) #' summary(fit2) #' plot(fit2, ask = FALSE) #' WAIC(fit2) #' #' #' # Survival regression modeling the time between the first #' # and second recurrence of an infection in kidney patients. #' fit3 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), #' data = kidney, family = lognormal()) #' summary(fit3) #' plot(fit3, ask = FALSE) #' plot(conditional_effects(fit3), ask = FALSE) #' #' #' # Probit regression using the binomial family #' ntrials <- sample(1:10, 100, TRUE) #' success <- rbinom(100, size = ntrials, prob = 0.4) #' x <- rnorm(100) #' data4 <- data.frame(ntrials, success, x) #' fit4 <- brm(success | trials(ntrials) ~ x, data = data4, #' family = binomial("probit")) #' summary(fit4) #' #' #' # Non-linear Gaussian model #' fit5 <- brm( #' bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), #' ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, #' nl = TRUE), #' data = loss, family = gaussian(), #' prior = c( #' prior(normal(5000, 1000), nlpar = "ult"), #' prior(normal(1, 2), nlpar = "omega"), #' prior(normal(45, 10), nlpar = "theta") #' ), #' control = list(adapt_delta = 0.9) #' ) #' summary(fit5) #' conditional_effects(fit5) #' #' #' # Normal model with heterogeneous variances #' data_het <- data.frame( #' y = c(rnorm(50), rnorm(50, 1, 2)), #' x = factor(rep(c("a", "b"), each = 50)) #' ) #' fit6 <- brm(bf(y ~ x, sigma ~ 0 + x), data = data_het) #' summary(fit6) #' plot(fit6) #' conditional_effects(fit6) #' #' # extract estimated residual SDs of both groups #' sigmas <- exp(as.data.frame(fit6, variable = "^b_sigma_", regex = TRUE)) #' ggplot(stack(sigmas), aes(values)) + #' geom_density(aes(fill = ind)) #' #' #' # Quantile regression predicting the 25%-quantile #' fit7 <- brm(bf(y ~ x, quantile = 0.25), data = data_het, #' family = asym_laplace()) #' summary(fit7) #' 
conditional_effects(fit7) #' #' #' # use the future package for more flexible parallelization #' library(future) #' plan(multiprocess) #' fit7 <- update(fit7, future = TRUE) #' #' #' # fit a model manually via rstan #' scode <- make_stancode(count ~ Trt, data = epilepsy) #' sdata <- make_standata(count ~ Trt, data = epilepsy) #' stanfit <- rstan::stan(model_code = scode, data = sdata) #' # feed the Stan model back into brms #' fit8 <- brm(count ~ Trt, data = epilepsy, empty = TRUE) #' fit8$fit <- stanfit #' fit8 <- rename_pars(fit8) #' summary(fit8) #' } #' #' @import parallel #' @import methods #' @import stats #' @import Rcpp #' @export brm <- function(formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sample_prior = "no", sparse = NULL, knots = NULL, drop_unused_levels = TRUE, stanvars = NULL, stan_funs = NULL, fit = NA, save_pars = NULL, save_ranef = NULL, save_mevars = NULL, save_all_pars = NULL, init = NULL, inits = NULL, chains = 4, iter = 2000, warmup = floor(iter / 2), thin = 1, cores = getOption("mc.cores", 1), threads = getOption("brms.threads", NULL), opencl = getOption("brms.opencl", NULL), normalize = getOption("brms.normalize", TRUE), control = NULL, algorithm = getOption("brms.algorithm", "sampling"), backend = getOption("brms.backend", "rstan"), future = getOption("future", FALSE), silent = 1, seed = NA, save_model = NULL, stan_model_args = list(), file = NULL, file_compress = TRUE, file_refit = getOption("brms.file_refit", "never"), empty = FALSE, rename = TRUE, ...) { # optionally load brmsfit from file # Loading here only when we should directly load the file. 
# The "on_change" option needs sdata and scode to be built file_refit <- match.arg(file_refit, file_refit_options()) if (!is.null(file) && file_refit == "never") { x <- read_brmsfit(file) if (!is.null(x)) { return(x) } } # validate arguments later passed to Stan algorithm <- match.arg(algorithm, algorithm_choices()) backend <- match.arg(backend, backend_choices()) normalize <- as_one_logical(normalize) silent <- validate_silent(silent) iter <- as_one_numeric(iter) warmup <- as_one_numeric(warmup) thin <- as_one_numeric(thin) chains <- as_one_numeric(chains) cores <- as_one_numeric(cores) init <- use_alias(init, inits) threads <- validate_threads(threads) opencl <- validate_opencl(opencl) future <- as_one_logical(future) && chains > 0L seed <- as_one_numeric(seed, allow_na = TRUE) empty <- as_one_logical(empty) rename <- as_one_logical(rename) # initialize brmsfit object if (is.brmsfit(fit)) { # re-use existing model x <- fit x$criteria <- list() sdata <- standata(x) if (!is.null(file) && file_refit == "on_change") { x_from_file <- read_brmsfit(file) if (!is.null(x_from_file)) { needs_refit <- brmsfit_needs_refit( x_from_file, scode = stancode(x), sdata = sdata, data = x$data, algorithm = algorithm, silent = silent ) if (!needs_refit) { return(x_from_file) } } } backend <- x$backend model <- compiled_model(x) exclude <- exclude_pars(x) } else { # build new model formula <- validate_formula( formula, data = data, family = family, autocor = autocor, sparse = sparse, cov_ranef = cov_ranef ) family <- get_element(formula, "family") bterms <- brmsterms(formula) data2 <- validate_data2( data2, bterms = bterms, get_data2_autocor(formula), get_data2_cov_ranef(formula) ) data_name <- substitute_name(data) data <- validate_data( data, bterms = bterms, data2 = data2, knots = knots, drop_unused_levels = drop_unused_levels ) attr(data, "data_name") <- data_name prior <- .validate_prior( prior, bterms = bterms, data = data, sample_prior = sample_prior ) stanvars <- 
validate_stanvars(stanvars, stan_funs = stan_funs) save_pars <- validate_save_pars( save_pars, save_ranef = save_ranef, save_mevars = save_mevars, save_all_pars = save_all_pars ) ranef <- tidy_ranef(bterms, data = data) # generate Stan code model <- .make_stancode( bterms, data = data, prior = prior, stanvars = stanvars, save_model = save_model, backend = backend, threads = threads, opencl = opencl, normalize = normalize ) # initialize S3 object x <- brmsfit( formula = formula, data = data, data2 = data2, prior = prior, stanvars = stanvars, model = model, algorithm = algorithm, backend = backend, threads = threads, opencl = opencl, save_pars = save_pars, ranef = ranef, family = family, basis = standata_basis(bterms, data = data), stan_args = nlist(init, silent, control, stan_model_args, ...) ) exclude <- exclude_pars(x) # generate Stan data before compiling the model to avoid # unnecessary compilations in case of invalid data sdata <- .make_standata( bterms, data = data, prior = prior, data2 = data2, stanvars = stanvars, threads = threads ) if (empty) { # return the brmsfit object with an empty 'fit' slot return(x) } if (!is.null(file) && file_refit == "on_change") { x_from_file <- read_brmsfit(file) if (!is.null(x_from_file)) { needs_refit <- brmsfit_needs_refit( x_from_file, scode = model, sdata = sdata, data = data, algorithm = algorithm, silent = silent ) if (!needs_refit) { return(x_from_file) } } } # compile the Stan model compile_args <- stan_model_args compile_args$model <- model compile_args$backend <- backend compile_args$threads <- threads compile_args$opencl <- opencl compile_args$silent <- silent model <- do_call(compile_model, compile_args) } # fit the Stan model fit_args <- nlist( model, sdata, algorithm, backend, iter, warmup, thin, chains, cores, threads, opencl, init, exclude, control, future, seed, silent, ... 
) x$fit <- do_call(fit_model, fit_args) # rename parameters to have human readable names if (rename) { x <- rename_pars(x) } if (!is.null(file)) { x <- write_brmsfit(x, file, compress = file_compress) } x } brms/R/misc.R0000644000176200001440000007102314504263724012506 0ustar liggesusers# type-stable indexing of vector and matrix type objects # @param x an R object typically a vector or matrix # @param i optional index; if NULL, x is returned unchanged # @param row indicating if rows or cols should be indexed # only relevant if x has two or three dimensions p <- function(x, i = NULL, row = TRUE) { # TODO: replace by "slice" if (isTRUE(length(dim(x)) > 3L)) { stop2("'p' can only handle objects up to 3 dimensions.") } if (!length(i)) { out <- x } else if (length(dim(x)) == 2L) { if (row) { out <- x[i, , drop = FALSE] } else { out <- x[, i, drop = FALSE] } } else if (length(dim(x)) == 3L) { if (row) { out <- x[i, , , drop = FALSE] } else { out <- x[, i, , drop = FALSE] } } else { out <- x[i] } out } # extract parts of an object with selective dropping of dimensions # @param x,...,drop same as in x[..., drop] # @param drop_dim Optional numeric or logical vector controlling # which dimensions to drop. Will overwrite argument 'drop'. 
extract <- function(x, ..., drop = FALSE, drop_dim = NULL) { if (!length(dim(x))) { return(x[...]) } if (length(drop_dim)) { drop <- FALSE } else { drop <- as_one_logical(drop) } out <- x[..., drop = drop] if (drop || !length(drop_dim) || any(dim(out) == 0L)) { return(out) } if (is.numeric(drop_dim)) { drop_dim <- seq_along(dim(x)) %in% drop_dim } if (!is.logical(drop_dim)) { stop2("'drop_dim' needs to be logical or numeric.") } keep <- dim(out) > 1L | !drop_dim new_dim <- dim(out)[keep] if (length(new_dim) <= 1L) { # use vectors instead of 1D arrays new_dim <- NULL } dim(out) <- new_dim out } # extract slices of one array dimension without dropping other dimensions # @param x an array # @param dim dimension from which to take the slice # @param i slice index # @param drop Logical (length 1) indicating whether to drop dimension `dim`. slice <- function(x, dim, i, drop = TRUE) { ndim <- length(dim(x)) commas1 <- collapse(rep(", ", dim - 1)) commas2 <- collapse(rep(", ", ndim - dim)) drop_dim <- ifelse(drop, ", drop_dim = dim", "") expr <- paste0("extract(x, ", commas1, "i", commas2, drop_dim, ")") eval2(expr) } # slice out columns without dropping other dimensions # @param x an array; a vector or 1D array is treated as already sliced # @param i column index slice_col <- function(x, i) { if (length(dim(x)) < 2L) { # a vector or 1D array is treated as already sliced return(x) } slice(x, 2, i) } seq_rows <- function(x) { seq_len(NROW(x)) } seq_cols <- function(x) { seq_len(NCOL(x)) } seq_dim <- function(x, dim) { dim <- as_one_numeric(dim) if (dim == 1) { len <- NROW(x) } else if (dim == 2) { len <- NCOL(x) } else { len <- dim(x)[dim] } if (length(len) == 1L && !isNA(len)) { out <- seq_len(len) } else { out <- integer(0) } out } # match rows in x with rows in y match_rows <- function(x, y, ...) { x <- as.data.frame(x) y <- as.data.frame(y) x <- do.call("paste", c(x, sep = "\r")) y <- do.call("paste", c(y, sep = "\r")) match(x, y, ...) 
} # find elements of 'x' matching sub-elements passed via 'ls' and '...' find_elements <- function(x, ..., ls = list(), fun = '%in%') { x <- as.list(x) if (!length(x)) { return(logical(0)) } out <- rep(TRUE, length(x)) ls <- c(ls, list(...)) if (!length(ls)) { return(out) } if (is.null(names(ls))) { stop("Argument 'ls' must be named.") } for (name in names(ls)) { tmp <- from_list(x, name) out <- out & do_call(fun, list(tmp, ls[[name]])) } out } # find rows of 'x' matching columns passed via 'ls' and '...' # similar to 'find_elements' but for matrix like objects find_rows <- function(x, ..., ls = list(), fun = '%in%') { x <- as.data.frame(x) if (!nrow(x)) { return(logical(0)) } out <- rep(TRUE, nrow(x)) ls <- c(ls, list(...)) if (!length(ls)) { return(out) } if (is.null(names(ls))) { stop("Argument 'ls' must be named.") } for (name in names(ls)) { out <- out & do_call(fun, list(x[[name]], ls[[name]])) } out } # subset 'x' using arguments passed via 'ls' and '...' subset2 <- function(x, ..., ls = list(), fun = '%in%') { x[find_rows(x, ..., ls = ls, fun = fun), , drop = FALSE] } # convert array to list of elements with reduced dimension # @param x an arrary of dimension d # @return a list of arrays of dimension d-1 array2list <- function(x) { if (is.null(dim(x))) { return(as.list(x)) } ndim <- length(dim(x)) out <- list(length = dim(x)[ndim]) ind <- collapse(rep(",", ndim - 1)) for (i in seq_len(dim(x)[ndim])) { out[[i]] <- eval2(paste0("x[", ind, i, "]")) if (length(dim(x)) > 2) { # avoid accidental dropping of other dimensions dim(out[[i]]) <- dim(x)[-ndim] } } names(out) <- dimnames(x)[[ndim]] out } # move elements to the start of a named object move2start <- function(x, first) { x[c(first, setdiff(names(x), first))] } # move elements to the end of a named object move2end <- function(x, last) { x[c(setdiff(names(x), last), last)] } # wrapper around replicate but without simplifying repl <- function(expr, n) { replicate(n, expr, simplify = FALSE) } # find the first 
element in A that is greater than target # @param A a matrix # @param target a vector of length nrow(A) # @param i column of A being checked first # @return a vector of the same length as target containing the # column ids where A[,i] was first greater than target first_greater <- function(A, target, i = 1) { ifelse(target <= A[, i] | ncol(A) == i, i, first_greater(A, target, i + 1)) } # check if an object is NULL isNULL <- function(x) { is.null(x) || ifelse(is.vector(x), all(sapply(x, is.null)), FALSE) } # recursively removes NULL entries from an object rmNULL <- function(x, recursive = TRUE) { x <- Filter(Negate(isNULL), x) if (recursive) { x <- lapply(x, function(x) if (is.list(x)) rmNULL(x) else x) } x } # find the first argument that is not NULL first_not_null <- function(...) { dots <- list(...) out <- NULL i <- 1L while (isNULL(out) && i <= length(dots)) { if (!isNULL(dots[[i]])) { out <- dots[[i]] } i <- i + 1L } out } is_atomic_or_null <- function(x) { is.atomic(x) || is.null(x) } isNA <- function(x) { length(x) == 1L && is.na(x) } is_equal <- function(x, y, check.attributes = FALSE, ...) 
{ isTRUE(all.equal(x, y, check.attributes = check.attributes, ...)) } # check if 'x' will behave like a factor in design matrices is_like_factor <- function(x) { is.factor(x) || is.character(x) || is.logical(x) } # as.factor but allows to pass levels as_factor <- function(x, levels = NULL) { if (is.null(levels)) { out <- as.factor(x) } else { out <- factor(x, levels = levels) } out } # coerce 'x' to a single logical value as_one_logical <- function(x, allow_na = FALSE) { s <- substitute(x) x <- as.logical(x) if (length(x) != 1L || anyNA(x) && !allow_na) { s <- deparse0(s, max_char = 100L) stop2("Cannot coerce '", s, "' to a single logical value.") } x } # coerce 'x' to a single integer value as_one_integer <- function(x, allow_na = FALSE) { s <- substitute(x) x <- SW(as.integer(x)) if (length(x) != 1L || anyNA(x) && !allow_na) { s <- deparse0(s, max_char = 100L) stop2("Cannot coerce '", s, "' to a single integer value.") } x } # coerce 'x' to a single numeric value as_one_numeric <- function(x, allow_na = FALSE) { s <- substitute(x) x <- SW(as.numeric(x)) if (length(x) != 1L || anyNA(x) && !allow_na) { s <- deparse0(s, max_char = 100L) stop2("Cannot coerce '", s, "' to a single numeric value.") } x } # coerce 'x' to a single character string as_one_character <- function(x, allow_na = FALSE) { s <- substitute(x) x <- as.character(x) if (length(x) != 1L || anyNA(x) && !allow_na) { s <- deparse0(s, max_char = 100L) stop2("Cannot coerce '", s, "' to a single character value.") } x } # coerce 'x' to a single character variable name as_one_variable <- function(x, allow_na = TRUE) { x <- as_one_character(x) if (x == "NA" && allow_na) { return(x) } if (!nzchar(x) || !is_equal(x, all_vars(x))) { stop2("Cannot coerce '", x, "' to a single variable name.") } x } has_rows <- function(x) { isTRUE(nrow(x) > 0L) } has_cols <- function(x) { isTRUE(ncol(x) > 0L) } # expand arguments to the same length # @param ... 
arguments to expand # @param length optional expansion length # otherwise taken to be the largest supplied length # @return a data.frame with one variable per element in '...' expand <- function(..., dots = list(), length = NULL) { dots <- c(dots, list(...)) max_dim <- NULL if (is.null(length)) { lengths <- lengths(dots) length <- max(lengths) max_dim <- dim(dots[[match(length, lengths)]]) } out <- as.data.frame(lapply(dots, rep, length.out = length)) structure(out, max_dim = max_dim) } # structure but ignore NULL structure_not_null <- function(.Data, ...) { if (!is.null(.Data)) { .Data <- structure(.Data, ...) } .Data } # remove specified attributes rm_attr <- function(x, attr) { attributes(x)[attr] <- NULL x } # unidimensional subsetting while keeping attributes subset_keep_attr <- function(x, y) { att <- attributes(x) x <- x[y] att$names <- names(x) attributes(x) <- att x } '%||%' <- function(x, y) { if (is.null(x)) x <- y x } # check if 'x' is a whole number (integer) is_wholenumber <- function(x, tol = .Machine$double.eps) { if (is.numeric(x)) { out <- abs(x - round(x)) < tol } else { out <- rep(FALSE, length(x)) } dim(out) <- dim(x) out } # helper function to check symmetry of a matrix is_symmetric <- function(x, tol = sqrt(.Machine$double.eps)) { isSymmetric(x, tol = tol, check.attributes = FALSE) } # unlist lapply output ulapply <- function(X, FUN, ..., recursive = TRUE, use.names = TRUE) { unlist(lapply(X, FUN, ...), recursive, use.names) } # rbind lapply output rblapply <- function(X, FUN, ...) { do.call(rbind, lapply(X, FUN, ...)) } # cbind lapply output cblapply <- function(X, FUN, ...) { do.call(cbind, lapply(X, FUN, ...)) } # parallel lapply sensitive to the operating system plapply <- function(X, FUN, cores = 1, ...) { if (cores == 1) { out <- lapply(X, FUN, ...) } else { if (!os_is_windows()) { out <- parallel::mclapply(X = X, FUN = FUN, mc.cores = cores, ...) 
} else { cl <- parallel::makePSOCKcluster(cores) on.exit(parallel::stopCluster(cl)) out <- parallel::parLapply(cl = cl, X = X, fun = FUN, ...) } } out } # extract objects stored in each element of a list # @param x a list-like object # @param name name of the object to extract from_list <- function(x, name, ...) { lapply(x, "[[", name, ...) } # unlist from_list output ufrom_list <- function(x, name, ..., recursive = TRUE, use.names = TRUE) { unlist(from_list(x, name, ...), recursive, use.names) } # check if the operating system is Windows os_is_windows <- function() { isTRUE(Sys.info()[['sysname']] == "Windows") } # find variables in a character string or expression all_vars <- function(expr, ...) { if (is.character(expr)) { expr <- str2expression(expr) } all.vars(expr, ...) } # reimplemented for older R versions # see ?parse in R 3.6 or higher str2expression <- function(x) { parse(text = x, keep.source = FALSE) } # reimplemented for older R versions # see ?parse in R 3.6 or higher str2lang <- function(x) { str2expression(x)[[1]] } # append list(...) to x lc <- function(x, ...) { dots <- rmNULL(list(...), recursive = FALSE) c(x, dots) } 'c<-' <- function(x, value) { c(x, value) } 'lc<-' <- function(x, value) { lc(x, value) } collapse <- function(..., sep = "") { paste(..., sep = sep, collapse = "") } collapse_comma <- function(...) 
{ paste0("'", ..., "'", collapse = ", ") } # add characters to an existing string 'str_add<-' <- function(x, start = FALSE, value) { if (start) paste0(value, x) else paste0(x, value) } # add list of characters to an existing list 'str_add_list<-' <- function(x, start = FALSE, value) { stopifnot(is.list(x), is.list(value)) out <- if (start) list(value, x) else list(x, value) collapse_lists(ls = out) } # type-stable if clause for strings with default else output str_if <- function(cond, yes, no = "") { cond <- as_one_logical(cond) if (cond) as.character(yes) else as.character(no) } # select elements which match a regex pattern str_subset <- function(x, pattern, ...) { x[grepl(pattern, x, ...)] } # similar to glue::glue but specialized for generating Stan code glue <- function(..., sep = "", collapse = NULL, envir = parent.frame(), open = "{", close = "}", na = "NA") { dots <- list(...) dots <- dots[lengths(dots) > 0L] args <- list( .x = NULL, .sep = sep, .envir = envir, .open = open, .close = close, .na = na, .trim = FALSE, .transformer = zero_length_transformer ) out <- do.call(glue::glue_data, c(dots, args)) if (!is.null(collapse)) { collapse <- as_one_character(collapse) out <- paste0(out, collapse = collapse) } out } # used in 'glue' to handle zero-length inputs zero_length_transformer <- function(text, envir) { out <- glue::identity_transformer(text, envir) if (!length(out)) { out <- "" } out } # collapse strings evaluated with glue cglue <- function(..., envir = parent.frame()) { glue(..., envir = envir, collapse = "") } # check if a certain package is installed # @param package package name # @param version optional minimal version number to require require_package <- function(package, version = NULL) { if (!requireNamespace(package, quietly = TRUE)) { stop2("Please install the '", package, "' package.") } if (!is.null(version)) { version <- as.package_version(version) if (utils::packageVersion(package) < version) { stop2("Please install package '", package, 
"' version ", version, " or higher.") } } invisible(TRUE) } # rename specified patterns in a character vector # @param x a character vector to be renamed # @param pattern the regular expressions in x to be replaced # @param replacement the replacements # @param fixed same as for 'gsub' # @param check_dup: logical; check for duplications in x after renaming # @param ... passed to 'gsub' # @return renamed character vector of the same length as x rename <- function(x, pattern = NULL, replacement = NULL, fixed = TRUE, check_dup = FALSE, ...) { pattern <- as.character(pattern) replacement <- as.character(replacement) if (!length(pattern) && !length(replacement)) { # default renaming to avoid special characters in coeffcient names pattern <- c( " ", "(", ")", "[", "]", ",", "\"", "'", "?", "+", "-", "*", "/", "^", "=" ) replacement <- c(rep("", 9), "P", "M", "MU", "D", "E", "EQ") } if (length(replacement) == 1L) { replacement <- rep(replacement, length(pattern)) } stopifnot(length(pattern) == length(replacement)) # avoid zero-length pattern error has_chars <- nzchar(pattern) pattern <- pattern[has_chars] replacement <- replacement[has_chars] out <- x for (i in seq_along(pattern)) { out <- gsub(pattern[i], replacement[i], out, fixed = fixed, ...) } dup <- duplicated(out) if (check_dup && any(dup)) { dup <- x[out %in% out[dup]] stop2("Internal renaming led to duplicated names. \n", "Occured for: ", collapse_comma(dup)) } out } # collapse strings having the same name in different lists # @param ... named lists # @param ls a list of named lists # @param a named list containing the collapsed strings collapse_lists <- function(..., ls = list()) { ls <- c(list(...), ls) elements <- unique(unlist(lapply(ls, names))) args <- c(FUN = collapse, lapply(ls, "[", elements), SIMPLIFY = FALSE) out <- do.call(mapply, args) names(out) <- elements out } # create a named list using object names nlist <- function(...) { m <- match.call() dots <- list(...) 
no_names <- is.null(names(dots)) has_name <- if (no_names) FALSE else nzchar(names(dots)) if (all(has_name)) return(dots) nms <- as.character(m)[-1] if (no_names) { names(dots) <- nms } else { names(dots)[!has_name] <- nms[!has_name] } dots } # initialize a named list # @param names names of the elements # @param values optional values of the elements named_list <- function(names, values = NULL) { if (!is.null(values)) { if (length(values) <= 1L) { values <- replicate(length(names), values) } values <- as.list(values) stopifnot(length(values) == length(names)) } else { values <- vector("list", length(names)) } setNames(values, names) } # is an object named? is_named <- function(x) { names <- names(x) if (is.null(names)) { return(FALSE) } if (any(!nzchar(names) | is.na(names))) { return(FALSE) } TRUE } #' Execute a Function Call #' #' Execute a function call similar to \code{\link{do.call}}, but without #' deparsing function arguments. For large number of arguments (i.e., more #' than a few thousand) this function currently is somewhat inefficient #' and should be used with care in this case. #' #' @param what Either a function or a non-empty character string naming the #' function to be called. #' @param args A list of arguments to the function call. The names attribute of #' \code{args} gives the argument names. #' @param pkg Optional name of the package in which to search for the #' function if \code{what} is a character string. #' @param envir An environment within which to evaluate the call. #' #' @return The result of the (evaluated) function call. 
#' #' @keywords internal #' @export do_call <- function(what, args, pkg = NULL, envir = parent.frame()) { call <- "" if (length(args)) { if (!is.list(args)) { stop2("'args' must be a list.") } fun_args <- names(args) if (is.null(fun_args)) { fun_args <- rep("", length(args)) } else { nzc <- nzchar(fun_args) fun_args[nzc] <- paste0("`", fun_args[nzc], "` = ") } names(args) <- paste0(".x", seq_along(args)) call <- paste0(fun_args, names(args), collapse = ",") } else { args <- list() } if (is.function(what)) { args$.fun <- what what <- ".fun" } else { what <- paste0("`", as_one_character(what), "`") if (!is.null(pkg)) { what <- paste0(as_one_character(pkg), "::", what) } } call <- paste0(what, "(", call, ")") eval2(call, envir = args, enclos = envir) } # create an empty data frame empty_data_frame <- function() { as.data.frame(matrix(nrow = 0, ncol = 0)) } # replace elements in x with elements in value # @param x named list-like object # @param value another named list-like object # @param dont_replace names of elements that cannot be replaced 'replace_args<-' <- function(x, dont_replace = NULL, value) { value_name <- deparse0(substitute(value), max_char = 100L) value <- as.list(value) if (length(value) && is.null(names(value))) { stop2("Argument '", value_name, "' must be named.") } invalid <- names(value)[names(value) %in% dont_replace] if (length(invalid)) { invalid <- collapse_comma(invalid) stop2("Argument(s) ", invalid, " cannot be replaced.") } x[names(value)] <- value x } # deparse0 'x' if it is no string deparse_no_string <- function(x) { if (!is.character(x)) { x <- deparse0(x) } x } # combine deparse lines into one string # since R 4.0 we also have base::deparse1 for this purpose deparse0 <- function(x, max_char = NULL, ...) { out <- collapse(deparse(x, ...)) if (isTRUE(max_char > 0)) { out <- substr(out, 1L, max_char) } out } # like 'eval' but parses characters before evaluation eval2 <- function(expr, envir = parent.frame(), ...) 
{ if (is.character(expr)) { expr <- str2expression(expr) } eval(expr, envir, ...) } # evaluate an expression without printing output or messages # @param expr expression to be evaluated # @param type type of output to be suppressed (see ?sink) # @param try wrap evaluation of expr in 'try' and # not suppress outputs if evaluation fails? # @param silent actually evaluate silently? eval_silent <- function(expr, type = "output", try = FALSE, silent = TRUE, ...) { try <- as_one_logical(try) silent <- as_one_logical(silent) type <- match.arg(type, c("output", "message")) expr <- substitute(expr) envir <- parent.frame() if (silent) { if (try && type == "message") { try_out <- try(utils::capture.output( out <- eval(expr, envir), type = type, ... )) if (is_try_error(try_out)) { # try again without suppressing error messages out <- eval(expr, envir) } } else { utils::capture.output(out <- eval(expr, envir), type = type, ...) } } else { out <- eval(expr, envir) } out } # find the name that 'x' had in a specific environment substitute_name <- function(x, envir = parent.frame(), nchar = 50) { out <- substitute(x) out <- eval2(paste0("substitute(", out, ")"), envir = envir) if (missing(out)) { return(NULL) } substr(collapse(deparse(out)), 1, nchar) } # recursive sorting of dependencies # @param x named list of dependencies per element # @param sorted already sorted element names # @return a vector of sorted element names sort_dependencies <- function(x, sorted = NULL) { if (!length(x)) { return(NULL) } if (length(names(x)) != length(x)) { stop2("Argument 'x' must be named.") } take <- !ulapply(x, function(dep) any(!dep %in% sorted)) new <- setdiff(names(x)[take], sorted) out <- union(sorted, new) if (length(new)) { out <- union(out, sort_dependencies(x, sorted = out)) } else if (!all(names(x) %in% out)) { stop2("Cannot handle circular dependency structures.") } out } stop2 <- function(...) { stop(..., call. = FALSE) } warning2 <- function(...) { warning(..., call. 
= FALSE) } # get first occurrence of 'x' in '...' objects # @param x The name of the required element # @param ... named R objects that may contain 'x' get_arg <- function(x, ...) { dots <- list(...) i <- 1 out <- NULL while (i <= length(dots) && is.null(out)) { if (!is.null(dots[[i]][[x]])) { out <- dots[[i]][[x]] } else { i <- i + 1 } } out } SW <- function(expr) { base::suppressWarnings(expr) } # get pattern matches in text as vector # @param simplify return an atomic vector of matches? # @param first only return the first match in each string? # @return character vector containing matches get_matches <- function(pattern, text, simplify = TRUE, first = FALSE, ...) { x <- regmatches(text, gregexpr(pattern, text, ...)) if (first) { x <- lapply(x, function(t) if (length(t)) t[1] else t) } if (simplify) { if (first) { x <- lapply(x, function(t) if (length(t)) t else "") } x <- unlist(x) } x } # find matches in the parse tree of an expression # @param pattern pattern to be matched # @param expr expression to be searched in # @return character vector containing matches get_matches_expr <- function(pattern, expr, ...) { if (is.character(expr)) { expr <- str2expression(expr) } out <- NULL for (i in seq_along(expr)) { sexpr <- try(expr[[i]], silent = TRUE) if (!is_try_error(sexpr)) { sexpr_char <- deparse0(sexpr) out <- c(out, get_matches(pattern, sexpr_char, ...)) } if (is.call(sexpr) || is.expression(sexpr)) { out <- c(out, get_matches_expr(pattern, sexpr, ...)) } } trim_wsp(unique(out)) } # like 'grepl' but handles (parse trees of) expressions grepl_expr <- function(pattern, expr, ...) { as.logical(ulapply(expr, function(e) length(get_matches_expr(pattern, e, ...)) > 0L)) } # combine character vectors into a joint regular 'or' expression # @param x a character vector # @param escape escape all special characters in 'x'? 
regex_or <- function(x, escape = FALSE) { if (escape) { x <- escape_all(x) } paste0("(", paste0("(", x, ")", collapse = "|"), ")") } # escape dots in character strings escape_dot <- function(x) { gsub(".", "\\.", x, fixed = TRUE) } # escape all special characters in character strings escape_all <- function(x) { specials <- c(".", "*", "+", "?", "^", "$", "(", ")", "[", "]", "|") for (s in specials) { x <- gsub(s, paste0("\\", s), x, fixed = TRUE) } x } # add an underscore to non-empty character strings # @param x a character vector # @param pos position of the underscore usc <- function(x, pos = c("prefix", "suffix")) { pos <- match.arg(pos) x <- as.character(x) if (!length(x)) x <- "" if (pos == "prefix") { x <- ifelse(nzchar(x), paste0("_", x), "") } else { x <- ifelse(nzchar(x), paste0(x, "_"), "") } x } # round using the largest remainder method round_largest_remainder <- function(x) { x <- as.numeric(x) total <- round(sum(x)) out <- floor(x) diff <- x - out J <- order(diff, decreasing = TRUE) I <- seq_len(total - floor(sum(out))) out[J[I]] <- out[J[I]] + 1 out } # add leading and trailing white spaces # @param x object accepted by paste # @param nsp number of white spaces to add wsp <- function(x = "", nsp = 1) { sp <- collapse(rep(" ", nsp)) if (length(x)) { out <- ifelse(nzchar(x), paste0(sp, x, sp), sp) } else { out <- NULL } out } # add white space per line the the strings # @param x object accepted by paste # @param nsp number of white spaces to add wsp_per_line <- function(x, nsp) { sp <- collapse(rep(" ", nsp)) x <- paste0(sp, x) x <- gsub("\\n(?=.+)", paste0("\n", sp), x, perl = TRUE) x } # remove whitespaces in character strings rm_wsp <- function(x) { out <- gsub("[ \t\r\n]+", "", x, perl = TRUE) dim(out) <- dim(x) out } # trim whitespaces in character strings trim_wsp <- function(x) { out <- gsub("[ \t\r\n]+", " ", x, perl = TRUE) dim(out) <- dim(x) out } # limit the number of characters of a vector # @param x a character vector # @param chars 
maximum number of characters to show # @param lsuffix number of characters to keep at the end of the strings # @return possible truncated character vector limit_chars <- function(x, chars = NULL, lsuffix = 4) { stopifnot(is.character(x)) if (!is.null(chars)) { chars_x <- nchar(x) - lsuffix suffix <- substr(x, chars_x + 1, chars_x + lsuffix) x <- substr(x, 1, chars_x) x <- ifelse(chars_x <= chars, x, paste0(substr(x, 1, chars - 3), "...")) x <- paste0(x, suffix) } x } # ensure that deprecated arguments still work # @param arg input to the new argument # @param alias input to the deprecated argument # @param default the default value of alias # @param warn should a warning be printed if alias is specified? use_alias <- function(arg, alias = NULL, default = NULL, warn = TRUE) { arg_name <- Reduce(paste, deparse(substitute(arg))) alias_name <- Reduce(paste, deparse(substitute(alias))) if (!is_equal(alias, default)) { arg <- alias if (grepl("^dots\\$", alias_name)) { alias_name <- gsub("^dots\\$", "", alias_name) } else if (grepl("^dots\\[\\[", alias_name)) { alias_name <- gsub("^dots\\[\\[\"|\"\\]\\]$", "", alias_name) } if (warn) { warning2("Argument '", alias_name, "' is deprecated. 
", "Please use argument '", arg_name, "' instead.") } } arg } warn_deprecated <- function(new, old = as.character(sys.call(sys.parent()))[1]) { msg <- paste0("Function '", old, "' is deprecated.") if (!missing(new)) { msg <- paste0(msg, " Please use '", new, "' instead.") } warning2(msg) invisible(NULL) } # check if x is a try-error resulting from try() is_try_error <- function(x) { inherits(x, "try-error") } # check if verbose mode is activated is_verbose <- function() { as_one_logical(getOption("brms.verbose", FALSE)) } viridis6 <- function() { c("#440154", "#414487", "#2A788E", "#22A884", "#7AD151", "#FDE725") } expect_match2 <- function(object, regexp, ..., all = TRUE) { testthat::expect_match(object, regexp, fixed = TRUE, ..., all = all) } # startup messages for brms .onAttach <- function(libname, pkgname) { version <- utils::packageVersion("brms") packageStartupMessage( "Loading 'brms' package (version ", version, "). Useful instructions\n", "can be found by typing help('brms'). A more detailed introduction\n", "to the package is available through vignette('brms_overview')." ) invisible(NULL) } # code to execute when loading brms .onLoad <- function(libname, pkgname) { # ensure compatibility with older R versions backports::import(pkgname) # dynamically register the 'recover_data' and 'emm_basis' # methods needed by 'emmeans', if that package is installed if (requireNamespace("emmeans", quietly = TRUE) && utils::packageVersion("emmeans") >= "1.4.0") { emmeans::.emm_register("brmsfit", pkgname) } invisible(NULL) } brms/R/hypothesis.R0000644000176200001440000005432014430673574013761 0ustar liggesusers#' Non-Linear Hypothesis Testing #' #' Perform non-linear hypothesis testing for all model parameters. #' #' @param x An \code{R} object. If it is no \code{brmsfit} object, #' it must be coercible to a \code{data.frame}. 
#' In the latter case, the variables used in the \code{hypothesis} argument #' need to correspond to column names of \code{x}, while the rows #' are treated as representing posterior draws of the variables. #' @param hypothesis A character vector specifying one or more #' non-linear hypothesis concerning parameters of the model. #' @param class A string specifying the class of parameters being tested. #' Default is "b" for population-level effects. #' Other typical options are "sd" or "cor". #' If \code{class = NULL}, all parameters can be tested #' against each other, but have to be specified with their full name #' (see also \code{\link[brms:draws-index-brms]{variables}}) #' @param group Name of a grouping factor to evaluate only #' group-level effects parameters related to this grouping factor. #' @param alpha The alpha-level of the tests (default is 0.05; #' see 'Details' for more information). #' @param robust If \code{FALSE} (the default) the mean is used as #' the measure of central tendency and the standard deviation as #' the measure of variability. If \code{TRUE}, the median and the #' median absolute deviation (MAD) are applied instead. #' @param scope Indicates where to look for the variables specified in #' \code{hypothesis}. If \code{"standard"}, use the full parameter names #' (subject to the restriction given by \code{class} and \code{group}). #' If \code{"coef"} or \code{"ranef"}, compute the hypothesis for all levels #' of the grouping factor given in \code{"group"}, based on the #' output of \code{\link{coef.brmsfit}} and \code{\link{ranef.brmsfit}}, #' respectively. #' @param seed A single numeric value passed to \code{\link{set.seed}} #' to make results reproducible. #' @param ... Currently ignored. #' #' @details Among others, \code{hypothesis} computes an evidence ratio #' (\code{Evid.Ratio}) for each hypothesis. 
For a one-sided hypothesis, this #' is just the posterior probability (\code{Post.Prob}) under the hypothesis #' against its alternative. That is, when the hypothesis is of the form #' \code{a > b}, the evidence ratio is the ratio of the posterior probability #' of \code{a > b} and the posterior probability of \code{a < b}. In this #' example, values greater than one indicate that the evidence in favor of #' \code{a > b} is larger than evidence in favor of \code{a < b}. For an #' two-sided (point) hypothesis, the evidence ratio is a Bayes factor between #' the hypothesis and its alternative computed via the Savage-Dickey density #' ratio method. That is the posterior density at the point of interest #' divided by the prior density at that point. Values greater than one #' indicate that evidence in favor of the point hypothesis has increased after #' seeing the data. In order to calculate this Bayes factor, all parameters #' related to the hypothesis must have proper priors and argument #' \code{sample_prior} of function \code{brm} must be set to \code{"yes"}. #' Otherwise \code{Evid.Ratio} (and \code{Post.Prob}) will be \code{NA}. #' Please note that, for technical reasons, we cannot sample from priors of #' certain parameters classes. Most notably, these include overall intercept #' parameters (prior class \code{"Intercept"}) as well as group-level #' coefficients. When interpreting Bayes factors, make sure that your priors #' are reasonable and carefully chosen, as the result will depend heavily on #' the priors. In particular, avoid using default priors. #' #' The \code{Evid.Ratio} may sometimes be \code{0} or \code{Inf} implying very #' small or large evidence, respectively, in favor of the tested hypothesis. #' For one-sided hypotheses pairs, this basically means that all posterior #' draws are on the same side of the value dividing the two hypotheses. 
In #' that sense, instead of \code{0} or \code{Inf,} you may rather read it as #' \code{Evid.Ratio} smaller \code{1 / S} or greater \code{S}, respectively, #' where \code{S} denotes the number of posterior draws used in the #' computations. #' #' The argument \code{alpha} specifies the size of the credible interval #' (i.e., Bayesian confidence interval). For instance, if we tested a #' two-sided hypothesis and set \code{alpha = 0.05} (5\%) an, the credible #' interval will contain \code{1 - alpha = 0.95} (95\%) of the posterior #' values. Hence, \code{alpha * 100}\% of the posterior values will #' lie outside of the credible interval. Although this allows testing of #' hypotheses in a similar manner as in the frequentist null-hypothesis #' testing framework, we strongly argue against using arbitrary cutoffs (e.g., #' \code{p < .05}) to determine the 'existence' of an effect. #' #' @return A \code{\link{brmshypothesis}} object. #' #' @seealso \code{\link{brmshypothesis}} #' #' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com} #' #' @examples #' \dontrun{ #' ## define priors #' prior <- c(set_prior("normal(0,2)", class = "b"), #' set_prior("student_t(10,0,1)", class = "sigma"), #' set_prior("student_t(10,0,1)", class = "sd")) #' #' ## fit a linear mixed effects models #' fit <- brm(time ~ age + sex + disease + (1 + age|patient), #' data = kidney, family = lognormal(), #' prior = prior, sample_prior = "yes", #' control = list(adapt_delta = 0.95)) #' #' ## perform two-sided hypothesis testing #' (hyp1 <- hypothesis(fit, "sexfemale = age + diseasePKD")) #' plot(hyp1) #' hypothesis(fit, "exp(age) - 3 = 0", alpha = 0.01) #' #' ## perform one-sided hypothesis testing #' hypothesis(fit, "diseasePKD + diseaseGN - 3 < 0") #' #' hypothesis(fit, "age < Intercept", #' class = "sd", group = "patient") #' #' ## test the amount of random intercept variance on all variance #' h <- paste("sd_patient__Intercept^2 / (sd_patient__Intercept^2 +", #' "sd_patient__age^2 + 
sigma^2) = 0") #' (hyp2 <- hypothesis(fit, h, class = NULL)) #' plot(hyp2) #' #' ## test more than one hypothesis at once #' h <- c("diseaseGN = diseaseAN", "2 * diseaseGN - diseasePKD = 0") #' (hyp3 <- hypothesis(fit, h)) #' plot(hyp3, ignore_prior = TRUE) #' #' ## compute hypotheses for all levels of a grouping factor #' hypothesis(fit, "age = 0", scope = "coef", group = "patient") #' #' ## use the default method #' dat <- as.data.frame(fit) #' str(dat) #' hypothesis(dat, "b_age > 0") #' } #' #' @export hypothesis.brmsfit <- function(x, hypothesis, class = "b", group = "", scope = c("standard", "ranef", "coef"), alpha = 0.05, robust = FALSE, seed = NULL, ...) { # use a seed as prior_draws.brmsfit randomly permutes draws if (!is.null(seed)) { set.seed(seed) } contains_draws(x) x <- restructure(x) group <- as_one_character(group) scope <- match.arg(scope) if (scope == "standard") { if (!length(class)) { class <- "" } class <- as_one_character(class) if (nzchar(group)) { class <- paste0(class, "_", group, "__") } else if (nzchar(class)) { class <- paste0(class, "_") } out <- .hypothesis( x, hypothesis, class = class, alpha = alpha, robust = robust, ... ) } else { co <- do_call(scope, list(x, summary = FALSE)) if (!group %in% names(co)) { stop2("'group' should be one of ", collapse_comma(names(co))) } out <- hypothesis_coef( co[[group]], hypothesis, alpha = alpha, robust = robust, ... ) } out } #' @rdname hypothesis.brmsfit #' @export hypothesis <- function(x, ...) { UseMethod("hypothesis") } #' @rdname hypothesis.brmsfit #' @export hypothesis.default <- function(x, hypothesis, alpha = 0.05, robust = FALSE, ...) { x <- as.data.frame(x) .hypothesis( x, hypothesis, class = "", alpha = alpha, robust = robust, ... ) } #' Descriptions of \code{brmshypothesis} Objects #' #' A \code{brmshypothesis} object contains posterior draws #' as well as summary statistics of non-linear hypotheses as #' returned by \code{\link{hypothesis}}. 
#' #' @name brmshypothesis #' #' @param ignore_prior A flag indicating if prior distributions #' should also be plotted. Only used if priors were specified on #' the relevant parameters. #' @param digits Minimal number of significant digits, #' see \code{\link[base:print.default]{print.default}}. #' @param chars Maximum number of characters of each hypothesis #' to print or plot. If \code{NULL}, print the full hypotheses. #' Defaults to \code{20}. #' @param colors Two values specifying the colors of the posterior #' and prior density respectively. If \code{NULL} (the default) #' colors are taken from the current color scheme of #' the \pkg{bayesplot} package. #' @param ... Currently ignored. #' @inheritParams plot.brmsfit #' #' @details #' The two most important elements of a \code{brmshypothesis} object are #' \code{hypothesis}, which is a data.frame containing the summary estimates #' of the hypotheses, and \code{samples}, which is a data.frame containing #' the corresponding posterior draws. #' #' @seealso \code{\link{hypothesis}} NULL # internal function to evaluate hypotheses # @param x the primary object passed to the hypothesis method; # needs to be a brmsfit object or coercible to a data.frame # @param hypothesis vector of character strings containing the hypotheses # @param class prefix of the parameters in the hypotheses # @param alpha the 'alpha-level' as understood by frequentist statistics # @return a 'brmshypothesis' object .hypothesis <- function(x, hypothesis, class, alpha, robust, combine = TRUE, ...) 
{ if (!is.character(hypothesis) || !length(hypothesis)) { stop2("Argument 'hypothesis' must be a character vector.") } if (length(alpha) != 1L || alpha < 0 || alpha > 1) { stop2("Argument 'alpha' must be a single value in [0,1].") } class <- as_one_character(class) robust <- as_one_logical(robust) out <- vector("list", length(hypothesis)) for (i in seq_along(out)) { out[[i]] <- eval_hypothesis( hypothesis[i], x = x, class = class, alpha = alpha, robust = robust, name = names(hypothesis)[i] ) } if (combine) { out <- combine_hlist(out, class = class, alpha = alpha) } out } # evaluate hypotheses for an arrary of ranefs or coefs # seperaly for each grouping-factor level hypothesis_coef <- function(x, hypothesis, alpha, ...) { stopifnot(is.array(x), length(dim(x)) == 3L) levels <- dimnames(x)[[2]] coefs <- dimnames(x)[[3]] x <- lapply(seq_along(levels), function(l) structure(as.data.frame(x[, l, ]), names = coefs) ) out <- vector("list", length(levels)) for (l in seq_along(levels)) { out[[l]] <- .hypothesis( x[[l]], hypothesis, class = "", alpha = alpha, combine = FALSE, ... 
) for (i in seq_along(out[[l]])) { out[[l]][[i]]$summary$Group <- levels[l] } } out <- unlist(out, recursive = FALSE) out <- as.list(matrix(out, ncol = length(hypothesis), byrow = TRUE)) out <- combine_hlist(out, class = "", alpha = alpha) out$hypothesis$Group <- factor(out$hypothesis$Group, levels) out$hypothesis <- move2start(out$hypothesis, "Group") out } # combine list of outputs of eval_hypothesis # @param hlist list of evaluate hypothesis # @return a 'brmshypothesis' object combine_hlist <- function(hlist, class, alpha) { stopifnot(is.list(hlist)) hs <- do_call(rbind, lapply(hlist, function(h) h$summary)) rownames(hs) <- NULL samples <- lapply(hlist, function(h) h$samples) samples <- as.data.frame(do_call(cbind, samples)) prior_samples <- lapply(hlist, function(h) h$prior_samples) prior_samples <- as.data.frame(do_call(cbind, prior_samples)) names(samples) <- names(prior_samples) <- paste0("H", seq_along(hlist)) class <- sub("_+$", "", class) # TODO: rename 'samples' to 'draws' in brms 3.0 out <- nlist(hypothesis = hs, samples, prior_samples, class, alpha) structure(out, class = "brmshypothesis") } # evaluate a single hypothesis based on the posterior draws eval_hypothesis <- function(h, x, class, alpha, robust, name = NULL) { stopifnot(length(h) == 1L && is.character(h)) pars <- variables(x)[grepl(paste0("^", class), variables(x))] # parse hypothesis string h <- gsub("[ \t\r\n]", "", h) sign <- get_matches("=|<|>", h) lr <- get_matches("[^=<>]+", h) if (length(sign) != 1L || length(lr) != 2L) { stop2("Every hypothesis must be of the form 'left (= OR < OR >) right'.") } h <- paste0("(", lr[1], ")") h <- paste0(h, ifelse(lr[2] != "0", paste0("-(", lr[2], ")"), "")) varsH <- find_vars(h) parsH <- paste0(class, varsH) miss_pars <- setdiff(parsH, pars) if (length(miss_pars)) { miss_pars <- collapse_comma(miss_pars) stop2("Some parameters cannot be found in the model: \n", miss_pars) } # rename hypothesis for correct evaluation h_renamed <- rename(h, c(":", "[", 
"]", ","), c("___", ".", ".", "..")) # get posterior and prior draws pattern <- c(paste0("^", class), ":", "\\[", "\\]", ",") repl <- c("", "___", ".", ".", "..") samples <- as.data.frame(x, variable = parsH) names(samples) <- rename(names(samples), pattern, repl, fixed = FALSE) samples <- as.matrix(eval2(h_renamed, samples)) prior_samples <- prior_draws(x, variable = parsH) if (!is.null(prior_samples) && ncol(prior_samples) == length(varsH)) { names(prior_samples) <- rename( names(prior_samples), pattern, repl, fixed = FALSE ) prior_samples <- as.matrix(eval2(h_renamed, prior_samples)) } else { prior_samples <- NULL } # summarize hypothesis wsign <- switch(sign, "=" = "equal", "<" = "less", ">" = "greater") probs <- switch(sign, "=" = c(alpha / 2, 1 - alpha / 2), "<" = c(alpha, 1 - alpha), ">" = c(alpha, 1 - alpha) ) if (robust) { measures <- c("median", "mad") } else { measures <- c("mean", "sd") } measures <- c(measures, "quantile", "evidence_ratio") sm <- lapply( measures, get_estimate, draws = samples, probs = probs, wsign = wsign, prior_samples = prior_samples ) sm <- as.data.frame(matrix(unlist(sm), nrow = 1)) names(sm) <- c("Estimate", "Est.Error", "CI.Lower", "CI.Upper", "Evid.Ratio") sm$Post.Prob <- sm$Evid.Ratio / (1 + sm$Evid.Ratio) if (is.infinite(sm$Evid.Ratio)) { sm$Post.Prob <- 1 } if (sign == "=") { sm$Star <- str_if(!(sm$CI.Lower <= 0 && 0 <= sm$CI.Upper), "*") } else { sm$Star <- str_if(sm$Post.Prob > 1 - alpha, "*") } if (!length(name) || !nzchar(name)) { name <- paste(h, sign, "0") } sm$Hypothesis <- as_one_character(name) sm <- move2start(sm, "Hypothesis") if (is.null(prior_samples)) { prior_samples <- as.matrix(rep(NA, nrow(samples))) } nlist(summary = sm, samples, prior_samples) } # find all valid variable names in a string # @param x a character string # @param dot are dots allowed in variable names? # @param brackets allow brackets at the end of variable names? 
# @return all valid variable names within the string # @note does not use the R parser itself to allow for double points, # square brackets, and commas at the end of names find_vars <- function(x, dot = TRUE, brackets = TRUE) { x <- gsub("[[:space:]]", "", as_one_character(x)) dot <- as_one_logical(dot) brackets <- as_one_logical(brackets) regex_all <- paste0( "([^([:digit:]|[:punct:])]", if (dot) "|\\.", ")", "[[:alnum:]_\\:", if (dot) "\\.", "]*", if (brackets) "(\\[[^],]+(,[^],]+)*\\])?" ) pos_all <- gregexpr(regex_all, x)[[1]] regex_fun <- paste0( "([^([:digit:]|[:punct:])]", if (dot) "|\\.", ")", "[[:alnum:]_", if (dot) "\\.", "]*\\(" ) pos_fun <- gregexpr(regex_fun, x)[[1]] pos_decnum <- gregexpr("\\.[[:digit:]]+", x)[[1]] keep <- !pos_all %in% c(pos_fun, pos_decnum) pos_var <- pos_all[keep] attr(pos_var, "match.length") <- attributes(pos_all)$match.length[keep] if (length(pos_var)) { out <- unique(unlist(regmatches(x, list(pos_var)))) } else { out <- character(0) } out } #' Compute Density Ratios #' #' Compute the ratio of two densities at given points based on draws of the #' corresponding distributions. #' #' @param x Vector of draws from the first distribution, usually the posterior #' distribution of the quantity of interest. #' @param y Optional vector of draws from the second distribution, usually the #' prior distribution of the quantity of interest. If \code{NULL} (the #' default), only the density of \code{x} will be evaluated. #' @param point Numeric values at which to evaluate and compare the densities. #' Defaults to \code{0}. #' @param n Single numeric value. Influences the accuracy of the density #' estimation. See \code{\link[stats:density]{density}} for details. #' @param ... Further arguments passed to \code{\link[stats:density]{density}}. #' #' @return A vector of length equal to \code{length(point)}. If \code{y} is #' provided, the density ratio of \code{x} against \code{y} is returned. Else, #' only the density of \code{x} is returned. 
#' #' @details In order to achieve sufficient accuracy in the density estimation, #' more draws than usual are required. That is you may need an effective #' sample size of 10,000 or more to reliably estimate the densities. #' #' @examples #' x <- rnorm(10000) #' y <- rnorm(10000, mean = 1) #' density_ratio(x, y, point = c(0, 1)) #' #' @export density_ratio <- function(x, y = NULL, point = 0, n = 4096, ...) { x <- as.numeric(x) point <- as.numeric(point) dots <- list(...) dots <- dots[names(dots) %in% names(formals("density.default"))] dots$n <- n eval_density <- function(x, point) { # evaluate density of x at point from <- min(x) to <- max(x) if (from > point) { from <- point - sd(x) / 4 } else if (to < point) { to <- point + sd(x) / 4 } dens <- do_call(density, c(nlist(x, from, to), dots)) return(spline(dens$x, dens$y, xout = point)$y) } out <- ulapply(point, eval_density, x = x) if (!is.null(y)) { y <- as.numeric(y) out <- out / ulapply(point, eval_density, x = y) } out } # compute the evidence ratio between two disjunct hypotheses # @param x posterior draws # @param cut the cut point between the two hypotheses # @param wsign direction of the hypothesis # @param prior_samples optional prior draws for two-sided hypothesis # @param ... optional arguments passed to density_ratio # @return the evidence ratio of the two hypothesis evidence_ratio <- function(x, cut = 0, wsign = c("equal", "less", "greater"), prior_samples = NULL, ...) { wsign <- match.arg(wsign) if (wsign == "equal") { if (is.null(prior_samples)) { out <- NA } else { out <- density_ratio(x, prior_samples, point = cut, ...) 
} } else if (wsign == "less") { out <- length(which(x < cut)) out <- out / (length(x) - out) } else if (wsign == "greater") { out <- length(which(x > cut)) out <- out / (length(x) - out) } out } # round all numeric elements of a list-like object round_numeric <- function(x, digits = 2) { stopifnot(is.list(x)) for (i in seq_along(x)) { if (is.numeric(x[[i]])) { x[[i]] <- round(x[[i]], digits = digits) } } x } #' @rdname brmshypothesis #' @export print.brmshypothesis <- function(x, digits = 2, chars = 20, ...) { # make sure hypothesis names are not too long x$hypothesis$Hypothesis <- limit_chars( x$hypothesis$Hypothesis, chars = chars ) cat(paste0("Hypothesis Tests for class ", x$class, ":\n")) x$hypothesis <- round_numeric(x$hypothesis, digits = digits) print(x$hypothesis, quote = FALSE) pone <- (1 - x$alpha * 2) * 100 ptwo <- (1 - x$alpha) * 100 cat(glue( "---\n'CI': {pone}%-CI for one-sided and {ptwo}%-CI for two-sided hypotheses.\n", "'*': For one-sided hypotheses, the posterior probability exceeds {ptwo}%;\n", "for two-sided hypotheses, the value tested against lies outside the {ptwo}%-CI.\n", "Posterior probabilities of point hypotheses assume equal prior probabilities.\n" )) invisible(x) } #' @rdname brmshypothesis #' @method plot brmshypothesis #' @export plot.brmshypothesis <- function(x, N = 5, ignore_prior = FALSE, chars = 40, colors = NULL, theme = NULL, ask = TRUE, plot = TRUE, ...) { dots <- list(...) 
if (!is.data.frame(x$samples)) { stop2("No posterior draws found.") } plot <- use_alias(plot, dots$do_plot) if (is.null(colors)) { colors <- bayesplot::color_scheme_get()[c(4, 2)] colors <- unname(unlist(colors)) } if (length(colors) != 2L) { stop2("Argument 'colors' must be of length 2.") } .plot_fun <- function(samples) { gg <- ggplot(samples, aes(x = .data[["values"]])) + facet_wrap("ind", ncol = 1, scales = "free") + xlab("") + ylab("") + theme + theme(axis.text.y = element_blank(), axis.ticks.y = element_blank()) if (ignore_prior) { gg <- gg + geom_density(alpha = 0.7, fill = colors[1], na.rm = TRUE) } else { gg <- gg + geom_density(aes(fill = .data[["Type"]]), alpha = 0.7, na.rm = TRUE) + scale_fill_manual(values = colors) } return(gg) } samples <- cbind(x$samples, Type = "Posterior") if (!ignore_prior) { prior_samples <- cbind(x$prior_samples, Type = "Prior") samples <- rbind(samples, prior_samples) } if (plot) { default_ask <- devAskNewPage() on.exit(devAskNewPage(default_ask)) devAskNewPage(ask = FALSE) } hyps <- limit_chars(x$hypothesis$Hypothesis, chars = chars) if (!is.null(x$hypothesis$Group)) { hyps <- paste0(x$hypothesis$Group, ": ", hyps) } names(samples)[seq_along(hyps)] <- hyps nplots <- ceiling(length(hyps) / N) plots <- vector(mode = "list", length = nplots) for (i in seq_len(nplots)) { rel_hyps <- hyps[((i - 1) * N + 1):min(i * N, length(hyps))] sub_samples <- cbind( utils::stack(samples[, rel_hyps, drop = FALSE]), samples[, "Type", drop = FALSE] ) # make sure that parameters appear in the original order sub_samples$ind <- with(sub_samples, factor(ind, levels = unique(ind))) plots[[i]] <- .plot_fun(sub_samples) if (plot) { plot(plots[[i]]) if (i == 1) devAskNewPage(ask = ask) } } invisible(plots) } brms/R/bayes_R2.R0000644000176200001440000000643214417766775013244 0ustar liggesusers#' Compute a Bayesian version of R-squared for regression models #' #' @aliases bayes_R2 #' #' @inheritParams predict.brmsfit #' @param ... 
#'   Further arguments passed to
#'   \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}},
#'   which is used in the computation of the R-squared values.
#'
#' @return If \code{summary = TRUE}, an M x C matrix is returned
#'   (M = number of response variables and C = \code{length(probs) + 2})
#'   containing summary statistics of the Bayesian R-squared values.
#'   If \code{summary = FALSE}, the posterior draws of the Bayesian
#'   R-squared values are returned in an S x M matrix (S is the number of draws).
#'
#' @details For an introduction to the approach, see Gelman et al. (2018)
#'   and \url{https://github.com/jgabry/bayes_R2/}.
#'
#' @references Andrew Gelman, Ben Goodrich, Jonah Gabry & Aki Vehtari. (2018).
#'   R-squared for Bayesian regression models, \emph{The American Statistician}.
#'   \code{10.1080/00031305.2018.1549100} (Preprint available at
#'   \url{https://stat.columbia.edu/~gelman/research/published/bayes_R2_v3.pdf})
#'
#' @examples
#' \dontrun{
#' fit <- brm(mpg ~ wt + cyl, data = mtcars)
#' summary(fit)
#' bayes_R2(fit)
#'
#' # compute R2 with new data
#' nd <- data.frame(mpg = c(10, 20, 30), wt = c(4, 3, 2), cyl = c(8, 6, 4))
#' bayes_R2(fit, newdata = nd)
#' }
#'
#' @method bayes_R2 brmsfit
#' @importFrom rstantools bayes_R2
#' @export bayes_R2
#' @export
bayes_R2.brmsfit <- function(object, resp = NULL, summary = TRUE,
                             robust = FALSE, probs = c(0.025, 0.975), ...)
{
  contains_draws(object)
  object <- restructure(object)
  resp <- validate_resp(resp, object)
  summary <- as_one_logical(summary)
  # check for precomputed values
  R2 <- get_criterion(object, "bayes_R2")
  if (is.matrix(R2)) {
    # assumes unsummarized 'R2' as ensured by 'add_criterion'
    take <- colnames(R2) %in% paste0("R2", resp)
    R2 <- R2[, take, drop = FALSE]
    if (summary) {
      R2 <- posterior_summary(R2, probs = probs, robust = robust)
    }
    return(R2)
  }
  family <- family(object, resp = resp)
  if (conv_cats_dpars(family)) {
    stop2("'bayes_R2' is not defined for unordered categorical models.")
  }
  if (is_ordinal(family)) {
    warning2(
      "Predictions are treated as continuous variables in ",
      "'bayes_R2' which is likely invalid for ordinal families."
    )
  }
  args_y <- list(object, warn = TRUE, ...)
  args_ypred <- list(object, sort = TRUE, ...)
  R2 <- named_list(paste0("R2", resp))
  for (i in seq_along(R2)) {
    # assumes expectations of different responses to be independent
    args_ypred$resp <- args_y$resp <- resp[i]
    y <- do_call(get_y, args_y)
    ypred <- do_call(posterior_epred, args_ypred)
    if (is_ordinal(family(object, resp = resp[i]))) {
      # map ordinal category probabilities to a continuous expectation
      ypred <- ordinal_probs_continuous(ypred)
    }
    R2[[i]] <- .bayes_R2(y, ypred)
  }
  R2 <- do_call(cbind, R2)
  colnames(R2) <- paste0("R2", resp)
  if (summary) {
    R2 <- posterior_summary(R2, probs = probs, robust = robust)
  }
  R2
}

# internal function of bayes_R2.brmsfit
# see https://github.com/jgabry/bayes_R2/blob/master/bayes_R2.pdf
.bayes_R2 <- function(y, ypred, ...) {
  # residuals per posterior draw
  e <- -1 * sweep(ypred, 2, y)
  var_ypred <- matrixStats::rowVars(ypred)
  var_e <- matrixStats::rowVars(e)
  as.matrix(var_ypred / (var_ypred + var_e))
}
brms/R/formula-re.R0000644000176200001440000007157414424715563013637 0ustar liggesusers# This file contains functions dealing with the extended
# lme4-like formula syntax to specify group-level terms

#' Set up basic grouping terms in \pkg{brms}
#'
#' Function used to set up a basic grouping term in \pkg{brms}.
#' The function does not evaluate its arguments --
#' it exists purely to help set up a model with grouping terms.
#' \code{gr} is called implicitly inside the package
#' and there is usually no need to call it directly.
#'
#' @param ... One or more terms containing grouping factors.
#' @param by An optional factor variable, specifying sub-populations of the
#'   groups. For each level of the \code{by} variable, a separate
#'   variance-covariance matrix will be fitted. Levels of the grouping factor
#'   must be nested in levels of the \code{by} variable.
#' @param cor Logical. If \code{TRUE} (the default), group-level terms will be
#'   modelled as correlated.
#' @param id Optional character string. All group-level terms across the model
#'   with the same \code{id} will be modeled as correlated (if \code{cor} is
#'   \code{TRUE}). See \code{\link{brmsformula}} for more details.
#' @param cov An optional matrix which is proportional to the within-group
#'   covariance matrix of the group-level effects. All levels of the grouping
#'   factor should appear as rownames of the corresponding matrix. This argument
#'   can be used, among others, to model pedigrees and phylogenetic effects. See
#'   \code{vignette("brms_phylogenetics")} for more details. By default, levels
#'   of the same grouping factor are modeled as independent of each other.
#' @param dist Name of the distribution of the group-level effects.
#'   Currently \code{"gaussian"} is the only option.
#'
#' @seealso \code{\link{brmsformula}}
#'
#' @examples
#' \dontrun{
#' # model using basic lme4-style formula
#' fit1 <- brm(count ~ Trt + (1|patient), data = epilepsy)
#' summary(fit1)
#'
#' # equivalent model using 'gr' which is called anyway internally
#' fit2 <- brm(count ~ Trt + (1|gr(patient)), data = epilepsy)
#' summary(fit2)
#'
#' # include Trt as a by variable
#' fit3 <- brm(count ~ Trt + (1|gr(patient, by = Trt)), data = epilepsy)
#' summary(fit3)
#' }
#'
#' @export
gr <- function(..., by = NULL, cor = TRUE, id = NA, cov = NULL,
               dist = "gaussian") {
  label <- deparse0(match.call())
  # capture the grouping term(s) unevaluated
  groups <- as.character(as.list(substitute(list(...)))[-1])
  if (length(groups) > 1L) {
    stop2("Grouping structure 'gr' expects only a single grouping term")
  }
  stopif_illegal_group(groups[1])
  cor <- as_one_logical(cor)
  id <- as_one_character(id, allow_na = TRUE)
  # 'by' and 'cov' are stored as character for later evaluation on the data
  by <- substitute(by)
  if (!is.null(by)) {
    by <- deparse0(by)
  } else {
    by <- ""
  }
  cov <- substitute(cov)
  if (!is.null(cov)) {
    cov <- all.vars(cov)
    if (length(cov) != 1L) {
      stop2("Argument 'cov' must contain exactly one variable.")
    }
  } else {
    cov <- ""
  }
  dist <- match.arg(dist, c("gaussian", "student"))
  byvars <- all_vars(by)
  allvars <- str2formula(c(groups, byvars))
  nlist(groups, allvars, label, by, cor, id, cov, dist, type = "")
}

#' Set up multi-membership grouping terms in \pkg{brms}
#'
#' Function to set up a multi-membership grouping term in \pkg{brms}.
#' The function does not evaluate its arguments --
#' it exists purely to help set up a model with grouping terms.
#'
#' @inheritParams gr
#' @param weights A matrix specifying the weights of each member.
#'   It should have as many columns as grouping terms specified in \code{...}.
#'   If \code{NULL} (the default), equal weights are used.
#' @param by An optional factor matrix, specifying sub-populations of the
#'   groups. It should have as many columns as grouping terms specified in
#'   \code{...}.
#'   For each level of the \code{by} variable, a separate
#'   variance-covariance matrix will be fitted. Levels of the grouping factor
#'   must be nested in levels of the \code{by} variable matrix.
#' @param scale Logical; if \code{TRUE} (the default),
#'   weights are standardized in order to sum to one per row.
#'   If negative weights are specified, \code{scale} needs
#'   to be set to \code{FALSE}.
#'
#' @seealso \code{\link{brmsformula}}, \code{\link{mmc}}
#'
#' @examples
#' \dontrun{
#' # simulate some data
#' dat <- data.frame(
#'   y = rnorm(100), x1 = rnorm(100), x2 = rnorm(100),
#'   g1 = sample(1:10, 100, TRUE), g2 = sample(1:10, 100, TRUE)
#' )
#'
#' # multi-membership model with two members per group and equal weights
#' fit1 <- brm(y ~ x1 + (1|mm(g1, g2)), data = dat)
#' summary(fit1)
#'
#' # weight the first member two times more than the second member
#' dat$w1 <- rep(2, 100)
#' dat$w2 <- rep(1, 100)
#' fit2 <- brm(y ~ x1 + (1|mm(g1, g2, weights = cbind(w1, w2))), data = dat)
#' summary(fit2)
#'
#' # multi-membership model with level specific covariate values
#' dat$xc <- (dat$x1 + dat$x2) / 2
#' fit3 <- brm(y ~ xc + (1 + mmc(x1, x2) | mm(g1, g2)), data = dat)
#' summary(fit3)
#' }
#'
#' @export
mm <- function(..., weights = NULL, scale = TRUE, by = NULL, cor = TRUE,
               id = NA, cov = NULL, dist = "gaussian") {
  label <- deparse0(match.call())
  # capture the grouping terms unevaluated
  groups <- as.character(as.list(substitute(list(...)))[-1])
  if (length(groups) < 2) {
    stop2("Multi-membership terms require at least two grouping variables.")
  }
  for (i in seq_along(groups)) {
    stopif_illegal_group(groups[i])
  }
  cor <- as_one_logical(cor)
  id <- as_one_character(id, allow_na = TRUE)
  # 'by' and 'cov' are stored as character for later evaluation on the data
  by <- substitute(by)
  if (!is.null(by)) {
    by <- deparse0(by)
  } else {
    by <- ""
  }
  cov <- substitute(cov)
  if (!is.null(cov)) {
    cov <- all.vars(cov)
    if (length(cov) != 1L) {
      stop2("Argument 'cov' must contain exactly one variable.")
    }
  } else {
    cov <- ""
  }
  dist <- match.arg(dist, c("gaussian", "student"))
  scale <- as_one_logical(scale)
  weights <- substitute(weights)
  weightvars <- all_vars(weights)
  byvars <- all_vars(by)
  allvars <- str2formula(c(groups, weightvars, byvars))
  if (!is.null(weights)) {
    weights <- str2formula(deparse_no_string(weights))
    attr(weights, "scale") <- scale
    weightvars <- str2formula(weightvars)
  }
  nlist(
    groups, weights, weightvars, allvars, label,
    by, cor, id, cov, dist, type = "mm"
  )
}

#' Multi-Membership Covariates
#'
#' Specify covariates that vary over different levels
#' of multi-membership grouping factors thus requiring
#' special treatment. This function is almost solely useful,
#' when called in combination with \code{\link{mm}}.
#' Outside of multi-membership terms it will behave
#' very much like \code{\link{cbind}}.
#'
#' @param ... One or more terms containing covariates
#'   corresponding to the grouping levels specified in \code{\link{mm}}.
#'
#' @return A matrix with covariates as columns.
#'
#' @seealso \code{\link{mm}}
#'
#' @examples
#' \dontrun{
#' # simulate some data
#' dat <- data.frame(
#'   y = rnorm(100), x1 = rnorm(100), x2 = rnorm(100),
#'   g1 = sample(1:10, 100, TRUE), g2 = sample(1:10, 100, TRUE)
#' )
#'
#' # multi-membership model with level specific covariate values
#' dat$xc <- (dat$x1 + dat$x2) / 2
#' fit <- brm(y ~ xc + (1 + mmc(x1, x2) | mm(g1, g2)), data = dat)
#' summary(fit)
#' }
#'
#' @export
mmc <- function(...) {
  dots <- list(...)
  if (any(ulapply(dots, is_like_factor))) {
    stop2("'mmc' requires numeric variables.")
  }
  out <- cbind(...)
  # the '?' prefix marks columns as multi-membership covariates
  colnames(out) <- paste0("?", colnames(out))
  out
}

# check if the group part of a group-level term is invalid
# @param group the group part of a group-level term
# @return TRUE if the expression is illegal, FALSE otherwise
illegal_group_expr <- function(group) {
  group <- as_one_character(group)
  valid_expr <- ":|([^([:digit:]|[:punct:])]|\\.)[[:alnum:]_\\.]*"
  rsv_signs <- c("+", "-", "*", "/", "|", "::")
  nzchar(gsub(valid_expr, "", group)) ||
    any(ulapply(rsv_signs, grepl, x = group, fixed = TRUE))
}

# throw an informative error for illegal grouping terms
stopif_illegal_group <- function(group) {
  if (illegal_group_expr(group)) {
    stop2(
      "Illegal grouping term '", group, "'. It may contain ",
      "only variable names combined by the symbol ':'"
    )
  }
  invisible(NULL)
}

# left-hand side of a group-level term (before the first '|')
re_lhs <- function(re_terms) {
  get_matches("^[^\\|]*", re_terms)
}

# middle section of a group-level term ('|', '||', or '|<ID>|')
re_mid <- function(re_terms) {
  get_matches("\\|([^\\|]*\\||)", re_terms)
}

# right-hand side of a group-level term (after the last '|')
re_rhs <- function(re_terms) {
  sub("^\\|", "", get_matches("\\|[^\\|]*$", re_terms))
}

# extract the three parts of group-level terms
# @param re_terms character vector of RE terms in lme4 syntax
# @return a data.frame with one row per group-level term
re_parts <- function(re_terms) {
  lhs <- re_lhs(re_terms)
  mid <- re_mid(re_terms)
  rhs <- re_rhs(re_terms)
  out <- nlist(lhs, mid, rhs)
  if (any(lengths(out) != length(re_terms))) {
    stop2("Invalid syntax used in group-level terms.")
  }
  as.data.frame(out, stringsAsFactors = FALSE)
}

# split nested group-level terms and check for special effects terms
# @param re_terms character vector of RE terms in extended lme4 syntax
split_re_terms <- function(re_terms) {
  if (!length(re_terms)) {
    return(re_terms)
  }
  stopifnot(is.character(re_terms))
  # split after grouping factor terms
  re_parts <- re_parts(re_terms)
  new_re_terms <- vector("list", length(re_terms))
  for (i in seq_along(re_terms)) {
    new_re_rhs <- terms(formula(paste0("~", re_parts$rhs[i])))
    new_re_rhs <- attr(new_re_rhs, "term.labels")
    # wrap plain grouping factors in 'gr' so all terms are calls
    new_re_rhs <- ifelse(
      !grepl("^(gr|mm)\\(", new_re_rhs),
      paste0("gr(", new_re_rhs, ")"), new_re_rhs
    )
    new_re_terms[[i]] <- paste0(
      re_parts$lhs[i], re_parts$mid[i],
      new_re_rhs
    )
  }
  re_terms <- unlist(new_re_terms)
  # split after coefficient types
  re_parts <- re_parts(re_terms)
  new_re_terms <- type <- vector("list", length(re_terms))
  for (i in seq_along(re_terms)) {
    lhs_form <- formula(paste("~", re_parts$lhs[i]))
    lhs_all_terms <- all_terms(lhs_form)
    # otherwise varying intercepts cannot be handled reliably
    is_cs_term <- grepl_expr(regex_sp("cs"), lhs_all_terms)
    if (any(is_cs_term) && !all(is_cs_term)) {
      stop2("Please specify category specific effects ",
            "in separate group-level terms.")
    }
    new_lhs <- NULL
    # prepare effects of special terms
    valid_types <- c("sp", "cs", "mmc")
    invalid_types <- c("sm", "gp")
    for (t in c(valid_types, invalid_types)) {
      lhs_tform <- do_call(paste0("terms_", t), list(lhs_form))
      if (is.formula(lhs_tform)) {
        if (t %in% invalid_types) {
          stop2("Cannot handle splines or GPs in group-level terms.")
        }
        new_lhs <- c(new_lhs, formula2str(lhs_tform, rm = 1))
        type[[i]] <- c(type[[i]], t)
      }
    }
    # prepare effects of basic terms
    lhs_terms <- terms(lhs_form)
    fe_form <- terms_fe(lhs_terms)
    fe_terms <- all_terms(fe_form)
    # the intercept lives within not outside of 'cs' terms
    has_intercept <- has_intercept(lhs_terms) && !"cs" %in% type[[i]]
    if (length(fe_terms) || has_intercept) {
      new_lhs <- c(new_lhs, formula2str(fe_form, rm = 1))
      type[[i]] <- c(type[[i]], "")
    }
    # extract information from the mid section of the terms
    rhs_call <- str2lang(re_parts$rhs[i])
    if (re_parts$mid[i] == "||") {
      # ||-syntax overwrites the 'cor' argument
      rhs_call$cor <- FALSE
    }
    gcall <- eval(rhs_call)
    if (gcall$cor) {
      id <- gsub("\\|", "", re_parts$mid[i])
      if (nzchar(id)) {
        # ID-syntax overwrites the 'id' argument
        rhs_call$id <- id
      } else {
        id <- gcall$id
      }
      if (length(new_lhs) > 1 && isNA(id)) {
        # ID is required to model coefficients as correlated
        # if multiple types are provided within the same term
        rhs_call$id <- collapse(sample(0:9, 10, TRUE))
      }
    }
    re_parts$mid[i] <- "|"
    re_parts$rhs[i] <- deparse0(rhs_call)
    new_re_terms[[i]] <- paste0(new_lhs,
                                re_parts$mid[i], re_parts$rhs[i])
    new_re_terms[[i]] <- new_re_terms[[i]][order(type[[i]])]
    type[[i]] <- sort(type[[i]])
  }
  re_terms <- unlist(new_re_terms)
  structure(re_terms, type = unlist(type))
}

# extract group-level terms from a formula of character vector
# @param x formula or character vector
# @param formula return a formula rather than a character string?
# @param brackets include group-level terms in brackets?
get_re_terms <- function(x, formula = FALSE, brackets = TRUE) {
  if (is.formula(x)) {
    x <- all_terms(x)
  }
  # group-level terms are those containing a '|'
  re_pos <- grepl("\\|", x)
  out <- x[re_pos]
  if (brackets && length(out)) {
    out <- paste0("(", out, ")")
  }
  if (formula) {
    out <- str2formula(out)
  }
  out
}

# validate the re_formula argument
# @inheritParams extract_draws.brmsfit
# @param formula: formula to match re_formula with
# @return updated re_formula containing only terms existent in formula
check_re_formula <- function(re_formula, formula) {
  old_re_formula <- get_re_terms(formula, formula = TRUE)
  if (is.null(re_formula)) {
    re_formula <- old_re_formula
  } else if (SW(anyNA(re_formula))) {
    # NA means: drop all group-level terms
    re_formula <- ~1
  } else {
    re_formula <- get_re_terms(as.formula(re_formula), formula = TRUE)
    new <- brmsterms(re_formula, check_response = FALSE)$dpars$mu[["re"]]
    old <- brmsterms(old_re_formula, check_response = FALSE)$dpars$mu[["re"]]
    if (NROW(new) && NROW(old)) {
      # compare old and new ranefs
      new_terms <- lapply(new$form, terms)
      found <- rep(FALSE, NROW(new))
      for (i in seq_rows(new)) {
        group <- new$group[[i]]
        old_terms <- lapply(old$form[old$group == group], terms)
        j <- 1
        while (!found[i] && j <= length(old_terms)) {
          new_term_labels <- attr(new_terms[[i]], "term.labels")
          old_term_labels <- attr(old_terms[[j]], "term.labels")
          new_intercept <- attr(new_terms[[i]], "intercept")
          old_intercept <- attr(old_terms[[j]], "intercept")
          found[i] <- isTRUE(
            all(new_term_labels %in% old_term_labels) &&
            new_intercept <= old_intercept
          )
          if (found[i]) {
            # terms have to maintain the original order so that Z_* data
            # and r_*
parameters match in 'extract_draws' (fixes issue #844) term_matches <- match(new_term_labels, old_term_labels) if (is.unsorted(term_matches)) { stop2("Order of terms in 're_formula' should match the original order.") } } j <- j + 1 } } new <- new[found, ] if (NROW(new)) { forms <- ulapply(new$form, formula2str, rm = 1) groups <- ufrom_list(new$gcall, "label") re_terms <- paste("(", forms, "|", groups, ")") re_formula <- formula(paste("~", paste(re_terms, collapse = "+"))) } else { re_formula <- ~1 } } else { re_formula <- ~1 } } re_formula } # remove existing group-level terms in formula and # add valid group-level terms of re_formula update_re_terms <- function(formula, re_formula) { UseMethod("update_re_terms") } #' @export update_re_terms.mvbrmsformula <- function(formula, re_formula) { formula$forms <- lapply(formula$forms, update_re_terms, re_formula) formula } #' @export update_re_terms.brmsformula <- function(formula, re_formula) { formula$formula <- update_re_terms(formula$formula, re_formula) formula$pforms <- lapply(formula$pforms, update_re_terms, re_formula) formula } #' @export update_re_terms.formula <- function(formula, re_formula = NULL) { if (is.null(re_formula) || get_nl(formula)) { return(formula) } re_formula <- check_re_formula(re_formula, formula) new_formula <- formula2str(formula) old_re_terms <- get_re_terms(formula, brackets = FALSE) if (length(old_re_terms)) { # remove old group-level terms rm_terms <- c( paste0("+ (", old_re_terms, ")"), paste0("(", old_re_terms, ")"), old_re_terms ) new_formula <- rename(new_formula, rm_terms, "") if (grepl("~( *\\+*)*$", new_formula)) { # lhs only formulas are syntactically invalid # also check for trailing '+' signs (#769) new_formula <- paste(new_formula, "1") } } # add new group-level terms new_re_terms <- get_re_terms(re_formula) new_formula <- paste(c(new_formula, new_re_terms), collapse = "+") new_formula <- formula(new_formula) attributes(new_formula) <- attributes(formula) new_formula } # 
extract group-level terms get_re <- function(x, ...) { UseMethod("get_re") } #' @export get_re.default <- function(x, ...) { NULL } # get group-level information in a data.frame # @param bterms object of class 'brmsterms' # @param all logical; include ranefs of additional parameters? #' @export get_re.brmsterms <- function(x, ...) { re <- named_list(c(names(x$dpars), names(x$nlpars))) for (dp in names(x$dpars)) { re[[dp]] <- get_re(x$dpars[[dp]]) } for (nlp in names(x$nlpars)) { re[[nlp]] <- get_re(x$nlpars[[nlp]]) } do_call(rbind, re) } #' @export get_re.mvbrmsterms <- function(x, ...) { do_call(rbind, lapply(x$terms, get_re, ...)) } #' @export get_re.btl <- function(x, ...) { px <- check_prefix(x) re <- x[["re"]] if (is.null(re)) { re <- empty_re() } re$resp <- rep(px$resp, nrow(re)) re$dpar <- rep(px$dpar, nrow(re)) re$nlpar <- rep(px$nlpar, nrow(re)) re } # gather information on group-level effects # @param bterms object of class brmsterms # @param data data.frame containing all model variables # @param old_levels optional original levels of the grouping factors # @return a tidy data.frame with the following columns: # id: ID of the group-level effect # group: name of the grouping factor # gn: number of the grouping term within the respective formula # coef: name of the group-level effect # cn: number of the effect within the ID # resp: name of the response variable # dpar: name of the distributional parameter # nlpar: name of the non-linear parameter # cor: are correlations modeled for this effect? 
# ggn: global number of the grouping factor
# type: special effects type; can be 'sp' or 'cs'
# gcall: output of functions 'gr' or 'mm'
# form: formula used to compute the effects
tidy_ranef <- function(bterms, data, old_levels = NULL) {
  data <- combine_groups(data, get_group_vars(bterms))
  re <- get_re(bterms)
  ranef <- vector("list", nrow(re))
  used_ids <- new_ids <- NULL
  id_groups <- list()
  j <- 1
  for (i in seq_rows(re)) {
    # derive coefficient names depending on the special effects type
    if (!nzchar(re$type[i])) {
      coef <- colnames(get_model_matrix(re$form[[i]], data))
    } else if (re$type[i] == "sp") {
      coef <- tidy_spef(re$form[[i]], data)$coef
    } else if (re$type[i] == "mmc") {
      coef <- rename(all_terms(re$form[[i]]))
    } else if (re$type[i] == "cs") {
      resp <- re$resp[i]
      if (nzchar(resp)) {
        stopifnot(is.mvbrmsterms(bterms))
        nthres <- max(get_thres(bterms$terms[[resp]]))
      } else {
        stopifnot(is.brmsterms(bterms))
        nthres <- max(get_thres(bterms))
      }
      # category specific effects get one coefficient per threshold
      indices <- paste0("[", seq_len(nthres), "]")
      coef <- colnames(get_model_matrix(re$form[[i]], data = data))
      coef <- as.vector(t(outer(coef, indices, paste0)))
    }
    avoid_dpars(coef, bterms = bterms)
    rdat <- data.frame(
      id = re$id[[i]],
      group = re$group[[i]],
      gn = re$gn[[i]],
      gtype = re$gtype[[i]],
      coef = coef,
      cn = NA,
      resp = re$resp[[i]],
      dpar = re$dpar[[i]],
      nlpar = re$nlpar[[i]],
      ggn = NA,
      cor = re$cor[[i]],
      type = re$type[[i]],
      by = re$gcall[[i]]$by,
      cov = re$gcall[[i]]$cov,
      dist = re$gcall[[i]]$dist,
      stringsAsFactors = FALSE
    )
    bylevels <- NULL
    if (nzchar(rdat$by[1])) {
      bylevels <- eval2(rdat$by[1], data)
      bylevels <- rm_wsp(extract_levels(bylevels))
    }
    rdat$bylevels <- repl(bylevels, nrow(rdat))
    rdat$form <- repl(re$form[[i]], nrow(rdat))
    rdat$gcall <- repl(re$gcall[[i]], nrow(rdat))
    # prepare group-level IDs
    id <- re$id[[i]]
    if (is.na(id)) {
      rdat$id <- j
      j <- j + 1
    } else {
      if (id %in% used_ids) {
        k <- match(id, used_ids)
        rdat$id <- new_ids[k]
        new_id_groups <- c(re$group[[i]], re$gcall[[i]]$groups)
        if (!identical(new_id_groups, id_groups[[k]])) {
          stop2("Can only combine group-level terms of the ",
                "same grouping factors.")
        }
      } else {
        used_ids <- c(used_ids, id)
        k <- length(used_ids)
        rdat$id <- new_ids[k] <- j
        id_groups[[k]] <- c(re$group[[i]], re$gcall[[i]]$groups)
        j <- j + 1
      }
    }
    ranef[[i]] <- rdat
  }
  ranef <- do_call(rbind, c(list(empty_ranef()), ranef))
  # check for overlap between different group types
  rsv_groups <- ranef[nzchar(ranef$gtype), "group"]
  other_groups <- ranef[!nzchar(ranef$gtype), "group"]
  inv_groups <- intersect(rsv_groups, other_groups)
  if (length(inv_groups)) {
    inv_groups <- paste0("'", inv_groups, "'", collapse = ", ")
    # fixed typo in the error message ("resevered" -> "reserved")
    stop2("Grouping factor names ", inv_groups, " are reserved.")
  }
  # check for duplicated and thus not identified effects
  dup <- duplicated(ranef[, c("group", "coef", vars_prefix())])
  if (any(dup)) {
    dr <- ranef[which(dup)[1], ]
    # fixed typo in the error message ("Occured" -> "Occurred")
    stop2(
      "Duplicated group-level effects are not allowed.\n",
      "Occurred for effect '", dr$coef, "' of group '", dr$group, "'."
    )
  }
  if (nrow(ranef)) {
    # number the effects within each ID and the groups globally
    for (id in unique(ranef$id)) {
      ranef$cn[ranef$id == id] <- seq_len(sum(ranef$id == id))
    }
    ranef$ggn <- match(ranef$group, unique(ranef$group))
    if (is.null(old_levels)) {
      rsub <- ranef[!duplicated(ranef$group), ]
      levels <- named_list(rsub$group)
      for (i in seq_along(levels)) {
        # combine levels of all grouping factors within one grouping term
        levels[[i]] <- unique(ulapply(
          rsub$gcall[[i]]$groups,
          function(g) extract_levels(get(g, data))
        ))
        # fixes issue #1353
        bysel <- ranef$group == names(levels)[i] &
          nzchar(ranef$by) & !duplicated(ranef$by)
        bysel <- which(bysel)
        if (length(bysel) > 1L) {
          stop2("Each grouping factor can only be associated with one 'by' variable.")
        }
        # ensure that a non-NULL by-variable is found if present
        if (length(bysel) == 1L) {
          rsub[i, ] <- ranef[bysel, ]
        }
        # store information of corresponding by-levels
        if (nzchar(rsub$by[i])) {
          stopifnot(rsub$type[i] %in% c("", "mmc"))
          by <- rsub$by[i]
          bylevels <- rsub$bylevels[[i]]
          byvar <- rm_wsp(eval2(by, data))
          groups <- rsub$gcall[[i]]$groups
          if (rsub$gtype[i] == "mm") {
            byvar <- as.matrix(byvar)
            if (!identical(dim(byvar), c(nrow(data), length(groups)))) {
              stop2(
                "Grouping structure 'mm' expects 'by' to be ",
                "a matrix with as many columns as grouping factors."
              )
            }
            df <- J <- named_list(groups)
            for (k in seq_along(groups)) {
              J[[k]] <- match(get(groups[k], data), levels[[i]])
              df[[k]] <- data.frame(J = J[[k]], by = byvar[, k])
            }
            J <- unlist(J)
            df <- do_call(rbind, df)
          } else {
            J <- match(get(groups, data), levels[[i]])
            df <- data.frame(J = J, by = byvar)
          }
          df <- unique(df)
          # each group level must map to exactly one 'by' level
          if (nrow(df) > length(unique(J))) {
            stop2("Some levels of ", collapse_comma(groups),
                  " correspond to multiple levels of '", by, "'.")
          }
          df <- df[order(df$J), ]
          by_per_level <- bylevels[match(df$by, bylevels)]
          attr(levels[[i]], "by") <- by_per_level
        }
      }
      attr(ranef, "levels") <- levels
    } else {
      # for newdata numeration has to depend on the original levels
      attr(ranef, "levels") <- old_levels
    }
    # incorporate deprecated 'cov_ranef' argument
    ranef <- update_ranef_cov(ranef, bterms)
  }
  # ordering after IDs matches the order of the posterior draws
  # if multiple IDs are used for the same grouping factor (#835)
  ranef <- ranef[order(ranef$id), , drop = FALSE]
  structure(ranef, class = c("ranef_frame", "data.frame"))
}

# an empty 'ranef_frame' with all columns of tidy_ranef's output
empty_ranef <- function() {
  structure(
    data.frame(
      id = numeric(0), group = character(0), gn = numeric(0),
      coef = character(0), cn = numeric(0), resp = character(0),
      dpar = character(0), nlpar = character(0), ggn = numeric(0),
      cor = logical(0), type = character(0), form = character(0),
      stringsAsFactors = FALSE
    ),
    class = c("ranef_frame", "data.frame")
  )
}

# an empty data.frame of raw group-level term information
empty_re <- function() {
  data.frame(
    group = character(0), gtype = character(0), gn = numeric(0),
    id = numeric(0), type = character(0), cor = logical(0),
    form = character(0)
  )
}

is.ranef_frame <- function(x) {
  inherits(x, "ranef_frame")
}

# extract names of all grouping variables
get_group_vars <- function(x, ...) {
  UseMethod("get_group_vars")
}

#' @export
get_group_vars.brmsfit <- function(x, ...) {
  get_group_vars(x$formula, ...)
}
#' @export
get_group_vars.default <- function(x, ...) {
  get_group_vars(brmsterms(x), ...)
}
#' @export
get_group_vars.brmsterms <- function(x, ...) {
  .get_group_vars(x, ...)
}
#' @export
get_group_vars.mvbrmsterms <- function(x, ...) {
  .get_group_vars(x, ...)
}

# workhorse of the get_group_vars methods: collects grouping variables
# from 're', 'me', and 'ac' terms, splits composed factors such as
# "g1:g2" into their components, and returns them sorted and unique
.get_group_vars <- function(x, ...) {
  out <- c(get_re_groups(x), get_me_groups(x), get_ac_groups(x))
  out <- out[nzchar(out)]
  if (length(out)) {
    c(out) <- unlist(strsplit(out, ":"))
    out <- sort(unique(out))
  }
  out
}

# get names of grouping variables of re terms
get_re_groups <- function(x, ...) {
  ufrom_list(get_re(x)$gcall, "groups")
}

# extract information about groups with a certain distribution
get_dist_groups <- function(ranef, dist) {
  out <- subset2(ranef, dist = dist)
  out[!duplicated(out$group), c("group", "ggn", "id")]
}

# extract list of levels with one element per grouping factor
# @param ... objects with a level attribute
get_levels <- function(...) {
  dots <- list(...)
  out <- vector("list", length(dots))
  for (i in seq_along(out)) {
    levels <- attr(dots[[i]], "levels", exact = TRUE)
    if (is.list(levels)) {
      # list attributes must already carry names per grouping factor
      stopifnot(!is.null(names(levels)))
      out[[i]] <- as.list(levels)
    } else if (!is.null(levels)) {
      # plain level vectors are named after the supplied argument
      stopifnot(isTRUE(nzchar(names(dots)[i])))
      out[[i]] <- setNames(list(levels), names(dots)[[i]])
    }
  }
  out <- unlist(out, recursive = FALSE)
  # the first occurrence of each grouping factor wins
  out[!duplicated(names(out))]
}

# coerce 'x' to a factor if necessary and return its levels
extract_levels <- function(x) {
  # do not check for NAs according to #1355
  if (!is.factor(x)) {
    x <- factor(x)
  }
  levels(x)
}

# extract names of group-level effects
# @param ranef output of tidy_ranef()
# @param group optional name of a grouping factor for
#   which to extract effect names
# @param bylevels optional names of 'by' levels for
#   which to extract effect names
# @return a vector of character strings
get_rnames <- function(ranef, group = NULL, bylevels = NULL) {
  stopifnot(is.data.frame(ranef))
  if (!is.null(group)) {
    group <- as_one_character(group)
    ranef <- subset2(ranef, group = group)
  }
  # effect names are only well defined within a single grouping factor
  stopifnot(length(unique(ranef$group)) == 1L)
  out <- paste0(usc(combine_prefix(ranef), "suffix"), ranef$coef)
  if (isTRUE(nzchar(ranef$by[1]))) {
    if (!is.null(bylevels)) {
      stopifnot(all(bylevels %in% ranef$bylevels[[1]]))
    } else {
      bylevels <- ranef$bylevels[[1]]
    }
    bylabels <- paste0(ranef$by[1], bylevels)
    # one effect name per combination of coefficient and 'by' level
    out <- outer(out, bylabels, paste, sep = ":")
  }
  out
}

# validate within-group covariance matrices
# @param M a matrix to be validated
validate_recov_matrix <- function(M) {
  M <- as.matrix(M)
  if (!isSymmetric(unname(M))) {
    stop2("Within-group covariance matrices must be symmetric.")
  }
  # level names may be supplied via row names or column names
  found_levels <- rownames(M)
  if (is.null(found_levels)) {
    found_levels <- colnames(M)
  }
  if (is.null(found_levels)) {
    stop2("Row or column names are required for within-group covariance matrices.")
  }
  # ensure rows and columns are labeled consistently
  rownames(M) <- colnames(M) <- found_levels
  evs <- eigen(M, symmetric = TRUE, only.values = TRUE)$values
  if (min(evs) <= 0) {
    stop2("Within-group covariance matrices must be positive definite.")
  }
  M
}

# check validity of the 'cov_ranef' argument
# argument 'cov_ranef' is deprecated as of version 2.12.5
validate_cov_ranef <- function(cov_ranef) {
  if (is.null(cov_ranef)) {
    return(cov_ranef)
  }
  warning2(
    "Argument 'cov_ranef' is deprecated and will be removed in the future. ",
    "Please use argument 'cov' in function 'gr' instead."
  )
  cr_names <- names(cov_ranef)
  cr_is_named <- length(cr_names) && all(nzchar(cr_names))
  if (!is.list(cov_ranef) || !cr_is_named) {
    stop2("'cov_ranef' must be a named list.")
  }
  if (any(duplicated(cr_names))) {
    stop2("Names of 'cov_ranef' must be unique.")
  }
  cov_ranef
}

# update 'ranef' according to information in 'cov_ranef'
# argument 'cov_ranef' is deprecated as of version 2.12.5
update_ranef_cov <- function(ranef, bterms) {
  cr_names <- names(bterms$cov_ranef)
  if (!length(cr_names)) {
    return(ranef)
  }
  unused_names <- setdiff(cr_names, ranef$group)
  if (length(unused_names)) {
    stop2("The following elements of 'cov_ranef' are unused: ",
          collapse_comma(unused_names))
  }
  # flag groups for which a custom covariance matrix was supplied
  has_cov <- ranef$group %in% cr_names
  ranef$cov[has_cov] <- ranef$group[has_cov]
  ranef
}

# extract 'cov_ranef' for storage in 'data2'
# @param x a list-like object
get_data2_cov_ranef <- function(x) {
  x[["cov_ranef"]]
}
brms/R/exclude_terms.R0000644000176200001440000000271314213413565014413 0ustar liggesusers
# exclude predictor terms from being evaluated
exclude_terms <- function(x, ...) {
  UseMethod("exclude_terms")
}

#' @export
exclude_terms.brmsfit <- function(x, ...) {
  x$formula <- exclude_terms(x$formula, ...)
  x
}

#' @export
exclude_terms.mvbrmsformula <- function(x, ...) {
  # apply the exclusion to each univariate formula in turn
  for (i in seq_along(x$forms)) {
    x$forms[[i]] <- exclude_terms(x$forms[[i]], ...)
  }
  x
}

#' @export
exclude_terms.brmsformula <- function(
  x, excl_term_types = NULL, incl_autocor = TRUE,
  smooths_only = FALSE, offset = TRUE, ...
) {
  excl_term_types <- as.character(excl_term_types)
  # TODO: deprecate the three arguments below?
  incl_autocor <- as_one_logical(incl_autocor)
  smooths_only <- as_one_logical(smooths_only)
  offset <- as_one_logical(offset)
  if (!incl_autocor) {
    c(excl_term_types) <- "ac"
  }
  if (!offset) {
    c(excl_term_types) <- "offset"
  }
  if (smooths_only) {
    # keep only smooth terms by excluding all other term types
    excl_term_types <- setdiff(all_term_types(), "sm")
  }
  if (!length(excl_term_types)) {
    return(x)
  }
  invalid_types <- setdiff(excl_term_types, all_term_types())
  if (length(invalid_types)) {
    stop2("The following term types are invalid: ",
          collapse_comma(invalid_types))
  }
  # store the exclusion info on the main formula and all parameter formulas
  attr(x$formula, "excl_term_types") <- excl_term_types
  for (i in seq_along(x$pforms)) {
    attr(x$pforms[[i]], "excl_term_types") <- excl_term_types
  }
  x
}

# extract names of excluded term types
excluded_term_types <- function(x) {
  as.character(attr(x, "excl_term_types", TRUE))
}
brms/R/bridgesampling.R0000644000176200001440000002160114454227765014550 0ustar liggesusers
#' Log Marginal Likelihood via Bridge Sampling
#'
#' Computes log marginal likelihood via bridge sampling,
#' which can be used in the computation of bayes factors
#' and posterior model probabilities.
#' The \code{brmsfit} method is just a thin wrapper around
#' the corresponding method for \code{stanfit} objects.
#'
#' @aliases bridge_sampler
#'
#' @param samples A \code{brmsfit} object.
#' @param recompile Logical, indicating whether the Stan model should be
#'   recompiled. This may be necessary if you are running bridge sampling on
#'   another machine than the one used to fit the model. No recompilation
#'   is done by default.
#' @param ... Additional arguments passed to
#'   \code{\link[bridgesampling:bridge_sampler]{bridge_sampler.stanfit}}.
#'
#' @details Computing the marginal likelihood requires samples of all variables
#'   defined in Stan's \code{parameters} block to be saved. Otherwise
#'   \code{bridge_sampler} cannot be computed. Thus, please set \code{save_pars
#'   = save_pars(all = TRUE)} in the call to \code{brm}, if you are planning to
#'   apply \code{bridge_sampler} to your models.
#'
#'   The computation of marginal likelihoods based on bridge sampling requires
#'   a lot more posterior draws than usual. A good conservative
#'   rule of thumb is perhaps 10-fold more draws (read: the default of 4000
#'   draws may not be enough in many cases). If not enough posterior
#'   draws are provided, the bridge sampling algorithm tends to be
#'   unstable leading to considerably different results each time it is run.
#'   We thus recommend running \code{bridge_sampler}
#'   multiple times to check the stability of the results.
#'
#'   More details are provided under
#'   \code{\link[bridgesampling:bridge_sampler]{bridgesampling::bridge_sampler}}.
#'
#' @seealso \code{
#'   \link[brms:bayes_factor.brmsfit]{bayes_factor},
#'   \link[brms:post_prob.brmsfit]{post_prob}
#' }
#'
#' @examples
#' \dontrun{
#' # model with the treatment effect
#' fit1 <- brm(
#'   count ~ zAge + zBase + Trt,
#'   data = epilepsy, family = negbinomial(),
#'   prior = prior(normal(0, 1), class = b),
#'   save_pars = save_pars(all = TRUE)
#' )
#' summary(fit1)
#' bridge_sampler(fit1)
#'
#' # model without the treatment effect
#' fit2 <- brm(
#'   count ~ zAge + zBase,
#'   data = epilepsy, family = negbinomial(),
#'   prior = prior(normal(0, 1), class = b),
#'   save_pars = save_pars(all = TRUE)
#' )
#' summary(fit2)
#' bridge_sampler(fit2)
#' }
#'
#' @method bridge_sampler brmsfit
#' @importFrom bridgesampling bridge_sampler
#' @export bridge_sampler
#' @export
bridge_sampler.brmsfit <- function(samples, recompile = FALSE, ...) {
  out <- get_criterion(samples, "marglik")
  if (inherits(out, "bridge") && !is.na(out$logml)) {
    # return precomputed criterion
    return(out)
  }
  # bring older model objects up to date with the current brms version
  samples <- restructure(samples)
  if (samples$version$brms <= "1.8.0") {
    stop2(
      "Models fitted with brms 1.8.0 or lower are not ",
      "usable in method 'bridge_sampler'."
    )
  }
  if (!is_normalized(samples$model)) {
    stop2(
      "The Stan model has to be normalized to be ",
      "usable in method 'bridge_sampler'."
    )
  }
  # otherwise bridge_sampler may fail in a new R session or on another machine
  samples <- update_misc_env(samples, recompile = recompile)
  # bridge sampling can fail for several reasons; turn a raw error
  # into an actionable message for the user
  out <- try(bridge_sampler(samples$fit, ...))
  if (is_try_error(out)) {
    stop2(
      "Bridgesampling failed. Perhaps you did not set ",
      "'save_pars = save_pars(all = TRUE)' when fitting your model? ",
      "If you are running bridge sampling on another machine than the one ",
      "used to fit the model, you may need to set recompile = TRUE."
    )
  }
  out
}

#' Bayes Factors from Marginal Likelihoods
#'
#' Compute Bayes factors from marginal likelihoods.
#'
#' @aliases bayes_factor
#'
#' @param x1 A \code{brmsfit} object
#' @param x2 Another \code{brmsfit} object based on the same responses.
#' @param log Report Bayes factors on the log-scale?
#' @param ... Additional arguments passed to
#'   \code{\link[brms:bridge_sampler.brmsfit]{bridge_sampler}}.
#'
#' @details Computing the marginal likelihood requires samples
#'   of all variables defined in Stan's \code{parameters} block
#'   to be saved. Otherwise \code{bayes_factor} cannot be computed.
#'   Thus, please set \code{save_all_pars = TRUE} in the call to \code{brm},
#'   if you are planning to apply \code{bayes_factor} to your models.
#'
#'   The computation of Bayes factors based on bridge sampling requires
#'   a lot more posterior samples than usual. A good conservative
#'   rule of thumb is perhaps 10-fold more samples (read: the default of 4000
#'   samples may not be enough in many cases). If not enough posterior
#'   samples are provided, the bridge sampling algorithm tends to be unstable,
#'   leading to considerably different results each time it is run.
#'   We thus recommend running \code{bayes_factor}
#'   multiple times to check the stability of the results.
#'
#'   More details are provided under
#'   \code{\link[bridgesampling:bf]{bridgesampling::bayes_factor}}.
#'
#' @seealso \code{
#'   \link[brms:bridge_sampler.brmsfit]{bridge_sampler},
#'   \link[brms:post_prob.brmsfit]{post_prob}
#' }
#'
#' @examples
#' \dontrun{
#' # model with the treatment effect
#' fit1 <- brm(
#'   count ~ zAge + zBase + Trt,
#'   data = epilepsy, family = negbinomial(),
#'   prior = prior(normal(0, 1), class = b),
#'   save_all_pars = TRUE
#' )
#' summary(fit1)
#'
#' # model without the treatment effect
#' fit2 <- brm(
#'   count ~ zAge + zBase,
#'   data = epilepsy, family = negbinomial(),
#'   prior = prior(normal(0, 1), class = b),
#'   save_all_pars = TRUE
#' )
#' summary(fit2)
#'
#' # compute the bayes factor
#' bayes_factor(fit1, fit2)
#' }
#'
#' @method bayes_factor brmsfit
#' @importFrom bridgesampling bayes_factor
#' @export bayes_factor
#' @export
bayes_factor.brmsfit <- function(x1, x2, log = FALSE, ...) {
  # capture the expressions used as model names before any evaluation
  model_name_1 <- deparse0(substitute(x1))
  model_name_2 <- deparse0(substitute(x2))
  # Bayes factors are only meaningful for models of the same responses
  match_response(list(x1, x2))
  bridge1 <- bridge_sampler(x1, ...)
  bridge2 <- bridge_sampler(x2, ...)
  # dispatches to the 'bridge' method of bridgesampling::bayes_factor
  out <- bayes_factor(bridge1, bridge2, log = log)
  attr(out, "model_names") <- c(model_name_1, model_name_2)
  out
}

#' Posterior Model Probabilities from Marginal Likelihoods
#'
#' Compute posterior model probabilities from marginal likelihoods.
#' The \code{brmsfit} method is just a thin wrapper around
#' the corresponding method for \code{bridge} objects.
#'
#' @aliases post_prob
#'
#' @inheritParams loo.brmsfit
#' @param prior_prob Numeric vector with prior model probabilities.
#'   If omitted, a uniform prior is used (i.e., all models are equally
#'   likely a priori). The default \code{NULL} corresponds to equal
#'   prior model weights.
#'
#' @details Computing the marginal likelihood requires samples
#'   of all variables defined in Stan's \code{parameters} block
#'   to be saved. Otherwise \code{post_prob} cannot be computed.
#'   Thus, please set \code{save_all_pars = TRUE} in the call to \code{brm},
#'   if you are planning to apply \code{post_prob} to your models.
#'
#'   The computation of model probabilities based on bridge sampling requires
#'   a lot more posterior samples than usual. A good conservative
#'   rule of thumb is perhaps 10-fold more samples (read: the default of 4000
#'   samples may not be enough in many cases). If not enough posterior
#'   samples are provided, the bridge sampling algorithm tends to be
#'   unstable leading to considerably different results each time it is run.
#'   We thus recommend running \code{post_prob}
#'   multiple times to check the stability of the results.
#'
#'   More details are provided under
#'   \code{\link[bridgesampling:post_prob]{bridgesampling::post_prob}}.
#'
#' @seealso \code{
#'   \link[brms:bridge_sampler.brmsfit]{bridge_sampler},
#'   \link[brms:bayes_factor.brmsfit]{bayes_factor}
#' }
#'
#' @examples
#' \dontrun{
#' # model with the treatment effect
#' fit1 <- brm(
#'   count ~ zAge + zBase + Trt,
#'   data = epilepsy, family = negbinomial(),
#'   prior = prior(normal(0, 1), class = b),
#'   save_all_pars = TRUE
#' )
#' summary(fit1)
#'
#' # model without the treatment effect
#' fit2 <- brm(
#'   count ~ zAge + zBase,
#'   data = epilepsy, family = negbinomial(),
#'   prior = prior(normal(0, 1), class = b),
#'   save_all_pars = TRUE
#' )
#' summary(fit2)
#'
#' # compute the posterior model probabilities
#' post_prob(fit1, fit2)
#'
#' # specify prior model probabilities
#' post_prob(fit1, fit2, prior_prob = c(0.8, 0.2))
#' }
#'
#' @method post_prob brmsfit
#' @importFrom bridgesampling post_prob
#' @export post_prob
#' @export
post_prob.brmsfit <- function(x, ..., prior_prob = NULL, model_names = NULL) {
  # separate the model objects from arguments passed on to bridge_sampler
  args <- split_dots(x, ..., model_names = model_names)
  models <- args$models
  args$models <- NULL
  # compute one bridge sampling result per model
  bs <- vector("list", length(models))
  for (i in seq_along(models)) {
    bs[[i]] <- do_call(bridge_sampler, c(list(models[[i]]), args))
  }
  model_names <- names(models)
  # dispatches to the 'bridge' method of bridgesampling::post_prob
  do_call(post_prob, c(bs, nlist(prior_prob, model_names)))
}
brms/R/stan-response.R0000644000176200001440000007046014477011603014354 0ustar 
liggesusers# unless otherwise specifiedm functions return a named list # of Stan code snippets to be pasted together later on # Stan code for the response variables stan_response <- function(bterms, data, normalize) { stopifnot(is.brmsterms(bterms)) lpdf <- stan_lpdf_name(normalize) family <- bterms$family rtype <- str_if(use_int(family), "int", "real") multicol <- has_multicol(family) px <- check_prefix(bterms) resp <- usc(combine_prefix(px)) out <- list(resp_type = rtype) if (nzchar(resp)) { # global N is defined elsewhere str_add(out$data) <- glue( " int N{resp}; // number of observations\n" ) str_add(out$pll_def) <- glue( " int N{resp} = end - start + 1;\n" ) } if (has_cat(family)) { str_add(out$data) <- glue( " int ncat{resp}; // number of categories\n" ) str_add(out$pll_args) <- glue(", data int ncat{resp}") } if (has_multicol(family)) { if (rtype == "real") { str_add(out$data) <- glue( " array[N{resp}] vector[ncat{resp}] Y{resp}; // response array\n" ) str_add(out$pll_args) <- glue(", data vector[] Y{resp}") } else if (rtype == "int") { str_add(out$data) <- glue( " array[N{resp}, ncat{resp}] int Y{resp}; // response array\n" ) str_add(out$pll_args) <- glue(", data int[,] Y{resp}") } } else { if (rtype == "real") { # type vector (instead of real[]) is required by some PDFs str_add(out$data) <- glue( " vector[N{resp}] Y{resp}; // response variable\n" ) str_add(out$pll_args) <- glue(", data vector Y{resp}") } else if (rtype == "int") { str_add(out$data) <- glue( " array[N{resp}] int Y{resp}; // response variable\n" ) str_add(out$pll_args) <- glue(", data int[] Y{resp}") } } if (has_ndt(family)) { str_add(out$tdata_def) <- glue( " real min_Y{resp} = min(Y{resp});\n" ) } if (has_trials(family) || is.formula(bterms$adforms$trials)) { str_add(out$data) <- glue( " array[N{resp}] int trials{resp}; // number of trials\n" ) str_add(out$pll_args) <- glue(", data int[] trials{resp}") } if (is.formula(bterms$adforms$weights)) { str_add(out$data) <- glue( " vector[N{resp}] 
weights{resp}; // model weights\n" ) str_add(out$pll_args) <- glue(", data vector weights{resp}") } if (has_thres(family)) { groups <- get_thres_groups(family) if (any(nzchar(groups))) { str_add(out$data) <- glue( " int ngrthres{resp}; // number of threshold groups\n", " array[ngrthres{resp}] int nthres{resp}; // number of thresholds\n", " array[N{resp}, 2] int Jthres{resp}; // threshold indices\n" ) str_add(out$tdata_def) <- glue( " int nmthres{resp} = sum(nthres{resp});", " // total number of thresholds\n", " array[ngrthres{resp}] int Kthres_start{resp};", " // start index per threshold group\n", " array[ngrthres{resp}] int Kthres_end{resp};", " // end index per threshold group\n" ) str_add(out$tdata_comp) <- glue( " Kthres_start{resp}[1] = 1;\n", " Kthres_end{resp}[1] = nthres{resp}[1];\n", " for (i in 2:ngrthres{resp}) {{\n", " Kthres_start{resp}[i] = Kthres_end{resp}[i-1] + 1;\n", " Kthres_end{resp}[i] = Kthres_end{resp}[i-1] + nthres{resp}[i];\n", " }}\n" ) str_add(out$pll_args) <- glue( ", data int[] nthres{resp}, data int[,] Jthres{resp}" ) } else { str_add(out$data) <- glue( " int nthres{resp}; // number of thresholds\n" ) str_add(out$pll_args) <- glue(", data int nthres{resp}") } } if (is.formula(bterms$adforms$se)) { str_add(out$data) <- glue( " vector[N{resp}] se{resp}; // known sampling error\n" ) str_add(out$tdata_def) <- glue( " vector[N{resp}] se2{resp} = square(se{resp});\n" ) str_add(out$pll_args) <- glue( ", data vector se{resp}, data vector se2{resp}" ) } if (is.formula(bterms$adforms$dec)) { str_add(out$data) <- glue( " array[N{resp}] int dec{resp}; // decisions\n" ) str_add(out$pll_args) <- glue(", data int[] dec{resp}") } if (is.formula(bterms$adforms$rate)) { str_add(out$data) <- glue( " vector[N{resp}] denom{resp};", " // response denominator\n" ) str_add(out$tdata_def) <- glue( " // log response denominator\n", " vector[N{resp}] log_denom{resp} = log(denom{resp});\n" ) str_add(out$pll_args) <- glue( ", data vector denom{resp}, data vector 
log_denom{resp}" ) } if (is.formula(bterms$adforms$cens)) { str_add(out$data) <- glue( " array[N{resp}] int cens{resp}; // indicates censoring\n" ) str_add(out$pll_args) <- glue(", data int[] cens{resp}") y2_expr <- get_ad_expr(bterms, "cens", "y2") if (!is.null(y2_expr)) { # interval censoring is required if (rtype == "int") { str_add(out$data) <- glue( " array[N{resp}] int rcens{resp};" ) str_add(out$pll_args) <- glue(", data int[] rcens{resp}") } else { str_add(out$data) <- glue( " vector[N{resp}] rcens{resp};" ) str_add(out$pll_args) <- glue(", data vector rcens{resp}") } str_add(out$data) <- " // right censor points for interval censoring\n" } } bounds <- trunc_bounds(bterms, data = data) if (any(bounds$lb > -Inf)) { str_add(out$data) <- glue( " array[N{resp}] {rtype} lb{resp}; // lower truncation bounds;\n" ) str_add(out$pll_args) <- glue(", data {rtype}[] lb{resp}") } if (any(bounds$ub < Inf)) { str_add(out$data) <- glue( " array[N{resp}] {rtype} ub{resp}; // upper truncation bounds\n" ) str_add(out$pll_args) <- glue(", data {rtype}[] ub{resp}") } if (is.formula(bterms$adforms$mi)) { # TODO: pass 'Ybounds' via 'standata' instead of hardcoding them Ybounds <- trunc_bounds(bterms, data, incl_family = TRUE, stan = TRUE) sdy <- get_sdy(bterms, data) if (is.null(sdy)) { # response is modeled without measurement error str_add(out$data) <- glue( " int Nmi{resp}; // number of missings\n", " array[Nmi{resp}] int Jmi{resp}; // positions of missings\n" ) str_add(out$par) <- glue( " vector{Ybounds}[Nmi{resp}] Ymi{resp}; // estimated missings\n" ) str_add(out$model_no_pll_def) <- glue( " // vector combining observed and missing responses\n", " vector[N{resp}] Yl{resp} = Y{resp};\n" ) str_add(out$model_no_pll_comp_basic) <- glue( " Yl{resp}[Jmi{resp}] = Ymi{resp};\n" ) str_add(out$pll_args) <- glue(", vector Yl{resp}") } else { str_add(out$data) <- glue( " // data for measurement-error in the response\n", " vector[N{resp}] noise{resp};\n", " // information about 
non-missings\n", " int Nme{resp};\n", " array[Nme{resp}] int Jme{resp};\n" ) str_add(out$par) <- glue( " vector{Ybounds}[N{resp}] Yl{resp}; // latent variable\n" ) str_add(out$model_prior) <- glue( " target += normal_{lpdf}(Y{resp}[Jme{resp}]", " | Yl{resp}[Jme{resp}], noise{resp}[Jme{resp}]);\n" ) str_add(out$pll_args) <- glue(", vector Yl{resp}") } } if (is.formula(bterms$adforms$vreal)) { # vectors of real values for use in custom families vreal <- eval_rhs(bterms$adforms$vreal) k <- length(vreal$vars) str_add(out$data) <- cglue( " // data for custom real vectors\n", " array[N{resp}] real vreal{seq_len(k)}{resp};\n" ) str_add(out$pll_args) <- cglue(", data real[] vreal{seq_len(k)}{resp}") } if (is.formula(bterms$adforms$vint)) { # vectors of integer values for use in custom families vint <- eval_rhs(bterms$adforms$vint) k <- length(vint$vars) str_add(out$data) <- cglue( " // data for custom integer vectors\n", " array[N{resp}] int vint{seq_len(k)}{resp};\n" ) str_add(out$pll_args) <- cglue(", data int[] vint{seq_len(k)}{resp}") } out } # Stan code for ordinal thresholds # intercepts in ordinal models require special treatment # and must be present even when using non-linear predictors # thus the relevant Stan code cannot be part of 'stan_fe' stan_thres <- function(bterms, data, prior, normalize, ...) 
{ stopifnot(is.btl(bterms) || is.btnl(bterms)) out <- list() if (!is_ordinal(bterms)) { return(out) } px <- check_prefix(bterms) p <- usc(combine_prefix(px)) resp <- usc(px$resp) type <- str_if(has_ordered_thres(bterms), "ordered", "vector") coef_type <- str_if(has_ordered_thres(bterms), "", "real") gr <- grb <- "" groups <- get_thres_groups(bterms) if (has_thres_groups(bterms)) { # include one threshold vector per group gr <- usc(seq_along(groups)) grb <- paste0("[", seq_along(groups), "]") } if (fix_intercepts(bterms)) { # identify ordinal mixtures by fixing their thresholds to the same values if (has_equidistant_thres(bterms)) { stop2("Cannot use equidistant and fixed thresholds at the same time.") } # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- " // ordinal thresholds\n" str_add(out$tpar_def) <- cglue( " {type}[nthres{resp}{grb}] Intercept{p}{gr};\n" ) str_add(out$tpar_comp) <- " // fix thresholds across ordinal mixture components\n" str_add(out$tpar_comp) <- cglue( " Intercept{p}{gr} = fixed_Intercept{resp}{gr};\n" ) } else { if (has_equidistant_thres(bterms)) { bound <- subset2(prior, class = "delta", group = "", ls = px)$bound for (i in seq_along(groups)) { str_add_list(out) <- stan_prior( prior, class = "Intercept", group = groups[i], prefix = "first_", suffix = glue("{p}{gr[i]}"), px = px, comment = "first threshold", normalize = normalize ) str_add_list(out) <- stan_prior( prior, class = "delta", group = groups[i], px = px, suffix = gr[i], comment = "distance between thresholds", normalize = normalize ) } str_add(out$tpar_def) <- " // temporary thresholds for centered predictors\n" str_add(out$tpar_def) <- cglue( " {type}[nthres{resp}{grb}] Intercept{p}{gr};\n" ) str_add(out$tpar_comp) <- " // compute equidistant thresholds\n" str_add(out$tpar_comp) <- cglue( " for (k in 1:(nthres{resp}{grb})) {{\n", " Intercept{p}{gr}[k] = first_Intercept{p}{gr}", " + (k - 1.0) * delta{p}{gr};\n", " }}\n" ) } else { for (i in 
seq_along(groups)) { str_add_list(out) <- stan_prior( prior, class = "Intercept", group = groups[i], coef = get_thres(bterms, group = groups[i]), type = glue("{type}[nthres{resp}{grb[i]}]"), coef_type = coef_type, px = px, suffix = glue("{p}{gr[i]}"), comment = "temporary thresholds for centered predictors", normalize = normalize ) } } } stz <- "" if (has_sum_to_zero_thres(bterms)) { stz <- "_stz" str_add(out$tpar_def) <- cglue( " vector[nthres{resp}{grb}] Intercept{p}_stz{gr};", " // sum-to-zero constraint thresholds\n" ) str_add(out$tpar_comp) <- " // compute sum-to-zero constraint thresholds\n" str_add(out$tpar_comp) <- cglue( " Intercept{p}_stz{gr} = Intercept{p}{gr} - mean(Intercept{p}{gr});\n" ) } if (has_thres_groups(bterms)) { # merge all group specific thresholds into one vector str_add(out$tpar_def) <- glue( " vector[nmthres{resp}] merged_Intercept{p}{stz}; // merged thresholds\n" ) str_add(out$tpar_comp) <- " // merge thresholds\n" grj <- seq_along(groups) grj <- glue("Kthres_start{resp}[{grj}]:Kthres_end{resp}[{grj}]") str_add(out$tpar_comp) <- cglue( " merged_Intercept{p}{stz}[{grj}] = Intercept{p}{stz}{gr};\n" ) str_add(out$pll_args) <- cglue(", vector merged_Intercept{p}{stz}") } else { str_add(out$pll_args) <- glue(", vector Intercept{p}{stz}") } sub_X_means <- "" if (stan_center_X(bterms) && length(all_terms(bterms$fe))) { # centering of the design matrix improves convergence # ordinal families either use thres - mu or mu - thres # both implies adding to the temporary intercept sub_X_means <- glue(" + dot_product(means_X{p}, b{p})") } str_add(out$gen_def) <- " // compute actual thresholds\n" str_add(out$gen_def) <- cglue( " vector[nthres{resp}{grb}] b{p}_Intercept{gr}", " = Intercept{p}{stz}{gr}{sub_X_means};\n" ) out } # Stan code for the baseline functions of the Cox model stan_bhaz <- function(bterms, prior, threads, normalize, ...) 
{ stopifnot(is.btl(bterms) || is.btnl(bterms)) out <- list() if (!is_cox(bterms$family)) { return(out) } lpdf <- stan_lpdf_name(normalize) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) resp <- usc(px$resp) slice <- stan_slice(threads) str_add(out$data) <- glue( " // data for flexible baseline functions\n", " int Kbhaz{resp}; // number of basis functions\n", " // design matrix of the baseline function\n", " matrix[N{resp}, Kbhaz{resp}] Zbhaz{resp};\n", " // design matrix of the cumulative baseline function\n", " matrix[N{resp}, Kbhaz{resp}] Zcbhaz{resp};\n", " // a-priori concentration vector of baseline coefficients\n", " vector[Kbhaz{resp}] con_sbhaz{resp};\n" ) str_add(out$par) <- glue( " simplex[Kbhaz{resp}] sbhaz{resp}; // baseline coefficients\n" ) str_add(out$tpar_prior) <- glue( " lprior += dirichlet_{lpdf}(sbhaz{resp} | con_sbhaz{resp});\n" ) str_add(out$model_def) <- glue( " // compute values of baseline function\n", " vector[N{resp}] bhaz{resp} = Zbhaz{resp}{slice} * sbhaz{resp};\n", " // compute values of cumulative baseline function\n", " vector[N{resp}] cbhaz{resp} = Zcbhaz{resp}{slice} * sbhaz{resp};\n" ) str_add(out$pll_args) <- glue( ", data matrix Zbhaz{resp}, data matrix Zcbhaz{resp}, vector sbhaz{resp}" ) out } # Stan code specific to mixture families stan_mixture <- function(bterms, data, prior, threads, normalize, ...) 
{ out <- list() if (!is.mixfamily(bterms$family)) { return(out) } lpdf <- stan_lpdf_name(normalize) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) nmix <- length(bterms$family$mix) theta_pred <- grepl("^theta", names(bterms$dpars)) theta_pred <- bterms$dpars[theta_pred] theta_fix <- grepl("^theta", names(bterms$fdpars)) theta_fix <- bterms$fdpars[theta_fix] def_thetas <- cglue( " real theta{1:nmix}{p}; // mixing proportion\n" ) if (length(theta_pred)) { if (length(theta_pred) != nmix - 1) { stop2("Can only predict all but one mixing proportion.") } missing_id <- setdiff(1:nmix, dpar_id(names(theta_pred))) str_add(out$model_def) <- glue( " vector[N{p}] theta{missing_id}{p} = rep_vector(0.0, N{p});\n", " real log_sum_exp_theta{p};\n" ) sum_exp_theta <- glue("exp(theta{1:nmix}{p}[n])", collapse = " + ") str_add(out$model_comp_mix) <- glue( " for (n in 1:N{p}) {{\n", " // scale theta to become a probability vector\n", " log_sum_exp_theta{p} = log({sum_exp_theta});\n" ) str_add(out$model_comp_mix) <- cglue( " theta{1:nmix}{p}[n] = theta{1:nmix}{p}[n] - log_sum_exp_theta{p};\n" ) str_add(out$model_comp_mix) <- " }\n" } else if (length(theta_fix)) { # fix mixture proportions if (length(theta_fix) != nmix) { stop2("Can only fix no or all mixing proportions.") } str_add(out$data) <- " // mixing proportions\n" str_add(out$data) <- cglue( " real theta{1:nmix}{p};\n" ) str_add(out$pll_args) <- cglue(", real theta{1:nmix}{p}") } else { # estimate mixture proportions str_add(out$data) <- glue( " vector[{nmix}] con_theta{p}; // prior concentration\n" ) str_add(out$par) <- glue( " simplex[{nmix}] theta{p}; // mixing proportions\n" ) str_add(out$tpar_prior) <- glue( " lprior += dirichlet_{lpdf}(theta{p} | con_theta{p});\n" ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- " // mixing proportions\n" str_add(out$tpar_def) <- cglue( " real theta{1:nmix}{p};\n" ) str_add(out$tpar_comp) <- cglue( " theta{1:nmix}{p} = 
theta{p}[{1:nmix}];\n" ) str_add(out$pll_args) <- cglue(", real theta{1:nmix}{p}") } if (order_intercepts(bterms)) { # identify mixtures by ordering the intercepts of their components str_add(out$par) <- glue( " ordered[{nmix}] ordered_Intercept{p}; // to identify mixtures\n" ) } if (fix_intercepts(bterms)) { # identify ordinal mixtures by fixing their thresholds to the same values stopifnot(is_ordinal(bterms)) gr <- grb <- "" groups <- get_thres_groups(bterms) if (has_thres_groups(bterms)) { # include one threshold vector per group gr <- usc(seq_along(groups)) grb <- paste0("[", seq_along(groups), "]") } type <- str_if(has_ordered_thres(bterms), "ordered", "vector") coef_type <- str_if(has_ordered_thres(bterms), "", "real") for (i in seq_along(groups)) { str_add_list(out) <- stan_prior( prior, class = "Intercept", coef = get_thres(bterms, group = groups[i]), type = glue("{type}[nthres{p}{grb[i]}]"), coef_type = coef_type, px = px, prefix = "fixed_", suffix = glue("{p}{gr[i]}"), comment = "thresholds fixed over mixture components", normalize = normalize ) } } out } # ordinal log-probability densitiy functions in Stan language # @return a character string stan_ordinal_lpmf <- function(family, link) { stopifnot(is.character(family), is.character(link)) inv_link <- stan_inv_link(link, vectorize = FALSE) th <- function(k) { # helper function generating stan code inside inv_link(.) 
    # Stan code of the k-th linear term inside the inverse link:
    # cumulative and sratio parameterize P via thres[k] - mu,
    # cratio and acat via mu - thres[k]; 'disc' scales the distance.
    if (family %in% c("cumulative", "sratio")) {
      out <- glue("thres[{k}] - mu")
    } else if (family %in% c("cratio", "acat")) {
      out <- glue("mu - thres[{k}]")
    }
    glue("disc * ({out})")
  }
  # Stan doc-comment and signature of the generated lpmf
  out <- glue(
    "  /* {family}-{link} log-PDF for a single response\n",
    "   * Args:\n",
    "   *   y: response category\n",
    "   *   mu: latent mean parameter\n",
    "   *   disc: discrimination parameter\n",
    "   *   thres: ordinal thresholds\n",
    "   * Returns:\n",
    "   *   a scalar to be added to the log posterior\n",
    "   */\n",
    "  real {family}_{link}_lpmf(int y, real mu, real disc, vector thres) {{\n"
  )
  # define the function body
  if (family == "cumulative") {
    # cumulative model: P(y = k) is a difference of link-CDF values
    if (inv_link == "inv_logit") {
      # logit link: stay on the log scale for numerical stability
      str_add(out) <- glue(
        "    int nthres = num_elements(thres);\n",
        "    if (y == 1) {{\n",
        "      return log_inv_logit({th(1)});\n",
        "    }} else if (y == nthres + 1) {{\n",
        "      return log1m_inv_logit({th('nthres')});\n",
        "    }} else {{\n",
        # TODO: replace with log_inv_logit_diff once rstan >= 2.25
        "      return log_diff_exp(\n",
        "        log_inv_logit({th('y')}), \n",
        "        log_inv_logit({th('y - 1')})\n",
        "      );\n",
        "    }}\n",
        "  }}\n"
      )
    } else {
      # generic links: compute the probability, then take its log
      str_add(out) <- glue(
        "    int nthres = num_elements(thres);\n",
        "    real p;\n",
        "    if (y == 1) {{\n",
        "      p = {inv_link}({th(1)});\n",
        "    }} else if (y == nthres + 1) {{\n",
        "      p = 1 - {inv_link}({th('nthres')});\n",
        "    }} else {{\n",
        "      p = {inv_link}({th('y')}) -\n",
        "          {inv_link}({th('y - 1')});\n",
        "    }}\n",
        "    return log(p);\n",
        "  }}\n"
      )
    }
  } else if (family %in% c("sratio", "cratio")) {
    # (continuation-)ratio models: built from per-category log
    # continuation probabilities q[k]; 'qk' is the link-specific
    # Stan expression for log(q[k])
    # TODO: support 'softit' link as well
    if (inv_link == "inv_cloglog") {
      qk <- str_if(
        family == "sratio",
        "-exp({th('k')})",
        "log1m_exp(-exp({th('k')}))"
      )
    } else if (inv_link == "inv_logit") {
      qk <- str_if(
        family == "sratio",
        "log1m_inv_logit({th('k')})",
        "log_inv_logit({th('k')})"
      )
    } else if (inv_link == "Phi") {
      # TODO: replace with more stable std_normal_lcdf once rstan >= 2.25
      qk <- str_if(
        family == "sratio",
        # approximation of the probit (lc)cdf via a logistic polynomial
        "log1m_inv_logit(0.07056 * pow({th('k')}, 3.0) + 1.5976 * {th('k')})",
        "log_inv_logit(0.07056 * pow({th('k')}, 3.0) + 1.5976 * {th('k')})"
      )
    } else if (inv_link == "inv_cauchit") {
      qk <- str_if(
        family == "sratio",
        "cauchy_lccdf({th('k')}|0,1)",
        "cauchy_lcdf({th('k')}|0,1)"
      )
    }
    # interpolate th('k') etc. into the chosen template now
    qk <- glue(qk)
    str_add(out) <- glue(
      "    int nthres = num_elements(thres);\n",
      "    vector[nthres + 1] p;\n",
      "    vector[nthres] q;\n",
      "    int k = 1;\n",
      "    while (k <= min(y, nthres)) {{\n",
      "      q[k] = {qk};\n",
      "      p[k] = log1m_exp(q[k]);\n",
      "      for (kk in 1:(k - 1)) p[k] = p[k] + q[kk];\n",
      "      k += 1;\n",
      "    }}\n",
      "    if (y == nthres + 1) {{\n",
      "      p[nthres + 1] = sum(q);\n",
      "    }}\n",
      "    return p[y];\n",
      "  }}\n"
    )
  } else if (family == "acat") {
    # adjacent-category model
    if (inv_link == "inv_logit") {
      # closed form on the log scale for the logit link
      str_add(out) <- glue(
        "    int nthres = num_elements(thres);\n",
        "    vector[nthres + 1] p = append_row(0, cumulative_sum(disc * (mu - thres)));\n",
        "    return p[y] - log_sum_exp(p);\n",
        "  }}\n"
      )
    } else {
      # generic links: unnormalized category probabilities as products
      # of q[k] and (1 - q[k]), normalized by their sum
      str_add(out) <- glue(
        "    int nthres = num_elements(thres);\n",
        "    vector[nthres + 1] p;\n",
        "    vector[nthres] q;\n",
        "    for (k in 1:(nthres))\n",
        "      q[k] = {inv_link}({th('k')});\n",
        "    for (k in 1:(nthres + 1)) {{\n",
        "      p[k] = 1.0;\n",
        "      for (kk in 1:(k - 1)) p[k] = p[k] * q[kk];\n",
        "      for (kk in k:(nthres)) p[k] = p[k] * (1 - q[kk]);\n",
        "    }}\n",
        "    return log(p[y]) - log(sum(p));\n",
        "  }}\n"
      )
    }
  }
  # lpmf function for multiple merged thresholds
  str_add(out) <- glue(
    "  /* {family}-{link} log-PDF for a single response and merged thresholds\n",
    "   * Args:\n",
    "   *   y: response category\n",
    "   *   mu: latent mean parameter\n",
    "   *   disc: discrimination parameter\n",
    "   *   thres: vector of merged ordinal thresholds\n",
    "   *   j: start and end index for the applid threshold within 'thres'\n",
    "   * Returns:\n",
    "   *   a scalar to be added to the log posterior\n",
    "   */\n",
    "  real {family}_{link}_merged_lpmf(",
    "int y, real mu, real disc, vector thres, int[] j) {{\n",
    "    return {family}_{link}_lpmf(y | mu, disc, thres[j[1]:j[2]]);\n",
    "  }}\n"
  )
  # cumulative-logit additionally gets a wrapper around Stan's
  # efficient built-in 'ordered_logistic' (see continuation below)
  if (family == "cumulative"
# (continued) 'cumulative' + 'logit' additionally receives a wrapper
# around Stan's efficient built-in ordered_logistic function
&& link == "logit") {
    # use the more efficient 'ordered_logistic' built-in function
    str_add(out) <- glue(
      "  /* ordered-logistic log-PDF for a single response and merged thresholds\n",
      "   * Args:\n",
      "   *   y: response category\n",
      "   *   mu: latent mean parameter\n",
      "   *   thres: vector of merged ordinal thresholds\n",
      "   *   j: start and end index for the applid threshold within 'thres'\n",
      "   * Returns:\n",
      "   *   a scalar to be added to the log posterior\n",
      "   */\n",
      "  real ordered_logistic_merged_lpmf(",
      "int y, real mu, vector thres, int[] j) {{\n",
      "    return ordered_logistic_lpmf(y | mu, thres[j[1]:j[2]]);\n",
      "  }}\n"
    )
  }
  out
}

# log probability density for hurdle ordinal models
# generates the Stan lpmf of the hurdle_cumulative family, in which
# y == 0 encodes the hurdle (non-occurrence) category and the ordinal
# categories are 1, ..., nthres + 1
# @param family character string; currently only "hurdle_cumulative"
# @param link character string naming the link function of 'mu'
# @return a character string
stan_hurdle_ordinal_lpmf <- function(family, link) {
  stopifnot(is.character(family), is.character(link))
  # TODO: generalize to non-cumulative families?
  stopifnot(family == "hurdle_cumulative")
  inv_link <- stan_inv_link(link, vectorize = FALSE)
  # Stan code of the k-th linear term inside the inverse link
  th <- function(k) {
    out <- glue("thres[{k}] - mu")
    glue("disc * ({out})")
  }
  out <- glue(
    "  /* {family}-{link} log-PDF for a single response\n",
    "   * Args:\n",
    "   *   y: response category\n",
    "   *   mu: latent mean parameter\n",
    "   *   hu: hurdle probability\n",
    "   *   disc: discrimination parameter\n",
    "   *   thres: ordinal thresholds\n",
    "   * Returns:\n",
    "   *   a scalar to be added to the log posterior\n",
    "   */\n",
    "  real {family}_{link}_lpmf(int y, real mu, real hu, real disc, vector thres) {{\n",
    "\n"
  )
  # define the function body
  if (inv_link == "inv_logit") {
    # logit link: stay on the log scale for numerical stability
    str_add(out) <- glue(
      "    int nthres = num_elements(thres);\n",
      "    if (y == 0) {{\n",
      "      return bernoulli_lpmf(1 | hu);\n",
      "    }} else if (y == 1) {{\n",
      "      return log_inv_logit({th(1)}) +\n",
      "             bernoulli_lpmf(0 | hu);\n",
      # FIX: the highest ordinal category is nthres + 1 (y == 0 is the
      # hurdle), matching both the generic-link branch below and the
      # ordered_logistic variant. The former 'y == nthres + 2' made this
      # branch unreachable, so the last category fell through to the
      # 'else' branch and indexed thres[y] out of bounds.
      "    }} else if (y == nthres + 1) {{\n",
      "      return log1m_inv_logit({th('nthres')}) +\n",
      "             bernoulli_lpmf(0 | hu);\n",
      "    }} else {{\n",
      # TODO: replace with log_inv_logit_diff once rstan >= 2.25
      "      return log_diff_exp(\n",
      "        log_inv_logit({th('y')}), \n",
      "        log_inv_logit({th('y - 1')})\n",
      "      ) + bernoulli_lpmf(0 | hu) ;\n",
      "    }}\n",
      "  }}\n"
    )
  } else {
    # generic links: compute the probability, then take its log
    str_add(out) <- glue(
      "    int nthres = num_elements(thres);\n",
      "    real p;\n",
      "    if (y == 0){{\n",
      "      p = hu;\n",
      "    }} else if (y == 1) {{\n",
      "      p = {inv_link}({th(1)}) * (1 - hu);\n",
      "    }} else if (y == nthres + 1) {{\n",
      "      p = (1 - {inv_link}({th('nthres')})) * (1 - hu);\n",
      "    }} else {{\n",
      "      p = ({inv_link}({th('y')}) -\n",
      "           {inv_link}({th('y - 1')})) * (1 - hu);\n",
      "    }}\n",
      "    return log(p);\n",
      "  }}\n"
    )
  }
  # lpmf function for multiple merged thresholds
  str_add(out) <- glue(
    "  /* {family}-{link} log-PDF for a single response and merged thresholds\n",
    "   * Args:\n",
    "   *   y: response category\n",
    "   *   mu: latent mean parameter\n",
    "   *   hu: hurdle probability\n",
    "   *   disc: discrimination parameter\n",
    "   *   thres: vector of merged ordinal thresholds\n",
    "   *   j: start and end index for the applid threshold within 'thres'\n",
    "   * Returns:\n",
    "   *   a scalar to be added to the log posterior\n",
    "   */\n",
    "  real {family}_{link}_merged_lpmf(",
    "int y, real mu, real hu, real disc, vector thres, int[] j) {{\n",
    "    return {family}_{link}_lpmf(y | mu, hu, disc, thres[j[1]:j[2]]);\n",
    "  }}\n"
  )
  if (link == "logit") {
    # use the more efficient ordered_logistic function when disc == 1
    str_add(out) <- glue(
      "\n",
      "  // Use more efficient ordered_logistic function with disc == 1\n",
      "  real hurdle_cumulative_ordered_logistic_lpmf(int y, real mu, real hu, real disc, vector thres) {{\n",
      "    if (y == 0) {{\n",
      "      return bernoulli_lpmf(1 | hu);\n",
      "    }} else {{\n",
      "      return ordered_logistic_lpmf(y | mu, thres) +\n",
      "             bernoulli_lpmf(0 | hu);\n",
      "    }}\n",
      "  }}\n"
    )
    str_add(out) <- glue(
      "  /* use ordered-logistic log-PDF for a single response and merged thresholds\n",
      "   * Args:\n",
      "   *   y: response category\n",
      "   *   mu: latent mean parameter\n",
      "   *   hu: hurdle probability\n",
      "   *   thres: vector of merged ordinal thresholds\n",
      "   *   j: start and end index for the applid threshold within 'thres'\n",
      "   * Returns:\n",
      "   *   a scalar to be added to the log posterior\n",
      "   */\n",
      "  real hurdle_cumulative_ordered_logistic_merged_lpmf(",
      "int y, real mu, real hu, real disc, vector thres, int[] j) {{\n",
      "    return hurdle_cumulative_ordered_logistic_lpmf(y | mu, hu, disc, thres[j[1]:j[2]]);\n",
      "  }}\n"
    )
  }
  out
}
brms/R/brmsformula.R0000644000176200001440000021150414454230113014072 0ustar liggesusers#' Set up a model formula for use in \pkg{brms}
#'
#' Set up a model formula for use in the \pkg{brms} package
#' allowing to define (potentially non-linear) additive multilevel
#' models for all parameters of the assumed response distribution.
#'
#' @aliases bf
#'
#' @param formula An object of class \code{formula}
#' (or one that can be coerced to that class):
#' a symbolic description of the model to be fitted.
#' The details of model specification are given in 'Details'.
#' @param ... Additional \code{formula} objects to specify predictors of
#' non-linear and distributional parameters. Formulas can either be named
#' directly or contain names on their left-hand side. Alternatively,
#' it is possible to fix parameters to certain values by passing
#' numbers or character strings in which case arguments have to be named
#' to provide the parameter names. See 'Details' for more information.
#' @param flist Optional list of formulas, which are treated in the
#' same way as formulas passed via the \code{...} argument.
#' @param nl Logical; Indicates whether \code{formula} should be
#' treated as specifying a non-linear model. By default, \code{formula}
#' is treated as an ordinary linear model formula.
#' @param loop Logical; Only used in non-linear models.
#' Indicates if the computation of the non-linear formula should be
#' done inside (\code{TRUE}) or outside (\code{FALSE}) a loop
#' over observations. Defaults to \code{TRUE}.
#' @param center Logical; Indicates if the population-level design
#' matrix should be centered, which usually increases sampling efficiency.
#' See the 'Details' section for more information. #' Defaults to \code{TRUE} for distributional parameters #' and to \code{FALSE} for non-linear parameters. #' @param cmc Logical; Indicates whether automatic cell-mean coding #' should be enabled when removing the intercept by adding \code{0} #' to the right-hand of model formulas. Defaults to \code{TRUE} to #' mirror the behavior of standard \R formula parsing. #' @param sparse Logical; indicates whether the population-level design matrices #' should be treated as sparse (defaults to \code{FALSE}). For design matrices #' with many zeros, this can considerably reduce required memory. Sampling #' speed is currently not improved or even slightly decreased. #' @param decomp Optional name of the decomposition used for the #' population-level design matrix. Defaults to \code{NULL} that is #' no decomposition. Other options currently available are #' \code{"QR"} for the QR decomposition that helps in fitting models #' with highly correlated predictors. #' @param family Same argument as in \code{\link{brm}}. #' If \code{family} is specified in \code{brmsformula}, it will #' overwrite the value specified in other functions. #' @param autocor An optional \code{formula} which contains #' autocorrelation terms as described in \code{\link{autocor-terms}} #' or alternatively a \code{\link{cor_brms}} object (deprecated). #' If \code{autocor} is specified in \code{brmsformula}, it will #' overwrite the value specified in other functions. #' @param unused An optional \code{formula} which contains variables #' that are unused in the model but should still be stored in the #' model's data frame. This can be useful, for example, #' if those variables are required for post-processing the model. #' #' @return An object of class \code{brmsformula}, which #' is essentially a \code{list} containing all model #' formulas as well as some additional information. 
#' #' @seealso \code{\link{mvbrmsformula}}, \code{\link{brmsformula-helpers}} #' #' @details #' #' \bold{General formula structure} #' #' The \code{formula} argument accepts formulas of the following syntax: #' #' \code{response | aterms ~ pterms + (gterms | group)} #' #' The \code{pterms} part contains effects that are assumed to be the same #' across observations. We call them 'population-level' or 'overall' effects, #' or (adopting frequentist vocabulary) 'fixed' effects. The optional #' \code{gterms} part may contain effects that are assumed to vary across #' grouping variables specified in \code{group}. We call them 'group-level' or #' 'varying' effects, or (adopting frequentist vocabulary) 'random' effects, #' although the latter name is misleading in a Bayesian context. For more #' details type \code{vignette("brms_overview")} and #' \code{vignette("brms_multilevel")}. #' #' \bold{Group-level terms} #' #' Multiple grouping factors each with multiple group-level effects are #' possible. (Of course we can also run models without any group-level #' effects.) Instead of \code{|} you may use \code{||} in grouping terms to #' prevent correlations from being modeled. Equivalently, the \code{cor} #' argument of the \code{\link{gr}} function can be used for this purpose, #' for example, \code{(1 + x || g)} is equivalent to #' \code{(1 + x | gr(g, cor = FALSE))}. #' #' It is also possible to model different group-level terms of the same #' grouping factor as correlated (even across different formulas, e.g., in #' non-linear models) by using \code{||} instead of \code{|}. All #' group-level terms sharing the same ID will be modeled as correlated. If, #' for instance, one specifies the terms \code{(1+x|i|g)} and \code{(1+z|i|g)} #' somewhere in the formulas passed to \code{brmsformula}, correlations #' between the corresponding group-level effects will be estimated. 
In the #' above example, \code{i} is not a variable in the data but just a symbol to #' indicate correlations between multiple group-level terms. Equivalently, the #' \code{id} argument of the \code{\link{gr}} function can be used as well, #' for example, \code{(1 + x | gr(g, id = "i"))}. #' #' If levels of the grouping factor belong to different sub-populations, #' it may be reasonable to assume a different covariance matrix for each #' of the sub-populations. For instance, the variation within the #' treatment group and within the control group in a randomized control #' trial might differ. Suppose that \code{y} is the outcome, and #' \code{x} is the factor indicating the treatment and control group. #' Then, we could estimate different hyper-parameters of the varying #' effects (in this case a varying intercept) for treatment and control #' group via \code{y ~ x + (1 | gr(subject, by = x))}. #' #' You can specify multi-membership terms using the \code{\link{mm}} #' function. For instance, a multi-membership term with two members #' could be \code{(1 | mm(g1, g2))}, where \code{g1} and \code{g2} #' specify the first and second member, respectively. Moreover, #' if a covariate \code{x} varies across the levels of the grouping-factors #' \code{g1} and \code{g2}, we can save the respective covariate values #' in the variables \code{x1} and \code{x2} and then model the varying #' effect as \code{(1 + mmc(x1, x2) | mm(g1, g2))}. #' #' \bold{Special predictor terms} #' #' Flexible non-linear smooth terms can modeled using the \code{\link{s}} #' and \code{\link{t2}} functions in the \code{pterms} part #' of the model formula. This allows to fit generalized additive mixed #' models (GAMMs) with \pkg{brms}. The implementation is similar to that #' used in the \pkg{gamm4} package. For more details on this model class #' see \code{\link[mgcv:gam]{gam}} and \code{\link[mgcv:gamm]{gamm}}. 
#' #' Gaussian process terms can be fitted using the \code{\link{gp}} #' function in the \code{pterms} part of the model formula. Similar to #' smooth terms, Gaussian processes can be used to model complex non-linear #' relationships, for instance temporal or spatial autocorrelation. #' However, they are computationally demanding and are thus not recommended #' for very large datasets or approximations need to be used. #' #' The \code{pterms} and \code{gterms} parts may contain four non-standard #' effect types namely monotonic, measurement error, missing value, and #' category specific effects, which can be specified using terms of the #' form \code{mo(predictor)}, \code{me(predictor, sd_predictor)}, #' \code{mi(predictor)}, and \code{cs()}, respectively. #' Category specific effects can only be estimated in #' ordinal models and are explained in more detail in the package's #' main vignette (type \code{vignette("brms_overview")}). #' The other three effect types are explained in the following. #' #' A monotonic predictor must either be integer valued or an ordered factor, #' which is the first difference to an ordinary continuous predictor. #' More importantly, predictor categories (or integers) are not assumed to be #' equidistant with respect to their effect on the response variable. #' Instead, the distance between adjacent predictor categories (or integers) #' is estimated from the data and may vary across categories. #' This is realized by parameterizing as follows: #' One parameter takes care of the direction and size of the effect similar #' to an ordinary regression parameter, while an additional parameter vector #' estimates the normalized distances between consecutive predictor categories. #' A main application of monotonic effects are ordinal predictors that #' can this way be modeled without (falsely) treating them as continuous #' or as unordered categorical predictors. For more details and examples #' see \code{vignette("brms_monotonic")}. 
#' #' Quite often, predictors are measured and as such naturally contain #' measurement error. Although most researchers are well aware of this problem, #' measurement error in predictors is ignored in most #' regression analyses, possibly because only few packages allow #' for modeling it. Notably, measurement error can be handled in #' structural equation models, but many more general regression models #' (such as those featured by \pkg{brms}) cannot be transferred #' to the SEM framework. In \pkg{brms}, effects of noise-free predictors #' can be modeled using the \code{me} (for 'measurement error') function. #' If, say, \code{y} is the response variable and #' \code{x} is a measured predictor with known measurement error #' \code{sdx}, we can simply include it on the right-hand side of the #' model formula via \code{y ~ me(x, sdx)}. #' This can easily be extended to more general formulas. #' If \code{x2} is another measured predictor with corresponding error #' \code{sdx2} and \code{z} is a predictor without error #' (e.g., an experimental setting), we can model all main effects #' and interactions of the three predictors in the well known manner: #' \code{y ~ me(x, sdx) * me(x2, sdx2) * z}. #' The \code{me} function is soft deprecated in favor of the more flexible #' and consistent \code{mi} function (see below). #' #' When a variable contains missing values, the corresponding rows will #' be excluded from the data by default (row-wise exclusion). However, #' quite often we want to keep these rows and instead estimate the missing values. #' There are two approaches for this: (a) Impute missing values before #' the model fitting for instance via multiple imputation (see #' \code{\link{brm_multiple}} for a way to handle multiple imputed datasets). #' (b) Impute missing values on the fly during model fitting. The latter #' approach is explained in the following. 
Using a variable with missing
#' values as predictors requires two things. First, we need to specify that
#' the predictor contains missings that should be imputed.
#' If, say, \code{y} is the primary response, \code{x} is a
#' predictor with missings and \code{z} is a predictor without missings,
#' we go for \code{y ~ mi(x) + z}. Second, we need to model \code{x}
#' as an additional response with corresponding predictors and the
#' addition term \code{mi()}. In our example, we could write
#' \code{x | mi() ~ z}. Measurement error may be included via
#' the \code{sdy} argument, say, \code{x | mi(sdy = se) ~ z}.
#' See \code{\link{mi}} for examples with real data.
#'
#'
#' \bold{Autocorrelation terms}
#'
#' Autocorrelation terms can be directly specified inside the \code{pterms}
#' part as well. Details can be found in \code{\link{autocor-terms}}.
#'
#' \bold{Additional response information}
#'
#' Another special feature of the \pkg{brms} formula syntax is the optional
#' \code{aterms} part, which may contain multiple terms of the form
#' \code{fun()} separated by \code{+}, each providing special
#' information on the response variable. \code{fun} can be replaced with
#' either \code{se}, \code{weights}, \code{subset}, \code{cens}, \code{trunc},
#' \code{trials}, \code{cat}, \code{dec}, \code{rate}, \code{vreal}, or
#' \code{vint}. Their meanings are explained below
#' (see also \code{\link{addition-terms}}).
#'
#' For families \code{gaussian}, \code{student} and \code{skew_normal}, it is
#' possible to specify standard errors of the observations, thus allowing
#' to perform meta-analysis. Suppose that the variable \code{yi} contains
#' the effect sizes from the studies and \code{sei} the corresponding
#' standard errors. Then, fixed and random effects meta-analyses can
#' be conducted using the formulas \code{yi | se(sei) ~ 1} and
#' \code{yi | se(sei) ~ 1 + (1|study)}, respectively, where
#' \code{study} is a variable uniquely identifying every study.
#' If desired, meta-regression can be performed via #' \code{yi | se(sei) ~ 1 + mod1 + mod2 + (1|study)} #' or \cr \code{yi | se(sei) ~ 1 + mod1 + mod2 + (1 + mod1 + mod2|study)}, #' where \code{mod1} and \code{mod2} represent moderator variables. #' By default, the standard errors replace the parameter \code{sigma}. #' To model \code{sigma} in addition to the known standard errors, #' set argument \code{sigma} in function \code{se} to \code{TRUE}, #' for instance, \code{yi | se(sei, sigma = TRUE) ~ 1}. #' #' For all families, weighted regression may be performed using #' \code{weights} in the \code{aterms} part. Internally, this is #' implemented by multiplying the log-posterior values of each #' observation by their corresponding weights. #' Suppose that variable \code{wei} contains the weights #' and that \code{yi} is the response variable. #' Then, formula \code{yi | weights(wei) ~ predictors} #' implements a weighted regression. #' #' For multivariate models, \code{subset} may be used in the \code{aterms} #' part, to use different subsets of the data in different univariate #' models. For instance, if \code{sub} is a logical variable and #' \code{y} is the response of one of the univariate models, we may #' write \code{y | subset(sub) ~ predictors} so that \code{y} is #' predicted only for those observations for which \code{sub} evaluates #' to \code{TRUE}. #' #' For log-linear models such as poisson models, \code{rate} may be used #' in the \code{aterms} part to specify the denominator of a response that #' is expressed as a rate. The numerator is given by the actual response #' variable and has a distribution according to the family as usual. Using #' \code{rate(denom)} is equivalent to adding \code{offset(log(denom))} to #' the linear predictor of the main parameter but the former is arguably #' more convenient and explicit. 
#' #' With the exception of categorical and ordinal families, #' left, right, and interval censoring can be modeled through #' \code{y | cens(censored) ~ predictors}. The censoring variable #' (named \code{censored} in this example) should contain the values #' \code{'left'}, \code{'none'}, \code{'right'}, and \code{'interval'} #' (or equivalently \code{-1}, \code{0}, \code{1}, and \code{2}) to indicate that #' the corresponding observation is left censored, not censored, right censored, #' or interval censored. For interval censored data, a second variable #' (let's call it \code{y2}) has to be passed to \code{cens}. In this case, #' the formula has the structure \code{y | cens(censored, y2) ~ predictors}. #' While the lower bounds are given in \code{y}, the upper bounds are given #' in \code{y2} for interval censored data. Intervals are assumed to be open #' on the left and closed on the right: \code{(y, y2]}. #' #' With the exception of categorical and ordinal families, #' the response distribution can be truncated using the \code{trunc} #' function in the addition part. If the response variable is truncated #' between, say, 0 and 100, we can specify this via #' \code{yi | trunc(lb = 0, ub = 100) ~ predictors}. #' Instead of numbers, variables in the data set can also be passed allowing #' for varying truncation points across observations. Defining only one of #' the two arguments in \code{trunc} leads to one-sided truncation. #' #' For all continuous families, missing values in the responses can be imputed #' within Stan by using the addition term \code{mi}. This is mostly #' useful in combination with \code{mi} predictor terms as explained #' above under 'Special predictor terms'. #' #' For families \code{binomial} and \code{zero_inflated_binomial}, #' addition should contain a variable indicating the number of trials #' underlying each observation. 
In \code{lme4} syntax, we may write for instance #' \code{cbind(success, n - success)}, which is equivalent #' to \code{success | trials(n)} in \pkg{brms} syntax. If the number of trials #' is constant across all observations, say \code{10}, #' we may also write \code{success | trials(10)}. #' \bold{Please note that the \code{cbind()} syntax will not work #' in \pkg{brms} in the expected way because this syntax is reserved #' for other purposes.} #' #' For all ordinal families, \code{aterms} may contain a term #' \code{thres(number)} to specify the number thresholds (e.g, #' \code{thres(6)}), which should be equal to the total number of response #' categories - 1. If not given, the number of thresholds is calculated from #' the data. If different threshold vectors should be used for different #' subsets of the data, the \code{gr} argument can be used to provide the #' grouping variable (e.g, \code{thres(6, gr = item)}, if \code{item} is the #' grouping variable). In this case, the number of thresholds can also be a #' variable in the data with different values per group. #' #' A deprecated quasi alias of \code{thres()} is \code{cat()} with which the #' total number of response categories (i.e., number of thresholds + 1) can be #' specified. #' #' In Wiener diffusion models (family \code{wiener}) the addition term #' \code{dec} is mandatory to specify the (vector of) binary decisions #' corresponding to the reaction times. Non-zero values will be treated #' as a response on the upper boundary of the diffusion process and zeros #' will be treated as a response on the lower boundary. Alternatively, #' the variable passed to \code{dec} might also be a character vector #' consisting of \code{'lower'} and \code{'upper'}. #' #' All families support the \code{index} addition term to uniquely identify #' each observation of the corresponding response variable. 
Currently,
#' \code{index} is primarily useful in combination with the \code{subset}
#' addition and \code{\link{mi}} terms.
#'
#' For custom families, it is possible to pass an arbitrary number of real and
#' integer vectors via the addition terms \code{vreal} and \code{vint},
#' respectively. An example is provided in
#' \code{vignette('brms_customfamilies')}. To pass multiple vectors of the
#' same data type, provide them separated by commas inside a single
#' \code{vreal} or \code{vint} statement.
#'
#' Multiple addition terms of different types may be specified at the same
#' time using the \code{+} operator. For example, the formula
#' \code{formula = yi | se(sei) + cens(censored) ~ 1} implies a censored
#' meta-analytic model.
#'
#' The addition argument \code{disp} (short for dispersion)
#' has been removed in version 2.0. You may instead use the
#' distributional regression approach by specifying
#' \code{sigma ~ 1 + offset(log(xdisp))} or
#' \code{shape ~ 1 + offset(log(xdisp))}, where \code{xdisp} is
#' the variable being previously passed to \code{disp}.
#'
#' \bold{Parameterization of the population-level intercept}
#'
#' By default, the population-level intercept (if incorporated) is estimated
#' separately and not as part of population-level parameter vector \code{b}. As
#' a result, priors on the intercept also have to be specified separately.
#' Furthermore, to increase sampling efficiency, the population-level design
#' matrix \code{X} is centered around its column means \code{X_means} if the
#' intercept is incorporated. This leads to a temporary bias in the intercept
#' equal to \code{<X_means, b>}, where \code{<,>} is the scalar product. The
#' bias is corrected after fitting the model, but be aware that you are
#' effectively defining a prior on the intercept of the centered design matrix,
#' not on the real intercept. You can turn off this special handling of the
#' intercept by setting argument \code{center} to \code{FALSE}.
For more #' details on setting priors on population-level intercepts, see #' \code{\link{set_prior}}. #' #' This behavior can be avoided by using the reserved #' (and internally generated) variable \code{Intercept}. #' Instead of \code{y ~ x}, you may write #' \code{y ~ 0 + Intercept + x}. This way, priors can be #' defined on the real intercept, directly. In addition, #' the intercept is just treated as an ordinary population-level effect #' and thus priors defined on \code{b} will also apply to it. #' Note that this parameterization may be less efficient #' than the default parameterization discussed above. #' #' \bold{Formula syntax for non-linear models} #' #' In \pkg{brms}, it is possible to specify non-linear models #' of arbitrary complexity. #' The non-linear model can just be specified within the \code{formula} #' argument. Suppose, that we want to predict the response \code{y} #' through the predictor \code{x}, where \code{x} is linked to \code{y} #' through \code{y = alpha - beta * lambda^x}, with parameters #' \code{alpha}, \code{beta}, and \code{lambda}. This is certainly a #' non-linear model being defined via #' \code{formula = y ~ alpha - beta * lambda^x} (addition arguments #' can be added in the same way as for ordinary formulas). #' To tell \pkg{brms} that this is a non-linear model, #' we set argument \code{nl} to \code{TRUE}. #' Now we have to specify a model for each of the non-linear parameters. #' Let's say we just want to estimate those three parameters #' with no further covariates or random effects. Then we can pass #' \code{alpha + beta + lambda ~ 1} or equivalently #' (and more flexible) \code{alpha ~ 1, beta ~ 1, lambda ~ 1} #' to the \code{...} argument. #' This can, of course, be extended. If we have another predictor \code{z} and #' observations nested within the grouping factor \code{g}, we may write for #' instance \code{alpha ~ 1, beta ~ 1 + z + (1|g), lambda ~ 1}. #' The formula syntax described above applies here as well. 
#' In this example, we are using \code{z} and \code{g} only for the #' prediction of \code{beta}, but we might also use them for the other #' non-linear parameters (provided that the resulting model is still #' scientifically reasonable). #' #' By default, non-linear covariates are treated as real vectors in Stan. #' However, if the data of the covariates is of type `integer` in \R (which #' can be enforced by the `as.integer` function), the Stan type will be #' changed to an integer array. That way, covariates can also be used #' for indexing purposes in Stan. #' #' Non-linear models may not be uniquely identified and / or show bad convergence. #' For this reason it is mandatory to specify priors on the non-linear parameters. #' For instructions on how to do that, see \code{\link{set_prior}}. #' For some examples of non-linear models, see \code{vignette("brms_nonlinear")}. #' #' \bold{Formula syntax for predicting distributional parameters} #' #' It is also possible to predict parameters of the response distribution such #' as the residual standard deviation \code{sigma} in gaussian models or the #' hurdle probability \code{hu} in hurdle models. The syntax closely resembles #' that of a non-linear parameter, for instance \code{sigma ~ x + s(z) + #' (1+x|g)}. For some examples of distributional models, see #' \code{vignette("brms_distreg")}. #' #' Parameter \code{mu} exists for every family and can be used as an #' alternative to specifying terms in \code{formula}. If both \code{mu} and #' \code{formula} are given, the right-hand side of \code{formula} is ignored. #' Accordingly, specifying terms on the right-hand side of both \code{formula} #' and \code{mu} at the same time is deprecated. In future versions, #' \code{formula} might be updated by \code{mu}. 
#' #' The following are #' distributional parameters of specific families (all other parameters are #' treated as non-linear parameters): \code{sigma} (residual standard #' deviation or scale of the \code{gaussian}, \code{student}, #' \code{skew_normal}, \code{lognormal} \code{exgaussian}, and #' \code{asym_laplace} families); \code{shape} (shape parameter of the #' \code{Gamma}, \code{weibull}, \code{negbinomial}, and related zero-inflated #' / hurdle families); \code{nu} (degrees of freedom parameter of the #' \code{student} and \code{frechet} families); \code{phi} (precision #' parameter of the \code{beta} and \code{zero_inflated_beta} families); #' \code{kappa} (precision parameter of the \code{von_mises} family); #' \code{beta} (mean parameter of the exponential component of the #' \code{exgaussian} family); \code{quantile} (quantile parameter of the #' \code{asym_laplace} family); \code{zi} (zero-inflation probability); #' \code{hu} (hurdle probability); \code{zoi} (zero-one-inflation #' probability); \code{coi} (conditional one-inflation probability); #' \code{disc} (discrimination) for ordinal models; \code{bs}, \code{ndt}, and #' \code{bias} (boundary separation, non-decision time, and initial bias of #' the \code{wiener} diffusion model). By default, distributional parameters #' are modeled on the log scale if they can be positive only or on the logit #' scale if the can only be within the unit interval. #' #' Alternatively, one may fix distributional parameters to certain values. #' However, this is mainly useful when models become too #' complicated and otherwise have convergence issues. #' We thus suggest to be generally careful when making use of this option. #' The \code{quantile} parameter of the \code{asym_laplace} distribution #' is a good example where it is useful. By fixing \code{quantile}, #' one can perform quantile regression for the specified quantile. #' For instance, \code{quantile = 0.25} allows predicting the 25\%-quantile. 
#' Furthermore, the \code{bias} parameter in drift-diffusion models, #' is assumed to be \code{0.5} (i.e. no bias) in many applications. #' To achieve this, simply write \code{bias = 0.5}. #' Other possible applications are the Cauchy distribution as a #' special case of the Student-t distribution with #' \code{nu = 1}, or the geometric distribution as a special case of #' the negative binomial distribution with \code{shape = 1}. #' Furthermore, the parameter \code{disc} ('discrimination') in ordinal #' models is fixed to \code{1} by default and not estimated, #' but may be modeled as any other distributional parameter if desired #' (see examples). For reasons of identification, \code{'disc'} #' can only be positive, which is achieved by applying the log-link. #' #' In categorical models, distributional parameters do not have #' fixed names. Instead, they are named after the response categories #' (excluding the first one, which serves as the reference category), #' with the prefix \code{'mu'}. If, for instance, categories are named #' \code{cat1}, \code{cat2}, and \code{cat3}, the distributional parameters #' will be named \code{mucat2} and \code{mucat3}. #' #' Some distributional parameters currently supported by \code{brmsformula} #' have to be positive (a negative standard deviation or precision parameter #' does not make any sense) or are bounded between 0 and 1 (for zero-inflated / #' hurdle probabilities, quantiles, or the initial bias parameter of #' drift-diffusion models). #' However, linear predictors can be positive or negative, and thus the log link #' (for positive parameters) or logit link (for probability parameters) are used #' by default to ensure that distributional parameters are within their valid intervals. #' This implies that, by default, effects for such distributional parameters are #' estimated on the log / logit scale and one has to apply the inverse link #' function to get to the effects on the original scale. 
#' Alternatively, it is possible to use the identity link to predict parameters #' on their original scale, directly. However, this is much more likely to lead #' to problems in the model fitting, if the parameter actually has a restricted range. #' #' See also \code{\link{brmsfamily}} for an overview of valid link functions. #' #' \bold{Formula syntax for mixture models} #' #' The specification of mixture models closely resembles that #' of non-mixture models. If not specified otherwise (see below), #' all mean parameters of the mixture components are predicted #' using the right-hand side of \code{formula}. All types of predictor #' terms allowed in non-mixture models are allowed in mixture models #' as well. #' #' Distributional parameters of mixture distributions have the same #' name as those of the corresponding ordinary distributions, but with #' a number at the end to indicate the mixture component. For instance, if #' you use family \code{mixture(gaussian, gaussian)}, the distributional #' parameters are \code{sigma1} and \code{sigma2}. #' Distributional parameters of the same class can be fixed to the same value. #' For the above example, we could write \code{sigma2 = "sigma1"} to make #' sure that both components have the same residual standard deviation, #' which is in turn estimated from the data. #' #' In addition, there are two types of special distributional parameters. #' The first are named \code{mu}, that allow for modeling different #' predictors for the mean parameters of different mixture components. #' For instance, if you want to predict the mean of the first component #' using predictor \code{x} and the mean of the second component using #' predictor \code{z}, you can write \code{mu1 ~ x} as well as \code{mu2 ~ z}. #' The second are named \code{theta}, which constitute the mixing #' proportions. If the mixing proportions are fixed to certain values, #' they are internally normalized to form a probability vector. 
#' If one seeks to predict the mixing proportions, all but
#' one of them has to be predicted, while the remaining one is used
#' as the reference category to identify the model. The so-called 'softmax'
#' transformation is applied on the linear predictor terms to form a
#' probability vector.
#'
#' For more information on mixture models, see
#' the documentation of \code{\link{mixture}}.
#'
#' \bold{Formula syntax for multivariate models}
#'
#' Multivariate models may be specified using \code{mvbind} notation
#' or with the help of the \code{\link{mvbf}} function.
#' Suppose that \code{y1} and \code{y2} are response variables
#' and \code{x} is a predictor. Then \code{mvbind(y1, y2) ~ x}
#' specifies a multivariate model.
#' The effects of all terms specified at the RHS of the formula
#' are assumed to vary across response variables.
#' For instance, two parameters will be estimated for \code{x},
#' one for the effect on \code{y1} and another for the effect on \code{y2}.
#' This is also true for group-level effects. When writing, for instance,
#' \code{mvbind(y1, y2) ~ x + (1+x|g)}, group-level effects will be
#' estimated separately for each response. To model these effects
#' as correlated across responses, use the ID syntax (see above).
#' For the present example, this would look as follows:
#' \code{mvbind(y1, y2) ~ x + (1+x|2|g)}. Of course, you could also use
#' any value other than \code{2} as ID.
#'
#' It is also possible to specify different formulas for different responses.
#' If, for instance, \code{y1} should be predicted by \code{x} and \code{y2}
#' should be predicted by \code{z}, we could write \code{mvbf(y1 ~ x, y2 ~ z)}.
#' Alternatively, multiple \code{brmsformula} objects can be added to
#' specify a joint multivariate model (see 'Examples').
#'
#' @examples
#' # multilevel model with smoothing terms
#' brmsformula(y ~ x1*x2 + s(z) + (1+x1|1) + (1|g2))
#'
#' # additionally predict 'sigma'
#' brmsformula(y ~ x1*x2 + s(z) + (1+x1|1) + (1|g2),
#'             sigma ~ x1 + (1|g2))
#'
#' # use the shorter alias 'bf'
#' (formula1 <- brmsformula(y ~ x + (x|g)))
#' (formula2 <- bf(y ~ x + (x|g)))
#' # will be TRUE
#' identical(formula1, formula2)
#'
#' # incorporate censoring
#' bf(y | cens(censor_variable) ~ predictors)
#'
#' # define a simple non-linear model
#' bf(y ~ a1 - a2^x, a1 + a2 ~ 1, nl = TRUE)
#'
#' # predict a1 and a2 differently
#' bf(y ~ a1 - a2^x, a1 ~ 1, a2 ~ x + (x|g), nl = TRUE)
#'
#' # correlated group-level effects across parameters
#' bf(y ~ a1 - a2^x, a1 ~ 1 + (1 |2| g), a2 ~ x + (x |2| g), nl = TRUE)
#' # alternative but equivalent way to specify the above model
#' bf(y ~ a1 - a2^x, a1 ~ 1 + (1 | gr(g, id = 2)),
#'    a2 ~ x + (x | gr(g, id = 2)), nl = TRUE)
#'
#' # define a multivariate model
#' bf(mvbind(y1, y2) ~ x * z + (1|g))
#'
#' # define a zero-inflated model
#' # also predicting the zero-inflation part
#' bf(y ~ x * z + (1+x|ID1|g), zi ~ x + (1|ID1|g))
#'
#' # specify a predictor as monotonic
#' bf(y ~ mo(x) + more_predictors)
#'
#' # for ordinal models only
#' # specify a predictor as category specific
#' bf(y ~ cs(x) + more_predictors)
#' # add a category specific group-level intercept
#' bf(y ~ cs(x) + (cs(1)|g))
#' # specify parameter 'disc'
#' bf(y ~ person + item, disc ~ item)
#'
#' # specify variables containing measurement error
#' bf(y ~ me(x, sdx))
#'
#' # specify predictors on all parameters of the wiener diffusion model
#' # the main formula models the drift rate 'delta'
#' bf(rt | dec(decision) ~ x, bs ~ x, ndt ~ x, bias ~ x)
#'
#' # fix the bias parameter to 0.5
#' bf(rt | dec(decision) ~ x, bias = 0.5)
#'
#' # specify different predictors for different mixture components
#' mix <- mixture(gaussian, gaussian)
#' bf(y ~ 1, mu1 ~ x, mu2 ~ z, family = mix)
#'
#' # fix both residual standard deviations to the same value
#' bf(y ~ x, sigma2 = "sigma1", family = mix)
#'
#' # use the '+' operator to specify models
#' bf(y ~ 1) +
#'   nlf(sigma ~ a * exp(b * x), a ~ x) +
#'   lf(b ~ z + (1|g), dpar = "sigma") +
#'   gaussian()
#'
#' # specify a multivariate model using the '+' operator
#' bf(y1 ~ x + (1|g)) +
#'   gaussian() + cor_ar(~1|g) +
#'   bf(y2 ~ z) + poisson()
#'
#' # specify correlated residuals of a gaussian and a poisson model
#' form1 <- bf(y1 ~ 1 + x + (1|c|obs), sigma = 1) + gaussian()
#' form2 <- bf(y2 ~ 1 + x + (1|c|obs)) + poisson()
#'
#' # model missing values in predictors
#' bf(bmi ~ age * mi(chl)) +
#'   bf(chl | mi() ~ age) +
#'   set_rescor(FALSE)
#'
#' # model sigma as a function of the mean
#' bf(y ~ eta, nl = TRUE) +
#'   lf(eta ~ 1 + x) +
#'   nlf(sigma ~ tau * sqrt(eta)) +
#'   lf(tau ~ 1)
#'
#' @export
brmsformula <- function(formula, ..., flist = NULL, family = NULL,
                        autocor = NULL, nl = NULL, loop = NULL,
                        center = NULL, cmc = NULL, sparse = NULL,
                        decomp = NULL, unused = NULL) {
  if (is.brmsformula(formula)) {
    out <- formula
  } else {
    out <- list(formula = as_formula(formula))
    class(out) <- "brmsformula"
  }
  # parse and validate dots arguments
  # (existing pforms/pfix come first so new definitions can override them)
  dots <- c(out$pforms, out$pfix, list(...), flist)
  dots <- lapply(dots, function(x) if (is.list(x)) x else list(x))
  dots <- unlist(dots, recursive = FALSE)
  forms <- list()
  for (i in seq_along(dots)) {
    # NOTE: 'c(forms) <-' calls a replacement function 'c<-' defined
    # elsewhere in brms; it appends the validated (named) formula
    c(forms) <- validate_par_formula(dots[[i]], par = names(dots)[i])
  }
  # later definitions of the same parameter replace earlier ones
  is_dupl_pars <- duplicated(names(forms), fromLast = TRUE)
  if (any(is_dupl_pars)) {
    dupl_pars <- collapse_comma(names(forms)[is_dupl_pars])
    message("Replacing initial definitions of parameters ", dupl_pars)
    forms[is_dupl_pars] <- NULL
  }
  # non-formula entries are fixed parameter values, not predictor terms
  not_form <- ulapply(forms, function(x) !is.formula(x))
  fix <- forms[not_form]
  forms[names(fix)] <- NULL
  out$pforms <- forms
  # validate fixed distributional parameters
  fix_theta <- fix[dpar_class(names(fix)) %in% "theta"]
  if (length(fix_theta)) {
    # normalize mixing proportions
    sum_theta <- sum(unlist(fix_theta))
    fix_theta <- lapply(fix_theta, "/", sum_theta)
    fix[names(fix_theta)] <- fix_theta
  }
  out$pfix <- fix
  for (dp in names(out$pfix)) {
    if (is.character(out$pfix[[dp]])) {
      # character values equate one parameter with another one
      if (identical(dp, out$pfix[[dp]])) {
        stop2("Equating '", dp, "' with itself is not meaningful.")
      }
      ap_class <- dpar_class(dp)
      if (ap_class == "mu") {
        stop2("Equating parameters of class 'mu' is not allowed.")
      }
      if (!identical(ap_class, dpar_class(out$pfix[[dp]]))) {
        stop2("Can only equate parameters of the same class.")
      }
      if (out$pfix[[dp]] %in% names(out$pfix)) {
        stop2("Cannot use fixed parameters on ",
              "the right-hand side of an equation.")
      }
      if (out$pfix[[dp]] %in% names(out$pforms)) {
        stop2("Cannot use predicted parameters on ",
              "the right-hand side of an equation.")
      }
    }
  }
  if (!is.null(nl)) {
    attr(out$formula, "nl") <- as_one_logical(nl)
  } else if (!is.null(out[["nl"]])) {
    # for backwards compatibility with brms <= 1.8.0
    attr(out$formula, "nl") <- out[["nl"]]
    out[["nl"]] <- NULL
  }
  if (is.null(attr(out$formula, "nl"))) {
    attr(out$formula, "nl") <- FALSE
  }
  if (!is.null(loop)) {
    attr(out$formula, "loop") <- as_one_logical(loop)
  }
  if (is.null(attr(out$formula, "loop"))) {
    attr(out$formula, "loop") <- TRUE
  }
  if (!is.null(center)) {
    attr(out$formula, "center") <- as_one_logical(center)
  }
  if (!is.null(cmc)) {
    attr(out$formula, "cmc") <- as_one_logical(cmc)
  }
  if (!is.null(sparse)) {
    attr(out$formula, "sparse") <- as_one_logical(sparse)
  }
  if (!is.null(decomp)) {
    attr(out$formula, "decomp") <- match.arg(decomp, decomp_opts())
  }
  if (!is.null(unused)) {
    attr(out$formula, "unused") <- as.formula(unused)
  }
  if (!is.null(autocor)) {
    attr(out$formula, "autocor") <- validate_autocor(autocor)
  } else if (!is.null(out$autocor)) {
    # for backwards compatibility with brms <= 2.11.0
    attr(out$formula, "autocor") <- validate_autocor(out$autocor)
    out$autocor <- NULL
  }
  if (!is.null(family)) {
    out$family <- validate_family(family)
  }
  if (!is.null(lhs(formula))) {
    out$resp <- terms_resp(formula)
  }
  # add default values for unspecified elements
  defs <- list(pforms = list(), pfix = list(), family = NULL, resp = NULL)
  defs <- defs[setdiff(names(defs), names(rmNULL(out, FALSE)))]
  out[names(defs)] <- defs
  class(out) <- c("brmsformula", "bform")
  # expand mvbind() syntax into a multivariate formula if present
  split_bf(out)
}

# alias of brmsformula
# fix: previously 'unused' was missing here, so bf(..., unused = ~x) fell
# into '...' and was misparsed as a parameter formula; now it is passed
# through explicitly (backward-compatible: appended argument with default)
#' @export
bf <- function(formula, ..., flist = NULL, family = NULL, autocor = NULL,
               nl = NULL, loop = NULL, center = NULL, cmc = NULL,
               sparse = NULL, decomp = NULL, unused = NULL) {
  brmsformula(
    formula, ..., flist = flist, family = family, autocor = autocor,
    nl = nl, loop = loop, center = center, cmc = cmc, sparse = sparse,
    decomp = decomp, unused = unused
  )
}

#' Linear and Non-linear formulas in \pkg{brms}
#'
#' Helper functions to specify linear and non-linear
#' formulas for use with \code{\link[brms:brmsformula]{brmsformula}}.
#'
#' @name brmsformula-helpers
#' @aliases bf-helpers nlf lf set_nl set_rescor
#'
#' @param formula Non-linear formula for a distributional parameter.
#'   The name of the distributional parameter can either be specified
#'   on the left-hand side of \code{formula} or via argument \code{dpar}.
#' @param dpar Optional character string specifying the distributional
#'   parameter to which the formulas passed via \code{...} and
#'   \code{flist} belong.
#' @param resp Optional character string specifying the response
#'   variable to which the formulas passed via \code{...} and
#'   \code{flist} belong. Only relevant in multivariate models.
#' @param autocor A one-sided formula containing autocorrelation
#'   terms. All non-autocorrelation terms in \code{autocor} will
#'   be silently ignored.
#' @param rescor Logical; Indicates if residual correlation between
#'   the response variables should be modeled. Currently this is only
#'   possible in multivariate \code{gaussian} and \code{student} models.
#'   Only relevant in multivariate models.
#' @param mecor Logical; Indicates if correlations between latent variables
#'   defined by \code{\link{me}} terms should be modeled. Defaults to \code{TRUE}.
#' @inheritParams brmsformula #' #' @return For \code{lf} and \code{nlf} a \code{list} that can be #' passed to \code{\link[brms:brmsformula]{brmsformula}} or added #' to an existing \code{brmsformula} or \code{mvbrmsformula} object. #' For \code{set_nl} and \code{set_rescor} a logical value that can be #' added to an existing \code{brmsformula} or \code{mvbrmsformula} object. #' #' @seealso \code{\link{brmsformula}}, \code{\link{mvbrmsformula}} #' #' @examples #' # add more formulas to the model #' bf(y ~ 1) + #' nlf(sigma ~ a * exp(b * x)) + #' lf(a ~ x, b ~ z + (1|g)) + #' gaussian() #' #' # specify 'nl' later on #' bf(y ~ a * inv_logit(x * b)) + #' lf(a + b ~ z) + #' set_nl(TRUE) #' #' # specify a multivariate model #' bf(y1 ~ x + (1|g)) + #' bf(y2 ~ z) + #' set_rescor(TRUE) #' #' # add autocorrelation terms #' bf(y ~ x) + acformula(~ arma(p = 1, q = 1) + car(W)) NULL #' @rdname brmsformula-helpers #' @export nlf <- function(formula, ..., flist = NULL, dpar = NULL, resp = NULL, loop = NULL) { formula <- as_formula(formula) if (is.null(lhs(formula))) { stop2("Argument 'formula' must be two-sided.") } if (length(c(list(...), flist))) { warning2( "Arguments '...' and 'flist' in nlf() will be reworked ", "at some point. Please avoid using them if possible." 
) } warn_dpar(dpar) if (!is.null(resp)) { resp <- as_one_character(resp) } if (!is.null(loop)) { attr(formula, "loop") <- as_one_logical(loop) } if (is.null(attr(formula, "loop"))) { attr(formula, "loop") <- TRUE } out <- c( list(structure(formula, nl = TRUE)), lf(..., flist = flist) ) structure(out, resp = resp) } #' @rdname brmsformula-helpers #' @export lf <- function(..., flist = NULL, dpar = NULL, resp = NULL, center = NULL, cmc = NULL, sparse = NULL, decomp = NULL) { out <- c(list(...), flist) warn_dpar(dpar) if (!is.null(resp)) { resp <- as_one_character(resp) } cmc <- if (!is.null(cmc)) as_one_logical(cmc) center <- if (!is.null(center)) as_one_logical(center) decomp <- if (!is.null(decomp)) match.arg(decomp, decomp_opts()) for (i in seq_along(out)) { if (!is.null(cmc)) { attr(out[[i]], "cmc") <- cmc } if (!is.null(center)) { attr(out[[i]], "center") <- center } if (!is.null(sparse)) { attr(out[[i]], "sparse") <- sparse } if (!is.null(decomp)) { attr(out[[i]], "decomp") <- decomp } } structure(out, resp = resp) } #' @rdname brmsformula-helpers #' @export acformula <- function(autocor, resp = NULL) { autocor <- terms_ac(as.formula(autocor)) if (!is.formula(autocor)) { stop2("'autocor' must contain at least one autocorrelation term.") } if (!is.null(resp)) { resp <- as_one_character(resp) } structure(autocor, resp = resp, class = c("acformula", "formula")) } #' @rdname brmsformula-helpers #' @export set_nl <- function(nl = TRUE, dpar = NULL, resp = NULL) { nl <- as_one_logical(nl) if (!is.null(dpar)) { dpar <- as_one_character(dpar) } if (!is.null(resp)) { resp <- as_one_character(resp) } structure(nl, dpar = dpar, resp = resp, class = "setnl") } #' Set up a multivariate model formula for use in \pkg{brms} #' #' Set up a multivariate model formula for use in the \pkg{brms} package #' allowing to define (potentially non-linear) additive multilevel #' models for all parameters of the assumed response distributions. #' #' @aliases mvbf #' #' @param ... 
Objects of class \code{formula} or \code{brmsformula}, #' each specifying a univariate model. See \code{\link{brmsformula}} #' for details on how to specify univariate models. #' @param flist Optional list of formulas, which are treated in the #' same way as formulas passed via the \code{...} argument. #' @param rescor Logical; Indicates if residual correlation between #' the response variables should be modeled. Currently, this is only #' possible in multivariate \code{gaussian} and \code{student} models. #' If \code{NULL} (the default), \code{rescor} is internally set to #' \code{TRUE} when possible. #' #' @return An object of class \code{mvbrmsformula}, which #' is essentially a \code{list} containing all model formulas #' as well as some additional information for multivariate models. #' #' @details See \code{vignette("brms_multivariate")} for a case study. #' #' @seealso \code{\link{brmsformula}}, \code{\link{brmsformula-helpers}} #' #' @examples #' bf1 <- bf(y1 ~ x + (1|g)) #' bf2 <- bf(y2 ~ s(z)) #' mvbf(bf1, bf2) #' #' @export mvbrmsformula <- function(..., flist = NULL, rescor = NULL) { dots <- c(list(...), flist) if (!length(dots)) { stop2("No objects passed to 'mvbrmsformula'.") } forms <- list() for (i in seq_along(dots)) { if (is.mvbrmsformula(dots[[i]])) { forms <- c(forms, dots[[i]]$forms) if (is.null(rescor)) { rescor <- dots[[i]]$rescor } } else { forms <- c(forms, list(bf(dots[[i]]))) } } if (!is.null(rescor)) { rescor <- as_one_logical(rescor) } responses <- ufrom_list(forms, "resp") if (any(duplicated(responses))) { stop2("Cannot use the same response variable twice in the same model.") } names(forms) <- responses structure( nlist(forms, responses, rescor), class = c("mvbrmsformula", "bform") ) } #' @export mvbf <- function(..., flist = NULL, rescor = NULL) { mvbrmsformula(..., flist = flist, rescor = rescor) } # build a mvbrmsformula object based on a brmsformula object # which uses mvbind on the left-hand side to specify MV models split_bf <- 
function(x) { stopifnot(is.brmsformula(x)) resp <- terms_resp(x$formula, check_names = FALSE) str_adform <- formula2str(x$formula) str_adform <- get_matches("\\|[^~]*(?=~)", str_adform, perl = TRUE) if (length(resp) > 1L) { # mvbind syntax used to specify MV model flist <- named_list(resp) for (i in seq_along(resp)) { flist[[i]] <- x str_lhs <- paste0(resp[[i]], str_adform) flist[[i]]$formula[[2]] <- parse(text = str_lhs)[[1]] flist[[i]]$resp <- resp[[i]] } x <- mvbf(flist = flist) } x } #' Bind response variables in multivariate models #' #' Can be used to specify a multivariate \pkg{brms} model within a single #' formula. Outside of \code{\link{brmsformula}}, it just behaves like #' \code{\link{cbind}}. #' #' @param ... Same as in \code{\link{cbind}} #' #' @seealso \code{\link{brmsformula}}, \code{\link{mvbrmsformula}} #' #' @examples #' bf(mvbind(y1, y2) ~ x) #' #' @export mvbind <- function(...) { cbind(...) } #' @rdname brmsformula-helpers #' @export set_rescor <- function(rescor = TRUE) { structure(as_one_logical(rescor), class = "setrescor") } allow_rescor <- function(x) { # indicate if estimating 'rescor' is allowed for this model if (!(is.mvbrmsformula(x) || is.mvbrmsterms(x))) { return(FALSE) } parts <- if (is.mvbrmsformula(x)) x$forms else x$terms families <- from_list(parts, "family") has_rescor <- ulapply(families, has_rescor) is_mixture <- ulapply(families, is.mixfamily) family_names <- ufrom_list(families, "family") all(has_rescor) && !any(is_mixture) && length(unique(family_names)) == 1L } #' @rdname brmsformula-helpers #' @export set_mecor <- function(mecor = TRUE) { structure(as_one_logical(mecor), class = "setmecor") } #' @export "+.bform" <- function(e1, e2) { if (is.brmsformula(e1)) { out <- plus_brmsformula(e1, e2) } else if (is.mvbrmsformula(e1)) { out <- plus_mvbrmsformula(e1, e2) } else { stop2("Method '+.bform' not implemented for ", class(e1)[1], " objects.") } out } # internal helper function of '+.bform' plus_brmsformula <- function(e1, 
e2) { if (is.function(e2)) { e2 <- try(e2(), silent = TRUE) if (!is.family(e2)) { stop2("Don't know how to handle non-family functions.") } } if (is.family(e2)) { e1 <- bf(e1, family = e2) } else if (is.cor_brms(e2) || inherits(e2, "acformula")) { e1 <- bf(e1, autocor = e2) } else if (inherits(e2, "setnl")) { dpar <- attr(e2, "dpar") if (is.null(dpar)) { e1 <- bf(e1, nl = e2) } else { if (is.null(e1$pforms[[dpar]])) { stop2("Parameter '", dpar, "' has no formula.") } attr(e1$pforms[[dpar]], "nl") <- e2 e1 <- bf(e1) } } else if (inherits(e2, "setmecor")) { e1$mecor <- e2[1] } else if (is.brmsformula(e2)) { e1 <- mvbf(e1, e2) } else if (inherits(e2, "setrescor")) { stop2("Setting 'rescor' is only possible in multivariate models.") } else if (is.ac_term(e2)) { stop2("Autocorrelation terms can only be specified on the right-hand ", "side of a formula, not added to a 'brmsformula' object.") } else if (!is.null(e2)) { e1 <- bf(e1, e2) } e1 } # internal helper function of '+.bform' plus_mvbrmsformula <- function(e1, e2) { if (is.function(e2)) { e2 <- try(e2(), silent = TRUE) if (!is.family(e2)) { stop2("Don't know how to handle non-family functions.") } } if (is.family(e2) || is.cor_brms(e2)) { e1$forms <- lapply(e1$forms, "+", e2) } else if (inherits(e2, "setrescor")) { e1$rescor <- e2[1] } else if (inherits(e2, "setmecor")) { e1$mecor <- e2[1] } else if (is.brmsformula(e2)) { e1 <- mvbf(e1, e2) } else if (is.mvbrmsformula(e2)) { # TODO: enable this option stop2("Cannot add two 'mvbrmsformula' objects together. Instead, ", "please add the individual 'brmsformula' objects directly.") } else if (is.ac_term(e2)) { stop2("Autocorrelation terms can only be specified on the right-hand ", "side of a formula, not added to a 'mvbrmsformula' object.") } else if (!is.null(e2)) { resp <- attr(e2, "resp", TRUE) if (is.null(resp)) { stop2( "Don't know how to add a ", class(e2)[1], " object ", "without the response variable name. ", "See help('brmsformula-helpers') for more details." 
) } if (!isTRUE(resp %in% e1$responses)) { stop2("'resp' should be one of ", collapse_comma(e1$responses), ".") } e1$forms[[resp]] <- e1$forms[[resp]] + e2 } e1 } # extract the 'nl' attribute from a brmsformula object # @param x object to extract 'nl' from # @param dpar optional name of a distributional parameter # for which 'nl' should be extracted # @param resp: optional name of a response variable # for which 'nl' should be extracted # @param aol: (as one logical) apply isTRUE to the result? get_nl <- function(x, dpar = NULL, resp = NULL, aol = TRUE) { if (is.mvbrmsformula(x)) { resp <- as_one_character(resp) x <- x$forms[[resp]] } if (is.brmsformula(x)) { if (is.null(dpar)) { x <- x$formula } else { dpar <- as_one_character(dpar) x <- x$pforms[[dpar]] } } nl <- attr(x, "nl", TRUE) if (aol) { nl <- isTRUE(nl) } nl } # available options for the 'decomp' argument decomp_opts <- function() { c("none", "QR") } # validate a formula of an additional parameter # @param formula an formula object # @param par optional name of the parameter; if not specified # the parameter name will be inferred from the formula # @param rsv_pars optional character vector of reserved parameter names # @return a named list of length one containing the formula validate_par_formula <- function(formula, par = NULL, rsv_pars = NULL) { stopifnot(length(par) <= 1L) try_formula <- try(as_formula(formula), silent = TRUE) if (is_try_error(try_formula)) { if (length(formula) != 1L) { stop2("Expecting a single value when fixing parameter '", par, "'.") } scalar <- SW(as.numeric(formula)) if (!is.na(scalar)) { formula <- scalar } else { formula <- as.character(formula) } out <- named_list(par, formula) } else { formula <- try_formula if (!is.null(lhs(formula))) { resp_pars <- all.vars(formula[[2]]) out <- named_list(resp_pars, list(formula)) for (i in seq_along(out)) { out[[i]][[2]] <- eval2(paste("quote(", resp_pars[i], ")")) } } else { if (!isTRUE(nzchar(par))) { stop2("Additional formulas must be 
named.") } formula <- formula(paste(par, formula2str(formula))) out <- named_list(par, list(formula)) } } pars <- names(out) if (any(grepl("\\.|_", pars))) { stop2("Parameter names should not contain dots or underscores.") } inv_pars <- intersect(pars, rsv_pars) if (length(inv_pars)) { stop2("The following parameter names are reserved", "for this model:\n", collapse_comma(inv_pars)) } out } # validate formulas dedicated to response variables # @param x coerced to a formula object # @param empty_ok is an empty left-hand-side ok? # @return a formula of the form ~ 1 validate_resp_formula <- function(x, empty_ok = TRUE) { out <- lhs(as_formula(x)) if (is.null(out)) { if (empty_ok) { out <- ~ 1 } else { str_x <- formula2str(x, space = "trim") stop2("Response variable is missing in formula ", str_x) } } out <- gsub("\\|+[^~]*~", "~", formula2str(out)) out <- try(formula(out), silent = TRUE) if (is_try_error(out)) { str_x <- formula2str(x, space = "trim") stop2("Incorrect use of '|' on the left-hand side of ", str_x) } environment(out) <- environment(x) out } # incorporate additional arguments into the model formula validate_formula <- function(formula, ...) { UseMethod("validate_formula") } #' @export validate_formula.default <- function(formula, ...) { validate_formula(bf(formula), ...) } # incorporate additional arguments into the model formula # @param formula object of class 'formula' of 'brmsformula' # @param data optional data.frame to validate data related arguments # @param family optional 'family' object # @param autocor (deprecated) optional 'cor_brms' object # @param threshold (deprecated) threshold type for ordinal models # @param cov_ranef (deprecated) named list of group covariance matrices # @return a brmsformula object compatible with the current version of brms #' @export validate_formula.brmsformula <- function( formula, family = gaussian(), autocor = NULL, data = NULL, threshold = NULL, sparse = NULL, cov_ranef = NULL, ... 
) { out <- bf(formula) if (is.null(out$family) && !is.null(family)) { out$family <- validate_family(family) } # allow the '.' symbol in the formulas out$formula <- expand_dot_formula(out$formula, data) for (i in seq_along(out$pforms)) { out$pforms[[i]] <- expand_dot_formula(out$pforms[[i]], data) } # allow 'me' terms to be correlated out$mecor <- default_mecor(out$mecor) if (has_cat(out) && !is.null(data)) { # for easy access of response categories # allow to update 'cats' with new data out$family$cats <- extract_cat_names(out, data) } if (is_ordinal(out$family)) { # thresholds and category names are data dependent try_terms <- try(stats::terms(out$formula), silent = TRUE) intercept <- attr(try_terms, "intercept", TRUE) if (!is_try_error(try_terms) && isTRUE(intercept == 0)) { stop2("Cannot remove the intercept in an ordinal model.") } if (!is.null(data)) { # for easy access of thresholds and response categories (#838) # allow to update 'cats' and 'thres' with new data out$family$thres <- extract_thres_names(out, data) out$family$cats <- extract_cat_names(out, data) } if (is.mixfamily(out$family)) { # every mixture family needs to know about response categories for (i in seq_along(out$family$mix)) { out$family$mix[[i]]$thres <- out$family$thres } } } conv_cats_dpars <- conv_cats_dpars(out$family) if (conv_cats_dpars && !is.null(data)) { # allow to update 'dpars' with new data # define distributional parameters based on response categories if (length(out$family$cats) < 2L) { stop2("At least 2 response categories are required.") } if (is.null(out$family$refcat)) { # the first level serves as the reference category out$family$refcat <- out$family$cats[1] } if (isNA(out$family$refcat)) { # implies predicting all categories predcats <- out$family$cats } else { if (!out$family$refcat %in% out$family$cats) { stop2("The reference response category must be one of ", collapse_comma(out$family$cats), ".") } predcats <- setdiff(out$family$cats, out$family$refcat) } multi_dpars 
<- valid_dpars(out$family, type = "multi") # 'rev' so that mu comes last but appears first in the end for (dp in rev(multi_dpars)) { dp_dpars <- make_stan_names(paste0(dp, predcats)) if (any(duplicated(dp_dpars))) { stop2("Invalid response category names. Please avoid ", "using any special characters in the names.") } old_dp_dpars <- str_subset(out$family$dpars, paste0("^", dp)) out$family$dpars <- setdiff(out$family$dpars, old_dp_dpars) out$family$dpars <- union(dp_dpars, out$family$dpars) } } # incorporate deprecated arguments require_threshold <- is_ordinal(out$family) && is.null(out$family$threshold) if (require_threshold && !is.null(threshold)) { # slot 'threshold' is deprecated as of brms 1.7.0 out$family <- validate_family(out$family, threshold = threshold) } if (!is.null(sparse)) { # a global 'sparse' argument is deprecated as of brms 2.8.3 warning2( "Argument 'sparse' should be specified within the ", "'formula' argument. See ?brmsformula for help." ) sparse <- as_one_logical(sparse) if (is.null(attr(out$formula, "sparse"))) { attr(out$formula, "sparse") <- sparse } for (i in seq_along(out$pforms)) { if (is.null(attr(out$pforms[[i]], "sparse"))) { attr(out$pforms[[i]], "sparse") <- sparse } } } if (is.null(attr(out$formula, "autocor")) && !is.null(autocor)) { # 'autocor' interface has been changed in brms 2.11.1 warning2( "Argument 'autocor' should be specified within the ", "'formula' argument. See ?brmsformula for help." ) # store 'autocor' as an attribute to carry it around more easily attr(out$formula, "autocor") <- validate_autocor(autocor) } if (!is.null(cov_ranef)) { # 'cov_ranef' is deprecated as of brms 2.12.5 out$cov_ranef <- validate_cov_ranef(cov_ranef) } bf(out) } # incorporate additional arguments into MV model formulas # allow passing lists of families or autocors #' @export validate_formula.mvbrmsformula <- function( formula, family = NULL, autocor = NULL, cov_ranef = NULL, ... 
) { nresp <- length(formula$forms) if (!is(family, "list")) { family <- replicate(nresp, family, simplify = FALSE) } else if (length(family) != nresp) { stop2("If 'family' is a list, it has to be of the same ", "length as the number of response variables.") } if (!is(autocor, "list")) { autocor <- replicate(nresp, autocor, simplify = FALSE) } else if (length(autocor) != nresp) { stop2("If 'autocor' is a list, it has to be of the same ", "length as the number of response variables.") } for (i in seq_len(nresp)) { formula$forms[[i]] <- validate_formula( formula$forms[[i]], family = family[[i]], autocor = autocor[[i]], ... ) } if (length(formula$forms) < 2L) { stop2("Multivariate models require at least two responses.") } allow_rescor <- allow_rescor(formula) if (is.null(formula$rescor)) { # with 'mi' terms we usually don't want rescor to be estimated miforms <- ulapply(formula$forms, function(f) terms_ad(f$formula, f$family, FALSE)[["mi"]] ) formula$rescor <- allow_rescor && !length(miforms) message("Setting 'rescor' to ", formula$rescor, " by default for this model") if (formula$rescor) { warning2( "In the future, 'rescor' will be set to FALSE by default for ", "all models. It is thus recommended to explicitely set ", "'rescor' via 'set_rescor' instead of using the default." 
) } } formula$rescor <- as_one_logical(formula$rescor) if (formula$rescor) { if (!allow_rescor) { stop2("Currently, estimating 'rescor' is only possible ", "in multivariate gaussian or student models.") } } # handle default of correlations between 'me' terms formula$mecor <- default_mecor(formula$mecor) for (i in seq_along(formula$forms)) { formula$forms[[i]]$mecor <- formula$mecor } # incorporate deprecated arguments if (!is.null(cov_ranef)) { # 'cov_ranef' is deprecated as of brms 2.12.5 formula$cov_ranef <- validate_cov_ranef(cov_ranef) } formula } # update a brmsformula and / or its attributes # @param brmsformula object # @param formula.: formula to update 'object' # @param mode supports the following options: # "update": apply update.formula # "replace": replace old formula # "keep": keep old formula # attributes are always updated # @param ... currently unused # @return a brmsformula object #' @export update.brmsformula <- function(object, formula., mode = c("update", "replace", "keep"), ...) { mode <- match.arg(mode) object <- bf(object) up_nl <- get_nl(formula., aol = FALSE) if (is.null(up_nl)) { up_nl <- get_nl(object) } # already use up_nl here to avoid ordinary parsing of NL formulas formula. <- bf(formula., nl = up_nl) up_family <- formula.[["family"]] if (is.null(up_family)) { up_family <- object[["family"]] } up_autocor <- attr(formula.$formula, "autocor") if (is.null(up_autocor)) { up_autocor <- attr(object$formula, "autocor") } old_form <- object$formula up_form <- formula.$formula if (mode == "update") { new_form <- update(old_form, up_form, ...) } else if (mode == "replace") { new_form <- up_form } else if (mode == "keep") { new_form <- old_form } flist <- c(object$pforms, object$pfix, formula.$pforms, formula.$pfix) bf(new_form, flist = flist, family = up_family, autocor = up_autocor, nl = up_nl) } #' @export update.mvbrmsformula <- function(object, formula., ...) 
{
  # temporary until proper updating is implemented
  if (!missing(formula.)) {
    stop2("Updating formulas of multivariate models is not yet possible.")
  }
  object
}

#' Update Formula Addition Terms
#'
#' Update addition terms used in formulas of \pkg{brms}. See
#' \code{\link{addition-terms}} for details.
#'
#' @param formula Two-sided formula to be updated.
#' @param adform One-sided formula containing addition terms to update
#'   \code{formula} with.
#' @param action Indicates what should happen to the existing addition terms in
#'   \code{formula}. If \code{"update"} (the default), old addition terms that
#'   have no corresponding term in \code{adform} will be kept. If
#'   \code{"replace"}, all old addition terms will be removed.
#'
#' @return An object of class \code{formula}.
#'
#' @examples
#' form <- y | trials(size) ~ x
#' update_adterms(form, ~ trials(10))
#' update_adterms(form, ~ weights(w))
#' update_adterms(form, ~ weights(w), action = "replace")
#' update_adterms(y ~ x, ~ trials(10))
#'
#' @export
update_adterms <- function(formula, adform, action = c("update", "replace")) {
  formula <- as_formula(formula)
  adform <- as_formula(adform)
  action <- match.arg(action)
  if (is.null(lhs(formula))) {
    stop2("Can't update a one-sided formula.")
  }
  str_formula <- formula2str(formula)
  # addition terms live between '|' and '~' in the string representation
  old_ad <- get_matches("(?<=\\|)[^~]*(?=~)", str_formula, perl = TRUE)
  new_ad_terms <- attr(terms(adform), "term.labels")
  if (action == "update" && length(old_ad)) {
    # extract adterms from the original formula
    old_ad <- formula(paste("~", old_ad))
    old_ad_terms <- attr(terms(old_ad), "term.labels")
    old_adnames <- get_matches("^[^\\(]+", old_ad_terms)
    old_adnames <- sub("^resp_", "", old_adnames)
    new_adnames <- get_matches("^[^\\(]+", new_ad_terms)
    new_adnames <- sub("^resp_", "", new_adnames)
    # keep unmatched adterms of the original formula
    keep <- !old_adnames %in% new_adnames
    new_ad_terms <- c(old_ad_terms[keep], new_ad_terms)
  }
  if (length(new_ad_terms)) {
    new_ad_terms <- paste(new_ad_terms, collapse = "+")
    new_ad_terms <- paste("|", new_ad_terms)
  }
  # rebuild the formula keeping the original RHS and attributes
  resp <- gsub("\\|.+", "", deparse0(formula[[2]]))
  out <- formula(paste(resp, new_ad_terms, "~1"))
  out[[3]] <- formula[[3]]
  attributes(out) <- attributes(formula)
  out
}

#' @export
print.brmsformula <- function(x, wsp = 0, digits = 2, ...) {
  cat(formula2str(x$formula, space = "trim"), "\n")
  str_wsp <- collapse(rep(" ", wsp))
  autocor <- attr(x$formula, "autocor")
  if (!is.null(autocor)) {
    autocor <- formula2str(autocor, rm = 1, space = "trim")
    cat(paste0(str_wsp, "autocor ~ ", autocor, "\n"))
  }
  pforms <- x$pforms
  if (length(pforms)) {
    pforms <- ulapply(pforms, formula2str, space = "trim")
    cat(collapse(str_wsp, pforms, "\n"))
  }
  pfix <- x$pfix
  if (length(pfix)) {
    pfix <- lapply(pfix, function(x)
      ifelse(is.numeric(x), round(x, digits), x)
    )
    pfix <- paste0(names(pfix), " = ", unlist(pfix))
    cat(collapse(str_wsp, pfix, "\n"))
  }
  invisible(x)
}

#' @export
print.mvbrmsformula <- function(x, wsp = 0, ...) {
  for (i in seq_along(x$forms)) {
    if (i > 1) cat(collapse(rep(" ", wsp)))
    print(x$forms[[i]], wsp = wsp, ...)
  }
  invisible(x)
}

#' Checks if argument is a \code{brmsformula} object
#'
#' @param x An \R object
#'
#' @export
is.brmsformula <- function(x) {
  inherits(x, "brmsformula")
}

#' Checks if argument is a \code{mvbrmsformula} object
#'
#' @param x An \R object
#'
#' @export
is.mvbrmsformula <- function(x) {
  inherits(x, "mvbrmsformula")
}

is_nonlinear <- function(x) {
  stopifnot(is.brmsfit(x))
  get_nl(bf(x$formula))
}

warn_dpar <- function(dpar) {
  # argument 'dpar' in formula helper functions is deprecated as of 2.3.7
  if (!is.null(dpar)) {
    warning2("Argument 'dpar' is no longer necessary and ignored.")
  }
  NULL
}

# return the right-hand side of a formula
rhs <- function(x) {
  attri <- attributes(x)
  x <- as.formula(x)
  x <- if (length(x) == 3) x[-2] else x
  do_call(structure, c(list(x), attri))
}

# return the left-hand side of a formula
lhs <- function(x) {
  x <- as.formula(x)
  if (length(x) == 3L) update(x, .
~ 1) else NULL
}

# convert a string to a formula
# @param x vector of strings to be converted
# @param env environment to attach to the resulting formula
# @param collapse separator used to join the elements of 'x'
# @return a one-sided formula; '~ 1' if 'x' has no non-empty elements
str2formula <- function(x, env = parent.frame(), collapse = "+") {
  has_chars <- nzchar(x)
  if (length(x) && any(has_chars)) {
    out <- paste(x[has_chars], collapse = collapse)
  } else {
    out <- "1"
  }
  as.formula(paste("~", out), env = env)
}

# convert a formula to a character string
# @param formula a model formula
# @param rm a vector of two elements indicating how many characters
#   should be removed at the beginning and end of the string respectively
# @param space how should whitespaces be treated?
#   option 'rm' is dangerous as it may combine different operators (#1142)
# @return a single character string or NULL
formula2str <- function(formula, rm = c(0, 0), space = c("trim", "rm")) {
  if (is.null(formula)) {
    return(NULL)
  }
  formula <- as.formula(formula)
  space <- match.arg(space)
  # a missing second element of 'rm' means "remove nothing at the end"
  if (anyNA(rm[2])) rm[2] <- 0
  # deparse may split long formulas over multiple strings; rejoin them
  x <- Reduce(paste, deparse(formula))
  x <- gsub("[\t\r\n]+", " ", x, perl = TRUE)
  if (space == "trim") {
    x <- trim_wsp(x)
  } else {
    x <- rm_wsp(x)
  }
  substr(x, 1 + rm[1], nchar(x) - rm[2])
}

# right-hand side of a formula as a character string
# rm = c(1, 0) strips the leading '~'
str_rhs <- function(x) {
  formula2str(rhs(x), rm = c(1, 0))
}

# left-hand side of a formula as a character string
# rm = c(0, 2) strips the trailing ' ~' (after whitespace trimming)
str_lhs <- function(x) {
  formula2str(lhs(x), rm = c(0, 2))
}

is.formula <- function(x) {
  inherits(x, "formula")
}

# wrapper around as.formula with additional checks
as_formula <- function(x) {
  x <- as.formula(x)
  # fixes issue #749
  rhs <- rhs(x)[[2]]
  if (isTRUE(is.call(rhs) && rhs[[1]] == "~")) {
    stop2("Nested formulas are not allowed. Did you use '~~' somewhere?")
  }
  x
}

# expand the '.' variable in formula using stats::terms
expand_dot_formula <- function(formula, data = NULL) {
  if (isTRUE("."
%in% all.vars(formula))) { att <- attributes(formula) try_terms <- try( stats::terms(formula, data = data), silent = TRUE ) if (!is_try_error(try_terms)) { formula <- formula(try_terms) } attributes(formula) <- att } formula } brms/R/formula-ad.R0000644000176200001440000003321314430660136013575 0ustar liggesusers#' Additional Response Information #' #' Provide additional information on the response variable #' in \pkg{brms} models, such as censoring, truncation, or #' known measurement error. Detailed documentation on the use #' of each of these functions can be found in the Details section #' of \code{\link{brmsformula}} (under "Additional response information"). #' #' @name addition-terms #' @aliases se weights trials thres cat dec cens trunc #' @aliases index rate subset vreal vint #' #' @param x A vector; Ideally a single variable defined in the data (see #' Details). Allowed values depend on the function: \code{resp_se} and #' \code{resp_weights} require positive numeric values. \code{resp_trials}, #' \code{resp_thres}, and \code{resp_cat} require positive integers. #' \code{resp_dec} requires \code{0} and \code{1}, or alternatively #' \code{'lower'} and \code{'upper'}. \code{resp_subset} requires \code{0} and #' \code{1}, or alternatively \code{FALSE} and \code{TRUE}. \code{resp_cens} #' requires \code{'left'}, \code{'none'}, \code{'right'}, and #' \code{'interval'} (or equivalently \code{-1}, \code{0}, \code{1}, and #' \code{2}) to indicate left, no, right, or interval censoring. #' \code{resp_index} does not make any requirements other than the value being #' unique for each observation. #' @param sigma Logical; Indicates whether the residual standard deviation #' parameter \code{sigma} should be included in addition to the known #' measurement error. Defaults to \code{FALSE} for backwards compatibility, #' but setting it to \code{TRUE} is usually the better choice. 
#' @param scale Logical; Indicates whether weights should be scaled #' so that the average weight equals one. Defaults to \code{FALSE}. #' @param y2 A vector specifying the upper bounds in interval censoring. #' Will be ignored for non-interval censored observations. However, it #' should NOT be \code{NA} even for non-interval censored observations to #' avoid accidental exclusion of these observations. #' @param lb A numeric vector or single numeric value specifying #' the lower truncation bound. #' @param ub A numeric vector or single numeric value specifying #' the upper truncation bound. #' @param sdy Optional known measurement error of the response #' treated as standard deviation. If specified, handles #' measurement error and (completely) missing values #' at the same time using the plausible-values-technique. #' @param denom A vector of positive numeric values specifying #' the denominator values from which the response rates are computed. #' @param gr A vector of grouping indicators. #' @param ... For \code{resp_vreal}, vectors of real values. #' For \code{resp_vint}, vectors of integer values. In Stan, #' these variables will be named \code{vreal1}, \code{vreal2}, ..., #' and \code{vint1}, \code{vint2}, ..., respectively. #' #' @return A list of additional response information to be processed further #' by \pkg{brms}. #' #' @details #' These functions are almost solely useful when #' called in formulas passed to the \pkg{brms} package. #' Within formulas, the \code{resp_} prefix may be omitted. #' More information is given in the 'Details' section #' of \code{\link{brmsformula}} (under "Additional response information"). #' #' It is highly recommended to use a single data variable as input #' for \code{x} (instead of a more complicated expression) to make sure all #' post-processing functions work as expected. 
#' #' @seealso #' \code{\link{brm}}, #' \code{\link{brmsformula}} #' #' @examples #' \dontrun{ #' ## Random effects meta-analysis #' nstudies <- 20 #' true_effects <- rnorm(nstudies, 0.5, 0.2) #' sei <- runif(nstudies, 0.05, 0.3) #' outcomes <- rnorm(nstudies, true_effects, sei) #' data1 <- data.frame(outcomes, sei) #' fit1 <- brm(outcomes | se(sei, sigma = TRUE) ~ 1, #' data = data1) #' summary(fit1) #' #' ## Probit regression using the binomial family #' n <- sample(1:10, 100, TRUE) # number of trials #' success <- rbinom(100, size = n, prob = 0.4) #' x <- rnorm(100) #' data2 <- data.frame(n, success, x) #' fit2 <- brm(success | trials(n) ~ x, data = data2, #' family = binomial("probit")) #' summary(fit2) #' #' ## Survival regression modeling the time between the first #' ## and second recurrence of an infection in kidney patients. #' fit3 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), #' data = kidney, family = lognormal()) #' summary(fit3) #' #' ## Poisson model with truncated counts #' fit4 <- brm(count | trunc(ub = 104) ~ zBase * Trt, #' data = epilepsy, family = poisson()) #' summary(fit4) #' } #' NULL # TODO: split into separate docs for each function #' @rdname addition-terms #' @export resp_se <- function(x, sigma = FALSE) { se <- deparse0(substitute(x)) sigma <- as_one_logical(sigma) class_resp_special( "se", call = match.call(), vars = nlist(se), flags = nlist(sigma) ) } #' @rdname addition-terms #' @export resp_weights <- function(x, scale = FALSE) { weights <- deparse0(substitute(x)) scale <- as_one_logical(scale) class_resp_special( "weights", call = match.call(), vars = nlist(weights), flags = nlist(scale) ) } #' @rdname addition-terms #' @export resp_trials <- function(x) { trials <- deparse0(substitute(x)) class_resp_special("trials", call = match.call(), vars = nlist(trials)) } #' @rdname addition-terms #' @export resp_thres <- function(x, gr = NA) { thres <- deparse0(substitute(x)) gr <- deparse0(substitute(gr)) 
class_resp_special("thres", call = match.call(), vars = nlist(thres, gr)) } #' @rdname addition-terms #' @export resp_cat <- function(x) { # deprecated as of brms 2.10.5 # number of thresholds = number of response categories - 1 thres <- deparse0(substitute(x)) str_add(thres) <- " - 1" class_resp_special( "thres", call = match.call(), vars = nlist(thres, gr = "NA") ) } #' @rdname addition-terms #' @export resp_dec <- function(x) { dec <- deparse0(substitute(x)) class_resp_special("dec", call = match.call(), vars = nlist(dec)) } #' @rdname addition-terms #' @export resp_cens <- function(x, y2 = NA) { cens <- deparse0(substitute(x)) y2 <- deparse0(substitute(y2)) class_resp_special("cens", call = match.call(), vars = nlist(cens, y2)) } #' @rdname addition-terms #' @export resp_trunc <- function(lb = -Inf, ub = Inf) { lb <- deparse0(substitute(lb)) ub <- deparse0(substitute(ub)) class_resp_special("trunc", call = match.call(), vars = nlist(lb, ub)) } #' @rdname addition-terms #' @export resp_mi <- function(sdy = NA) { sdy <- deparse0(substitute(sdy)) class_resp_special("mi", call = match.call(), vars = nlist(sdy)) } #' @rdname addition-terms #' @export resp_index <- function(x) { index <- deparse0(substitute(x)) class_resp_special("index", call = match.call(), vars = nlist(index)) } #' @rdname addition-terms #' @export resp_rate <- function(denom) { denom <- deparse0(substitute(denom)) class_resp_special("rate", call = match.call(), vars = nlist(denom)) } #' @rdname addition-terms #' @export resp_subset <- function(x) { subset <- deparse0(substitute(x)) class_resp_special("subset", call = match.call(), vars = nlist(subset)) } #' @rdname addition-terms #' @export resp_vreal <- function(...) { vars <- as.list(substitute(list(...)))[-1] class_resp_special("vreal", call = match.call(), vars = vars) } #' @rdname addition-terms #' @export resp_vint <- function(...) 
{ vars <- as.list(substitute(list(...)))[-1] class_resp_special("vint", call = match.call(), vars = vars) } # class underlying response addition terms # @param type type of the addition term # @param call the call to the original addition term function # @param vars named list of unevaluated variables # @param flags named list of (evaluated) logical indicators class_resp_special <- function(type, call, vars = list(), flags = list()) { type <- as_one_character(type) stopifnot(is.call(call), is.list(vars), is.list(flags)) label <- deparse0(call) out <- nlist(type, call, label, vars, flags) class(out) <- c("resp_special") out } # computes data for addition arguments eval_rhs <- function(formula, data = NULL) { formula <- as.formula(formula) eval(rhs(formula)[[2]], data, environment(formula)) } # get expression for a variable of an addition term # @param x list with potential $adforms elements # @param ad name of the addition term # @param target name of the element to extract # @type type of the element to extract # @return a character string or NULL get_ad_expr <- function(x, ad, name, type = "vars") { ad <- as_one_character(ad) name <- as_one_character(name) type <- as_one_character(type) if (is.null(x$adforms[[ad]])) { return(NULL) } out <- eval_rhs(x$adforms[[ad]])[[type]][[name]] if (type == "vars" && is_equal(out, "NA")) { out <- NULL } out } # get values of a variable used in an addition term # @return a vector of values or NULL get_ad_values <- function(x, ad, name, data) { expr <- get_ad_expr(x, ad, name, type = "vars") eval2(expr, data) } # get a flag used in an addition term # @return TRUE or FALSE get_ad_flag <- function(x, ad, name) { expr <- get_ad_expr(x, ad, name, type = "flags") as_one_logical(eval2(expr)) } # get variable names used in addition terms get_ad_vars <- function(x, ...) { UseMethod("get_ad_vars") } #' @export get_ad_vars.brmsterms <- function(x, ad, ...) 
{ ad <- as_one_character(ad) all_vars(x$adforms[[ad]]) } #' @export get_ad_vars.mvbrmsterms <- function(x, ad, ...) { unique(ulapply(x$terms, get_ad_vars, ad = ad, ...)) } # coerce censored values into the right format # @param x vector of censoring indicators # @return transformed vector of censoring indicators prepare_cens <- function(x) { .prepare_cens <- function(x) { stopifnot(length(x) == 1L) regx <- paste0("^", x) if (grepl(regx, "left")) { x <- -1 } else if (grepl(regx, "none") || isFALSE(x)) { x <- 0 } else if (grepl(regx, "right") || isTRUE(x)) { x <- 1 } else if (grepl(regx, "interval")) { x <- 2 } return(x) } x <- unname(x) if (is.factor(x)) { x <- as.character(x) } ulapply(x, .prepare_cens) } # extract information on censoring of the response variable # @return vector of censoring indicators or NULL in case of no censoring get_cens <- function(bterms, data, resp = NULL) { if (!is.null(resp)) { bterms <- bterms$terms[[resp]] } out <- NULL if (is.formula(bterms$adforms$cens)) { out <- get_ad_values(bterms, "cens", "cens", data) out <- prepare_cens(out) } out } # extract truncation boundaries # @param bterms a brmsterms object # @param data data.frame containing the truncation variables # @param incl_family include the family in the derivation of the bounds? # @param stan return bounds in form of Stan syntax? # @return a list with elements 'lb' and 'ub' or corresponding Stan code trunc_bounds <- function(bterms, data = NULL, incl_family = FALSE, stan = FALSE, ...) 
{
  stopifnot(is.brmsterms(bterms))
  # use the 'trunc' addition term if present; otherwise default (no bounds)
  if (is.formula(bterms$adforms$trunc)) {
    trunc <- eval_rhs(bterms$adforms$trunc)
  } else {
    trunc <- resp_trunc()
  }
  out <- list(
    lb = eval2(trunc$vars$lb, data),
    ub = eval2(trunc$vars$ub, data)
  )
  if (incl_family) {
    # intersect user-specified bounds with the family's natural bounds
    family_bounds <- family_bounds(bterms)
    out$lb <- max(out$lb, family_bounds$lb)
    out$ub <- min(out$ub, family_bounds$ub)
  }
  if (stan) {
    # render as a Stan '<lower=...,upper=...>' constraint string
    if (any(out$lb > -Inf | out$ub < Inf)) {
      tmp <- c(
        if (out$lb > -Inf) paste0("lower=", out$lb),
        if (out$ub < Inf) paste0("upper=", out$ub)
      )
      out <- paste0("<", paste0(tmp, collapse = ","), ">")
    } else {
      out <- ""
    }
  }
  out
}

# check if addition argument 'subset' is used in the model
has_subset <- function(bterms) {
  .has_subset <- function(x) {
    is.formula(x$adforms$subset)
  }
  if (is.brmsterms(bterms)) {
    out <- .has_subset(bterms)
  } else if (is.mvbrmsterms(bterms)) {
    out <- any(ulapply(bterms$terms, .has_subset))
  } else {
    out <- FALSE
  }
  out
}

# construct a list of indices for cross-formula referencing
# @return a named list with one element per response variable
tidy_index <- function(x, data) {
  out <- .tidy_index(x, data)
  if (is.brmsterms(x)) {
    # ensure consistent format for both uni- and multivariate models
    out <- list(out)
    names(out)[1] <- terms_resp(x$respform)
  }
  out
}

# internal version of tidy_index
.tidy_index <- function(x, ...) {
  UseMethod(".tidy_index")
}

#' @export
.tidy_index.brmsterms <- function(x, data, ...) {
  out <- get_ad_values(x, "index", "index", data)
  if (is.null(out)) {
    return(NULL)
  }
  if (has_subset(x)) {
    # restrict indices to the subsetted observations
    subset <- as.logical(get_ad_values(x, "subset", "subset", data))
    out <- out[subset]
    attr(out, "subset") <- TRUE
  }
  if (anyNA(out)) {
    stop2("NAs are not allowed in 'index' variables.")
  }
  if (anyDuplicated(out)) {
    stop2("Index of response '", x$resp, "' contains duplicated values.")
  }
  out
}

#' @export
.tidy_index.mvbrmsterms <- function(x, data, ...) {
  lapply(x$terms, .tidy_index, data = data, ...)
} # check if cross-formula referencing is possible in subsetted models check_cross_formula_indexing <- function(bterms) { sp_terms <- ulapply(get_effect(bterms, "sp"), all_terms) me_terms <- get_matches_expr(regex_sp("me"), sp_terms) if (length(me_terms)) { stop2("Cannot use me() terms in subsetted formulas.") } mi_terms <- get_matches_expr(regex_sp("mi"), sp_terms) idx_vars <- lapply(mi_terms, function(x) eval2(x)$idx) if (any(idx_vars == "NA")) { stop2("mi() terms in subsetted formulas require ", "the 'idx' argument to be specified.") } invisible(TRUE) } # does an expression consist of a single variable? is_single_variable <- function(x) { x <- as_one_character(x) is_equal(x, all_vars(x)) } brms/R/loo_predict.R0000644000176200001440000002036714417766716014077 0ustar liggesusers#' Compute Weighted Expectations Using LOO #' #' These functions are wrappers around the \code{\link[loo]{E_loo}} #' function of the \pkg{loo} package. #' #' @aliases loo_predict loo_linpred loo_predictive_interval #' #' @param object An object of class \code{brmsfit}. #' @param type The statistic to be computed on the results. #' Can by either \code{"mean"} (default), \code{"var"}, or #' \code{"quantile"}. #' @param probs A vector of quantiles to compute. #' Only used if \code{type = quantile}. #' @param prob For \code{loo_predictive_interval}, a scalar in \eqn{(0,1)} #' indicating the desired probability mass to include in the intervals. The #' default is \code{prob = 0.9} (\eqn{90}\% intervals). #' @param psis_object An optional object returned by \code{\link[loo]{psis}}. #' If \code{psis_object} is missing then \code{\link[loo]{psis}} is executed #' internally, which may be time consuming for models fit to very large datasets. #' @param ... 
Optional arguments passed to the underlying methods that is #' \code{\link[brms:log_lik.brmsfit]{log_lik}}, as well as #' \code{\link[brms:posterior_predict.brmsfit]{posterior_predict}} or #' \code{\link[brms:posterior_linpred.brmsfit]{posterior_linpred}}. #' @inheritParams posterior_predict.brmsfit #' #' @return \code{loo_predict} and \code{loo_linpred} return a vector with one #' element per observation. The only exception is if \code{type = "quantile"} #' and \code{length(probs) >= 2}, in which case a separate vector for each #' element of \code{probs} is computed and they are returned in a matrix with #' \code{length(probs)} rows and one column per observation. #' #' \code{loo_predictive_interval} returns a matrix with one row per #' observation and two columns. #' \code{loo_predictive_interval(..., prob = p)} is equivalent to #' \code{loo_predict(..., type = "quantile", probs = c(a, 1-a))} with #' \code{a = (1 - p)/2}, except it transposes the result and adds informative #' column names. #' #' @examples #' \dontrun{ #' ## data from help("lm") #' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14) #' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69) #' d <- data.frame( #' weight = c(ctl, trt), #' group = gl(2, 10, 20, labels = c("Ctl", "Trt")) #' ) #' fit <- brm(weight ~ group, data = d) #' loo_predictive_interval(fit, prob = 0.8) #' #' ## optionally log-weights can be pre-computed and reused #' psis <- loo::psis(-log_lik(fit), cores = 2) #' loo_predictive_interval(fit, prob = 0.8, psis_object = psis) #' loo_predict(fit, type = "var", psis_object = psis) #' } #' #' @method loo_predict brmsfit #' @importFrom rstantools loo_predict #' @export loo_predict #' @export loo_predict.brmsfit <- function(object, type = c("mean", "var", "quantile"), probs = 0.5, psis_object = NULL, resp = NULL, ...) 
{ type <- match.arg(type) stopifnot_resp(object, resp) if (is.null(psis_object)) { message("Running PSIS to compute weights") psis_object <- compute_loo(object, criterion = "psis", resp = resp, ...) } preds <- posterior_predict(object, resp = resp, ...) loo::E_loo(preds, psis_object, type = type, probs = probs)$value } #' @rdname loo_predict.brmsfit #' @method loo_linpred brmsfit #' @importFrom rstantools loo_linpred #' @export loo_linpred #' @export loo_linpred.brmsfit <- function(object, type = c("mean", "var", "quantile"), probs = 0.5, psis_object = NULL, resp = NULL, ...) { type <- match.arg(type) stopifnot_resp(object, resp) family <- family(object, resp = resp) if (is_ordinal(family) || is_categorical(family)) { stop2("Method 'loo_linpred' is not implemented ", "for categorical or ordinal models") } if (is.null(psis_object)) { message("Running PSIS to compute weights") psis_object <- compute_loo(object, criterion = "psis", resp = resp, ...) } preds <- posterior_linpred(object, resp = resp, ...) loo::E_loo(preds, psis_object, type = type, probs = probs)$value } #' @rdname loo_predict.brmsfit #' @method loo_predictive_interval brmsfit #' @importFrom rstantools loo_predictive_interval #' @export loo_predictive_interval #' @export loo_predictive_interval.brmsfit <- function(object, prob = 0.9, psis_object = NULL, ...) { if (length(prob) != 1L) { stop2("Argument 'prob' should be of length 1.") } alpha <- (1 - prob) / 2 probs <- c(alpha, 1 - alpha) labs <- paste0(100 * probs, "%") intervals <- loo_predict( object, type = "quantile", probs = probs, psis_object = psis_object, ... ) rownames(intervals) <- labs t(intervals) } #' Compute a LOO-adjusted R-squared for regression models #' #' @aliases loo_R2 #' #' @inheritParams bayes_R2.brmsfit #' @param ... Further arguments passed to #' \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}} and #' \code{\link[brms:log_lik.brmsfit]{log_lik}}, #' which are used in the computation of the R-squared values. 
#'
#' @return If \code{summary = TRUE}, an M x C matrix is returned
#'   (M = number of response variables and C = \code{length(probs) + 2})
#'   containing summary statistics of the LOO-adjusted R-squared values.
#'   If \code{summary = FALSE}, the posterior draws of the LOO-adjusted
#'   R-squared values are returned in an S x M matrix (S is the number of draws).
#'
#' @examples
#' \dontrun{
#' fit <- brm(mpg ~ wt + cyl, data = mtcars)
#' summary(fit)
#' loo_R2(fit)
#'
#' # compute R2 with new data
#' nd <- data.frame(mpg = c(10, 20, 30), wt = c(4, 3, 2), cyl = c(8, 6, 4))
#' loo_R2(fit, newdata = nd)
#' }
#'
#' @method loo_R2 brmsfit
#' @importFrom rstantools loo_R2
#' @export loo_R2
#' @export
loo_R2.brmsfit <- function(object, resp = NULL, summary = TRUE,
                           robust = FALSE, probs = c(0.025, 0.975), ...) {
  contains_draws(object)
  object <- restructure(object)
  resp <- validate_resp(resp, object)
  summary <- as_one_logical(summary)
  # check for precomputed values
  R2 <- get_criterion(object, "loo_R2")
  if (is.matrix(R2)) {
    # assumes unsummarized 'loo_R2' as ensured by 'add_criterion'
    take <- colnames(R2) %in% paste0("R2", resp)
    R2 <- R2[, take, drop = FALSE]
    if (summary) {
      R2 <- posterior_summary(R2, probs = probs, robust = robust)
    }
    return(R2)
  }
  family <- family(object, resp = resp)
  if (conv_cats_dpars(family)) {
    stop2("'loo_R2' is not defined for unordered categorical models.")
  }
  if (is_ordinal(family)) {
    warning2(
      "Predictions are treated as continuous variables in ",
      "'loo_R2' which is likely invalid for ordinal families."
    )
  }
  args_y <- list(object, warn = TRUE, ...)
  args_ypred <- list(object, sort = TRUE, ...)
R2 <- named_list(paste0("R2", resp)) for (i in seq_along(R2)) { # assumes expectations of different responses to be independent args_ypred$resp <- args_y$resp <- resp[i] y <- do_call(get_y, args_y) ypred <- do_call(posterior_epred, args_ypred) ll <- do_call(log_lik, args_ypred) r_eff <- r_eff_log_lik(ll, object) if (is_ordinal(family(object, resp = resp[i]))) { ypred <- ordinal_probs_continuous(ypred) } R2[[i]] <- .loo_R2(y, ypred, ll, r_eff) } R2 <- do_call(cbind, R2) colnames(R2) <- paste0("R2", resp) if (summary) { R2 <- posterior_summary(R2, probs = probs, robust = robust) } R2 } # internal function of loo_R2.brmsfit # see http://discourse.mc-stan.org/t/stan-summary-r2-or-adjusted-r2/4308/4 # and https://github.com/stan-dev/rstanarm/blob/master/R/bayes_R2.R .loo_R2 <- function(y, ypred, ll, r_eff) { psis_object <- loo::psis(log_ratios = -ll, r_eff = r_eff) ypredloo <- loo::E_loo(ypred, psis_object, log_ratios = -ll)$value err_loo <- ypredloo - y # simulated dirichlet weights S <- nrow(ypred) N <- ncol(ypred) exp_draws <- matrix(rexp(S * N, rate = 1), nrow = S, ncol = N) weights <- exp_draws / rowSums(exp_draws) var_y <- (N / (N - 1)) * (rowSums(sweep(weights, 2, y^2, FUN = "*")) - rowSums(sweep(weights, 2, y, FUN = "*"))^2) var_err_loo <- (N / (N - 1)) * (rowSums(sweep(weights, 2, err_loo^2, FUN = "*")) - rowSums(sweep(weights, 2, err_loo, FUN = "*")^2)) out <- unname(1 - var_err_loo / var_y) out[out < -1] <- -1 out[out > 1] <- 1 as.matrix(out) } brms/R/autocor.R0000644000176200001440000004473114364257623013242 0ustar liggesusers# All functions in this file belong to the deprecated 'cor_brms' class # for specifying autocorrelation structures. They will be removed in brms 3. #' (Deprecated) Correlation structure classes for the \pkg{brms} package #' #' Classes of correlation structures available in the \pkg{brms} package. #' \code{cor_brms} is not a correlation structure itself, #' but the class common to all correlation structures implemented in \pkg{brms}. 
#' #' @name cor_brms #' @aliases cor_brms-class #' #' @section Available correlation structures: #' \describe{ #' \item{cor_arma}{autoregressive-moving average (ARMA) structure, #' with arbitrary orders for the autoregressive and moving #' average components} #' \item{cor_ar}{autoregressive (AR) structure of arbitrary order} #' \item{cor_ma}{moving average (MA) structure of arbitrary order} #' \item{cor_car}{Spatial conditional autoregressive (CAR) structure} #' \item{cor_sar}{Spatial simultaneous autoregressive (SAR) structure} #' \item{cor_fixed}{fixed user-defined covariance structure} #' } #' #' @seealso #' \code{\link{cor_arma}, \link{cor_ar}, \link{cor_ma}, #' \link{cor_car}, \link{cor_sar}, \link{cor_fixed}} #' NULL #' (Deprecated) ARMA(p,q) correlation structure #' #' This function is deprecated. Please see \code{\link{arma}} for the new syntax. #' This functions is a constructor for the \code{cor_arma} class, representing #' an autoregression-moving average correlation structure of order (p, q). #' #' @aliases cor_arma-class #' #' @param formula A one sided formula of the form \code{~ t}, or \code{~ t | g}, #' specifying a time covariate \code{t} and, optionally, a grouping factor #' \code{g}. A covariate for this correlation structure must be integer #' valued. When a grouping factor is present in \code{formula}, the #' correlation structure is assumed to apply only to observations within the #' same grouping level; observations with different grouping levels are #' assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to #' using the order of the observations in the data as a covariate, and no #' groups. #' @param p A non-negative integer specifying the autoregressive (AR) #' order of the ARMA structure. Default is 0. #' @param q A non-negative integer specifying the moving average (MA) #' order of the ARMA structure. Default is 0. #' @param r No longer supported. 
#' @param cov A flag indicating whether ARMA effects should be estimated by #' means of residual covariance matrices. This is currently only possible for #' stationary ARMA effects of order 1. If the model family does not have #' natural residuals, latent residuals are added automatically. If #' \code{FALSE} (the default) a regression formulation is used that is #' considerably faster and allows for ARMA effects of order higher than 1 but #' is only available for \code{gaussian} models and some of its #' generalizations. #' #' @return An object of class \code{cor_arma}, representing an #' autoregression-moving-average correlation structure. #' #' @seealso \code{\link{cor_ar}}, \code{\link{cor_ma}} #' #' @examples #' cor_arma(~ visit | patient, p = 2, q = 2) #' #' @export cor_arma <- function(formula = ~1, p = 0, q = 0, r = 0, cov = FALSE) { formula <- as.formula(formula) p <- as_one_numeric(p) q <- as_one_numeric(q) cov <- as_one_logical(cov) if ("r" %in% names(match.call())) { warning2("The ARR structure is no longer supported and ignored.") } if (!(p >= 0 && p == round(p))) { stop2("Autoregressive order must be a non-negative integer.") } if (!(q >= 0 && q == round(q))) { stop2("Moving-average order must be a non-negative integer.") } if (!sum(p, q)) { stop2("At least one of 'p' and 'q' should be greater zero.") } if (cov && (p > 1 || q > 1)) { stop2("Covariance formulation of ARMA structures is ", "only possible for effects of maximal order one.") } x <- nlist(formula, p, q, cov) class(x) <- c("cor_arma", "cor_brms") x } #' (Deprecated) AR(p) correlation structure #' #' This function is deprecated. Please see \code{\link{ar}} for the new syntax. #' This function is a constructor for the \code{cor_arma} class, #' allowing for autoregression terms only. #' #' @inheritParams cor_arma #' @param p A non-negative integer specifying the autoregressive (AR) #' order of the ARMA structure. Default is 1. 
#'
#' @return An object of class \code{cor_arma} containing solely autoregression terms.
#'
#' @details AR refers to autoregressive effects of residuals, which
#'   is what is typically understood as autoregressive effects.
#'   However, one may also model autoregressive effects of the response
#'   variable, which is called ARR in \pkg{brms}.
#'
#' @seealso \code{\link{cor_arma}}
#'
#' @examples
#' cor_ar(~visit|patient, p = 2)
#'
#' @export
# thin wrapper around cor_arma() with the MA order fixed at zero
cor_ar <- function(formula = ~1, p = 1, cov = FALSE) {
  cor_arma(formula = formula, p = p, q = 0, cov = cov)
}

#' (Deprecated) MA(q) correlation structure
#'
#' This function is deprecated. Please see \code{\link{ma}} for the new syntax.
#' This function is a constructor for the \code{cor_arma} class,
#' allowing for moving average terms only.
#'
#' @inheritParams cor_arma
#' @param q A non-negative integer specifying the moving average (MA)
#'   order of the ARMA structure. Default is 1.
#'
#' @return An object of class \code{cor_arma} containing solely moving
#'   average terms.
#'
#' @seealso \code{\link{cor_arma}}
#'
#' @examples
#' cor_ma(~visit|patient, q = 2)
#'
#' @export
# thin wrapper around cor_arma() with the AR order fixed at zero
cor_ma <- function(formula = ~1, q = 1, cov = FALSE) {
  cor_arma(formula = formula, p = 0, q = q, cov = cov)
}

#' (Defunct) ARR correlation structure
#'
#' The ARR correlation structure is no longer supported.
#'
#' @inheritParams cor_arma
#'
#' @keywords internal
#' @export
# passing 'r' explicitly makes cor_arma() emit the defunct-ARR warning;
# NOTE(review): with p = 0 and q = 0, cor_arma() then stops with the generic
# "'p' and 'q'" error rather than a dedicated defunct message — confirm intent
cor_arr <- function(formula = ~1, r = 1) {
  cor_arma(formula = formula, p = 0, q = 0, r = r)
}

#' (Deprecated) Compound Symmetry (COSY) Correlation Structure
#'
#' This function is deprecated. Please see \code{\link{cosy}} for the new syntax.
#' This function is a constructor for the \code{cor_cosy} class, representing
#' a compound symmetry structure corresponding to uniform correlation.
#'
#' @aliases cor_cosy-class
#'
#' @inheritParams cor_arma
#'
#' @return An object of class \code{cor_cosy}, representing a compound symmetry
#'   correlation structure.
#'
#' @examples
#' cor_cosy(~ visit | patient)
#'
#' @export
# constructor: no validation beyond formula coercion is required here
cor_cosy <- function(formula = ~1) {
  formula <- as.formula(formula)
  x <- nlist(formula)
  class(x) <- c("cor_cosy", "cor_brms")
  x
}

#' (Deprecated) Spatial simultaneous autoregressive (SAR) structures
#'
#' These functions are deprecated. Please see \code{\link{sar}} for the new
#' syntax. These functions are constructors for the \code{cor_sar} class
#' implementing spatial simultaneous autoregressive structures.
#' The \code{lagsar} structure implements SAR of the response values:
#' \deqn{y = \rho W y + \eta + e}
#' The \code{errorsar} structure implements SAR of the residuals:
#' \deqn{y = \eta + u, u = \rho W u + e}
#' In the above equations, \eqn{\eta} is the predictor term and
#' \eqn{e} are independent normally or t-distributed residuals.
#'
#' @param W An object specifying the spatial weighting matrix.
#'   Can be either the spatial weight matrix itself or an
#'   object of class \code{listw} or \code{nb}, from which
#'   the spatial weighting matrix can be computed.
#' @param type Type of the SAR structure. Either \code{"lag"}
#'   (for SAR of the response values) or \code{"error"}
#'   (for SAR of the residuals).
#'
#' @details Currently, only families \code{gaussian} and \code{student}
#'   support SAR structures.
#'
#' @return An object of class \code{cor_sar} to be used in calls to
#'   \code{\link{brm}}.
#'
#' @examples
#' \dontrun{
#' data(oldcol, package = "spdep")
#' fit1 <- brm(CRIME ~ INC + HOVAL, data = COL.OLD,
#'             autocor = cor_lagsar(COL.nb),
#'             chains = 2, cores = 2)
#' summary(fit1)
#' plot(fit1)
#'
#' fit2 <- brm(CRIME ~ INC + HOVAL, data = COL.OLD,
#'             autocor = cor_errorsar(COL.nb),
#'             chains = 2, cores = 2)
#' summary(fit2)
#' plot(fit2)
#' }
#'
#' @export
cor_sar <- function(W, type = c("lag", "error")) {
  type <- match.arg(type)
  # capture the caller's expression for 'W' before it is evaluated,
  # so the original object name can be shown in print methods
  W_name <- deparse0(substitute(W))
  W <- validate_sar_matrix(W)
  structure(
    nlist(W, W_name, type),
    class = c("cor_sar", "cor_brms")
  )
}

#' @rdname cor_sar
#' @export
cor_lagsar <- function(W) {
  out <- cor_sar(W, type = "lag")
  # redo the name capture: inside cor_sar(), substitute(W) would only
  # deparse the promise symbol 'W', not the caller's expression
  out$W_name <- deparse0(substitute(W))
  out
}

#' @rdname cor_sar
#' @export
cor_errorsar <- function(W) {
  out <- cor_sar(W, type = "error")
  # see cor_lagsar: restore the caller-visible name of 'W'
  out$W_name <- deparse0(substitute(W))
  out
}

#' (Deprecated) Spatial conditional autoregressive (CAR) structures
#'
#' These functions are deprecated. Please see \code{\link{car}} for the new
#' syntax. These functions are constructors for the \code{cor_car} class
#' implementing spatial conditional autoregressive structures.
#'
#' @param W Adjacency matrix of locations.
#'   All non-zero entries are treated as if the two locations
#'   are adjacent. If \code{formula} contains a grouping factor,
#'   the row names of \code{W} have to match the levels
#'   of the grouping factor.
#' @param formula An optional one-sided formula of the form
#'   \code{~ 1 | g}, where \code{g} is a grouping factor mapping
#'   observations to spatial locations. If not specified,
#'   each observation is treated as a separate location.
#'   It is recommended to always specify a grouping factor
#'   to allow for handling of new data in post-processing methods.
#' @param type Type of the CAR structure. Currently implemented
#'   are \code{"escar"} (exact sparse CAR), \code{"esicar"}
#'   (exact sparse intrinsic CAR), \code{"icar"} (intrinsic CAR),
#'   and \code{"bym2"}.
#' More information is provided in the 'Details' section.
#'
#' @details The \code{escar} and \code{esicar} types are
#'   implemented based on the case study of Max Joseph
#'   (\url{https://github.com/mbjoseph/CARstan}). The \code{icar} and
#'   \code{bym2} type is implemented based on the case study of Mitzi Morris
#'   (\url{https://mc-stan.org/users/documentation/case-studies/icar_stan.html}).
#'
#' @examples
#' \dontrun{
#' # generate some spatial data
#' east <- north <- 1:10
#' Grid <- expand.grid(east, north)
#' K <- nrow(Grid)
#'
#' # set up distance and neighbourhood matrices
#' distance <- as.matrix(dist(Grid))
#' W <- array(0, c(K, K))
#' W[distance == 1] <- 1
#'
#' # generate the covariates and response data
#' x1 <- rnorm(K)
#' x2 <- rnorm(K)
#' theta <- rnorm(K, sd = 0.05)
#' phi <- rmulti_normal(
#'   1, mu = rep(0, K), Sigma = 0.4 * exp(-0.1 * distance)
#' )
#' eta <- x1 + x2 + phi
#' prob <- exp(eta) / (1 + exp(eta))
#' size <- rep(50, K)
#' y <- rbinom(n = K, size = size, prob = prob)
#' dat <- data.frame(y, size, x1, x2)
#'
#' # fit a CAR model
#' fit <- brm(y | trials(size) ~ x1 + x2, data = dat,
#'            family = binomial(), autocor = cor_car(W))
#' summary(fit)
#' }
#'
#' @export
cor_car <- function(W, formula = ~1, type = "escar") {
  options <- c("escar", "esicar", "icar", "bym2")
  type <- match.arg(type, options)
  # capture the caller's name for the adjacency matrix before evaluating it
  W_name <- deparse0(substitute(W))
  W <- validate_car_matrix(W)
  formula <- as.formula(formula)
  if (!is.null(lhs(formula))) {
    stop2("'formula' should be a one-sided formula.")
  }
  # only a single grouping term (or none) is allowed on the right-hand side
  if (length(attr(terms(formula), "term.labels")) > 1L) {
    stop2("'formula' should not contain more than one term.")
  }
  structure(
    nlist(W, W_name, formula, type),
    class = c("cor_car", "cor_brms")
  )
}

#' @rdname cor_car
#' @export
cor_icar <- function(W, formula = ~1) {
  out <- cor_car(W, formula, type = "icar")
  # redo the name capture: inside cor_car(), substitute(W) would only
  # deparse the promise symbol 'W', not the caller's expression
  out$W_name <- deparse0(substitute(W))
  out
}

#' (Deprecated) Fixed user-defined covariance matrices
#'
#' This function is deprecated.
#' Please see \code{\link{fcor}} for the new
#' syntax. Define a fixed covariance matrix of the response variable for
#' instance to model multivariate effect sizes in meta-analysis.
#'
#' @aliases cov_fixed
#'
#' @param V Known covariance matrix of the response variable.
#'   If a vector is passed, it will be used as diagonal entries
#'   (variances) and covariances will be set to zero.
#'
#' @return An object of class \code{cor_fixed}.
#'
#' @examples
#' \dontrun{
#' dat <- data.frame(y = rnorm(3))
#' V <- cbind(c(0.5, 0.3, 0.2), c(0.3, 1, 0.1), c(0.2, 0.1, 0.2))
#' fit <- brm(y~1, data = dat, autocor = cor_fixed(V))
#' }
#'
#' @export
cor_fixed <- function(V) {
  # capture the caller's name for 'V' before evaluating it
  V_name <- deparse0(substitute(V))
  if (is.vector(V)) {
    # interpret a vector as variances of otherwise uncorrelated responses
    V <- diag(V)
  } else {
    V <- as.matrix(V)
  }
  # unname() so that differing dimnames do not break the symmetry check
  if (!isSymmetric(unname(V))) {
    stop2("'V' must be symmetric")
  }
  structure(nlist(V, V_name), class = c("cor_fixed", "cor_brms"))
}

#' (Defunct) Basic Bayesian Structural Time Series
#'
#' The BSTS correlation structure is no longer supported.
#'
#' @inheritParams cor_arma
#'
#' @keywords internal
#' @export
cor_bsts <- function(formula = ~1) {
  stop2("The BSTS structure is no longer supported.")
}

#' Check if argument is a correlation structure
#'
#' Check if argument is one of the correlation structures
#' used in \pkg{brms}.
#'
#' @param x An \R object.
#'
#' @export
is.cor_brms <- function(x) {
  inherits(x, "cor_brms")
}

#' @rdname is.cor_brms
#' @export
is.cor_arma <- function(x) {
  inherits(x, "cor_arma")
}

#' @rdname is.cor_brms
#' @export
is.cor_cosy <- function(x) {
  inherits(x, "cor_cosy")
}

#' @rdname is.cor_brms
#' @export
is.cor_sar <- function(x) {
  inherits(x, "cor_sar")
}

#' @rdname is.cor_brms
#' @export
is.cor_car <- function(x) {
  inherits(x, "cor_car")
}

#' @rdname is.cor_brms
#' @export
is.cor_fixed <- function(x) {
  inherits(x, "cor_fixed")
}

#' @export
print.cor_empty <- function(x, ...) {
  cat("empty()\n")
}

# print methods display the deprecated structures in the new formula syntax

#' @export
print.cor_arma <- function(x, ...) {
  cat(paste0("arma(", formula2str(x$formula), ", ", x$p, ", ", x$q, ")\n"))
  invisible(x)
}

#' @export
print.cor_cosy <- function(x, ...) {
  cat(paste0("cosy(", formula2str(x$formula), ")\n"))
  invisible(x)
}

#' @export
print.cor_sar <- function(x, ...) {
  cat(paste0("sar(", x$W_name, ", '", x$type, "')\n"))
  invisible(x)
}

#' @export
print.cor_car <- function(x, ...) {
  form <- formula2str(x$formula)
  cat(paste0("car(", x$W_name, ", ", form, ", '", x$type, "')\n"))
  invisible(x)
}

#' @export
print.cor_fixed <- function(x, ...) {
  cat("Fixed covariance matrix: \n")
  print(x$V)
  invisible(x)
}

#' @export
# legacy class name: delegate to the print method of 'cor_fixed'
print.cov_fixed <- function(x, ...) {
  class(x) <- "cor_fixed"
  print.cor_fixed(x)
}

# validate the 'autocor' argument; NULL is allowed
stop_not_cor_brms <- function(x) {
  if (!(is.null(x) || is.cor_brms(x))) {
    stop2("Argument 'autocor' must be of class 'cor_brms'.")
  }
  TRUE
}

# empty 'cor_brms' object
cor_empty <- function() {
  structure(list(), class = c("cor_empty", "cor_brms"))
}

is.cor_empty <- function(x) {
  inherits(x, "cor_empty")
}

#' (Deprecated) Extract Autocorrelation Objects
#'
#' @inheritParams posterior_predict.brmsfit
#' @param ... Currently unused.
#'
#' @return A \code{cor_brms} object or a list of such objects for multivariate
#'   models. Not supported for models fitted with brms 2.11.1 or higher.
#'
#' @export
autocor.brmsfit <- function(object, resp = NULL, ...) {
  warning2("Method 'autocor' is deprecated and will be removed in the future.")
  object <- restructure(object)
  resp <- validate_resp(resp, object)
  if (!is.null(resp)) {
    # multivariate model: subset by response; drop the list for a single one
    autocor <- object$autocor[resp]
    if (length(resp) == 1L) {
      autocor <- autocor[[1]]
    }
  } else {
    # univariate model
    autocor <- object$autocor
  }
  autocor
}

#' @rdname autocor.brmsfit
#' @export
autocor <- function(object, ...) {
  UseMethod("autocor")
}

# extract variables for autocorrelation structures
# @param autocor object of class 'cor_brms'
# @return a list with elements 'time', and 'group' (NA when absent)
terms_autocor <- function(autocor) {
  out <- list()
  formula <- autocor$formula
  if (is.null(formula)) {
    formula <- ~1
  }
  if (!is.null(lhs(formula))) {
    stop2("Autocorrelation formulas must be one-sided.")
  }
  formula <- formula2str(formula)
  # strip '~' and everything after '|' to isolate the time part
  time <- as.formula(paste("~", gsub("~|\\|[[:print:]]*", "", formula)))
  time_vars <- all_vars(time)
  if (is.cor_car(autocor) && length(time_vars) > 0L) {
    stop2("The CAR structure should not contain a 'time' variable.")
  }
  if (length(time_vars) > 1L) {
    stop2("Autocorrelation structures may only contain 1 time variable.")
  }
  if (length(time_vars)) {
    out$time <- time_vars
  } else {
    out$time <- NA
  }
  # the grouping part is everything after '|'
  group <- sub("^\\|*", "", sub("~[^\\|]*", "", formula))
  stopif_illegal_group(group)
  group_vars <- all_vars(group)
  if (length(group_vars)) {
    out$group <- paste0(group_vars, collapse = ":")
  } else {
    out$group <- NA
  }
  out
}

# transform a 'cor_brms' object into a formula
# this ensure compatibility with brms <= 2.11
as_formula_cor_brms <- function(x) {
  stop_not_cor_brms(x)
  if (is.cor_empty(x)) {
    return(NULL)
  }
  args <- data2 <- list()
  pac <- terms_autocor(x)
  if (is.cor_arma(x)) {
    fun <- "arma"
    args$time <- pac$time
    args$gr <- pac$group
    args$p <- x$p
    args$q <- x$q
    args$cov <- x$cov
    # NOTE(review): the two 'out' assignments below are dead code — 'out' is
    # recomputed identically after the if/else chain (fun == "arma" here)
    out <- paste0(names(args), " = ", args, collapse = ", ")
    out <- paste0("arma(", out, ")")
  } else if (is.cor_cosy(x)) {
    fun <- "cosy"
    args$time <- pac$time
    args$gr <- pac$group
  } else if (is.cor_sar(x)) {
    fun <- "sar"
    args$M <- make_M_names(x$W_name)
    args$type <- paste0("'", x$type, "'")
    data2[[args$M]] <- x$W
  } else if (is.cor_car(x)) {
    fun <- "car"
    args$M <- make_M_names(x$W_name)
    args$gr <- pac$group
    args$type <- paste0("'", x$type, "'")
    data2[[args$M]] <- x$W
  } else if (is.cor_fixed(x)) {
    fun <- "fcor"
    args$M <- make_M_names(x$V_name)
    data2[[args$M]] <- x$V
  }
  # render the structure as a call in the new formula syntax, e.g. "arma(...)"
  out <- paste0(names(args), " = ", args, collapse = ", ")
  out <- paste0(fun, "(", out, ")")
  out <- str2formula(out)
  # matrices are carried along for later injection into 'data2'
  attr(out, "data2") <- data2
  class(out) <- c("cor_brms_formula", "formula")
  out
}

# ensures covariance matrix inputs are named reasonably
make_M_names <- function(x) {
  out <- make.names(x)
  if (!length(out)) {
    # likely unique random name for the matrix argument
    out <- paste0("M", collapse(sample(0:9, 5, TRUE)))
  }
  out
}

# get data objects from 'autocor' for use in 'data2'
# for backwards compatibility with brms <= 2.11
get_data2_autocor <- function(x, ...) {
  UseMethod("get_data2_autocor")
}

#' @export
get_data2_autocor.brmsformula <- function(x, ...) {
  attr(attr(x$formula, "autocor"), "data2")
}

#' @export
get_data2_autocor.mvbrmsformula <- function(x, ...) {
  ulapply(x$forms, get_data2_autocor, recursive = FALSE)
}

#' @export
# print the underlying formula without exposing the 'data2' attribute
print.cor_brms_formula <- function(x, ...) {
  y <- x
  attr(y, "data2") <- NULL
  class(y) <- "formula"
  print(y)
  invisible(x)
}
brms/R/summary.R0000644000176200001440000004760714367200714013250 0ustar liggesusers#' Create a summary of a fitted model represented by a \code{brmsfit} object
#'
#' @param object An object of class \code{brmsfit}.
#' @param priors Logical; Indicating if priors should be included
#'   in the summary. Default is \code{FALSE}.
#' @param prob A value between 0 and 1 indicating the desired probability
#'   to be covered by the uncertainty intervals. The default is 0.95.
#' @param mc_se Logical; Indicating if the uncertainty in \code{Estimate}
#'   caused by the MCMC sampling should be shown in the summary. Defaults to
#'   \code{FALSE}.
#' @param ... Other potential arguments
#' @inheritParams posterior_summary
#'
#' @details The convergence diagnostics \code{Rhat}, \code{Bulk_ESS}, and
#'   \code{Tail_ESS} are described in detail in Vehtari et al. (2020).
#'
#' @references
#' Aki Vehtari, Andrew Gelman, Daniel Simpson, Bob Carpenter, and
#' Paul-Christian Bürkner (2020).
#' Rank-normalization, folding, and
#' localization: An improved R-hat for assessing convergence of
#' MCMC. *Bayesian Analysis*. 1–28. doi:10.1214/20-BA1221
#'
#' @method summary brmsfit
#' @importMethodsFrom rstan summary
#' @importFrom posterior subset_draws summarize_draws
#' @export
summary.brmsfit <- function(object, priors = FALSE, prob = 0.95,
                            robust = FALSE, mc_se = FALSE, ...) {
  priors <- as_one_logical(priors)
  probs <- validate_ci_bounds(prob)
  robust <- as_one_logical(robust)
  mc_se <- as_one_logical(mc_se)
  object <- restructure(object)
  bterms <- brmsterms(object$formula)
  # skeleton of the 'brmssummary' object; parameter tables are filled in below
  out <- list(
    formula = object$formula,
    data_name = get_data_name(object$data),
    group = unique(object$ranef$group),
    nobs = nobs(object),
    ngrps = ngrps(object),
    autocor = object$autocor,
    prior = empty_prior(),
    algorithm = algorithm(object)
  )
  class(out) <- "brmssummary"
  if (!length(object$fit@sim)) {
    # the model does not contain posterior draws
    return(out)
  }
  out$chains <- nchains(object)
  # iterations before thinning
  out$iter <- object$fit@sim$iter
  out$warmup <- object$fit@sim$warmup
  out$thin <- nthin(object)
  stan_args <- object$fit@stan_args[[1]]
  out$sampler <- paste0(stan_args$method, "(", stan_args$algorithm, ")")
  if (priors) {
    out$prior <- prior_summary(object, all = FALSE)
  }
  # compute a summary for given set of parameters
  # TODO: align names with summary outputs of other methods and packages
  .summary <- function(draws, variables, probs, robust) {
    # quantiles with appropriate names to retain backwards compatibility
    .quantile <- function(x, ...) {
      qs <- posterior::quantile2(x, probs = probs, ...)
      prob <- probs[2] - probs[1]
      names(qs) <- paste0(c("l-", "u-"), prob * 100, "% CI")
      return(qs)
    }
    draws <- subset_draws(draws, variable = variables)
    measures <- list()
    if (robust) {
      measures$Estimate <- median
      if (mc_se) {
        measures$MCSE <- posterior::mcse_median
      }
      measures$Est.Error <- mad
    } else {
      measures$Estimate <- mean
      if (mc_se) {
        measures$MCSE <- posterior::mcse_mean
      }
      measures$Est.Error <- sd
    }
    # 'c<-' is brms' replacement-function shorthand for appending to a list
    c(measures) <- list(
      quantiles = .quantile,
      Rhat = posterior::rhat,
      Bulk_ESS = posterior::ess_bulk,
      Tail_ESS = posterior::ess_tail
    )
    out <- do.call(summarize_draws, c(list(draws), measures))
    out <- as.data.frame(out)
    rownames(out) <- out$variable
    out$variable <- NULL
    return(out)
  }

  variables <- variables(object)
  # parameter classes shown in the summary output
  incl_classes <- c(
    "b", "bs", "bcs", "bsp", "bmo", "bme", "bmi", "bm",
    valid_dpars(object), "delta", "lncor", "rescor", "ar", "ma", "sderr",
    "cosy", "cortime", "lagsar", "errorsar", "car", "sdcar", "rhocar",
    "sd", "cor", "df", "sds", "sdgp", "lscale", "simo"
  )
  incl_regex <- paste0("^", regex_or(incl_classes), "(_|$|\\[)")
  variables <- variables[grepl(incl_regex, variables)]
  draws <- as_draws_array(object, variable = variables)
  full_summary <- .summary(draws, variables, probs, robust)
  # convergence warnings only apply to MCMC sampling
  if (algorithm(object) == "sampling") {
    Rhats <- full_summary[, "Rhat"]
    if (any(Rhats > 1.05, na.rm = TRUE)) {
      warning2(
        "Parts of the model have not converged (some Rhats are > 1.05). ",
        "Be careful when analysing the results! We recommend running ",
        "more iterations and/or setting stronger priors."
      )
    }
    div_trans <- sum(nuts_params(object, pars = "divergent__")$Value)
    adapt_delta <- control_params(object)$adapt_delta
    if (div_trans > 0) {
      warning2(
        "There were ", div_trans, " divergent transitions after warmup. ",
        "Increasing adapt_delta above ", adapt_delta, " may help. See ",
        "http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup"
      )
    }
  }
  # summary of population-level effects
  fe_pars <- variables[grepl(fixef_pars(), variables)]
  out$fixed <- full_summary[fe_pars, , drop = FALSE]
  rownames(out$fixed) <- gsub(fixef_pars(), "", fe_pars)
  # summary of family specific parameters
  spec_pars <- c(valid_dpars(object), "delta")
  spec_pars <- paste0(spec_pars, collapse = "|")
  spec_pars <- paste0("^(", spec_pars, ")($|_)")
  spec_pars <- variables[grepl(spec_pars, variables)]
  out$spec_pars <- full_summary[spec_pars, , drop = FALSE]
  # correlation parameters require renaming to look good in the summary
  lncor_pars <- variables[grepl("^lncor_", variables)]
  if (length(lncor_pars)) {
    lncor_summary <- full_summary[lncor_pars, , drop = FALSE]
    # turn 'lncor_a__b' into 'lncor_a(b)' style labels
    lncor_pars <- sub("__", ",", sub("__", "(", lncor_pars))
    rownames(lncor_summary) <- paste0(lncor_pars, ")")
    out$spec_pars <- rbind(out$spec_pars, lncor_summary)
  }
  # summary of residual correlations
  rescor_pars <- variables[grepl("^rescor_", variables)]
  if (length(rescor_pars)) {
    out$rescor_pars <- full_summary[rescor_pars, , drop = FALSE]
    rescor_pars <- sub("__", ",", sub("__", "(", rescor_pars))
    rownames(out$rescor_pars) <- paste0(rescor_pars, ")")
  }
  # summary of autocorrelation effects
  cor_pars <- variables[grepl(regex_autocor_pars(), variables)]
  out$cor_pars <- full_summary[cor_pars, , drop = FALSE]
  rownames(out$cor_pars) <- cor_pars
  cortime_pars <- variables[grepl("^cortime_", variables)]
  if (length(cortime_pars)) {
    tmp <- full_summary[cortime_pars, , drop = FALSE]
    cortime_pars <- sub("__", ",", sub("__", "(", cortime_pars))
    rownames(tmp) <- paste0(cortime_pars, ")")
    out$cor_pars <- rbind(out$cor_pars, tmp)
  }
  # summary of group-level effects
  for (g in out$group) {
    gregex <- escape_dot(g)
    sd_prefix <- paste0("^sd_", gregex, "__")
    sd_pars <- variables[grepl(sd_prefix, variables)]
    cor_prefix <- paste0("^cor_", gregex, "__")
    cor_pars <- variables[grepl(cor_prefix, variables)]
    df_prefix <- paste0("^df_", gregex, "$")
    df_pars <- variables[grepl(df_prefix, variables)]
    gpars <- c(df_pars, sd_pars, cor_pars)
    out$random[[g]] <- full_summary[gpars, , drop = FALSE]
    if (has_rows(out$random[[g]])) {
      sd_names <- sub(sd_prefix, "sd(", sd_pars)
      cor_names <- sub(cor_prefix, "cor(", cor_pars)
      cor_names <- sub("__", ",", cor_names)
      df_names <- sub(df_prefix, "df", df_pars)
      gnames <- c(df_names, paste0(c(sd_names, cor_names), ")"))
      rownames(out$random[[g]]) <- gnames
    }
  }
  # summary of smooths
  sm_pars <- variables[grepl("^sds_", variables)]
  if (length(sm_pars)) {
    out$splines <- full_summary[sm_pars, , drop = FALSE]
    rownames(out$splines) <- paste0(gsub("^sds_", "sds(", sm_pars), ")")
  }
  # summary of monotonic parameters
  mo_pars <- variables[grepl("^simo_", variables)]
  if (length(mo_pars)) {
    out$mo <- full_summary[mo_pars, , drop = FALSE]
    rownames(out$mo) <- gsub("^simo_", "", mo_pars)
  }
  # summary of gaussian processes
  gp_pars <- variables[grepl("^(sdgp|lscale)_", variables)]
  if (length(gp_pars)) {
    out$gp <- full_summary[gp_pars, , drop = FALSE]
    rownames(out$gp) <- gsub("^sdgp_", "sdgp(", rownames(out$gp))
    rownames(out$gp) <- gsub("^lscale_", "lscale(", rownames(out$gp))
    rownames(out$gp) <- paste0(rownames(out$gp), ")")
  }
  out
}

#' Print a summary for a fitted model represented by a \code{brmsfit} object
#'
#' @aliases print.brmssummary
#'
#' @param x An object of class \code{brmsfit}
#' @param digits The number of significant digits for printing out the summary;
#'   defaults to 2. The effective sample size is always rounded to integers.
#' @param ... Additional arguments that would be passed
#'   to method \code{summary} of \code{brmsfit}.
#'
#' @seealso \code{\link{summary.brmsfit}}
#'
#' @export
print.brmsfit <- function(x, digits = 2, ...) {
  print(summary(x, ...), digits = digits, ...)
}

#' @export
print.brmssummary <- function(x, digits = 2, ...) {
  cat(" Family: ")
  cat(summarise_families(x$formula), "\n")
  cat(" Links: ")
  cat(summarise_links(x$formula, wsp = 9), "\n")
  cat("Formula: ")
  print(x$formula, wsp = 9)
  cat(paste0(
    " Data: ", x$data_name,
    " (Number of observations: ", x$nobs, ") \n"
  ))
  if (!isTRUE(nzchar(x$sampler))) {
    cat("\nThe model does not contain posterior draws.\n")
    return(invisible(x))
  }
  # TODO: make this option a user-facing argument?
  short <- as_one_logical(getOption("brms.short_summary", FALSE))
  if (!short) {
    total_ndraws <- ceiling((x$iter - x$warmup) / x$thin * x$chains)
    cat(paste0(
      " Draws: ", x$chains, " chains, each with iter = ", x$iter,
      "; warmup = ", x$warmup, "; thin = ", x$thin, ";\n",
      " total post-warmup draws = ", total_ndraws, "\n"
    ))
  }
  cat("\n")
  if (nrow(x$prior)) {
    cat("Priors: \n")
    print(x$prior, show_df = FALSE)
    cat("\n")
  }
  if (length(x$splines)) {
    cat("Smooth Terms: \n")
    print_format(x$splines, digits)
    cat("\n")
  }
  if (length(x$gp)) {
    cat("Gaussian Process Terms: \n")
    print_format(x$gp, digits)
    cat("\n")
  }
  if (nrow(x$cor_pars)) {
    cat("Correlation Structures:\n")
    # TODO: better printing for correlation structures?
    print_format(x$cor_pars, digits)
    cat("\n")
  }
  if (length(x$random)) {
    cat("Group-Level Effects: \n")
    for (i in seq_along(x$random)) {
      g <- names(x$random)[i]
      cat(paste0("~", g, " (Number of levels: ", x$ngrps[[g]], ") \n"))
      print_format(x$random[[g]], digits)
      cat("\n")
    }
  }
  if (nrow(x$fixed)) {
    cat("Population-Level Effects: \n")
    print_format(x$fixed, digits)
    cat("\n")
  }
  if (length(x$mo)) {
    cat("Simplex Parameters: \n")
    print_format(x$mo, digits)
    cat("\n")
  }
  if (nrow(x$spec_pars)) {
    cat("Family Specific Parameters: \n")
    print_format(x$spec_pars, digits)
    cat("\n")
  }
  if (length(x$rescor_pars)) {
    cat("Residual Correlations: \n")
    # fixed: was 'x$rescor', which only resolved to 'x$rescor_pars'
    # via '$' partial matching on lists
    print_format(x$rescor_pars, digits)
    cat("\n")
  }
  if (!short) {
    cat(paste0("Draws were sampled using ", x$sampler, ". "))
    if (x$algorithm == "sampling") {
      cat(paste0(
        "For each parameter, Bulk_ESS\n",
        "and Tail_ESS are effective sample size measures, ",
        "and Rhat is the potential\n",
        "scale reduction factor on split chains ",
        "(at convergence, Rhat = 1)."
      ))
    }
    cat("\n")
  }
  invisible(x)
}

# helper function to print summary matrices in nice format
# also displays -0.00 as a result of round negative values to zero (#263)
# @param x object to be printed; coerced to matrix
# @param digits number of digits to show
# @param no_digits names of columns for which no digits should be shown
print_format <- function(x, digits = 2, no_digits = c("Bulk_ESS", "Tail_ESS")) {
  x <- as.matrix(x)
  digits <- as.numeric(digits)
  if (length(digits) != 1L) {
    stop2("'digits' should be a single numeric value.")
  }
  out <- x
  fmt <- paste0("%.", digits, "f")
  for (i in seq_cols(x)) {
    if (isTRUE(colnames(x)[i] %in% no_digits)) {
      # effective sample sizes are always shown as integers
      out[, i] <- sprintf("%.0f", x[, i])
    } else {
      out[, i] <- sprintf(fmt, x[, i])
    }
  }
  print(out, quote = FALSE, right = TRUE)
  invisible(x)
}

# regex to extract population-level coefficients
fixef_pars <- function() {
  types <- c("", "s", "cs", "sp", "mo", "me", "mi", "m")
  types <- paste0("(", types, ")", collapse = "|")
  paste0("^b(", types, ")_")
}

# algorithm used in the model fitting
algorithm <- function(x) {
  stopifnot(is.brmsfit(x))
  # older fits may lack the 'algorithm' element; default to MCMC sampling
  if (is.null(x$algorithm)) "sampling"
  else x$algorithm
}

#' Summarize Posterior draws
#'
#' Summarizes posterior draws based on point estimates (mean or median),
#' estimation errors (SD or MAD) and quantiles. This function mainly exists to
#' retain backwards compatibility. It will eventually be replaced by functions
#' of the \pkg{posterior} package (see examples below).
#'
#' @param x An \R object.
#' @inheritParams as.matrix.brmsfit
#' @param probs The percentiles to be computed by the
#'   \code{\link[stats:quantile]{quantile}} function.
#' @param robust If \code{FALSE} (the default) the mean is used as
#'   the measure of central tendency and the standard deviation as
#'   the measure of variability. If \code{TRUE}, the median and the
#'   median absolute deviation (MAD) are applied instead.
#' @param ... More arguments passed to or from other methods.
#'
#' @return A matrix where rows indicate variables
#'   and columns indicate the summary estimates.
#'
#' @seealso \code{\link[posterior:summarize_draws]{summarize_draws}}
#'
#' @examples
#' \dontrun{
#' fit <- brm(time ~ age * sex, data = kidney)
#' posterior_summary(fit)
#'
#' # recommended workflow using posterior
#' library(posterior)
#' draws <- as_draws_array(fit)
#' summarise_draws(draws, default_summary_measures())
#' }
#'
#' @export
posterior_summary <- function(x, ...) {
  UseMethod("posterior_summary")
}

#' @rdname posterior_summary
#' @export
posterior_summary.default <- function(x, probs = c(0.025, 0.975),
                                      robust = FALSE, ...) {
  # TODO: replace with summary functions from posterior
  # TODO: find a way to represent 3D summaries as well
  if (!length(x)) {
    stop2("No posterior draws supplied.")
  }
  # names of the estimator functions applied per column via get_estimate()
  if (robust) {
    coefs <- c("median", "mad", "quantile")
  } else {
    coefs <- c("mean", "sd", "quantile")
  }
  .posterior_summary <- function(x) {
    do_call(cbind, lapply(
      coefs, get_estimate, draws = x,
      probs = probs, na.rm = TRUE
    ))
  }
  if (length(dim(x)) <= 2L) {
    # data.frames cause trouble in as.array
    x <- as.matrix(x)
  } else {
    x <- as.array(x)
  }
  if (length(dim(x)) == 2L) {
    out <- .posterior_summary(x)
    rownames(out) <- colnames(x)
  } else if (length(dim(x)) == 3L) {
    # summarize each slice of the third dimension and stack the results
    out <- lapply(array2list(x), .posterior_summary)
    out <- abind(out, along = 3)
    dnx <- dimnames(x)
    dimnames(out) <- list(dnx[[2]], dimnames(out)[[2]], dnx[[3]])
  } else {
    stop("'x' must be of dimension 2 or 3.")
  }
  # TODO: align names with summary outputs of other methods and packages
  colnames(out) <- c("Estimate", "Est.Error", paste0("Q", probs * 100))
  out
}

#' @rdname posterior_summary
#' @export
posterior_summary.brmsfit <- function(x, pars = NA, variable = NULL,
                                      probs = c(0.025, 0.975),
                                      robust = FALSE, ...) {
  # extract the requested draws as a matrix and delegate to the default method
  out <- as.matrix(x, pars = pars, variable = variable, ...)
  posterior_summary(out, probs = probs, robust = robust, ...)
}

# calculate estimates over posterior draws
# @param coef coefficient to be applied on the draws (e.g., "mean")
# @param draws the draws over which to apply coef
# @param margin see 'apply'
# @param ... additional arguments passed to get(coef)
# @return typically a matrix with colnames(draws) as colnames
get_estimate <- function(coef, draws, margin = 2, ...) {
  # TODO: replace with summary functions from posterior
  dots <- list(...)
  args <- list(X = draws, MARGIN = margin, FUN = coef)
  # only forward dots that the estimator function actually accepts
  fun_args <- names(formals(coef))
  if (!"..." %in% fun_args) {
    dots <- dots[names(dots) %in% fun_args]
  }
  x <- do_call(apply, c(args, dots))
  if (is.null(dim(x))) {
    x <- matrix(x, dimnames = list(NULL, coef))
  } else if (coef == "quantile") {
    # quantile() returns quantiles in rows; transpose back to columns
    x <- aperm(x, length(dim(x)):1)
  }
  x
}

# validate bounds of credible intervals
# @return a numeric vector of length 2
validate_ci_bounds <- function(prob, probs = NULL) {
  if (!is.null(probs)) {
    # deprecated as of version 2.13.7
    warning2("Argument 'probs' is deprecated. Please use 'prob' instead.")
    if (length(probs) != 2L) {
      stop2("Arguments 'probs' must be of length 2.")
    }
    probs <- as.numeric(probs)
  } else {
    prob <- as_one_numeric(prob)
    if (prob < 0 || prob > 1) {
      stop2("'prob' must be a single numeric value in [0, 1].")
    }
    # central interval: equal tail mass on both sides
    probs <- c((1 - prob) / 2, 1 - (1 - prob) / 2)
  }
  probs
}

#' Table Creation for Posterior Draws
#'
#' Create a table for unique values of posterior draws.
#' This is usually only useful when summarizing predictions
#' of ordinal models.
#'
#' @param x A matrix of posterior draws where rows
#'   indicate draws and columns indicate parameters.
#' @param levels Optional values of possible posterior values.
#'   Defaults to all unique values in \code{x}.
#'
#' @return A matrix where rows indicate parameters
#'   and columns indicate the unique values of
#'   posterior draws.
#'
#' @examples
#' \dontrun{
#' fit <- brm(rating ~ period + carry + treat,
#'            data = inhaler, family = cumulative())
#' pr <- predict(fit, summary = FALSE)
#' posterior_table(pr)
#' }
#'
#' @export
posterior_table <- function(x, levels = NULL) {
  x <- as.matrix(x)
  if (anyNA(x)) {
    warning2("NAs will be ignored in 'posterior_table'.")
  }
  if (is.null(levels)) {
    levels <- sort(unique(as.vector(x)))
  }
  # use original response labels (if attached) for the column names,
  # but only when they match the number of tabulated levels
  xlevels <- attr(x, "levels")
  if (length(xlevels) != length(levels)) {
    xlevels <- levels
  }
  # tabulate each parameter (column) over the common set of levels
  out <- lapply(seq_len(ncol(x)),
    function(n) table(factor(x[, n], levels = levels))
  )
  out <- do_call(rbind, out)
  # compute relative frequencies
  out <- out / rowSums(out)
  rownames(out) <- colnames(x)
  colnames(out) <- paste0("P(Y = ", xlevels, ")")
  out
}

#' Compute posterior uncertainty intervals
#'
#' Compute posterior uncertainty intervals for \code{brmsfit} objects.
#'
#' @param object An object of class \code{brmsfit}.
#' @param prob A value between 0 and 1 indicating the desired probability
#'   to be covered by the uncertainty intervals. The default is 0.95.
#' @inheritParams as.matrix.brmsfit
#' @param ... More arguments passed to \code{\link{as.matrix.brmsfit}}.
#'
#' @return A \code{matrix} with lower and upper interval bounds
#'   as columns and as many rows as selected variables.
#'
#' @examples
#' \dontrun{
#' fit <- brm(count ~ zAge + zBase * Trt,
#'            data = epilepsy, family = negbinomial())
#' posterior_interval(fit)
#' }
#'
#' @aliases posterior_interval
#' @method posterior_interval brmsfit
#' @export
#' @export posterior_interval
#' @importFrom rstantools posterior_interval
posterior_interval.brmsfit <- function(
  object, pars = NA, variable = NULL,
  prob = 0.95, ...
) {
  # extract the requested draws and delegate interval computation to rstantools
  ps <- as.matrix(object, pars = pars, variable = variable, ...)
  rstantools::posterior_interval(ps, prob = prob)
}

#' Extract Priors of a Bayesian Model Fitted with \pkg{brms}
#'
#' @aliases prior_summary
#'
#' @param object An object of class \code{brmsfit}.
#' @param all Logical; Show all parameters in the model which may have
#'   priors (\code{TRUE}) or only those with proper priors (\code{FALSE})?
#' @param ... Further arguments passed to or from other methods.
#'
#' @return For \code{brmsfit} objects, an object of class \code{brmsprior}.
#'
#' @examples
#' \dontrun{
#' fit <- brm(count ~ zAge + zBase * Trt
#'            + (1|patient) + (1|obs),
#'            data = epilepsy, family = poisson(),
#'            prior = c(prior(student_t(5,0,10), class = b),
#'                      prior(cauchy(0,2), class = sd)))
#'
#' prior_summary(fit)
#' prior_summary(fit, all = FALSE)
#' print(prior_summary(fit, all = FALSE), show_df = FALSE)
#' }
#'
#' @method prior_summary brmsfit
#' @export
#' @export prior_summary
#' @importFrom rstantools prior_summary
prior_summary.brmsfit <- function(object, all = TRUE, ...) {
  object <- restructure(object)
  prior <- object$prior
  if (!all) {
    # keep only rows with a non-empty (proper) prior specification
    prior <- prior[nzchar(prior$prior), ]
  }
  prior
}
brms/R/plot.R0000644000176200001440000002360114427160300012516 0ustar liggesusers#' Trace and Density Plots for MCMC Draws
#'
#' @param x An object of class \code{brmsfit}.
#' @param pars Deprecated alias of \code{variable}.
#'   Names of the parameters to plot, as given by a
#'   character vector or a regular expression.
#' @param variable Names of the variables (parameters) to plot, as given by a
#'   character vector or a regular expression (if \code{regex = TRUE}). By
#'   default, a hopefully not too large selection of variables is plotted.
#' @param combo A character vector with at least two elements.
#'   Each element of \code{combo} corresponds to a column in the resulting
#'   graphic and should be the name of one of the available
#'   \code{\link[bayesplot:MCMC-overview]{MCMC}} functions
#'   (omitting the \code{mcmc_} prefix).
#' @param N The number of parameters plotted per page. #' @param theme A \code{\link[ggplot2:theme]{theme}} object #' modifying the appearance of the plots. #' For some basic themes see \code{\link[ggplot2:ggtheme]{ggtheme}} #' and \code{\link[bayesplot:theme_default]{theme_default}}. #' @param regex Logical; Indicates whether \code{variable} should #' be treated as regular expressions. Defaults to \code{FALSE}. #' @param fixed (Deprecated) Indicates whether parameter names #' should be matched exactly (\code{TRUE}) or treated as #' regular expressions (\code{FALSE}). Default is \code{FALSE} #' and only works with argument \code{pars}. #' @param plot Logical; indicates if plots should be #' plotted directly in the active graphic device. #' Defaults to \code{TRUE}. #' @param ask Logical; indicates if the user is prompted #' before a new page is plotted. #' Only used if \code{plot} is \code{TRUE}. #' @param newpage Logical; indicates if the first set of plots #' should be plotted to a new page. #' Only used if \code{plot} is \code{TRUE}. #' @param ... Further arguments passed to #' \code{\link[bayesplot:MCMC-combos]{mcmc_combo}}. #' #' @return An invisible list of #' \code{\link[gtable:gtable]{gtable}} objects. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt #' + (1|patient) + (1|visit), #' data = epilepsy, family = "poisson") #' plot(fit) #' ## plot population-level effects only #' plot(fit, variable = "^b_", regex = TRUE) #' } #' #' @method plot brmsfit #' @import ggplot2 #' @importFrom graphics plot #' @importFrom grDevices devAskNewPage #' @export plot.brmsfit <- function(x, pars = NA, combo = c("dens", "trace"), N = 5, variable = NULL, regex = FALSE, fixed = FALSE, theme = NULL, plot = TRUE, ask = TRUE, newpage = TRUE, ...) 
{ contains_draws(x) if (!is_wholenumber(N) || N < 1) { stop2("Argument 'N' must be a positive integer.") } variable <- use_variable_alias(variable, x, pars, fixed = fixed) if (is.null(variable)) { variable <- default_plot_variables(x) regex <- TRUE } draws <- as.array(x, variable = variable, regex = regex) variables <- dimnames(draws)[[3]] if (!length(variables)) { stop2("No valid variables selected.") } if (plot) { default_ask <- devAskNewPage() on.exit(devAskNewPage(default_ask)) devAskNewPage(ask = FALSE) } n_plots <- ceiling(length(variables) / N) plots <- vector(mode = "list", length = n_plots) for (i in seq_len(n_plots)) { sub_vars <- variables[((i - 1) * N + 1):min(i * N, length(variables))] sub_draws <- draws[, , sub_vars, drop = FALSE] plots[[i]] <- bayesplot::mcmc_combo( sub_draws, combo = combo, gg_theme = theme, ... ) if (plot) { plot(plots[[i]], newpage = newpage || i > 1) if (i == 1) { devAskNewPage(ask = ask) } } } invisible(plots) } # list all parameter classes to be included in plots by default default_plot_variables <- function(family) { c(fixef_pars(), "^sd_", "^cor_", "^sigma_", "^rescor_", paste0("^", valid_dpars(family), "$"), "^delta$", "^theta", "^ar", "^ma", "^arr", "^sderr", "^lagsar", "^errorsar", "^car", "^sdcar", "^sdb_", "^sdbsp_", "^sdbs_", "^sds_", "^sdgp_", "^lscale_") } #' MCMC Plots Implemented in \pkg{bayesplot} #' #' Convenient way to call MCMC plotting functions #' implemented in the \pkg{bayesplot} package. #' #' @aliases stanplot stanplot.brmsfit #' #' @inheritParams plot.brmsfit #' @param object An \R object typically of class \code{brmsfit} #' @param type The type of the plot. 
#' Supported types are (as names) \code{hist}, \code{dens}, #' \code{hist_by_chain}, \code{dens_overlay}, #' \code{violin}, \code{intervals}, \code{areas}, \code{acf}, #' \code{acf_bar},\code{trace}, \code{trace_highlight}, \code{scatter}, #' \code{rhat}, \code{rhat_hist}, \code{neff}, \code{neff_hist} #' \code{nuts_acceptance}, \code{nuts_divergence}, #' \code{nuts_stepsize}, \code{nuts_treedepth}, and \code{nuts_energy}. #' For an overview on the various plot types see #' \code{\link[bayesplot:MCMC-overview]{MCMC-overview}}. #' @param ... Additional arguments passed to the plotting functions. #' See \code{\link[bayesplot:MCMC-overview]{MCMC-overview}} for #' more details. #' #' @return A \code{\link[ggplot2:ggplot]{ggplot}} object #' that can be further customized using the \pkg{ggplot2} package. #' #' @details #' Also consider using the \pkg{shinystan} package available via #' method \code{\link{launch_shinystan}} in \pkg{brms} for flexible #' and interactive visual analysis. #' #' @examples #' \dontrun{ #' model <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = "poisson") #' #' # plot posterior intervals #' mcmc_plot(model) #' #' # only show population-level effects in the plots #' mcmc_plot(model, variable = "^b_", regex = TRUE) #' #' # show histograms of the posterior distributions #' mcmc_plot(model, type = "hist") #' #' # plot some diagnostics of the sampler #' mcmc_plot(model, type = "neff") #' mcmc_plot(model, type = "rhat") #' #' # plot some diagnostics specific to the NUTS sampler #' mcmc_plot(model, type = "nuts_acceptance") #' mcmc_plot(model, type = "nuts_divergence") #' } #' #' @export mcmc_plot.brmsfit <- function(object, pars = NA, type = "intervals", variable = NULL, regex = FALSE, fixed = FALSE, ...) 
{ contains_draws(object) object <- restructure(object) type <- as_one_character(type) variable <- use_variable_alias(variable, object, pars, fixed = fixed) if (is.null(variable)) { variable <- default_plot_variables(object) regex <- TRUE } valid_types <- as.character(bayesplot::available_mcmc("")) valid_types <- sub("^mcmc_", "", valid_types) if (!type %in% valid_types) { stop2("Invalid plot type. Valid plot types are: \n", collapse_comma(valid_types)) } mcmc_fun <- get(paste0("mcmc_", type), asNamespace("bayesplot")) mcmc_arg_names <- names(formals(mcmc_fun)) mcmc_args <- list(...) if ("x" %in% mcmc_arg_names) { if (grepl("^nuts_", type)) { # x refers to a molten data.frame of NUTS parameters mcmc_args$x <- nuts_params(object) } else { # x refers to a data.frame of draws draws <- as.array(object, variable = variable, regex = regex) if (!length(draws)) { stop2("No valid parameters selected.") } sel_variables <- dimnames(draws)[[3]] if (type %in% c("scatter", "hex") && length(sel_variables) != 2L) { stop2("Exactly 2 parameters must be selected for this type.", "\nParameters selected: ", collapse_comma(sel_variables)) } mcmc_args$x <- draws } } if ("lp" %in% mcmc_arg_names) { mcmc_args$lp <- log_posterior(object) } use_nuts <- isTRUE(object$algorithm == "sampling") if ("np" %in% mcmc_arg_names && use_nuts) { mcmc_args$np <- nuts_params(object) } interval_type <- type %in% c("intervals", "areas") if ("rhat" %in% mcmc_arg_names && !interval_type) { mcmc_args$rhat <- rhat(object) } if ("ratio" %in% mcmc_arg_names) { mcmc_args$ratio <- neff_ratio(object) } do_call(mcmc_fun, mcmc_args) } #' @rdname mcmc_plot.brmsfit #' @export mcmc_plot <- function(object, ...) { UseMethod("mcmc_plot") } # 'stanplot' has been deprecated in brms 2.10.6; remove in brms 3.0 #' @export stanplot <- function(object, ...) { UseMethod("stanplot") } #' @export stanplot.brmsfit <- function(object, ...) { warning2("Method 'stanplot' is deprecated. 
Please use 'mcmc_plot' instead.") mcmc_plot.brmsfit(object, ...) } #' Create a matrix of output plots from a \code{brmsfit} object #' #' A \code{\link[graphics:pairs]{pairs}} #' method that is customized for MCMC output. #' #' @param x An object of class \code{brmsfit} #' @inheritParams plot.brmsfit #' @param ... Further arguments to be passed to #' \code{\link[bayesplot:MCMC-scatterplots]{mcmc_pairs}}. #' #' @details For a detailed description see #' \code{\link[bayesplot:MCMC-scatterplots]{mcmc_pairs}}. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt #' + (1|patient) + (1|visit), #' data = epilepsy, family = "poisson") #' pairs(fit, variable = variables(fit)[1:3]) #' pairs(fit, variable = "^sd_", regex = TRUE) #' } #' #' @export pairs.brmsfit <- function(x, pars = NA, variable = NULL, regex = FALSE, fixed = FALSE, ...) { variable <- use_variable_alias(variable, x, pars, fixed = fixed) if (is.null(variable)) { variable <- default_plot_variables(x) regex <- TRUE } draws <- as.array(x, variable = variable, regex = regex) bayesplot::mcmc_pairs(draws, ...) } #' Default \pkg{bayesplot} Theme for \pkg{ggplot2} Graphics #' #' This theme is imported from the \pkg{bayesplot} package. #' See \code{\link[bayesplot:theme_default]{theme_default}} #' for a complete documentation. #' #' @name theme_default #' #' @param base_size base font size #' @param base_family base font family #' #' @return A \code{theme} object used in \pkg{ggplot2} graphics. #' #' @importFrom bayesplot theme_default #' @export theme_default NULL brms/R/predictor.R0000644000176200001440000004136114500562452013544 0ustar liggesusers# compute predictor terms predictor <- function(prep, ...) { UseMethod("predictor") } # compute linear/additive predictor terms # @param prep a list generated by prepare_predictions containing # all required data and posterior draws # @param i An optional vector indicating the observation(s) # for which to compute eta. 
If NULL, eta is computed # for all all observations at once. # @param fprep Optional full brmsprep object of the model. # Currently only needed in non-linear models or for # predicting new data in models with autocorrelation. # @return Usually an S x N matrix where S is the number of draws # and N is the number of observations or length of i if specified. #' @export predictor.bprepl <- function(prep, i = NULL, fprep = NULL, ...) { nobs <- ifelse(!is.null(i), length(i), prep$nobs) eta <- matrix(0, nrow = prep$ndraws, ncol = nobs) + predictor_fe(prep, i) + predictor_re(prep, i) + predictor_sp(prep, i) + predictor_sm(prep, i) + predictor_gp(prep, i) + predictor_offset(prep, i, nobs) # some autocorrelation structures depend on eta eta <- predictor_ac(eta, prep, i, fprep = fprep) # intentionally last as it may return 3D arrays eta <- predictor_cs(eta, prep, i) unname(eta) } # compute non-linear predictor terms # @param prep a list generated by prepare_predictions containing # all required data and posterior draws # @param i An optional vector indicating the observation(s) # for which to compute eta. If NULL, eta is computed # for all all observations at once. # @param ... further arguments passed to predictor.bprepl # @return Usually an S x N matrix where S is the number of draws # and N is the number of observations or length of i if specified. #' @export predictor.bprepnl <- function(prep, i = NULL, fprep = NULL, ...) { stopifnot(!is.null(fprep)) nlpars <- prep$used_nlpars covars <- names(prep$C) args <- named_list(c(nlpars, covars)) for (nlp in nlpars) { args[[nlp]] <- get_nlpar(fprep, nlpar = nlp, i = i, ...) 
} for (cov in covars) { args[[cov]] <- p(prep$C[[cov]], i, row = FALSE) } dim_eta <- dim(rmNULL(args)[[1]]) # evaluate non-linear predictor if (!prep$loop) { # cannot reasonably vectorize over posterior draws # when 'nlform' must be evaluated jointly across observations # and hence 'loop' had been set to FALSE for (i in seq_along(args)) { old_dim <- dim(args[[i]]) args[[i]] <- split(args[[i]], slice.index(args[[i]], 1)) if (length(old_dim) > 2L) { # split drops array dimensions which need to be restored args[[i]] <- lapply(args[[i]], "dim<-", old_dim[-1]) } } .fun <- function(...) eval(prep$nlform, list(...)) eta <- try( t(do_call(mapply, c(list(FUN = .fun, SIMPLIFY = "array"), args))), silent = TRUE ) } else { # assumes fully vectorized version of 'nlform' eta <- try(eval(prep$nlform, args), silent = TRUE) } if (is_try_error(eta)) { if (grepl("could not find function", eta)) { eta <- rename(eta, "Error in eval(expr, envir, enclos) : ", "") vectorize <- str_if(prep$loop, ", vectorize = TRUE") message( eta, " Most likely this is because you used a Stan ", "function in the non-linear model formula that ", "is not defined in R. If this is a user-defined function, ", "please run 'expose_functions(.", vectorize, ")' ", "on your fitted model and try again." ) } else { eta <- rename(eta, "^Error :", "", fixed = FALSE) stop2(eta) } } dim(eta) <- dim_eta unname(eta) } # compute eta for overall effects predictor_fe <- function(prep, i) { fe <- prep[["fe"]] if (!isTRUE(ncol(fe[["X"]]) > 0)) { return(0) } eta <- try(.predictor_fe(X = p(fe[["X"]], i), b = fe[["b"]])) if (is_try_error(eta)) { stop2( "Something went wrong (see the error message above). ", "Perhaps you transformed numeric variables ", "to factors or vice versa within the model formula? ", "If yes, please convert your variables beforehand. ", "Or did you set a predictor variable to NA?" 
) } eta } # workhorse function of predictor_fe # @param X fixed effects design matrix # @param b draws of fixed effects coeffients .predictor_fe <- function(X, b) { stopifnot(is.matrix(X)) stopifnot(is.matrix(b)) tcrossprod(b, X) } # compute eta for varying effects predictor_re <- function(prep, i) { eta <- 0 re <- prep[["re"]] group <- names(re[["r"]]) for (g in group) { eta_g <- try(.predictor_re(Z = p(re[["Z"]][[g]], i), r = re[["r"]][[g]])) if (is_try_error(eta_g)) { stop2( "Something went wrong (see the error message above). ", "Perhaps you transformed numeric variables ", "to factors or vice versa within the model formula? ", "If yes, please convert your variables beforehand. ", "Or did you use a grouping factor also for a different purpose? ", "If yes, please make sure that its factor levels are correct ", "also in the new data you may have provided." ) } eta <- eta + eta_g } eta } # workhorse function of predictor_re # @param Z sparse random effects design matrix # @param r random effects draws # @return linear predictor for random effects .predictor_re <- function(Z, r) { Matrix::as.matrix(Matrix::tcrossprod(r, Z)) } # compute eta for special effects terms predictor_sp <- function(prep, i) { eta <- 0 sp <- prep[["sp"]] if (!length(sp)) { return(eta) } eval_list <- list() for (j in seq_along(sp[["simo"]])) { eval_list[[paste0("Xmo_", j)]] <- p(sp[["Xmo"]][[j]], i) eval_list[[paste0("simo_", j)]] <- sp[["simo"]][[j]] } for (j in seq_along(sp[["Xme"]])) { eval_list[[paste0("Xme_", j)]] <- p(sp[["Xme"]][[j]], i, row = FALSE) } for (j in seq_along(sp[["Yl"]])) { eval_list[[names(sp[["Yl"]])[j]]] <- p(sp[["Yl"]][[j]], i, row = FALSE) } for (j in seq_along(sp[["idxl"]])) { eval_list[[names(sp[["idxl"]])[j]]] <- p(sp[["idxl"]][[j]], i, row = FALSE) } for (j in seq_along(sp[["Csp"]])) { eval_list[[paste0("Csp_", j)]] <- p(sp[["Csp"]][[j]], i, row = FALSE) } re <- prep[["re"]] coef <- colnames(sp[["bsp"]]) for (j in seq_along(coef)) { # prepare special group-level 
effects rsp <- named_list(names(re[["rsp"]][[coef[j]]])) for (g in names(rsp)) { rsp[[g]] <- .predictor_re( Z = p(re[["Zsp"]][[g]], i), r = re[["rsp"]][[coef[j]]][[g]] ) } eta <- eta + .predictor_sp( eval_list, call = sp[["calls"]][[j]], b = sp[["bsp"]][, j], r = Reduce("+", rsp) ) } eta } # workhorse function of predictor_sp # @param call expression for evaluation of special effects # @param eval_list list containing variables for 'call' # @param b special effects coefficients draws # @param r matrix with special effects group-level draws .predictor_sp <- function(eval_list, call, b, r = NULL) { b <- as.vector(b) if (is.null(r)) r <- 0 (b + r) * eval(call, eval_list) } # R implementation of the user defined Stan function 'mo' # @param simplex posterior draws of a simplex parameter vector # @param X variable modeled as monotonic .mo <- function(simplex, X) { stopifnot(is.matrix(simplex), is.atomic(X)) D <- NCOL(simplex) simplex <- cbind(0, simplex) for (i in seq_cols(simplex)[-1]) { # compute the cumulative representation of the simplex simplex[, i] <- simplex[, i] + simplex[, i - 1] } D * simplex[, X + 1] } # compute eta for smooth terms predictor_sm <- function(prep, i) { eta <- 0 if (!length(prep[["sm"]])) { return(eta) } fe <- prep[["sm"]]$fe if (length(fe)) { eta <- eta + .predictor_fe(X = p(fe$Xs, i), b = fe$bs) } re <- prep[["sm"]]$re for (k in seq_along(re)) { for (j in seq_along(re[[k]]$s)) { Zs <- p(re[[k]]$Zs[[j]], i) s <- re[[k]]$s[[j]] eta <- eta + .predictor_fe(X = Zs, b = s) } } eta } # compute eta for gaussian processes predictor_gp <- function(prep, i) { if (!length(prep[["gp"]])) { return(0) } if (!is.null(i)) { stop2("Pointwise evaluation is not supported for Gaussian processes.") } eta <- matrix(0, nrow = prep$ndraws, ncol = prep$nobs) for (k in seq_along(prep[["gp"]])) { gp <- prep[["gp"]][[k]] if (isTRUE(attr(gp, "byfac"))) { # categorical 'by' variable for (j in seq_along(gp)) { if (length(gp[[j]][["Igp"]])) { eta[, gp[[j]][["Igp"]]] <- 
.predictor_gp(gp[[j]]) } } } else { eta <- eta + .predictor_gp(gp) } } eta } # workhorse function of predictor_gp # @param gp a list returned by '.prepare_predictions_gp' # @return A S x N matrix to be added to the linear predictor # @note does not work with pointwise evaluation .predictor_gp <- function(gp) { if (is.null(gp[["slambda"]])) { # predictions for exact GPs ndraws <- length(gp[["sdgp"]]) eta <- as.list(rep(NA, ndraws)) if (!is.null(gp[["x_new"]])) { for (i in seq_along(eta)) { eta[[i]] <- with(gp, .predictor_gp_new( x_new = x_new, yL = yL[i, ], x = x, sdgp = sdgp[i], lscale = lscale[i, ], nug = nug )) } } else { for (i in seq_along(eta)) { eta[[i]] <- with(gp, .predictor_gp_old( x = x, sdgp = sdgp[i], lscale = lscale[i, ], zgp = zgp[i, ], nug = nug )) } } eta <- do_call(rbind, eta) } else { # predictions for approximate GPs eta <- with(gp, .predictor_gpa( x = x, sdgp = sdgp, lscale = lscale, zgp = zgp, slambda = slambda )) } if (!is.null(gp[["Jgp"]])) { eta <- eta[, gp[["Jgp"]], drop = FALSE] } if (!is.null(gp[["Cgp"]])) { eta <- eta * data2draws(gp[["Cgp"]], dim = dim(eta)) } eta } # make exact GP predictions for old data points # vectorized over posterior draws # @param x old predictor values # @param sdgp sample of parameter sdgp # @param lscale sample of parameter lscale # @param zgp draws of parameter vector zgp # @param nug very small positive value to ensure numerical stability .predictor_gp_old <- function(x, sdgp, lscale, zgp, nug) { Sigma <- cov_exp_quad(x, sdgp = sdgp, lscale = lscale) lx <- nrow(x) Sigma <- Sigma + diag(rep(nug, lx), lx, lx) L_Sigma <- try_nug(t(chol(Sigma)), nug = nug) as.numeric(L_Sigma %*% zgp) } # make exact GP predictions for new data points # vectorized over posterior draws # @param x_new new predictor values # @param yL linear predictor of the old data # @param x old predictor values # @param sdgp sample of parameter sdgp # @param lscale sample of parameter lscale # @param nug very small positive value to ensure 
numerical stability .predictor_gp_new <- function(x_new, yL, x, sdgp, lscale, nug) { Sigma <- cov_exp_quad(x, sdgp = sdgp, lscale = lscale) lx <- nrow(x) lx_new <- nrow(x_new) Sigma <- Sigma + diag(rep(nug, lx), lx, lx) L_Sigma <- try_nug(t(chol(Sigma)), nug = nug) L_Sigma_inverse <- solve(L_Sigma) K_div_yL <- L_Sigma_inverse %*% yL K_div_yL <- t(t(K_div_yL) %*% L_Sigma_inverse) k_x_x_new <- cov_exp_quad(x, x_new, sdgp = sdgp, lscale = lscale) mu_yL_new <- as.numeric(t(k_x_x_new) %*% K_div_yL) v_new <- L_Sigma_inverse %*% k_x_x_new cov_yL_new <- cov_exp_quad(x_new, sdgp = sdgp, lscale = lscale) - t(v_new) %*% v_new + diag(rep(nug, lx_new), lx_new, lx_new) yL_new <- try_nug( rmulti_normal(1, mu = mu_yL_new, Sigma = cov_yL_new), nug = nug ) return(yL_new) } # make predictions for approximate GPs # vectorized over posterior draws # @param x matrix of evaluated eigenfunctions of the cov matrix # @param sdgp sample of parameter sdgp # @param lscale sample of parameter lscale # @param zgp draws of parameter vector zgp # @param slambda vector of eigenvalues of the cov matrix # @note no need to differentiate between old and new data points .predictor_gpa <- function(x, sdgp, lscale, zgp, slambda) { spd <- sqrt(spd_cov_exp_quad(slambda, sdgp, lscale)) (spd * zgp) %*% t(x) } # compute eta for category specific effects # @param predictor matrix of other additive terms # @return 3D predictor array in the presence of 'cs' effects # otherwise return 'eta' unchanged predictor_cs <- function(eta, prep, i) { cs <- prep[["cs"]] re <- prep[["re"]] if (!length(cs[["bcs"]]) && !length(re[["rcs"]])) { return(eta) } nthres <- cs[["nthres"]] rcs <- NULL if (!is.null(re[["rcs"]])) { groups <- names(re[["rcs"]]) rcs <- vector("list", nthres) for (k in seq_along(rcs)) { rcs[[k]] <- named_list(groups) for (g in groups) { rcs[[k]][[g]] <- .predictor_re( Z = p(re[["Zcs"]][[g]], i), r = re[["rcs"]][[g]][[k]] ) } rcs[[k]] <- Reduce("+", rcs[[k]]) } } .predictor_cs( eta, X = p(cs[["Xcs"]], i), b = 
cs[["bcs"]], nthres = nthres, r = rcs ) } # workhorse function of predictor_cs # @param X category specific design matrix # @param b category specific effects draws # @param nthres number of thresholds # @param eta linear predictor matrix # @param r list of draws of cs group-level effects # @return 3D predictor array including category specific effects .predictor_cs <- function(eta, X, b, nthres, r = NULL) { stopifnot(is.null(X) && is.null(b) || is.matrix(X) && is.matrix(b)) nthres <- max(nthres) eta <- predictor_expand(eta, nthres) if (!is.null(X)) { I <- seq(1, (nthres) * ncol(X), nthres) - 1 X <- t(X) } for (k in seq_len(nthres)) { if (!is.null(X)) { eta[, , k] <- eta[, , k] + b[, I + k, drop = FALSE] %*% X } if (!is.null(r[[k]])) { eta[, , k] <- eta[, , k] + r[[k]] } } eta } # expand dimension of the predictor matrix to a 3D array predictor_expand <- function(eta, nthres) { if (length(dim(eta)) == 2L) { eta <- array(eta, dim = c(dim(eta), nthres)) } eta } predictor_offset <- function(prep, i, nobs) { if (is.null(prep$offset)) { return(0) } eta <- rep(p(prep$offset, i), prep$ndraws) matrix(eta, ncol = nobs, byrow = TRUE) } # compute eta for autocorrelation structures # @note eta has to be passed to this function in # order for ARMA structures to work correctly predictor_ac <- function(eta, prep, i, fprep = NULL) { if (!is.null(prep$ac[["err"]])) { # auto-correlations via latent residuals eta <- eta + p(prep$ac$err, i, row = FALSE) } else if (has_ac_class(prep$ac$acef, "arma")) { # ARMA correlations via explicit natural residuals if (!is.null(i)) { stop2("Pointwise evaluation is not possible for ARMA models.") } eta <- .predictor_arma( eta, ar = prep$ac$ar, ma = prep$ac$ma, Y = prep$ac$Y, J_lag = prep$ac$J_lag, fprep = fprep ) } if (has_ac_class(prep$ac$acef, "car")) { eta <- eta + .predictor_re(Z = p(prep$ac$Zcar, i), r = prep$ac$rcar) } eta } # add ARMA effects to a predictor matrix # @param eta linear predictor matrix # @param ar optional autoregressive draws 
# @param ma optional moving average draws # @param Y vector of response values # @param J_lag autocorrelation lag for each observation # @return linear predictor matrix updated by ARMA effects .predictor_arma <- function(eta, ar = NULL, ma = NULL, Y = NULL, J_lag = NULL, fprep = NULL) { if (is.null(ar) && is.null(ma)) { return(eta) } if (anyNA(Y)) { # predicting Y will be necessary at some point stopifnot(is.brmsprep(fprep) || is.mvbrmsprep(fprep)) pp_fun <- paste0("posterior_predict_", fprep$family$fun) pp_fun <- get(pp_fun, asNamespace("brms")) } S <- nrow(eta) N <- length(Y) max_lag <- max(J_lag, 1) Kar <- ifelse(is.null(ar), 0, ncol(ar)) Kma <- ifelse(is.null(ma), 0, ncol(ma)) # relevant if time-series are shorter than the ARMA orders take_ar <- seq_len(min(Kar, max_lag)) take_ma <- seq_len(min(Kma, max_lag)) ar <- ar[, take_ar, drop = FALSE] ma <- ma[, take_ma, drop = FALSE] Err <- array(0, dim = c(S, max_lag, max_lag + 1)) err <- zero_mat <- matrix(0, nrow = S, ncol = max_lag) zero_vec <- rep(0, S) for (n in seq_len(N)) { if (Kma) { eta[, n] <- eta[, n] + rowSums(ma * Err[, take_ma, max_lag]) } eta_before_ar <- eta[, n] if (Kar) { eta[, n] <- eta[, n] + rowSums(ar * Err[, take_ar, max_lag]) } # AR terms need to be included in the predictions of y if missing # the prediction code thus differs from the structure of the Stan code y <- Y[n] if (is.na(y)) { # y was not observed and has to be predicted fprep$dpars$mu <- eta y <- pp_fun(n, fprep) } # errors in AR models need to be computed before adding AR terms err[, max_lag] <- y - eta_before_ar if (J_lag[n] > 0) { # store residuals of former observations I <- seq_len(J_lag[n]) Err[, I, max_lag + 1] <- err[, max_lag + 1 - I] } # keep the size of 'err' and 'Err' as small as possible Err <- abind(Err[, , -1, drop = FALSE], zero_mat) err <- cbind(err[, -1, drop = FALSE], zero_vec) } eta } brms/R/prior_draws.R0000644000176200001440000001175514213413565014111 0ustar liggesusers#' Extract Prior Draws #' #' Extract prior 
draws of specified parameters #' #' @aliases prior_draws.brmsfit prior_samples #' #' @param x An \code{R} object typically of class \code{brmsfit}. #' @inheritParams as.data.frame.brmsfit #' @param ... Arguments passed to individual methods (if applicable). #' #' @details To make use of this function, the model must contain draws of #' prior distributions. This can be ensured by setting \code{sample_prior = #' TRUE} in function \code{brm}. Priors of certain parameters cannot be saved #' for technical reasons. For instance, this is the case for the #' population-level intercept, which is only computed after fitting the model #' by default. If you want to treat the intercept as part of all the other #' regression coefficients, so that sampling from its prior becomes possible, #' use \code{... ~ 0 + Intercept + ...} in the formulas. #' #' @return A \code{data.frame} containing the prior draws. #' #' @examples #' \dontrun{ #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = "cumulative", #' prior = set_prior("normal(0,2)", class = "b"), #' sample_prior = TRUE) #' #' # extract all prior draws #' draws1 <- prior_draws(fit) #' head(draws1) #' #' # extract prior draws for the coefficient of 'treat' #' draws2 <- prior_draws(fit, "b_treat") #' head(draws2) #' } #' #' @export prior_draws.brmsfit <- function(x, variable = NULL, pars = NULL, ...) 
{ variable <- use_alias(variable, pars) if (!is.null(variable)) { variable <- as.character(variable) } all_names <- variables(x) prior_names <- unique(all_names[grepl("^prior_", all_names)]) if (!length(prior_names)) { return(data.frame(NULL)) } draws <- as.data.frame(x, variable = prior_names) names(draws) <- sub("^prior_", "", prior_names) if (is.null(variable)) { return(draws) } # get prior draws for a single variable .prior_draws <- function(variable) { matches <- paste0("^", escape_all(names(draws))) matches <- lapply(matches, regexpr, text = variable) matches <- ulapply(matches, attr, which = "match.length") if (max(matches) == -1 || ignore_prior(x, variable)) { out <- NULL } else { take <- match(max(matches), matches) # order draws randomly to avoid artificial dependencies # between parameters using the same prior draws draws <- list(draws[sample(ndraws(x)), take]) out <- structure(draws, names = variable) } return(out) } draws <- rmNULL(lapply(variable, .prior_draws)) draws <- data.frame(draws, check.names = FALSE) draws } #' @rdname prior_draws.brmsfit #' @export prior_draws <- function(x, ...) { UseMethod("prior_draws") } #' @export prior_draws.default <- function(x, variable = NULL, pars = NULL, regex = FALSE, fixed = FALSE, ...) 
{ call <- match.call() if ("pars" %in% names(call)) { variable <- use_alias(variable, pars) regex <- !as_one_logical(fixed) } if (is.null(variable)) { variable <- "^prior_" regex <- TRUE } else { variable <- as.character(variable) regex <- as_one_logical(regex) if (regex) { hat <- substr(variable, 1, 1) == "^" variable <- ifelse(hat, substr(variable, 2, nchar(variable)), variable) variable <- paste0("^prior_", variable) } else { variable <- paste0("prior_", variable) } } x <- as_draws_df(as.data.frame(x)) if (!regex) { # missing variables will leads to an error in posterior variable <- intersect(variable, variables(x)) if (!length(variable)) { return(data.frame(NULL)) } } x <- subset_draws(x, variable = variable, regex = regex, ...) unclass_draws(x) } #' @rdname prior_draws.brmsfit #' @export prior_samples <- function(x, ...) { warning2("'prior_samples' is deprecated. Please use 'prior_draws' instead.") UseMethod("prior_draws") } # ignore priors of certain parameters from whom we cannot obtain prior draws # currently applies only to overall intercepts of centered design matrices # fixes issue #696 # @param x a brmsfit object # @param variable name of a single variable # @return TRUE (if the prior should be ignored) or FALSE ignore_prior <- function(x, variable) { stopifnot(is.brmsfit(x)) variable <- as_one_character(variable) out <- FALSE if (grepl("^b_.*Intercept($|\\[)", variable)) { # cannot sample from intercepts if 'center' was TRUE intercept_priors <- subset2(x$prior, class = "Intercept") if (NROW(intercept_priors)) { # prefixes of the model intercepts p_intercepts <- usc(combine_prefix(intercept_priors)) # prefix of the parameter under question p_par <- sub("^b", "", variable) p_par <- sub("_Intercept($|\\[)", "", p_par) out <- p_par %in% p_intercepts if (out) { warning2( "Sampling from the prior of an overall intercept is not ", "possible by default. See the documentation of the ", "'sample_prior' argument in help('brm')." 
) } } } out } brms/R/backends.R0000644000176200001440000007557614462172323013343 0ustar liggesusers# parse Stan model code # @param model Stan model code # @return validated Stan model code parse_model <- function(model, backend, ...) { backend <- as_one_character(backend) .parse_model <- get(paste0(".parse_model_", backend), mode = "function") .parse_model(model, ...) } # parse Stan model code with rstan # @param model Stan model code # @return validated Stan model code .parse_model_rstan <- function(model, silent = 1, ...) { out <- eval_silent( rstan::stanc(model_code = model, ...), type = "message", try = TRUE, silent = silent ) out$model_code } # parse Stan model code with cmdstanr # @param model Stan model code # @return validated Stan model code .parse_model_cmdstanr <- function(model, silent = 1, ...) { require_package("cmdstanr") temp_file <- cmdstanr::write_stan_file(model) if (cmdstanr::cmdstan_version() >= "2.29.0") { .canonicalize_stan_model(temp_file, overwrite_file = TRUE) } out <- eval_silent( cmdstanr::cmdstan_model(temp_file, compile = FALSE, ...), type = "message", try = TRUE, silent = silent ) out$check_syntax(quiet = TRUE) collapse(out$code(), "\n") } # parse model with a mock backend for testing .parse_model_mock <- function(model, silent = TRUE, parse_error = NULL, parse_check = "rstan", ...) { if (!is.null(parse_error)) { stop2(parse_error) } else if (parse_check == "rstan") { out <- .parse_model_rstan(model, silent = silent, ...) } else if (parse_check == "cmdstanr") { out <- .parse_model_cmdstanr(model, silent = silent, ...) } else if (is.null(parse_check)) { out <- "mock_code" } else { stop2("Unknown 'parse_check' value.") } out } # compile Stan model # @param model Stan model code # @return validated Stan model code compile_model <- function(model, backend, ...) { backend <- as_one_character(backend) .compile_model <- get(paste0(".compile_model_", backend), mode = "function") .compile_model(model, ...) 
} # compile Stan model with rstan # @param model Stan model code # @return model compiled with rstan .compile_model_rstan <- function(model, threads, opencl, silent = 1, ...) { args <- list(...) args$model_code <- model if (silent < 2) { message("Compiling Stan program...") } if (use_threading(threads)) { if (utils::packageVersion("rstan") >= "2.26") { threads_per_chain_def <- rstan::rstan_options("threads_per_chain") on.exit(rstan::rstan_options(threads_per_chain = threads_per_chain_def)) rstan::rstan_options(threads_per_chain = threads$threads) } else { stop2("Threading is not supported by backend 'rstan' version ", utils::packageVersion("rstan"), ".") } } if (use_opencl(opencl)) { stop2("OpenCL is not supported by backend 'rstan' version ", utils::packageVersion("rstan"), ".") } eval_silent( do_call(rstan::stan_model, args), type = "message", try = TRUE, silent = silent >= 2 ) } # compile Stan model with cmdstanr # @param model Stan model code # @return model compiled with cmdstanr .compile_model_cmdstanr <- function(model, threads, opencl, silent = 1, ...) { require_package("cmdstanr") args <- list(...) args$stan_file <- cmdstanr::write_stan_file(model) if (cmdstanr::cmdstan_version() >= "2.29.0") { .canonicalize_stan_model(args$stan_file, overwrite_file = TRUE) } if (use_threading(threads)) { args$cpp_options$stan_threads <- TRUE } if (use_opencl(opencl)) { args$cpp_options$stan_opencl <- TRUE } eval_silent( do_call(cmdstanr::cmdstan_model, args), type = "message", try = TRUE, silent = silent >= 2 ) } # compile model with a mock backend for testing .compile_model_mock <- function(model, threads, opencl, compile_check = "rstan", compile_error = NULL, silent = 1, ...) { if (!is.null(compile_error)) { stop2(compile_error) } else if (compile_check == "rstan") { out <- .parse_model_rstan(model, silent = silent, ...) } else if (compile_check == "cmdstanr") { out <- .parse_model_cmdstanr(model, silent = silent, ...) 
} else if (is.null(compile_check)) { out <- list() } else { stop2("Unknown 'compile_check' value.") } out } # fit Stan model # @param model Stan model code # @return validated Stan model code fit_model <- function(model, backend, ...) { backend <- as_one_character(backend) .fit_model <- get(paste0(".fit_model_", backend), mode = "function") .fit_model(model, ...) } # fit Stan model with rstan # @param model a compiled Stan model # @param sdata named list to be passed to Stan as data # @return a fitted Stan model .fit_model_rstan <- function(model, sdata, algorithm, iter, warmup, thin, chains, cores, threads, opencl, init, exclude, seed, control, silent, future, ...) { # some input checks and housekeeping if (use_threading(threads)) { if (utils::packageVersion("rstan") >= "2.26") { threads_per_chain_def <- rstan::rstan_options("threads_per_chain") on.exit(rstan::rstan_options(threads_per_chain = threads_per_chain_def)) rstan::rstan_options(threads_per_chain = threads$threads) } else { stop2("Threading is not supported by backend 'rstan' version ", utils::packageVersion("rstan"), ".") } } if (use_opencl(opencl)) { stop2("OpenCL is not supported by backend 'rstan' version ", utils::packageVersion("rstan"), ".") } if (is.null(init)) { init <- "random" } else if (is.character(init) && !init %in% c("random", "0")) { init <- get(init, mode = "function", envir = parent.frame()) } args <- nlist( object = model, data = sdata, iter, seed, init = init, pars = exclude, include = FALSE ) dots <- list(...) 
args[names(dots)] <- dots # do the actual sampling if (silent < 2) { message("Start sampling") } if (algorithm %in% c("sampling", "fixed_param")) { c(args) <- nlist(warmup, thin, control, show_messages = !silent) if (algorithm == "fixed_param") { args$algorithm <- "Fixed_param" } if (future) { if (cores > 1L) { warning2("Argument 'cores' is ignored when using 'future'.") } args$chains <- 1L futures <- fits <- vector("list", chains) for (i in seq_len(chains)) { args$chain_id <- i if (is.list(init)) { args$init <- init[i] } futures[[i]] <- future::future( brms::do_call(rstan::sampling, args), packages = "rstan", seed = TRUE ) } for (i in seq_len(chains)) { fits[[i]] <- future::value(futures[[i]]) } out <- rstan::sflist2stanfit(fits) rm(futures, fits) } else { c(args) <- nlist(chains, cores) out <- do_call(rstan::sampling, args) } } else if (algorithm %in% c("fullrank", "meanfield")) { # vb does not support parallel execution c(args) <- nlist(algorithm) out <- do_call(rstan::vb, args) } else { stop2("Algorithm '", algorithm, "' is not supported.") } out <- repair_stanfit(out) out } # fit Stan model with cmdstanr # @param model a compiled Stan model # @param sdata named list to be passed to Stan as data # @return a fitted Stan model .fit_model_cmdstanr <- function(model, sdata, algorithm, iter, warmup, thin, chains, cores, threads, opencl, init, exclude, seed, control, silent, future, ...) 
{ require_package("cmdstanr") # some input checks and housekeeping class(sdata) <- "list" if (isNA(seed)) { seed <- NULL } if (is_equal(init, "random")) { init <- NULL } else if (is_equal(init, "0")) { init <- 0 } if (future) { stop2("Argument 'future' is not supported by backend 'cmdstanr'.") } args <- nlist(data = sdata, seed, init) if (use_threading(threads)) { if (algorithm %in% c("sampling", "fixed_param")) { args$threads_per_chain <- threads$threads } else if (algorithm %in% c("fullrank", "meanfield")) { args$threads <- threads$threads } } if (use_opencl(opencl)) { args$opencl_ids <- opencl$ids } dots <- list(...) args[names(dots)] <- dots args[names(control)] <- control chains <- as_one_numeric(chains) empty_model <- chains <= 0 if (empty_model) { # fit the model with minimal amount of draws # TODO: replace with a better solution chains <- 1 iter <- 2 warmup <- 1 thin <- 1 cores <- 1 } # do the actual sampling if (silent < 2) { message("Start sampling") } if (algorithm %in% c("sampling", "fixed_param")) { c(args) <- nlist( iter_sampling = iter - warmup, iter_warmup = warmup, chains, thin, parallel_chains = cores, show_messages = !silent, fixed_param = algorithm == "fixed_param" ) out <- do_call(model$sample, args) } else if (algorithm %in% c("fullrank", "meanfield")) { # vb does not support parallel execution c(args) <- nlist(iter, algorithm) out <- do_call(model$variational, args) } else { stop2("Algorithm '", algorithm, "' is not supported.") } # not all metadata is not stored by read_csv_as_stanfit metadata <- cmdstanr::read_cmdstan_csv( out$output_files(), variables = "", sampler_diagnostics = "" ) # ensure that only relevant variables are read from CSV variables <- repair_variable_names(metadata$metadata$variables) variables <- unique(sub("\\[.+", "", variables)) variables <- setdiff(variables, exclude) # transform into stanfit object for consistent output structure out <- read_csv_as_stanfit(out$output_files(), variables = variables) out <- 
repair_stanfit(out) # allow updating the model without recompilation attributes(out)$CmdStanModel <- model attributes(out)$metadata <- metadata if (empty_model) { # allow correct updating of an 'empty' model out@sim <- list() } out } # fit model with a mock backend for testing .fit_model_mock <- function(model, sdata, algorithm, iter, warmup, thin, chains, cores, threads, opencl, init, exclude, seed, control, silent, future, mock_fit, ...) { if (is.function(mock_fit)) { out <- mock_fit() } else { out <- mock_fit } out } # extract the compiled stan model # @param x brmsfit object compiled_model <- function(x) { stopifnot(is.brmsfit(x)) backend <- x$backend %||% "rstan" if (backend == "rstan") { out <- rstan::get_stanmodel(x$fit) } else if (backend == "cmdstanr") { out <- attributes(x$fit)$CmdStanModel } else if (backend == "mock") { stop2("'compiled_model' is not supported in the mock backend.") } out } # Does the model need recompilation before being able to sample again? needs_recompilation <- function(x) { stopifnot(is.brmsfit(x)) backend <- x$backend %||% "rstan" if (backend == "rstan") { # TODO: figure out when rstan requires recompilation out <- FALSE } else if (backend == "cmdstanr") { exe_file <- attributes(x$fit)$CmdStanModel$exe_file() out <- !is.character(exe_file) || !file.exists(exe_file) } else if (backend == "mock") { out <- FALSE } out } #' Recompile Stan models in \code{brmsfit} objects #' #' Recompile the Stan model inside a \code{brmsfit} object, if necessary. #' This does not change the model, it simply recreates the executable #' so that sampling is possible again. #' #' @param x An object of class \code{brmsfit}. #' @param recompile Logical, indicating whether the Stan model should be #' recompiled. If \code{NULL} (the default), \code{recompile_model} tries #' to figure out internally, if recompilation is necessary. Setting it to #' \code{FALSE} will cause \code{recompile_model} to always return the #' \code{brmsfit} object unchanged. 
#' #' @return A (possibly updated) \code{brmsfit} object. #' #' @export recompile_model <- function(x, recompile = NULL) { stopifnot(is.brmsfit(x)) if (is.null(recompile)) { recompile <- needs_recompilation(x) } recompile <- as_one_logical(recompile) if (!recompile) { return(x) } message("Recompiling the Stan model") backend <- x$backend %||% "rstan" new_model <- compile_model( stancode(x), backend = backend, threads = x$threads, opencl = x$opencl, silent = 2 ) if (backend == "rstan") { x$fit@stanmodel <- new_model } else if (backend == "cmdstanr") { attributes(x)$CmdStanModel <- new_model } else if (backend == "mock") { stop2("'recompile_model' is not supported in the mock backend.") } x } # extract the elapsed time during model fitting # @param x brmsfit object elapsed_time <- function(x) { stopifnot(is.brmsfit(x)) backend <- x$backend %||% "rstan" if (backend == "rstan") { out <- rstan::get_elapsed_time(x$fit) out <- data.frame( chain_id = seq_len(nrow(out)), warmup = out[, "warmup"], sampling = out[, "sample"] ) out$total <- out$warmup + out$sampling rownames(out) <- NULL } else if (backend == "cmdstanr") { out <- attributes(x$fit)$metadata$time$chains } else if (backend == "mock") { stop2("'elapsed_time' not supported in the mock backend.") } out } # supported Stan backends backend_choices <- function() { c("rstan", "cmdstanr", "mock") } # supported Stan algorithms algorithm_choices <- function() { c("sampling", "meanfield", "fullrank", "fixed_param") } # check if the model was fit the the required backend require_backend <- function(backend, x) { stopifnot(is.brmsfit(x)) backend <- match.arg(backend, backend_choices()) if (isTRUE(x$backend != backend)) { stop2("Backend '", backend, "' is required for this method.") } invisible(TRUE) } #' Threading in Stan #' #' Use threads for within-chain parallelization in \pkg{Stan} via the \pkg{brms} #' interface. Within-chain parallelization is experimental! 
We recommend its use #' only if you are experienced with Stan's \code{reduce_sum} function and have a #' slow running model that cannot be sped up by any other means. #' #' @param threads Number of threads to use in within-chain parallelization. #' @param grainsize Number of observations evaluated together in one chunk on #' one of the CPUs used for threading. If \code{NULL} (the default), #' \code{grainsize} is currently chosen as \code{max(100, N / (2 * #' threads))}, where \code{N} is the number of observations in the data. This #' default is experimental and may change in the future without prior notice. #' @param static Logical. Apply the static (non-adaptive) version of #' \code{reduce_sum}? Defaults to \code{FALSE}. Setting it to \code{TRUE} #' is required to achieve exact reproducibility of the model results #' (if the random seed is set as well). #' #' @return A \code{brmsthreads} object which can be passed to the #' \code{threads} argument of \code{brm} and related functions. #' #' @details The adaptive scheduling procedure used by \code{reduce_sum} will #' prevent the results to be exactly reproducible even if you set the random #' seed. If you need exact reproducibility, you have to set argument #' \code{static = TRUE} which may reduce efficiency a bit. #' #' To ensure that chunks (whose size is defined by \code{grainsize}) require #' roughly the same amount of computing time, we recommend storing #' observations in random order in the data. At least, please avoid sorting #' observations after the response values. This is because the latter often #' cause variations in the computing time of the pointwise log-likelihood, #' which makes up a big part of the parallelized code. 
#' #' @examples #' \dontrun{ #' # this model just serves as an illustration #' # threading may not actually speed things up here #' fit <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = negbinomial(), #' chains = 1, threads = threading(2, grainsize = 100), #' backend = "cmdstanr") #' summary(fit) #' } #' #' @export threading <- function(threads = NULL, grainsize = NULL, static = FALSE) { out <- list(threads = NULL, grainsize = NULL) class(out) <- "brmsthreads" if (!is.null(threads)) { threads <- as_one_numeric(threads) if (!is_wholenumber(threads) || threads < 1) { stop2("Number of threads needs to be positive.") } out$threads <- threads } if (!is.null(grainsize)) { grainsize <- as_one_numeric(grainsize) if (!is_wholenumber(grainsize) || grainsize < 1) { stop2("The grainsize needs to be positive.") } out$grainsize <- grainsize } out$static <- as_one_logical(static) out } is.brmsthreads <- function(x) { inherits(x, "brmsthreads") } # validate 'thread' argument validate_threads <- function(threads) { if (is.null(threads)) { threads <- threading() } else if (is.numeric(threads)) { threads <- as_one_numeric(threads) threads <- threading(threads) } else if (!is.brmsthreads(threads)) { stop2("Argument 'threads' needs to be numeric or ", "specified via the 'threading' function.") } threads } # is threading activated? use_threading <- function(threads) { isTRUE(validate_threads(threads)$threads > 0) } #' GPU support in Stan via OpenCL #' #' Use OpenCL for GPU support in \pkg{Stan} via the \pkg{brms} interface. Only #' some \pkg{Stan} functions can be run on a GPU at this point and so #' a lot of \pkg{brms} models won't benefit from OpenCL for now. #' #' @param ids (integer vector of length 2) The platform and device IDs of the #' OpenCL device to use for fitting. If you don't know the IDs of your OpenCL #' device, \code{c(0,0)} is most likely what you need. 
#' #' @return A \code{brmsopencl} object which can be passed to the #' \code{opencl} argument of \code{brm} and related functions. #' #' @details For more details on OpenCL in \pkg{Stan}, check out #' \url{https://mc-stan.org/docs/2_26/cmdstan-guide/parallelization.html#opencl} #' as well as \url{https://mc-stan.org/docs/2_26/stan-users-guide/opencl.html}. #' #' @examples #' \dontrun{ #' # this model just serves as an illustration #' # OpenCL may not actually speed things up here #' fit <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson(), #' chains = 2, cores = 2, opencl = opencl(c(0, 0)), #' backend = "cmdstanr") #' summary(fit) #' } #' #' @export opencl <- function(ids = NULL) { out <- list(ids = NULL) class(out) <- "brmsopencl" if (!is.null(ids)) { ids <- as.integer(ids) if (!length(ids) == 2L) { stop2("OpenCl 'ids' needs to be an integer vector of length 2.") } out$ids <- ids } out } is.brmsopencl <- function(x) { inherits(x, "brmsopencl") } # validate the 'opencl' argument validate_opencl <- function(opencl) { if (is.null(opencl)) { opencl <- opencl() } else if (is.numeric(opencl)) { opencl <- opencl(opencl) } else if (!is.brmsopencl(opencl)) { stop2("Argument 'opencl' needs to an integer vector or ", "specified via the 'opencl' function.") } opencl } # is OpenCL activated? 
use_opencl <- function(opencl) { !is.null(validate_opencl(opencl)$ids) } # validate the 'silent' argument validate_silent <- function(silent) { silent <- as_one_integer(silent) if (silent < 0 || silent > 2) { stop2("'silent' must be between 0 and 2.") } silent } # ensure that variable dimensions at the end are correctly written # convert names like b.1.1 to b[1,1] repair_variable_names <- function(x) { x <- sub("\\.", "[", x) x <- gsub("\\.", ",", x) x[grep("\\[", x)] <- paste0(x[grep("\\[", x)], "]") x } # repair parameter names of stanfit objects repair_stanfit <- function(x) { stopifnot(is.stanfit(x)) if (!length(x@sim$fnames_oi)) { # nothing to rename return(x) } # the posterior package cannot deal with non-unique parameter names # this case happens rarely but might happen when sample_prior = "yes" x@sim$fnames_oi <- make.unique(as.character(x@sim$fnames_oi), "__") for (i in seq_along(x@sim$samples)) { # stanfit may have renamed dimension suffixes (#1218) if (length(x@sim$samples[[i]]) == length(x@sim$fnames_oi)) { names(x@sim$samples[[i]]) <- x@sim$fnames_oi } } x } # possible options for argument 'file_refit' file_refit_options <- function() { c("never", "always", "on_change") } # canonicalize Stan model file in accordance with the current Stan version .canonicalize_stan_model <- function(stan_file, overwrite_file = TRUE) { cmdstan_mod <- cmdstanr::cmdstan_model(stan_file, compile = FALSE) out <- utils::capture.output( cmdstan_mod$format( canonicalize = list("deprecations", "braces", "parentheses"), overwrite_file = overwrite_file, backup = FALSE ) ) paste0(out, collapse = "\n") } # read in stan CSVs via cmdstanr and repackage into a stanfit object # efficient replacement of rstan::read_stan_csv # @param files character vector of CSV files names where draws are stored # @param variables character vectors of variables to extract draws for # @param sampler_diagnostics character vectors of diagnostics to extract # @return a stanfit object read_csv_as_stanfit <- 
function(files, variables = NULL, sampler_diagnostics = NULL) { csfit <- cmdstanr::read_cmdstan_csv( files = files, variables = variables, sampler_diagnostics = sampler_diagnostics, format = NULL ) # @model_name model_name = gsub(".csv", "", basename(files[[1]])) # @model_pars svars <- csfit$metadata$stan_variables if (!is.null(variables)) { variables_main <- unique(gsub("\\[.*\\]", "", variables)) svars <- intersect(variables_main, svars) } if ("lp__" %in% svars) { svars <- c(setdiff(svars, "lp__"), "lp__") } pars_oi <- svars par_names <- csfit$metadata$model_params # @par_dims par_dims <- vector("list", length(svars)) names(par_dims) <- svars par_dims <- lapply(par_dims, function(x) x <- integer(0)) pdims_num <- ulapply( svars, function(x) sum(grepl(paste0("^", x, "\\[.*\\]$"), par_names)) ) par_dims[pdims_num != 0] <- csfit$metadata$stan_variable_sizes[svars][pdims_num != 0] # @mode mode <- 0L # @sim rstan_diagn_order <- c("accept_stat__", "treedepth__", "stepsize__", "divergent__", "n_leapfrog__", "energy__") if (!is.null(sampler_diagnostics)) { rstan_diagn_order <- rstan_diagn_order[rstan_diagn_order %in% sampler_diagnostics] } res_vars <- c(".chain", ".iteration", ".draw") if ("post_warmup_draws" %in% names(csfit)) { # for MCMC samplers n_chains <- max( nchains(csfit$warmup_draws), nchains(csfit$post_warmup_draws) ) n_iter_warmup <- niterations(csfit$warmup_draws) n_iter_sample <- niterations(csfit$post_warmup_draws) if (n_iter_warmup > 0) { csfit$warmup_draws <- as_draws_df(csfit$warmup_draws) csfit$warmup_sampler_diagnostics <- as_draws_df(csfit$warmup_sampler_diagnostics) } if (n_iter_sample > 0) { csfit$post_warmup_draws <- as_draws_df(csfit$post_warmup_draws) csfit$post_warmup_sampler_diagnostics <- as_draws_df(csfit$post_warmup_sampler_diagnostics) } # called 'samples' for consistency with rstan samples <- rbind(csfit$warmup_draws, csfit$post_warmup_draws) # manage memory csfit$warmup_draws <- NULL csfit$post_warmup_draws <- NULL # prepare sampler 
diagnostics diagnostics <- rbind(csfit$warmup_sampler_diagnostics, csfit$post_warmup_sampler_diagnostics) # manage memory csfit$warmup_sampler_diagnostics <- NULL csfit$post_warmup_sampler_diagnostics <- NULL # convert to regular data.frame diagnostics <- as.data.frame(diagnostics) diag_chain_ids <- diagnostics$.chain diagnostics[res_vars] <- NULL } else if ("draws" %in% names(csfit)) { # for variational inference "samplers" n_chains <- 1 n_iter_warmup <- 0 n_iter_sample <- niterations(csfit$draws) if (n_iter_sample > 0) { csfit$draws <- as_draws_df(csfit$draws) } # called 'samples' for consistency with rstan samples <- csfit$draws # manage memory csfit$draws <- NULL # VI has no sampler diagnostics diag_chain_ids <- rep(1L, nrow(samples)) diagnostics <- as.data.frame(matrix(nrow = nrow(samples), ncol = 0)) } # convert to regular data.frame samples <- as.data.frame(samples) chain_ids <- samples$.chain samples[res_vars] <- NULL if ("lp__" %in% colnames(samples)) { samples <- move2end(samples, "lp__") } fnames_oi <- colnames(samples) colnames(samples) <- gsub("\\[", ".", colnames(samples)) colnames(samples) <- gsub("\\]", "", colnames(samples)) colnames(samples) <- gsub("\\,", ".", colnames(samples)) # split samples into chains samples <- split(samples, chain_ids) names(samples) <- NULL # split diagnostics into chains diagnostics <- split(diagnostics, diag_chain_ids) names(diagnostics) <- NULL # @sim$sample: largely 113-130 from rstan::read_stan_csv values <- list() values$algorithm <- csfit$metadata$algorithm values$engine <- csfit$metadata$engine values$metric <- csfit$metadata$metric sampler_t <- NULL if (!is.null(values$algorithm)) { if (values$algorithm == "rwm" || values$algorithm == "Metropolis") { sampler_t <- "Metropolis" } else if (values$algorithm == "hmc") { if (values$engine == "static") { sampler_t <- "HMC" } else { if (values$metric == "unit_e") { sampler_t <- "NUTS(unit_e)" } else if (values$metric == "diag_e") { sampler_t <- "NUTS(diag_e)" } else if 
(values$metric == "dense_e") { sampler_t <- "NUTS(dense_e)" } } } } adapt_info <- vector("list", 4) idx_samples <- (n_iter_warmup + 1):(n_iter_warmup + n_iter_sample) for (i in seq_along(samples)) { m <- colMeans(samples[[i]][idx_samples, , drop=FALSE]) rownames(samples[[i]]) <- seq_rows(samples[[i]]) attr(samples[[i]], "sampler_params") <- diagnostics[[i]][rstan_diagn_order] rownames(attr(samples[[i]], "sampler_params")) <- seq_rows(diagnostics[[i]]) # reformat back to text if (is_equal(sampler_t, "NUTS(dense_e)")) { mmatrix_txt <- "\n# Elements of inverse mass matrix:\n# " mmat <- paste0(apply(csfit$inv_metric[[i]], 1, paste0, collapse=", "), collapse="\n# ") } else { mmatrix_txt <- "\n# Diagonal elements of inverse mass matrix:\n# " mmat <- paste0(csfit$inv_metric[[i]], collapse = ", ") } adapt_info[[i]] <- paste0("# Step size = ", csfit$step_size[[i]], mmatrix_txt, mmat, "\n# ") attr(samples[[i]], "adaptation_info") <- adapt_info[[i]] attr(samples[[i]], "args") <- list(sampler_t = sampler_t, chain_id = i) if (NROW(csfit$metadata$time)) { time_i <- as.double(csfit$metadata$time[i, c("warmup", "sampling")]) names(time_i) <- c("warmup", "sample") attr(samples[[i]], "elapsed_time") <- time_i } attr(samples[[i]], "mean_pars") <- m[-length(m)] attr(samples[[i]], "mean_lp__") <- m["lp__"] } perm_lst <- lapply(seq_len(n_chains), function(id) sample.int(n_iter_sample)) # @sim sim <- list( samples = samples, iter = csfit$metadata$iter_sampling + csfit$metadata$iter_warmup, thin = csfit$metadata$thin, warmup = csfit$metadata$iter_warmup, chains = n_chains, n_save = rep(n_iter_sample + n_iter_warmup, n_chains), warmup2 = rep(n_iter_warmup, n_chains), permutation = perm_lst, pars_oi = pars_oi, dims_oi = par_dims, fnames_oi = fnames_oi, n_flatnames = length(fnames_oi) ) # @stan_args sargs <- list( stan_version_major = as.character(csfit$metadata$stan_version_major), stan_version_minor = as.character(csfit$metadata$stan_version_minor), stan_version_patch = 
as.character(csfit$metadata$stan_version_patch), model = csfit$metadata$model_name, start_datetime = gsub(" ", "", csfit$metadata$start_datetime), method = csfit$metadata$method, iter = csfit$metadata$iter_sampling + csfit$metadata$iter_warmup, warmup = csfit$metadata$iter_warmup, save_warmup = csfit$metadata$save_warmup, thin = csfit$metadata$thin, engaged = as.character(csfit$metadata$adapt_engaged), gamma = csfit$metadata$gamma, delta = csfit$metadata$adapt_delta, kappa = csfit$metadata$kappa, t0 = csfit$metadata$t0, init_buffer = as.character(csfit$metadata$init_buffer), term_buffer = as.character(csfit$metadata$term_buffer), window = as.character(csfit$metadata$window), algorithm = csfit$metadata$algorithm, engine = csfit$metadata$engine, max_depth = csfit$metadata$max_treedepth, metric = csfit$metadata$metric, metric_file = character(0), # not stored in metadata stepsize = NA, # add in loop stepsize_jitter = csfit$metadata$stepsize_jitter, num_chains = as.character(csfit$metadata$num_chains), chain_id = NA, # add in loop file = character(0), # not stored in metadata init = NA, # add in loop seed = as.character(csfit$metadata$seed), file = NA, # add in loop diagnostic_file = character(0), # not stored in metadata refresh = as.character(csfit$metadata$refresh), sig_figs = as.character(csfit$metadata$sig_figs), profile_file = csfit$metadata$profile_file, num_threads = as.character(csfit$metadata$threads_per_chain), stanc_version = gsub(" ", "", csfit$metadata$stanc_version), stancflags = character(0), # not stored in metadata adaptation_info = NA, # add in loop has_time = is.numeric(csfit$metadata$time$total), time_info = NA, # add in loop sampler_t = sampler_t ) sargs_rep <- replicate(n_chains, sargs, simplify = FALSE) for (i in seq_along(sargs_rep)) { sargs_rep[[i]]$chain_id <- i sargs_rep[[i]]$stepsize <- csfit$metadata$step_size[i] sargs_rep[[i]]$init <- as.character(csfit$metadata$init[i]) # two 'file' elements: select the second file_idx <- 
which(names(sargs_rep[[i]]) == "file") sargs_rep[[i]][[file_idx[2]]] <- files[[i]] sargs_rep[[i]]$adaptation_info <- adapt_info[[i]] if (NROW(csfit$metadata$time)) { sargs_rep[[i]]$time_info <- paste0( c("# Elapsed Time: ", "# ", "# ", "# "), c(csfit$metadata$time[i, c("warmup", "sampling", "total")], ""), c(" seconds (Warm-up)", " seconds (Sampling)", " seconds (Total)", "") ) } } # @stanmodel null_dso <- new( "cxxdso", sig = list(character(0)), dso_saved = FALSE, dso_filename = character(0), modulename = character(0), system = R.version$system, cxxflags = character(0), .CXXDSOMISC = new.env(parent = emptyenv()) ) null_sm <- new( "stanmodel", model_name = model_name, model_code = character(0), model_cpp = list(), dso = null_dso ) # @date sdate <- do.call(max, lapply(files, function(csv) file.info(csv)$mtime)) sdate <- format(sdate, "%a %b %d %X %Y") new( "stanfit", model_name = model_name, model_pars = svars, par_dims = par_dims, mode = mode, sim = sim, inits = list(), stan_args = sargs_rep, stanmodel = null_sm, date = sdate, # not the time of sampling .MISC = new.env(parent = emptyenv()) ) } brms/R/conditional_effects.R0000644000176200001440000013730714504263635015566 0ustar liggesusers#' Display Conditional Effects of Predictors #' #' Display conditional effects of one or more numeric and/or categorical #' predictors including two-way interaction effects. #' #' @aliases marginal_effects marginal_effects.brmsfit #' #' @param x An object of class \code{brmsfit}. #' @param effects An optional character vector naming effects (main effects or #' interactions) for which to compute conditional plots. Interactions are #' specified by a \code{:} between variable names. If \code{NULL} (the #' default), plots are generated for all main effects and two-way interactions #' estimated in the model. When specifying \code{effects} manually, \emph{all} #' two-way interactions (including grouping variables) may be plotted #' even if not originally modeled. 
#' @param conditions An optional \code{data.frame} containing variable values #' to condition on. Each effect defined in \code{effects} will #' be plotted separately for each row of \code{conditions}. Values in the #' \code{cond__} column will be used as titles of the subplots. If \code{cond__} #' is not given, the row names will be used for this purpose instead. #' It is recommended to only define a few rows in order to keep the plots clear. #' See \code{\link{make_conditions}} for an easy way to define conditions. #' If \code{NULL} (the default), numeric variables will be conditionalized by #' using their means and factors will get their first level assigned. #' \code{NA} values within factors are interpreted as if all dummy #' variables of this factor are zero. This allows, for instance, to make #' predictions of the grand mean when using sum coding. #' @param int_conditions An optional named \code{list} whose elements are #' vectors of values of the variables specified in \code{effects}. #' At these values, predictions are evaluated. The names of #' \code{int_conditions} have to match the variable names exactly. #' Additionally, the elements of the vectors may be named themselves, #' in which case their names appear as labels for the conditions in the plots. #' Instead of vectors, functions returning vectors may be passed and are #' applied on the original values of the corresponding variable. #' If \code{NULL} (the default), predictions are evaluated at the #' \eqn{mean} and at \eqn{mean +/- sd} for numeric predictors and at #' all categories for factor-like predictors. #' @param re_formula A formula containing group-level effects to be considered #' in the conditional predictions. If \code{NULL}, include all group-level #' effects; if \code{NA} (default), include no group-level effects. #' @param robust If \code{TRUE} (the default) the median is used as the #' measure of central tendency. If \code{FALSE} the mean is used instead. 
#' @param prob A value between 0 and 1 indicating the desired probability #' to be covered by the uncertainty intervals. The default is 0.95. #' @param probs (Deprecated) The quantiles to be used in the computation of #' uncertainty intervals. Please use argument \code{prob} instead. #' @param method Method used to obtain predictions. Can be set to #' \code{"posterior_epred"} (the default), \code{"posterior_predict"}, #' or \code{"posterior_linpred"}. For more details, see the respective #' function documentations. #' @param spaghetti Logical. Indicates if predictions should #' be visualized via spaghetti plots. Only applied for numeric #' predictors. If \code{TRUE}, it is recommended #' to set argument \code{ndraws} to a relatively small value #' (e.g., \code{100}) in order to reduce computation time. #' @param surface Logical. Indicates if interactions or #' two-dimensional smooths should be visualized as a surface. #' Defaults to \code{FALSE}. The surface type can be controlled #' via argument \code{stype} of the related plotting method. #' @param categorical Logical. Indicates if effects of categorical #' or ordinal models should be shown in terms of probabilities #' of response categories. Defaults to \code{FALSE}. #' @param ordinal (Deprecated) Please use argument \code{categorical}. #' Logical. Indicates if effects in ordinal models #' should be visualized as a raster with the response categories #' on the y-axis. Defaults to \code{FALSE}. #' @param transform A function or a character string naming #' a function to be applied on the predicted responses #' before summary statistics are computed. Only allowed #' if \code{method = "posterior_predict"}. #' @param resolution Number of support points used to generate #' the plots. Higher resolution leads to smoother plots. #' Defaults to \code{100}. 
If \code{surface} is \code{TRUE}, #' this implies \code{10000} support points for interaction terms, #' so it might be necessary to reduce \code{resolution} #' when only few RAM is available. #' @param too_far Positive number. #' For surface plots only: Grid points that are too #' far away from the actual data points can be excluded from the plot. #' \code{too_far} determines what is too far. The grid is scaled into #' the unit square and then grid points more than \code{too_far} #' from the predictor variables are excluded. By default, all #' grid points are used. Ignored for non-surface plots. #' @param select_points Positive number. #' Only relevant if \code{points} or \code{rug} are set to \code{TRUE}: #' Actual data points of numeric variables that #' are too far away from the values specified in \code{conditions} #' can be excluded from the plot. Values are scaled into #' the unit interval and then points more than \code{select_points} #' from the values in \code{conditions} are excluded. #' By default, all points are used. #' @param ... Further arguments such as \code{draw_ids} or \code{ndraws} #' passed to \code{\link{posterior_predict}} or \code{\link{posterior_epred}}. #' @inheritParams plot.brmsfit #' @param ncol Number of plots to display per column for each effect. #' If \code{NULL} (default), \code{ncol} is computed internally based #' on the number of rows of \code{conditions}. #' @param points Logical. Indicates if the original data points should be added #' via \code{\link{geom_jitter}}. Default is \code{FALSE}. Can be controlled #' globally via the \code{brms.plot_points} option. Note that only those data #' points will be added that match the specified conditions defined in #' \code{conditions}. For categorical predictors, the conditions have to match #' exactly. For numeric predictors, argument \code{select_points} is used to #' determine, which points do match a condition. #' @param rug Logical. 
Indicates if a rug representation of predictor values #' should be added via \code{\link{geom_rug}}. Default is \code{FALSE}. #' Depends on \code{select_points} in the same way as \code{points} does. Can #' be controlled globally via the \code{brms.plot_rug} option. #' @param mean Logical. Only relevant for spaghetti plots. #' If \code{TRUE} (the default), display the mean regression #' line on top of the regression lines for each sample. #' @param jitter_width Only used if \code{points = TRUE}: #' Amount of horizontal jittering of the data points. #' Mainly useful for ordinal models. Defaults to \code{0} that #' is no jittering. #' @param stype Indicates how surface plots should be displayed. #' Either \code{"contour"} or \code{"raster"}. #' @param line_args Only used in plots of continuous predictors: #' A named list of arguments passed to #' \code{\link{geom_smooth}}. #' @param cat_args Only used in plots of categorical predictors: #' A named list of arguments passed to #' \code{\link{geom_point}}. #' @param errorbar_args Only used in plots of categorical predictors: #' A named list of arguments passed to #' \code{\link{geom_errorbar}}. #' @param surface_args Only used in surface plots: #' A named list of arguments passed to #' \code{\link{geom_contour}} or #' \code{\link{geom_raster}} #' (depending on argument \code{stype}). #' @param spaghetti_args Only used in spaghetti plots: #' A named list of arguments passed to #' \code{\link{geom_smooth}}. #' @param point_args Only used if \code{points = TRUE}: #' A named list of arguments passed to #' \code{\link{geom_jitter}}. #' @param rug_args Only used if \code{rug = TRUE}: #' A named list of arguments passed to #' \code{\link{geom_rug}}. #' @param facet_args Only used if if multiple condtions are provided: #' A named list of arguments passed to #' \code{\link{facet_wrap}}. 
#' #' @return An object of class \code{'brms_conditional_effects'} which is a #' named list with one data.frame per effect containing all information #' required to generate conditional effects plots. Among others, these #' data.frames contain some special variables, namely \code{estimate__} #' (predicted values of the response), \code{se__} (standard error of the #' predicted response), \code{lower__} and \code{upper__} (lower and upper #' bounds of the uncertainty interval of the response), as well as #' \code{cond__} (used in faceting when \code{conditions} contains multiple #' rows). #' #' The corresponding \code{plot} method returns a named #' list of \code{\link{ggplot}} objects, which can be further #' customized using the \pkg{ggplot2} package. #' #' @details When creating \code{conditional_effects} for a particular predictor #' (or interaction of two predictors), one has to choose the values of all #' other predictors to condition on. By default, the mean is used for #' continuous variables and the reference category is used for factors, but #' you may change these values via argument \code{conditions}. This also has #' an implication for the \code{points} argument: In the created plots, only #' those points will be shown that correspond to the factor levels actually #' used in the conditioning, in order not to create the false impression of #' bad model fit, where it is just due to conditioning on certain factor #' levels. #' #' To fully change colors of the created plots, one has to amend both #' \code{scale_colour} and \code{scale_fill}. See #' \code{\link{scale_colour_grey}} or \code{\link{scale_colour_gradient}} for #' more details. 
#' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt + (1 | patient), #' data = epilepsy, family = poisson()) #' #' ## plot all conditional effects #' plot(conditional_effects(fit), ask = FALSE) #' #' ## change colours to grey scale #' library(ggplot2) #' ce <- conditional_effects(fit, "zBase:Trt") #' plot(ce, plot = FALSE)[[1]] + #' scale_color_grey() + #' scale_fill_grey() #' #' ## only plot the conditional interaction effect of 'zBase:Trt' #' ## for different values for 'zAge' #' conditions <- data.frame(zAge = c(-1, 0, 1)) #' plot(conditional_effects(fit, effects = "zBase:Trt", #' conditions = conditions)) #' #' ## also incorporate group-level effects variance over patients #' ## also add data points and a rug representation of predictor values #' plot(conditional_effects(fit, effects = "zBase:Trt", #' conditions = conditions, re_formula = NULL), #' points = TRUE, rug = TRUE) #' #' ## change handling of two-way interactions #' int_conditions <- list( #' zBase = setNames(c(-2, 1, 0), c("b", "c", "a")) #' ) #' conditional_effects(fit, effects = "Trt:zBase", #' int_conditions = int_conditions) #' conditional_effects(fit, effects = "Trt:zBase", #' int_conditions = list(zBase = quantile)) #' #' ## fit a model to illustrate how to plot 3-way interactions #' fit3way <- brm(count ~ zAge * zBase * Trt, data = epilepsy) #' conditions <- make_conditions(fit3way, "zAge") #' conditional_effects(fit3way, "zBase:Trt", conditions = conditions) #' ## only include points close to the specified values of zAge #' ce <- conditional_effects( #' fit3way, "zBase:Trt", conditions = conditions, #' select_points = 0.1 #' ) #' plot(ce, points = TRUE) #' } #' #' @export conditional_effects.brmsfit <- function(x, effects = NULL, conditions = NULL, int_conditions = NULL, re_formula = NA, prob = 0.95, robust = TRUE, method = "posterior_epred", spaghetti = FALSE, surface = FALSE, categorical = FALSE, ordinal = FALSE, transform = NULL, resolution = 100, select_points = 0, 
too_far = 0, probs = NULL, ...) { probs <- validate_ci_bounds(prob, probs = probs) method <- validate_pp_method(method) spaghetti <- as_one_logical(spaghetti) surface <- as_one_logical(surface) categorical <- as_one_logical(categorical) ordinal <- as_one_logical(ordinal) contains_draws(x) x <- restructure(x) new_formula <- update_re_terms(x$formula, re_formula = re_formula) bterms <- brmsterms(new_formula) if (!is.null(transform) && method != "posterior_predict") { stop2("'transform' is only allowed if 'method = posterior_predict'.") } if (ordinal) { warning2("Argument 'ordinal' is deprecated. ", "Please use 'categorical' instead.") } rsv_vars <- rsv_vars(bterms) use_def_effects <- is.null(effects) if (use_def_effects) { effects <- get_all_effects(bterms, rsv_vars = rsv_vars) } else { # allow to define interactions in any order effects <- strsplit(as.character(effects), split = ":") if (any(unique(unlist(effects)) %in% rsv_vars)) { stop2("Variables ", collapse_comma(rsv_vars), " should not be used as effects for this model") } if (any(lengths(effects) > 2L)) { stop2("To display interactions of order higher than 2 ", "please use the 'conditions' argument.") } all_effects <- get_all_effects( bterms, rsv_vars = rsv_vars, comb_all = TRUE ) ae_coll <- all_effects[lengths(all_effects) == 1L] ae_coll <- ulapply(ae_coll, paste, collapse = ":") matches <- match(lapply(all_effects, sort), lapply(effects, sort), 0L) if (sum(matches) > 0 && sum(matches > 0) < length(effects)) { invalid <- effects[setdiff(seq_along(effects), sort(matches))] invalid <- ulapply(invalid, paste, collapse = ":") warning2( "Some specified effects are invalid for this model: ", collapse_comma(invalid), "\nValid effects are ", "(combinations of): ", collapse_comma(ae_coll) ) } effects <- unique(effects[sort(matches)]) if (!length(effects)) { stop2( "All specified effects are invalid for this model.\n", "Valid effects are (combinations of): ", collapse_comma(ae_coll) ) } } if (categorical || ordinal) { 
int_effs <- lengths(effects) == 2L if (any(int_effs)) { effects <- effects[!int_effs] warning2( "Interactions cannot be plotted directly if 'categorical' ", "is TRUE. Please use argument 'conditions' instead." ) } } if (!length(effects)) { stop2("No valid effects detected.") } mf <- model.frame(x) conditions <- prepare_conditions( x, conditions = conditions, effects = effects, re_formula = re_formula, rsv_vars = rsv_vars ) int_conditions <- lapply(int_conditions, function(x) if (is.numeric(x)) sort(x, TRUE) else x ) int_vars <- get_int_vars(bterms) group_vars <- get_group_vars(bterms) out <- list() for (i in seq_along(effects)) { eff <- effects[[i]] cond_data <- prepare_cond_data( mf[, eff, drop = FALSE], conditions = conditions, int_conditions = int_conditions, int_vars = int_vars, group_vars = group_vars, surface = surface, resolution = resolution, reorder = use_def_effects ) if (surface && length(eff) == 2L && too_far > 0) { # exclude prediction grid points too far from data ex_too_far <- mgcv::exclude.too.far( g1 = cond_data[[eff[1]]], g2 = cond_data[[eff[2]]], d1 = mf[, eff[1]], d2 = mf[, eff[2]], dist = too_far) cond_data <- cond_data[!ex_too_far, ] } c(out) <- conditional_effects( bterms, fit = x, cond_data = cond_data, method = method, surface = surface, spaghetti = spaghetti, categorical = categorical, ordinal = ordinal, re_formula = re_formula, transform = transform, conditions = conditions, int_conditions = int_conditions, select_points = select_points, probs = probs, robust = robust, ... ) } structure(out, class = "brms_conditional_effects") } #' @rdname conditional_effects.brmsfit #' @export conditional_effects <- function(x, ...) { UseMethod("conditional_effects") } # compute expected values of MV models for use in conditional_effects # @return a list of summarized prediction matrices #' @export conditional_effects.mvbrmsterms <- function(x, resp = NULL, ...) 
{ resp <- validate_resp(resp, x$responses) x$terms <- x$terms[resp] out <- lapply(x$terms, conditional_effects, ...) unlist(out, recursive = FALSE) } # conditional_effects for univariate model # @return a list with the summarized prediction matrix as the only element # @note argument 'resp' exists only to be excluded from '...' (#589) #' @export conditional_effects.brmsterms <- function( x, fit, cond_data, int_conditions, method, surface, spaghetti, categorical, ordinal, probs, robust, dpar = NULL, nlpar = NULL, resp = NULL, ... ) { stopifnot(is.brmsfit(fit)) effects <- attr(cond_data, "effects") types <- attr(cond_data, "types") catscale <- NULL pred_args <- list( fit, newdata = cond_data, allow_new_levels = TRUE, dpar = dpar, nlpar = nlpar, resp = if (nzchar(x$resp)) x$resp, incl_autocor = FALSE, ... ) if (method != "posterior_predict") { # 'transform' creates problems in 'posterior_linpred' pred_args$transform <- NULL } out <- do_call(method, pred_args) rownames(cond_data) <- NULL if (categorical || ordinal) { if (method != "posterior_epred") { stop2("Can only use 'categorical' with method = 'posterior_epred'.") } if (!is_polytomous(x)) { stop2("Argument 'categorical' may only be used ", "for categorical or ordinal models.") } if (categorical && ordinal) { stop2("Please use argument 'categorical' instead of 'ordinal'.") } catscale <- str_if(is_multinomial(x), "Count", "Probability") cats <- dimnames(out)[[3]] if (is.null(cats)) cats <- seq_dim(out, 3) cond_data <- repl(cond_data, length(cats)) cond_data <- do_call(rbind, cond_data) cond_data$cats__ <- factor(rep(cats, each = ncol(out)), levels = cats) effects[2] <- "cats__" types[2] <- "factor" } else { if (conv_cats_dpars(x$family) && is.null(dpar)) { stop2("Please set 'categorical' to TRUE.") } if (is_ordinal(x$family) && is.null(dpar) && method != "posterior_linpred") { warning2( "Predictions are treated as continuous variables in ", "'conditional_effects' by default which is likely invalid ", "for ordinal 
families. Please set 'categorical' to TRUE." ) if (method == "posterior_epred") { out <- ordinal_probs_continuous(out) } } } cond_data <- add_effects__(cond_data, effects) first_numeric <- types[1] %in% "numeric" second_numeric <- types[2] %in% "numeric" both_numeric <- first_numeric && second_numeric if (second_numeric && !surface) { # only convert 'effect2__' to factor so that the original # second effect variable remains unchanged in the data mde2 <- round(cond_data[[effects[2]]], 2) levels2 <- sort(unique(mde2), TRUE) cond_data$effect2__ <- factor(mde2, levels = levels2) labels2 <- names(int_conditions[[effects[2]]]) if (length(labels2) == length(levels2)) { levels(cond_data$effect2__) <- labels2 } } spag <- NULL if (first_numeric && spaghetti) { if (surface) { stop2("Cannot use 'spaghetti' and 'surface' at the same time.") } spag <- out if (categorical) { spag <- do_call(cbind, array2list(spag)) } sample <- rep(seq_rows(spag), each = ncol(spag)) if (length(types) == 2L) { # draws should be unique across plotting groups sample <- paste0(sample, "_", cond_data[[effects[2]]]) } spag <- data.frame(as.numeric(t(spag)), factor(sample)) colnames(spag) <- c("estimate__", "sample__") # ensures that 'cbind' works even in the presence of matrix columns cond_data_spag <- repl(cond_data, nrow(spag) / nrow(cond_data)) cond_data_spag <- Reduce(rbind, cond_data_spag) spag <- cbind(cond_data_spag, spag) } out <- posterior_summary(out, probs = probs, robust = robust) if (categorical || ordinal) { out <- do_call(rbind, array2list(out)) } colnames(out) <- c("estimate__", "se__", "lower__", "upper__") out <- cbind(cond_data, out) if (!is.null(dpar)) { response <- dpar } else if (!is.null(nlpar)) { response <- nlpar } else { response <- as.character(x$formula[2]) } attr(out, "effects") <- effects attr(out, "response") <- response attr(out, "surface") <- unname(both_numeric && surface) attr(out, "categorical") <- categorical attr(out, "catscale") <- catscale attr(out, "ordinal") <- 
ordinal attr(out, "spaghetti") <- spag attr(out, "points") <- make_point_frame(x, fit$data, effects, ...) name <- paste0(usc(x$resp, "suffix"), paste0(effects, collapse = ":")) setNames(list(out), name) } # get combinations of variables used in predictor terms # @param ... character vectors or formulas # @param alist a list of character vectors or formulas get_var_combs <- function(..., alist = list()) { dots <- c(list(...), alist) for (i in seq_along(dots)) { if (is.formula(dots[[i]])) { dots[[i]] <- attr(terms(dots[[i]]), "term.labels") } dots[[i]] <- lapply(dots[[i]], all_vars) } unique(unlist(dots, recursive = FALSE)) } # extract combinations of predictor variables get_all_effects <- function(x, ...) { UseMethod("get_all_effects") } #' @export get_all_effects.default <- function(x, ...) { NULL } #' @export get_all_effects.mvbrmsterms <- function(x, ...) { out <- lapply(x$terms, get_all_effects, ...) unique(unlist(out, recursive = FALSE)) } # get all effects for use in conditional_effects # @param bterms object of class brmsterms # @param rsv_vars character vector of reserved variables # @param comb_all include all main effects and two-way interactions? # @return a list with one element per valid effect / effects combination # excludes all 3-way or higher interactions #' @export get_all_effects.brmsterms <- function(x, rsv_vars = NULL, comb_all = FALSE, ...) 
{
  stopifnot(is_atomic_or_null(rsv_vars))
  # collect effects from all distributional and non-linear parameters
  out <- list()
  for (dp in names(x$dpars)) {
    out <- c(out, get_all_effects(x$dpars[[dp]]))
  }
  for (nlp in names(x$nlpars)) {
    out <- c(out, get_all_effects(x$nlpars[[nlp]]))
  }
  # drop reserved variables from every effect combination
  out <- rmNULL(lapply(out, setdiff, y = rsv_vars))
  if (comb_all) {
    # allow to combine all variables with each other
    out <- unique(unlist(out))
    out <- c(out, get_group_vars(x))
    if (length(out)) {
      # build all ordered pairs of distinct variables, then reduce
      # to unique unordered two-way combinations
      int <- expand.grid(out, out, stringsAsFactors = FALSE)
      int <- int[int[, 1] != int[, 2], ]
      int <- as.list(as.data.frame(t(int), stringsAsFactors = FALSE))
      int <- unique(unname(lapply(int, sort)))
      out <- c(as.list(out), int)
    }
  }
  # exclude all 3-way or higher interactions
  unique(out[lengths(out) <= 2L])
}

#' @export
get_all_effects.btl <- function(x, ...) {
  # combine effects of ordinary (fe/cs) terms with those of special terms
  c(get_var_combs(x[["fe"]], x[["cs"]]),
    get_all_effects_type(x, "sp"),
    get_all_effects_type(x, "sm"),
    get_all_effects_type(x, "gp"))
}

# extract variable combinations from special terms
# @param x object of class 'btl'
# @param type type of the special term, e.g. "sp", "sm", or "gp"
get_all_effects_type <- function(x, type) {
  stopifnot(is.btl(x))
  type <- as_one_character(type)
  regex_type <- regex_sp(type)
  terms <- all_terms(x[[type]])
  out <- named_list(terms)
  for (i in seq_along(terms)) {
    # some special terms can appear within interactions
    # we did not allow ":" within these terms so we can use it for splitting
    term_parts <- unlist(strsplit(terms[i], split = ":"))
    vars <- vector("list", length(term_parts))
    for (j in seq_along(term_parts)) {
      matches <- get_matches_expr(regex_type, term_parts[j])
      for (k in seq_along(matches)) {
        # evaluate special terms to extract variables
        tmp <- eval2(matches[[k]])
        c(vars[[j]]) <- setdiff(unique(c(tmp$term, tmp$by)), "NA")
      }
      # extract all variables not part of any special term
      c(vars[[j]]) <- setdiff(all_vars(term_parts[j]), all_vars(matches))
    }
    vars <- unique(unlist(vars))
    out[[i]] <- str2formula(vars, collapse = "*")
  }
  get_var_combs(alist = out)
}

#' @export
get_all_effects.btnl <- function(x, ...)
{
  # covariates of the non-linear formula form the candidate effects
  covars <- all_vars(rhs(x$covars))
  out <- as.list(covars)
  if (length(covars) > 1L) {
    # also include all two-way combinations of covariates
    c(out) <- utils::combn(covars, 2, simplify = FALSE)
  }
  unique(out)
}

# extract names of predictor variables
get_pred_vars <- function(x) {
  unique(unlist(get_all_effects(x)))
}

# extract names of variables treated as integers
get_int_vars <- function(x, ...) {
  UseMethod("get_int_vars")
}

#' @export
get_int_vars.mvbrmsterms <- function(x, ...) {
  # union of integer-valued variables across all univariate submodels
  unique(ulapply(x$terms, get_int_vars))
}

#' @export
get_int_vars.brmsterms <- function(x, ...) {
  # variables of the 'trials', 'thres', and 'vint' addition terms
  # as well as monotonic ('mo') terms are treated as integers
  advars <- ulapply(rmNULL(x$adforms[c("trials", "thres", "vint")]), all_vars)
  unique(c(advars, get_sp_vars(x, "mo")))
}

# transform posterior draws of ordinal probabilities to a
# continuous scale assuming equidistance between adjacent categories
# @param x an ndraws x nobs x ncat array of posterior draws
# @return an ndraws x nobs matrix of posterior draws
ordinal_probs_continuous <- function(x) {
  stopifnot(length(dim(x)) == 3)
  # weight each category's probability by its (1-based) category index
  for (k in seq_dim(x, 3)) {
    x[, , k] <- x[, , k] * k
  }
  # sum over categories: expected category value per draw and observation
  x <- lapply(seq_dim(x, 2), function(s) rowSums(x[, s, ]))
  do_call(cbind, x)
}

#' Prepare Fully Crossed Conditions
#'
#' This is a helper function to prepare fully crossed conditions primarily
#' for use with the \code{conditions} argument of \code{\link{conditional_effects}}.
#' Automatically creates labels for each row in the \code{cond__} column.
#'
#' @param x An \R object from which to extract the variables
#'   that should be part of the conditions.
#' @param vars Names of the variables that should be part of the conditions.
#' @param ... Arguments passed to \code{\link{rows2labels}}.
#'
#' @return A \code{data.frame} where each row indicates a condition.
#'
#' @details For factor like variables, all levels are used as conditions.
#'   For numeric variables, \code{mean + (-1:1) * SD} are used as conditions.
#'
#' @seealso \code{\link{conditional_effects}}, \code{\link{rows2labels}}
#'
#' @examples
#' df <- data.frame(x = c("a", "b"), y = rnorm(10))
#' make_conditions(df, vars = c("x", "y"))
#'
#' @export
make_conditions <- function(x, vars, ...) {
  # rev ensures that the last variable varies fastest in expand.grid
  vars <- rev(as.character(vars))
  if (!is.data.frame(x) && "data" %in% names(x)) {
    # e.g. extract the data slot of a 'brmsfit' object
    x <- x$data
  }
  x <- as.data.frame(x)
  out <- named_list(vars)
  for (v in vars) {
    tmp <- get(v, x)
    if (is_like_factor(tmp)) {
      # use all levels of factor-like variables as conditions
      tmp <- levels(as.factor(tmp))
    } else {
      # use mean +/- one SD as conditions for numeric variables
      tmp <- mean(tmp, na.rm = TRUE) + (-1:1) * sd(tmp, na.rm = TRUE)
    }
    out[[v]] <- tmp
  }
  # rev restores the user-supplied variable order in the output
  out <- rev(expand.grid(out))
  out$cond__ <- rows2labels(out, ...)
  out
}

# extract the cond__ variable used for faceting
get_cond__ <- function(x) {
  out <- x[["cond__"]]
  if (is.null(out)) {
    # fall back to row names if no labels were stored
    out <- rownames(x)
  }
  as.character(out)
}

#' Convert Rows to Labels
#'
#' Convert information in rows to labels for each row.
#'
#' @param x A \code{data.frame} for which to extract labels.
#' @param digits Minimal number of decimal places shown in
#'   the labels of numeric variables.
#' @param sep A single character string defining the separator
#'   between variables used in the labels.
#' @param incl_vars Indicates if variable names should
#'   be part of the labels. Defaults to \code{TRUE}.
#' @param ... Currently unused.
#'
#' @return A character vector of the same length as the number
#'   of rows of \code{x}.
#'
#' @seealso \code{\link{make_conditions}}, \code{\link{conditional_effects}}
#'
#' @export
rows2labels <- function(x, digits = 2, sep = " & ", incl_vars = TRUE, ...)
{ x <- as.data.frame(x) incl_vars <- as_one_logical(incl_vars) out <- x for (i in seq_along(out)) { if (!is_like_factor(out[[i]])) { out[[i]] <- round(out[[i]], digits) } if (incl_vars) { out[[i]] <- paste0(names(out)[i], " = ", out[[i]]) } } paste_sep <- function(..., sep__ = sep) { paste(..., sep = sep__) } Reduce(paste_sep, out) } # prepare conditions for use in conditional_effects # @param fit an object of class 'brmsfit' # @param conditions optional data.frame containing user defined conditions # @param effects see conditional_effects # @param re_formula see conditional_effects # @param rsv_vars names of reserved variables # @return a data.frame with (possibly updated) conditions prepare_conditions <- function(fit, conditions = NULL, effects = NULL, re_formula = NA, rsv_vars = NULL) { mf <- model.frame(fit) new_formula <- update_re_terms(fit$formula, re_formula = re_formula) bterms <- brmsterms(new_formula) if (any(grepl_expr("^(as\\.)?factor(.+)$", bterms$allvars))) { # conditions are chosen based the variables stored in the data # this approach cannot take into account possible transformations # to factors happening inside the model formula warning2( "Using 'factor' or 'as.factor' in the model formula ", "might lead to problems in 'conditional_effects'.", "Please convert your variables to factors beforehand." 
) } req_vars <- all_vars(rhs(bterms$allvars)) req_vars <- setdiff(req_vars, rsv_vars) req_vars <- setdiff(req_vars, names(fit$data2)) if (is.null(conditions)) { conditions <- as.data.frame(as.list(rep(NA, length(req_vars)))) names(conditions) <- req_vars } else { conditions <- as.data.frame(conditions) if (!nrow(conditions)) { stop2("Argument 'conditions' must have a least one row.") } conditions <- unique(conditions) if (any(duplicated(get_cond__(conditions)))) { stop2("Condition labels should be unique.") } req_vars <- setdiff(req_vars, names(conditions)) } # special treatment for 'trials' addition variables trial_vars <- all_vars(bterms$adforms$trials) trial_vars <- trial_vars[!vars_specified(trial_vars, conditions)] if (length(trial_vars)) { message("Setting all 'trials' variables to 1 by ", "default if not specified otherwise.") req_vars <- setdiff(req_vars, trial_vars) for (v in trial_vars) { conditions[[v]] <- 1L } } # use sensible default values for unspecified variables subset_vars <- get_ad_vars(bterms, "subset") int_vars <- get_int_vars(bterms) group_vars <- get_group_vars(bterms) req_vars <- setdiff(req_vars, group_vars) for (v in req_vars) { if (is_like_factor(mf[[v]])) { # factor-like variable if (v %in% subset_vars) { # avoid unintentional subsetting of newdata (#755) conditions[[v]] <- TRUE } else { # use reference category for factors levels <- levels(as.factor(mf[[v]])) ordered <- is.ordered(mf[[v]]) conditions[[v]] <- factor(levels[1], levels, ordered = ordered) } } else { # numeric-like variable if (v %in% subset_vars) { # avoid unintentional subsetting of newdata (#755) conditions[[v]] <- 1 } else if (v %in% int_vars) { # ensure valid integer values conditions[[v]] <- round(median(mf[[v]], na.rm = TRUE)) } else { conditions[[v]] <- mean(mf[[v]], na.rm = TRUE) } } } all_vars <- c(all_vars(bterms$allvars), "cond__") unused_vars <- setdiff(names(conditions), all_vars) if (length(unused_vars)) { warning2( "The following variables in 'conditions' 
are not ", "part of the model:\n", collapse_comma(unused_vars) ) } cond__ <- conditions$cond__ conditions <- validate_newdata( conditions, fit, re_formula = re_formula, allow_new_levels = TRUE, check_response = FALSE, incl_autocor = FALSE ) conditions$cond__ <- cond__ conditions } # prepare data to be used in conditional_effects # @param data data.frame containing only data of the predictors of interest # @param conditions see argument 'conditions' of conditional_effects # @param int_conditions see argument 'int_conditions' of conditional_effects # @param int_vars names of variables being treated as integers # @param group_vars names of grouping variables # @param surface generate surface plots later on? # @param resolution number of distinct points at which to evaluate # the predictors of interest # @param reorder reorder predictors so that numeric ones come first? prepare_cond_data <- function(data, conditions, int_conditions = NULL, int_vars = NULL, group_vars = NULL, surface = FALSE, resolution = 100, reorder = TRUE) { effects <- names(data) stopifnot(length(effects) %in% c(1L, 2L)) is_factor <- ulapply(data, is_like_factor) | names(data) %in% group_vars types <- ifelse(is_factor, "factor", "numeric") # numeric effects should come first if (reorder) { new_order <- order(types, decreasing = TRUE) effects <- effects[new_order] types <- types[new_order] } # handle first predictor if (effects[1] %in% names(int_conditions)) { # first predictor has pre-specified conditions int_cond <- int_conditions[[effects[1]]] if (is.function(int_cond)) { int_cond <- int_cond(data[[effects[1]]]) } values <- int_cond } else if (types[1] == "factor") { # first predictor is factor-like values <- factor(unique(data[[effects[1]]])) } else { # first predictor is numeric min1 <- min(data[[effects[1]]], na.rm = TRUE) max1 <- max(data[[effects[1]]], na.rm = TRUE) if (effects[1] %in% int_vars) { values <- seq(min1, max1, by = 1) } else { values <- seq(min1, max1, length.out = resolution) } 
} if (length(effects) == 2L) { # handle second predictor values <- setNames(list(values, NA), effects) if (effects[2] %in% names(int_conditions)) { # second predictor has pre-specified conditions int_cond <- int_conditions[[effects[2]]] if (is.function(int_cond)) { int_cond <- int_cond(data[[effects[2]]]) } values[[2]] <- int_cond } else if (types[2] == "factor") { # second predictor is factor-like values[[2]] <- factor(unique(data[[effects[2]]])) } else { # second predictor is numeric if (surface) { min2 <- min(data[[effects[2]]], na.rm = TRUE) max2 <- max(data[[effects[2]]], na.rm = TRUE) if (effects[2] %in% int_vars) { values[[2]] <- seq(min2, max2, by = 1) } else { values[[2]] <- seq(min2, max2, length.out = resolution) } } else { if (effects[2] %in% int_vars) { median2 <- median(data[[effects[2]]]) mad2 <- mad(data[[effects[2]]]) values[[2]] <- round((-1:1) * mad2 + median2) } else { mean2 <- mean(data[[effects[2]]], na.rm = TRUE) sd2 <- sd(data[[effects[2]]], na.rm = TRUE) values[[2]] <- (-1:1) * sd2 + mean2 } } } data <- do_call(expand.grid, values) } else { stopifnot(length(effects) == 1L) data <- structure(data.frame(values), names = effects) } # no need to have the same value combination more than once data <- unique(data) data <- data[do_call(order, unname(as.list(data))), , drop = FALSE] data <- replicate(nrow(conditions), data, simplify = FALSE) cond_vars <- setdiff(names(conditions), effects) cond__ <- get_cond__(conditions) for (j in seq_rows(conditions)) { data[[j]] <- fill_newdata(data[[j]], cond_vars, conditions, n = j) data[[j]]$cond__ <- cond__[j] } data <- do_call(rbind, data) data$cond__ <- factor(data$cond__, cond__) structure(data, effects = effects, types = types) } # which variables in 'vars' are specified in 'data'? 
vars_specified <- function(vars, data) { .fun <- function(v) isTRUE(v %in% names(data)) && any(!is.na(data[[v]])) as.logical(ulapply(vars, .fun)) } # prepare data points based on the provided conditions # allows to add data points to conditional effects plots # @return a data.frame containing the data points to be plotted make_point_frame <- function(bterms, mf, effects, conditions, select_points = 0, transform = NULL, ...) { stopifnot(is.brmsterms(bterms), is.data.frame(mf)) effects <- intersect(effects, names(mf)) points <- mf[, effects, drop = FALSE] points$resp__ <- model.response( model.frame(bterms$respform, mf, na.action = na.pass) ) req_vars <- names(mf) groups <- get_re_groups(bterms) if (length(groups)) { c(req_vars) <- unlist(strsplit(groups, ":")) } req_vars <- unique(setdiff(req_vars, effects)) req_vars <- intersect(req_vars, names(conditions)) if (length(req_vars)) { # find out which data point is valid for which condition cond__ <- get_cond__(conditions) mf <- mf[, req_vars, drop = FALSE] conditions <- conditions[, req_vars, drop = FALSE] points$cond__ <- NA points <- replicate(nrow(conditions), points, simplify = FALSE) for (i in seq_along(points)) { cond <- conditions[i, , drop = FALSE] # ensures correct handling of matrix columns not_na <- function(x) !any(is.na(x) | x %in% "zero__") not_na <- ulapply(cond, not_na) cond <- cond[, not_na, drop = FALSE] mf_tmp <- mf[, not_na, drop = FALSE] if (ncol(mf_tmp)) { is_num <- sapply(mf_tmp, is.numeric) is_num <- is_num & !names(mf_tmp) %in% groups if (sum(is_num)) { # handle numeric variables stopifnot(select_points >= 0) if (select_points > 0) { for (v in names(mf_tmp)[is_num]) { min <- min(mf_tmp[, v], na.rm = TRUE) max <- max(mf_tmp[, v], na.rm = TRUE) unit <- scale_unit(mf_tmp[, v], min, max) unit_cond <- scale_unit(cond[, v], min, max) unit_diff <- abs(unit - unit_cond) close_enough <- unit_diff <= select_points mf_tmp[[v]][close_enough] <- cond[, v] mf_tmp[[v]][!close_enough] <- NA } } else { # take 
all numeric values if select_points is zero cond <- cond[, !is_num, drop = FALSE] mf_tmp <- mf_tmp[, !is_num, drop = FALSE] } } } if (ncol(mf_tmp)) { # handle factors and grouping variables # do it like base::duplicated K <- do_call("paste", c(mf_tmp, sep = "\r")) %in% do_call("paste", c(cond, sep = "\r")) } else { K <- seq_rows(mf) } # cond__ allows to assign points to conditions points[[i]]$cond__[K] <- cond__[i] } points <- do_call(rbind, points) points <- points[!is.na(points$cond__), , drop = FALSE] points$cond__ <- factor(points$cond__, cond__) } points <- add_effects__(points, effects) if (!is.numeric(points$resp__)) { points$resp__ <- as.numeric(as.factor(points$resp__)) if (is_binary(bterms$family)) { points$resp__ <- points$resp__ - 1 } } if (!is.null(transform)) { points$resp__ <- do_call(transform, list(points$resp__)) } points } # add effect__ variables to the data add_effects__ <- function(data, effects) { for (i in seq_along(effects)) { data[[paste0("effect", i, "__")]] <- eval2(effects[i], data) } data } #' @export print.brms_conditional_effects <- function(x, ...) { plot(x, ...) } #' @rdname conditional_effects.brmsfit #' @method plot brms_conditional_effects #' @importFrom rlang .data #' @export plot.brms_conditional_effects <- function( x, ncol = NULL, points = getOption("brms.plot_points", FALSE), rug = getOption("brms.plot_rug", FALSE), mean = TRUE, jitter_width = 0, stype = c("contour", "raster"), line_args = list(), cat_args = list(), errorbar_args = list(), surface_args = list(), spaghetti_args = list(), point_args = list(), rug_args = list(), facet_args = list(), theme = NULL, ask = TRUE, plot = TRUE, ... ) { dots <- list(...) plot <- use_alias(plot, dots$do_plot) stype <- match.arg(stype) smooths_only <- isTRUE(attr(x, "smooths_only")) if (points && smooths_only) { stop2("Argument 'points' is invalid for objects ", "returned by 'conditional_smooths'.") } if (!is_equal(jitter_width, 0)) { warning2("'jitter_width' is deprecated. 
Please use ", "'point_args = list(width = )' instead.") } if (!is.null(theme) && !is.theme(theme)) { stop2("Argument 'theme' should be a 'theme' object.") } if (plot) { default_ask <- devAskNewPage() on.exit(devAskNewPage(default_ask)) devAskNewPage(ask = FALSE) } dont_replace <- c("mapping", "data", "inherit.aes") plots <- named_list(names(x)) for (i in seq_along(x)) { response <- attr(x[[i]], "response") effects <- attr(x[[i]], "effects") ncond <- length(unique(x[[i]]$cond__)) df_points <- attr(x[[i]], "points") categorical <- isTRUE(attr(x[[i]], "categorical")) catscale <- attr(x[[i]], "catscale") surface <- isTRUE(attr(x[[i]], "surface")) # deprecated as of brms 2.4.3 ordinal <- isTRUE(attr(x[[i]], "ordinal")) if (surface || ordinal) { # surface plots for two dimensional interactions or ordinal plots plots[[i]] <- ggplot(x[[i]]) + aes(.data[["effect1__"]], .data[["effect2__"]]) + labs(x = effects[1], y = effects[2]) if (ordinal) { width <- ifelse(is_like_factor(x[[i]]$effect1__), 0.9, 1) .surface_args <- nlist( mapping = aes(fill = .data[["estimate__"]]), height = 0.9, width = width ) replace_args(.surface_args, dont_replace) <- surface_args plots[[i]] <- plots[[i]] + do_call(geom_tile, .surface_args) + scale_fill_gradientn(colors = viridis6(), name = catscale) + ylab(response) } else if (stype == "contour") { .surface_args <- nlist( mapping = aes( z = .data[["estimate__"]], colour = after_stat(.data[["level"]]) ), bins = 30, linewidth = 1.3 ) replace_args(.surface_args, dont_replace) <- surface_args plots[[i]] <- plots[[i]] + do_call(geom_contour, .surface_args) + scale_color_gradientn(colors = viridis6(), name = response) } else if (stype == "raster") { .surface_args <- nlist(mapping = aes(fill = .data[["estimate__"]])) replace_args(.surface_args, dont_replace) <- surface_args plots[[i]] <- plots[[i]] + do_call(geom_raster, .surface_args) + scale_fill_gradientn(colors = viridis6(), name = response) } } else { # plot effects of single predictors or two-way 
interactions gvar <- if (length(effects) == 2L) "effect2__" spaghetti <- attr(x[[i]], "spaghetti") aes_tmp <- aes(x = .data[["effect1__"]], y = .data[["estimate__"]]) if (!is.null(gvar)) { aes_tmp$colour <- aes(colour = .data[[gvar]])$colour } plots[[i]] <- ggplot(x[[i]]) + aes_tmp + labs(x = effects[1], y = response, colour = effects[2]) if (is.null(spaghetti)) { aes_tmp <- aes(ymin = .data[["lower__"]], ymax = .data[["upper__"]]) if (!is.null(gvar)) { aes_tmp$fill <- aes(fill = .data[[gvar]])$fill } plots[[i]] <- plots[[i]] + aes_tmp + labs(fill = effects[2]) } # extract suggested colors for later use colors <- ggplot_build(plots[[i]]) colors <- unique(colors$data[[1]][["colour"]]) if (points && !categorical && !surface) { # add points first so that they appear behind the predictions .point_args <- list( mapping = aes(x = .data[["effect1__"]], y = .data[["resp__"]]), data = df_points, inherit.aes = FALSE, size = 2 / ncond^0.25, height = 0, width = jitter_width ) if (is_like_factor(df_points[, gvar])) { .point_args$mapping[c("colour", "fill")] <- aes(colour = .data[[gvar]], fill = .data[[gvar]]) } replace_args(.point_args, dont_replace) <- point_args plots[[i]] <- plots[[i]] + do_call(geom_jitter, .point_args) } if (!is.null(spaghetti)) { # add a regression line for each sample separately .spaghetti_args <- list( aes(group = .data[["sample__"]]), data = spaghetti, stat = "identity", linewidth = 0.5 ) if (!is.null(gvar)) { .spaghetti_args[[1]]$colour <- aes(colour = .data[[gvar]])$colour } if (length(effects) == 1L) { .spaghetti_args$colour <- alpha("blue", 0.1) } else { # workaround to get transparent lines plots[[i]] <- plots[[i]] + scale_color_manual(values = alpha(colors, 0.1)) } replace_args(.spaghetti_args, dont_replace) <- spaghetti_args plots[[i]] <- plots[[i]] + do_call(geom_smooth, .spaghetti_args) } if (is.numeric(x[[i]]$effect1__)) { # line plots for numeric predictors .line_args <- list(stat = "identity") if (!is.null(spaghetti)) { # display a white 
mean regression line if (!is.null(gvar)) { .line_args$mapping <- aes(group = .data[[gvar]]) } .line_args$colour <- alpha("white", 0.8) } replace_args(.line_args, dont_replace) <- line_args if (mean || is.null(spaghetti)) { plots[[i]] <- plots[[i]] + do_call(geom_smooth, .line_args) } if (rug) { .rug_args <- list( aes(x = .data[["effect1__"]]), sides = "b", data = df_points, inherit.aes = FALSE ) if (is_like_factor(df_points[, gvar])) { .point_args$mapping[c("colour", "fill")] <- aes(colour = .data[[gvar]], fill = .data[[gvar]]) } replace_args(.rug_args, dont_replace) <- rug_args plots[[i]] <- plots[[i]] + do_call(geom_rug, .rug_args) } } else { # points and errorbars for factors .cat_args <- list( position = position_dodge(width = 0.4), size = 4 / ncond^0.25 ) .errorbar_args <- list( position = position_dodge(width = 0.4), width = 0.3 ) replace_args(.cat_args, dont_replace) <- cat_args replace_args(.errorbar_args, dont_replace) <- errorbar_args plots[[i]] <- plots[[i]] + do_call(geom_point, .cat_args) + do_call(geom_errorbar, .errorbar_args) } if (categorical) { plots[[i]] <- plots[[i]] + ylab(catscale) + labs(fill = response, color = response) } } if (ncond > 1L) { # one plot per row of conditions if (is.null(ncol)) { ncol <- max(floor(sqrt(ncond)), 3) } .facet_args <- nlist(facets = "cond__", ncol) replace_args(.facet_args, dont_replace) <- facet_args plots[[i]] <- plots[[i]] + do_call(facet_wrap, .facet_args) } plots[[i]] <- plots[[i]] + theme if (plot) { plot(plots[[i]]) if (i == 1) { devAskNewPage(ask = ask) } } } invisible(plots) } # the name 'marginal_effects' is deprecated as of brms 2.10.3 # do not remove it eventually as it has been used in the brms papers #' @export marginal_effects <- function(x, ...) { UseMethod("marginal_effects") } #' @export marginal_effects.brmsfit <- function(x, ...) { warning2("Method 'marginal_effects' is deprecated. ", "Please use 'conditional_effects' instead.") conditional_effects.brmsfit(x, ...) 
} #' @export print.brmsMarginalEffects <- function(x, ...) { class(x) <- "brms_conditional_effects" print(x, ...) } #' @export plot.brmsMarginalEffects <- function(x, ...) { class(x) <- "brms_conditional_effects" plot(x, ...) } brms/R/conditional_smooths.R0000644000176200001440000002011414213413565015622 0ustar liggesusers#' Display Smooth Terms #' #' Display smooth \code{s} and \code{t2} terms of models #' fitted with \pkg{brms}. #' #' @aliases marginal_smooths marginal_smooths.brmsfit #' #' @inheritParams conditional_effects.brmsfit #' @param smooths Optional character vector of smooth terms #' to display. If \code{NULL} (the default) all smooth terms #' are shown. #' @param ndraws Positive integer indicating how many #' posterior draws should be used. #' If \code{NULL} (the default) all draws are used. #' Ignored if \code{draw_ids} is not \code{NULL}. #' @param draw_ids An integer vector specifying #' the posterior draws to be used. #' If \code{NULL} (the default), all draws are used. #' @param nsamples Deprecated alias of \code{ndraws}. #' @param subset Deprecated alias of \code{draw_ids}. #' @param ... Currently ignored. #' #' @return For the \code{brmsfit} method, #' an object of class \code{brms_conditional_effects}. See #' \code{\link{conditional_effects}} for #' more details and documentation of the related plotting function. #' #' @details Two-dimensional smooth terms will be visualized using #' either contour or raster plots. 
#' #' @examples #' \dontrun{ #' set.seed(0) #' dat <- mgcv::gamSim(1, n = 200, scale = 2) #' fit <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) #' # show all smooth terms #' plot(conditional_smooths(fit), rug = TRUE, ask = FALSE) #' # show only the smooth term s(x2) #' plot(conditional_smooths(fit, smooths = "s(x2)"), ask = FALSE) #' #' # fit and plot a two-dimensional smooth term #' fit2 <- brm(y ~ t2(x0, x2), data = dat) #' ms <- conditional_smooths(fit2) #' plot(ms, stype = "contour") #' plot(ms, stype = "raster") #' } #' #' @export conditional_smooths.brmsfit <- function(x, smooths = NULL, int_conditions = NULL, prob = 0.95, spaghetti = FALSE, resolution = 100, too_far = 0, ndraws = NULL, draw_ids = NULL, nsamples = NULL, subset = NULL, probs = NULL, ...) { probs <- validate_ci_bounds(prob, probs = probs) spaghetti <- as_one_logical(spaghetti) draw_ids <- use_alias(draw_ids, subset) ndraws <- use_alias(ndraws, nsamples) contains_draws(x) x <- restructure(x) x <- exclude_terms(x, incl_autocor = FALSE) smooths <- rm_wsp(as.character(smooths)) conditions <- prepare_conditions(x) draw_ids <- validate_draw_ids(x, draw_ids, ndraws) bterms <- brmsterms(exclude_terms(x$formula, smooths_only = TRUE)) out <- conditional_smooths( bterms, fit = x, smooths = smooths, conditions = conditions, int_conditions = int_conditions, too_far = too_far, resolution = resolution, probs = probs, spaghetti = spaghetti, draw_ids = draw_ids ) if (!length(out)) { stop2("No valid smooth terms found in the model.") } structure(out, class = "brms_conditional_effects", smooths_only = TRUE) } #' @rdname conditional_smooths.brmsfit #' @export conditional_smooths <- function(x, ...) { UseMethod("conditional_smooths") } #' @export conditional_smooths.default <- function(x, ...) { NULL } #' @export conditional_smooths.mvbrmsterms <- function(x, ...) { out <- list() for (r in names(x$terms)) { c(out) <- conditional_smooths(x$terms[[r]], ...) 
} out } #' @export conditional_smooths.brmsterms <- function(x, ...) { out <- list() for (dp in names(x$dpars)) { c(out) <- conditional_smooths(x$dpars[[dp]], ...) } for (nlp in names(x$nlpars)) { c(out) <- conditional_smooths(x$nlpars[[nlp]], ...) } out } # conditional smooths for a single predicted parameter # @param fit brmsfit object # @param smooths optional names of smooth terms to plot # @param conditions output of prepare_conditions # @param int_conditions values of by-vars at which to evalute smooths # @param ...: currently ignored # @return a named list with one element per smooth term #' @export conditional_smooths.btl <- function(x, fit, smooths, conditions, int_conditions, probs, resolution, too_far, spaghetti, ...) { stopifnot(is.brmsfit(fit)) out <- list() mf <- model.frame(fit) smef <- tidy_smef(x, mf) # fixes issue #1265 smef$term <- rm_wsp(smef$term) smterms <- unique(smef$term) if (!length(smooths)) { I <- seq_along(smterms) } else { I <- which(smterms %in% smooths) } for (i in I) { # loop over smooth terms and compute their predictions smooth <- smterms[i] sub_smef <- subset2(smef, term = smooth) # extract raw variable names before transformations covars <- all_vars(sub_smef$covars[[1]]) byvars <- all_vars(sub_smef$byvars[[1]]) ncovars <- length(covars) if (ncovars > 2L) { byvars <- c(covars[3:ncovars], byvars) covars <- covars[1:2] ncovars <- 2L } vars <- c(covars, byvars) values <- named_list(vars) is_numeric <- setNames(rep(FALSE, ncovars), covars) for (cv in covars) { is_numeric[cv] <- is.numeric(mf[[cv]]) if (cv %in% names(int_conditions)) { int_cond <- int_conditions[[cv]] if (is.function(int_cond)) { int_cond <- int_cond(mf[[cv]]) } values[[cv]] <- int_cond } else if (is_numeric[cv]) { values[[cv]] <- seq( min(mf[[cv]]), max(mf[[cv]]), length.out = resolution ) } else { values[[cv]] <- levels(factor(mf[[cv]])) } } for (cv in byvars) { if (cv %in% names(int_conditions)) { int_cond <- int_conditions[[cv]] if (is.function(int_cond)) { 
int_cond <- int_cond(mf[[cv]]) } values[[cv]] <- int_cond } else if (is.numeric(mf[[cv]])) { mean2 <- mean(mf[[cv]], na.rm = TRUE) sd2 <- sd(mf[[cv]], na.rm = TRUE) values[[cv]] <- (-1:1) * sd2 + mean2 } else { values[[cv]] <- levels(factor(mf[[cv]])) } } newdata <- expand.grid(values) if (ncovars == 2L && too_far > 0) { # exclude prediction grid points too far from data ex_too_far <- mgcv::exclude.too.far( g1 = newdata[[covars[1]]], g2 = newdata[[covars[2]]], d1 = mf[, covars[1]], d2 = mf[, covars[2]], dist = too_far ) newdata <- newdata[!ex_too_far, ] } other_vars <- setdiff(names(conditions), vars) newdata <- fill_newdata(newdata, other_vars, conditions) eta <- posterior_smooths(x, fit, smooth, newdata, ...) effects <- na.omit(sub_smef$covars[[1]][1:2]) cond_data <- add_effects__(newdata[, vars, drop = FALSE], effects) if (length(byvars)) { # byvars will be plotted as facets cond_data$cond__ <- rows2labels(cond_data[, byvars, drop = FALSE]) } else { cond_data$cond__ <- factor(1) } spa_data <- NULL if (spaghetti && ncovars == 1L && is_numeric[1]) { sample <- rep(seq_rows(eta), each = ncol(eta)) spa_data <- data.frame(as.numeric(t(eta)), factor(sample)) colnames(spa_data) <- c("estimate__", "sample__") spa_data <- cbind(cond_data, spa_data) } eta <- posterior_summary(eta, robust = TRUE, probs = probs) colnames(eta) <- c("estimate__", "se__", "lower__", "upper__") eta <- cbind(cond_data, eta) response <- combine_prefix(x, keep_mu = TRUE) response <- paste0(response, ": ", smooth) points <- mf[, vars, drop = FALSE] points <- add_effects__(points, covars) attr(eta, "response") <- response attr(eta, "effects") <- effects attr(eta, "surface") <- all(is_numeric) && ncovars == 2L attr(eta, "spaghetti") <- spa_data attr(eta, "points") <- points out[[response]] <- eta } out } # the name 'marginal_smooths' is deprecated as of brms 2.10.3 # do not remove it eventually as it has been used in the brms papers #' @export marginal_smooths <- function(x, ...) 
{ UseMethod("marginal_smooths") } #' @export marginal_smooths.brmsfit <- function(x, ...) { warning2("Method 'marginal_smooths' is deprecated. ", "Please use 'conditional_smooths' instead.") conditional_smooths.brmsfit(x, ...) } brms/R/loo_subsample.R0000644000176200001440000000472314213413565014417 0ustar liggesusers#' Efficient approximate leave-one-out cross-validation (LOO) using subsampling #' #' @aliases loo_subsample #' #' @inheritParams loo.brmsfit #' #' @details More details can be found on #' \code{\link[loo:loo_subsample]{loo_subsample}}. #' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler) #' (loo1 <- loo_subsample(fit1)) #' #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' (loo2 <- loo_subsample(fit2)) #' #' # compare both models #' loo_compare(loo1, loo2) #' } #' #' @importFrom loo loo_subsample #' @export loo_subsample #' @export loo_subsample.brmsfit <- function(x, ..., compare = TRUE, resp = NULL, model_names = NULL) { args <- split_dots(x, ..., model_names = model_names) c(args) <- nlist( criterion = "loo_subsample", compare, resp, add_point_estimate = TRUE ) do_call(compute_loolist, args) } # compute 'loo_subsample' criterion using the 'loo' package # @param model_name ignored but included to avoid being passed to '...' .loo_subsample <- function(x, newdata, resp, model_name, ...) { loo_args <- prepare_loo_args( x, newdata = newdata, resp = resp, pointwise = TRUE, ... 
) do_call("loo_subsample", loo_args, pkg = "loo") } # methods required in loo_subsample #' @importFrom loo .ndraws #' @export .ndraws.brmsprep <- function(x) { x$ndraws } #' @export .ndraws.mvbrmsprep <- function(x) { x$ndraws } #' @importFrom loo .thin_draws #' @export .thin_draws.brmsprep <- function(draws, loo_approximation_draws) { # brmsprep objects are too complex to implement a post-hoc subsetting method if (length(loo_approximation_draws)) { stop2("'loo_approximation_draws' is not supported for brmsfit objects.") } draws } #' @export .thin_draws.mvbrmsprep <- function(draws, loo_approximation_draws) { if (length(loo_approximation_draws)) { stop2("'loo_approximation_draws' is not supported for brmsfit objects.") } draws } #' @importFrom loo .compute_point_estimate #' @export .compute_point_estimate.brmsprep <- function(draws) { # point estimates are stored in the form of an attribute rather # than computed on the fly due to the complexity of brmsprep objects attr(draws, "point_estimate") } #' @export .compute_point_estimate.mvbrmsprep <- function(draws) { attr(draws, "point_estimate") } brms/R/brmsterms.R0000644000176200001440000010542114454230164013565 0ustar liggesusers#' Parse Formulas of \pkg{brms} Models #' #' Parse formulas objects for use in \pkg{brms}. #' #' @aliases parse_bf #' #' @inheritParams brm #' @param check_response Logical; Indicates whether the left-hand side #' of \code{formula} (i.e. response variables and addition arguments) #' should be parsed. If \code{FALSE}, \code{formula} may also be one-sided. #' @param resp_rhs_all Logical; Indicates whether to also include response #' variables on the right-hand side of formula \code{.$allvars}, #' where \code{.} represents the output of \code{brmsterms}. #' @param ... Further arguments passed to or from other methods. 
#' #' @return An object of class \code{brmsterms} or \code{mvbrmsterms} #' (for multivariate models), which is a \code{list} containing all #' required information initially stored in \code{formula} #' in an easier to use format, basically a list of formulas #' (not an abstract syntax tree). #' #' @details This is the main formula parsing function of \pkg{brms}. #' It should usually not be called directly, but is exported to allow #' package developers making use of the formula syntax implemented #' in \pkg{brms}. As long as no other packages depend on this functions, #' it may be changed without deprecation warnings, when new features make #' this necessary. #' #' @seealso #' \code{\link{brm}}, #' \code{\link{brmsformula}}, #' \code{\link{mvbrmsformula}} #' #' @export brmsterms <- function(formula, ...) { UseMethod("brmsterms") } # the name 'parse_bf' is deprecated as of brms 2.12.4 # remove it eventually in brms 3.0 #' @export parse_bf <- function(x, ...) { warning2("Method 'parse_bf' is deprecated. Please use 'brmsterms' instead.") UseMethod("brmsterms") } #' @rdname brmsterms #' @export brmsterms.default <- function(formula, ...) { brmsterms(validate_formula(formula), ...) } #' @rdname brmsterms #' @export brmsterms.brmsformula <- function(formula, check_response = TRUE, resp_rhs_all = TRUE, ...) 
{ x <- validate_formula(formula) mv <- isTRUE(x$mv) rescor <- mv && isTRUE(x$rescor) mecor <- isTRUE(x$mecor) formula <- x$formula family <- x$family y <- nlist(formula, family, mv, rescor, mecor) y$cov_ranef <- x$cov_ranef class(y) <- "brmsterms" if (check_response) { # extract response variables y$respform <- validate_resp_formula(formula, empty_ok = FALSE) if (mv) { y$resp <- terms_resp(y$respform) } else { y$resp <- "" } } # extract addition arguments adforms <- terms_ad(formula, family, check_response) advars <- str2formula(ulapply(adforms, all_vars)) y$adforms[names(adforms)] <- adforms # centering would lead to incorrect results for grouped threshold vectors # as each threshold vector only affects a subset of observations if (!is.null(get_ad_expr(y, "thres", "gr"))) { attr(formula, "center") <- FALSE dp_classes <- dpar_class(names(x$pforms)) mu_names <- names(x$pforms)[dp_classes == "mu"] for (dp in mu_names) { attr(x$pforms[[dp]], "center") <- FALSE } } # combine the main formula with formulas for the 'mu' parameters if (is.mixfamily(family)) { mu_dpars <- paste0("mu", seq_along(family$mix)) for (dp in mu_dpars) { x$pforms[[dp]] <- combine_formulas(formula, x$pforms[[dp]], dp) } x$pforms <- move2start(x$pforms, mu_dpars) } else if (conv_cats_dpars(x$family)) { mu_dpars <- str_subset(x$family$dpars, "^mu") for (dp in mu_dpars) { x$pforms[[dp]] <- combine_formulas(formula, x$pforms[[dp]], dp) } x$pforms <- move2start(x$pforms, mu_dpars) } else { x$pforms[["mu"]] <- combine_formulas(formula, x$pforms[["mu"]], "mu") x$pforms <- move2start(x$pforms, "mu") } # predicted distributional parameters resp <- ifelse(mv && !is.null(y$resp), y$resp, "") dpars <- intersect(names(x$pforms), valid_dpars(family)) dpar_forms <- x$pforms[dpars] nlpars <- setdiff(names(x$pforms), dpars) y$dpars <- named_list(dpars) for (dp in dpars) { if (get_nl(dpar_forms[[dp]])) { y$dpars[[dp]] <- terms_nlf(dpar_forms[[dp]], nlpars, resp) } else { y$dpars[[dp]] <- terms_lf(dpar_forms[[dp]]) } 
y$dpars[[dp]]$family <- dpar_family(family, dp) y$dpars[[dp]]$dpar <- dp y$dpars[[dp]]$resp <- resp if (dpar_class(dp) == "mu") { y$dpars[[dp]]$respform <- y$respform y$dpars[[dp]]$adforms <- y$adforms } y$dpars[[dp]]$transform <- stan_eta_transform(y$dpars[[dp]]$family, y) check_cs(y$dpars[[dp]]) } y$nlpars <- named_list(nlpars) if (length(nlpars)) { nlpar_forms <- x$pforms[nlpars] for (nlp in nlpars) { if (is.null(attr(nlpar_forms[[nlp]], "center"))) { # design matrices of non-linear parameters will not be # centered by default to make prior specification easier attr(nlpar_forms[[nlp]], "center") <- FALSE } if (get_nl(nlpar_forms[[nlp]])) { y$nlpars[[nlp]] <- terms_nlf(nlpar_forms[[nlp]], nlpars, resp) } else { y$nlpars[[nlp]] <- terms_lf(nlpar_forms[[nlp]]) } y$nlpars[[nlp]]$nlpar <- nlp y$nlpars[[nlp]]$resp <- resp check_cs(y$nlpars[[nlp]]) } used_nlpars <- ufrom_list(c(y$dpars, y$nlpars), "used_nlpars") unused_nlpars <- setdiff(nlpars, used_nlpars) if (length(unused_nlpars)) { stop2( "The parameter '", unused_nlpars[1], "' is not a ", "valid distributional or non-linear parameter. ", "Did you forget to set 'nl = TRUE'?" 
) } # sort non-linear parameters after dependency used_nlpars <- from_list(y$nlpars, "used_nlpars") sorted_nlpars <- sort_dependencies(used_nlpars) y$nlpars <- y$nlpars[sorted_nlpars] } # fixed distributional parameters valid_dpars <- valid_dpars(y) inv_fixed_dpars <- setdiff(names(x$pfix), valid_dpars) if (length(inv_fixed_dpars)) { stop2("Invalid fixed parameters: ", collapse_comma(inv_fixed_dpars)) } if ("sigma" %in% valid_dpars && no_sigma(y)) { # some models require setting sigma to 0 if ("sigma" %in% c(names(x$pforms), names(x$pfix))) { stop2("Cannot predict or fix 'sigma' in this model.") } x$pfix$sigma <- 0 } if ("nu" %in% valid_dpars && no_nu(y)) { if ("nu" %in% c(names(x$pforms), names(x$pfix))) { stop2("Cannot predict or fix 'nu' in this model.") } x$pfix$nu <- 1 } disc_pars <- valid_dpars[dpar_class(valid_dpars) %in% "disc"] for (dp in disc_pars) { # 'disc' is set to 1 and not estimated by default if (!dp %in% c(names(x$pforms), names(x$pfix))) { x$pfix[[dp]] <- 1 } } for (dp in names(x$pfix)) { y$fdpars[[dp]] <- list(value = x$pfix[[dp]], dpar = dp) } check_fdpars(y$fdpars) # make a formula containing all required variables y$unused <- attr(x$formula, "unused") lhsvars <- if (resp_rhs_all) all_vars(y$respform) y$allvars <- allvars_formula( lhsvars, advars, lapply(y$dpars, get_allvars), lapply(y$nlpars, get_allvars), y$time$allvars, get_unused_arg_vars(y), .env = environment(formula) ) if (check_response) { # add y$respform to the left-hand side of y$allvars # avoid using update.formula as it is inefficient for longer formulas formula_allvars <- y$respform formula_allvars[[3]] <- y$allvars[[2]] environment(formula_allvars) <- environment(y$allvars) y$allvars <- formula_allvars } y } #' @rdname brmsterms #' @export brmsterms.mvbrmsformula <- function(formula, ...) 
{ x <- validate_formula(formula) x$rescor <- isTRUE(x$rescor) x$mecor <- isTRUE(x$mecor) out <- structure(list(), class = "mvbrmsterms") out$terms <- named_list(names(x$forms)) for (i in seq_along(out$terms)) { x$forms[[i]]$rescor <- x$rescor x$forms[[i]]$mecor <- x$mecor x$forms[[i]]$mv <- TRUE out$terms[[i]] <- brmsterms(x$forms[[i]], ...) } list_allvars <- lapply(out$terms, get_allvars) out$allvars <- allvars_formula( list_allvars, .env = environment(list_allvars[[1]]) ) # required to find variables used solely in the response part lhs_resp <- function(x) deparse0(lhs(x$respform)[[2]]) out$respform <- paste0(ulapply(out$terms, lhs_resp), collapse = ",") out$respform <- formula(paste0("mvbind(", out$respform, ") ~ 1")) out$responses <- ufrom_list(out$terms, "resp") out$rescor <- x$rescor out$mecor <- x$mecor out$cov_ranef <- x$cov_ranef out } # parse linear/additive formulas # @param formula an ordinary model formula # @return a 'btl' object terms_lf <- function(formula) { formula <- rhs(as.formula(formula)) y <- nlist(formula) formula <- terms(formula) check_accidental_helper_functions(formula) types <- setdiff(all_term_types(), excluded_term_types(formula)) for (t in types) { tmp <- do_call(paste0("terms_", t), list(formula)) if (is.data.frame(tmp) || is.formula(tmp)) { y[[t]] <- tmp } } y$allvars <- allvars_formula( get_allvars(y$fe), get_allvars(y$re), get_allvars(y$cs), get_allvars(y$sp), get_allvars(y$sm), get_allvars(y$gp), get_allvars(y$ac), get_allvars(y$offset) ) structure(y, class = "btl") } # parse non-linear formulas # @param formula non-linear model formula # @param nlpars names of all non-linear parameters # @param resp optional name of a response variable # @return a 'btnl' object terms_nlf <- function(formula, nlpars, resp = "") { if (!length(nlpars)) { stop2("No non-linear parameters specified.") } loop <- !isFALSE(attr(formula, "loop")) formula <- rhs(as.formula(formula)) y <- nlist(formula) all_vars <- all_vars(formula) y$used_nlpars <- 
intersect(all_vars, nlpars) covars <- setdiff(all_vars, nlpars) y$covars <- structure(str2formula(covars), int = FALSE) if (!"ac" %in% excluded_term_types(formula)) { y$ac <- terms_ac(attr(formula, "autocor")) } y$allvars <- allvars_formula(covars, get_allvars(y$ac)) y$loop <- loop structure(y, class = "btnl") } # extract addition arguments out of formula # @return a list of formulas each containg a single addition term terms_ad <- function(formula, family = NULL, check_response = TRUE) { x <- list() ad_funs <- lsp("brms", what = "exports", pattern = "^resp_") ad_funs <- sub("^resp_", "", ad_funs) families <- family_names(family) if (is.family(family) && any(nzchar(families))) { str_formula <- formula2str(formula) ad <- get_matches("(?<=\\|)[^~]*(?=~)", str_formula, perl = TRUE) valid_ads <- family_info(family, "ad") if (length(ad)) { ad_terms <- terms(str2formula(ad)) if (length(attr(ad_terms, "offset"))) { stop2("Offsets are not allowed in addition terms.") } ad_terms <- attr(ad_terms, "term.labels") for (a in ad_funs) { matches <- grep(paste0("^(resp_)?", a, "\\(.*\\)$"), ad_terms) if (length(matches) == 1L) { x[[a]] <- ad_terms[matches] if (!grepl("^resp_", x[[a]])) { x[[a]] <- paste0("resp_", x[[a]]) } ad_terms <- ad_terms[-matches] if (!is.na(x[[a]]) && a %in% valid_ads) { x[[a]] <- str2formula(x[[a]]) } else { stop2("Argument '", a, "' is not supported for ", "family '", summary(family), "'.") } } else if (length(matches) > 1L) { stop2("Each addition argument may only be defined once.") } } if (length(ad_terms)) { stop2("The following addition terms are invalid:\n", collapse_comma(ad_terms)) } } if (check_response && "wiener" %in% families && !is.formula(x$dec)) { stop2("Addition argument 'dec' is required for family 'wiener'.") } if (is.formula(x$cat)) { # 'cat' was replaced by 'thres' in brms 2.10.5 x$thres <- x$cat } } x } # extract fixed effects terms terms_fe <- function(formula) { if (!is.terms(formula)) { formula <- terms(formula) } all_terms <- 
all_terms(formula) sp_terms <- find_terms(all_terms, "all", complete = FALSE) re_terms <- all_terms[grepl("\\|", all_terms)] int_term <- attr(formula, "intercept") fe_terms <- setdiff(all_terms, c(sp_terms, re_terms)) out <- paste(c(int_term, fe_terms), collapse = "+") out <- str2formula(out) attr(out, "allvars") <- allvars_formula(out) attr(out, "decomp") <- get_decomp(formula) if (has_rsv_intercept(out, has_intercept(formula))) { attr(out, "int") <- FALSE } if (no_cmc(formula)) { attr(out, "cmc") <- FALSE } if (no_center(formula)) { attr(out, "center") <- FALSE } if (is_sparse(formula)) { attr(out, "sparse") <- TRUE } out } # gather information of group-level terms # @return a data.frame with one row per group-level term terms_re <- function(formula) { re_terms <- get_re_terms(formula, brackets = FALSE) if (!length(re_terms)) { return(NULL) } re_terms <- split_re_terms(re_terms) re_parts <- re_parts(re_terms) out <- allvars <- vector("list", length(re_terms)) type <- attr(re_terms, "type") for (i in seq_along(re_terms)) { gcall <- eval2(re_parts$rhs[i]) form <- str2formula(re_parts$lhs[i]) group <- paste0(gcall$type, collapse(gcall$groups)) out[[i]] <- data.frame( group = group, gtype = gcall$type, gn = i, id = gcall$id, type = type[i], cor = gcall$cor, stringsAsFactors = FALSE ) out[[i]]$gcall <- list(gcall) out[[i]]$form <- list(form) # gather all variables used in the group-level term # at this point 'cs' terms are no longer recognized as such ftype <- str_if(type[i] %in% "cs", "", type[i]) re_allvars <- get_allvars(form, type = ftype) allvars[[i]] <- allvars_formula(re_allvars, gcall$allvars) } out <- do_call(rbind, out) out <- out[order(out$group), ] attr(out, "allvars") <- allvars_formula(allvars) if (no_cmc(formula)) { # disabling cell-mean coding in all group-level terms # has to come last to avoid removal of attributes for (i in seq_rows(out)) { attr(out$form[[i]], "cmc") <- FALSE } } out } # extract category specific terms for ordinal models terms_cs <- 
function(formula) { out <- find_terms(formula, "cs") if (!length(out)) { return(NULL) } out <- ulapply(out, eval2, envir = environment()) out <- str2formula(out) attr(out, "allvars") <- allvars_formula(out) # do not test whether variables were supplied to 'cs' # to allow category specific group-level intercepts attr(out, "int") <- FALSE out } # extract special effects terms terms_sp <- function(formula) { types <- c("mo", "me", "mi") out <- find_terms(formula, types, complete = FALSE) if (!length(out)) { return(NULL) } uni_mo <- get_matches_expr(regex_sp("mo"), out) uni_me <- get_matches_expr(regex_sp("me"), out) uni_mi <- get_matches_expr(regex_sp("mi"), out) # remove the intercept as it is handled separately out <- str2formula(c("0", out)) attr(out, "int") <- FALSE attr(out, "uni_mo") <- uni_mo attr(out, "uni_me") <- uni_me attr(out, "uni_mi") <- uni_mi attr(out, "allvars") <- str2formula(all_vars(out)) # TODO: do we need sp_fake_formula at all? # attr(out, "allvars") <- sp_fake_formula(uni_mo, uni_me, uni_mi) out } # extract spline terms terms_sm <- function(formula) { out <- find_terms(formula, "sm") if (!length(out)) { return(NULL) } if (any(grepl("^(te|ti)\\(", out))) { stop2("Tensor product smooths 'te' and 'ti' are not yet ", "implemented in brms. 
Consider using 't2' instead.") } out <- str2formula(out) attr(out, "allvars") <- mgcv::interpret.gam(out)$fake.formula out } # extract gaussian process terms terms_gp <- function(formula) { out <- find_terms(formula, "gp") if (!length(out)) { return(NULL) } eterms <- lapply(out, eval2, envir = environment()) covars <- from_list(eterms, "term") byvars <- from_list(eterms, "by") allvars <- str2formula(unlist(c(covars, byvars))) allvars <- str2formula(all_vars(allvars)) if (!length(all_vars(allvars))) { stop2("No variable supplied to function 'gp'.") } out <- str2formula(out) attr(out, "allvars") <- allvars out } # extract autocorrelation terms terms_ac <- function(formula) { autocor <- attr(formula, "autocor") out <- c(find_terms(formula, "ac"), find_terms(autocor, "ac")) if (!length(out)) { return(NULL) } eterms <- lapply(out, eval2, envir = environment()) allvars <- unlist(c( from_list(eterms, "time"), from_list(eterms, "gr") )) allvars <- str2formula(all_vars(allvars)) out <- str2formula(out) attr(out, "allvars") <- allvars out } # extract offset terms terms_offset <- function(formula) { if (!is.terms(formula)) { formula <- terms(as.formula(formula)) } pos <- attr(formula, "offset") if (is.null(pos)) { return(NULL) } vars <- attr(formula, "variables") out <- ulapply(pos, function(i) deparse0(vars[[i + 1]])) out <- str2formula(out) attr(out, "allvars") <- str2formula(all_vars(out)) out } # extract multiple covariates in multi-membership terms terms_mmc <- function(formula) { out <- find_terms(formula, "mmc") if (!length(out)) { return(NULL) } # remove the intercept as it is handled separately out <- str2formula(c("0", out)) attr(out, "allvars") <- allvars_formula(out) attr(out, "int") <- FALSE out } # extract response variable names # assumes multiple response variables to be combined via mvbind terms_resp <- function(formula, check_names = TRUE) { formula <- lhs(as.formula(formula)) if (is.null(formula)) { return(NULL) } expr <- validate_resp_formula(formula)[[2]] 
if (length(expr) <= 1L) { out <- deparse_no_string(expr) } else { str_fun <- deparse_no_string(expr[[1]]) used_mvbind <- grepl("^(brms:::?)?mvbind$", str_fun) if (used_mvbind) { out <- ulapply(expr[-1], deparse_no_string) } else { out <- deparse_no_string(expr) } } if (check_names) { out <- make_stan_names(out) } out } #' Checks if argument is a \code{brmsterms} object #' #' @param x An \R object #' #' @seealso \code{\link[brms:brmsterms]{brmsterms}} #' #' @export is.brmsterms <- function(x) { inherits(x, "brmsterms") } #' Checks if argument is a \code{mvbrmsterms} object #' #' @param x An \R object #' #' @seealso \code{\link[brms:brmsterms]{brmsterms}} #' #' @export is.mvbrmsterms <- function(x) { inherits(x, "mvbrmsterms") } is.btl <- function(x) { inherits(x, "btl") } is.btnl <- function(x) { inherits(x, "btnl") } # transform mvbrmsterms objects for use in stan_llh.brmsterms as.brmsterms <- function(x) { stopifnot(is.mvbrmsterms(x), x$rescor) families <- ulapply(x$terms, function(y) y$family$family) stopifnot(all(families == families[1])) out <- structure(list(), class = "brmsterms") out$family <- structure( list(family = paste0(families[1], "_mv"), link = "identity"), class = c("brmsfamily", "family") ) info <- get(paste0(".family_", families[1]))() out$family[names(info)] <- info out$sigma_pred <- any(ulapply(x$terms, function(x) "sigma" %in% names(x$dpar) || is.formula(x$adforms$se) )) weight_forms <- rmNULL(lapply(x$terms, function(x) x$adforms$weights)) if (length(weight_forms)) { str_wf <- unique(ulapply(weight_forms, formula2str)) if (length(str_wf) > 1L) { stop2("All responses should use the same", "weights if 'rescor' is estimated.") } out$adforms$weights <- weight_forms[[1]] } miforms <- rmNULL(lapply(x$terms, function(x) x$adforms$mi)) if (length(miforms)) { out$adforms$mi <- miforms[[1]] } out } # names of supported term types all_term_types <- function() { c("fe", "re", "sp", "cs", "sm", "gp", "ac", "offset") } # avoid ambiguous parameter names # 
@param names names to check for ambiguity # @param bterms a brmsterms object avoid_dpars <- function(names, bterms) { dpars <- c(names(bterms$dpars), "sp", "cs") if (length(dpars)) { dpars_prefix <- paste0("^", dpars, "_") invalid <- any(ulapply(dpars_prefix, grepl, names)) if (invalid) { dpars <- paste0("'", dpars, "_'", collapse = ", ") stop2("Variable names starting with ", dpars, " are not allowed for this model.") } } invisible(NULL) } vars_prefix <- function() { c("dpar", "resp", "nlpar") } # check and tidy parameter prefixes check_prefix <- function(x, keep_mu = FALSE) { vpx <- vars_prefix() if (is.data.frame(x) && nrow(x) == 0) { # avoids a bug in data.frames with zero rows x <- list() } x[setdiff(vpx, names(x))] <- "" x <- x[vpx] for (i in seq_along(x)) { x[[i]] <- as.character(x[[i]]) if (!length(x[[i]])) { x[[i]] <- "" } x[[i]] <- ifelse( !keep_mu & names(x)[i] == "dpar" & x[[i]] %in% "mu", yes = "", no = x[[i]] ) x[[i]] <- ifelse( keep_mu & names(x)[i] == "dpar" & x[[i]] %in% "", yes = "mu", no = x[[i]] ) } x } # combined parameter prefixes # @param prefix object from which to extract prefixes # @param keep_mu keep the 'mu' prefix if available or remove it? # @param nlp include the 'nlp' prefix for non-linear parameters? 
combine_prefix <- function(prefix, keep_mu = FALSE, nlp = FALSE) { prefix <- check_prefix(prefix, keep_mu = keep_mu) if (is_nlpar(prefix) && nlp) { prefix$dpar <- "nlp" } prefix <- lapply(prefix, usc) sub("^_", "", do_call(paste0, prefix)) } # check validity of fixed distributional parameters check_fdpars <- function(x) { stopifnot(is.null(x) || is.list(x)) pos_pars <- c( "sigma", "shape", "nu", "phi", "kappa", "beta", "disc", "bs", "ndt", "theta" ) prob_pars <- c("zi", "hu", "bias", "quantile") for (dp in names(x)) { apc <- dpar_class(dp) value <- x[[dp]]$value if (apc %in% pos_pars && value < 0) { stop2("Parameter '", dp, "' must be positive.") } if (apc %in% prob_pars && (value < 0 || value > 1)) { stop2("Parameter '", dp, "' must be between 0 and 1.") } } invisible(TRUE) } # combine all variables in one formuula # @param x (list of) formulas or character strings # @return a formula with all variables on the right-hand side allvars_formula <- function(..., .env = parent.frame()) { out <- rmNULL(c(...)) out <- collapse(ulapply(out, plus_rhs)) all_vars <- all_vars(out) invalid_vars <- setdiff(all_vars, make.names(all_vars)) if (length(invalid_vars)) { stop2("The following variable names are invalid: ", collapse_comma(invalid_vars)) } str2formula(c(out, all_vars), env = .env) } # conveniently extract a formula of all relevant variables # @param x any object from which to extract 'allvars' # @param type predictor type; requires a 'parse_' function # @return a formula with all variables on the right-hand side # or NULL if 'allvars' cannot be found get_allvars <- function(x, type = "") { out <- attr(x, "allvars", TRUE) if (is.null(out) && "allvars" %in% names(x)) { out <- x[["allvars"]] } if (is.null(out) && is.formula(x)) { type <- as_one_character(type) type <- str_if(nzchar(type), type, "fe") terms_fun <- get(paste0("terms_", type), mode = "function") out <- attr(terms_fun(x), "allvars") } stopifnot(is.null(out) || is.formula(out)) out } # add 'x' to the right-hand 
side of a formula plus_rhs <- function(x) { if (is.formula(x)) { x <- sub("^[^~]*~", "", formula2str(x)) } if (length(x) && all(nzchar(x))) { out <- paste0(" + ", paste(x, collapse = "+")) } else { out <- " + 1" } out } # like stats::terms but keeps attributes if possible terms <- function(formula, ...) { old_attributes <- attributes(formula) formula <- stats::terms(formula, ...) new_attributes <- attributes(formula) sel_names <- setdiff(names(old_attributes), names(new_attributes)) attributes(formula)[sel_names] <- old_attributes[sel_names] formula } is.terms <- function(x) { inherits(x, "terms") } # combine formulas for distributional parameters # @param formula1 primary formula from which to take the RHS # @param formula2 secondary formula used to update the RHS of formula1 # @param lhs character string to define the left-hand side of the output # @param update a flag to indicate whether updating should be allowed. # Defaults to FALSE to maintain backwards compatibility # @return a formula object combine_formulas <- function(formula1, formula2, lhs = "", update = FALSE) { stopifnot(is.formula(formula1)) stopifnot(is.null(formula2) || is.formula(formula2)) lhs <- as_one_character(lhs) update <- as_one_logical(update) if (is.null(formula2)) { rhs <- str_rhs(formula1) att <- attributes(formula1) } else if (update && has_terms(formula1)) { # TODO: decide about intuitive updating behavior if (get_nl(formula1) || get_nl(formula2)) { stop2("Cannot combine non-linear formulas.") } old_formula <- eval2(paste0("~ ", str_rhs(formula1))) new_formula <- eval2(paste0("~ . + ", str_rhs(formula2))) rhs <- str_rhs(update(old_formula, new_formula)) att <- attributes(formula1) att[names(attributes(formula2))] <- attributes(formula2) } else { rhs <- str_rhs(formula2) att <- attributes(formula2) } out <- eval2(paste0(lhs, " ~ ", rhs)) attributes(out)[names(att)] <- att out } # does the formula contain any terms? 
# @return TRUE or FALSE
# does the formula contain any terms (population-level effects or offsets)?
has_terms <- function(formula) {
  stopifnot(is.formula(formula))
  trms <- try(terms(rhs(formula)), silent = TRUE)
  if (is_try_error(trms)) {
    return(TRUE)
  }
  length(attr(trms, "term.labels")) || length(attr(trms, "offset"))
}

# has a linear formula any terms except overall effects?
has_special_terms <- function(x) {
  if (!is.btl(x)) {
    return(FALSE)
  }
  specials <- c("sp", "sm", "gp", "ac", "cs", "offset")
  NROW(x[["re"]]) > 0 || any(lengths(x[specials]))
}

# indicate if the predictor term belongs to a non-linear parameter
is_nlpar <- function(x) {
  isTRUE(nzchar(x[["nlpar"]]))
}

# indicate if the intercept should be removed
no_int <- function(x) {
  isFALSE(attr(x, "int", exact = TRUE))
}

# indicate if cell mean coding should be disabled
no_cmc <- function(x) {
  isFALSE(attr(x, "cmc", exact = TRUE))
}

# indicate if centering of the design matrix should be disabled
no_center <- function(x) {
  isFALSE(attr(x, "center", exact = TRUE))
}

# indicate if the design matrix should be handled as sparse
is_sparse <- function(x) {
  isTRUE(attr(x, "sparse", exact = TRUE))
}

# get the decomposition type of the design matrix
# @return a single character string; "none" if unset
get_decomp <- function(x) {
  decomp <- attr(x, "decomp", exact = TRUE)
  if (is.null(decomp)) {
    decomp <- "none"
  }
  as_one_character(decomp)
}

# extract different types of effects
get_effect <- function(x, ...) {
  UseMethod("get_effect")
}

#' @export
get_effect.default <- function(x, ...) {
  NULL
}

#' @export
get_effect.brmsfit <- function(x, ...) {
  # dispatch on the model formula stored in the fitted object
  get_effect(x$formula, ...)
}

#' @export
get_effect.brmsformula <- function(x, ...) {
  get_effect(brmsterms(x), ...)
}

#' @export
get_effect.mvbrmsformula <- function(x, ...) {
  get_effect(brmsterms(x), ...)
}

#' @export
get_effect.mvbrmsterms <- function(x, ...) {
  # collect effects across all univariate submodels
  ulapply(x$terms, get_effect, recursive = FALSE, ...)
}

# extract formulas of a certain effect type
# @param target effect type to return
# @param all logical; include effects of nlpars and dpars?
# @return a list of formulas
#' @export
get_effect.brmsterms <- function(x, target = "fe", ...) {
  # gather effects of type 'target' for all distributional and
  # non-linear parameters of this (univariate) model
  out <- named_list(c(names(x$dpars), names(x$nlpars)))
  for (dp in names(x$dpars)) {
    out[[dp]] <- get_effect(x$dpars[[dp]], target = target)
  }
  for (nlp in names(x$nlpars)) {
    out[[nlp]] <- get_effect(x$nlpars[[nlp]], target = target)
  }
  unlist(out, recursive = FALSE)
}

#' @export
get_effect.btl <- function(x, target = "fe", ...) {
  x[[target]]
}

#' @export
get_effect.btnl <- function(x, target = "fe", ...) {
  x[[target]]
}

# extract all terms of a formula-like object as a character vector
# @param x a formula, terms object, or something coercible to a formula
# @return a character vector of (whitespace-trimmed) term labels;
#   character(0) if 'x' is empty
all_terms <- function(x) {
  if (!length(x)) {
    return(character(0))
  }
  if (!is.terms(x)) {
    x <- terms(as.formula(x))
  }
  trim_wsp(attr(x, "term.labels"))
}

# generate a regular expression to extract special terms
# @param type one or more special term types to be extracted
# @return a single regex string matching any term that consists of
#   exactly one call to one of the requested special functions
# TODO: rule out expressions such as mi(y) + mi(x)
regex_sp <- function(type = "all") {
  choices <- c("all", "sp", "sm", "gp", "cs", "mmc", "ac", all_sp_types())
  type <- unique(match.arg(type, choices, several.ok = TRUE))
  # per-type patterns for the special function names
  funs <- c(
    sm = "(s|(t2)|(te)|(ti))",
    gp = "gp",
    cs = "cse?",
    mmc = "mmc",
    ac = "((arma)|(ar)|(ma)|(cosy)|(unstr)|(sar)|(car)|(fcor))"
  )
  funs[all_sp_types()] <- all_sp_types()
  if ("sp" %in% type) {
    # allows extracting all 'sp' terms at once
    type <- setdiff(type, "sp")
    type <- union(type, all_sp_types())
  }
  if ("all" %in% type) {
    # allows extracting all special terms at once
    type <- names(funs)
  }
  funs <- funs[type]
  # some special terms may contain ':' inside their arguments;
  # for the others, ':' would indicate an interaction and is excluded
  allow_colon <- c("cs", "mmc", "ac")
  inner <- ifelse(names(funs) %in% allow_colon, ".*", "[^:]*")
  out <- paste0("^(", funs, ")\\(", inner, "\\)$")
  paste0("(", out, ")", collapse = "|")
}

# find special terms of a certain type
# @param x formula object of character vector from which to extract terms
# @param type special terms type to be extracted. see regex_sp()
# @param complete check if terms consist completely of single special terms?
# @param ranef include group-level terms?
# @return a character vector of matching terms
# extract terms that call one of the special functions selected via
# regex_sp(type) from a formula or character vector of terms
find_terms <- function(x, type, complete = TRUE, ranef = FALSE) {
  if (is.formula(x)) {
    x <- all_terms(x)
  } else {
    # also accept plain character vectors of terms
    x <- trim_wsp(as.character(x))
  }
  complete <- as_one_logical(complete)
  ranef <- as_one_logical(ranef)
  regex <- regex_sp(type)
  is_match <- grepl_expr(regex, x)
  if (!ranef) {
    # '|' marks group-level terms; drop them unless explicitly requested
    is_match <- is_match & !grepl("\\|", x)
  }
  out <- x[is_match]
  if (complete) {
    matches <- lapply(out, get_matches_expr, pattern = regex)
    # each term may contain only one special function call
    invalid <- out[lengths(matches) > 1L]
    if (!length(invalid)) {
      # each term must be exactly equal to the special function call
      invalid <- out[unlist(matches) != out]
    }
    # TODO: some terms can be part of I() calls (#1520); reflect this here?
    if (length(invalid)) {
      stop2("The term '", invalid[1], "' is invalid in brms syntax.")
    }
  }
  out
}

# validate a terms object (or one that can be coerced to it)
# for use primarily in 'get_model_matrix'
# @param x any R object
# @return a (possibly amended) terms object or NULL
#   if 'x' could not be coerced to a terms object
validate_terms <- function(x) {
  # read these attributes before coercion since terms() may not keep them
  no_int <- no_int(x)
  no_cmc <- no_cmc(x)
  if (is.formula(x) && !is.terms(x)) {
    x <- terms(x)
  }
  if (!is.terms(x)) {
    return(NULL)
  }
  if (no_int || !has_intercept(x) && no_cmc) {
    # allows to remove the intercept without causing cell mean coding
    attr(x, "intercept") <- 1
    attr(x, "int") <- FALSE
  }
  x
}

# checks if the formula contains an intercept
# @param formula a formula or terms object (or something coercible to one)
# @return TRUE or FALSE; also FALSE when 'formula' cannot be
#   converted into a terms object
has_intercept <- function(formula) {
  if (is.terms(formula)) {
    out <- as.logical(attr(formula, "intercept"))
  } else {
    formula <- as.formula(formula)
    try_terms <- try(terms(formula), silent = TRUE)
    if (is_try_error(try_terms)) {
      out <- FALSE
    } else {
      out <- as.logical(attr(try_terms, "intercept"))
    }
  }
  out
}

# check if model makes use of the reserved intercept variables
# @param has_intercept does the model have an intercept?
# if NULL this will be inferred from formula itself
# check if the model uses the reserved variables 'intercept'/'Intercept'
# on the right-hand side while having no ordinary intercept
has_rsv_intercept <- function(formula, has_intercept = NULL) {
  # core check: reserved names on the RHS while no real intercept present
  check <- function(trms, has_int) {
    has_int <- as_one_logical(has_int)
    !has_int && any(c("intercept", "Intercept") %in% all_vars(rhs(trms)))
  }
  if (is.terms(formula)) {
    if (is.null(has_intercept)) {
      has_intercept <- has_intercept(formula)
    }
    return(check(formula, has_intercept))
  }
  formula <- try(as.formula(formula), silent = TRUE)
  if (is_try_error(formula)) {
    return(FALSE)
  }
  if (is.null(has_intercept)) {
    try_terms <- try(terms(formula), silent = TRUE)
    if (is_try_error(try_terms)) {
      return(FALSE)
    }
    has_intercept <- has_intercept(try_terms)
  }
  check(formula, has_intercept)
}

# names of reserved variables
# @param bterms a brmsterms or mvbrmsterms object
# @return c("intercept", "Intercept") if any distributional parameter
#   uses the reserved intercept variables; NULL otherwise
rsv_vars <- function(bterms) {
  stopifnot(is.brmsterms(bterms) || is.mvbrmsterms(bterms))
  vars_of <- function(bt) {
    uses_rsv <- any(ulapply(bt$dpars, has_rsv_intercept))
    if (uses_rsv) {
      c("intercept", "Intercept")
    } else {
      NULL
    }
  }
  if (is.mvbrmsterms(bterms)) {
    # combine over all univariate submodels
    unique(ulapply(bterms$terms, vars_of))
  } else {
    vars_of(bterms)
  }
}

# are category specific effects present?
has_cs <- function(bterms) { length(get_effect(bterms, target = "cs")) > 0L || any(get_re(bterms)$type %in% "cs") } # check if category specific effects are allowed check_cs <- function(bterms) { stopifnot(is.btl(bterms) || is.btnl(bterms)) if (has_cs(bterms)) { if (!is_equal(dpar_class(bterms$dpar), "mu")) { stop2("Category specific effects are only supported ", "for the main parameter 'mu'.") } if (!(is.null(bterms$family) || allow_cs(bterms$family))) { stop2("Category specific effects are not supported for this family.") } if (needs_ordered_cs(bterms$family)) { warning2("Category specific effects for this family should be ", "considered experimental and may have convergence issues.") } } invisible(NULL) } # check for the presence of helper functions accidentally used # within a formula instead of added to bf(). See #1103 check_accidental_helper_functions <- function(formula) { terms <- all_terms(formula) # see help("brmsformula-helpers") for the list of functions funs <- c("nlf", "lf", "acformula", "set_nl", "set_rescor", "set_mecor") regex <- paste0("(", funs, ")", collapse = "|") regex <- paste0("^(", regex, ")\\(") matches <- get_matches(regex, terms, first = TRUE) matches <- sub("\\($", "", matches) matches <- unique(matches) matches <- matches[nzchar(matches)] for (m in matches) { loc <- utils::find(m, mode = "function") if (is_equal(loc[1], "package:brms")) { stop2("Function '", m, "' should not be part of the right-hand side ", "of a formula. See help('brmsformula-helpers') for the correct syntax.") } } invisible(TRUE) } # extract names of variables added via the 'unused' argument get_unused_arg_vars <- function(x, ...) { UseMethod("get_unused_arg_vars") } #' @export get_unused_arg_vars.brmsformula <- function(x, ...) { all_vars(attr(x$formula, "unused")) } #' @export get_unused_arg_vars.mvbrmsformula <- function(x, ...) { unique(ulapply(x$forms, get_unused_arg_vars, ...)) } #' @export get_unused_arg_vars.brmsterms <- function(x, ...) 
{ all_vars(x$unused) } #' @export get_unused_arg_vars.mvbrmsterms <- function(x, ...) { unique(ulapply(x$terms, get_unused_arg_vars, ...)) } # extract elements from objects # @param x object from which to extract elements # @param name name of the element to be extracted get_element <- function(x, name, ...) { UseMethod("get_element") } #' @export get_element.default <- function(x, name, ...) { x[[name]] } #' @export get_element.mvbrmsformula <- function(x, name, ...) { lapply(x$forms, get_element, name = name, ...) } #' @export get_element.mvbrmsterms <- function(x, name, ...) { lapply(x$terms, get_element, name = name, ...) } brms/R/log_lik.R0000644000176200001440000010560214361545260013173 0ustar liggesusers#' Compute the Pointwise Log-Likelihood #' #' @aliases log_lik logLik.brmsfit #' #' @param object A fitted model object of class \code{brmsfit}. #' @inheritParams posterior_predict.brmsfit #' @param combine Only relevant in multivariate models. #' Indicates if the log-likelihoods of the submodels should #' be combined per observation (i.e. added together; the default) #' or if the log-likelihoods should be returned separately. #' @param pointwise A flag indicating whether to compute the full #' log-likelihood matrix at once (the default), or just return #' the likelihood function along with all data and draws #' required to compute the log-likelihood separately for each #' observation. The latter option is rarely useful when #' calling \code{log_lik} directly, but rather when computing #' \code{\link{waic}} or \code{\link{loo}}. #' @param add_point_estimate For internal use only. Ensures compatibility #' with the \code{\link{loo_subsample}} method. #' #' @return Usually, an S x N matrix containing the pointwise log-likelihood #' draws, where S is the number of draws and N is the number #' of observations in the data. 
For multivariate models and if #' \code{combine} is \code{FALSE}, an S x N x R array is returned, #' where R is the number of response variables. #' If \code{pointwise = TRUE}, the output is a function #' with a \code{draws} attribute containing all relevant #' data and posterior draws. #' #' @template details-newdata-na #' @template details-allow_new_levels #' #' @aliases log_lik #' @method log_lik brmsfit #' @export #' @export log_lik #' @importFrom rstantools log_lik log_lik.brmsfit <- function(object, newdata = NULL, re_formula = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, pointwise = FALSE, combine = TRUE, add_point_estimate = FALSE, cores = NULL, ...) { pointwise <- as_one_logical(pointwise) combine <- as_one_logical(combine) add_point_estimate <- as_one_logical(add_point_estimate) contains_draws(object) object <- restructure(object) prep <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = TRUE, ... ) if (add_point_estimate) { # required for the loo_subsample method # Computing a point estimate based on the full prep object is too # difficult due to its highly nested structure. As an alternative, a second # prep object is created from the point estimates of the draws directly. attr(prep, "point_estimate") <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = TRUE, point_estimate = "median", ... ) } if (pointwise) { stopifnot(combine) log_lik <- log_lik_pointwise # names need to be 'data' and 'draws' as per ?loo::loo.function attr(log_lik, "data") <- data.frame(i = seq_len(choose_N(prep))) attr(log_lik, "draws") <- prep } else { log_lik <- log_lik(prep, combine = combine, cores = cores) if (anyNA(log_lik)) { warning2( "NAs were found in the log-likelihood. Possibly this is because ", "some of your responses contain NAs. 
If you use 'mi' terms, try ", "setting 'resp' to those response variables without missing values. ", "Alternatively, use 'newdata' to predict only complete cases." ) } } log_lik } #' @export logLik.brmsfit <- function(object, newdata = NULL, re_formula = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, pointwise = FALSE, combine = TRUE, cores = NULL, ...) { cl <- match.call() cl[[1]] <- quote(log_lik) eval(cl, parent.frame()) } #' @export log_lik.mvbrmsprep <- function(object, combine = TRUE, ...) { if (length(object$mvpars$rescor)) { object$mvpars$Mu <- get_Mu(object) object$mvpars$Sigma <- get_Sigma(object) out <- log_lik.brmsprep(object, ...) } else { out <- lapply(object$resps, log_lik, ...) if (combine) { out <- Reduce("+", out) } else { along <- ifelse(length(out) > 1L, 3, 2) out <- do_call(abind, c(out, along = along)) } } out } #' @export log_lik.brmsprep <- function(object, cores = NULL, ...) { cores <- validate_cores_post_processing(cores) log_lik_fun <- paste0("log_lik_", object$family$fun) log_lik_fun <- get(log_lik_fun, asNamespace("brms")) if (is.customfamily(object$family)) { # ensure that the method can be found during parallel execution object$family$log_lik <- custom_family_method(object$family, "log_lik") } for (nlp in names(object$nlpars)) { object$nlpars[[nlp]] <- get_nlpar(object, nlpar = nlp) } for (dp in names(object$dpars)) { object$dpars[[dp]] <- get_dpar(object, dpar = dp) } N <- choose_N(object) out <- plapply(seq_len(N), log_lik_fun, cores = cores, prep = object) out <- do_call(cbind, out) colnames(out) <- NULL old_order <- object$old_order sort <- isTRUE(ncol(out) != length(old_order)) reorder_obs(out, old_order, sort = sort) } # evaluate log_lik in a pointwise manner # cannot be an S3 method since 'data_i' must be the first argument # names must be 'data_i' and 'draws' as per ?loo::loo.function log_lik_pointwise <- function(data_i, draws, ...) 
{ i <- data_i$i if (is.mvbrmsprep(draws) && !length(draws$mvpars$rescor)) { out <- lapply(draws$resps, log_lik_pointwise, i = i) out <- Reduce("+", out) } else { log_lik_fun <- paste0("log_lik_", draws$family$fun) log_lik_fun <- get(log_lik_fun, asNamespace("brms")) out <- log_lik_fun(i, draws) } out } # All log_lik_ functions have the same arguments structure # @param i index of the observatio for which to compute log-lik values # @param prep A named list returned by prepare_predictions containing # all required data and posterior draws # @return a vector of length prep$ndraws containing the pointwise # log-likelihood for the ith observation log_lik_gaussian <- function(i, prep) { mu <- get_dpar(prep, "mu", i = i) sigma <- get_dpar(prep, "sigma", i = i) sigma <- add_sigma_se(sigma, prep, i = i) args <- list(mean = mu, sd = sigma) # log_lik_censor computes the conventional log_lik in case of no censoring out <- log_lik_censor(dist = "norm", args = args, i = i, prep = prep) out <- log_lik_truncate( out, cdf = pnorm, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_student <- function(i, prep) { nu <- get_dpar(prep, "nu", i = i) mu <- get_dpar(prep, "mu", i = i) sigma <- get_dpar(prep, "sigma", i = i) sigma <- add_sigma_se(sigma, prep, i = i) args <- list(df = nu, mu = mu, sigma = sigma) out <- log_lik_censor( dist = "student_t", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pstudent_t, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_lognormal <- function(i, prep) { sigma <- get_dpar(prep, "sigma", i = i) args <- list(meanlog = get_dpar(prep, "mu", i), sdlog = sigma) out <- log_lik_censor(dist = "lnorm", args = args, i = i, prep = prep) out <- log_lik_truncate( out, cdf = plnorm, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_shifted_lognormal <- function(i, prep) { sigma <- get_dpar(prep, "sigma", i = i) ndt <- get_dpar(prep, "ndt", i = 
i) args <- list(meanlog = get_dpar(prep, "mu", i), sdlog = sigma, shift = ndt) out <- log_lik_censor("shifted_lnorm", args, i = i, prep = prep) out <- log_lik_truncate(out, pshifted_lnorm, args, i = i, prep = prep) log_lik_weight(out, i = i, prep = prep) } log_lik_skew_normal <- function(i, prep) { mu <- get_dpar(prep, "mu", i) sigma <- get_dpar(prep, "sigma", i = i) sigma <- add_sigma_se(sigma, prep, i = i) alpha <- get_dpar(prep, "alpha", i = i) args <- nlist(mu, sigma, alpha) out <- log_lik_censor( dist = "skew_normal", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pskew_normal, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_gaussian_mv <- function(i, prep) { Mu <- get_Mu(prep, i = i) Sigma <- get_Sigma(prep, i = i) dmn <- function(s) { dmulti_normal( prep$data$Y[i, ], mu = Mu[s, ], Sigma = Sigma[s, , ], log = TRUE ) } out <- sapply(1:prep$ndraws, dmn) log_lik_weight(out, i = i, prep = prep) } log_lik_student_mv <- function(i, prep) { nu <- get_dpar(prep, "nu", i = i) Mu <- get_Mu(prep, i = i) Sigma <- get_Sigma(prep, i = i) dmst <- function(s) { dmulti_student_t( prep$data$Y[i, ], df = nu[s], mu = Mu[s, ], Sigma = Sigma[s, , ], log = TRUE ) } out <- sapply(1:prep$ndraws, dmst) log_lik_weight(out, i = i, prep = prep) } log_lik_gaussian_time <- function(i, prep) { obs <- with(prep$ac, begin_tg[i]:end_tg[i]) Jtime <- prep$ac$Jtime_tg[i, ] Y <- as.numeric(prep$data$Y[obs]) mu <- as.matrix(get_dpar(prep, "mu", i = obs)) Sigma <- get_cov_matrix_ac(prep, obs, Jtime = Jtime) .log_lik <- function(s) { C <- as.matrix(Sigma[s, , ]) Cinv <- solve(C) e <- Y - mu[s, ] g <- solve(C, e) cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar) ll <- dnorm(Y, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_student_time <- function(i, prep) { obs <- with(prep$ac, begin_tg[i]:end_tg[i]) Jtime <- prep$ac$Jtime_tg[i, ] Y <- as.numeric(prep$data$Y[obs]) nu <- 
as.matrix(get_dpar(prep, "nu", i = obs)) mu <- as.matrix(get_dpar(prep, "mu", i = obs)) Sigma <- get_cov_matrix_ac(prep, obs, Jtime = Jtime) .log_lik <- function(s) { df <- nu[s, ] C <- as.matrix(Sigma[s, , ]) Cinv <- solve(C) e <- Y - mu[s, ] g <- solve(C, e) cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar * student_t_cov_factor(df, Cinv, e)) dfloo <- df + nrow(Cinv) - 1 ll <- dstudent_t(Y, dfloo, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_gaussian_lagsar <- function(i, prep) { mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") Y <- as.numeric(prep$data$Y) I <- diag(prep$nobs) stopifnot(i == 1) # see http://mc-stan.org/loo/articles/loo2-non-factorizable.html .log_lik <- function(s) { IB <- I - with(prep$ac, lagsar[s, ] * Msar) Cinv <- t(IB) %*% IB / sigma[s]^2 e <- Y - solve(IB, mu[s, ]) g <- Cinv %*% e cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar) ll <- dnorm(Y, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_student_lagsar <- function(i, prep) { nu <- get_dpar(prep, "nu") mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") Y <- as.numeric(prep$data$Y) I <- diag(prep$nobs) stopifnot(i == 1) # see http://mc-stan.org/loo/articles/loo2-non-factorizable.html .log_lik <- function(s) { df <- nu[s] IB <- I - with(prep$ac, lagsar[s, ] * Msar) Cinv <- t(IB) %*% IB / sigma[s]^2 e <- Y - solve(IB, mu[s, ]) g <- Cinv %*% e cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar * student_t_cov_factor(df, Cinv, e)) dfloo <- df + nrow(Cinv) - 1 ll <- dstudent_t(Y, dfloo, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_gaussian_errorsar <- function(i, prep) { stopifnot(i == 1) mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") Y <- as.numeric(prep$data$Y) I <- diag(prep$nobs) .log_lik <- function(s) { IB <- I - with(prep$ac, errorsar[s, ] * Msar) Cinv 
<- t(IB) %*% IB / sigma[s]^2 e <- Y - mu[s, ] g <- Cinv %*% e cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar) ll <- dnorm(Y, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_student_errorsar <- function(i, prep) { stopifnot(i == 1) nu <- get_dpar(prep, "nu") mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") Y <- as.numeric(prep$data$Y) I <- diag(prep$nobs) .log_lik <- function(s) { df <- nu[s] IB <- I - with(prep$ac, errorsar[s, ] * Msar) Cinv <- t(IB) %*% IB / sigma[s]^2 e <- Y - mu[s, ] g <- Cinv %*% e cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar * student_t_cov_factor(df, Cinv, e)) dfloo <- df + nrow(Cinv) - 1 ll <- dstudent_t(Y, dfloo, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_gaussian_fcor <- function(i, prep) { stopifnot(i == 1) Y <- as.numeric(prep$data$Y) mu <- get_dpar(prep, "mu") Sigma <- get_cov_matrix_ac(prep) .log_lik <- function(s) { C <- as.matrix(Sigma[s, , ]) Cinv <- solve(C) e <- Y - mu[s, ] g <- solve(C, e) cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar) ll <- dnorm(Y, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_student_fcor <- function(i, prep) { stopifnot(i == 1) Y <- as.numeric(prep$data$Y) nu <- get_dpar(prep, "nu") mu <- get_dpar(prep, "mu") Sigma <- get_cov_matrix_ac(prep) .log_lik <- function(s) { df <- nu[s] C <- as.matrix(Sigma[s, , ]) Cinv <- solve(C) e <- Y - mu[s, ] g <- solve(C, e) cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar * student_t_cov_factor(df, Cinv, e)) dfloo <- df + nrow(Cinv) - 1 ll <- dstudent_t(Y, dfloo, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_binomial <- function(i, prep) { trials <- prep$data$trials[i] args <- list(size = trials, prob = get_dpar(prep, "mu", i)) out <- log_lik_censor( dist = "binom", args = args, i = 
i, prep = prep ) out <- log_lik_truncate( out, cdf = pbinom, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_bernoulli <- function(i, prep) { args <- list(size = 1, prob = get_dpar(prep, "mu", i)) out <- log_lik_censor( dist = "binom", args = args, i = i, prep = prep ) # no truncation allowed log_lik_weight(out, i = i, prep = prep) } log_lik_beta_binomial <- function(i, prep) { trials <- prep$data$trials[i] mu <- get_dpar(prep, "mu", i) phi <- get_dpar(prep, "phi", i) args <- nlist(size = trials, mu, phi) out <- log_lik_censor("beta_binomial", args, i, prep) out <- log_lik_truncate(out, pbeta_binomial, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_poisson <- function(i, prep) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) args <- list(lambda = mu) out <- log_lik_censor( dist = "pois", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = ppois, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_negbinomial <- function(i, prep) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) shape <- get_dpar(prep, "shape", i) shape <- multiply_dpar_rate_denom(shape, prep, i = i) args <- list(mu = mu, size = shape) out <- log_lik_censor( dist = "nbinom", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pnbinom, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_negbinomial2 <- function(i, prep) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) sigma <- get_dpar(prep, "sigma", i) shape <- multiply_dpar_rate_denom(1 / sigma, prep, i = i) args <- list(mu = mu, size = shape) out <- log_lik_censor( dist = "nbinom", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pnbinom, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_geometric <- function(i, prep) { mu <- get_dpar(prep, "mu", i) mu 
<- multiply_dpar_rate_denom(mu, prep, i = i) shape <- 1 shape <- multiply_dpar_rate_denom(shape, prep, i = i) args <- list(mu = mu, size = shape) out <- log_lik_censor( dist = "nbinom", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pnbinom, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_discrete_weibull <- function(i, prep) { args <- list( mu = get_dpar(prep, "mu", i), shape = get_dpar(prep, "shape", i = i) ) out <- log_lik_censor( dist = "discrete_weibull", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pdiscrete_weibull, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_com_poisson <- function(i, prep) { args <- list( mu = get_dpar(prep, "mu", i), shape = get_dpar(prep, "shape", i = i) ) # no censoring or truncation allowed yet out <- do_call(dcom_poisson, c(prep$data$Y[i], args, log = TRUE)) log_lik_weight(out, i = i, prep = prep) } log_lik_exponential <- function(i, prep) { args <- list(rate = 1 / get_dpar(prep, "mu", i)) out <- log_lik_censor(dist = "exp", args = args, i = i, prep = prep) out <- log_lik_truncate( out, cdf = pexp, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_gamma <- function(i, prep) { shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i) / shape args <- nlist(shape, scale) out <- log_lik_censor(dist = "gamma", args = args, i = i, prep = prep) out <- log_lik_truncate( out, cdf = pgamma, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_weibull <- function(i, prep) { shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i = i) / gamma(1 + 1 / shape) args <- list(shape = shape, scale = scale) out <- log_lik_censor( dist = "weibull", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pweibull, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_frechet <- function(i, prep) { 
nu <- get_dpar(prep, "nu", i = i) scale <- get_dpar(prep, "mu", i = i) / gamma(1 - 1 / nu) args <- list(scale = scale, shape = nu) out <- log_lik_censor( dist = "frechet", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pfrechet, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_gen_extreme_value <- function(i, prep) { sigma <- get_dpar(prep, "sigma", i = i) xi <- get_dpar(prep, "xi", i = i) mu <- get_dpar(prep, "mu", i) args <- nlist(mu, sigma, xi) out <- log_lik_censor(dist = "gen_extreme_value", args = args, i = i, prep = prep) out <- log_lik_truncate(out, cdf = pgen_extreme_value, args = args, i = i, prep = prep) log_lik_weight(out, i = i, prep = prep) } log_lik_inverse.gaussian <- function(i, prep) { args <- list(mu = get_dpar(prep, "mu", i), shape = get_dpar(prep, "shape", i = i)) out <- log_lik_censor(dist = "inv_gaussian", args = args, i = i, prep = prep) out <- log_lik_truncate(out, cdf = pinv_gaussian, args = args, i = i, prep = prep) log_lik_weight(out, i = i, prep = prep) } log_lik_exgaussian <- function(i, prep) { args <- list(mu = get_dpar(prep, "mu", i), sigma = get_dpar(prep, "sigma", i = i), beta = get_dpar(prep, "beta", i = i)) out <- log_lik_censor(dist = "exgaussian", args = args, i = i, prep = prep) out <- log_lik_truncate(out, cdf = pexgaussian, args = args, i = i, prep = prep) log_lik_weight(out, i = i, prep = prep) } log_lik_wiener <- function(i, prep) { args <- list( delta = get_dpar(prep, "mu", i), alpha = get_dpar(prep, "bs", i = i), tau = get_dpar(prep, "ndt", i = i), beta = get_dpar(prep, "bias", i = i), resp = prep$data[["dec"]][i] ) out <- do_call(dwiener, c(prep$data$Y[i], args, log = TRUE)) log_lik_weight(out, i = i, prep = prep) } log_lik_beta <- function(i, prep) { mu <- get_dpar(prep, "mu", i) phi <- get_dpar(prep, "phi", i) args <- list(shape1 = mu * phi, shape2 = (1 - mu) * phi) out <- log_lik_censor(dist = "beta", args = args, i = i, prep = prep) out <- log_lik_truncate( 
out, cdf = pbeta, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_von_mises <- function(i, prep) { args <- list( mu = get_dpar(prep, "mu", i), kappa = get_dpar(prep, "kappa", i = i) ) out <- log_lik_censor( dist = "von_mises", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pvon_mises, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_asym_laplace <- function(i, prep, ...) { args <- list( mu = get_dpar(prep, "mu", i), sigma = get_dpar(prep, "sigma", i), quantile = get_dpar(prep, "quantile", i) ) out <- log_lik_censor(dist = "asym_laplace", args, i, prep) out <- log_lik_truncate(out, pasym_laplace, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_inflated_asym_laplace <- function(i, prep, ...) { args <- list( mu = get_dpar(prep, "mu", i), sigma = get_dpar(prep, "sigma", i), quantile = get_dpar(prep, "quantile", i), zi = get_dpar(prep, "zi", i) ) out <- log_lik_censor(dist = "zero_inflated_asym_laplace", args, i, prep) out <- log_lik_truncate(out, pzero_inflated_asym_laplace, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_cox <- function(i, prep, ...) 
{ args <- list( mu = get_dpar(prep, "mu", i), bhaz = prep$bhaz$bhaz[, i], cbhaz = prep$bhaz$cbhaz[, i] ) out <- log_lik_censor(dist = "cox", args = args, i = i, prep = prep) out <- log_lik_truncate(out, cdf = pcox, args = args, i = i, prep = prep) log_lik_weight(out, i = i, prep = prep) } log_lik_hurdle_poisson <- function(i, prep) { hu <- get_dpar(prep, "hu", i) lambda <- get_dpar(prep, "mu", i) args <- nlist(lambda, hu) out <- log_lik_censor("hurdle_poisson", args, i, prep) out <- log_lik_truncate(out, phurdle_poisson, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_hurdle_negbinomial <- function(i, prep) { hu <- get_dpar(prep, "hu", i) mu <- get_dpar(prep, "mu", i) shape <- get_dpar(prep, "shape", i = i) args <- nlist(mu, shape, hu) out <- log_lik_censor("hurdle_negbinomial", args, i, prep) out <- log_lik_truncate(out, phurdle_negbinomial, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_hurdle_gamma <- function(i, prep) { hu <- get_dpar(prep, "hu", i) shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i) / shape args <- nlist(shape, scale, hu) out <- log_lik_censor("hurdle_gamma", args, i, prep) out <- log_lik_truncate(out, phurdle_gamma, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_hurdle_lognormal <- function(i, prep) { hu <- get_dpar(prep, "hu", i) mu <- get_dpar(prep, "mu", i) sigma <- get_dpar(prep, "sigma", i = i) args <- nlist(mu, sigma, hu) out <- log_lik_censor("hurdle_lognormal", args, i, prep) out <- log_lik_truncate(out, phurdle_lognormal, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_hurdle_cumulative <- function(i, prep) { mu <- get_dpar(prep, "mu", i = i) hu <- get_dpar(prep, "hu", i = i) disc <- get_dpar(prep, "disc", i = i) thres <- subset_thres(prep, i) nthres <- NCOL(thres) eta <- disc * (thres - mu) y <- prep$data$Y[i] if (y == 0L) { out <- dbinom(1, size = 1, prob = hu, log = TRUE) } else if (y == 1L) { out <- log_cdf(eta[, 1L], prep$family$link) + 
dbinom(0, size = 1, prob = hu, log = TRUE) } else if (y == nthres + 1L) { out <- log_ccdf(eta[, y - 1L], prep$family$link) + dbinom(0, size = 1, prob = hu, log = TRUE) } else { out <- log_diff_exp( log_cdf(eta[, y], prep$family$link), log_cdf(eta[, y - 1L], prep$family$link) ) + dbinom(0, size = 1, prob = hu, log = TRUE) } log_lik_weight(out, i = i, prep = prep) } log_lik_zero_inflated_poisson <- function(i, prep) { zi <- get_dpar(prep, "zi", i) lambda <- get_dpar(prep, "mu", i) args <- nlist(lambda, zi) out <- log_lik_censor("zero_inflated_poisson", args, i, prep) out <- log_lik_truncate(out, pzero_inflated_poisson, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_inflated_negbinomial <- function(i, prep) { zi <- get_dpar(prep, "zi", i) mu <- get_dpar(prep, "mu", i) shape <- get_dpar(prep, "shape", i = i) args <- nlist(mu, shape, zi) out <- log_lik_censor("zero_inflated_negbinomial", args, i, prep) out <- log_lik_truncate(out, pzero_inflated_negbinomial, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_inflated_binomial <- function(i, prep) { trials <- prep$data$trials[i] mu <- get_dpar(prep, "mu", i) zi <- get_dpar(prep, "zi", i) args <- list(size = trials, prob = mu, zi = zi) out <- log_lik_censor("zero_inflated_binomial", args, i, prep) out <- log_lik_truncate(out, pzero_inflated_binomial, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_inflated_beta_binomial <- function(i, prep) { trials <- prep$data$trials[i] mu <- get_dpar(prep, "mu", i) phi <- get_dpar(prep, "phi", i) zi <- get_dpar(prep, "zi", i) args <- nlist(size = trials, mu, phi, zi) out <- log_lik_censor("zero_inflated_beta_binomial", args, i, prep) out <- log_lik_truncate(out, pzero_inflated_beta_binomial, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_inflated_beta <- function(i, prep) { zi <- get_dpar(prep, "zi", i) mu <- get_dpar(prep, "mu", i) phi <- get_dpar(prep, "phi", i) args <- nlist(shape1 = mu * phi, 
shape2 = (1 - mu) * phi, zi) out <- log_lik_censor("zero_inflated_beta", args, i, prep) out <- log_lik_truncate(out, pzero_inflated_beta, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_one_inflated_beta <- function(i, prep) { zoi <- get_dpar(prep, "zoi", i) coi <- get_dpar(prep, "coi", i) if (prep$data$Y[i] %in% c(0, 1)) { out <- dbinom(1, size = 1, prob = zoi, log = TRUE) + dbinom(prep$data$Y[i], size = 1, prob = coi, log = TRUE) } else { phi <- get_dpar(prep, "phi", i) mu <- get_dpar(prep, "mu", i) args <- list(shape1 = mu * phi, shape2 = (1 - mu) * phi) out <- dbinom(0, size = 1, prob = zoi, log = TRUE) + do_call(dbeta, c(prep$data$Y[i], args, log = TRUE)) } log_lik_weight(out, i = i, prep = prep) } log_lik_categorical <- function(i, prep) { stopifnot(prep$family$link == "logit") eta <- get_Mu(prep, i = i) eta <- insert_refcat(eta, refcat = prep$refcat) out <- dcategorical(prep$data$Y[i], eta = eta, log = TRUE) log_lik_weight(out, i = i, prep = prep) } log_lik_multinomial <- function(i, prep) { stopifnot(prep$family$link == "logit") eta <- get_Mu(prep, i = i) eta <- insert_refcat(eta, refcat = prep$refcat) out <- dmultinomial(prep$data$Y[i, ], eta = eta, log = TRUE) log_lik_weight(out, i = i, prep = prep) } log_lik_dirichlet <- function(i, prep) { stopifnot(prep$family$link == "logit") eta <- get_Mu(prep, i = i) eta <- insert_refcat(eta, refcat = prep$refcat) phi <- get_dpar(prep, "phi", i = i) cats <- seq_len(prep$data$ncat) alpha <- dcategorical(cats, eta = eta) * phi out <- ddirichlet(prep$data$Y[i, ], alpha = alpha, log = TRUE) log_lik_weight(out, i = i, prep = prep) } log_lik_dirichlet2 <- function(i, prep) { mu <- get_Mu(prep, i = i) out <- ddirichlet(prep$data$Y[i, ], alpha = mu, log = TRUE) log_lik_weight(out, i = i, prep = prep) } log_lik_logistic_normal <- function(i, prep, ...) 
{ mu <- get_Mu(prep, i = i) Sigma <- get_Sigma(prep, i = i, cor_name = "lncor") dlmn <- function(s) { dlogistic_normal( prep$data$Y[i, ], mu = mu[s, ], Sigma = Sigma[s, , ], refcat = prep$refcat, log = TRUE ) } out <- sapply(1:prep$ndraws, dlmn) log_lik_weight(out, i = i, prep = prep) } log_lik_cumulative <- function(i, prep) { disc <- get_dpar(prep, "disc", i = i) mu <- get_dpar(prep, "mu", i = i) thres <- subset_thres(prep, i) nthres <- NCOL(thres) eta <- disc * (thres - mu) y <- prep$data$Y[i] if (y == 1L) { out <- log_cdf(eta[, 1L], prep$family$link) } else if (y == nthres + 1L) { out <- log_ccdf(eta[, y - 1L], prep$family$link) } else { out <- log_diff_exp( log_cdf(eta[, y], prep$family$link), log_cdf(eta[, y - 1L], prep$family$link) ) } log_lik_weight(out, i = i, prep = prep) } log_lik_sratio <- function(i, prep) { disc <- get_dpar(prep, "disc", i = i) mu <- get_dpar(prep, "mu", i = i) thres <- subset_thres(prep, i) nthres <- NCOL(thres) eta <- disc * (thres - mu) y <- prep$data$Y[i] q <- sapply(seq_len(min(y, nthres)), function(k) log_ccdf(eta[, k], prep$family$link) ) if (y == 1L) { out <- log1m_exp(q[, 1L]) } else if (y == 2L) { out <- log1m_exp(q[, 2L]) + q[, 1L] } else if (y == nthres + 1L) { out <- rowSums(q) } else { out <- log1m_exp(q[, y]) + rowSums(q[, 1L:(y - 1L)]) } log_lik_weight(out, i = i, prep = prep) } log_lik_cratio <- function(i, prep) { disc <- get_dpar(prep, "disc", i = i) mu <- get_dpar(prep, "mu", i = i) thres <- subset_thres(prep, i) nthres <- NCOL(thres) eta <- disc * (mu - thres) y <- prep$data$Y[i] q <- sapply(seq_len(min(y, nthres)), function(k) log_cdf(eta[, k], prep$family$link) ) if (y == 1L) { out <- log1m_exp(q[, 1L]) } else if (y == 2L) { out <- log1m_exp(q[, 2L]) + q[, 1L] } else if (y == nthres + 1L) { out <- rowSums(q) } else { out <- log1m_exp(q[, y]) + rowSums(q[, 1L:(y - 1L)]) } log_lik_weight(out, i = i, prep = prep) } log_lik_acat <- function(i, prep) { disc <- get_dpar(prep, "disc", i = i) mu <- get_dpar(prep, "mu", 
i = i) thres <- subset_thres(prep, i) nthres <- NCOL(thres) eta <- disc * (mu - thres) y <- prep$data$Y[i] # TODO: check if computation can be made more numerically stable if (prep$family$link == "logit") { # more efficient computation for logit link q <- sapply(1:nthres, function(k) eta[, k]) p <- cbind(rep(0, nrow(eta)), q[, 1], matrix(0, nrow = nrow(eta), ncol = nthres - 1)) if (nthres > 1L) { p[, 3:(nthres + 1)] <- sapply(3:(nthres + 1), function(k) rowSums(q[, 1:(k - 1)])) } out <- p[, y] - log(rowSums(exp(p))) } else { q <- sapply(1:nthres, function(k) inv_link(eta[, k], prep$family$link)) p <- cbind(apply(1 - q[, 1:nthres], 1, prod), matrix(0, nrow = nrow(eta), ncol = nthres)) if (nthres > 1L) { p[, 2:nthres] <- sapply(2:nthres, function(k) apply(as.matrix(q[, 1:(k - 1)]), 1, prod) * apply(as.matrix(1 - q[, k:nthres]), 1, prod)) } p[, nthres + 1] <- apply(q[, 1:nthres], 1, prod) out <- log(p[, y]) - log(apply(p, 1, sum)) } log_lik_weight(out, i = i, prep = prep) } log_lik_custom <- function(i, prep) { custom_family_method(prep$family, "log_lik")(i, prep) } log_lik_mixture <- function(i, prep) { families <- family_names(prep$family) theta <- get_theta(prep, i = i) out <- array(NA, dim = dim(theta)) for (j in seq_along(families)) { log_lik_fun <- paste0("log_lik_", families[j]) log_lik_fun <- get(log_lik_fun, asNamespace("brms")) tmp_draws <- pseudo_prep_for_mixture(prep, j) out[, j] <- exp(log(theta[, j]) + log_lik_fun(i, tmp_draws)) } if (isTRUE(prep[["pp_mixture"]])) { out <- log(out) - log(rowSums(out)) } else { out <- log(rowSums(out)) } log_lik_weight(out, i = i, prep = prep) } # ----------- log_lik helper-functions ----------- # compute (possibly censored) log_lik values # @param dist name of a distribution for which the functions # d (pdf) and p (cdf) are available # @param args additional arguments passed to pdf and cdf # @param prep a brmsprep object # @return vector of log_lik values log_lik_censor <- function(dist, args, i, prep) { pdf <- 
get(paste0("d", dist), mode = "function") cdf <- get(paste0("p", dist), mode = "function") y <- prep$data$Y[i] cens <- prep$data$cens[i] if (is.null(cens) || cens == 0) { x <- do_call(pdf, c(y, args, log = TRUE)) } else if (cens == 1) { x <- do_call(cdf, c(y, args, lower.tail = FALSE, log.p = TRUE)) } else if (cens == -1) { x <- do_call(cdf, c(y, args, log.p = TRUE)) } else if (cens == 2) { rcens <- prep$data$rcens[i] x <- log(do_call(cdf, c(rcens, args)) - do_call(cdf, c(y, args))) } x } # adjust log_lik in truncated models # @param x vector of log_lik values # @param cdf a cumulative distribution function # @param args arguments passed to cdf # @param i observation number # @param prep a brmsprep object # @return vector of log_lik values log_lik_truncate <- function(x, cdf, args, i, prep) { lb <- prep$data[["lb"]][i] ub <- prep$data[["ub"]][i] if (is.null(lb) && is.null(ub)) { return(x) } if (!is.null(lb)) { log_cdf_lb <- do_call(cdf, c(lb, args, log.p = TRUE)) } else { log_cdf_lb <- rep(-Inf, length(x)) } if (!is.null(ub)) { log_cdf_ub <- do_call(cdf, c(ub, args, log.p = TRUE)) } else { log_cdf_ub <- rep(0, length(x)) } x - log_diff_exp(log_cdf_ub, log_cdf_lb) } # weight log_lik values according to defined weights # @param x vector of log_lik values # @param i observation number # @param prep a brmsprep object # @return vector of log_lik values log_lik_weight <- function(x, i, prep) { weight <- prep$data$weights[i] if (!is.null(weight)) { x <- x * weight } x } # after some discussion with Aki Vehtari and Daniel Simpson, # I disallowed computation of log-likelihood values for some models # until pointwise solutions are implemented stop_no_pw <- function() { stop2("Cannot yet compute pointwise log-likelihood for this model ", "because the observations are not conditionally independent.") } # multiplicate factor for conditional student-t models # see http://proceedings.mlr.press/v33/shah14.pdf # note that brms parameterizes C instead of Cov(y) = df / (df - 2) * C # 
@param df degrees of freedom parameter # @param Cinv inverse of the full matrix # @param e vector of error terms, that is, y - mu student_t_cov_factor <- function(df, Cinv, e) { beta1 <- ulapply(seq_rows(Cinv), student_t_beta1_i, Cinv, e) (df + beta1) / (df + nrow(Cinv) - 1) } # beta1 in equation (6) of http://proceedings.mlr.press/v33/shah14.pdf # @param i observation index to exclude in the submatrix # @param Cinv inverse of the full matrix # @param e vector of error terms, that is, y - mu # @param vector of length one student_t_beta1_i <- function(i, Cinv, e) { sub_Cinv_i <- sub_inverse_symmetric(Cinv, i) t(e[-i]) %*% sub_Cinv_i %*% e[-i] } # efficient submatrix inverse for a symmetric matrix # see http://www.scielo.org.mx/pdf/cys/v20n2/1405-5546-cys-20-02-00251.pdf # @param Cinv inverse of the full matrix # @param i observation index to exclude in the submatrix # @return inverse of the submatrix after removing observation i sub_inverse_symmetric <- function(Cinv, i) { csub <- Cinv[i, -i] D <- outer(csub, csub) Cinv[-i, -i] - D / Cinv[i, i] } brms/R/exclude_pars.R0000644000176200001440000001621514477022305014230 0ustar liggesusers# list parameters NOT to be saved by Stan # @return a vector of parameter names to be excluded exclude_pars <- function(x, ...) { UseMethod("exclude_pars") } #' @export exclude_pars.default <- function(x, ...) { character(0) } #' @export exclude_pars.brmsfit <- function(x, ...) { out <- character(0) save_pars <- x$save_pars bterms <- brmsterms(x$formula) c(out) <- exclude_pars(bterms, data = x$data, save_pars = save_pars, ...) 
meef <- tidy_meef(bterms, x$data) if (has_rows(meef)) { I <- seq_along(unique(meef$grname)) K <- seq_rows(meef) c(out) <- paste0(c("Corme_"), I) if (!save_pars$all) { c(out) <- c(paste0("zme_", K), paste0("Lme_", I)) } if (isFALSE(save_pars$latent)) { c(out) <- paste0("Xme_", K) } else if (is.character(save_pars$latent)) { sub_K <- K[!meef$xname %in% save_pars$latent] if (length(sub_K)) { c(out) <- paste0("Xme_", sub_K) } } } ranef <- x$ranef if (has_rows(ranef)) { rm_re_pars <- c(if (!save_pars$all) c("z", "L"), "Cor", "r") for (id in unique(ranef$id)) { c(out) <- paste0(rm_re_pars, "_", id) } if (isFALSE(save_pars$group)) { p <- usc(combine_prefix(ranef)) c(out) <- paste0("r_", ranef$id, p, "_", ranef$cn) } else if (is.character(save_pars$group)) { sub_ranef <- ranef[!ranef$group %in% save_pars$group, ] if (has_rows(sub_ranef)) { sub_p <- usc(combine_prefix(sub_ranef)) c(out) <- paste0("r_", sub_ranef$id, sub_p, "_", sub_ranef$cn) } } tranef <- get_dist_groups(ranef, "student") if (!save_pars$all && has_rows(tranef)) { c(out) <- paste0(c("udf_", "dfm_"), tranef$ggn) } } out <- unique(out) out <- setdiff(out, save_pars$manual) out } #' @export exclude_pars.mvbrmsterms <- function(x, save_pars, ...) { out <- c("Rescor", "Sigma") if (!save_pars$all) { c(out) <- c("Lrescor", "LSigma") } for (i in seq_along(x$terms)) { c(out) <- exclude_pars(x$terms[[i]], save_pars = save_pars, ...) } out } #' @export exclude_pars.brmsterms <- function(x, data, save_pars, ...) { resp <- usc(combine_prefix(x)) data <- subset_data(data, x) par_classes <- c("Lncor", "Cortime") out <- paste0(par_classes, resp) if (!save_pars$all) { par_classes <- c( "ordered_Intercept", "fixed_Intercept", "theta", "Llncor", "Lcortime" ) c(out) <- paste0(par_classes, resp) } for (dp in names(x$dpars)) { c(out) <- exclude_pars(x$dpars[[dp]], data = data, save_pars = save_pars, ...) } for (nlp in names(x$nlpars)) { c(out) <- exclude_pars(x$nlpars[[nlp]], data = data, save_pars = save_pars, ...) 
} if (is.formula(x$adforms$mi)) { if (!(isTRUE(save_pars$latent) || x$resp %in% save_pars$latent)) { c(out) <- paste0("Yl", resp) } } if (!(isTRUE(save_pars$group) || ".err" %in% save_pars$group)) { # latent residuals are like group-level effects c(out) <- paste0("err", resp) } out } #' @export exclude_pars.btl <- function(x, data, save_pars, ...) { out <- character(0) p <- usc(combine_prefix(x)) c(out) <- paste0("chol_cor", p) if (!save_pars$all) { par_classes <- c( "bQ", "zb", "zbsp", "zbs", "zar", "zma", "hs_local", "R2D2_phi", "scales", "Intercept", "first_Intercept", "merged_Intercept", "zcar", "nszcar", "zerr" ) c(out) <- paste0(par_classes, p) smef <- tidy_smef(x, data) for (i in seq_rows(smef)) { nb <- seq_len(smef$nbases[i]) c(out) <- paste0("zs", p, "_", i, "_", nb) } } out } #' Control Saving of Parameter Draws #' #' Control which (draws of) parameters should be saved in a \pkg{brms} #' model. The output of this function is meant for usage in the #' \code{save_pars} argument of \code{\link{brm}}. #' #' @param group A flag to indicate if group-level coefficients for #' each level of the grouping factors should be saved (default is #' \code{TRUE}). Set to \code{FALSE} to save memory. Alternatively, #' \code{group} may also be a character vector naming the grouping factors #' for which to save draws of coefficients. #' @param latent A flag to indicate if draws of latent variables obtained by #' using \code{me} and \code{mi} terms should be saved (default is #' \code{FALSE}). Saving these draws allows to better use methods such as #' \code{posterior_predict} with the latent variables but leads to very large #' \R objects even for models of moderate size and complexity. Alternatively, #' \code{latent} may also be a character vector naming the latent variables #' for which to save draws. #' @param all A flag to indicate if draws of all variables defined in Stan's #' \code{parameters} block should be saved (default is \code{FALSE}). 
Saving #' these draws is required in order to apply the certain methods such as #' \code{bridge_sampler} and \code{bayes_factor}. #' @param manual A character vector naming Stan variable names which should be #' saved. These names should match the variable names inside the Stan code #' before renaming. This feature is meant for power users only and will rarely #' be useful outside of very special cases. #' #' @return A list of class \code{"save_pars"}. #' #' @examples #' \dontrun{ #' # don't store group-level coefficients #' fit <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson(), #' save_pars = save_pars(group = FALSE)) #' variables(fit) #' } #' #' @export save_pars <- function(group = TRUE, latent = FALSE, all = FALSE, manual = NULL) { out <- list() if (is.logical(group)) { out$group <- as_one_logical(group) } else { out$group <- as.character(group) } if (is.logical(latent)) { out$latent <- as_one_logical(latent) } else { out$latent <- as.character(latent) } out$all <- as_one_logical(all) out$manual <- as.character(manual) class(out) <- "save_pars" out } # validate 'save_pars' argument # deprecated arguments: # @param save_ranef save varying effects per level? # @param save_mevars save noise-free variables? # @param save_all_pars save all variables from the 'parameters' block? # @return validated 'save_pars' argument validate_save_pars <- function(save_pars, save_ranef = NULL, save_mevars = NULL, save_all_pars = NULL) { if (is.null(save_pars)) { save_pars <- save_pars() } if (!is.save_pars(save_pars)) { stop2("Argument 'save_pars' needed to be created via 'save_pars()'.") } if (!is.null(save_ranef)) { warning2( "Argument 'save_ranef' is deprecated. Please use argument ", "'group' in function 'save_pars()' instead." ) save_pars$group <- as_one_logical(save_ranef) } if (!is.null(save_mevars)) { warning2( "Argument 'save_mevars' is deprecated. Please use argument ", "'latent' in function 'save_pars()' instead." 
) save_pars$latent <- as_one_logical(save_mevars) } if (!is.null(save_all_pars)) { warning2( "Argument 'save_all_pars' is deprecated. Please use argument ", "'all' in function 'save_pars()' instead." ) save_pars$all <- as_one_logical(save_all_pars) } save_pars } is.save_pars <- function(x) { inherits(x, "save_pars") } brms/R/emmeans.R0000644000176200001440000002107714464653070013206 0ustar liggesusers#' Support Functions for \pkg{emmeans} #' #' Functions required for compatibility of \pkg{brms} with \pkg{emmeans}. #' Users are not required to call these functions themselves. Instead, #' they will be called automatically by the \code{emmeans} function #' of the \pkg{emmeans} package. #' #' @name emmeans-brms-helpers #' #' @inheritParams posterior_epred.brmsfit #' @param re_formula Optional formula containing group-level effects to be #' considered in the prediction. If \code{NULL}, include all group-level #' effects; if \code{NA} (default), include no group-level effects. #' @param epred Logical. If \code{TRUE} compute predictions of #' the posterior predictive distribution's mean #' (see \code{\link{posterior_epred.brmsfit}}) while ignoring #' arguments \code{dpar} and \code{nlpar}. Defaults to \code{FALSE}. #' If you have specified a response transformation within the formula, #' you need to set \code{epred} to \code{TRUE} for \pkg{emmeans} to #' detect this transformation. #' @param data,trms,xlev,grid,vcov. Arguments required by \pkg{emmeans}. #' @param ... Additional arguments passed to \pkg{emmeans}. #' #' @details #' In order to ensure compatibility of most \pkg{brms} models with #' \pkg{emmeans}, predictions are not generated 'manually' via a design matrix #' and coefficient vector, but rather via \code{\link{posterior_linpred.brmsfit}}. #' This appears to generally work well, but note that it produces an `.@linfct` #' slot that contains the computed predictions as columns instead of the #' coefficients. 
#' #' @examples #' \dontrun{ #' fit1 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), #' data = kidney, family = lognormal()) #' summary(fit1) #' #' # summarize via 'emmeans' #' library(emmeans) #' rg <- ref_grid(fit1) #' em <- emmeans(rg, "disease") #' summary(em, point.est = mean) #' #' # obtain estimates for the posterior predictive distribution's mean #' epred <- emmeans(fit1, "disease", epred = TRUE) #' summary(epred, point.est = mean) #' #' #' # model with transformed response variable #' fit2 <- brm(log(mpg) ~ factor(cyl), data = mtcars) #' summary(fit2) #' #' # results will be on the log scale by default #' emmeans(fit2, ~ cyl) #' # log transform is detected and can be adjusted automatically #' emmeans(fit2, ~ cyl, epred = TRUE, type = "response") #' } NULL # recover the variables used in the model predictions # @param data only added to prevent it from being passed further via ... #' @rdname emmeans-brms-helpers recover_data.brmsfit <- function(object, data, resp = NULL, dpar = NULL, nlpar = NULL, re_formula = NA, epred = FALSE, ...) { bterms <- .extract_par_terms( object, resp = resp, dpar = dpar, nlpar = nlpar, re_formula = re_formula, epred = epred ) data <- rm_attr(object$data, "terms") # use of model.frame fixes issue #1531 mf <- model.frame(bterms$allvars, data = data) trms <- attr(mf, "terms") # brms has no call component so the call is just a dummy for the most part cl <- call("brms") if (epred) { # fixes issue #1360 for in-formula response transformations cl$formula <- bterms$respform } emmeans::recover_data(cl, trms, "na.omit", data = data, ...) } # Calculate the basis for making predictions. In some sense, this is # similar to the fitted() function with new data on the link scale. # Transforming to response scale, if desired, is handled by emmeans. #' @rdname emmeans-brms-helpers emm_basis.brmsfit <- function(object, trms, xlev, grid, vcov., resp = NULL, dpar = NULL, nlpar = NULL, re_formula = NA, epred = FALSE, ...) 
{ if (is_equal(dpar, "mean")) { # deprecated as of version 2.15.9 warning2("dpar = 'mean' is deprecated. Please use epred = TRUE instead.") epred <- TRUE dpar <- NULL } epred <- as_one_logical(epred) bterms <- .extract_par_terms( object, resp = resp, dpar = dpar, nlpar = nlpar, re_formula = re_formula, epred = epred ) if (epred) { post.beta <- posterior_epred( object, newdata = grid, re_formula = re_formula, resp = resp, incl_autocor = FALSE, ... ) } else { req_vars <- all_vars(bterms$allvars) post.beta <- posterior_linpred( object, newdata = grid, re_formula = re_formula, resp = resp, dpar = dpar, nlpar = nlpar, incl_autocor = FALSE, req_vars = req_vars, # offsets are handled by emmeans (#1096) transform = FALSE, offset = FALSE, ... ) } if (anyNA(post.beta)) { stop2("emm_basis.brmsfit created NAs. Please check your reference grid.") } misc <- bterms$.misc if (length(dim(post.beta)) == 3L) { # reshape to a 2D matrix, for example, in multivariate models ynames <- dimnames(post.beta)[[3]] if (is.null(ynames)) { ynames <- as.character(seq_len(dim(post.beta)[3])) } dims <- dim(post.beta) post.beta <- matrix(post.beta, ncol = prod(dims[2:3])) misc$ylevs = list(rep.meas = ynames) } attr(post.beta, "n.chains") <- object$fit@sim$chains X <- diag(ncol(post.beta)) bhat <- apply(post.beta, 2, mean) V <- cov(post.beta) nbasis <- matrix(NA) dfargs <- list() dffun <- function(k, dfargs) Inf environment(dffun) <- baseenv() nlist(X, bhat, nbasis, V, dffun, dfargs, misc, post.beta) } # extract terms of specific predicted parameter(s) in the model # currently, the only slots that matter in the returned object are # allvars: formula with all required variables on the right-hand side # .misc: a named list with additional info to be interpreted by emmeans .extract_par_terms <- function(x, ...) { UseMethod(".extract_par_terms") } #' @export .extract_par_terms.brmsfit <- function(x, resp = NULL, re_formula = NA, dpar = NULL, epred = FALSE, ...) 
{ if (is_equal(dpar, "mean")) { # deprecation warning already provided in emm_basis.brmsfit epred <- TRUE dpar <- NULL } resp <- validate_resp(resp, x) new_formula <- update_re_terms(formula(x), re_formula) # autocorrelation terms are always excluded for emmeans predictions (#1424) new_formula <- exclude_terms(new_formula, incl_autocor = FALSE) bterms <- brmsterms(new_formula, resp_rhs_all = FALSE) if (is_ordinal(bterms)) { warning2("brms' emmeans support for ordinal models is experimental ", "and currently ignores the threshold parameters.") } .extract_par_terms(bterms, resp = resp, dpar = dpar, epred = epred, ...) } #' @export .extract_par_terms.mvbrmsterms <- function(x, resp, epred, ...) { stopifnot(is.character(resp)) epred <- as_one_logical(epred) out <- x # only use selected univariate models out$terms <- out$terms[resp] if (epred) { out$allvars <- allvars_formula(lapply(out$terms, get_allvars)) out$.misc <- list() return(out) } for (i in seq_along(out$terms)) { out$terms[[i]] <- .extract_par_terms(out$terms[[i]], epred = epred, ...) } out$allvars <- allvars_formula(lapply(out$terms, get_allvars)) misc_list <- unique(from_list(out$terms, ".misc")) if (length(misc_list) > 1L){ stop2("brms' emmeans support for multivariate models is limited ", "to cases where all univariate models have the same family.") } out$.misc <- misc_list[[1]] out } #' @export .extract_par_terms.brmsterms <- function(x, dpar, nlpar, epred, ...) 
{ epred <- as_one_logical(epred) all_dpars <- names(x$dpars) all_nlpars <- names(x$nlpars) out <- x if (epred) { out$.misc <- list() return(out) } if (!is.null(nlpar)) { if (!is.null(dpar)) { stop2("'dpar' and 'nlpar' cannot be specified at the same time.") } nlpar <- as_one_character(nlpar) if (!nlpar %in% all_nlpars) { stop2( "Non-linear parameter '", nlpar, "' is not part of the model.", "\nSupported parameters are: ", collapse_comma(all_nlpars) ) } out <- x$nlpars[[nlpar]] } else if (!is.null(dpar)) { dpar <- as_one_character(dpar) if (!dpar %in% all_dpars) { stop2( "Distributional parameter '", dpar, "' is not part of the model.", "\nSupported parameters are: ", collapse_comma(all_dpars) ) } out <- x$dpars[[dpar]] } else { # extract 'mu' parameter by default if (!"mu" %in% names(x$dpars)) { # concerns categorical-like and mixture models stop2("emmeans is not yet supported for this brms model.") } out <- x$dpars[["mu"]] } if (!is.null(out$offset)) { # ensure that offsets are detected by emmeans (#1096) out$allvars <- allvars_formula(out$allvars, out$offset) } out$.misc <- emmeans::.std.link.labels(out$family, list()) out } brms/R/posterior.R0000644000176200001440000002273314213413565013602 0ustar liggesusers#' Index \code{brmsfit} objects #' #' @aliases variables nvariables niterations nchains ndraws #' #' Index variables, iterations, chains, and draws. #' #' @param x A \code{brmsfit} object or another \R object for which #' the methods are defined. #' @param ... Arguments passed to individual methods (if applicable). #' #' @name draws-index-brms NULL #' @rdname draws-index-brms #' @importFrom posterior variables #' @method variables brmsfit #' @export #' @export variables variables.brmsfit <- function(x, ...) { # TODO: simplify once rstan and cmdstanr support these methods out <- dimnames(x$fit) if (is.list(out)) { out <- out$parameters } out } #' @method variables data.frame variables.data.frame <- function(x, ...) 
{
  names(x)
}

#' @rdname draws-index-brms
#' @importFrom posterior nvariables
#' @method nvariables brmsfit
#' @export
#' @export nvariables
nvariables.brmsfit <- function(x, ...) {
  length(variables(x, ...))
}

#' @rdname draws-index-brms
#' @importFrom posterior niterations
#' @method niterations brmsfit
#' @export
#' @export niterations
niterations.brmsfit <- function(x) {
  if (!is.stanfit(x$fit)) return(0)
  niterations <- x$fit@sim$n_save[1] %||% 0
  # report post-warmup iterations only
  niterations - nwarmup(x)
}

#' @rdname draws-index-brms
#' @importFrom posterior nchains
#' @method nchains brmsfit
#' @export
#' @export nchains
nchains.brmsfit <- function(x) {
  if (!is.stanfit(x$fit)) return(0)
  x$fit@sim$chains %||% 0
}

#' @rdname draws-index-brms
#' @importFrom posterior ndraws
#' @method ndraws brmsfit
#' @export
#' @export ndraws
ndraws.brmsfit <- function(x) {
  niterations(x) * nchains(x)
}

# number of warmup iterations stored per chain
nwarmup <- function(x) {
  if (!is.stanfit(x$fit)) return(0)
  x$fit@sim$warmup2[1] %||% 0
}

# thinning interval of the stored draws
nthin <- function(x) {
  if (!is.stanfit(x$fit)) return(1)
  x$fit@sim$thin %||% 1
}

#' Transform \code{brmsfit} to \code{draws} objects
#'
#' Transform a \code{brmsfit} object to a format supported by the
#' \pkg{posterior} package.
#'
#' @aliases as_draws as_draws_matrix as_draws_array as_draws_df
#' @aliases as_draws_rvars as_draws_list
#'
#' @param x A \code{brmsfit} object or another \R object for which
#'   the methods are defined.
#' @param variable A character vector providing the variables to extract.
#'   By default, all variables are extracted.
#' @param regex Logical; Should \code{variable} be treated as a (vector of)
#'   regular expressions? Any variable in \code{x} matching at least one of the
#'   regular expressions will be selected. Defaults to \code{FALSE}.
#' @param inc_warmup Should warmup draws be included? Defaults to \code{FALSE}.
#' @param ... Arguments passed to individual methods (if applicable).
#'
#' @details To subset iterations, chains, or draws, use the
#'   \code{\link[posterior:subset_draws]{subset_draws}} method after
#'   transforming the \code{brmsfit} to a \code{draws} object.
#'
#' @seealso \code{\link[posterior:draws]{draws}}
#'   \code{\link[posterior:subset_draws]{subset_draws}}
#'
#' @examples
#' \dontrun{
#' fit <- brm(count ~ zAge + zBase * Trt + (1|patient),
#'            data = epilepsy, family = poisson())
#'
#' # extract posterior draws in an array format
#' (draws_fit <- as_draws_array(fit))
#' posterior::summarize_draws(draws_fit)
#'
#' # extract only certain variables
#' as_draws_array(fit, variable = "r_patient")
#' as_draws_array(fit, variable = "^b_", regex = TRUE)
#'
#' # extract posterior draws in a random variables format
#' as_draws_rvars(fit)
#' }
#'
#' @name draws-brms
NULL

#' @rdname draws-brms
#' @importFrom posterior as_draws
#' @method as_draws brmsfit
#' @export
#' @export as_draws
as_draws.brmsfit <- function(x, variable = NULL, regex = FALSE,
                             inc_warmup = FALSE, ...) {
  # draws_list is the fastest format to convert to at the moment
  as_draws_list(
    x, variable = variable, regex = regex,
    inc_warmup = inc_warmup, ...
  )
}

#' @rdname draws-brms
#' @importFrom posterior as_draws_matrix
#' @method as_draws_matrix brmsfit
#' @export
#' @export as_draws_matrix
as_draws_matrix.brmsfit <- function(x, variable = NULL, regex = FALSE,
                                    inc_warmup = FALSE, ...) {
  as_draws_matrix(as_draws_list(
    x, variable = variable, regex = regex,
    inc_warmup = inc_warmup, ...
  ))
}

#' @rdname draws-brms
#' @importFrom posterior as_draws_array
#' @method as_draws_array brmsfit
#' @export
#' @export as_draws_array
as_draws_array.brmsfit <- function(x, variable = NULL, regex = FALSE,
                                   inc_warmup = FALSE, ...) {
  as_draws_array(as_draws_list(
    x, variable = variable, regex = regex,
    inc_warmup = inc_warmup, ...
  ))
}

#' @rdname draws-brms
#' @importFrom posterior as_draws_df
#' @method as_draws_df brmsfit
#' @export
#' @export as_draws_df
as_draws_df.brmsfit <- function(x, variable = NULL, regex = FALSE,
                                inc_warmup = FALSE, ...) {
  as_draws_df(as_draws_list(
    x, variable = variable, regex = regex,
    inc_warmup = inc_warmup, ...
  ))
}

#' @rdname draws-brms
#' @importFrom posterior as_draws_list
#' @method as_draws_list brmsfit
#' @export
#' @export as_draws_list
as_draws_list.brmsfit <- function(x, variable = NULL, regex = FALSE,
                                  inc_warmup = FALSE, ...) {
  .as_draws_list(
    x$fit, variable = variable, regex = regex,
    inc_warmup = inc_warmup, ...
  )
}

#' @rdname draws-brms
#' @importFrom posterior as_draws_rvars
#' @method as_draws_rvars brmsfit
#' @export
#' @export as_draws_rvars
as_draws_rvars.brmsfit <- function(x, variable = NULL, regex = FALSE,
                                   inc_warmup = FALSE, ...) {
  as_draws_rvars(as_draws_list(
    x, variable = variable, regex = regex,
    inc_warmup = inc_warmup, ...
  ))
}

# in stanfit objects draws are stored in a draws_list-like format
# so converting from there will be most efficient
# may be removed once rstan supports posterior natively
.as_draws_list <- function(x, variable = NULL, regex = FALSE,
                           inc_warmup = FALSE, ...) {
  stopifnot(is.stanfit(x))
  inc_warmup <- as_one_logical(inc_warmup)
  if (!length(x@sim$samples)) {
    stop2("The model does not contain posterior draws.")
  }
  out <- as_draws_list(x@sim$samples)
  # first subset variables then remove warmup as removing warmup
  # will take a lot of time when extracting many variables
  out <- subset_draws(out, variable = variable, regex = regex)
  if (!inc_warmup) {
    nwarmup <- x@sim$warmup2[1] %||% 0
    warmup_ids <- seq_len(nwarmup)
    iteration_ids <- posterior::iteration_ids(out)
    if (length(warmup_ids)) {
      iteration_ids <- iteration_ids[-warmup_ids]
    }
    out <- subset_draws(out, iteration = iteration_ids)
  }
  out
}

#' Extract Posterior Draws
#'
#' Extract posterior draws in conventional formats
#' as data.frames, matrices, or arrays.
#'
#' @inheritParams as_draws.brmsfit
#' @param pars Deprecated alias of \code{variable}. For reasons of backwards
#'   compatibility, \code{pars} is interpreted as a vector of regular
#'   expressions by default unless \code{fixed = TRUE} is specified.
#' @param draw The draw indices to be selected. Subsetting draw indices will lead
#'   to an automatic merging of chains.
#' @param subset Deprecated alias of \code{draw}.
#' @param row.names,optional Unused and only added for consistency with
#'   the \code{\link[base:as.data.frame]{as.data.frame}} generic.
#' @param ... Further arguments to be passed to the corresponding
#'   \code{\link[brms:draws-brms]{as_draws_*}} methods as well as to
#'   \code{\link[posterior:subset_draws]{subset_draws}}.
#'
#' @return A data.frame, matrix, or array containing the posterior draws.
#'
#' @seealso \code{\link[brms:draws-brms]{as_draws}},
#'   \code{\link[posterior:subset_draws]{subset_draws}}
#'
#' @export
as.data.frame.brmsfit <- function(x, row.names = NULL, optional = TRUE,
                                  pars = NA, variable = NULL, draw = NULL,
                                  subset = NULL, ...) {
  # resolve deprecated aliases before extracting draws
  variable <- use_variable_alias(variable, x, pars = pars, ...)
  draw <- use_alias(draw, subset)
  out <- as_draws_df(x, variable = variable, ...)
  # subsetting draw indices merges chains; silence the respective message
  out <- suppressMessages(subset_draws(out, draw = draw, ...))
  unclass_draws(out)
}

#' @rdname as.data.frame.brmsfit
#' @export
as.matrix.brmsfit <- function(x, pars = NA, variable = NULL,
                              draw = NULL, subset = NULL, ...) {
  # resolve deprecated aliases before extracting draws
  variable <- use_variable_alias(variable, x, pars = pars, ...)
  draw <- use_alias(draw, subset)
  out <- as_draws_matrix(x, variable = variable, ...)
  # subsetting draw indices merges chains; silence the respective message
  out <- suppressMessages(subset_draws(out, draw = draw, ...))
  unclass_draws(out)
}

#' @rdname as.data.frame.brmsfit
#' @export
as.array.brmsfit <- function(x, pars = NA, variable = NULL,
                             draw = NULL, subset = NULL, ...) {
  # resolve deprecated aliases before extracting draws
  variable <- use_variable_alias(variable, x, pars = pars, ...)
  draw <- use_alias(draw, subset)
  out <- as_draws_array(x, variable = variable, ...)
out <- suppressMessages(subset_draws(out, draw = draw, ...)) unclass_draws(out) } # use the deprecated 'pars' alias to 'variable' use_variable_alias <- function(variable, object, pars = NA, ...) { if (!anyNA(pars)) { warning2("Argument 'pars' is deprecated. Please use 'variable' instead.") variable <- extract_pars(pars, variables(object), ...) } variable } # remove the posterior draws format classes from objects unclass_draws <- function(x, ...) { UseMethod("unclass_draws") } #' @export unclass_draws.default <- function(x, ...) { unclass(x) } #' @export unclass_draws.draws_df <- function(x, ...) { x <- as.data.frame(x) x$.chain <- x$.iteration <- x$.draw <- NULL x } brms/R/formula-ac.R0000644000176200001440000005243614413001711013571 0ustar liggesusers#' Autocorrelation structures #' #' Specify autocorrelation terms in \pkg{brms} models. Currently supported terms #' are \code{\link{arma}}, \code{\link{ar}}, \code{\link{ma}}, #' \code{\link{cosy}}, \code{\link{unstr}}, \code{\link{sar}}, #' \code{\link{car}}, and \code{\link{fcor}}. Terms can be directly specified #' within the formula, or passed to the \code{autocor} argument of #' \code{\link{brmsformula}} in the form of a one-sided formula. For deprecated #' ways of specifying autocorrelation terms, see \code{\link{cor_brms}}. #' #' @name autocor-terms #' #' @details The autocor term functions are almost solely useful when called in #' formulas passed to the \pkg{brms} package. They do not evaluate its #' arguments -- but exist purely to help set up a model with autocorrelation #' terms. 
#' #' @seealso \code{\link{brmsformula}}, \code{\link{acformula}}, #' \code{\link{arma}}, \code{\link{ar}}, \code{\link{ma}}, #' \code{\link{cosy}}, \code{\link{unstr}}, \code{\link{sar}}, #' \code{\link{car}}, \code{\link{fcor}} #' #' @examples #' # specify autocor terms within the formula #' y ~ x + arma(p = 1, q = 1) + car(M) #' #' # specify autocor terms in the 'autocor' argument #' bf(y ~ x, autocor = ~ arma(p = 1, q = 1) + car(M)) #' #' # specify autocor terms via 'acformula' #' bf(y ~ x) + acformula(~ arma(p = 1, q = 1) + car(M)) NULL #' Set up ARMA(p,q) correlation structures #' #' Set up an autoregressive moving average (ARMA) term of order (p, q) in #' \pkg{brms}. The function does not evaluate its arguments -- it exists purely #' to help set up a model with ARMA terms. #' #' @param time An optional time variable specifying the time ordering #' of the observations. By default, the existing order of the observations #' in the data is used. #' @param gr An optional grouping variable. If specified, the correlation #' structure is assumed to apply only to observations within the same grouping #' level. #' @param p A non-negative integer specifying the autoregressive (AR) #' order of the ARMA structure. Default is \code{1}. #' @param q A non-negative integer specifying the moving average (MA) #' order of the ARMA structure. Default is \code{1}. #' @param cov A flag indicating whether ARMA effects should be estimated by #' means of residual covariance matrices. This is currently only possible for #' stationary ARMA effects of order 1. If the model family does not have #' natural residuals, latent residuals are added automatically. If #' \code{FALSE} (the default), a regression formulation is used that is #' considerably faster and allows for ARMA effects of order higher than 1 but #' is only available for \code{gaussian} models and some of its #' generalizations. 
#' #' @return An object of class \code{'arma_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @seealso \code{\link{autocor-terms}}, \code{\link{ar}}, \code{\link{ma}}, #' #' @examples #' \dontrun{ #' data("LakeHuron") #' LakeHuron <- as.data.frame(LakeHuron) #' fit <- brm(x ~ arma(p = 2, q = 1), data = LakeHuron) #' summary(fit) #' } #' #' @export arma <- function(time = NA, gr = NA, p = 1, q = 1, cov = FALSE) { label <- deparse0(match.call()) time <- deparse0(substitute(time)) gr <- deparse0(substitute(gr)) .arma(time = time, gr = gr, p = p, q = q, cov = cov, label = label) } #' Set up AR(p) correlation structures #' #' Set up an autoregressive (AR) term of order p in \pkg{brms}. The function #' does not evaluate its arguments -- it exists purely to help set up a model #' with AR terms. #' #' @inheritParams arma #' #' @return An object of class \code{'arma_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @seealso \code{\link{autocor-terms}}, \code{\link{arma}}, \code{\link{ma}} #' #' @examples #' \dontrun{ #' data("LakeHuron") #' LakeHuron <- as.data.frame(LakeHuron) #' fit <- brm(x ~ ar(p = 2), data = LakeHuron) #' summary(fit) #' } #' #' @export ar <- function(time = NA, gr = NA, p = 1, cov = FALSE) { label <- deparse0(match.call()) time <- deparse0(substitute(time)) gr <- deparse0(substitute(gr)) .arma(time = time, gr = gr, p = p, q = 0, cov = cov, label = label) } #' Set up MA(q) correlation structures #' #' Set up a moving average (MA) term of order q in \pkg{brms}. The function does #' not evaluate its arguments -- it exists purely to help set up a model with #' MA terms. #' #' @inheritParams arma #' #' @return An object of class \code{'arma_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. 
#'
#' @seealso \code{\link{autocor-terms}}, \code{\link{arma}}, \code{\link{ar}}
#'
#' @examples
#' \dontrun{
#' data("LakeHuron")
#' LakeHuron <- as.data.frame(LakeHuron)
#' fit <- brm(x ~ ma(q = 2), data = LakeHuron)
#' summary(fit)
#' }
#'
#' @export
ma <- function(time = NA, gr = NA, q = 1, cov = FALSE) {
  label <- deparse0(match.call())
  time <- deparse0(substitute(time))
  gr <- deparse0(substitute(gr))
  # an MA(q) term is an ARMA(0, q) term
  .arma(time = time, gr = gr, p = 0, q = q, cov = cov, label = label)
}

# helper function to validate input to arma(), ar(), and ma()
# @param time name of the time variable (as a string)
# @param gr name of the grouping variable (as a string)
# @param p autoregressive order; non-negative whole number
# @param q moving-average order; non-negative whole number
# @param cov use the covariance formulation? only allowed for orders <= 1
# @param label deparsed original call, stored for printing
# @return a list of class c("arma_term", "ac_term")
.arma <- function(time, gr, p, q, cov, label) {
  time <- as_one_variable(time)
  gr <- as_one_character(gr)
  stopif_illegal_group(gr)
  p <- as_one_numeric(p)
  q <- as_one_numeric(q)
  if (!(p >= 0 && is_wholenumber(p))) {
    stop2("Autoregressive order must be a non-negative integer.")
  }
  if (!(q >= 0 && is_wholenumber(q))) {
    stop2("Moving-average order must be a non-negative integer.")
  }
  if (!sum(p, q)) {
    stop2("At least one of 'p' and 'q' should be greater zero.")
  }
  cov <- as_one_logical(cov)
  if (cov && (p > 1 || q > 1)) {
    stop2("Covariance formulation of ARMA structures is ",
          "only possible for effects of maximal order one.")
  }
  label <- as_one_character(label)
  out <- nlist(time, gr, p, q, cov, label)
  class(out) <- c("arma_term", "ac_term")
  out
}

#' Set up COSY correlation structures
#'
#' Set up a compound symmetry (COSY) term in \pkg{brms}. The function does
#' not evaluate its arguments -- it exists purely to help set up a model with
#' COSY terms.
#'
#' @inheritParams arma
#'
#' @return An object of class \code{'cosy_term'}, which is a list
#'   of arguments to be interpreted by the formula
#'   parsing functions of \pkg{brms}.
#' #' @seealso \code{\link{autocor-terms}} #' #' @examples #' \dontrun{ #' data("lh") #' lh <- as.data.frame(lh) #' fit <- brm(x ~ cosy(), data = lh) #' summary(fit) #' } #' #' @export cosy <- function(time = NA, gr = NA) { label <- deparse0(match.call()) time <- deparse0(substitute(time)) time <- as_one_variable(time) gr <- deparse0(substitute(gr)) stopif_illegal_group(gr) out <- nlist(time, gr, label) class(out) <- c("cosy_term", "ac_term") out } #' Set up UNSTR correlation structures #' #' Set up an unstructured (UNSTR) correlation term in \pkg{brms}. The function does #' not evaluate its arguments -- it exists purely to help set up a model with #' UNSTR terms. #' #' @inheritParams arma #' #' @return An object of class \code{'unstr_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @seealso \code{\link{autocor-terms}} #' #' @examples #' \dontrun{ #' # add an unstructured correlation matrix for visits within the same patient #' fit <- brm(count ~ Trt + unstr(visit, patient), data = epilepsy) #' summary(fit) #' } #' #' @export unstr <- function(time, gr) { label <- deparse0(match.call()) time <- deparse0(substitute(time)) time <- as_one_variable(time) gr <- deparse0(substitute(gr)) stopif_illegal_group(gr) out <- nlist(time, gr, label) class(out) <- c("unstr_term", "ac_term") out } #' Spatial simultaneous autoregressive (SAR) structures #' #' Set up an spatial simultaneous autoregressive (SAR) term in \pkg{brms}. The #' function does not evaluate its arguments -- it exists purely to help set up a #' model with SAR terms. #' #' @param M An object specifying the spatial weighting matrix. #' Can be either the spatial weight matrix itself or an #' object of class \code{listw} or \code{nb}, from which #' the spatial weighting matrix can be computed. #' @param type Type of the SAR structure. Either \code{"lag"} #' (for SAR of the response values) or \code{"error"} #' (for SAR of the residuals). 
More information is #' provided in the 'Details' section. #' #' @details The \code{lagsar} structure implements SAR of the response values: #' \deqn{y = \rho W y + \eta + e} #' The \code{errorsar} structure implements SAR of the residuals: #' \deqn{y = \eta + u, u = \rho W u + e} #' In the above equations, \eqn{\eta} is the predictor term and \eqn{e} are #' independent normally or t-distributed residuals. Currently, only families #' \code{gaussian} and \code{student} support SAR structures. #' #' @return An object of class \code{'sar_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @seealso \code{\link{autocor-terms}} #' #' @examples #' \dontrun{ #' data(oldcol, package = "spdep") #' fit1 <- brm(CRIME ~ INC + HOVAL + sar(COL.nb, type = "lag"), #' data = COL.OLD, data2 = list(COL.nb = COL.nb), #' chains = 2, cores = 2) #' summary(fit1) #' plot(fit1) #' #' fit2 <- brm(CRIME ~ INC + HOVAL + sar(COL.nb, type = "error"), #' data = COL.OLD, data2 = list(COL.nb = COL.nb), #' chains = 2, cores = 2) #' summary(fit2) #' plot(fit2) #' } #' #' @export sar <- function(M, type = "lag") { label <- deparse0(match.call()) if (missing(M)) { stop2("Argument 'M' is missing in sar().") } M <- deparse0(substitute(M)) M <- as_one_variable(M) options <- c("lag", "error") type <- match.arg(type, options) out <- nlist(M, type, label) class(out) <- c("sar_term", "ac_term") out } #' Spatial conditional autoregressive (CAR) structures #' #' Set up an spatial conditional autoregressive (CAR) term in \pkg{brms}. The #' function does not evaluate its arguments -- it exists purely to help set up a #' model with CAR terms. #' #' @param M Adjacency matrix of locations. All non-zero entries are treated as #' if the two locations are adjacent. If \code{gr} is specified, the row names #' of \code{M} have to match the levels of the grouping factor. #' @param gr An optional grouping factor mapping observations to spatial #' locations. 
If not specified, each observation is treated as a separate #' location. It is recommended to always specify a grouping factor to allow #' for handling of new data in post-processing methods. #' @param type Type of the CAR structure. Currently implemented are #' \code{"escar"} (exact sparse CAR), \code{"esicar"} (exact sparse intrinsic #' CAR), \code{"icar"} (intrinsic CAR), and \code{"bym2"}. More information is #' provided in the 'Details' section. #' #' @return An object of class \code{'car_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @seealso \code{\link{autocor-terms}} #' #' @details The \code{escar} and \code{esicar} types are #' implemented based on the case study of Max Joseph #' (\url{https://github.com/mbjoseph/CARstan}). The \code{icar} and #' \code{bym2} type is implemented based on the case study of Mitzi Morris #' (\url{https://mc-stan.org/users/documentation/case-studies/icar_stan.html}). #' #' @examples #' \dontrun{ #' # generate some spatial data #' east <- north <- 1:10 #' Grid <- expand.grid(east, north) #' K <- nrow(Grid) #' #' # set up distance and neighbourhood matrices #' distance <- as.matrix(dist(Grid)) #' W <- array(0, c(K, K)) #' W[distance == 1] <- 1 #' #' # generate the covariates and response data #' x1 <- rnorm(K) #' x2 <- rnorm(K) #' theta <- rnorm(K, sd = 0.05) #' phi <- rmulti_normal( #' 1, mu = rep(0, K), Sigma = 0.4 * exp(-0.1 * distance) #' ) #' eta <- x1 + x2 + phi #' prob <- exp(eta) / (1 + exp(eta)) #' size <- rep(50, K) #' y <- rbinom(n = K, size = size, prob = prob) #' dat <- data.frame(y, size, x1, x2) #' #' # fit a CAR model #' fit <- brm(y | trials(size) ~ x1 + x2 + car(W), #' data = dat, data2 = list(W = W), #' family = binomial()) #' summary(fit) #' } #' #' @export car <- function(M, gr = NA, type = "escar") { label <- deparse0(match.call()) if (missing(M)) { stop2("Argument 'M' is missing in car().") } M <- deparse0(substitute(M)) M <- 
as_one_variable(M) gr <- deparse0(substitute(gr)) stopif_illegal_group(gr) options <- c("escar", "esicar", "icar", "bym2") type <- match.arg(type, options) out <- nlist(M, gr, type, label) class(out) <- c("car_term", "ac_term") out } #' Fixed residual correlation (FCOR) structures #' #' Set up a fixed residual correlation (FCOR) term in \pkg{brms}. The function #' does not evaluate its arguments -- it exists purely to help set up a model #' with FCOR terms. #' #' @param M Known correlation/covariance matrix of the response variable. #' If a vector is passed, it will be used as diagonal entries #' (variances) and correlations/covariances will be set to zero. #' The actual covariance matrix used in the likelihood is obtained #' by multiplying \code{M} by the square of the residual standard #' deviation parameter \code{sigma} estimated as part of the model. #' #' @return An object of class \code{'fcor_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @seealso \code{\link{autocor-terms}} #' #' @examples #' \dontrun{ #' dat <- data.frame(y = rnorm(3)) #' V <- cbind(c(0.5, 0.3, 0.2), c(0.3, 1, 0.1), c(0.2, 0.1, 0.2)) #' fit <- brm(y ~ 1 + fcor(V), data = dat, data2 = list(V = V)) #' } #' #' @export fcor <- function(M) { label <- deparse0(match.call()) if (missing(M)) { stop2("Argument 'M' is missing in fcor().") } M <- deparse0(substitute(M)) M <- as_one_variable(M) out <- nlist(M, label) class(out) <- c("fcor_term", "ac_term") out } # validate 'autocor' argument validate_autocor <- function(autocor) { if (is.null(autocor) || is.cor_empty(autocor)) { return(NULL) } if (is.cor_brms(autocor)) { warning2("Using 'cor_brms' objects for 'autocor' is deprecated. 
", "Please see ?cor_brms for details.") autocor <- as_formula_cor_brms(autocor) } if (is.null(autocor)) { return(NULL) } autocor <- as.formula(autocor) att <- attributes(autocor) autocor <- terms_ac(autocor) if (!is.null(autocor) && !is.formula(autocor)) { stop2("Argument 'autocor' must be coercible to a formula.") } attributes(autocor)[names(att)] <- att autocor } # gather information on autocor terms # @return a data.frame with one row per autocor term tidy_acef <- function(x, ...) { UseMethod("tidy_acef") } #' @export tidy_acef.default <- function(x, ...) { x <- brmsterms(x, check_response = FALSE) tidy_acef(x, ...) } #' @export tidy_acef.mvbrmsterms <- function(x, ...) { out <- lapply(x$terms, tidy_acef, ...) out <- do_call(rbind, out) structure(out, class = acef_class()) } #' @export tidy_acef.brmsterms <- function(x, ...) { out <- lapply(x$dpars, tidy_acef, ...) out <- do_call(rbind, out) if (!NROW(out)) { return(empty_acef()) } out <- structure(out, class = acef_class()) if (has_ac_class(out, "sar")) { if (any(c("sigma", "nu") %in% names(x$dpars))) { stop2("SAR models are not implemented when predicting 'sigma' or 'nu'.") } } if (use_ac_cov(out)) { if (isTRUE(x$rescor)) { stop2("Explicit covariance terms cannot be modeled ", "when 'rescor' is estimated at the same time.") } } out } #' @export tidy_acef.btl <- function(x, ...) 
{ form <- x[["ac"]] if (!is.formula(form)) { return(empty_acef()) } if (is.mixfamily(x$family)) { stop2("Autocorrelation terms cannot be applied in mixture models.") } px <- check_prefix(x) out <- data.frame(term = all_terms(form), stringsAsFactors = FALSE) nterms <- NROW(out) cnames <- c("class", "dim", "type", "time", "gr", "p", "q", "M") out[cnames] <- list(NA) out$cov <- out$nat_cov <- FALSE out[names(px)] <- px for (i in seq_len(nterms)) { ac <- eval2(out$term[i]) if (is.arma_term(ac)) { out$class[i] <- "arma" out$dim[i] <- "time" out$time[i] <- ac$time out$gr[i] <- ac$gr out$p[i] <- ac$p out$q[i] <- ac$q out$cov[i] <- ac$cov } if (is.cosy_term(ac)) { out$class[i] <- "cosy" out$dim[i] <- "time" out$time[i] <- ac$time out$gr[i] <- ac$gr out$cov[i] <- TRUE } if (is.unstr_term(ac)) { out$class[i] <- "unstr" out$dim[i] <- "time" out$time[i] <- ac$time out$gr[i] <- ac$gr out$cov[i] <- TRUE } if (is.sar_term(ac)) { out$class[i] <- "sar" out$dim[i] <- "space" out$type[i] <- ac$type out$M[i] <- ac$M out$cov[i] <- TRUE } if (is.car_term(ac)) { out$class[i] <- "car" out$dim[i] <- "space" out$type[i] <- ac$type out$gr[i] <- ac$gr out$M[i] <- ac$M } if (is.fcor_term(ac)) { out$class[i] <- "fcor" out$M[i] <- ac$M out$cov[i] <- TRUE } } # covariance matrices of natural residuals will be handled # directly in the likelihood function while latent residuals will # be added to the linear predictor of the main parameter 'mu' out$nat_cov <- out$cov & has_natural_residuals(x) class(out) <- acef_class() # validate specified autocor terms if (any(duplicated(out$class))) { stop2("Can only model one term per autocorrelation class.") } if (NROW(subset2(out, dim = "time")) > 1) { stop2("Can only model one time-series term.") } if (NROW(subset2(out, dim = "space")) > 1) { stop2("Can only model one spatial term.") } if (NROW(subset2(out, nat_cov = TRUE)) > 1) { stop2("Can only model one covariance matrix of natural residuals.") } if (use_ac_cov(out) || has_ac_class(out, "arma")) { if 
(any(!out$dpar %in% c("", "mu") | nzchar(out$nlpar))) {
    stop2("Explicit covariance terms can only be specified on 'mu'.")
  }
  out
}

#' @export
tidy_acef.btnl <- function(x, ...) {
  tidy_acef.btl(x, ...)
}

#' @export
tidy_acef.acef <- function(x, ...) {
  x
}

#' @export
tidy_acef.NULL <- function(x, ...) {
  empty_acef()
}

# an 'acef' object containing zero autocor terms
empty_acef <- function() {
  structure(empty_data_frame(), class = acef_class())
}

acef_class <- function() {
  c("acef", "data.frame")
}

# extract the names of certain autocor variables
# @param var which variable to extract: "time", "gr", or "M"
# @param ... passed to subset2() to restrict the terms considered
get_ac_vars <- function(x, var, ...) {
  var <- match.arg(var, c("time", "gr", "M"))
  ac <- subset2(tidy_acef(x), ...)
  vars <- unique(ac[[var]])
  setdiff(na.omit(vars), "NA")
}

# extract the names of autocor grouping variables
get_ac_groups <- function(x, ...) {
  get_ac_vars(x, "gr", ...)
}

# is a certain subset of autocor terms present?
has_ac_subset <- function(x, ...) {
  NROW(subset2(tidy_acef(x), ...)) > 0L
}

# is a certain autocorrelation class present?
has_ac_class <- function(x, class) {
  has_ac_subset(x, class = class)
}

# use an explicit residual covariance structure?
use_ac_cov <- function(x) {
  has_ac_subset(x, cov = TRUE)
}

# use an explicit residual covariance structure for time-series?
use_ac_cov_time <- function(x) {
  has_ac_subset(x, cov = TRUE, dim = "time")
}

# does the model need latent residuals for autocor structures?
has_ac_latent_residuals <- function(bterms) {
  !has_natural_residuals(bterms) &&
    (use_ac_cov(bterms) || has_ac_class(bterms, "arma"))
}

# validate SAR matrices
# @param M a spatial weight matrix or an object of class 'listw' or 'nb'
# @return a sparse weight matrix
validate_sar_matrix <- function(M) {
  if (is(M, "listw")) {
    require_package("spdep")
    M <- spdep::listw2mat(M)
  } else if (is(M, "nb")) {
    require_package("spdep")
    M <- spdep::nb2mat(M)
  }
  if (length(dim(M)) != 2L) {
    stop2("'M' for SAR terms must be of class 'matrix', 'listw', or 'nb'.")
  }
  M <- Matrix::Matrix(M, sparse = TRUE)
  M
}

# validate CAR matrices
# @param M a symmetric adjacency matrix
# @return a sparse binary adjacency matrix
validate_car_matrix <- function(M) {
  if (length(dim(M)) != 2L) {
    stop2("'M' for CAR terms must be a matrix.")
  }
  M <- Matrix::Matrix(M, sparse = TRUE)
  if (!Matrix::isSymmetric(M, check.attributes = FALSE)) {
    stop2("'M' for CAR terms must be symmetric.")
  }
  colnames(M) <- rownames(M)
  # all non-zero entries are treated as indicating adjacency
  not_binary <- M@x != 1
  if (any(not_binary)) {
    message("Converting all non-zero values in 'M' to 1.")
    M@x[not_binary] <- 1
  }
  M
}

# validate FCOR matrices
# @param M a covariance/correlation matrix or a vector of variances
# @return a symmetric positive definite matrix
validate_fcor_matrix <- function(M) {
  if (length(dim(M)) <= 1L) {
    # a vector is interpreted as diagonal entries (variances)
    M <- diag(as.vector(M), length(M))
  }
  if (length(dim(M)) != 2L) {
    stop2("'M' for FCOR terms must be a matrix.")
  }
  M <- as.matrix(M)
  if (!isSymmetric(M, check.attributes = FALSE)) {
    stop2("'M' for FCOR terms must be symmetric.")
  }
  # fixed: the former check 'min(eigen(M)$values <= 0)' took the minimum of
  # a logical vector and hence only triggered when ALL eigenvalues were
  # non-positive, letting most indefinite matrices pass silently
  if (min(eigen(M, symmetric = TRUE, only.values = TRUE)$values) <= 0) {
    stop2("'M' for FCOR terms must be positive definite.")
  }
  M
}

# regex to extract all parameter names of autocorrelation structures
regex_autocor_pars <- function() {
  # cortime is ignored here to allow custom renaming in summary.brmsfit
  p <- c("ar", "ma", "sderr", "cosy", "lagsar", "errorsar",
         "car", "sdcar", "rhocar")
  p <- paste0("(", p, ")", collapse = "|")
  paste0("^(", p, ")(\\[|_|$)")
}

is.ac_term <- function(x) {
  inherits(x, "ac_term")
}

is.arma_term <- function(x) {
  inherits(x, "arma_term")
}

is.cosy_term <- function(x) {
  inherits(x, "cosy_term")
}

is.unstr_term <- function(x) {
  inherits(x, "unstr_term")
}

is.sar_term <- function(x) {
  inherits(x, "sar_term")
}

is.car_term <- function(x) {
  inherits(x, "car_term")
} is.fcor_term <- function(x) { inherits(x, "fcor_term") } brms/R/pp_mixture.R0000644000176200001440000000706114464640324013750 0ustar liggesusers#' Posterior Probabilities of Mixture Component Memberships #' #' Compute the posterior probabilities of mixture component #' memberships for each observation including uncertainty #' estimates. #' #' @inheritParams predict.brmsfit #' @param x An \R object usually of class \code{brmsfit}. #' @param log Logical; Indicates whether to return #' probabilities on the log-scale. #' #' @return #' If \code{summary = TRUE}, an N x E x K array, #' where N is the number of observations, K is the number #' of mixture components, and E is equal to \code{length(probs) + 2}. #' If \code{summary = FALSE}, an S x N x K array, where #' S is the number of posterior draws. #' #' @details #' The returned probabilities can be written as #' \eqn{P(Kn = k | Yn)}, that is the posterior probability #' that observation n originates from component k. #' They are computed using Bayes' Theorem #' \deqn{P(Kn = k | Yn) = P(Yn | Kn = k) P(Kn = k) / P(Yn),} #' where \eqn{P(Yn | Kn = k)} is the (posterior) likelihood #' of observation n for component k, \eqn{P(Kn = k)} is #' the (posterior) mixing probability of component k #' (i.e. parameter \code{theta}), and #' \deqn{P(Yn) = \sum (k=1,...,K) P(Yn | Kn = k) P(Kn = k)} #' is a normalizing constant. 
#' #' @examples #' \dontrun{ #' ## simulate some data #' set.seed(1234) #' dat <- data.frame( #' y = c(rnorm(100), rnorm(50, 2)), #' x = rnorm(150) #' ) #' ## fit a simple normal mixture model #' mix <- mixture(gaussian, nmix = 2) #' prior <- c( #' prior(normal(0, 5), Intercept, nlpar = mu1), #' prior(normal(0, 5), Intercept, nlpar = mu2), #' prior(dirichlet(2, 2), theta) #' ) #' fit1 <- brm(bf(y ~ x), dat, family = mix, #' prior = prior, chains = 2, init = 0) #' summary(fit1) #' #' ## compute the membership probabilities #' ppm <- pp_mixture(fit1) #' str(ppm) #' #' ## extract point estimates for each observation #' head(ppm[, 1, ]) #' #' ## classify every observation according to #' ## the most likely component #' apply(ppm[, 1, ], 1, which.max) #' } #' #' @export pp_mixture.brmsfit <- function(x, newdata = NULL, re_formula = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, log = FALSE, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) { log <- as_one_logical(log) contains_draws(x) x <- restructure(x) stopifnot_resp(x, resp) if (is_mv(x)) { resp <- validate_resp(resp, x$formula$responses, multiple = FALSE) family <- x$family[[resp]] } else { family <- x$family } if (!is.mixfamily(family)) { stop2("Method 'pp_mixture' can only be applied to mixture models.") } prep <- prepare_predictions( x, newdata = newdata, re_formula = re_formula, resp = resp, draw_ids = draw_ids, ndraws = ndraws, check_response = TRUE, ... 
) stopifnot(is.brmsprep(prep)) prep$pp_mixture <- TRUE for (dp in names(prep$dpars)) { prep$dpars[[dp]] <- get_dpar(prep, dpar = dp) } N <- choose_N(prep) out <- lapply(seq_len(N), log_lik_mixture, prep = prep) out <- abind(out, along = 3) out <- aperm(out, c(1, 3, 2)) old_order <- prep$old_order sort <- isTRUE(ncol(out) != length(old_order)) out <- reorder_obs(out, old_order, sort = sort) if (!log) { out <- exp(out) } if (summary) { out <- posterior_summary(out, probs = probs, robust = robust) dimnames(out) <- list( seq_len(nrow(out)), colnames(out), paste0("P(K = ", seq_len(dim(out)[3]), " | Y)") ) } out } #' @rdname pp_mixture.brmsfit #' @export pp_mixture <- function(x, ...) { UseMethod("pp_mixture") } brms/R/model_weights.R0000644000176200001440000002730614213413565014407 0ustar liggesusers#' Model Weighting Methods #' #' Compute model weights in various ways, for instance, via #' stacking of posterior predictive distributions, Akaike weights, #' or marginal likelihoods. #' #' @inheritParams loo.brmsfit #' @param weights Name of the criterion to compute weights from. Should be one #' of \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"stacking"} (current #' default), or \code{"bma"}, \code{"pseudobma"}, For the former three #' options, Akaike weights will be computed based on the information criterion #' values returned by the respective methods. For \code{"stacking"} and #' \code{"pseudobma"}, method \code{\link{loo_model_weights}} will be used to #' obtain weights. For \code{"bma"}, method \code{\link{post_prob}} will be #' used to compute Bayesian model averaging weights based on log marginal #' likelihood values (make sure to specify reasonable priors in this case). #' For some methods, \code{weights} may also be a numeric vector of #' pre-specified weights. #' #' @return A numeric vector of weights for the models. 
#'
#' @examples
#' \dontrun{
#' # model with 'treat' as predictor
#' fit1 <- brm(rating ~ treat + period + carry, data = inhaler)
#' summary(fit1)
#'
#' # model without 'treat' as predictor
#' fit2 <- brm(rating ~ period + carry, data = inhaler)
#' summary(fit2)
#'
#' # obtain Akaike weights based on the WAIC
#' model_weights(fit1, fit2, weights = "waic")
#' }
#'
#' @export
model_weights.brmsfit <- function(x, ..., weights = "stacking",
                                  model_names = NULL) {
  weights <- validate_weights_method(weights)
  args <- split_dots(x, ..., model_names = model_names)
  models <- args$models
  args$models <- NULL
  model_names <- names(models)
  if (weights %in% c("loo", "waic", "kfold")) {
    # Akaike weights based on information criteria
    ic_values <- rep(NA, length(models))
    for (k in seq_along(models)) {
      args$x <- models[[k]]
      args$model_names <- model_names[k]
      ic_values[k] <- SW(do_call(weights, args))$estimates[3, 1]
    }
    out <- exp(-(ic_values - min(ic_values)) / 2)
  } else if (weights %in% c("stacking", "pseudobma")) {
    # weights via loo::loo_model_weights
    args <- c(unname(models), args)
    args$method <- weights
    out <- do_call("loo_model_weights", args)
  } else if (weights %in% "bma") {
    # Bayesian model averaging weights based on log marginal likelihoods
    args <- c(unname(models), args)
    out <- do_call("post_prob", args)
  }
  # normalize the weights so that they sum to one
  out <- as.numeric(out)
  out <- out / sum(out)
  names(out) <- model_names
  out
}

#' @rdname model_weights.brmsfit
#' @export
model_weights <- function(x, ...) {
  UseMethod("model_weights")
}

# validate name of the applied weighting method
validate_weights_method <- function(method) {
  method <- tolower(as_one_character(method))
  if (method == "loo2") {
    warning2("Weight method 'loo2' is deprecated. Use 'stacking' instead.")
    method <- "stacking"
  }
  if (method == "marglik") {
    warning2("Weight method 'marglik' is deprecated. Use 'bma' instead.")
    method <- "bma"
  }
  match.arg(method, c("loo", "waic", "kfold", "stacking", "pseudobma", "bma"))
}

#' Posterior predictive draws averaged across models
#'
#' Compute posterior predictive draws averaged across models.
#' Weighting can be done in various ways, for instance using #' Akaike weights based on information criteria or #' marginal likelihoods. #' #' @inheritParams model_weights.brmsfit #' @param method Method used to obtain predictions to average over. Should be #' one of \code{"posterior_predict"} (default), \code{"posterior_epred"}, #' \code{"posterior_linpred"} or \code{"predictive_error"}. #' @param control Optional \code{list} of further arguments #' passed to the function specified in \code{weights}. #' @param ndraws Total number of posterior draws to use. #' @param nsamples Deprecated alias of \code{ndraws}. #' @param seed A single numeric value passed to \code{\link{set.seed}} #' to make results reproducible. #' @param summary Should summary statistics #' (i.e. means, sds, and 95\% intervals) be returned #' instead of the raw values? Default is \code{TRUE}. #' @param robust If \code{FALSE} (the default) the mean is used as #' the measure of central tendency and the standard deviation as #' the measure of variability. If \code{TRUE}, the median and the #' median absolute deviation (MAD) are applied instead. #' Only used if \code{summary} is \code{TRUE}. #' @param probs The percentiles to be computed by the \code{quantile} #' function. Only used if \code{summary} is \code{TRUE}. #' #' @return Same as the output of the method specified #' in argument \code{method}. #' #' @details Weights are computed with the \code{\link{model_weights}} method. 
#' #' @seealso \code{\link{model_weights}}, \code{\link{posterior_average}} #' #' @examples #' \dontrun{ #' # model with 'treat' as predictor #' fit1 <- brm(rating ~ treat + period + carry, data = inhaler) #' summary(fit1) #' #' # model without 'treat' as predictor #' fit2 <- brm(rating ~ period + carry, data = inhaler) #' summary(fit2) #' #' # compute model-averaged predicted values #' (df <- unique(inhaler[, c("treat", "period", "carry")])) #' pp_average(fit1, fit2, newdata = df) #' #' # compute model-averaged fitted values #' pp_average(fit1, fit2, method = "fitted", newdata = df) #' } #' #' @export pp_average.brmsfit <- function( x, ..., weights = "stacking", method = "posterior_predict", ndraws = NULL, nsamples = NULL, summary = TRUE, probs = c(0.025, 0.975), robust = FALSE, model_names = NULL, control = list(), seed = NULL ) { if (!is.null(seed)) { set.seed(seed) } method <- validate_pp_method(method) ndraws <- use_alias(ndraws, nsamples) if (any(c("draw_ids", "subset") %in% names(list(...)))) { stop2("Cannot use argument 'draw_ids' in pp_average.") } args <- split_dots(x, ..., model_names = model_names) args$summary <- FALSE models <- args$models args$models <- NULL if (!match_response(models)) { stop2("Can only average models predicting the same response.") } if (is.null(ndraws)) { ndraws <- ndraws(models[[1]]) } ndraws <- as_one_integer(ndraws) weights <- validate_weights(weights, models, control) ndraws <- round_largest_remainder(weights * ndraws) names(weights) <- names(ndraws) <- names(models) out <- named_list(names(models)) for (i in seq_along(out)) { if (ndraws[i] > 0) { args$object <- models[[i]] args$ndraws <- ndraws[i] out[[i]] <- do_call(method, args) } } out <- do_call(rbind, out) if (summary) { out <- posterior_summary(out, probs = probs, robust = robust) } attr(out, "weights") <- weights attr(out, "ndraws") <- ndraws out } #' @rdname pp_average.brmsfit #' @export pp_average <- function(x, ...) 
{ UseMethod("pp_average") } # validate weights passed to model averaging functions # see pp_average.brmsfit for more documentation validate_weights <- function(weights, models, control = list()) { if (!is.numeric(weights)) { weight_args <- c(unname(models), control) weight_args$weights <- weights weights <- do_call(model_weights, weight_args) } else { if (length(weights) != length(models)) { stop2("If numeric, 'weights' must have the same length ", "as the number of models.") } if (any(weights < 0)) { stop2("If numeric, 'weights' must be positive.") } } weights / sum(weights) } #' Posterior draws of parameters averaged across models #' #' Extract posterior draws of parameters averaged across models. #' Weighting can be done in various ways, for instance using #' Akaike weights based on information criteria or #' marginal likelihoods. #' #' @inheritParams pp_average.brmsfit #' @param variable Names of variables (parameters) for which to average across #' models. Only those variables can be averaged that appear in every model. #' Defaults to all overlapping variables. #' @param pars Deprecated alias of \code{variable}. #' @param missing An optional numeric value or a named list of numeric values #' to use if a model does not contain a variable for which posterior draws #' should be averaged. Defaults to \code{NULL}, in which case only those #' variables can be averaged that are present in all of the models. #' #' @return A \code{data.frame} of posterior draws. #' #' @details Weights are computed with the \code{\link{model_weights}} method. 
#' #' @seealso \code{\link{model_weights}}, \code{\link{pp_average}} #' #' @examples #' \dontrun{ #' # model with 'treat' as predictor #' fit1 <- brm(rating ~ treat + period + carry, data = inhaler) #' summary(fit1) #' #' # model without 'treat' as predictor #' fit2 <- brm(rating ~ period + carry, data = inhaler) #' summary(fit2) #' #' # compute model-averaged posteriors of overlapping parameters #' posterior_average(fit1, fit2, weights = "waic") #' } #' #' @export posterior_average.brmsfit <- function( x, ..., variable = NULL, pars = NULL, weights = "stacking", ndraws = NULL, nsamples = NULL, missing = NULL, model_names = NULL, control = list(), seed = NULL ) { if (!is.null(seed)) { set.seed(seed) } variable <- use_alias(variable, pars) ndraws <- use_alias(ndraws, nsamples) models <- split_dots(x, ..., model_names = model_names, other = FALSE) vars_list <- lapply(models, variables) all_vars <- unique(unlist(vars_list)) if (is.null(missing)) { common_vars <- lapply(vars_list, function(x) all_vars %in% x) common_vars <- all_vars[Reduce("&", common_vars)] if (is.null(variable)) { variable <- setdiff(common_vars, "lp__") } variable <- as.character(variable) inv_vars <- setdiff(variable, common_vars) if (length(inv_vars)) { inv_vars <- collapse_comma(inv_vars) stop2( "Parameters ", inv_vars, " cannot be found in all ", "of the models. Consider using argument 'missing'." 
) } } else { if (is.null(variable)) { variable <- setdiff(all_vars, "lp__") } variable <- as.character(variable) inv_vars <- setdiff(variable, all_vars) if (length(inv_vars)) { inv_vars <- collapse_comma(inv_vars) stop2("Parameters ", inv_vars, " cannot be found in any of the models.") } if (is.list(missing)) { all_miss_vars <- unique(ulapply( models, function(m) setdiff(variable, variables(m)) )) inv_vars <- setdiff(all_miss_vars, names(missing)) if (length(inv_vars)) { stop2("Argument 'missing' has no value for parameters ", collapse_comma(inv_vars), ".") } missing <- lapply(missing, as_one_numeric, allow_na = TRUE) } else { missing <- as_one_numeric(missing, allow_na = TRUE) missing <- named_list(variable, missing) } } if (is.null(ndraws)) { ndraws <- ndraws(models[[1]]) } ndraws <- as_one_integer(ndraws) weights <- validate_weights(weights, models, control) ndraws <- round_largest_remainder(weights * ndraws) names(weights) <- names(ndraws) <- names(models) out <- named_list(names(models)) for (i in seq_along(out)) { if (ndraws[i] > 0) { draw <- sample(seq_len(ndraws(models[[i]])), ndraws[i]) draw <- sort(draw) found_vars <- intersect(variable, variables(models[[i]])) if (length(found_vars)) { out[[i]] <- as.data.frame( models[[i]], variable = found_vars, draw = draw ) } else { out[[i]] <- as.data.frame(matrix( numeric(0), nrow = ndraws[i], ncol = 0 )) } if (!is.null(missing)) { miss_vars <- setdiff(variable, names(out[[i]])) if (length(miss_vars)) { out[[i]][miss_vars] <- missing[miss_vars] } } } } out <- do_call(rbind, out) rownames(out) <- NULL attr(out, "weights") <- weights attr(out, "ndraws") <- ndraws out } #' @rdname posterior_average.brmsfit #' @export posterior_average <- function(x, ...) 
{ UseMethod("posterior_average") } brms/R/posterior_epred.R0000644000176200001440000007372514454230276014773 0ustar liggesusers#' Draws from the Expected Value of the Posterior Predictive Distribution #' #' Compute posterior draws of the expected value of the posterior predictive #' distribution. Can be performed for the data used to fit the model (posterior #' predictive checks) or for new data. By definition, these predictions have #' smaller variance than the posterior predictions performed by the #' \code{\link{posterior_predict.brmsfit}} method. This is because only the #' uncertainty in the expected value of the posterior predictive distribution is #' incorporated in the draws computed by \code{posterior_epred} while the #' residual error is ignored there. However, the estimated means of both methods #' averaged across draws should be very similar. #' #' @aliases pp_expect #' #' @inheritParams posterior_predict.brmsfit #' @param dpar Optional name of a predicted distributional parameter. #' If specified, expected predictions of this parameters are returned. #' @param nlpar Optional name of a predicted non-linear parameter. #' If specified, expected predictions of this parameters are returned. #' #' @return An \code{array} of draws. For #' categorical and ordinal models, the output is an S x N x C array. #' Otherwise, the output is an S x N matrix, where S is the number of #' posterior draws, N is the number of observations, and C is the number of #' categories. In multivariate models, an additional dimension is added to the #' output which indexes along the different response variables. 
#' #' @template details-newdata-na #' @template details-allow_new_levels #' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' #' ## compute expected predictions #' ppe <- posterior_epred(fit) #' str(ppe) #' } #' #' @aliases posterior_epred #' @method posterior_epred brmsfit #' @importFrom rstantools posterior_epred #' @export posterior_epred #' @export posterior_epred.brmsfit <- function(object, newdata = NULL, re_formula = NULL, re.form = NULL, resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ...) { cl <- match.call() if ("re.form" %in% names(cl) && !missing(re.form)) { re_formula <- re.form } contains_draws(object) object <- restructure(object) prep <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = FALSE, ... ) posterior_epred( prep, dpar = dpar, nlpar = nlpar, sort = sort, scale = "response", summary = FALSE ) } #' @export posterior_epred.mvbrmsprep <- function(object, ...) { out <- lapply(object$resps, posterior_epred, ...) along <- ifelse(length(out) > 1L, 3, 2) do_call(abind, c(out, along = along)) } #' @export posterior_epred.brmsprep <- function(object, dpar, nlpar, sort, scale = "response", incl_thres = NULL, summary = FALSE, robust = FALSE, probs = c(0.025, 0.975), ...) { summary <- as_one_logical(summary) dpars <- names(object$dpars) nlpars <- names(object$nlpars) if (length(dpar)) { # predict a distributional parameter dpar <- as_one_character(dpar) if (!dpar %in% dpars) { stop2("Invalid argument 'dpar'. 
Valid distributional ", "parameters are: ", collapse_comma(dpars)) } if (length(nlpar)) { stop2("Cannot use 'dpar' and 'nlpar' at the same time.") } predicted <- is.bprepl(object$dpars[[dpar]]) || is.bprepnl(object$dpars[[dpar]]) if (predicted) { # parameter varies across observations if (scale == "linear") { object$dpars[[dpar]]$family$link <- "identity" } if (is_ordinal(object$family)) { object$dpars[[dpar]]$cs <- NULL object$family <- object$dpars[[dpar]]$family <- .dpar_family(link = object$dpars[[dpar]]$family$link) } if (dpar_class(dpar) == "theta" && scale == "response") { ap_id <- as.numeric(dpar_id(dpar)) out <- get_theta(object)[, , ap_id, drop = FALSE] dim(out) <- dim(out)[c(1, 2)] } else { out <- get_dpar(object, dpar = dpar, inv_link = TRUE) } } else { # parameter is constant across observations out <- object$dpars[[dpar]] out <- matrix(out, nrow = object$ndraws, ncol = object$nobs) } } else if (length(nlpar)) { # predict a non-linear parameter nlpar <- as_one_character(nlpar) if (!nlpar %in% nlpars) { stop2("Invalid argument 'nlpar'. Valid non-linear ", "parameters are: ", collapse_comma(nlpars)) } out <- get_nlpar(object, nlpar = nlpar) } else { # no dpar or nlpar specified incl_thres <- as_one_logical(incl_thres %||% FALSE) incl_thres <- incl_thres && is_ordinal(object$family) && scale == "linear" if (incl_thres) { # extract linear predictor array with thresholds etc. 
included if (is.mixfamily(object$family)) { stop2("'incl_thres' is not supported for mixture models.") } object$family$link <- "identity" } if (scale == "response" || incl_thres) { # predict the mean of the response distribution for (nlp in nlpars) { object$nlpars[[nlp]] <- get_nlpar(object, nlpar = nlp) } for (dp in dpars) { object$dpars[[dp]] <- get_dpar(object, dpar = dp) } if (is_trunc(object)) { out <- posterior_epred_trunc(object) } else { posterior_epred_fun <- paste0("posterior_epred_", object$family$family) posterior_epred_fun <- get(posterior_epred_fun, asNamespace("brms")) out <- posterior_epred_fun(object) } } else { # return results on the linear scale # extract all 'mu' parameters if (conv_cats_dpars(object$family)) { out <- dpars[grepl("^mu", dpars)] } else { out <- dpars[dpar_class(dpars) %in% "mu"] } if (length(out) == 1L) { out <- get_dpar(object, dpar = out, inv_link = FALSE) } else { # multiple mu parameters in categorical or mixture models out <- lapply(out, get_dpar, prep = object, inv_link = FALSE) out <- abind::abind(out, along = 3) } } } if (is.null(dim(out))) { out <- as.matrix(out) } colnames(out) <- NULL out <- reorder_obs(out, object$old_order, sort = sort) if (scale == "response" && is_polytomous(object$family) && length(dim(out)) == 3L && dim(out)[3] == length(object$cats)) { # for ordinal models with varying thresholds, dim[3] may not match cats dimnames(out)[[3]] <- object$cats } if (summary) { # only for compatibility with the 'fitted' method out <- posterior_summary(out, probs = probs, robust = robust) if (is_polytomous(object$family) && length(dim(out)) == 3L) { if (scale == "linear") { dimnames(out)[[3]] <- paste0("eta", seq_dim(out, 3)) } else { dimnames(out)[[3]] <- paste0("P(Y = ", dimnames(out)[[3]], ")") } } } out } #' Expected Values of the Posterior Predictive Distribution #' #' This method is an alias of \code{\link{posterior_epred.brmsfit}} #' with additional arguments for obtaining summaries of the computed draws. 
#' #' @inheritParams posterior_epred.brmsfit #' @param object An object of class \code{brmsfit}. #' @param scale Either \code{"response"} or \code{"linear"}. #' If \code{"response"}, results are returned on the scale #' of the response variable. If \code{"linear"}, #' results are returned on the scale of the linear predictor term, #' that is without applying the inverse link function or #' other transformations. #' @param summary Should summary statistics be returned #' instead of the raw values? Default is \code{TRUE}.. #' @param robust If \code{FALSE} (the default) the mean is used as #' the measure of central tendency and the standard deviation as #' the measure of variability. If \code{TRUE}, the median and the #' median absolute deviation (MAD) are applied instead. #' Only used if \code{summary} is \code{TRUE}. #' @param probs The percentiles to be computed by the \code{quantile} #' function. Only used if \code{summary} is \code{TRUE}. #' #' @return An \code{array} of predicted \emph{mean} response values. #' If \code{summary = FALSE} the output resembles those of #' \code{\link{posterior_epred.brmsfit}}. #' #' If \code{summary = TRUE} the output depends on the family: For categorical #' and ordinal families, the output is an N x E x C array, where N is the #' number of observations, E is the number of summary statistics, and C is the #' number of categories. For all other families, the output is an N x E #' matrix. The number of summary statistics E is equal to \code{2 + #' length(probs)}: The \code{Estimate} column contains point estimates (either #' mean or median depending on argument \code{robust}), while the #' \code{Est.Error} column contains uncertainty estimates (either standard #' deviation or median absolute deviation depending on argument #' \code{robust}). The remaining columns starting with \code{Q} contain #' quantile estimates as specified via argument \code{probs}. 
#' #' In multivariate models, an additional dimension is added to the output #' which indexes along the different response variables. #' #' @seealso \code{\link{posterior_epred.brmsfit}} #' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' #' ## compute expected predictions #' fitted_values <- fitted(fit) #' head(fitted_values) #' #' ## plot expected predictions against actual response #' dat <- as.data.frame(cbind(Y = standata(fit)$Y, fitted_values)) #' ggplot(dat) + geom_point(aes(x = Estimate, y = Y)) #' } #' #' @export fitted.brmsfit <- function(object, newdata = NULL, re_formula = NULL, scale = c("response", "linear"), resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) { scale <- match.arg(scale) summary <- as_one_logical(summary) contains_draws(object) object <- restructure(object) prep <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = FALSE, ... ) posterior_epred( prep, dpar = dpar, nlpar = nlpar, sort = sort, scale = scale, summary = summary, robust = robust, probs = probs ) } #' Posterior Draws of the Linear Predictor #' #' Compute posterior draws of the linear predictor, that is draws before #' applying any link functions or other transformations. Can be performed for #' the data used to fit the model (posterior predictive checks) or for new data. #' #' @inheritParams posterior_epred.brmsfit #' @param object An object of class \code{brmsfit}. #' @param transform Logical; if \code{FALSE} #' (the default), draws of the linear predictor are returned. #' If \code{TRUE}, draws of the transformed linear predictor, #' that is, after applying the inverse link function are returned. #' @param dpar Name of a predicted distributional parameter #' for which draws are to be returned. 
By default, draws #' of the main distributional parameter(s) \code{"mu"} are returned. #' @param incl_thres Logical; only relevant for ordinal models when #' \code{transform} is \code{FALSE}, and ignored otherwise. Shall the #' thresholds and category-specific effects be included in the linear #' predictor? For backwards compatibility, the default is to not include them. #' #' @seealso \code{\link{posterior_epred.brmsfit}} #' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' #' ## extract linear predictor values #' pl <- posterior_linpred(fit) #' str(pl) #' } #' #' @aliases posterior_linpred #' @method posterior_linpred brmsfit #' @importFrom rstantools posterior_linpred #' @export #' @export posterior_linpred posterior_linpred.brmsfit <- function( object, transform = FALSE, newdata = NULL, re_formula = NULL, re.form = NULL, resp = NULL, dpar = NULL, nlpar = NULL, incl_thres = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ... ) { cl <- match.call() if ("re.form" %in% names(cl) && !missing(re.form)) { re_formula <- re.form } scale <- "linear" transform <- as_one_logical(transform) if (transform) { scale <- "response" # if transform, return inv-link draws of only a single # distributional or non-linear parameter for consistency # of brms and rstanarm if (is.null(dpar) && is.null(nlpar)) { dpar <- "mu" } } contains_draws(object) object <- restructure(object) prep <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = FALSE, ... 
) posterior_epred( prep, dpar = dpar, nlpar = nlpar, sort = sort, scale = scale, incl_thres = incl_thres, summary = FALSE ) } # ------------------- family specific posterior_epred methods --------------------- # All posterior_epred_ functions have the same arguments structure # @param prep A named list returned by prepare_predictions containing # all required data and draws # @return transformed linear predictor representing the mean # of the posterior predictive distribution posterior_epred_gaussian <- function(prep) { if (!is.null(prep$ac$lagsar)) { prep$dpars$mu <- posterior_epred_lagsar(prep) } prep$dpars$mu } posterior_epred_student <- function(prep) { if (!is.null(prep$ac$lagsar)) { prep$dpars$mu <- posterior_epred_lagsar(prep) } prep$dpars$mu } posterior_epred_skew_normal <- function(prep) { prep$dpars$mu } posterior_epred_lognormal <- function(prep) { with(prep$dpars, exp(mu + sigma^2 / 2)) } posterior_epred_shifted_lognormal <- function(prep) { with(prep$dpars, exp(mu + sigma^2 / 2) + ndt) } posterior_epred_binomial <- function(prep) { trials <- data2draws(prep$data$trials, dim_mu(prep)) prep$dpars$mu * trials } posterior_epred_beta_binomial <- function(prep) { # beta part included in mu trials <- data2draws(prep$data$trials, dim_mu(prep)) prep$dpars$mu * trials } posterior_epred_bernoulli <- function(prep) { prep$dpars$mu } posterior_epred_poisson <- function(prep) { multiply_dpar_rate_denom(prep$dpars$mu, prep) } posterior_epred_negbinomial <- function(prep) { multiply_dpar_rate_denom(prep$dpars$mu, prep) } posterior_epred_negbinomial2 <- function(prep) { multiply_dpar_rate_denom(prep$dpars$mu, prep) } posterior_epred_geometric <- function(prep) { multiply_dpar_rate_denom(prep$dpars$mu, prep) } posterior_epred_discrete_weibull <- function(prep) { mean_discrete_weibull(prep$dpars$mu, prep$dpars$shape) } posterior_epred_com_poisson <- function(prep) { mean_com_poisson(prep$dpars$mu, prep$dpars$shape) } posterior_epred_exponential <- function(prep) { 
prep$dpars$mu } posterior_epred_gamma <- function(prep) { prep$dpars$mu } posterior_epred_weibull <- function(prep) { prep$dpars$mu } posterior_epred_frechet <- function(prep) { prep$dpars$mu } posterior_epred_gen_extreme_value <- function(prep) { with(prep$dpars, mu + sigma * (gamma(1 - xi) - 1) / xi) } posterior_epred_inverse.gaussian <- function(prep) { prep$dpars$mu } posterior_epred_exgaussian <- function(prep) { prep$dpars$mu } posterior_epred_wiener <- function(prep) { # obtained from https://doi.org/10.1016/j.jmp.2009.01.006 # mu is the drift rate with(prep$dpars, ndt - bias / mu + bs / mu * (exp(-2 * mu * bias) - 1) / (exp(-2 * mu * bs) - 1) ) } posterior_epred_beta <- function(prep) { prep$dpars$mu } posterior_epred_von_mises <- function(prep) { prep$dpars$mu } posterior_epred_asym_laplace <- function(prep) { with(prep$dpars, mu + sigma * (1 - 2 * quantile) / (quantile * (1 - quantile)) ) } posterior_epred_zero_inflated_asym_laplace <- function(prep) { posterior_epred_asym_laplace(prep) * (1 - prep$dpars$zi) } posterior_epred_cox <- function(prep) { stop2("Cannot compute expected values of the posterior predictive ", "distribution for family 'cox'.") } posterior_epred_hurdle_poisson <- function(prep) { with(prep$dpars, mu / (1 - exp(-mu)) * (1 - hu)) } posterior_epred_hurdle_negbinomial <- function(prep) { with(prep$dpars, mu / (1 - (shape / (mu + shape))^shape) * (1 - hu)) } posterior_epred_hurdle_gamma <- function(prep) { with(prep$dpars, mu * (1 - hu)) } posterior_epred_hurdle_lognormal <- function(prep) { with(prep$dpars, exp(mu + sigma^2 / 2) * (1 - hu)) } posterior_epred_hurdle_cumulative <- function(prep) { adjust <- ifelse(prep$family$link == "identity", 0, 1) ncat_max <- max(prep$data$nthres) + adjust nact_min <- min(prep$data$nthres) + adjust init_mat <- matrix( ifelse(prep$family$link == "identity", NA, 0), nrow = prep$ndraws, ncol = ncat_max - nact_min ) args <- list(link = prep$family$link) out <- vector("list", prep$nobs) for (i in 
seq_along(out)) { args_i <- args args_i$eta <- slice_col(get_dpar(prep, "mu", i)) args_i$disc <- slice_col(get_dpar(prep, "disc", i)) args_i$thres <- subset_thres(prep, i) ncat_i <- NCOL(args_i$thres) + adjust args_i$x <- seq_len(ncat_i) out[[i]] <- do_call(dcumulative, args_i) if (ncat_i < ncat_max) { sel <- seq_len(ncat_max - ncat_i) out[[i]] <- cbind(out[[i]], init_mat[, sel]) } hu <- get_dpar(prep, "hu", i) out[[i]] <- cbind(hu, out[[i]] * (1 - hu)) } out <- abind(out, along = 3) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- c(paste0(0), seq_len(ncat_max)) out } posterior_epred_zero_inflated_poisson <- function(prep) { with(prep$dpars, mu * (1 - zi)) } posterior_epred_zero_inflated_negbinomial <- function(prep) { with(prep$dpars, mu * (1 - zi)) } posterior_epred_zero_inflated_binomial <- function(prep) { trials <- data2draws(prep$data$trials, dim_mu(prep)) prep$dpars$mu * trials * (1 - prep$dpars$zi) } posterior_epred_zero_inflated_beta_binomial <- function(prep) { # same as zero_inflated_binom as beta part is included in mu trials <- data2draws(prep$data$trials, dim_mu(prep)) prep$dpars$mu * trials * (1 - prep$dpars$zi) } posterior_epred_zero_inflated_beta <- function(prep) { with(prep$dpars, mu * (1 - zi)) } posterior_epred_zero_one_inflated_beta <- function(prep) { with(prep$dpars, zoi * coi + mu * (1 - zoi)) } posterior_epred_categorical <- function(prep) { get_probs <- function(i) { eta <- insert_refcat(slice_col(eta, i), refcat = prep$refcat) dcategorical(cats, eta = eta) } eta <- get_Mu(prep) cats <- seq_len(prep$data$ncat) out <- abind(lapply(seq_cols(eta), get_probs), along = 3) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- prep$cats out } posterior_epred_multinomial <- function(prep) { get_counts <- function(i) { eta <- insert_refcat(slice_col(eta, i), refcat = prep$refcat) dcategorical(cats, eta = eta) * trials[i] } eta <- get_Mu(prep) cats <- seq_len(prep$data$ncat) trials <- prep$data$trials out <- 
abind(lapply(seq_cols(eta), get_counts), along = 3) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- prep$cats out } posterior_epred_dirichlet <- function(prep) { get_probs <- function(i) { eta <- insert_refcat(slice_col(eta, i), refcat = prep$refcat) dcategorical(cats, eta = eta) } eta <- get_Mu(prep) cats <- seq_len(prep$data$ncat) out <- abind(lapply(seq_cols(eta), get_probs), along = 3) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- prep$cats out } posterior_epred_dirichlet2 <- function(prep) { mu <- get_Mu(prep) sums_mu <- apply(mu, 1:2, sum) cats <- seq_len(prep$data$ncat) for (i in cats) { mu[, , i] <- mu[, , i] / sums_mu } dimnames(mu)[[3]] <- prep$cats mu } posterior_epred_logistic_normal <- function(prep) { stop2("Cannot compute expected values of the posterior predictive ", "distribution for family 'logistic_normal'.") } posterior_epred_cumulative <- function(prep) { posterior_epred_ordinal(prep) } posterior_epred_sratio <- function(prep) { posterior_epred_ordinal(prep) } posterior_epred_cratio <- function(prep) { posterior_epred_ordinal(prep) } posterior_epred_acat <- function(prep) { posterior_epred_ordinal(prep) } posterior_epred_custom <- function(prep) { custom_family_method(prep$family, "posterior_epred")(prep) } posterior_epred_mixture <- function(prep) { families <- family_names(prep$family) prep$dpars$theta <- get_theta(prep) out <- 0 for (j in seq_along(families)) { posterior_epred_fun <- paste0("posterior_epred_", families[j]) posterior_epred_fun <- get(posterior_epred_fun, asNamespace("brms")) tmp_prep <- pseudo_prep_for_mixture(prep, j) if (length(dim(prep$dpars$theta)) == 3L) { theta <- prep$dpars$theta[, , j] } else { theta <- prep$dpars$theta[, j] } out <- out + theta * posterior_epred_fun(tmp_prep) } out } # ------ posterior_epred helper functions ------ # compute 'posterior_epred' for ordinal models posterior_epred_ordinal <- function(prep) { dens <- get(paste0("d", prep$family$family), mode = "function") # the 
linear scale has one column less than the response scale adjust <- ifelse(prep$family$link == "identity", 0, 1) ncat_max <- max(prep$data$nthres) + adjust nact_min <- min(prep$data$nthres) + adjust init_mat <- matrix(ifelse(prep$family$link == "identity", NA, 0), nrow = prep$ndraws, ncol = ncat_max - nact_min) args <- list(link = prep$family$link) out <- vector("list", prep$nobs) for (i in seq_along(out)) { args_i <- args args_i$eta <- slice_col(prep$dpars$mu, i) args_i$disc <- slice_col(prep$dpars$disc, i) args_i$thres <- subset_thres(prep, i) ncat_i <- NCOL(args_i$thres) + adjust args_i$x <- seq_len(ncat_i) out[[i]] <- do_call(dens, args_i) if (ncat_i < ncat_max) { sel <- seq_len(ncat_max - ncat_i) out[[i]] <- cbind(out[[i]], init_mat[, sel]) } } out <- abind(out, along = 3) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- seq_len(ncat_max) out } # compute 'posterior_epred' for lagsar models posterior_epred_lagsar <- function(prep) { stopifnot(!is.null(prep$ac$lagsar)) I <- diag(prep$nobs) .posterior_epred <- function(s) { IB <- I - with(prep$ac, lagsar[s, ] * Msar) as.numeric(solve(IB, prep$dpars$mu[s, ])) } out <- rblapply(seq_len(prep$ndraws), .posterior_epred) rownames(out) <- NULL out } # expand data to dimension appropriate for # vectorized multiplication with posterior draws data2draws <- function(x, dim) { stopifnot(length(dim) %in% 2:3) if (length(dim) == 2) { # expand vector into a matrix of draws stopifnot(length(x) %in% c(1, dim[2])) out <- matrix(x, nrow = dim[1], ncol = dim[2], byrow = TRUE) } else { # expand matrix into an array of draws stopifnot(length(x) == 1 || is_equal(dim(x), dim[2:3])) out <- array(x, dim = c(dim[2:3], dim[1])) out <- aperm(out, perm = c(3, 1, 2)) } out } # expected dimension of the main parameter 'mu' dim_mu <- function(prep) { c(prep$ndraws, prep$nobs) } # is the model truncated? 
is_trunc <- function(prep) { stopifnot(is.brmsprep(prep)) any(prep$data[["lb"]] > -Inf) || any(prep$data[["ub"]] < Inf) } # prepares data required for truncation and calles the # family specific truncation function for posterior_epred values posterior_epred_trunc <- function(prep) { stopifnot(is_trunc(prep)) lb <- data2draws(prep$data[["lb"]], dim_mu(prep)) ub <- data2draws(prep$data[["ub"]], dim_mu(prep)) posterior_epred_trunc_fun <- paste0("posterior_epred_trunc_", prep$family$family) posterior_epred_trunc_fun <- try( get(posterior_epred_trunc_fun, asNamespace("brms")), silent = TRUE ) if (is_try_error(posterior_epred_trunc_fun)) { stop2("posterior_epred values on the respone scale not yet implemented ", "for truncated '", prep$family$family, "' models.") } trunc_args <- nlist(prep, lb, ub) do_call(posterior_epred_trunc_fun, trunc_args) } # ----- family specific truncation functions ----- # @param prep output of 'prepare_predictions' # @param lb lower truncation bound # @param ub upper truncation bound # @return draws of the truncated mean parameter posterior_epred_trunc_gaussian <- function(prep, lb, ub) { zlb <- (lb - prep$dpars$mu) / prep$dpars$sigma zub <- (ub - prep$dpars$mu) / prep$dpars$sigma # truncated mean of standard normal; see Wikipedia trunc_zmean <- (dnorm(zlb) - dnorm(zub)) / (pnorm(zub) - pnorm(zlb)) prep$dpars$mu + trunc_zmean * prep$dpars$sigma } posterior_epred_trunc_student <- function(prep, lb, ub) { zlb <- with(prep$dpars, (lb - mu) / sigma) zub <- with(prep$dpars, (ub - mu) / sigma) nu <- prep$dpars$nu # see Kim 2008: Moments of truncated Student-t distribution G1 <- gamma((nu - 1) / 2) * nu^(nu / 2) / (2 * (pt(zub, df = nu) - pt(zlb, df = nu)) * gamma(nu / 2) * gamma(0.5)) A <- (nu + zlb^2) ^ (-(nu - 1) / 2) B <- (nu + zub^2) ^ (-(nu - 1) / 2) trunc_zmean <- G1 * (A - B) prep$dpars$mu + trunc_zmean * prep$dpars$sigma } posterior_epred_trunc_lognormal <- function(prep, lb, ub) { lb <- ifelse(lb < 0, 0, lb) m1 <- with(prep$dpars, exp(mu + 
sigma^2 / 2) * (pnorm((log(ub) - mu) / sigma - sigma) - pnorm((log(lb) - mu) / sigma - sigma)) ) with(prep$dpars, m1 / (plnorm(ub, meanlog = mu, sdlog = sigma) - plnorm(lb, meanlog = mu, sdlog = sigma)) ) } posterior_epred_trunc_gamma <- function(prep, lb, ub) { lb <- ifelse(lb < 0, 0, lb) prep$dpars$scale <- prep$dpars$mu / prep$dpars$shape # see Jawitz 2004: Moments of truncated continuous univariate distributions m1 <- with(prep$dpars, scale / gamma(shape) * (incgamma(1 + shape, ub / scale) - incgamma(1 + shape, lb / scale)) ) with(prep$dpars, m1 / (pgamma(ub, shape, scale = scale) - pgamma(lb, shape, scale = scale)) ) } posterior_epred_trunc_exponential <- function(prep, lb, ub) { lb <- ifelse(lb < 0, 0, lb) inv_mu <- 1 / prep$dpars$mu # see Jawitz 2004: Moments of truncated continuous univariate distributions m1 <- with(prep$dpars, mu * (incgamma(2, ub / mu) - incgamma(2, lb / mu))) m1 / (pexp(ub, rate = inv_mu) - pexp(lb, rate = inv_mu)) } posterior_epred_trunc_weibull <- function(prep, lb, ub) { lb <- ifelse(lb < 0, 0, lb) prep$dpars$a <- 1 + 1 / prep$dpars$shape prep$dpars$scale <- with(prep$dpars, mu / gamma(a)) # see Jawitz 2004: Moments of truncated continuous univariate distributions m1 <- with(prep$dpars, scale * (incgamma(a, (ub / scale)^shape) - incgamma(a, (lb / scale)^shape)) ) with(prep$dpars, m1 / (pweibull(ub, shape, scale = scale) - pweibull(lb, shape, scale = scale)) ) } posterior_epred_trunc_binomial <- function(prep, lb, ub) { lb <- ifelse(lb < -1, -1, lb) max_value <- max(prep$data$trials) ub <- ifelse(ub > max_value, max_value, ub) trials <- prep$data$trials if (length(trials) > 1) { trials <- data2draws(trials, dim_mu(prep)) } args <- list(size = trials, prob = prep$dpars$mu) posterior_epred_trunc_discrete(dist = "binom", args = args, lb = lb, ub = ub) } posterior_epred_trunc_poisson <- function(prep, lb, ub) { lb <- ifelse(lb < -1, -1, lb) mu <- multiply_dpar_rate_denom(prep$dpars$mu, prep) max_value <- 3 * max(mu) ub <- ifelse(ub > 
max_value, max_value, ub) args <- list(lambda = mu) posterior_epred_trunc_discrete(dist = "pois", args = args, lb = lb, ub = ub) } posterior_epred_trunc_negbinomial <- function(prep, lb, ub) { lb <- ifelse(lb < -1, -1, lb) mu <- multiply_dpar_rate_denom(prep$dpars$mu, prep) max_value <- 3 * max(mu) ub <- ifelse(ub > max_value, max_value, ub) shape <- multiply_dpar_rate_denom(prep$dpars$shape, prep) args <- list(mu = mu, size = shape) posterior_epred_trunc_discrete(dist = "nbinom", args = args, lb = lb, ub = ub) } posterior_epred_trunc_negbinomial2 <- function(prep, lb, ub) { lb <- ifelse(lb < -1, -1, lb) mu <- multiply_dpar_rate_denom(prep$dpars$mu, prep) max_value <- 3 * max(mu) ub <- ifelse(ub > max_value, max_value, ub) shape <- multiply_dpar_rate_denom(1 / prep$dpars$sigma, prep) args <- list(mu = mu, size = shape) posterior_epred_trunc_discrete(dist = "nbinom", args = args, lb = lb, ub = ub) } posterior_epred_trunc_geometric <- function(prep, lb, ub) { lb <- ifelse(lb < -1, -1, lb) mu <- multiply_dpar_rate_denom(prep$dpars$mu, prep) max_value <- 3 * max(mu) ub <- ifelse(ub > max_value, max_value, ub) shape <- multiply_dpar_rate_denom(1, prep) args <- list(mu = mu, size = shape) posterior_epred_trunc_discrete(dist = "nbinom", args = args, lb = lb, ub = ub) } # posterior_epred values for truncated discrete distributions posterior_epred_trunc_discrete <- function(dist, args, lb, ub) { stopifnot(is.matrix(lb), is.matrix(ub)) message( "Computing posterior_epred values for truncated ", "discrete models may take a while." 
) pdf <- get(paste0("d", dist), mode = "function") cdf <- get(paste0("p", dist), mode = "function") mean_kernel <- function(x, args) { # just x * density(x) x * do_call(pdf, c(x, args)) } if (any(is.infinite(c(lb, ub)))) { stop("lb and ub must be finite") } # simplify lb and ub back to vector format vec_lb <- lb[1, ] vec_ub <- ub[1, ] min_lb <- min(vec_lb) # array of dimension S x N x length((lb+1):ub) mk <- lapply((min_lb + 1):max(vec_ub), mean_kernel, args = args) mk <- do_call(abind, c(mk, along = 3)) m1 <- vector("list", ncol(mk)) for (n in seq_along(m1)) { # summarize only over non-truncated values for this observation J <- (vec_lb[n] - min_lb + 1):(vec_ub[n] - min_lb) m1[[n]] <- rowSums(mk[, n, ][, J, drop = FALSE]) } rm(mk) m1 <- do.call(cbind, m1) m1 / (do.call(cdf, c(list(ub), args)) - do.call(cdf, c(list(lb), args))) } #' @export pp_expect <- function(object, ...) { warning2("Method 'pp_expect' is deprecated. ", "Please use 'posterior_epred' instead.") UseMethod("posterior_epred") } brms/R/datasets.R0000644000176200001440000001533614213413565013365 0ustar liggesusers#' Infections in kidney patients #' #' @description This dataset, originally discussed in #' McGilchrist and Aisbett (1991), describes the first and second #' (possibly right censored) recurrence time of #' infection in kidney patients using portable dialysis equipment. #' In addition, information on the risk variables age, sex and disease #' type is provided. #' #' @format A data frame of 76 observations containing #' information on the following 7 variables. 
#' \describe{ #' \item{time}{The time to first or second recurrence of the infection, #' or the time of censoring} #' \item{recur}{A factor of levels \code{1} or \code{2} #' indicating if the infection recurred for the first #' or second time for this patient} #' \item{censored}{Either \code{0} or \code{1}, where \code{0} indicates #' no censoring of recurrence time and \code{1} indicates right censoring} #' \item{patient}{The patient number} #' \item{age}{The age of the patient} #' \item{sex}{The sex of the patient} #' \item{disease}{A factor of levels \code{other, GN, AN}, #' and \code{PKD} specifying the type of disease} #' } #' #' @source McGilchrist, C. A., & Aisbett, C. W. (1991). #' Regression with frailty in survival analysis. #' \emph{Biometrics}, 47(2), 461-466. #' #' @examples #' \dontrun{ #' ## performing surivival analysis using the "weibull" family #' fit1 <- brm(time | cens(censored) ~ age + sex + disease, #' data = kidney, family = weibull, init = "0") #' summary(fit1) #' plot(fit1) #' #' ## adding random intercepts over patients #' fit2 <- brm(time | cens(censored) ~ age + sex + disease + (1|patient), #' data = kidney, family = weibull(), init = "0", #' prior = set_prior("cauchy(0,2)", class = "sd")) #' summary(fit2) #' plot(fit2) #' } #' "kidney" #' Clarity of inhaler instructions #' #' @description Ezzet and Whitehead (1991) analyze data from a two-treatment, #' two-period crossover trial to compare 2 inhalation devices for #' delivering the drug salbutamol in 286 asthma patients. #' Patients were asked to rate the clarity of leaflet instructions #' accompanying each device, using a 4-point ordinal scale. #' #' @format A data frame of 572 observations containing #' information on the following 5 variables. 
#' \describe{ #' \item{subject}{The subject number} #' \item{rating}{The rating of the inhaler instructions #' on a scale ranging from 1 to 4} #' \item{treat}{A contrast to indicate which of #' the two inhaler devices was used} #' \item{period}{A contrast to indicate the time of administration} #' \item{carry}{A contrast to indicate possible carry over effects} #' } #' #' @source Ezzet, F., & Whitehead, J. (1991). #' A random effects model for ordinal responses from a crossover trial. #' \emph{Statistics in Medicine}, 10(6), 901-907. #' #' @examples #' \dontrun{ #' ## ordinal regression with family "sratio" #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler, family = sratio(), #' prior = set_prior("normal(0,5)")) #' summary(fit1) #' plot(fit1) #' #' ## ordinal regression with family "cumulative" #' ## and random intercept over subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = cumulative(), #' prior = set_prior("normal(0,5)")) #' summary(fit2) #' plot(fit2) #' } #' "inhaler" #' Epileptic seizure counts #' #' @description Breslow and Clayton (1993) analyze data initially #' provided by Thall and Vail (1990) concerning #' seizure counts in a randomized trial of anti-convulsant #' therapy in epilepsy. Covariates are treatment, #' 8-week baseline seizure counts, and age of the patients in years. #' #' @format A data frame of 236 observations containing information #' on the following 9 variables. 
#' \describe{ #' \item{Age}{The age of the patients in years} #' \item{Base}{The seizure count at 8-weeks baseline} #' \item{Trt}{Either \code{0} or \code{1} indicating #' if the patient received anti-convulsant therapy} #' \item{patient}{The patient number} #' \item{visit}{The session number from \code{1} (first visit) #' to \code{4} (last visit)} #' \item{count}{The seizure count between two visits} #' \item{obs}{The observation number, that is #' a unique identifier for each observation} #' \item{zAge}{Standardized \code{Age}} #' \item{zBase}{Standardized \code{Base}} #' } #' #' @source Thall, P. F., & Vail, S. C. (1990). #' Some covariance models for longitudinal count data with overdispersion. #' \emph{Biometrics, 46(2)}, 657-671. \cr #' #' Breslow, N. E., & Clayton, D. G. (1993). #' Approximate inference in generalized linear mixed models. #' \emph{Journal of the American Statistical Association}, 88(421), 9-25. #' #' @examples #' \dontrun{ #' ## poisson regression without random effects. #' fit1 <- brm(count ~ zAge + zBase * Trt, #' data = epilepsy, family = poisson()) #' summary(fit1) #' plot(fit1) #' #' ## poisson regression with varying intercepts of patients #' ## as well as normal priors for overall effects parameters. #' fit2 <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson(), #' prior = set_prior("normal(0,5)")) #' summary(fit2) #' plot(fit2) #' } #' "epilepsy" #' Cumulative Insurance Loss Payments #' #' @description This dataset, discussed in Gesmann & Morris (2020), contains #' cumulative insurance loss payments over the course of ten years. #' #' @format A data frame of 55 observations containing information #' on the following 4 variables. #' \describe{ #' \item{AY}{Origin year of the insurance (1991 to 2000)} #' \item{dev}{Deviation from the origin year in months} #' \item{cum}{Cumulative loss payments} #' \item{premium}{Achieved premiums for the given origin year} #' } #' #' @source Gesmann M. & Morris J. 
(2020). Hierarchical Compartmental Reserving #' Models. \emph{CAS Research Papers}. #' #' @examples #' \dontrun{ #' # non-linear model to predict cumulative loss payments #' fit_loss <- brm( #' bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), #' ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, #' nl = TRUE), #' data = loss, family = gaussian(), #' prior = c( #' prior(normal(5000, 1000), nlpar = "ult"), #' prior(normal(1, 2), nlpar = "omega"), #' prior(normal(45, 10), nlpar = "theta") #' ), #' control = list(adapt_delta = 0.9) #' ) #' #' # basic summaries #' summary(fit_loss) #' conditional_effects(fit_loss) #' #' # plot predictions per origin year #' conditions <- data.frame(AY = unique(loss$AY)) #' rownames(conditions) <- unique(loss$AY) #' me_loss <- conditional_effects( #' fit_loss, conditions = conditions, #' re_formula = NULL, method = "predict" #' ) #' plot(me_loss, ncol = 5, points = TRUE) #' } #' "loss" brms/R/pp_check.R0000644000176200001440000001717214224021465013325 0ustar liggesusers#' Posterior Predictive Checks for \code{brmsfit} Objects #' #' Perform posterior predictive checks with the help #' of the \pkg{bayesplot} package. #' #' @aliases pp_check #' #' @param object An object of class \code{brmsfit}. #' @param type Type of the ppc plot as given by a character string. #' See \code{\link[bayesplot:PPC-overview]{PPC}} for an overview #' of currently supported types. You may also use an invalid #' type (e.g. \code{type = "xyz"}) to get a list of supported #' types in the resulting error message. #' @param ndraws Positive integer indicating how many #' posterior draws should be used. #' If \code{NULL} all draws are used. If not specified, #' the number of posterior draws is chosen automatically. #' Ignored if \code{draw_ids} is not \code{NULL}. #' @param prefix The prefix of the \pkg{bayesplot} function to be applied. 
#' Either `"ppc"` (posterior predictive check; the default) #' or `"ppd"` (posterior predictive distribution), the latter being the same #' as the former except that the observed data is not shown for `"ppd"`. #' @param group Optional name of a factor variable in the model #' by which to stratify the ppc plot. This argument is required for #' ppc \code{*_grouped} types and ignored otherwise. #' @param x Optional name of a variable in the model. #' Only used for ppc types having an \code{x} argument #' and ignored otherwise. #' @param ... Further arguments passed to \code{\link{predict.brmsfit}} #' as well as to the PPC function specified in \code{type}. #' @inheritParams prepare_predictions.brmsfit #' #' @return A ggplot object that can be further #' customized using the \pkg{ggplot2} package. #' #' @details For a detailed explanation of each of the ppc functions, #' see the \code{\link[bayesplot:PPC-overview]{PPC}} #' documentation of the \pkg{\link[bayesplot:bayesplot-package]{bayesplot}} #' package. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt #' + (1|patient) + (1|obs), #' data = epilepsy, family = poisson()) #' #' pp_check(fit) # shows dens_overlay plot by default #' pp_check(fit, type = "error_hist", ndraws = 11) #' pp_check(fit, type = "scatter_avg", ndraws = 100) #' pp_check(fit, type = "stat_2d") #' pp_check(fit, type = "rootogram") #' pp_check(fit, type = "loo_pit") #' #' ## get an overview of all valid types #' pp_check(fit, type = "xyz") #' #' ## get a plot without the observed data #' pp_check(fit, prefix = "ppd") #' } #' #' @importFrom bayesplot pp_check #' @export pp_check #' @export pp_check.brmsfit <- function(object, type, ndraws = NULL, prefix = c("ppc", "ppd"), group = NULL, x = NULL, newdata = NULL, resp = NULL, draw_ids = NULL, nsamples = NULL, subset = NULL, ...) { dots <- list(...) 
if (missing(type)) { type <- "dens_overlay" } type <- as_one_character(type) prefix <- match.arg(prefix) if (!is.null(group)) { group <- as_one_character(group) } if (!is.null(x)) { x <- as_one_character(x) } ndraws_given <- any(c("ndraws", "nsamples") %in% names(match.call())) ndraws <- use_alias(ndraws, nsamples) draw_ids <- use_alias(draw_ids, subset) resp <- validate_resp(resp, object, multiple = FALSE) if (prefix == "ppc") { # no type checking for prefix 'ppd' yet valid_types <- as.character(bayesplot::available_ppc("")) valid_types <- sub("^ppc_", "", valid_types) if (!type %in% valid_types) { stop2("Type '", type, "' is not a valid ppc type. ", "Valid types are:\n", collapse_comma(valid_types)) } } ppc_fun <- get(paste0(prefix, "_", type), asNamespace("bayesplot")) object <- restructure(object) stopifnot_resp(object, resp) family <- family(object, resp = resp) if (has_multicol(family)) { stop2("'pp_check' is not implemented for this family.") } valid_vars <- names(model.frame(object)) if ("group" %in% names(formals(ppc_fun))) { if (is.null(group)) { stop2("Argument 'group' is required for ppc type '", type, "'.") } if (!group %in% valid_vars) { stop2("Variable '", group, "' could not be found in the data.") } } if ("x" %in% names(formals(ppc_fun))) { if (!is.null(x) && !x %in% valid_vars) { stop2("Variable '", x, "' could not be found in the data.") } } if (type == "error_binned") { if (is_polytomous(family)) { stop2("Type '", type, "' is not available for polytomous models.") } method <- "posterior_epred" } else { method <- "posterior_predict" } if (!ndraws_given) { aps_types <- c( "error_scatter_avg", "error_scatter_avg_vs_x", "intervals", "intervals_grouped", "loo_pit", "loo_intervals", "loo_ribbon", "ribbon", "ribbon_grouped", "rootogram", "scatter_avg", "scatter_avg_grouped", "stat", "stat_2d", "stat_freqpoly_grouped", "stat_grouped", "violin_grouped" ) if (!is.null(draw_ids)) { ndraws <- NULL } else if (type %in% aps_types) { ndraws <- NULL 
message("Using all posterior draws for ppc type '", type, "' by default.") } else { ndraws <- 10 message("Using 10 posterior draws for ppc type '", type, "' by default.") } } y <- NULL if (prefix == "ppc") { # y is ignored in prefix 'ppd' plots y <- get_y(object, resp = resp, newdata = newdata, ...) } draw_ids <- validate_draw_ids(object, draw_ids, ndraws) pred_args <- list( object, newdata = newdata, resp = resp, draw_ids = draw_ids, ... ) yrep <- do_call(method, pred_args) if (anyNA(y)) { warning2("NA responses are not shown in 'pp_check'.") take <- !is.na(y) y <- y[take] yrep <- yrep[, take, drop = FALSE] } data <- current_data( object, newdata = newdata, resp = resp, re_formula = NA, check_response = TRUE, ... ) # prepare plotting arguments ppc_args <- list() if (prefix == "ppc") { ppc_args$y <- y ppc_args$yrep <- yrep } else if (prefix == "ppd") { ppc_args$ypred <- yrep } if (!is.null(group)) { ppc_args$group <- data[[group]] } if (!is.null(x)) { ppc_args$x <- data[[x]] if (!is_like_factor(ppc_args$x)) { ppc_args$x <- as.numeric(ppc_args$x) } } if ("psis_object" %in% setdiff(names(formals(ppc_fun)), names(ppc_args))) { ppc_args$psis_object <- do_call( compute_loo, c(pred_args, criterion = "psis") ) } if ("lw" %in% setdiff(names(formals(ppc_fun)), names(ppc_args))) { ppc_args$lw <- weights( do_call(compute_loo, c(pred_args, criterion = "psis")) ) } # censored responses are misleading when displayed in pp_check bterms <- brmsterms(object$formula) cens <- get_cens(bterms, data, resp = resp) if (!is.null(cens) & type != 'km_overlay') { warning2("Censored responses are not shown in 'pp_check'.") take <- !cens if (!any(take)) { stop2("No non-censored responses found.") } ppc_args$y <- ppc_args$y[take] ppc_args$yrep <- ppc_args$yrep[, take, drop = FALSE] if (!is.null(ppc_args$group)) { ppc_args$group <- ppc_args$group[take] } if (!is.null(ppc_args$x)) { ppc_args$x <- ppc_args$x[take] } if (!is.null(ppc_args$psis_object)) { # tidier to re-compute with subset psis_args 
<- c(pred_args, criterion = "psis") psis_args$newdata <- data[take, ] ppc_args$psis_object <- do_call(compute_loo, psis_args) } if (!is.null(ppc_args$lw)) { ppc_args$lw <- ppc_args$lw[,take] } } # most ... arguments are meant for the prediction function for_pred <- names(dots) %in% names(formals(prepare_predictions.brmsfit)) ppc_args <- c(ppc_args, dots[!for_pred]) do_call(ppc_fun, ppc_args) } brms/R/brmsfit-methods.R0000644000176200001440000004623014454227314014663 0ustar liggesusers# This file contains several extractor methods for brmsfit objects. # A lot of other brmsfit methods have their own dedicated files. #' Extract Population-Level Estimates #' #' Extract the population-level ('fixed') effects #' from a \code{brmsfit} object. #' #' @aliases fixef #' #' @inheritParams predict.brmsfit #' @param pars Optional names of coefficients to extract. #' By default, all coefficients are extracted. #' @param ... Currently ignored. #' #' @return If \code{summary} is \code{TRUE}, a matrix returned #' by \code{\link{posterior_summary}} for the population-level effects. #' If \code{summary} is \code{FALSE}, a matrix with one row per #' posterior draw and one column per population-level effect. #' #' @examples #' \dontrun{ #' fit <- brm(time | cens(censored) ~ age + sex + disease, #' data = kidney, family = "exponential") #' fixef(fit) #' # extract only some coefficients #' fixef(fit, pars = c("age", "sex")) #' } #' #' @method fixef brmsfit #' @export #' @export fixef #' @importFrom nlme fixef fixef.brmsfit <- function(object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), pars = NULL, ...) 
{ contains_draws(object) all_pars <- variables(object) fpars <- all_pars[grepl(fixef_pars(), all_pars)] if (!is.null(pars)) { pars <- as.character(pars) fpars <- fpars[sub("^[^_]+_", "", fpars) %in% pars] } if (!length(fpars)) { return(NULL) } out <- as.matrix(object, variable = fpars) colnames(out) <- gsub(fixef_pars(), "", fpars) if (summary) { out <- posterior_summary(out, probs, robust) } out } #' Covariance and Correlation Matrix of Population-Level Effects #' #' Get a point estimate of the covariance or #' correlation matrix of population-level parameters #' #' @inheritParams fixef.brmsfit #' @param correlation Logical; if \code{FALSE} (the default), compute #' the covariance matrix, if \code{TRUE}, compute the correlation matrix. #' #' @return covariance or correlation matrix of population-level parameters #' #' @details Estimates are obtained by calculating the maximum likelihood #' covariances (correlations) of the posterior draws. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), #' data = epilepsy, family = gaussian(), chains = 2) #' vcov(fit) #' } #' #' @export vcov.brmsfit <- function(object, correlation = FALSE, pars = NULL, ...) { contains_draws(object) all_pars <- variables(object) fpars <- all_pars[grepl(fixef_pars(), all_pars)] if (!is.null(pars)) { pars <- as.character(pars) fpars <- intersect(fpars, paste0("b_", pars)) } if (!length(fpars)) { return(NULL) } draws <- as.data.frame(object, variable = fpars) names(draws) <- sub(fixef_pars(), "", names(draws)) if (correlation) { out <- cor(draws) } else { out <- cov(draws) } out } #' Extract Group-Level Estimates #' #' Extract the group-level ('random') effects of each level #' from a \code{brmsfit} object. #' #' @aliases ranef #' #' @inheritParams fixef.brmsfit #' @param groups Optional names of grouping variables #' for which to extract effects. #' @param ... Currently ignored. #' #' @return A list of 3D arrays (one per grouping factor). 
#' If \code{summary} is \code{TRUE}, #' the 1st dimension contains the factor levels, #' the 2nd dimension contains the summary statistics #' (see \code{\link{posterior_summary}}), and #' the 3rd dimension contains the group-level effects. #' If \code{summary} is \code{FALSE}, the 1st dimension contains #' the posterior draws, the 2nd dimension contains the factor levels, #' and the 3rd dimension contains the group-level effects. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), #' data = epilepsy, family = gaussian(), chains = 2) #' ranef(fit) #' } #' #' @method ranef brmsfit #' @export #' @export ranef #' @importFrom nlme ranef ranef.brmsfit <- function(object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), pars = NULL, groups = NULL, ...) { contains_draws(object) object <- restructure(object) if (!nrow(object$ranef)) { stop2("The model does not contain group-level effects.") } all_pars <- variables(object) if (!is.null(pars)) { pars <- as.character(pars) } ranef <- object$ranef all_groups <- unique(ranef$group) if (!is.null(groups)) { groups <- as.character(groups) all_groups <- intersect(all_groups, groups) } out <- named_list(all_groups) for (g in all_groups) { r <- subset2(ranef, group = g) coefs <- paste0(usc(combine_prefix(r), "suffix"), r$coef) rpars <- all_pars[grepl(paste0("^r_", g, "(__.+\\[|\\[)"), all_pars)] if (!is.null(pars)) { coefs <- coefs[r$coef %in% pars] if (!length(coefs)) { next } regex <- paste0("(", escape_all(coefs), ")", collapse = "|") regex <- paste0(",", regex, "\\]$") rpars <- rpars[grepl(regex, rpars)] } levels <- attr(ranef, "levels")[[g]] if (length(rpars)) { # draws of varying coefficients were saved out[[g]] <- as.matrix(object, variable = rpars) dim(out[[g]]) <- c(nrow(out[[g]]), length(levels), length(coefs)) } else { # draws of varying coefficients were not saved out[[g]] <- array(dim = c(ndraws(object), length(levels), length(coefs))) } dimnames(out[[g]])[2:3] <- list(levels, 
coefs) if (summary) { out[[g]] <- posterior_summary(out[[g]], probs, robust) } } rmNULL(out, recursive = FALSE) } #' Extract Model Coefficients #' #' Extract model coefficients, which are the sum of population-level #' effects and corresponding group-level effects #' #' @inheritParams ranef.brmsfit #' @param ... Further arguments passed to \code{\link{fixef.brmsfit}} #' and \code{\link{ranef.brmsfit}}. #' #' @return A list of 3D arrays (one per grouping factor). #' If \code{summary} is \code{TRUE}, #' the 1st dimension contains the factor levels, #' the 2nd dimension contains the summary statistics #' (see \code{\link{posterior_summary}}), and #' the 3rd dimension contains the group-level effects. #' If \code{summary} is \code{FALSE}, the 1st dimension contains #' the posterior draws, the 2nd dimension contains the factor levels, #' and the 3rd dimension contains the group-level effects. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), #' data = epilepsy, family = gaussian(), chains = 2) #' ## extract population and group-level coefficients separately #' fixef(fit) #' ranef(fit) #' ## extract combined coefficients #' coef(fit) #' } #' #' @export coef.brmsfit <- function(object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) { contains_draws(object) object <- restructure(object) if (!nrow(object$ranef)) { stop2("No group-level effects detected. Call method ", "'fixef' to access population-level effects.") } fixef <- fixef(object, summary = FALSE, ...) coef <- ranef(object, summary = FALSE, ...) 
# add missing coefficients to fixef all_ranef_names <- unique(ulapply(coef, function(x) dimnames(x)[[3]])) fixef_names <- colnames(fixef) fixef_no_digits <- get_matches("^[^\\[]+", fixef_names) miss_fixef <- setdiff(all_ranef_names, fixef_names) miss_fixef_no_digits <- get_matches("^[^\\[]+", miss_fixef) new_fixef <- named_list(miss_fixef) for (k in seq_along(miss_fixef)) { # digits occur in ordinal models with category specific effects match_fixef <- match(miss_fixef_no_digits[k], fixef_names) if (!is.na(match_fixef)) { new_fixef[[k]] <- fixef[, match_fixef] } else if (!miss_fixef[k] %in% fixef_no_digits) { new_fixef[[k]] <- 0 } } rm_fixef <- fixef_names %in% miss_fixef_no_digits fixef <- fixef[, !rm_fixef, drop = FALSE] fixef <- do_call(cbind, c(list(fixef), rmNULL(new_fixef))) for (g in names(coef)) { # add missing coefficients to ranef ranef_names <- dimnames(coef[[g]])[[3]] ranef_no_digits <- get_matches("^[^\\[]+", ranef_names) miss_ranef <- setdiff(fixef_names, ranef_names) miss_ranef_no_digits <- get_matches("^[^\\[]+", miss_ranef) new_ranef <- named_list(miss_ranef) for (k in seq_along(miss_ranef)) { # digits occur in ordinal models with category specific effects match_ranef <- match(miss_ranef_no_digits[k], ranef_names) if (!is.na(match_ranef)) { new_ranef[[k]] <- coef[[g]][, , match_ranef] } else if (!miss_ranef[k] %in% ranef_no_digits) { new_ranef[[k]] <- array(0, dim = dim(coef[[g]])[1:2]) } } rm_ranef <- ranef_names %in% miss_ranef_no_digits coef[[g]] <- coef[[g]][, , !rm_ranef, drop = FALSE] coef[[g]] <- abind(c(list(coef[[g]]), rmNULL(new_ranef))) for (nm in dimnames(coef[[g]])[[3]]) { is_ord_intercept <- grepl("(^|_)Intercept\\[[[:digit:]]+\\]$", nm) if (is_ord_intercept) { # correct the sign of thresholds in ordinal models resp <- if (is_mv(object)) get_matches("^[^_]+", nm) family <- family(object, resp = resp)$family if (has_thres_minus_eta(family)) { coef[[g]][, , nm] <- fixef[, nm] - coef[[g]][, , nm] } else if (has_eta_minus_thres(family)) { 
coef[[g]][, , nm] <- coef[[g]][, , nm] - fixef[, nm] } else { coef[[g]][, , nm] <- fixef[, nm] + coef[[g]][, , nm] } } else { coef[[g]][, , nm] <- fixef[, nm] + coef[[g]][, , nm] } } if (summary) { coef[[g]] <- posterior_summary(coef[[g]], probs, robust) } } coef } #' Extract Variance and Correlation Components #' #' This function calculates the estimated standard deviations, #' correlations and covariances of the group-level terms #' in a multilevel model of class \code{brmsfit}. #' For linear models, the residual standard deviations, #' correlations and covariances are also returned. #' #' @aliases VarCorr #' #' @param x An object of class \code{brmsfit}. #' @inheritParams fixef.brmsfit #' @param sigma Ignored (included for compatibility with #' \code{\link[nlme:VarCorr]{VarCorr}}). #' @param ... Currently ignored. #' #' @return A list of lists (one per grouping factor), each with #' three elements: a matrix containing the standard deviations, #' an array containing the correlation matrix, and an array #' containing the covariance matrix with variances on the diagonal. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), #' data = epilepsy, family = gaussian(), chains = 2) #' VarCorr(fit) #' } #' #' @method VarCorr brmsfit #' @import abind abind #' @importFrom nlme VarCorr #' @export VarCorr #' @export VarCorr.brmsfit <- function(x, sigma = 1, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) 
{ contains_draws(x) x <- restructure(x) if (!(nrow(x$ranef) || any(grepl("^sigma($|_)", variables(x))))) { stop2("The model does not contain covariance matrices.") } .VarCorr <- function(y) { # extract draws for sd, cor and cov out <- list(sd = as.matrix(x, variable = y$sd_pars)) colnames(out$sd) <- y$rnames # compute correlation and covariance matrices found_cor_pars <- intersect(y$cor_pars, variables(x)) if (length(found_cor_pars)) { cor <- as.matrix(x, variable = found_cor_pars) if (length(found_cor_pars) < length(y$cor_pars)) { # some correlations are missing and will be replaced by 0 cor_all <- matrix(0, nrow = nrow(cor), ncol = length(y$cor_pars)) names(cor_all) <- y$cor_pars for (i in seq_len(ncol(cor_all))) { found <- match(names(cor_all)[i], colnames(cor)) if (!is.na(found)) { cor_all[, i] <- cor[, found] } } cor <- cor_all } out$cor <- get_cor_matrix(cor = cor) out$cov <- get_cov_matrix(sd = out$sd, cor = cor) dimnames(out$cor)[2:3] <- list(y$rnames, y$rnames) dimnames(out$cov)[2:3] <- list(y$rnames, y$rnames) if (summary) { out$cor <- posterior_summary(out$cor, probs, robust) out$cov <- posterior_summary(out$cov, probs, robust) } } if (summary) { out$sd <- posterior_summary(out$sd, probs, robust) } return(out) } if (nrow(x$ranef)) { get_names <- function(group) { # get names of group-level parameters r <- subset2(x$ranef, group = group) rnames <- as.vector(get_rnames(r)) cor_type <- paste0("cor_", group) sd_pars <- paste0("sd_", group, "__", rnames) cor_pars <- get_cornames(rnames, cor_type, brackets = FALSE) nlist(rnames, sd_pars, cor_pars) } group <- unique(x$ranef$group) tmp <- lapply(group, get_names) names(tmp) <- group } else { tmp <- list() } # include residual variances in the output as well bterms <- brmsterms(x$formula) if (is.brmsterms(bterms)) { if (simple_sigma(bterms) && !is.mixfamily(x$family)) { tmp_resid <- list(rnames = bterms$resp, sd_pars = "sigma") tmp <- c(tmp, residual__ = list(tmp_resid)) } } else if (is.mvbrmsterms(bterms)) { 
simple_sigma <- ulapply(bterms$terms, simple_sigma) pred_sigma <- ulapply(bterms$terms, pred_sigma) is_mix <- ulapply(x$family, is.mixfamily) if (any(simple_sigma) && !any(pred_sigma) && !any(is_mix)) { resps <- bterms$responses[simple_sigma] sd_pars <- paste0("sigma_", resps) if (bterms$rescor) { cor_pars <- get_cornames(resps, type = "rescor", brackets = FALSE) } else { cor_pars <- character(0) } tmp_resid <- nlist(rnames = resps, sd_pars, cor_pars) tmp <- c(tmp, residual__ = list(tmp_resid)) } } lapply(tmp, .VarCorr) } #' @export model.frame.brmsfit <- function(formula, ...) { formula$data } #' (Deprecated) Number of Posterior Samples #' #' Extract the number of posterior samples (draws) stored in a fitted Bayesian #' model. Method \code{nsamples} is deprecated. Please use \code{ndraws} #' instead. #' #' @aliases nsamples #' #' @param object An object of class \code{brmsfit}. #' @param subset An optional integer vector defining a subset of samples #' to be considered. #' @param incl_warmup A flag indicating whether to also count warmup / burn-in #' samples. #' @param ... Currently ignored. #' #' @method nsamples brmsfit #' @export #' @export nsamples #' @importFrom rstantools nsamples nsamples.brmsfit <- function(object, subset = NULL, incl_warmup = FALSE, ...) { warning2("'nsamples.brmsfit' is deprecated. Please use 'ndraws' instead.") if (!is(object$fit, "stanfit") || !length(object$fit@sim)) { out <- 0 } else { ntsamples <- object$fit@sim$n_save[1] if (!incl_warmup) { ntsamples <- ntsamples - object$fit@sim$warmup2[1] } ntsamples <- ntsamples * object$fit@sim$chains if (length(subset)) { out <- length(subset) if (out > ntsamples || max(subset) > ntsamples) { stop2("Argument 'subset' is invalid.") } } else { out <- ntsamples } } out } #' @export nobs.brmsfit <- function(object, resp = NULL, ...) 
{ if (is_mv(object) && length(resp)) { resp <- validate_resp(resp, object, multiple = FALSE) bterms <- brmsterms(object$formula$forms[[resp]]) out <- nrow(subset_data(model.frame(object), bterms)) } else { out <- nrow(model.frame(object)) } out } #' Number of Grouping Factor Levels #' #' Extract the number of levels of one or more grouping factors. #' #' @aliases ngrps.brmsfit #' #' @param object An \R object. #' @param ... Currently ignored. #' #' @return A named list containing the number of levels per #' grouping factor. #' #' @export ngrps.brmsfit <- function(object, ...) { object <- restructure(object) if (nrow(object$ranef)) { out <- lapply(attr(object$ranef, "levels"), length) } else { out <- NULL } out } #' @rdname ngrps.brmsfit #' @export ngrps <- function(object, ...) { UseMethod("ngrps") } #' @export formula.brmsfit <- function(x, ...) { x$formula } #' @export getCall.brmsfit <- function(x, ...) { x$formula } #' Extract Model Family Objects #' #' @inheritParams posterior_predict.brmsfit #' @param ... Currently unused. #' #' @return A \code{brmsfamily} object #' or a list of such objects for multivariate models. #' #' @export family.brmsfit <- function(object, resp = NULL, ...) { resp <- validate_resp(resp, object) if (!is.null(resp)) { # multivariate model family <- from_list(object$formula$forms[resp], "family") if (length(resp) == 1L) { family <- family[[1]] } } else { # univariate model family <- object$formula$family if (is.null(family)) { family <- object$family } } family } #' Expose user-defined \pkg{Stan} functions #' #' Export user-defined \pkg{Stan} function and #' optionally vectorize them. For more details see #' \code{\link[rstan:expose_stan_functions]{expose_stan_functions}}. #' #' @param x An object of class \code{brmsfit}. #' @param vectorize Logical; Indicates if the exposed functions #' should be vectorized via \code{\link{Vectorize}}. #' Defaults to \code{FALSE}. 
#' @param env Environment where the functions should be made #' available. Defaults to the global environment. #' @param ... Further arguments passed to #' \code{\link[rstan:expose_stan_functions]{expose_stan_functions}}. #' #' @export expose_functions.brmsfit <- function(x, vectorize = FALSE, env = globalenv(), ...) { vectorize <- as_one_logical(vectorize) stanmodel <- compiled_model(x) if (x$backend == "cmdstanr") { if ("expose_functions" %in% names(stanmodel)) { funs <- .expose_functions_cmdstanr( stanmodel, vectorize = vectorize, env = env, ... ) } else { # older versions of cmdstanr cannot export stan functions (#1176) scode <- strsplit(stancode(x), "\n")[[1]] data_line <- grep("^data[ ]+\\{$", scode) scode <- paste0(c(scode[seq_len(data_line - 1)], "\n"), collapse = "\n") stanmodel <- tempfile(fileext = ".stan") cat(scode, file = stanmodel) funs <- .expose_functions_rstan( stanmodel, vectorize = vectorize, env = env, ... ) } } else { funs <- .expose_functions_rstan( stanmodel, vectorize = vectorize, env = env, ... ) } invisible(funs) } # expose stan functions via rstan .expose_functions_rstan <- function(stanmodel, vectorize, env, ...) { if (vectorize) { fun_env <- new.env() funs <- rstan::expose_stan_functions(stanmodel, env = fun_env, ...) for (i in seq_along(funs)) { FUN <- Vectorize(get(funs[i], pos = fun_env)) assign(funs[i], FUN, pos = env) } } else { funs <- rstan::expose_stan_functions(stanmodel, env = env, ...) } funs } # expose stan functions via cmdstanr .expose_functions_cmdstanr <- function(stanmodel, vectorize, env, ...) { suppressMessages(stanmodel$expose_functions()) fun_env <- stanmodel$functions funs <- names(fun_env) for (i in seq_along(funs)) { FUN <- get(funs[i], pos = fun_env) # cmdstanr adds some non-functions to the environment if (!is.function(FUN)) { next } if (vectorize) { FUN <- Vectorize(FUN) } assign(funs[i], FUN, pos = env) } funs } #' @rdname expose_functions.brmsfit #' @export expose_functions <- function(x, ...) 
{ UseMethod("expose_functions") } brms/R/formula-sm.R0000644000176200001440000000725114363466264013646 0ustar liggesusers# This file contains functions dealing with the extended # formula syntax to specify smooth terms via mgcv #' Defining smooths in \pkg{brms} formulas #' #' Functions used in definition of smooth terms within a model formulas. #' The function does not evaluate a (spline) smooth - it exists purely #' to help set up a model using spline based smooths. #' #' @param ... Arguments passed to \code{\link[mgcv:s]{mgcv::s}} or #' \code{\link[mgcv:t2]{mgcv::t2}}. #' #' @details The function defined here are just simple wrappers of the respective #' functions of the \pkg{mgcv} package. When using them, please cite the #' appropriate references obtained via \code{citation("mgcv")}. #' #' \pkg{brms} uses the "random effects" parameterization of smoothing splines #' as explained in \code{\link[mgcv:gamm]{mgcv::gamm}}. A nice tutorial on this #' topic can be found in Pedersen et al. (2019). The answers provided in this #' \href{https://discourse.mc-stan.org/t/better-priors-non-flat-for-gams-brms/23012/4}{Stan discourse post} #' may also be helpful. #' #' @seealso \code{\link{brmsformula}}, #' \code{\link[mgcv:s]{mgcv::s}}, \code{\link[mgcv:t2]{mgcv::t2}} #' #' @references #' Pedersen, E. J., Miller, D. L., Simpson, G. L., & Ross, N. (2019). #' Hierarchical generalized additive models in ecology: an introduction with #' mgcv. PeerJ. #' #' @examples #' \dontrun{ #' # simulate some data #' dat <- mgcv::gamSim(1, n = 200, scale = 2) #' #' # fit univariate smooths for all predictors #' fit1 <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), #' data = dat, chains = 2) #' summary(fit1) #' plot(conditional_smooths(fit1), ask = FALSE) #' #' # fit a more complicated smooth model #' fit2 <- brm(y ~ t2(x0, x1) + s(x2, by = x3), #' data = dat, chains = 2) #' summary(fit2) #' plot(conditional_smooths(fit2), ask = FALSE) #' } #' #' @export s <- function(...) { mgcv::s(...) 
} #' @rdname s #' @export t2 <- function(...) { mgcv::t2(...) } # extract information about smooth terms # @param x either a formula or a list containing an element "sm" # @param data data.frame containing the covariates tidy_smef <- function(x, data) { if (is.formula(x)) { x <- brmsterms(x, check_response = FALSE)$dpars$mu } form <- x[["sm"]] if (!is.formula(form)) { return(empty_data_frame()) } out <- data.frame(term = all_terms(form), stringsAsFactors = FALSE) nterms <- nrow(out) out$sfun <- get_matches("^[^\\(]+", out$term) out$vars <- out$byvars <- out$covars <- vector("list", nterms) for (i in seq_len(nterms)) { sm <- eval2(out$term[i]) out$covars[[i]] <- sm$term if (sm$by != "NA") { out$byvars[[i]] <- sm$by } out$vars[[i]] <- c(out$covars[[i]], out$byvars[[i]]) } out$label <- paste0(out$sfun, rename(ulapply(out$vars, collapse))) # prepare information inferred from the data sdata <- data_sm(x, data) bylevels <- attr(sdata$Xs, "bylevels") nby <- lengths(bylevels) tmp <- vector("list", nterms) for (i in seq_len(nterms)) { tmp[[i]] <- out[i, , drop = FALSE] tmp[[i]]$termnum <- i if (nby[i] > 0L) { tmp[[i]] <- do_call(rbind, repl(tmp[[i]], nby[i])) tmp[[i]]$bylevel <- rm_wsp(bylevels[[i]]) tmp[[i]]$byterm <- paste0(tmp[[i]]$term, tmp[[i]]$bylevel) str_add(tmp[[i]]$label) <- rename(tmp[[i]]$bylevel) } else { tmp[[i]]$bylevel <- NA tmp[[i]]$byterm <- tmp[[i]]$term } } out <- do_call(rbind, tmp) out$knots <- sdata[grepl("^knots_", names(sdata))] out$nbases <- lengths(out$knots) attr(out, "Xs_names") <- colnames(sdata$Xs) rownames(out) <- NULL out } # check if smooths are present in the model has_smooths <- function(bterms) { length(get_effect(bterms, target = "sm")) > 0L } brms/NEWS.md0000644000176200001440000024702514501037402012321 0ustar liggesusers# brms 2.20.3 ### Other Changes * Switch to the new array syntax of Stan. This increases the version requirements of Stan to >= 2.26. 
# brms 2.20.0 ### New Features * Apply the `horseshoe` and `R2D2` priors globally, that is, for all additive predictor terms specified in the same formula. (#1492) * Use `as.brmsprior` to transform objects into a `brmsprior`. (#1491) * Use matrix data as non-linear covariates. (#1488) ### Other Changes * No longer support the `lasso` prior as it is not a good shrinkage prior and incompatible with the newly implemented global shrinkage prior framework. * No longer support multiple deprecated prior options for categorical and multivariate models after around 3 years of deprecation. (#1420) * Deprecate argument `newdata` of `get_refmodel.brmsfit()`. (#1502) * Disallow binomial models without `trials` argument after several years of deprecation. (#1501) ### Bug Fixes * Fix a long-standing bug in the post-processing of spline models that could lead to non-sensible results if predictions were performed on a different machine than where the model was originally fitted. Old spline models can be repaired via `restructure`. Special thanks to Simon Wood, Ruben Arslan, Marta Kołczyńska, Patrick Hogan, and Urs Kalbitzer. (#1465) * Fix a bunch of minor issues occurring for rare feature combinations. # brms 2.19.0 ### New Features * Model unstructured autocorrelation matrices via the `unstr` term thanks to the help of Sebastian Weber. (#1435) * Model ordinal data with an extra category (non-response or similar) via the `hurdle_cumulative` family thanks to Stephen Wild. (#1448) * Improve user control over model recompilation via argument `recompile` in post-processing methods that require a compiled Stan model. * Extend control over the `point_estimate` feature in `prepare_predictions` via the new argument `ndraws_point_estimate`. * Add support for the latent projection available in **projpred** versions >= 2.4.0. (#1451) ### Bug Fixes * Fix a Stan syntax error in threaded models with `lasso` priors. 
(#1427) * Fix Stan compilation issues for some of the more special link functions such as `cauchit` or `softplus`. * Fix a bug for predictions in **projpred**, previously requiring more variables in `newdata` than necessary. (#1457, #1459, #1460) # brms 2.18.0 ### New Features * Support regression splines with fixed degrees of freedom specified via `s(..., fx = TRUE)`. * Reuse user-specified control arguments originally passed to the Stan backend in `update` and related methods. (#1373, #1378) * Allow to retain unused factors levels via `drop_unused_levels = FALSE` in `brm` and related functions. (#1346) * Automatically update old default priors based on new input when when updating models via `update.brmsfit`. (#1380) * Allow to use `dirichlet` priors for more parameter types. (#1165) ### Other Changes * Improve efficiency of converting models fitted with `backend = "cmdstanr"` to `stanfit` objects thanks to Simon Mills and Jacob Socolar. (#1331) * Allow for more `O1` optimization of brms-generated Stan models thanks to Aki Vehtari. (#1382) ### Bug Fixes * Fix problems with missing boundaries of `sdme` parameters in models with known response standard errors thanks to Solomon Kurz. (#1348) * Fix Stan code of `gamma` models with `softplus` link. * Allow for more flexible data inputs to `brm_multiple`. (#1383) * Ensure that `control_params` returns the right values for models fitted with the `cmdstanr` backend. (#1390) * Fix problems in multivariate spline models when using the `subset` addition term. (#1385) # brms 2.17.0 ### New Features * Add full user control for boundaries of most parameters via the `lb` and `ub` arguments of `set_prior` and related functions. (#878, #1094) * Add family `logistic_normal` for simplex responses. (#1274) * Add argument `future_args` to `kfold` and `reloo` for additional control over parallel execution via futures. 
* Add families `beta_binomial` & `zero_inflated_beta_binomial` for potentially over-dispersed and zero-inflated binomial response models thanks to Hayden Rabel. (#1319 & #1311) * Display `ppd_*` plots in `pp_check` via argument `prefix`. (#1313) * Support the `log` link in binomial and beta type families. (#1316) * Support **projpred**'s augmented-data projection. (#1292, #1294) ### Other changes * Argument `brms_seed` has been added to `get_refmodel.brmsfit()`. (#1287) * Deprecate argument `inits` in favor of `init` for consistency with the Stan backends. * Improve speed of the `summary` method for high-dimensional models. (#1330) ### Bug Fixes * Fix Stan code of threaded multivariate models thanks to Anirban Mukherjee. (#1277) * Fix usage of `int_conditions` in `conditional_smooths` thanks to Urs Kalbitzer. (#1280) * Fix an error sometimes occurring for multilevel (reference) models in `projpred`'s K-fold CV. (#1286) * Fix response values in `make_standata` for `bernoulli` families when only 1s are present thanks to Facundo Munoz. (#1298) * Fix `pp_check` for censored responses to work for all plot types thanks to Hayden Rabel. (#1327) * Ensure that argument `overwrite` in `add_criterion` works as expected for all criteria thanks to Andrew Milne. (#1323) * Fix a problem in `launch_shinystan` occurring when warmup draws were saved thanks to Frank Weber. (#1257, #1329) * Fix numerical stability problems in `log_lik` for ordinal models. (#1192) # brms 2.16.3 ### Other changes * Move `projpred` from `Imports:` to `Suggests:`. This has the important implication that users need to load or attach `projpred` themselves if they want to use it (the more common case is probably attaching, which is achieved by `library(projpred)`). (#1222) ### Bug Fixes * Ensure that argument `overwrite` in `add_criterion` is working as intended thanks to Ruben Arslan. 
(#1219) * Fix a bug in `get_refmodel.brmsfit()` (i.e., when using `projpred` for a `"brmsfit"`) causing offsets not to be recognized. (#1220) * Several further minor bug fixes. # brms 2.16.1 ### Bug Fixes * Fix a bug causing problems during post-processing of models fitted with older versions of brms and the `cmdstanr` backend thanks to Riccardo Fusaroli. (#1218) # brms 2.16.0 ### New Features * Support several methods of the `posterior` package. (#1204) * Substantially extend compatibility of `brms` models with `emmeans` thanks to Mattan S. Ben-Shachar. (#907, #1134) * Combine missing value (`mi`) terms with `subset` addition terms. (#1063) * Expose function `get_dpar` for use in the post-processing of custom families thank to Martin Modrak. (#1131) * Support the `squareplus` link function in all families and distributional parameters that also allow for the `log` link function. * Add argument `incl_thres` to `posterior_linpred.brmsfit()` allowing to subtract the threshold-excluding linear predictor from the thresholds in case of an ordinal family. (#1137) * Add a `"mock"` backend option to facilitate testing thanks to Martin Modrak. (#1116) * Add option `file_refit = "always"` to always overwrite models stored via the `file` argument. (#1151) * Initial GPU support via OpenCL thanks to the help Rok Češnovar. (#1166) * Support argument `robust` in method `hypothesis`. (#1170) * Vectorize the Stan code of custom likelihoods via argument `loop` of `custom_family`. (#1084) * Experimentally allow category specific effects for ordinal `cumulative` models. (#1060) * Regenerate Stan code of an existing model via argument `regenerate` of method `stancode`. * Support `expose_functions` for models fitted with the `cmdstanr` backend thanks to Sebastian Weber. (#1176) * Support `log_prob` and related functionality in models fitted with the `cmdstanr` backend via function `add_rstan_model`. 
(#1184) ### Other Changes * Remove use of `cbind` to express multivariate models after over two years of deprecation (please use `mvbind` instead). * Method `posterior_linpred(transform = TRUE)` is now equal to `posterior_epred(dpar = "mu")` and no longer deprecated. * Refactor and extend internal post-processing functions for ordinal and categorical models thanks to Frank Weber. (#1159) * Ignore `NA` values in interval censored boundaries as long as they are unused. (#1070) * Take offsets into account when deriving default priors for overall intercept parameters. (#923) * Soft deprecate measurement error (`me`) terms in favor of the more general and consistent missing value (`mi`) terms. (#698) ### Bug Fixes * Fix an issue in the post-processing of non-normal ARMA models thanks to Thomas Buehrens. (#1149) * Fix an issue with default baseline hazard knots in `cox` models thanks to Malcolm Gillies. (#1143) * Fix a bug in non-linear models caused by accidental merging of operators in the non-linear formula thanks to Fernando Miguez. (#1142) * Correctly trigger a refit for `file_refit = "on_change"` if factor level names have changed thanks to Martin Modrak. (#1128) * Validate factors in `validate_newdata` even when they are simultaneously used as predictors and grouping variables thanks to Martin Modrak. (#1141) * Fix a bug in the Stan code generation of threaded mixture models with predicted mixture probabilities thanks to Riccardo Fusaroli. (#1150) * Remove duplicated Stan code related to the `horseshoe` prior thanks to Max Joseph. (#1167) * Fix an issue in the post-processing of non-looped non-linear parameters thanks to Sebastian Weber. * Fix an issue in the Stan code of threaded non-looped non-linear models thanks to Sebastian Weber. (#1175) * Fix problems in the post-processing of multivariate meta-analytic models that could lead to incorrect handling of known standard errors. 
# brms 2.15.0 ### New Features * Turn off normalization in the Stan model via argument `normalize`. to increase sampling efficiency thanks to Andrew Johnson. (#1017, #1053) * Enable `posterior_predict` for truncated continuous models even if the required CDF or quantile functions are unavailable. * Update and export `validate_prior` to validate priors supplied by the user. * Add support for within-chain threading with `rstan (Stan >= 2.25)` backend. * Apply the R2-D2 shrinkage prior to population-level coefficients via function `R2D2` to be used in `set_prior`. * Extend support for `arma` correlation structures in non-normal families. * Extend scope of variables passed via `data2` for use in the evaluation of most model terms. * Refit models previously stored on disc only when necessary thanks to Martin Modrak. The behavior can be controlled via `file_refit`. (#1058) * Allow for a finer tuning of informational messages printed in `brm` via the `silent` argument. (#1076) * Allow `stanvars` to alter distributional parameters. (#1061) * Allow `stanvars` to be used inside threaded likelihoods. (#1111) ### Other Changes * Improve numerical stability of ordinal sequential models (families `sratio` and `cratio`) thanks to Andrew Johnson. (#1087) ### Bug Fixes * Allow fitting `multinomial` models with the `cmdstanr` backend thanks to Andrew Johnson. (#1033) * Allow user-defined Stan functions in threaded models. (#1034) * Allow usage of the `:` operator in autocorrelation terms. * Fix Stan code generation when specifying coefficient-level priors on spline terms. * Fix numerical issues occurring in edge cases during post-processing of Gaussian processes thanks to Marta Kołczyńska. * Fix an error during post-processing of new levels in multi-membership terms thanks to Guilherme Mohor. * Fix a bug in the Stan code of threaded `wiener` drift diffusion models thanks to the GitHub user yanivabir. 
(#1085) * Fix a bug in the threaded Stan code for GPs with categorical `by` variables thanks to Reece Willoughby. (#1081) * Fix a bug in the threaded Stan code when using QR decomposition thanks to Steve Bronder. (#1086) * Include offsets in `emmeans` related methods thanks to Russell V. Lenth. (#1096) # brms 2.14.4 ### New Features * Support `projpred` version 2.0 for variable selection in generalized linear and additive multilevel models thanks to Alejandro Catalina. * Support `by` variables in multi-membership terms. * Use Bayesian bootstrap in `loo_R2`. ### Bug Fixes * Allow non-linear terms in threaded models. * Allow multi-membership terms in threaded models. * Allow `se` addition terms in threaded models. * Allow `categorical` families in threaded models. * Fix updating of parameters in `loo_moment_match`. * Fix facet labels in `conditional_effects` thanks to Isaac Petersen. (#1014) # brms 2.14.0 ### New Features * Experimentally support within-chain parallelization via `reduce_sum` using argument `threads` in `brm` thanks to Sebastian Weber. (#892) * Add algorithm `fixed_param` to sample from fixed parameter values. (#973) * No longer remove `NA` values in `data` if there are unused because of the `subset` addition argument. (#895) * Combine `by` variables and within-group correlation matrices in group-level terms. (#674) * Add argument `robust` to the `summary` method. (#976) * Parallelize evaluation of the `posterior_predict` and `log_lik` methods via argument `cores`. (#819) * Compute effective number of parameters in `kfold`. * Show prior sources and vectorization in the `print` output of `brmsprior` objects. (#761) * Store unused variables in the model's data frame via argument `unused` of function `brmsformula`. * Support posterior mean predictions in `emmeans` via `dpar = "mean"` thanks to Russell V. Lenth. (#993) * Improve control of which parameters should be saved via function `save_pars` and corresponding argument in `brm`. 
(#746) * Add method `posterior_smooths` to computing predictions of individual smooth terms. (#738) * Allow to display grouping variables in `conditional_effects` using the `effects` argument. (#1012) ### Other Changes * Improve sampling efficiency for a lot of models by using Stan's GLM-primitives even in non-GLM cases. (#984) * Improve sampling efficiency of multilevel models with within-group covariances thanks to David Westergaard. (#977) * Deprecate argument `probs` in the `conditional_effects` method in favor of argument `prob`. ### Bug Fixes * Fix a problem in `pp_check` inducing wronger observation orders in time series models thanks to Fiona Seaton. (#1007) * Fix multiple problems with `loo_moment_match` that prevented it from working for some more complex models. # brms 2.13.5 ### New Features * Support the Cox proportional hazards model for time-to-event data via family `cox`. (#230, #962) * Support method `loo_moment_match`, which can be used to update a `loo` object when Pareto k estimates are large. ### Other Changes * Improve the prediction behavior in post-processing methods when sampling new levels of grouping factors via `sample_new_levels = "uncertainty"`. (#956) ### Bug Fixes * Fix minor problems with MKL on CRAN. # brms 2.13.3 ### New Features * Fix shape parameters across multiple monotonic terms via argument `id` in function `mo` to ensure conditionally monotonic effects. (#924) * Support package `rtdists` as additional backend of `wiener` distribution functions thanks to the help of Henrik Singmann. (#385) ### Bug Fixes * Fix generated Stan Code of models with improper global priors and `constant` priors on some coefficients thanks to Frank Weber. (#919) * Fix a bug in `conditional_effects` occurring for categorical models with matrix predictors thanks to Jamie Cranston. (#933) ### Other Changes * Adjust behavior of the `rate` addition term so that it also affects the `shape` parameter in `negbinomial` models thanks to Edward Abraham. 
(#915) * Adjust the default inverse-gamma prior on length-scale parameters of Gaussian processes to be less extreme in edge cases thanks to Topi Paananen. # brms 2.13.0 ### New Features * Constrain ordinal thresholds to sum to zero via argument `threshold` in ordinal family functions thanks to the help of Marta Kołczyńska. * Support `posterior_linpred` as method in `conditional_effects`. * Use `std_normal` in the Stan code for improved efficiency. * Add arguments `cor`, `id`, and `cov` to the functions `gr` and `mm` for easy specification of group-level correlation structures. * Improve workflow to feed back brms-created models which were fitted somewhere else back into brms. (#745) * Improve argument `int_conditions` in `conditional_effects` to work for all predictors not just interactions. * Support multiple imputation of data passed via `data2` in `brm_multiple`. (#886) * Fully support the `emmeans` package thanks to the help of Russell V. Lenth. (#418) * Control the within-block position of Stan code added via `stanvar` using the `position` argument. ### Bug Fixes * Fix issue in Stan code of models with multiple `me` terms thanks to Chris Chatham. (#855, #856) * Fix scaling problems in the estimation of ordinal models with multiple threshold vectors thanks to Marta Kołczyńska and Rok Češnovar. * Allow usage of `std_normal` in `set_prior` thanks to Ben Goodrich. (#867) * Fix Stan code of distributional models with `weibull`, `frechet`, or `inverse.gaussian` families thanks to Brian Huey and Jack Caster. (#879) * Fix Stan code of models which are truncated and weighted at the same time thanks to Michael Thompson. (#884) * Fix Stan code of multivariate models with custom families and data variables passed to the likelihood thanks to Raoul Wolf. (#906) ### Other Changes * Reduce minimal scale of several default priors from 10 to 2.5. The resulting priors should remain weakly informative. * Automatically group observations in `gp` for increased efficiency. 
* Rename `parse_bf` to `brmsterms` and deprecate the former function. * Rename `extract_draws` to `prepare_predictions` and deprecate the former function. * Deprecate using a model-dependent `rescor` default. * Deprecate argument `cov_ranef` in `brm` and related functions. * Improve several internal interfaces. This should not have any user-visible changes. * Simplify the parameterization of the horseshoe prior thanks to Aki Vehtari. (#873) * Store fixed distributional parameters as regular draws so that they behave as if they were estimated in post-processing methods. # brms 2.12.0 ### New Features * Fix parameters to constants via the `prior` argument. (#783) * Specify autocorrelation terms directly in the model formula. (#708) * Translate integer covariates in non-linear formulas to integer arrays in Stan. * Estimate `sigma` in combination with fixed correlation matrices via autocorrelation term `fcor`. * Use argument `data2` in `brm` and related functions to pass data objects which cannot be passed via `data`. The usage of `data2` will be extended in future versions. * Compute pointwise log-likelihood values via `log_lik` for non-factorizable Student-t models. (#705) ### Bug Fixes * Fix output of `posterior_predict` for `multinomial` models thanks to Ivan Ukhov. * Fix selection of group-level terms via `re_formula` in multivariate models thanks to Maxime Dahirel. (#834) * Enforce correct ordering of terms in `re_formula` thanks to @ferberkl. (#844) * Fix post-processing of multivariate multilevel models when multiple IDs are used for the same grouping factor thanks to @lott999. (#835) * Store response category names of ordinal models in the output of `posterior_predict` again thanks to Mattew Kay. (#838) * Handle `NA` values more consistently in `posterior_table` thanks to Anna Hake. (#845) * Fix a bug in the Stan code of models with multiple monotonic varying effects across different groups thanks to Julian Quandt. 
### Other Changes * Rename `offset` variables to `offsets` in the generated Stan code as the former will be reserved in the new stanc3 compiler. # brms 2.11.1 ### Bug Fixes * Fix version requirement of the `loo` package. * Fix effective sample size note in the `summary` output. (#824) * Fix an edge case in the handling of covariates in special terms thanks to Andrew Milne. (#823) * Allow restructuring objects multiple times with different brms versions thanks to Jonathan A. Nations. (#828) * Fix validation of ordered factors in `newdata` thanks to Andrew Milne. (#830) # brms 2.11.0 ### New Features * Support grouped ordinal threshold vectors via addition argument `resp_thres`. (#675) * Support method `loo_subsample` for performing approximate leave-one-out cross-validation for large data. * Allow storing more model fit criteria via `add_criterion`. (#793) ### Bug Fixes * Fix prediction uncertainties of new group levels for `sample_new_levels = "uncertainty"` thanks to Dominic Magirr. (#779) * Fix problems when using `pp_check` on censored models thanks to Andrew Milne. (#744) * Fix error in the generated Stan code of multivariate `zero_inflated_binomial` models thanks to Raoul Wolf. (#756) * Fix predictions of spline models when using addition argument `subset` thanks to Ruben Arslan. * Fix out-of-sample predictions of AR models when predicting more than one step ahead. * Fix problems when using `reloo` or `kfold` with CAR models. * Fix problems when using `fitted(..., scale = "linear")` with multinomial models thanks to Santiago Olivella. (#770) * Fix problems in the `as.mcmc` method for thinned models thanks to @hoxo-m. (#811) * Fix problems in parsing covariates of special effects terms thanks to Riccardo Fusaroli (#813) ### Other Changes * Rename `marginal_effects` to `conditional_effects` and `marginal_smooths` to `conditional_smooths`. (#735) * Rename `stanplot` to `mcmc_plot`. * Add method `pp_expect` as an alias of `fitted`. 
(#644) * Model fit criteria computed via `add_criterion` are now stored in the `brmsfit$criteria` slot. * Deprecate `resp_cat` in favor of `resp_thres`. * Deprecate specifying global priors on regression coefficients in categorical and multivariate models. * Improve names of weighting methods in `model_weights`. * Deprecate reserved variable `intercept` in favor of `Intercept`. * Deprecate argument `exact_match` in favor of `fixed`. * Deprecate functions `add_loo` and `add_waic` in favor of `add_criterion`. # brms 2.10.0 ### New Features * Improve convergence diagnostics in the `summary` output. (#712) * Use primitive Stan GLM functions whenever possible. (#703) * Pass real and integer data vectors to custom families via the addition arguments `vreal` and `vint`. (#707) * Model compound symmetry correlations via `cor_cosy`. (#403) * Predict `sigma` in combination with several autocorrelation structures. (#403) * Use addition term `rate` to conveniently handle denominators of rate responses in log-linear models. * Fit BYM2 CAR models via `cor_car` thanks to the case study and help of Mitzi Morris. ### Other Changes * Substantially improve the sampling efficiency of SAR models thanks to the GitHub user aslez. (#680) * No longer allow changing the boundaries of autocorrelation parameters. * Set the number of trials to 1 by default in `marginal_effects` if not specified otherwise. (#718) * Use non-standard evaluation for addition terms. * Name temporary intercept parameters more consistently in the Stan code. ### Bug Fixes * Fix problems in the post-processing of `me` terms with grouping factors thanks to the GitHub user tatters. (#706) * Allow grouping variables to start with a dot thanks to Bruno Nicenboim. (#679) * Allow the `horseshoe` prior in categorical and related models thanks to the Github user tatters. (#678) * Fix extraction of prior samples for overall intercepts in `prior_samples` thanks to Jonas Kristoffer Lindelov. 
(#696) * Allow underscores to be used in category names of categorical responses thanks to Emmanuel Charpentier. (#672) * Fix Stan code of multivariate models with multi-membership terms thanks to the Stan discourse user Pia. * Improve checks for non-standard variable names thanks to Ryan Holbrook. (#721) * Fix problems when plotting facetted spaghetti plots via `marginal_smooths` thanks to Gavin Simpson. (#740) # brms 2.9.0 ### New Features * Specify non-linear ordinal models. (#623) * Allow to fix thresholds in ordinal mixture models (#626) * Use the `softplus` link function in various families. (#622) * Use QR decomposition of design matrices via argument `decomp` of `brmsformula` thanks to the help of Ben Goodrich. (#640) * Define argument `sparse` separately for each model formula. * Allow using `bayes_R2` and `loo_R2` with ordinal models. (#639) * Support `cor_arma` in non-normal models. (#648) ### Other Changes * Change the parameterization of monotonic effects to improve their interpretability. (#578) * No longer support the `cor_arr` and `cor_bsts` correlation structures after a year of deprecation. * Refactor internal evaluation of special predictor terms. * Improve penalty of splines thanks to Ben Goodrich and Ruben Arslan. ### Bug Fixes * Fix a problem when applying `marginal_effects` to measurement error models thanks to Jonathan A. Nations. (#636) * Fix computation of log-likelihood values for weighted mixture models. * Fix computation of fitted values for truncated lognormal and weibull models. * Fix checking of response boundaries for models with missing values thanks to Lucas Deschamps. * Fix Stan code of multivariate models with both residual correlations and missing value terms thanks to Solomon Kurz. * Fix problems with interactions of special terms when extracting variable names in `marginal_effects`. * Allow compiling a model in `brm_multiple` without sampling thanks to Will Petry. 
(#671) # brms 2.8.0 ### New Features * Fit multinomial models via family `multinomial`. (#463) * Fit Dirichlet models via family `dirichlet`. (#463) * Fit conditional logistic models using the `categorical` and `multinomial` families together with non-linear formula syntax. (#560) * Choose the reference category of `categorical` and related families via argument `refcat` of the corresponding family functions. * Use different subsets of the data in different univariate parts of a multivariate model via addition argument `subset`. (#360) * Control the centering of population-level design matrices via argument `center` of `brmsformula` and related functions. * Add an `update` method for `brmsfit_multiple` objects. (#615) * Split folds after `group` in the `kfold` method. (#619) ### Other changes * Deprecate `compare_ic` and instead recommend `loo_compare` for the comparison of `loo` objects to ensure consistency between packages. (#414) * Use the **glue** package in the Stan code generation. (#549) * Introduce `mvbind` to eventually replace `cbind` in the formula syntax of multivariate models. * Validate several sampling-related arguments in `brm` before compiling the Stan model. (#576) * Show evaluated vignettes on CRAN again. (#591) * Export function `get_y` which is used to extract response values from `brmsfit` objects. ### Bug fixes * Fix an error when trying to change argument `re_formula` in `bayes_R2` thanks to the GitHub user emieldl. (#592) * Fix occasional problems when running chains in parallel via the **future** package thanks to Jared Knowles. (#579) * Ensure correct ordering of response categories in ordinal models thanks to Jonas Kristoffer Lindelov. (#580) * Ignore argument `resp` of `marginal_effects` in univariate models thanks to Vassilis Kehayas. (#589) * Correctly disable cell-mean coding in varying effects. * Allow to fix parameter `ndt` in drift diffusion models. * Fix Stan code for t-distributed varying effects thanks to Ozgur Asar. 
* Fix an error in the post-processing of monotonic effects occurring for multivariate models thanks to James Rae. (#598) * Fix lower bounds in truncated discrete models. * Fix checks of the original data in `kfold` thanks to the GitHub user gcolitti. (#602) * Fix an error when applying the `VarCorr` method to meta-analytic models thanks to Michael Scharkow. (#616) # brms 2.7.0 ### New features * Fit approximate and non-isotropic Gaussian processes via `gp`. (#540) * Enable parallelization of model fitting in `brm_multiple` via the future package. (#364) * Perform posterior predictions based on k-fold cross-validation via `kfold_predict`. (#468) * Indicate observations for out-of-sample predictions in ARMA models via argument `oos` of `extract_draws`. (#539) ### Other changes * Allow factor-like variables in smooth terms. (#562) * Make plotting of `marginal_effects` more robust to the usage of non-standard variable names. * Deactivate certain data validity checks when using custom families. * Improve efficiency of adjacent category models. * No longer print informational messages from the Stan parser. ### Bug fixes * Fix an issue that could result in a substantial efficiency drop of various post-processing methods for larger models. * Fix an issue when that resulted in an error when using `fitted(..., scale = "linear")` with ordinal models thanks to Andrew Milne. (#557) * Allow setting priors on the overall intercept in sparse models. * Allow sampling from models with only a single observation that also contain an offset thanks to Antonio Vargas. (#545) * Fix an error when sampling from priors in mixture models thanks to Jacki Buros Novik. (#542) * Fix a problem when trying to sample from priors of parameter transformations. * Allow using `marginal_smooths` with ordinal models thanks to Andrew Milne. (#570) * Fix an error in the post-processing of `me` terms thanks to the GitHub user hlluik. (#571) * Correctly update `warmup` samples when using `update.brmsfit`. 
# brms 2.6.0 ### New features * Fit factor smooth interactions thanks to Simon Wood. * Specify separate priors for thresholds in ordinal models. (#524) * Pass additional arguments to `rstan::stan_model` via argument `stan_model_args` in `brm`. (#525) * Save model objects via argument `file` in `add_ic` after adding model fit criteria. (#478) * Compute density ratios based on MCMC samples via `density_ratio`. * Ignore offsets in various post-processing methods via argument `offset`. * Update addition terms in formulas via `update_adterms`. ### Other changes * Improve internal modularization of smooth terms. * Reduce size of internal example models. ### Bug fixes * Correctly plot splines with factorial covariates via `marginal_smooths`. * Allow sampling from priors in intercept only models thanks to Emmanuel Charpentier. (#529) * Allow logical operators in non-linear formulas. # brms 2.5.0 ### New features * Improve `marginal_effects` to better display ordinal and categorical models via argument `categorical`. (#491, #497) * Improve method `kfold` to offer more options for specifying omitted subsets. (#510) * Compute estimated values of non-linear parameters via argument `nlpar` in method `fitted`. * Disable automatic cell-mean coding in model formulas without an intercept via argument `cmc` of `brmsformula` and related functions thanks to Marie Beisemann. * Allow using the `bridge_sampler` method even if prior samples are drawn within the model. (#485) * Specify post-processing functions of custom families directly in `custom_family`. * Select a subset of coefficients in `fixef`, `ranef`, and `coef` via argument `pars`. (#520) * Allow to `overwrite` already stored fit indices when using `add_ic`. ### Other changes * Ignore argument `resp` when post-processing univariate models thanks to Ruben Arslan. (#488) * Deprecate argument `ordinal` of `marginal_effects`. (#491) * Deprecate argument `exact_loo` of `kfold`. 
(#510) * Deprecate usage of `binomial` families without specifying `trials`. * No longer sample from priors of population-level intercepts when using the default intercept parameterization. ### Bug fixes * Correctly sample from LKJ correlation priors thanks to Donald Williams. * Remove stored fit indices when calling `update` on brmsfit objects thanks to Emmanuel Charpentier. (#490) * Fix problems when predicting a single data point using spline models thanks to Emmanuel Charpentier. (#494) * Set `Post.Prob = 1` if `Evid.Ratio = Inf` in method `hypothesis` thanks to Andrew Milne. (#509) * Ensure correct handling of argument `file` in `brm_multiple`. # brms 2.4.0 ### New features * Define custom variables in all of Stan's program blocks via function `stanvar`. (#459) * Change the scope of non-linear parameters to be global within univariate models. (#390) * Allow to automatically group predictor values in Gaussian processes specified via `gp`. This may lead to a considerable increase in sampling efficiency. (#300) * Compute LOO-adjusted R-squared using method `loo_R2`. * Compute non-linear predictors outside of a loop over observations by means of argument `loop` in `brmsformula`. * Fit non-linear mixture models. (#456) * Fit censored or truncated mixture models. (#469) * Allow `horseshoe` and `lasso` priors to be set on special population-level effects. * Allow vectors of length greater one to be passed to `set_prior`. * Conveniently save and load fitted model objects in `brm` via argument `file`. (#472) * Display posterior probabilities in the output of `hypothesis`. ### Other changes * Deprecate argument `stan_funs` in `brm` in favor of using the `stanvars` argument for the specification of custom Stan functions. * Deprecate arguments `flist` and `...` in `nlf`. * Deprecate argument `dpar` in `lf` and `nlf`. ### Bug fixes * Allow custom families in mixture models thanks to Noam Ross. (#453) * Ensure compatibility with **mice** version 3.0. 
(#455) * Fix naming of correlation parameters of group-level terms with multiple subgroups thanks to Kristoffer Magnusson. (#457) * Improve scaling of default priors in `lognormal` models (#460). * Fix multiple problems in the post-processing of categorical models. * Fix validation of nested grouping factors in post-processing methods when passing new data thanks to Liam Kendall. # brms 2.3.1 ### New features * Allow censoring and truncation in zero-inflated and hurdle models. (#430) * Export zero-inflated and hurdle distribution functions. ### Other changes * Improve sampling efficiency of the ordinal families `cumulative`, `sratio`, and `cratio`. (#433) * Allow to specify a single k-fold subset in method `kfold`. (#441) ### Bug fixes * Fix a problem in `launch_shinystan` due to which the maximum treedepth was not correctly displayed thanks to Paul Galpern. (#431) # brms 2.3.0 ### Features * Extend `cor_car` to support intrinsic CAR models in pairwise difference formulation thanks to the case study of Mitzi Morris. * Compute `loo` and related methods for non-factorizable normal models. ### Other changes * Rename quantile columns in `posterior_summary`. This affects the output of `predict` and related methods if `summary = TRUE`. (#425) * Use hashes to check if models have the same response values when performing model comparisons. (#414) * No longer set `pointwise` dynamically in `loo` and related methods. (#416) * No longer show information criteria in the summary output. * Simplify internal workflow to implement native response distributions. (#421) ### Bug fixes * Allow `cor_car` in multivariate models with residual correlations thanks to Quentin Read. (#427) * Fix a problem in the Stan code generation of distributional `beta` models thanks to Hans van Calster. (#404) * Fix `launch_shinystan.brmsfit` so that all parameters are now shown correctly in the diagnose tab. 
(#340) # brms 2.2.0 ### Features * Specify custom response distributions with function `custom_family`. (#381) * Model missing values and measurement error in responses using the `mi` addition term. (#27, #343) * Allow missing values in predictors using `mi` terms on the right-hand side of model formulas. (#27) * Model interactions between the special predictor terms `mo`, `me`, and `mi`. (#313) * Introduce methods `model_weights` and `loo_model_weights` providing several options to compute model weights. (#268) * Introduce method `posterior_average` to extract posterior samples averaged across models. (#386) * Allow hyperparameters of group-level effects to vary over the levels of a categorical covariate using argument `by` in function `gr`. (#365) * Allow predictions of measurement-error models with new data. (#335) * Pass user-defined variables to Stan via `stanvar`. (#219, #357) * Allow ordinal families in mixture models. (#389) * Model covariates in multi-membership structures that vary over the levels of the grouping factor via `mmc` terms. (#353) * Fit shifted log-normal models via family `shifted_lognormal`. (#218) * Specify nested non-linear formulas. * Introduce function `make_conditions` to ease preparation of conditions for `marginal_effects`. ### Other changes * Change the parameterization of `weibull` and `exgaussian` models to be consistent with other model classes. Post-processing of related models fitted with earlier version of `brms` is no longer possible. * Treat integer responses in `ordinal` models as directly indicating categories even if the lowest integer is not one. * Improve output of the `hypothesis` method thanks to the ideas of Matti Vuorre. (#362) * Always plot `by` variables as facets in `marginal_smooths`. * Deprecate the `cor_bsts` correlation structure. ### Bug fixes * Allow the `:` operator to combine groups in multi-membership terms thanks to Gang Chen. 
* Avoid an unexpected error when calling `LOO` with argument `reloo = TRUE` thanks to Peter Konings. (#348) * Fix problems in `predict` when applied to categorical models thanks to Lydia Andreyevna Krasilnikova and Thomas Vladeck. (#336, #345) * Allow truncation in multivariate models with missing values thanks to Malte Lau Petersen. (#380) * Force time points to be unique within groups in autocorrelation structures thanks to Ruben Arslan. (#363) * Fix problems when post-processing multiple uncorrelated group-level terms of the same grouping factor thanks to Ivy Jansen. (#374) * Fix a problem in the Stan code of multivariate `weibull` and `frechet` models thanks to the GitHub user philj1s. (#375) * Fix a rare error when post-processing `binomial` models thanks to the GitHub user SeanH94. (#382) * Keep attributes of variables when preparing the `model.frame` thanks to Daniel Luedecke. (#393) # brms 2.1.0 ### Features * Fit models on multiple imputed datasets via `brm_multiple` thanks to Ruben Arslan. (#27) * Combine multiple `brmsfit` objects via function `combine_models`. * Compute model averaged posterior predictions with method `pp_average`. (#319) * Add new argument `ordinal` to `marginal_effects` to generate special plots for ordinal models thanks to the idea of the GitHub user silberzwiebel. (#190) * Use informative inverse-gamma priors for length-scale parameters of Gaussian processes. (#275) * Compute hypotheses for all levels of a grouping factor at once using argument `scope` in method `hypothesis`. (#327) * Vectorize user-defined `Stan` functions exported via `export_functions` using argument `vectorize`. * Allow predicting new data in models with ARMA autocorrelation structures. ### Bug fixes * Correctly recover noise-free coefficients through `me` terms thanks to Ruben Arslan. As a side effect, it is no longer possible to define priors on noise-free `Xme` variables directly, but only on their hyper-parameters `meanme` and `sdme`. 
* Fix problems in renaming parameters of the `cor_bsts` structure thanks to Joshua Edward Morten. (#312) * Fix some unexpected errors when predicting from ordinal models thanks to David Hervas and Florian Bader. (#306, #307, #331) * Fix problems when estimating and predicting multivariate ordinal models thanks to David West. (#314) * Fix various minor problems in autocorrelation structures thanks to David West. (#320) # brms 2.0.1 ### Features * Export the helper functions `posterior_summary` and `posterior_table` both being used to summarize posterior samples and predictions. ### Bug fixes * Fix incorrect computation of intercepts in `acat` and `cratio` models thanks to Peter Phalen. (#302) * Fix `pointwise` computation of `LOO` and `WAIC` in multivariate models with estimated residual correlation structure. * Fix problems in various S3 methods sometimes requiring unused variables to be specified in `newdata`. * Fix naming of Stan models thanks to Hao Ran Lai. # brms 2.0.0 This is the second major release of `brms`. The main new feature are generalized multivariate models, which now support everything already possible in univariate models, but with multiple response variables. Further, the internal structure of the package has been improved considerably to be easier to maintain and extend in the future. In addition, most deprecated functionality and arguments have been removed to provide a clean new start for the package. Models fitted with `brms` 1.0 or higher should remain fully compatible with `brms` 2.0. ### Features * Add support for generalized multivariate models, where each of the univariate models may have a different family and autocorrelation structure. Residual correlations can be estimated for multivariate `gaussian` and `student` models. All features supported in univariate models are now also available in multivariate models. (#3) * Specify different formulas for different categories in `categorical` models. 
* Add weakly informative default priors for the parameter class `Intercept` to improve convergence of more complex distributional models. * Optionally display the MC standard error in the `summary` output. (#280) * Add argument `re.form` as an alias of `re_formula` to the methods `posterior_predict`, `posterior_linpred`, and `predictive_error` for consistency with other packages making use of these methods. (#283) ### Other changes * Refactor many parts of the package to make it more consistent and easier to extend. * Show the link functions of all distributional parameters in the `summary` output. (#277) * Reduce working memory requirements when extracting posterior samples for use in `predict` and related methods thanks to Fanyi Zhang. (#224) * Remove deprecated aliases of functions and arguments from the package. (#278) * No longer support certain prior specifications, which were previously labeled as deprecated. * Remove the deprecated addition term `disp` from the package. * Remove old versions of methods `fixef`, `ranef`, `coef`, and `VarCorr`. * No longer support models fitted with `brms` < 1.0, which used the multivariate `'trait'` syntax originally deprecated in `brms` 1.0. * Make posterior sample extraction in the `summary` method cleaner and less error prone. * No longer fix the seed for random number generation in `brm` to avoid unexpected behavior in simulation studies. ### Bug fixes * Store `stan_funs` in `brmsfit` objects to allow using `update` on models with user-defined Stan functions thanks to Tom Wallis. (#288) * Fix problems in various post-processing methods when applied to models with the reserved variable `intercept` in group-level terms thanks to the GitHub user ASKurz. (#279) * Fix an unexpected error in `predict` and related methods when setting `sample_new_levels = "gaussian"` in models with only one group-level effect. Thanks to Timothy Mastny. 
(#286) # brms 1.10.2 ### Features * Allow setting priors on noise-free variables specified via function `me`. * Add arguments `Ksub`, `exact_loo` and `group` to method `kfold` for defining omitted subsets according to a grouping variable or factor. * Allow addition argument `se` in `skew_normal` models. ### Bug fixes * Ensure correct behavior of horseshoe and lasso priors in multivariate models thanks to Donald Williams. * Allow using `identity` links on all parameters of the `wiener` family thanks to Henrik Singmann. (#276) * Use reasonable dimnames in the output of `fitted` when returning linear predictors of ordinal models thanks to the GitHub user atrolle. (#274) * Fix problems in `marginal_smooths` occurring for multi-membership models thanks to Hans Tierens. # brms 1.10.0 ### Features * Rebuild monotonic effects from scratch to allow specifying interactions with other variables. (#239) * Introduce methods `posterior_linpred` and `posterior_interval` for consistency with other model fitting packages based on `Stan`. * Introduce function `theme_black` providing a black `ggplot2` theme. * Specify special group-level effects within the same terms as ordinary group-level effects. * Add argument `prob` to `summary`, which allows to control the width of the computed uncertainty intervals. (#259) * Add argument `newdata` to the `kfold` method. * Add several arguments to the `plot` method of `marginal_effects` to improve control over the appearences of the plots. ### Other changes * Use the same noise-free variables for all model parts in measurement error models. (#257) * Make names of local-level terms used in the `cor_bsts` structure more informative. * Store the `autocor` argument within `brmsformula` objects. * Store posterior and prior samples in separate slots in the output of method `hypothesis`. * No longer change the default theme of `ggplot2` when attaching `brms`. 
(#256) * Make sure signs of estimates are not dropped when rounding to zero in `summary.brmsfit`. (#263) * Refactor parts of `extract_draws` and `linear_predictor` to be more consistent with the rest of the package. ### Bug fixes * Do not silence the `Stan` parser when calling `brm` to get informative error messages about invalid priors. * Fix problems with spaces in priors passed to `set_prior`. * Handle non `data.frame` objects correctly in `hypothesis.default`. * Fix a problem relating to the colour of points displayed in `marginal_effects`. # brms 1.9.0 ### Features * Perform model comparisons based on marginal likelihoods using the methods `bridge_sampler`, `bayes_factor`, and `post_prob` all powered by the `bridgesampling` package. * Compute a Bayesian version of R-squared with the `bayes_R2` method. * Specify non-linear models for all distributional parameters. * Combine multiple model formulas using the `+` operator and the helper functions `lf`, `nlf`, and `set_nl`. * Combine multiple priors using the `+` operator. * Split the `nlpar` argument of `set_prior` into the three arguments `resp`, `dpar`, and `nlpar` to allow for more flexible prior specifications. ### Other changes * Refactor parts of the package to prepare for the implementation of more flexible multivariate models in future updates. * Keep all constants in the log-posterior in order for `bridge_sampler` to be working correctly. * Reduce the amount of renaming done within the `stanfit` object. * Rename argument `auxpar` of `fitted.brmsfit` to `dpar`. * Use the `launch_shinystan` generic provided by the `shinystan` package. * Set `bayesplot::theme_default()` as the default `ggplot2` theme when attaching `brms`. * Include citations of the `brms` overview paper as published in the Journal of Statistical Software. ### Bug fixes * Fix problems when calling `fitted` with `hurdle_lognormal` models thanks to Meghna Krishnadas. 
* Fix an issue that resulted in an error when using `fitted(..., scale = "linear")` with ordinal models thanks to Andrew Milne.
# brms 1.7.0 ### Features * Fit latent Gaussian processes of one or more covariates via function `gp` specified in the model formula (#221). * Rework methods `fixef`, `ranef`, `coef`, and `VarCorr` to be more flexible and consistent with other post-processing methods (#200). * Generalize method `hypothesis` to be applicable on all objects coercible to a `data.frame` (#198). * Visualize predictions via spaghetti plots using argument `spaghetti` in `marginal_effects` and `marginal_smooths`. * Introduce method `add_ic` to store and reuse information criteria in fitted model objects (#220). * Allow for negative weights in multi-membership grouping structures. * Introduce an `as.array` method for `brmsfit` objects. ### Other changes * Show output of \R code in HTML vignettes thanks to Ben Goodrich (#158). * Resolve citations in PDF vignettes thanks to Thomas Kluth (#223). * Improve sampling efficiency for `exgaussian` models thanks to Alex Forrence (#222). * Also transform data points when using argument `transform` in `marginal_effects` thanks to Markus Gesmann. ### Bug fixes * Fix an unexpected error in `marginal_effects` occurring for some models with autocorrelation terms thanks to Markus Gesmann. * Fix multiple problems occurring for models with the `cor_bsts` structure thanks to Andrew Ellis. # brms 1.6.1 ### Features * Implement zero-one-inflated beta models via family `zero_one_inflated_beta`. * Allow for more link functions in zero-inflated and hurdle models. ### Other changes * Ensure full compatibility with `bayesplot` version 1.2.0. * Deprecate addition argument `disp`. ### Bug fixes * Fix problems when setting priors on coefficients of auxiliary parameters when also setting priors on the corresponding coefficients of the mean parameter. Thanks to Matti Vuorre for reporting this bug. * Allow ordered factors to be used as grouping variables thanks to the GitHub user itissid. 
# brms 1.6.0 ### Features * Fit finite mixture models using family function `mixture`. * Introduce method `pp_mixture` to compute posterior probabilities of mixture component memberships thanks to a discussion with Stephen Martin. * Implement different ways to sample new levels of grouping factors in `predict` and related methods through argument `sample_new_levels`. Thanks to Tom Wallis and Jonah Gabry for a detailed discussion about this feature. * Add methods `loo_predict`, `loo_linpred`, and `loo_predictive_interval` for computing LOO predictions thanks to Aki Vehtari and Jonah Gabry. * Allow using `offset` in formulas of non-linear and auxiliary parameters. * Allow sparse matrix multiplication in non-linear and distributional models. * Allow using the `identity` link for all auxiliary parameters. * Introduce argument `negative_rt` in `predict` and `posterior_predict` to distinguish responses on the upper and lower boundary in `wiener` diffusion models thanks to Guido Biele. * Introduce method `control_params` to conveniently extract control parameters of the NUTS sampler. * Introduce argument `int_conditions` in `marginal_effects` for enhanced plotting of two-way interactions thanks to a discussion with Thomas Kluth. * Improve flexibility of the `conditions` argument of `marginal_effects`. * Extend method `stanplot` to correctly handle some new `mcmc_` plots of the `bayesplot` package. ### Other changes * Improve the `update` method to only recompile models when the `Stan` code changes. * Warn about divergent transitions when calling `summary` or `print` on `brmsfit` objects. * Warn about unused variables in argument `conditions` when calling `marginal_effects`. * Export and document several distribution functions that were previously kept internal. ### Bug fixes * Fix problems with the inclusion of offsets occurring for more complicated formulas thanks to Christian Stock. 
* Fix a bug that led to invalid Stan code when sampling from priors in intercept only models thanks to Tom Wallis. * Correctly check for category specific group-level effects in non-ordinal models thanks to Wayne Folta. * Fix problems in `pp_check` when specifying argument `newdata` together with arguments `x` or `group`. * Rename the last column in the output of `hypothesis` to `"star"` in order to avoid problems with zero length column names thanks to the GitHub user puterleat. * Add a missing new line statement at the end of the `summary` output thanks to Thomas Kluth. # brms 1.5.1 ### Features * Allow `horseshoe` and `lasso` priors to be applied on population-level effects of non-linear and auxiliary parameters. * Force recompiling `Stan` models in `update.brmsfit` via argument `recompile`. ### Other changes * Avoid indexing of matrices in non-linear models to slightly improve sampling speed. ### Bug fixes * Fix a severe problem (introduced in version 1.5.0), when predicting `Beta` models thanks to Vivian Lam. * Fix problems when summarizing some models fitted with older version of `brms` thanks to Vivian Lam. * Fix checks of argument `group` in method `pp_check` thanks to Thomas K. * Get arguments `subset` and `nsamples` working correctly in `marginal_smooths`. # brms 1.5.0 ### Features * Implement the generalized extreme value distribution via family `gen_extreme_value`. * Improve flexibility of the `horseshoe` prior thanks to Juho Piironen. * Introduce auxiliary parameter `mu` as an alternative to specifying effects within the `formula` argument in function `brmsformula`. * Return fitted values of auxiliary parameters via argument `auxpar` of method `fitted`. * Add vignette `"brms_multilevel"`, in which the advanced formula syntax of `brms` is explained in detail using several examples. ### Other changes * Refactor various parts of the package to ease implementation of mixture and multivariate models in future updates. 
This should not have any user visible effects. * Save the version number of `rstan` in element `version` of `brmsfit` objects. ### Bug fixes * Fix a rare error when predicting `von_mises` models thanks to John Kirwan. # brms 1.4.0 ### Features * Fit quantile regression models via family `asym_laplace` (asymmetric Laplace distribution). * Specify non-linear models in a (hopefully) more intuitive way using `brmsformula`. * Fix auxiliary parameters to certain values through `brmsformula`. * Allow `family` to be specified in `brmsformula`. * Introduce family `frechet` for modelling strictly positive responses. * Allow truncation and censoring at the same time. * Introduce function `prior_` allowing to specify priors using one-sided formulas or `quote`. * Pass priors to `Stan` directly without performing any checks by setting `check = FALSE` in `set_prior`. * Introduce method `nsamples` to extract the number of posterior samples. * Export the main formula parsing function `parse_bf`. * Add more options to customize two-dimensional surface plots created by `marginal_effects` or `marginal_smooths`. ### Other changes * Change structure of `brmsformula` objects to be more reliable and easier to extend. * Make sure that parameter `nu` never falls below `1` to reduce convergence problems when using family `student`. * Deprecate argument `nonlinear`. * Deprecate family `geometric`. * Rename `cov_fixed` to `cor_fixed`. * Make handling of addition terms more transparent by exporting and documenting related functions. * Refactor helper functions of the `fitted` method to be easier to extend in the future. * Remove many units tests of internal functions and add tests of user-facing functions instead. * Import some generics from `nlme` instead of `lme4` to remove dependency on the latter one. * Do not apply `structure` to `NULL` anymore to get rid of warnings in R-devel. 
* Add families `exgaussian` (exponentially modified Gaussian distribution) and `wiener` (Wiener diffusion model distribution) specifically suited to handle response times.
* Combine chains in method `as.mcmc` using argument `combine_chains`. * Estimate the auxiliary parameter `sigma` in models with known standard errors of the response by setting argument `sigma` to `TRUE` in addition function `se`. * Allow visualizing two-dimensional smooths with the `marginal_smooths` method. ### Other changes * Require argument `data` to be explicitly specified in all user facing functions. * Refactor the `stanplot` method to use `bayesplot` on the backend. * Use the `bayesplot` theme as the default in all plotting functions. * Add the abbreviations `mo` and `cs` to specify monotonic and category specific effects respectively. * Rename generated variables in the data.frames returned by `marginal_effects` to avoid potential naming conflicts. * Deprecate argument `cluster` and use the native `cores` argument of `rstan` instead. * Remove argument `cluster_type` as it is no longer required to apply forking. * Remove the deprecated `partial` argument. # brms 1.2.0 ### Features * Add the new family `hurdle_lognormal` specifically suited for zero-inflated continuous responses. * Introduce the `pp_check` method to perform various posterior predictive checks using the `bayesplot` package. * Introduce the `marginal_smooths` method to better visualize smooth terms. * Allow varying the scale of global shrinkage parameter of the `horseshoe` prior. * Add functions `prior` and `prior_string` as aliases of `set_prior`, the former allowing to pass arguments without quotes `""` using non-standard evaluation. * Introduce four new vignettes explaining how to fit non-linear models, distributional models, phylogenetic models, and monotonic effects respectively. * Extend the `coef` method to better handle category specific group-level effects. * Introduce the `prior_summary` method for `brmsfit` objects to obtain a summary of prior distributions applied. 
* Sample from the prior of the original population-level intercept when `sample_prior = TRUE` even in models with an internal temporary intercept used to improve sampling efficiency. * Introduce methods `posterior_predict`, `predictive_error` and `log_lik` as (partial) aliases of `predict`, `residuals`, and `logLik` respectively. ### Other changes * Improve computation of Bayes factors in the `hypothesis` method to be less influenced by MCMC error. * Improve documentation of default priors. * Refactor internal structure of some formula and prior evaluating functions. This should not have any user visible effects. * Use the `bayesplot` package as the new backend of `plot.brmsfit`. ### Bug fixes * Better mimic `mgcv` when parsing smooth terms to make sure all arguments are correctly handled. * Avoid an error occurring during the prediction of new data when grouping factors with only a single factor level were supplied thanks to Tom Wallis. * Fix `marginal_effects` to consistently produce plots for all covariates in non-linear models thanks to David Auty. * Improve the `update` method to better recognize situations where recompilation of the `Stan` code is necessary thanks to Raphael P.H. * Allow to correctly `update` the `sample_prior` argument to value `"only"`. * Fix an unexpected error occurring in many S3 methods when the thinning rate is not a divisor of the total number of posterior samples thanks to Paul Zerr. # brms 1.1.0 ### Features * Estimate monotonic group-level effects. * Estimate category specific group-level effects. * Allow `t2` smooth terms based on multiple covariates. * Estimate interval censored data via the addition argument `cens` in the model formula. * Allow to compute `residuals` also based on predicted values instead of fitted values. 
### Other changes * Use the prefix `bcs` in parameter names of category specific effects and the prefix `bm` in parameter names of monotonic effects (instead of the prefix `b`) to simplify their identification. * Ensure full compatibility with `ggplot2` version 2.2. ### Bug fixes * Fix a bug that could result in incorrect threshold estimates for `cumulative` and `sratio` models thanks to Peter Congdon. * Fix a bug that sometimes kept distributional `gamma` models from being compiled thanks to Tim Beechey. * Fix a bug causing an error in `predict` and related methods when two-level factors or logical variables were used as covariates in non-linear models thanks to Martin Schmettow. * Fix a bug causing an error when passing lists to additional arguments of smoothing functions thanks to Wayne Folta. * Fix a bug causing an error in the `prior_samples` method for models with multiple group-level terms that refer to the same grouping factor thanks to Marco Tullio Liuzza. * Fix a bug sometimes causing an error when calling `marginal_effects` for weighted models. # brms 1.0.1 ### Minor changes * Center design matrices inside the Stan code instead of inside `make_standata`. * Get rid of several warning messages occurring on CRAN. # brms 1.0.0 This is one of the largest updates of `brms` since its initial release. In addition to many new features, the multivariate `'trait'` syntax has been removed from the package as it was confusing for users, required much special case coding, and was hard to maintain. See `help(brmsformula)` for details of the formula syntax applied in `brms`. ### Features * Allow estimating correlations between group-level effects defined across multiple formulae (e.g., in non-linear models) by specifying IDs in each grouping term via an extended `lme4` syntax. * Implement distributional regression models allowing to fully predict auxiliary parameters of the response distribution. 
Among many other possibilities, this can be used to model heterogeneity of variances. * Zero-inflated and hurdle models do not use multivariate syntax anymore but instead have special auxiliary parameters named `zi` and `hu` defining zero-inflation / hurdle probabilities. * Implement the `von_mises` family to model circular responses. * Introduce the `brmsfamily` function for convenient specification of `family` objects. * Allow predictions of `t2` smoothing terms for new data. * Feature vectors as arguments for the addition argument `trunc` in order to model varying truncation points. ### Other changes * Remove the `cauchy` family after several months of deprecation. * Make sure that group-level parameter names are unambiguous by adding double underscores thanks to the idea of the GitHub user schmettow. * The `predict` method now returns predicted probabilities instead of absolute frequencies of samples for ordinal and categorical models. * Compute the linear predictor in the model block of the Stan program instead of in the transformed parameters block. This avoids saving samples of unnecessary parameters to disk. Thanks goes to Rick Arrano for pointing me to this issue. * Colour points in `marginal_effects` plots if sensible. * Set the default of the `robust` argument to `TRUE` in `marginal_effects.brmsfit`. ### Bug fixes * Fix a bug that could occur when predicting factorial response variables for new data. Only affects categorical and ordinal models. * Fix a bug that could lead to duplicated variable names in the Stan code when sampling from priors in non-linear models thanks to Tom Wallis. * Fix problems when trying to pointwise evaluate non-linear formulae in `logLik.brmsfit` thanks to Tom Wallis. * Ensure full compatibility of the `ranef` and `coef` methods with non-linear models. * Fix problems that occasionally occurred when handling `dplyr` datasets thanks to the GitHub user Atan1988. 
# brms 0.10.0 ### Features * Add support for generalized additive mixed models (GAMMs). Smoothing terms can be specified using the `s` and `t2` functions in the model formula. * Introduce `as.data.frame` and `as.matrix` methods for `brmsfit` objects. ### Other changes * The `gaussian("log")` family no longer implies a log-normal distribution, but a normal distribution with log-link to match the behavior of `glm`. The log-normal distribution can now be specified via family `lognormal`. * Update syntax of `Stan` models to match the recommended syntax of `Stan` 2.10. ### Bug fixes * The `ngrps` method should now always return the correct result for non-linear models. * Fix problems in `marginal_effects` for models using the reserved variable `intercept` thanks to Frederik Aust. * Fix a bug in the `print` method of `brmshypothesis` objects that could lead to duplicated and thus invalid row names. * Residual standard deviation parameters of multivariate models are again correctly displayed in the output of the `summary` method. * Fix problems when using variational Bayes algorithms with `brms` while having `rstan` >= 2.10.0 installed thanks to the GitHub user cwerner87. # brms 0.9.1 ### Features * Allow the '/' symbol in group-level terms in the `formula` argument to indicate nested grouping structures. * Allow to compute `WAIC` and `LOO` based on the pointwise log-likelihood using argument `pointwise` to substantially reduce memory requirements. ### Other changes * Add horizontal lines to the errorbars in `marginal_effects` plots for factors. ### Bug fixes * Fix a bug that could lead to a cryptic error message when changing some parts of the model `formula` using the `update` method. * Fix a bug that could lead to an error when calling `marginal_effects` for predictors that were generated with the `base::scale` function thanks to Tom Wallis. 
* Allow interactions of numeric and categorical predictors in `marginal_effects` to be passed to the `effects` argument in any order. * Fix a bug that could lead to incorrect results of `predict` and related methods when called with `newdata` in models using the `poly` function thanks to Brock Ferguson. * Make sure that user-specified factor contrasts are always applied in multivariate models. # brms 0.9.0 ### Features * Add support for `monotonic` effects allowing to use ordinal predictors without assuming their categories to be equidistant. * Apply multivariate formula syntax in categorical models to considerably increase modeling flexibility. * Add the addition argument `disp` to define multiplicative factors on dispersion parameters. For linear models, `disp` applies to the residual standard deviation `sigma` so that it can be used to weight observations. * Treat the fixed effects design matrix as sparse by using the `sparse` argument of `brm`. This can considerably reduce working memory requirements if the predictors contain many zeros. * Add the `cor_fixed` correlation structure to allow for fixed user-defined covariance matrices of the response variable. * Allow to pass self-defined `Stan` functions via argument `stan_funs` of `brm`. * Add the `expose_functions` method allowing to expose self-defined `Stan` functions in `R`. * Extend the functionality of the `update` method to allow all model parts to be updated. * Center the fixed effects design matrix also in multivariate models. This may lead to increased sampling speed in models with many predictors. ### Other changes * Refactor `Stan` code and data generating functions to be more consistent and easier to extend. * Improve checks of user-defined prior specifications. * Warn about models that have not converged. * Make sure that regression curves computed by the `marginal_effects` method are always smooth. * Allow to define category specific effects in ordinal models directly within the `formula` argument. 
### Bug fixes * Fix problems in the generated `Stan` code when using very long non-linear model formulas thanks to Emmanuel Charpentier. * Fix a bug that prohibited to change priors on single standard deviation parameters in non-linear models thanks to Emmanuel Charpentier. * Fix a bug that prohibited to use nested grouping factors in non-linear models thanks to Tom Wallis. * Fix a bug in the linear predictor computation within `R`, occurring for ordinal models with multiple category specific effects. This could lead to incorrect outputs of `predict`, `fitted`, and `logLik` for these models. * Make sure that the global `"contrasts"` option is not used when post-processing a model. # brms 0.8.0 ### Features * Implement generalized non-linear models, which can be specified with the help of the `nonlinear` argument in `brm`. * Compute and plot marginal effects using the `marginal_effects` method thanks to the help of Ruben Arslan. * Implement zero-inflated beta models through family `zero_inflated_beta` thanks to the idea of Ali Roshan Ghias. * Allow to restrict domain of fixed effects and autocorrelation parameters using new arguments `lb` and `ub` in function `set_prior` thanks to the idea of Joel Gombin. * Add an `as.mcmc` method for compatibility with the `coda` package. * Allow to call the `WAIC`, `LOO`, and `logLik` methods with new data. ### Other changes * Make sure that `brms` is fully compatible with `loo` version 0.1.5. * Optionally define the intercept as an ordinary fixed effect to avoid the reparametrization via centering of the fixed effects design matrix. * Do not compute the WAIC in `summary` by default anymore to reduce computation time of the method for larger models. * The `cauchy` family is now deprecated and will be removed soon as it often has convergence issues and not much practical application anyway. 
* Change the default settings of the number of chains and warmup samples to the defaults of `rstan` (i.e., `chains = 4` and `warmup = iter / 2`). * Do not remove bad behaving chains anymore as they may point to general convergence problems that are dangerous to ignore. * Improve flexibility of the `theme` argument in all plotting functions. * Only show the legend once per page, when computing trace and density plots with the `plot` method. * Move code of self-defined `Stan` functions to `inst/chunks` and incorporate them into the models using `rstan::stanc_builder`. Also, add unit tests for these functions. ### Bug fixes * Fix problems when predicting with `newdata` for zero-inflated and hurdle models thanks to Ruben Arslan. * Fix problems when predicting with `newdata` if it is a subset of the data stored in a `brmsfit` object thanks to Ruben Arslan. * Fix data preparation for multivariate models if some responses are `NA` thanks to Raphael Royaute. * Fix a bug in the `predict` method occurring for some multivariate models so that it now always returns the predictions of all response variables, not just the first one. * Fix a bug in the log-likelihood computation of `hurdle_poisson` and `hurdle_negbinomial` models. This may lead to minor changes in the values obtained by `WAIC` and `LOO` for these models. * Fix some backwards compatibility issues of models fitted with version <= 0.5.0 thanks to Ulf Koether. # brms 0.7.0 ### Features * Use variational inference algorithms as alternative to the NUTS sampler by specifying argument `algorithm` in the `brm` function. * Implement beta regression models through family `Beta`. * Implement zero-inflated binomial models through family `zero_inflated_binomial`. * Implement multiplicative effects for family `bernoulli` to fit (among others) 2PL IRT models. 
* Generalize the `formula` argument for zero-inflated and hurdle models so that predictors can be included in only one of the two model parts thanks to the idea of Wade Blanchard. * Combine fixed and random effects estimates using the new `coef` method. * Call the `residuals` method with `newdata` thanks to the idea of Friederike Holz-Ebeling. * Allow new levels of random effects grouping factors in the `predict`, `fitted`, and `residuals` methods using argument `allow_new_levels`. * Selectively exclude random effects in the `predict`, `fitted`, and `residuals` methods using argument `re_formula`. * Add a `plot` method for objects returned by method `hypothesis` to visualize prior and posterior distributions of the hypotheses being tested. ### Other changes * Improve evaluation of the response part of the `formula` argument to reliably allow terms with more than one variable (e.g., `y/x ~ 1`). * Improve sampling efficiency of models containing many fixed effects through centering the fixed effects design matrix thanks to Wayne Folta. * Improve sampling efficiency of models containing uncorrelated random effects specified by means of `(random || group)` terms in `formula` thanks to Ali Roshan Ghias. * Utilize user-defined functions in the `Stan` code of ordinal models to improve readability as well as sampling efficiency. * Make sure that model comparisons using `LOO` or `WAIC` are only performed when models are based on the same responses. * Use some generic functions of the `lme4` package to avoid unnecessary function masking. This leads to a change in the argument order of method `VarCorr`. * Change the `ggplot` theme in the `plot` method through argument `theme`. * Remove the `n.` prefix in arguments `n.iter`, `n.warmup`, `n.thin`, `n.chains`, and `n.cluster` of the `brm` function. The old argument names remain usable as deprecated aliases. * Amend names of random effects parameters to simplify matching with their respective grouping factor levels. 
### Bug fixes * Fix a bug in the `hypothesis` method that could cause valid model parameters to be falsely reported as invalid. * Fix a bug in the `prior_samples` method that could cause prior samples of parameters of the same class to be artificially correlated. * Fix `Stan` code of linear models with moving-average effects and non-identity link functions so that they no longer contain code related solely to autoregressive effects. * Fix a bug in the evaluation of `formula` that could cause complicated random effects terms to be falsely treated as fixed effects. * Fix several bugs when calling the `fitted` and `predict` methods with `newdata` thanks to Ali Roshan Ghias. # brms 0.6.0 ### Features * Add support for zero-inflated and hurdle models thanks to the idea of Scott Baldwin. * Implement inverse gaussian models through family `inverse.gaussian`. * Allow to specify truncation boundaries of the response variable thanks to the idea of Maciej Beresewicz. * Add support for autoregressive (AR) effects of residuals, which can be modeled using the `cor_ar` and `cor_arma` functions. * Stationary autoregressive-moving-average (ARMA) effects of order one can now also be fitted using special covariance matrices. * Implement multivariate student-t models. * Binomial and ordinal families now support the `cauchit` link function. * Allow family functions to be used in the `family` argument. * Easy access to various `rstan` plotting functions using the `stanplot` method. * Implement horseshoe priors to model sparsity in fixed effects coefficients thanks to the idea of Josh Chang. * Automatically scale default standard deviation priors so that they remain only weakly informative independent on the response scale. * Report model weights computed by the `loo` package when comparing multiple fitted models. ### Other changes * Separate the fixed effects Intercept from other fixed effects in the `Stan` code to slightly improve sampling efficiency. 
* Move autoregressive (AR) effects of the response from the `cor_ar` to the `cor_arr` function as the result of implementing AR effects of residuals. * Improve checks on argument `newdata` used in the `fitted` and `predict` method. * Method `standata` is now the only way to extract data that was passed to `Stan` from a `brmsfit` object. * Slightly improve `Stan` code for models containing no random effects. * Change the default prior of the degrees of freedom of the `student` family to `gamma(2,0.1)`. * Improve readability of the output of method `VarCorr`. * Export the `make_stancode` function to give users direct access to `Stan` code generated by `brms`. * Rename the `brmdata` function to `make_standata`. The former remains usable as a deprecated alias. * Improve documentation to better explain differences in autoregressive effects across R packages. ### Bug fixes * Fix a bug that could cause an unexpected error when the `predict` method was called with `newdata`. * Avoid side effects of the `rstan` compilation routines that could occasionally cause R to crash. * Make `brms` work correctly with `loo` version 0.1.3 thanks to Mauricio Garnier Villarreal and Jonah Gabry. * Fix a bug that could cause WAIC and LOO estimates to be slightly incorrect for `gaussian` models with `log` link. # brms 0.5.0 ### Features * Compute the Watanabe-Akaike information criterion (WAIC) and leave-one-out cross-validation (LOO) using the `loo` package. * Provide an interface to `shinystan` with S3 method `launch_shiny`. * New functions `get_prior` and `set_prior` to make prior specifications easier. * Log-likelihood values and posterior predictive samples can now be calculated within R after the model has been fitted. * Make predictions based on new data using S3 method `predict`. * Allow for customized covariance structures of grouping factors with multiple random effects. * New S3 methods `fitted` and `residuals` to compute fitted values and residuals, respectively. 
### Other changes * Arguments `WAIC` and `predict` are removed from the `brm` function, as they are no longer necessary. * New argument `cluster_type` in function `brm` allowing to choose the cluster type created by the parallel package. * Remove chains that fail to initialize while sampling in parallel leaving the other chains untouched. * Redesign trace and density plots to be faster and more stable. * S3 method `VarCorr` now always returns covariance matrices regardless of whether correlations were estimated. ### Bug fixes * Fix a bug in S3 method `hypothesis` related to the calculation of Bayes-factors for point hypotheses. * User-defined covariance matrices that are not strictly positive definite for numerical reasons should now be handled correctly. * Fix problems when a factor is used as fixed effect and as random effects grouping variable at the same time thanks to Ulf Koether. * Fix minor issues with internal parameter naming. * Perform additional checking on user defined priors. # brms 0.4.1 ### Features * Allow for sampling from all specified proper priors in the model. * Compute Bayes-factors for point hypotheses in S3 method `hypothesis`. ### Bug fixes * Fix a bug that could cause an error for models with multiple grouping factors thanks to Jonathan Williams. * Fix a bug that could cause an error for weighted poisson and exponential models. # brms 0.4.0 ### Features * Implement the Watanabe-Akaike Information Criterion (WAIC). * Implement the `||`-syntax for random effects allowing for the estimation of random effects standard deviations without the estimation of correlations. * Allow to combine multiple grouping factors within one random effects argument using the interaction symbol `:`. * Generalize S3 method `hypothesis` to be used with all parameter classes not just fixed effects. In addition, one-sided hypothesis testing is now possible. * Introduce new family `multigaussian` allowing for multivariate normal regression. 
* Introduce new family `bernoulli` for dichotomous response variables as a more efficient alternative to families `binomial` or `categorical` in this special case. ### Other changes * Slightly change the internal structure of brms to reflect that `rstan` is finally on CRAN. * Thoroughly check validity of the response variable before the data is passed to `Stan`. * Prohibit variable names containing double underscores `__` to avoid naming conflicts. * Allow function calls with several arguments (e.g. `poly(x,3)`) in the formula argument of function `brm`. * Always center random effects estimates returned by S3 method `ranef` around zero. * Prevent the use of customized covariance matrices for grouping factors with multiple random effects for now. * Remove any experimental `JAGS` code from the package. ### Bug fixes * Fix a bug in S3 method `hypothesis` leading to an error when numbers with decimal places were used in the formulation of the hypotheses. * Fix a bug in S3 method `ranef` that caused an error for grouping factors with only one random effect. * Fix a bug that could cause the fixed intercept to be wrongly estimated in the presence of multiple random intercepts thanks to Jarrod Hadfield. # brms 0.3.0 ### Features * Introduce new methods `parnames` and `posterior_samples` for class 'brmsfit' to extract parameter names and posterior samples for given parameters, respectively. * Introduce new method `hypothesis` for class `brmsfit` allowing to test non-linear hypotheses concerning fixed effects. * Introduce new argument `addition` in function brm to get a more flexible approach in specifying additional information on the response variable (e.g., standard errors for meta-analysis). Alternatively, this information can also be passed to the `formula` argument directly. * Introduce weighted and censored regressions through argument `addition` of function brm. 
* Introduce new argument `cov.ranef` in the `brm` function allowing for customized covariance structures of random effects thanks to the idea of Boby Mathew. * Introduce new argument `autocor` in function brm allowing for autocorrelation of the response variable. * Introduce new functions `cor.ar`, `cor.ma`, and `cor.arma`, to be used with argument `autocor` for modeling autoregressive, moving-average, and autoregressive-moving-average models. ### Other changes * Amend parametrization of random effects to increase efficiency of the sampling algorithms. * Improve vectorization of sampling statements. ### Bug fixes * Fix a bug that could cause an error when fitting poisson models while `predict = TRUE`. * Fix a bug that caused an error when sampling only one chain while `silent = TRUE`. # brms 0.2.0 ### Features * New S3 class `brmsfit` to be returned by the `brm` function. * New methods for class `brmsfit`: `summary`, `print`, `plot`, `predict`, `fixef`, `ranef`, `VarCorr`, `nobs`, `ngrps`, and `formula`. * Introduce new argument `silent` in the `brm` function, allowing to suppress most of `Stan`'s intermediate output. * Introduce new families `negbinomial` (negative binomial) and `geometric` to allow for more flexibility in modeling count data. ### Other changes * Amend warning and error messages to make them more informative. * Correct examples in the documentation. * Extend the README file. ### Bug fixes * Fix a bug that caused problems when formulas contained more complicated function calls. * Fix a bug that caused an error when posterior predictives were sampled for family `cumulative`. * Fix a bug that prohibited the use of improper flat priors for parameters that have proper priors by default. 
# brms 0.1.0 * Initial release version brms/MD50000644000176200001440000005642314504354272011545 0ustar liggesusers1c4e0075685e2411346b2b0d1f5618ed *DESCRIPTION f93e0f75909b0e57fda8c00323aa9082 *NAMESPACE 5400c58c78bca8828b0c2fdee9efb57d *NEWS.md 51b07ba52f3175ad20dc9eab04e9b7b5 *R/autocor.R a977205a6d4855316b9135210b77d140 *R/backends.R a62648c0b2b854f598e93e5b0d7f54a1 *R/bayes_R2.R aaca4a01a779f6299bfa90f846009ff4 *R/bridgesampling.R 70c854017f787ca5410af4d1b11ba338 *R/brm.R 78d11d3dcc85a7c1515069490b65e554 *R/brm_multiple.R 4e51574efdc48fd8c1573048d8cb18b2 *R/brms-package.R 8543ce72c62510e2407175d2377a2e43 *R/brmsfit-class.R b29f8ef84b35e2568d8015ed029f8d07 *R/brmsfit-helpers.R b8ac9b917cf4a00195e08d9302cf2630 *R/brmsfit-methods.R f41059be5d60e2e3d645ca81d511ff78 *R/brmsformula.R ffd00b0976dfc34caf2a3e3acc50ef6f *R/brmsterms.R 400bd9256ea2a414b5ea213da35e0f9a *R/conditional_effects.R 191afe28bfbaea0d95c8173b08aa5247 *R/conditional_smooths.R 2f55b43d3ea4a02459fcfc80408c4f0e *R/data-helpers.R ea607c4f5fc20f7907a08ef8cc830a9f *R/data-predictor.R 0bc6b9489dc0b8c5d3973eacae53d5df *R/data-response.R d79f47e77c3b11477953f636c7605647 *R/datasets.R 0c817919353979d3a2789e40fb78311b *R/diagnostics.R 1cbf56464d8f335ba227690250bd0315 *R/distributions.R 805784342a291b9a55d808694e76380e *R/emmeans.R 26d517502235bfe06358d79a1eb1483c *R/exclude_pars.R a8ef78dc95fc0a1275a3086c59dbe18a *R/exclude_terms.R 1fe8d5cc34c860a4ea6a9d67b700cb31 *R/families.R 3df049b8c68d1608b5d66df5b31e2744 *R/family-lists.R 68f36aadd1f1d5f5488dd8c141ef5593 *R/formula-ac.R 4adb356417101ed135f2c1523135e329 *R/formula-ad.R b9d10079aad0858ab9e5271763b22eac *R/formula-cs.R 67a8782eaecf477c25a9351aa4640499 *R/formula-gp.R d3457d5af09ab5c81db99dcd19cefbab *R/formula-re.R e58b63fc58ee16c156631fb3e1eb95ab *R/formula-sm.R 2bd33d27d45e762eb80c7c891476b883 *R/formula-sp.R aefb99178ab21e8064b69728118f3ae8 *R/ggplot-themes.R ca3b0f323cb9ae8a69933a08c0d00547 *R/hypothesis.R 744ee0078c11a264a5828eb6f32dc643 *R/kfold.R 
919680283ce369f76420b4888ca19f35 *R/launch_shinystan.R 02a5d988fd9a6fa5dc2f890412a9ab2e *R/log_lik.R 6540a28c12e609854e6879acbec487d2 *R/loo.R 563b0ae4233be0b957f603954b484f68 *R/loo_moment_match.R 101ef84e65d885b4d43d3cbd2a75d9b5 *R/loo_predict.R 24a2aef03aa8af0815e352d452fe3228 *R/loo_subsample.R 71c9036f3d4dc02049eef21acc00e4f7 *R/lsp.R 9f52e2623d658620abc6f27cf946283f *R/make_stancode.R f55d71c9ff274ec97e7ca0d69912ea14 *R/make_standata.R a54fc7a51f9673e3bbd64b51cabdb8ad *R/misc.R 4b1b8ea8e0b0fef4c5e6c4de29ed93e5 *R/model_weights.R a133b55157752e50dfb55e0c1253d64c *R/numeric-helpers.R 62edba889c324fe1d6991f2ee4ccf6c8 *R/plot.R 97742adeebaa219cd89fddb6539a73cf *R/posterior.R d8263fc0717b64053a6f1969341dbcef *R/posterior_epred.R 35bdc09399cbbb6a5c5ca19a3bf1f27f *R/posterior_predict.R d440bca90ad433492cabc870135ea8cc *R/posterior_samples.R 3346bb6b9b80c3240ebb1539ada8b95c *R/posterior_smooths.R b127515afdc1cefdf577738d8f8d12f1 *R/pp_check.R 62ee3cd86c9c7bc84e2c7522951715bf *R/pp_mixture.R bd404eda215fefa87f3145c524a25c82 *R/predictive_error.R e103afcd1c19a7e6ccffa03acebd05bd *R/predictor.R 08308fe7302e691dfa678b115b2fb5c1 *R/prepare_predictions.R 5aa300349b0a542bce1aff6d55f9929c *R/prior_draws.R 4091bd9aa821578e7bc8175396338d96 *R/priors.R 5b6147b8c8b0ba75ad961810cdc7149d *R/projpred.R 28b902dafaf42771f633d3aabeb6318b *R/reloo.R 08fac48bd70f8e24fd4dc672d72ebdd9 *R/rename_pars.R a557a8ac1ca3fae507c31cdde6534c94 *R/restructure.R 4bb1354729a341a41083baed5a52508c *R/stan-helpers.R 7b6e025f8980b9cbe5fff3046838a994 *R/stan-likelihood.R ca13c5aea5e2a042a03263efbe3f6187 *R/stan-predictor.R e63b8952e5dc3031a50d82e7609b85a9 *R/stan-prior.R 0cbc45c354a119da1d276413985d68e4 *R/stan-response.R 6c49b5a6a1ec61143837b039f6792ff0 *R/stanvars.R ef2cbc548463738596c01257a9d8de56 *R/summary.R 29d77be94d28b5e1af5dc271b6b42352 *R/sysdata.rda 46f2b9ecf901307008c217bee2f2c88b *R/update.R 72b4fff3d0529ec33b90921c73c8ba4c *R/zzz.R 840450a90d1819e3064892ed59ce4449 *README.md 
09b38eaa01414ce2c54d6227da53574b *build/vignette.rds 29545093bb6edb0416e8ca2725949829 *data/epilepsy.rda d3e1729f040540ec7d640ce9d9e1c7c4 *data/inhaler.rda b491f9e27065b6a30dfaa06835d4058f *data/kidney.rda 1012588a05c5b7e59dfb16bc57adcf85 *data/loss.rda 2024133c73ebe6ad6887ac0f58235b4d *inst/CITATION ea378d56df10b3d9272d2dca8262e8fb *inst/chunks/fun_asym_laplace.stan f2521f333d3275f362f4faf53cf139c4 *inst/chunks/fun_cauchit.stan fd90371e171982b87e128fcf8113266e *inst/chunks/fun_cholesky_cor_ar1.stan b8cdce7d7bdad258e873ab5cc8cc24d4 *inst/chunks/fun_cholesky_cor_arma1.stan 356998171806c24364357c7fa83bc4f3 *inst/chunks/fun_cholesky_cor_cosy.stan 1a86c51d5048b4cb4f280f041e28c7c6 *inst/chunks/fun_cholesky_cor_ma1.stan a30d2ab735cbaa4a548853443afc2d02 *inst/chunks/fun_cloglog.stan deb4338fee0c1d931b368cee1250e9c4 *inst/chunks/fun_com_poisson.stan 9cb57f90a4bb61d8ee746f518e2ae9ef *inst/chunks/fun_cox.stan d7f5ae53cc6aac2bcd8747f56692daae *inst/chunks/fun_dirichlet_logit.stan 77bad66bf658c6d78b16f3b74284a9c7 *inst/chunks/fun_discrete_weibull.stan 737cf8920b1cc73a6e3f5e1784a4c719 *inst/chunks/fun_gaussian_process.stan b9906a838d8cc37dad6482a8d92222e9 *inst/chunks/fun_gaussian_process_approx.stan 90bf3da8c32ea57699d7bd87ed025da8 *inst/chunks/fun_gen_extreme_value.stan ce292cbc3e1d372d58c21a0626bed6da *inst/chunks/fun_horseshoe.stan 6f382f8dd397b8a4a0aae8085753ffbc *inst/chunks/fun_hurdle_gamma.stan 32a82f3e36792d38d9f3b6aa58b14dbd *inst/chunks/fun_hurdle_lognormal.stan 22cb3b99c9a7aa3b39756cd2575635fb *inst/chunks/fun_hurdle_negbinomial.stan a753bb5e1ed481d31dfb5a72a27c937d *inst/chunks/fun_hurdle_poisson.stan 6fc37af292edf6efcdf13f5686dc1e18 *inst/chunks/fun_inv_gaussian.stan a3157d0fcbcfb824bbb8b8254eefd4f1 *inst/chunks/fun_is_equal.stan 00aa1a161641dc9210d78322c8d900ef *inst/chunks/fun_logistic_normal.stan e62c82e93ce1290ebed84ba2e2f93e48 *inst/chunks/fun_logm1.stan 2f8ebeeae1b8524a0f336c7bd7951fdd *inst/chunks/fun_monotonic.stan 4b3a37209ccaf984ee6eb865babe87b1 
*inst/chunks/fun_multinomial_logit.stan 6a4e61936b11e2d17d4c2b1a1fcd3502 *inst/chunks/fun_normal_errorsar.stan 7da89af6f64975a537de8f5c77d307e2 *inst/chunks/fun_normal_fcor.stan abd3602bbdca2185ec2f338d4c284ae0 *inst/chunks/fun_normal_lagsar.stan 7ab49277c42ee6eaf32247e72b89a163 *inst/chunks/fun_normal_time.stan 6a90bdd74f4d76861fb7a5a3e69e57b0 *inst/chunks/fun_normal_time_se.stan f4429d472004f71c4a71cba4f1c631bb *inst/chunks/fun_r2d2.stan 0bc8ab0d7b8523aa844e1afbb8e9259f *inst/chunks/fun_scale_r_cor.stan 303ec48df785bfd1d452cad8eb558c08 *inst/chunks/fun_scale_r_cor_by.stan 89efdf967914bf7f96759cc706a6471a *inst/chunks/fun_scale_r_cor_by_cov.stan 73c259890366f5d8ae4268c41114a80a *inst/chunks/fun_scale_r_cor_cov.stan b74b2047ad34ad3338f482d2e7e35344 *inst/chunks/fun_scale_time_err.stan 3984fbf915968ea965adda307f313964 *inst/chunks/fun_scale_xi.stan 704cb649d6370d7b143ac75c8a0a09de *inst/chunks/fun_sequence.stan 9055b7ed64fb3c800445880c6b7c1827 *inst/chunks/fun_softit.stan dea555e32d108b81f7b1b9ec6f2df2cd *inst/chunks/fun_softplus.stan dcdf6b009a117d4b65f8c0c231decbe9 *inst/chunks/fun_sparse_car_lpdf.stan b3c1b668aafb089e95b105c4825406f2 *inst/chunks/fun_sparse_icar_lpdf.stan 53896ce7ec5d9f389f6a1625b6a336a6 *inst/chunks/fun_squareplus.stan 7555bfbc11d4a00d0b107c3cc7058563 *inst/chunks/fun_stack_vectors.stan 21f75d7dcbc7ec643344bde03b071057 *inst/chunks/fun_student_t_errorsar.stan 7ecdcc3ef15aac75059696c3092159b9 *inst/chunks/fun_student_t_fcor.stan 17be1585e0d2a3da3db0f5b186dd0e57 *inst/chunks/fun_student_t_lagsar.stan 89a340d935ec389a47b2c294bd45be61 *inst/chunks/fun_student_t_time.stan 5c904c30462b103360ebc045bbcf58cc *inst/chunks/fun_student_t_time_se.stan 5fadf85cea68af8c71493b19b244309b *inst/chunks/fun_tan_half.stan 59b5e7f52ac92c6cf06de27f9d09b0c5 *inst/chunks/fun_von_mises.stan 05dd663dda6797c2ec25e2d67a39004d *inst/chunks/fun_which_range.stan 6e5b83378b4701b2c6fb3efd326f95ee *inst/chunks/fun_wiener_diffusion.stan 6a85f96ae29a539743865b155c8393ea 
*inst/chunks/fun_zero_inflated_asym_laplace.stan e76db5963e8b6358822005b7b3ffe57e *inst/chunks/fun_zero_inflated_beta.stan 701b8376108cde636fd66906f9ce6f17 *inst/chunks/fun_zero_inflated_beta_binomial.stan 007ca9d21e1e8ace60ac061af77ed100 *inst/chunks/fun_zero_inflated_binomial.stan 6f99414a24b70e038b595f3affc7584f *inst/chunks/fun_zero_inflated_negbinomial.stan 078cb83d3122b01e310aa6c7272e1db3 *inst/chunks/fun_zero_inflated_poisson.stan c2055bf9157eb32470ead3264ba79d91 *inst/chunks/fun_zero_one_inflated_beta.stan 73b796f222b65966a41e50d6e08c5483 *inst/doc/brms_customfamilies.R b044a924fbded90c6ca9da8bc01f85cd *inst/doc/brms_customfamilies.Rmd 5330f2476097fb1ec3c727460e03912b *inst/doc/brms_customfamilies.html 05e53c1b4763120cd27f208a820aa457 *inst/doc/brms_distreg.R 3a1e89b91c2b94282c6df6607f405a89 *inst/doc/brms_distreg.Rmd 54087f3d644b61251581bb5aeeeb1512 *inst/doc/brms_distreg.html 8fc9924120fd5301b479e2eebb8c2897 *inst/doc/brms_families.Rmd 541e8b4741923dc0beb6b88c31aab363 *inst/doc/brms_families.html 7bc766f8f78ca7a4b1206fc52b796a03 *inst/doc/brms_missings.R cdf2b42e1cf561cdf4f1f1f7c06d8148 *inst/doc/brms_missings.Rmd 2ce44f1f124f4ca0988300387a9e8a2b *inst/doc/brms_missings.html 975fbf30dc344a58912d2ba55fb31dd7 *inst/doc/brms_monotonic.R ee62fe59cf57cd22be35969e4c27dc7b *inst/doc/brms_monotonic.Rmd e2f7fb5a549f3ab954551dcb80251ceb *inst/doc/brms_monotonic.html f7cece21fca8fbaaa53a106038349d0c *inst/doc/brms_multilevel.ltx 4faf1524b34dfb59bfe8dbeafe7a4acc *inst/doc/brms_multilevel.pdf a7e2942d137e03618604f6815207a6c0 *inst/doc/brms_multivariate.R 7567746ed5926d36df405b3a22f01eef *inst/doc/brms_multivariate.Rmd 5d2c2288d920e13988f0478b311125b7 *inst/doc/brms_multivariate.html 3ac17800c6a2f929f1c8689434b57234 *inst/doc/brms_nonlinear.R 1d4ca841f24e6d41d803ea56afbdbbae *inst/doc/brms_nonlinear.Rmd a7acefc722f4de39fe55c46ddcd059b7 *inst/doc/brms_nonlinear.html 5ae728c35b6cd8d27e69cf0a119e646f *inst/doc/brms_overview.ltx af5d4cdddad8d61c7dea9704b9e256ac 
*inst/doc/brms_overview.pdf 8e540a5fba57119659a394690dcdf749 *inst/doc/brms_phylogenetics.R 85bd37fc5196318ee06e034c67a50c9a *inst/doc/brms_phylogenetics.Rmd 1acb1cde3939675cde5f915386ab8d04 *inst/doc/brms_phylogenetics.html 2024dafcddff989348dd66f408914d93 *inst/doc/brms_threading.R c123e93d353853ba9f95fc6ea2a77a35 *inst/doc/brms_threading.Rmd 4ebf85402149c1e70ef2412cf0ea69d4 *inst/doc/brms_threading.html f819a4d09188807bbb5fdda05405e6e1 *man/AsymLaplace.Rd 5213ddb4c75aedec7603e2fdea1af660 *man/BetaBinomial.Rd 14c9f1fc298dcbd26f807f24e07550db *man/Dirichlet.Rd 97e447e090056a382da7393a794dc68d *man/ExGaussian.Rd 348c7a3e4b2acf7822be61a7b9ace214 *man/Frechet.Rd 7b52f19a8bb07059496ab6b2b3aab6ef *man/GenExtremeValue.Rd 35c9075a7ca69042980395bfd9e02bc5 *man/Hurdle.Rd 7b4a4ebd29218e15291e36d3f5c46f9b *man/InvGaussian.Rd 3e30acb0a73351ea6c1b985e94028f42 *man/LogisticNormal.Rd 1ffc2bc2c24125e8d7e481fb9a6007b0 *man/MultiNormal.Rd add9404002291d1d5f16391e65941d1d *man/MultiStudentT.Rd 6416de3d84c529b87aea16a6873f7761 *man/R2D2.Rd 8bf7659c384cabee6cdd45e0616995db *man/Shifted_Lognormal.Rd 8ae449627b649dddccafb402636dd9d6 *man/SkewNormal.Rd 70ba47e2aa88bf4f6138afe10afc6ede *man/StudentT.Rd 6685626d29ded45092d866eab6dbc9c7 *man/VarCorr.brmsfit.Rd b3629fa3b04924241d79932524cc55e4 *man/VonMises.Rd 930e9213115e835e1ecb1321258c4308 *man/Wiener.Rd 2f1fdd6851666b81b68f2896f15c3c0b *man/ZeroInflated.Rd 950b24967deccd1d87d011cbdaba50fd *man/add_criterion.Rd 2551b940b4b7637a537f26bed7718db8 *man/add_ic.Rd 8ead7876a361cce15b28222a5dc69629 *man/add_rstan_model.Rd 6daa00533c3c23250fbad22cd90d1a15 *man/addition-terms.Rd d1694011ffb4be502a30de3bfa1d5709 *man/ar.Rd e48c1cebb35608b8aa990d9aaedcefab *man/arma.Rd 82b55dbf7026b27fe7b97bf7ce022b5a *man/as.brmsprior.Rd 1a5ec35ec370b88d90d312f2ee2894b9 *man/as.data.frame.brmsfit.Rd 0bebf3936e927a156ffba64b82863f98 *man/as.mcmc.brmsfit.Rd a8a13c885a4f26bb5badf8b43daf36c0 *man/autocor-terms.Rd 8b6e6789c093ecd1badcf2b7db00459a *man/autocor.brmsfit.Rd 
7d69950d7307c6406ee93d57d49df47d *man/bayes_R2.brmsfit.Rd c3e6672f4201e9b6b20308c48225c31d *man/bayes_factor.brmsfit.Rd c4a446cf8e2dffd057691878aa56d500 *man/bridge_sampler.brmsfit.Rd 2b98a0653def2d8e02e19508e1e7b159 *man/brm.Rd 08784a1932e936a90e4c64d27d97fac8 *man/brm_multiple.Rd 1b54ac4c4eef3d7603ab2000c1659b09 *man/brms-package.Rd 64691a2ccdd67ba0f618883464b95bd2 *man/brmsfamily.Rd 4e9f4719274a063ac2eb9b077bc49993 *man/brmsfit-class.Rd 3c66fbab1383d6b1a65ea8962a626220 *man/brmsfit_needs_refit.Rd 1036924e7dc17991d0a52a31458486e4 *man/brmsformula-helpers.Rd 697e4fc27b7862acd54c4c1a67094337 *man/brmsformula.Rd 93957b9d8e53a7f24bb2c216c86a9ebf *man/brmshypothesis.Rd c2b51e956debe84df87304a1d8ddccaa *man/brmsterms.Rd fe96ed9e31d6050d2d5fc3ce16a1a1cc *man/car.Rd ebcaf612c03c7e8e49795504239101b6 *man/coef.brmsfit.Rd 585718ec6123ceb262c5a46f5c2033b3 *man/combine_models.Rd 2d4529ffb04ae9b7b2cf32688d65667a *man/compare_ic.Rd a17e3f13c0466cfa845776fe1973f44a *man/conditional_effects.brmsfit.Rd 94137c497cb22d22b30812e31fb1f343 *man/conditional_smooths.brmsfit.Rd 75d9f30b935f0e99a41d01e4bf0f9b4e *man/control_params.Rd 4aa53d8353dffc501c9bf55b85e0432f *man/cor_ar.Rd cecad3804f335aea8259e3dc4303647b *man/cor_arma.Rd 7f439f409b1aa8e77f01687959e46393 *man/cor_arr.Rd 8f8b4ff277782a5cce802a93a0099784 *man/cor_brms.Rd 46dc58cfcc1ebebd9bc1b972cfeb4352 *man/cor_bsts.Rd ab427b3414aefcf08e9ab1bb23e9092c *man/cor_car.Rd c0de434e09365e73bc60be48b9020269 *man/cor_cosy.Rd 0c8390364d274f1240881f8db6bf4efe *man/cor_fixed.Rd b830c26475a2c54fbdeaa1db9e14b98b *man/cor_ma.Rd 713028020db23317a00dcbd763eeef6c *man/cor_sar.Rd 9d3a1304f56bd940335c3a2ff440f684 *man/cosy.Rd 285a185d228d276351aa993c5d518dee *man/cs.Rd 95ee9e173941ae06fbdf66b1cb820bce *man/custom_family.Rd 7d03bee9e6784550dd14c86517f88048 *man/data_predictor.Rd ce7eda06e9eefc17ea320ef7f1448c2a *man/data_response.Rd 04b4aae9b081455668a9ecbe6ff7b8a1 *man/density_ratio.Rd 1fa223725ad6bf8d81b68686cacc2198 *man/diagnostic-quantities.Rd 
e589de81ddbec620fa58e1501bc887c0 *man/do_call.Rd 0acbb82e6f9b540146069d2cf844713a *man/draws-brms.Rd 675ee1a3277bd1a7367047e66a21846c *man/draws-index-brms.Rd 14f1e9a3ae3e4c32c254a446847b1ab8 *man/emmeans-brms-helpers.Rd 640e1713fbc56c4761a3fc9ed1a40d57 *man/epilepsy.Rd 0474edf753d3dc04367b0fce35b851e5 *man/expose_functions.brmsfit.Rd ef2c51106154f8ec1eed261051f21a63 *man/expp1.Rd ebbceb3446d178098f9cb6d582ae65b5 *man/family.brmsfit.Rd e80efced805aedd25858d661a85b4959 *man/fcor.Rd fd6256ab632b8707d49d225666c9baf1 *man/figures/README-conditional_effects-1.png e38f3cdcc27f25047e75cb0544b46af0 *man/figures/README-plot-1.png b92756dfff9f51be038ca8acc5e2a8e9 *man/figures/brms.png 5fad10a5cc62c59ea429a5ce90191e2e *man/figures/stanlogo.png d0a1a12ff0c499035d2fadc665e38a5d *man/fitted.brmsfit.Rd 655490ff4efde340116660f0781d9350 *man/fixef.brmsfit.Rd cbb1a56c5f50bc677ef49e3b4efca504 *man/get_dpar.Rd 501776e4a5843fdeb7499c87ad36a0cf *man/get_prior.Rd 09740c97054d320ebea93ad8e14dfee0 *man/get_refmodel.brmsfit.Rd d76887acafebd4415eb1afb776d84224 *man/get_y.Rd 63de21a08fc9ce3e6320a0b54d1d1230 *man/gp.Rd 551a0a9608e2da325d7bfb528c185475 *man/gr.Rd 2ca58f139c3077436f01da902c82054d *man/horseshoe.Rd 17a509e86162fd0eee2b79098c44a3bf *man/hypothesis.brmsfit.Rd b35515077061d0109567561d704fe0e0 *man/inhaler.Rd 91bc090feda4bd1d3905237cb210afc0 *man/inv_logit_scaled.Rd d3887f794ca279d6e91f78d359488413 *man/is.brmsfit.Rd 4bfcffa8ee62d0ba281e00ac75c44c62 *man/is.brmsfit_multiple.Rd d345caf2b9ad7295e3c8b3c7550099b9 *man/is.brmsformula.Rd 2495abf33e51dd1c7b777be17639e83b *man/is.brmsprior.Rd b4e9ae0fe2f7e053481f5baec6c169f8 *man/is.brmsterms.Rd 719230daa3fa48becfd07b2abd132270 *man/is.cor_brms.Rd a8d15115fddf4462460bee22230c7aa1 *man/is.mvbrmsformula.Rd 9a9129afc0fa575f47184739243bb79d *man/is.mvbrmsterms.Rd c7b47358ab7759e10ffed3e9fa5896da *man/kfold.brmsfit.Rd 68aebab51d29b7c6a28025d602665656 *man/kfold_predict.Rd 3da1d29a87963c216a1c6e7a03062f41 *man/kidney.Rd 
7f439a55f6cf09c82c5ee2384d971814 *man/lasso.Rd ccfb164d7b2242b8b6a77747e9f8c04a *man/launch_shinystan.brmsfit.Rd 62e6869c08bf324141f3f5ce86fc496f *man/log_lik.brmsfit.Rd 900ea73d5b892e4fb1436ca014dfcb16 *man/logit_scaled.Rd 1e4ddd51ad3a4561cb500f85ad5f2e0a *man/logm1.Rd d7688327753660acb96bdad83f973985 *man/loo.brmsfit.Rd 7d6aeee8042b66b5eccb0d67d4231394 *man/loo_R2.brmsfit.Rd 57a07a3eaada9ab2c256a44d27ffec5d *man/loo_compare.brmsfit.Rd f29597640d7d655a681115ab42f02789 *man/loo_model_weights.brmsfit.Rd b8fd7183f4c9cac71829abe73860498c *man/loo_moment_match.brmsfit.Rd 3e9d22039f442d0f207b03cf1372c4b6 *man/loo_predict.brmsfit.Rd e0d53fb404be8407bf1a5ec23410af8d *man/loo_subsample.brmsfit.Rd 7fa05a35f23a32ff377f16d4376fee7a *man/loss.Rd 9ae67551d15eb035058fe01f8fd551f4 *man/ma.Rd 67442f213bfd5a21dc2d7ef560ff618e *man/make_conditions.Rd 12ff26a954b6aeec040e8ad66c43ecf5 *man/make_stancode.Rd f15be36a5b476ba2d7180fbcde6b08e0 *man/make_standata.Rd 699f7d9796dc61fcb6ba5a7fcfe8f03b *man/mcmc_plot.brmsfit.Rd 8a4b6431285accd9445532bc466b216f *man/me.Rd 059d1149efd9185938a5da4f54af27c9 *man/mi.Rd 1870ea75d4b13e11c6d60aea1d554382 *man/mixture.Rd 35ea0579c8e2ce93aba57e7d33085bed *man/mm.Rd c8f23b1448b4d3fddc30f12c3c6747af *man/mmc.Rd 67fdada3b82bf0de3502920ff501f251 *man/mo.Rd bc4a98eb125d89c537ba607c0831f921 *man/model_weights.brmsfit.Rd 41d271b33d265ac55dce75c385d429ca *man/mvbind.Rd d9faea0f79c1ed4b2107cedef3c2aea5 *man/mvbrmsformula.Rd c12860b45008dfc4a57a27111003d8f5 *man/ngrps.brmsfit.Rd f736c567c641266af3de81ac00769748 *man/nsamples.brmsfit.Rd 5eadb0ff319ed12d02d1d8af4d2ad78e *man/opencl.Rd ab92f50152366dab3e9dfd8f9f65e2a4 *man/pairs.brmsfit.Rd 3c30943f7c3617d6b30253272079cecf *man/parnames.Rd 818eaed07ea2ce3cba5a9144c745bdb5 *man/plot.brmsfit.Rd fdf888004fff752c2772e53d818a7d3d *man/post_prob.brmsfit.Rd 6979e03753e7fa14c452ee4b075c5d5d *man/posterior_average.brmsfit.Rd a6f3954c775e77a519e671f4cc254848 *man/posterior_epred.brmsfit.Rd 19a86630a8ccb9f149f796922bb4fb84 
*man/posterior_interval.brmsfit.Rd 1757a38e3bc680130da3fb96742af71a *man/posterior_linpred.brmsfit.Rd 9720106055b89341c65a33b5f47278b2 *man/posterior_predict.brmsfit.Rd 1a2ada66c95792eb04d29443968a4d00 *man/posterior_samples.brmsfit.Rd cde301543987b3569dce77d3691b128c *man/posterior_smooths.brmsfit.Rd 8b15c4487188671d4819d06f1acdc827 *man/posterior_summary.Rd 95c2007d202944bdb2946fef3b64f14f *man/posterior_table.Rd 8b07cf4bbf3eaecdec266f5a200ebb4a *man/pp_average.brmsfit.Rd ad3cea79773407539a101427046c48ed *man/pp_check.brmsfit.Rd b80fe2c679ba40e666af3da4cd22e3d1 *man/pp_mixture.brmsfit.Rd cad0a460283bd612dde7d923d8d859c0 *man/predict.brmsfit.Rd c4bb1c7846bd0620853d545d76aaea40 *man/predictive_error.brmsfit.Rd 9fb4d1382e914f9ff5a306f783091c4b *man/predictive_interval.brmsfit.Rd c921d88d912db1cb8b797405b5424b8d *man/prepare_predictions.Rd 6a101409630ca8947aeba54e0dd89551 *man/print.brmsfit.Rd e8009c6186fb93c8d89682c4dd34c1cb *man/print.brmsprior.Rd 924af1c1af0be14eaf84860a048b3a8c *man/prior_draws.brmsfit.Rd 49d839532a1d0da147db7b992946a854 *man/prior_summary.brmsfit.Rd f24e2e8f9ac0bac9fb6e51bd2dd66a55 *man/ranef.brmsfit.Rd 2d049bf19ee1db3b1e00b044c41f3e3d *man/recompile_model.Rd 5268361e209ced758eba19451b12c228 *man/reloo.brmsfit.Rd 12020b6fce6d091457ca0d133a2d720f *man/rename_pars.Rd 943743c6e7baef3da436fc16d8811c58 *man/residuals.brmsfit.Rd 4b631d46883284d89060cca9e30cbf47 *man/restructure.Rd 4f7f207825b3f9a5f951f9cc02d9bc65 *man/rows2labels.Rd 952bcce8ddb9faa6516e7c2b70e22c29 *man/s.Rd 36766e1568e020be0eedcd04c795f0c9 *man/sar.Rd 93c434f2f48ec1a9397139fff5444473 *man/save_pars.Rd bc6a16d731b55fdba511ffd181b26c95 *man/set_prior.Rd 5b9bc103dd04679d89fe9a3a7ea017c4 *man/stancode.brmsfit.Rd a6bc6ed748ddf011083b3656f87f8844 *man/standata.brmsfit.Rd d32e81cdca926de517ba324f48020e4b *man/stanvar.Rd 609139f5fa9d220f297ec9aac9675a05 *man/summary.brmsfit.Rd 6ef704004a7c7719b0806d1cd47f3736 *man/theme_black.Rd 6f15836eefa722613d11ae2a26d498b2 *man/theme_default.Rd 
9ccc493e1ffc4cb8233711028629ef7a *man/threading.Rd 7093aec94cb5402d9bdcbd0aa84b8496 *man/unstr.Rd 7a73dcfd9ad17ab4aa446ed5930e5640 *man/update.brmsfit.Rd 3776192391910c4f222a6ff404067a83 *man/update.brmsfit_multiple.Rd d4329014c6586f1d939c80df9105286d *man/update_adterms.Rd 4b51313d63442b5f22ae5df5dd65c902 *man/validate_newdata.Rd c2478c03fe32ef7fd153e2d3405d2b10 *man/validate_prior.Rd 1174c012f645c3dc8f70ef58fe542671 *man/vcov.brmsfit.Rd dee69387c0a4ef07a8953e4980649c80 *man/waic.brmsfit.Rd eb94c0cef2e4c20ce9610bd1cc3661b6 *tests/testthat.R 7d17ab2ab674f8c2c73fe7183a2a47e4 *tests/testthat/helpers/insert_refcat_ch.R 5ec846c86afc79b242210452eb4e1b00 *tests/testthat/helpers/inv_link_categorical_ch.R 919d639446d3c2ab168cbdcf3bb4336d *tests/testthat/helpers/inv_link_ordinal_ch.R 771dcf586afefa69ae5c82a1c867e845 *tests/testthat/helpers/link_categorical_ch.R 55eff9dc736befdf5b7569a5b0bdf9f1 *tests/testthat/helpers/link_ordinal_ch.R 34a79884fed445b69b7fcd9e4166e531 *tests/testthat/helpers/simopts_catlike.R 83cf80ac0b464e6217fabba119a182c5 *tests/testthat/helpers/simopts_catlike_oneobs.R 984b086239e1ec078f2d326b50d75ff4 *tests/testthat/tests.brm.R 9ca8a7841717c461c28f247391e8af7e *tests/testthat/tests.brmsfit-helpers.R 9596d5813173fc6daf1f61b757cbb79c *tests/testthat/tests.brmsfit-methods.R 218087f1991e81f0f9696364227e3dd6 *tests/testthat/tests.brmsformula.R 147d519778a7cd17bdbe0d365c9ea20a *tests/testthat/tests.brmsterms.R 0701b29bcf35b3dc573c0e48e87762fe *tests/testthat/tests.data-helpers.R 8f9b40ffa35b881cf466b934006f1c62 *tests/testthat/tests.distributions.R ed2a592a2a4d6cfaf359b86ddde34252 *tests/testthat/tests.emmeans.R 65451b49b0aeda03d07d49ebba424295 *tests/testthat/tests.exclude_pars.R ab04c140db8801b3c4119c06f9f416d6 *tests/testthat/tests.families.R c0f0e2f4c4fd1094e94b1e08cf71dfc2 *tests/testthat/tests.log_lik.R 9cb924905bbdf097bb0205a2b2fa26f1 *tests/testthat/tests.make_stancode.R 963482e15380179ec28a7fa8c3a665d3 *tests/testthat/tests.make_standata.R 
9e74f38ff67944eae6927f0147973509 *tests/testthat/tests.misc.R 1f9eab3e51b82733cdc38e06ef801bcb *tests/testthat/tests.posterior_epred.R 7099551942ba414c49848beaf0819a47 *tests/testthat/tests.posterior_predict.R 86ee3df7ea451e221f71f21a50d493c8 *tests/testthat/tests.priors.R cb138a6b597a890d5d8224e104c77a7c *tests/testthat/tests.read_csv_as_stanfit.R 217e1ec6d85a8964e69621bcac0369c8 *tests/testthat/tests.rename_pars.R dd26cd3d288495e0396b1912d46065fc *tests/testthat/tests.restructure.R 65f6180b5a6026b675b6ac1065e49713 *tests/testthat/tests.stan_functions.R b044a924fbded90c6ca9da8bc01f85cd *vignettes/brms_customfamilies.Rmd 3a1e89b91c2b94282c6df6607f405a89 *vignettes/brms_distreg.Rmd 8fc9924120fd5301b479e2eebb8c2897 *vignettes/brms_families.Rmd cdf2b42e1cf561cdf4f1f1f7c06d8148 *vignettes/brms_missings.Rmd ee62fe59cf57cd22be35969e4c27dc7b *vignettes/brms_monotonic.Rmd f7cece21fca8fbaaa53a106038349d0c *vignettes/brms_multilevel.ltx 7567746ed5926d36df405b3a22f01eef *vignettes/brms_multivariate.Rmd 1d4ca841f24e6d41d803ea56afbdbbae *vignettes/brms_nonlinear.Rmd 5ae728c35b6cd8d27e69cf0a119e646f *vignettes/brms_overview.ltx 85bd37fc5196318ee06e034c67a50c9a *vignettes/brms_phylogenetics.Rmd c123e93d353853ba9f95fc6ea2a77a35 *vignettes/brms_threading.Rmd 8e122a174183d81956fefd5f7d9a2b9b *vignettes/citations_multilevel.bib 6ba1d5ec8ecc1031d8845d82dcef11da *vignettes/citations_overview.bib 1e02697a37e36908b7d8954bfaea2e92 *vignettes/flowchart.pdf 598082534ce6cb51d34c01a69dda5088 *vignettes/inhaler_plot.pdf d7d237f55a6850eba15ad5ceeaf821f6 *vignettes/kidney_conditional_effects.pdf 7632f1034a93aa91cd5d27f3430419f7 *vignettes/kidney_plot.pdf 130d165d8715c0e39e51dac5a843d50a *vignettes/me_loss1.pdf 2c51e8bc0ba3986d8e445b445943473c *vignettes/me_loss1_year.pdf 70c11e0b4eb944016ef306a402fce2c4 *vignettes/me_rent1.pdf beff1ce999b4bd7244ecbe2b6e887c9a *vignettes/me_rent2.pdf 8d6a4a639492d0ac1e71bbf25b93fa03 *vignettes/me_rent3.pdf 5b56487f6dc0b92bfe7894ba09264971 *vignettes/me_zinb1.pdf 
1fe96ffc00b75a46155b60f534625f43 *vignettes/ppc_mm1.pdf brms/inst/0000755000176200001440000000000014504270214012171 5ustar liggesusersbrms/inst/doc/0000755000176200001440000000000014504270212012734 5ustar liggesusersbrms/inst/doc/brms_nonlinear.R0000644000176200001440000001032414504267375016107 0ustar liggesusersparams <- list(EVAL = TRUE) ## ---- SETTINGS-knitr, include=FALSE----------------------------------------------------- stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ## --------------------------------------------------------------------------------------- b <- c(2, 0.75) x <- rnorm(100) y <- rnorm(100, mean = b[1] * exp(b[2] * x)) dat1 <- data.frame(x, y) ## ---- results='hide'-------------------------------------------------------------------- prior1 <- prior(normal(1, 2), nlpar = "b1") + prior(normal(0, 2), nlpar = "b2") fit1 <- brm(bf(y ~ b1 * exp(b2 * x), b1 + b2 ~ 1, nl = TRUE), data = dat1, prior = prior1) ## --------------------------------------------------------------------------------------- summary(fit1) plot(fit1) plot(conditional_effects(fit1), points = TRUE) ## ---- results='hide'-------------------------------------------------------------------- fit2 <- brm(y ~ x, data = dat1) ## --------------------------------------------------------------------------------------- summary(fit2) ## --------------------------------------------------------------------------------------- pp_check(fit1) pp_check(fit2) ## --------------------------------------------------------------------------------------- loo(fit1, fit2) ## --------------------------------------------------------------------------------------- data(loss) head(loss) ## ---- 
results='hide'-------------------------------------------------------------------- fit_loss <- brm( bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE), data = loss, family = gaussian(), prior = c( prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta") ), control = list(adapt_delta = 0.9) ) ## --------------------------------------------------------------------------------------- summary(fit_loss) plot(fit_loss, N = 3, ask = FALSE) conditional_effects(fit_loss) ## --------------------------------------------------------------------------------------- conditions <- data.frame(AY = unique(loss$AY)) rownames(conditions) <- unique(loss$AY) me_loss <- conditional_effects( fit_loss, conditions = conditions, re_formula = NULL, method = "predict" ) plot(me_loss, ncol = 5, points = TRUE) ## --------------------------------------------------------------------------------------- inv_logit <- function(x) 1 / (1 + exp(-x)) ability <- rnorm(300) p <- 0.33 + 0.67 * inv_logit(ability) answer <- ifelse(runif(300, 0, 1) < p, 1, 0) dat_ir <- data.frame(ability, answer) ## ---- results='hide'-------------------------------------------------------------------- fit_ir1 <- brm(answer ~ ability, data = dat_ir, family = bernoulli()) ## --------------------------------------------------------------------------------------- summary(fit_ir1) plot(conditional_effects(fit_ir1), points = TRUE) ## ---- results='hide'-------------------------------------------------------------------- fit_ir2 <- brm( bf(answer ~ 0.33 + 0.67 * inv_logit(eta), eta ~ ability, nl = TRUE), data = dat_ir, family = bernoulli("identity"), prior = prior(normal(0, 5), nlpar = "eta") ) ## --------------------------------------------------------------------------------------- summary(fit_ir2) plot(conditional_effects(fit_ir2), points = TRUE) ## 
--------------------------------------------------------------------------------------- loo(fit_ir1, fit_ir2) ## ---- results='hide'-------------------------------------------------------------------- fit_ir3 <- brm( bf(answer ~ guess + (1 - guess) * inv_logit(eta), eta ~ 0 + ability, guess ~ 1, nl = TRUE), data = dat_ir, family = bernoulli("identity"), prior = c( prior(normal(0, 5), nlpar = "eta"), prior(beta(1, 1), nlpar = "guess", lb = 0, ub = 1) ) ) ## --------------------------------------------------------------------------------------- summary(fit_ir3) plot(fit_ir3) plot(conditional_effects(fit_ir3), points = TRUE) brms/inst/doc/brms_threading.Rmd0000644000176200001440000005611514464666420016417 0ustar liggesusers--- title: "Running brms models with within-chain parallelization" author: "Sebastian Weber & Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Running brms models with within-chain parallelization} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(ggplot2) library(brms) theme_set(theme_default()) ``` ```{r, fake-data-sim, include=FALSE, eval=TRUE} set.seed(54647) # number of observations N <- 1E4 # number of group levels G <- round(N / 10) # number of predictors P <- 3 # regression coefficients beta <- rnorm(P) # sampled covariates, group means and fake data fake <- matrix(rnorm(N * P), ncol = P) dimnames(fake) <- list(NULL, paste0("x", 1:P)) # fixed effect part and sampled group membership fake <- transform( as.data.frame(fake), theta = fake %*% beta, g = sample.int(G, N, replace=TRUE) ) # add random 
intercept by group fake <- merge(fake, data.frame(g = 1:G, eta = rnorm(G)), by = "g") # linear predictor fake <- transform(fake, mu = theta + eta) # sample Poisson data fake <- transform(fake, y = rpois(N, exp(mu))) # shuffle order of data rows to ensure even distribution of computational effort fake <- fake[sample.int(N, N),] # drop not needed row names rownames(fake) <- NULL ``` ```{r, model-poisson, include=FALSE} model_poisson <- brm( y ~ 1 + x1 + x2 + (1 | g), data = fake, family = poisson(), iter = 500, # short sampling to speedup example chains = 2, prior = prior(normal(0,1), class = b) + prior(constant(1), class = sd, group = g), backend = "cmdstanr", threads = threading(4), save_pars = save_pars(all = TRUE) ) ``` ```{r, benchmark, include=FALSE} # Benchmarks given model with cross-product of tuning parameters CPU # cores, grainsize and iterations. Models are run with either static # or non-static scheduler and initial values are set by default to 0 on the # unconstrained scale. Function returns a data-frame with the # cross-product of the tuning parameters and as result column the # respective runtime. 
benchmark_threading <- function(model, cores = 1, grainsize = 1, iter = 100, static = FALSE) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- list(extract_draw(sims, 1)) scaling_model <- update( model, refresh = 0, threads = threading(1, grainsize = grainsize[1], static = static), chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark <- function(cores, size, iter) { bench_fit <- update( scaling_model, warmup=0, iter = iter, chains = 1, seed = 1234, init = init, refresh = 0, save_warmup=TRUE, threads = threading(cores, grainsize = size, static = static), inv_metric=winfo$inv_metric[[1]], step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } cases <- expand.grid(cores = cores, grainsize = grainsize, iter = iter) res <- with(cases, mapply(run_benchmark, cores, grainsize, iter)) cbind(cases, as.data.frame(t(res))) } benchmark_reference <- function(model, iter=100, init=0) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- list(extract_draw(sims, 1)) ref_model <- update( model, refresh = 0, threads = NULL, chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark_ref <- function(iter_bench) { bench_fit <- update( ref_model, warmup=0, iter = iter_bench, chains = 1, seed = 1234, init = init, refresh = 0, inv_metric=winfo$inv_metric[[1]], step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } ref <- sapply(iter, run_benchmark_ref) ref <- cbind(as.data.frame(t(ref)), iter=iter) ref } extract_warmup_info <- function(bfit) { adapt <- lapply(rstan::get_adaptation_info(bfit$fit), strsplit, split="\\n") step_size <- lapply(adapt, 
function(a) as.numeric(strsplit(a[[1]][[1]], " = ")[[1]][2])) inv_metric <- lapply(adapt, function(a) as.numeric(strsplit(sub("^# ", "", a[[1]][[3]]), ", ")[[1]])) list(step_size=step_size, inv_metric=inv_metric) } extract_draw <- function(sims, draw) { lapply(sims, brms:::slice, dim = 1, i = draw, drop = TRUE) } ``` ## Introduction Full Bayesian inference is a computationally very demanding task and often we wish to run our models faster in shorter walltime. With modern computers we nowadays have multiple processors available on a given machine such that the use of running the inference in parallel will shorten the overall walltime. While between-chain parallelization is straightforward by merely launching multiple chains at the same time, the use of within-chain parallelization is more complicated in various ways. This vignette aims to introduce the user to within-chain parallelization with **brms**, since its efficient use depends on various aspects specific to the users model. ## Quick summary Assuming you have a **brms** model which you wish to evaluate faster by using more cores per chain, for example: ```{r, eval=FALSE} fit_serial <- brm( count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), chains = 4, cores = 4, backend = "cmdstanr" ) ``` Then running this model with threading requires `cmdstanr` as backend and you can simply add threading support to an existing model with the `update` mechanism as: ```{r, eval=FALSE} fit_parallel <- update( fit_serial, chains = 2, cores = 2, backend = "cmdstanr", threads = threading(2) ) ``` The example above assumes that 4 cores are available which are best used without within-chain parallelization by running 4 chains in parallel. When using within chain parallelization it is still advisable to use just as many threads *in total* as you have CPU cores. It's thus sensible in this case to reduce the number of chains running in parallel to just 2, but allow each chain to use 2 threads. 
Obviously this will reduce the number of iterations in the posterior here as we assumed a fixed amount of 4 cores. - Only apply within-chain parallelization to large problems which take more than a few minutes at least to calculate. The `epilepsy` example above is actually too small to gain in speed (just a few seconds per chain on this machine). - Within-chain parallelization is less efficient than between-chain parallelization. So only use within-chain parallelism if more CPUs can be used to run the entire analysis. - Due to details of the model and data-set, speedups with more cores can be very limited. Not every model amends to within-chain parallelization and an empirical evaluation is in some cases advisable. - Enabling threading *usually* slows down any model to some extent and this slowdown must be offset by sufficient cores per chain in order to really gain in execution speed. - Doubling the execution speed with few cores is a lot easier than obtaining larger speedups with even more cores. - Models with computationally expensive likelihoods are easier to parallelize than less expensive likelihoods. For example, the Poisson distribution involves expensive $\log\Gamma$ functions whereas the normal likelihood is very cheap to calculate in comparison. - Models with many parameters (e.g., multilevel models) carry a large overhead when running in parallel. - With a larger overhead of the model, the likelihood must be sufficiently expensive such that the relative computational cost of likelihood to parallelization overhead is favorable. - Avoid using hyper-threading, that is, only use as many threads as you have physical cores available. - Ensure that the data is randomly sorted such that consecutive subsets of the data are roughly of the same computational effort. ## Within-chain parallelization The within-chain parallelization implemented in **brms** is based on the `reduce_sum` facility in Stan. 
The basic principle that `reduce_sum` uses is to split a large summation into arbitrary smaller partial sums. Due to the commutativity and associativity of the sum operation these smaller partial sums can be evaluated in any order and in parallel from one another. **brms** leverages `reduce_sum` to evaluate the log-likelihood of the model in parallel as for example $$ \begin{aligned} l(y|\theta) &= \sum_{i=1}^N l_i(y_i| \theta) \\ &= \sum_{i=1}^{S_1} l_i(y_i| \theta) + \sum_{i=S_1+1}^N l_i(y_i| \theta). \end{aligned} $$ As a consequence, the within-chain parallelization requires mutually independent log-likelihood terms which restricts its applicability to some degree. Furthermore, the within-chain parallelization is only applicable to the evaluation of the data likelihood while all other parts of the model, for example priors, will remain running serially. Thus, only a partial fraction of the entire Stan model will run in parallel which limits the potential speedup one may obtain. The theoretical speedup for a partially in parallel running program is described by [Amdahl‘s law](https://en.wikipedia.org/wiki/Amdahl%27s_law). For example, with 90% of the computational load running in parallel one can essentially double the execution speed with 2 cores while 8 cores may only speedup the program by at most 5x. How large the computational cost of the log-likelihood is in relation to the entire model is very dependent on the model of the user. In practice, the speedups are even smaller than the theoretical speedups. This is caused by the additional overhead implied by forming multiple smaller sums than just one large one. For example, for each partial sum formed the entire parameter vector $\theta$ has to be copied in memory for Stan to be able to calculate the gradient of the log-likelihood. Hence, with more partial sums, more copying is necessary as opposed to evaluating just one large sum. 
Whether the additional copying is indeed relevant depends on the computational cost of the log-likelihood of each term and the number of parameters. For a model with a computationally cheap normal log-likelihood, this effect is more important than for a model with a Poisson log-likelihood, and for multilevel models with many parameters more copying is needed than for simpler regression models. It may therefore be necessary to form sufficiently large partial sums to warrant an efficient parallel execution. The size of the partial sums is referred to as the `grainsize`, which is set to a reasonable default value. However, for some models this tuning parameter requires some attention from the user for optimal performance. Finally, it is important to note that by default the exact size and order of the partial sums is not stable as it is adjusted to the load of the system. As a result, exact numerical reproducibility is not guaranteed by default. In order to warrant the same size and order of the partial sums, the `static` option must be used and set to `TRUE`, which uses a deterministic scheduler for the parallel work. ## Example model As a toy demonstration, we use here a multilevel Poisson model. The model is a varying intercept model with $`r N`$ data observation which are grouped into $`r G`$ groups. Each data item has $`r P`$ continuous covariates. The simulation code for the fake data can be found in the appendix and it's first $10$ rows are: ```{r} kable(head(fake, 10), digits = 3) ``` The **brms** model fitting this data is: ```{r, eval=FALSE} <> ``` Here we have fixed the standard deviation of the between-group variation for the intercept to the true value of $1$ as used in the simulation. This is to avoid unfavorable geometry of the problem allowing us to concentrate on computational aspects alone. 
The Poisson likelihood is a relatively expensive likelihood due to the use of the $\log\Gamma$ function as opposed to, for example, a normal likelihood which involves by far less expensive operations. Moreover, this example is chosen in order to demonstrate parallelization overhead implied by a large number of parameters. ## Managing parallelization overhead As discussed above, the key mechanism to run Stan programs with parallelization is to split the large sum over independent log likelihood terms into arbitrary smaller *partial sums*. Creating more *partial sums* allows one to increase simultaneous parallel computations in a granular way, but at the same time additional overhead is introduced through the requirement to copy the entire parameter vector for each *partial sum* formed along with further overhead due to splitting up a single large task into multiple smaller ones. By default, **brms** will choose a sensible `grainsize` which defines how large a given *partial sum* will roughly be. The actual chunk size is automatically tuned whenever the default non-static scheduler is used, which is the recommended choice to start with. As noted before, only the static scheduler gives fully deterministic results since the chunk size and order of partial sums will be the same during sampling. While we expect that the default `grainsize` in **brms** is reasonably good for many models, it can improve performance if one tunes the `grainsize` specifically to a given model and data-set. We suggest successively increasing the number of chunks a given data set is split into with the static scheduler and running this on a single core. This way one can control the number of *partial sums* accurately and monitor the execution time as it increases. These experiments are run with only a single chain and very short iteration numbers as we are not interested in the statistical results, but rather aim to be able to explore the tuning parameter space of the chunk size as quickly as possible. 
The number of iterations needed to get reliable runtime estimates for a given chunk size will depend on many details and the easiest way to determine this is to run this benchmark with multiple number of iterations. Whenever their results match approximately, then the iteration numbers are sufficient. In order to decrease the variation between runs, we also fix the random seed, initial value and the tuning parameters of the sampler (step size and mass matrix). Below is an example R code demonstrating such a benchmark. The utility function `benchmark_threading` is shown and explained in the appendix. ```{r, chunking-scale, message=FALSE, warning=FALSE, results='hide'} chunking_bench <- transform( data.frame(chunks = 4^(0:3)), grainsize = ceiling(N / chunks) ) iter_test <- c(10, 20, 40) # very short test runs scaling_chunking <- benchmark_threading( model_poisson, cores = 1, grainsize = chunking_bench$grainsize, # test various grainsizes iter = iter_test, static = TRUE # with static partitioner ) # run as reference the model *without* reduce_sum ref <- benchmark_reference(model_poisson, iter_test) # for additional data munging please refer to the appendix ``` ```{r, munge-chunking-scaling, include=FALSE} scaling_chunking <- merge(scaling_chunking, chunking_bench, by = "grainsize") single_chunk <- transform( subset(scaling_chunking, chunks == 1), num_leapfrog_single = num_leapfrog, num_leapfrog = NULL, runtime_single = runtime, runtime = NULL, grainsize = NULL, chunks=NULL ) scaling_chunking <- transform( merge(scaling_chunking, single_chunk), slowdown = runtime/runtime_single, iter = factor(iter), runtime_single = NULL ) ref <- transform(ref, iter=factor(iter)) ``` Graphically summarizing the results shows that with more than 8 chunks the overhead is about 10% and increasing further with more chunks. For models without many parameters, no such overhead should be observed. 
Furthermore, one can see that 25 and 50 iterations give similar results implying that 25 iterations suffice for stable runtime estimates for these (and the following) benchmarks. The overhead of up to 20% in this example with 16 chunks may seem large due to the scaling of the plot. One must not forget that when we start to use more CPU cores, the overhead is easily offset, but it limits the maximal speedup we can get. For example, some 2 units of computation become 2.4 units due to the overhead such that on 2 cores we don't quite double the execution speed, but rather get a 1.6x increase in speed instead of a 2x speedup. Considering in addition the time per leapfrog step of the NUTS sampler shows on an absolute scale similar information as before. The upside of this representation is that we can visualize the slowdown in relation to the program *without* `reduce_sum`. As we can see, the additional overhead due to merely enabling `reduce_sum` is substantial in this example. This is attributed in the specific example to the large number of random effects. ```{r} ggplot(scaling_chunking) + aes(chunks, slowdown, colour = iter, shape = iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.8, 2.5, by=0.1)) + ggtitle("Slowdown with increasing number of chunks") ggplot(scaling_chunking) + aes(chunks, 1E3 * runtime/num_leapfrog, colour = iter, shape=iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.1, 2.0, by=0.1)) + geom_hline(data=ref, aes(yintercept=1E3 * runtime/num_leapfrog, colour=iter), linetype=I(2)) + ggtitle("Time per leapfrog step vs number of chunks", "Dashed line is reference model without reduce_sum") + ylab("Time per leapfrog step [ms]") ``` ## Parallelization speedup In practice, we are often interested in so-called "hard-scaling" properties of the parallelization system. 
That is, for a fixed problem size we would like to know how much faster we can execute the Stan program with increasing number of threads. As nowadays CPUs usually run with so-called hyper-threading, it is also of interest if this technique is beneficial for Stan programs as well (spoiler alert: it's not useful). As we have seen before, the `grainsize` can have an impact on the performance and is as such a tuning parameter. Below we demonstrate some exemplary R code which runs a benchmark with varying number of CPU cores and varying number of `grainsize`s. ```{r, speedup-scale, message=FALSE, warning=FALSE, results='hide'} num_cpu <- parallel::detectCores(logical = FALSE) num_cpu_logical <- parallel::detectCores(logical = TRUE) grainsize_default <- ceiling(N / (2 * num_cpu)) cores <- c(2^seq(0, floor(log2(num_cpu_logical))), num_cpu, num_cpu_logical) cores <- sort(unique(cores)) grainsize <- c(grainsize_default, grainsize_default/2, grainsize_default/4) grainsize <- round(grainsize) iter_scaling <- 20 scaling_cores <- benchmark_threading( model_poisson, cores = cores, grainsize = grainsize, iter = iter_scaling, static = FALSE ) single_core <- transform( subset(scaling_cores, cores == 1), runtime_single = runtime, num_leapfrog=NULL, runtime=NULL, cores = NULL ) scaling_cores <- transform( merge(scaling_cores, single_core), speedup = runtime_single/runtime, grainsize = factor(grainsize) ) ``` It is important to consider the absolute runtime and the relative speedup vs. running on a single core. The relative speedup can be misleading if the single core runtime is very slow in which case speed gains on more CPUs may look overly good. Considering instead the absolute runtime avoids this problem. After all, we are interested in the shortest walltime we can get rather than any relative speedups. 
```{r} ggplot(scaling_cores) + aes(cores, runtime, shape = grainsize, color = grainsize) + geom_vline(xintercept = num_cpu, linetype = 3) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_cores$cores) + scale_y_log10(breaks=seq(0.1, 1.4, by=0.1)) + theme(legend.position = c(0.85, 0.8)) + geom_hline(data=subset(ref, iter==iter_scaling), aes(yintercept=runtime), linetype=I(2)) + ggtitle("Runtime with varying number of cores", "Dashed line is reference model without reduce_sum") ggplot(scaling_cores) + aes(cores, speedup, shape = grainsize, color = grainsize) + geom_abline(slope = 1, intercept = 0, linetype = 2) + geom_vline(xintercept = num_cpu, linetype = 3) + geom_line() + geom_point() + scale_x_log10(breaks=scaling_cores$cores) + scale_y_log10(breaks=scaling_cores$cores) + theme(aspect.ratio = 1) + coord_fixed(xlim = c(1, num_cpu_logical), ylim = c(1, num_cpu_logical)) + ggtitle("Relative speedup vs 1 core") ``` The vertical dotted line marks the physical number of CPU cores on the machine this was run. The horizontal dashed line in the plot with absolute runtime marks the respective runtime of the model *without* `reduce_sum` and the dashed unity line in the plot with the relative speedup marks the theoretical maximal speedup. We can see that there is no further reduction in execution time when increasing the thread count to be greater than the number of physical CPUs. Hence, the use of hyper-threading is not helpful when aiming to maximize the speed of a Stan program. Moreover, the use of threading outperforms the single core runtime only when using more than 4 cores in this example. For this example, the shown `grainsize`s matter on some machines but not on others, so your results may look quite different from what is shown here. The overall speedups may not seem impressive in this case, which is attributed in this case to the large number of parameters relative to the number of observations. 
However, we can still outperform the single core runtime when using many cores. Though the most important advantage of threading is that with an increasing data set size, the user has the option to use a brute-force approach to balance the increase in walltime needed. ```{r} kable(scaling_cores, digits = 2) ``` For a given Stan model one should usually choose the number of chains and the number of threads per chain to be equal to the number of (physical) cores one wishes to use. Only if different chains of the model have relatively different execution times (which they should not have, but it occurs sometimes in practice), then one may consider the use of hyper-threading. Doing so will share the resources evenly across all chains and whenever the fastest chain finishes, the freed resources can be given to the still running chains. ## Appendix ### Fake data simulation ```{r, eval=FALSE} <> ``` ### Poisson example model ```{r, eval=FALSE} <> ``` ### Threading benchmark function ```{r, eval=FALSE} <> ``` ### Munging of slowdown with chunking data ```{r, eval=FALSE} <> ``` brms/inst/doc/brms_customfamilies.R0000644000176200001440000000652714504266025017147 0ustar liggesusersparams <- list(EVAL = TRUE) ## ---- SETTINGS-knitr, include=FALSE----------------------------------------------------- stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ## ----cbpp------------------------------------------------------------------------------- data("cbpp", package = "lme4") head(cbpp) ## ----fit1, results='hide'--------------------------------------------------------------- fit1 <- brm(incidence | trials(size) ~ period + (1|herd), data = cbpp, family = binomial()) ## 
----fit1_summary----------------------------------------------------------------------- summary(fit1) ## ----beta_binomial2--------------------------------------------------------------------- beta_binomial2 <- custom_family( "beta_binomial2", dpars = c("mu", "phi"), links = c("logit", "log"), lb = c(0, 0), ub = c(1, NA), type = "int", vars = "vint1[n]" ) ## ----stan_funs-------------------------------------------------------------------------- stan_funs <- " real beta_binomial2_lpmf(int y, real mu, real phi, int T) { return beta_binomial_lpmf(y | T, mu * phi, (1 - mu) * phi); } int beta_binomial2_rng(real mu, real phi, int T) { return beta_binomial_rng(T, mu * phi, (1 - mu) * phi); } " ## ----stanvars--------------------------------------------------------------------------- stanvars <- stanvar(scode = stan_funs, block = "functions") ## ----fit2, results='hide'--------------------------------------------------------------- fit2 <- brm( incidence | vint(size) ~ period + (1|herd), data = cbpp, family = beta_binomial2, stanvars = stanvars ) ## ----summary_fit2----------------------------------------------------------------------- summary(fit2) ## --------------------------------------------------------------------------------------- expose_functions(fit2, vectorize = TRUE) ## ----log_lik---------------------------------------------------------------------------- log_lik_beta_binomial2 <- function(i, prep) { mu <- brms::get_dpar(prep, "mu", i = i) phi <- brms::get_dpar(prep, "phi", i = i) trials <- prep$data$vint1[i] y <- prep$data$Y[i] beta_binomial2_lpmf(y, mu, phi, trials) } ## ----loo-------------------------------------------------------------------------------- loo(fit1, fit2) ## ----posterior_predict------------------------------------------------------------------ posterior_predict_beta_binomial2 <- function(i, prep, ...) 
{ mu <- brms::get_dpar(prep, "mu", i = i) phi <- brms::get_dpar(prep, "phi", i = i) trials <- prep$data$vint1[i] beta_binomial2_rng(mu, phi, trials) } ## ----pp_check--------------------------------------------------------------------------- pp_check(fit2) ## ----posterior_epred-------------------------------------------------------------------- posterior_epred_beta_binomial2 <- function(prep) { mu <- brms::get_dpar(prep, "mu") trials <- prep$data$vint1 trials <- matrix(trials, nrow = nrow(mu), ncol = ncol(mu), byrow = TRUE) mu * trials } ## ----conditional_effects---------------------------------------------------------------- conditional_effects(fit2, conditions = data.frame(size = 1)) brms/inst/doc/brms_monotonic.Rmd0000644000176200001440000002040014224753353016437 0ustar liggesusers--- title: "Estimating Monotonic Effects with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Estimating Monotonic Effects with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction This vignette is about monotonic effects, a special way of handling discrete predictors that are on an ordinal or higher scale (Bürkner & Charpentier, in review). A predictor, which we want to model as monotonic (i.e., having a monotonically increasing or decreasing relationship with the response), must either be integer valued or an ordered factor. 
As opposed to a continuous predictor, predictor categories (or integers) are not assumed to be equidistant with respect to their effect on the response variable. Instead, the distance between adjacent predictor categories (or integers) is estimated from the data and may vary across categories. This is realized by parameterizing as follows: One parameter, $b$, takes care of the direction and size of the effect similar to an ordinary regression parameter. If the monotonic effect is used in a linear model, $b$ can be interpreted as the expected average difference between two adjacent categories of the ordinal predictor. An additional parameter vector, $\zeta$, estimates the normalized distances between consecutive predictor categories which thus defines the shape of the monotonic effect. For a single monotonic predictor, $x$, the linear predictor term of observation $n$ looks as follows: $$\eta_n = b D \sum_{i = 1}^{x_n} \zeta_i$$ The parameter $b$ can take on any real value, while $\zeta$ is a simplex, which means that it satisfies $\zeta_i \in [0,1]$ and $\sum_{i = 1}^D \zeta_i = 1$ with $D$ being the number of elements of $\zeta$. Equivalently, $D$ is the number of categories (or highest integer in the data) minus 1, since we start counting categories from zero to simplify the notation. ## A Simple Monotonic Model A main application of monotonic effects are ordinal predictors that can be modeled this way without falsely treating them either as continuous or as unordered categorical predictors. In Psychology, for instance, this kind of data is omnipresent in the form of Likert scale items, which are often treated as being continuous for convenience without ever testing this assumption. As an example, suppose we are interested in the relationship of yearly income (in $) and life satisfaction measured on an arbitrary scale from 0 to 100. Usually, people are not asked for the exact income. 
Instead, they are asked to rank themselves in one of certain classes, say: 'below 20k', 'between 20k and 40k', 'between 40k and 100k' and 'above 100k'. We use some simulated data for illustration purposes. ```{r} income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100") income <- factor(sample(income_options, 100, TRUE), levels = income_options, ordered = TRUE) mean_ls <- c(30, 60, 70, 75) ls <- mean_ls[income] + rnorm(100, sd = 7) dat <- data.frame(income, ls) ``` We now proceed with analyzing the data modeling `income` as a monotonic effect. ```{r, results='hide'} fit1 <- brm(ls ~ mo(income), data = dat) ``` The summary methods yield ```{r} summary(fit1) plot(fit1, variable = "simo", regex = TRUE) plot(conditional_effects(fit1)) ``` The distributions of the simplex parameter of `income`, as shown in the `plot` method, demonstrate that the largest difference (about 70% of the difference between minimum and maximum category) is between the first two categories. Now, let's compare the monotonic model with two common alternative models. (a) Assume `income` to be continuous: ```{r, results='hide'} dat$income_num <- as.numeric(dat$income) fit2 <- brm(ls ~ income_num, data = dat) ``` ```{r} summary(fit2) ``` or (b) Assume `income` to be an unordered factor: ```{r, results='hide'} contrasts(dat$income) <- contr.treatment(4) fit3 <- brm(ls ~ income, data = dat) ``` ```{r} summary(fit3) ``` We can easily compare the fit of the three models using leave-one-out cross-validation. ```{r} loo(fit1, fit2, fit3) ``` The monotonic model fits better than the continuous model, which is not surprising given that the relationship between `income` and `ls` is non-linear. The monotonic and the unordered factor model have almost identical fit in this example, but this may not be the case for other data sets. 
## Setting Prior Distributions In the previous monotonic model, we have implicitly assumed that all differences between adjacent categories were a-priori the same, or formulated correctly, had the same prior distribution. In the following, we want to show how to change this assumption. The canonical prior distribution of a simplex parameter is the Dirichlet distribution, a multivariate generalization of the beta distribution. It is non-zero for all valid simplexes (i.e., $\zeta_i \in [0,1]$ and $\sum_{i = 1}^D \zeta_i = 1$) and zero otherwise. The Dirichlet prior has a single parameter $\alpha$ of the same length as $\zeta$. The higher $\alpha_i$ the higher the a-priori probability of higher values of $\zeta_i$. Suppose that, before looking at the data, we expected that the same amount of additional money matters more for people who generally have less money. This translates into higher a-priori values of $\zeta_1$ (difference between 'below_20' and '20_to_40') and hence into higher values of $\alpha_1$. We choose $\alpha_1 = 2$ and $\alpha_2 = \alpha_3 = 1$, the latter being the default value of $\alpha$. To fit the model we write: ```{r, results='hide'} prior4 <- prior(dirichlet(c(2, 1, 1)), class = "simo", coef = "moincome1") fit4 <- brm(ls ~ mo(income), data = dat, prior = prior4, sample_prior = TRUE) ``` The `1` at the end of `"moincome1"` may appear strange when first working with monotonic effects. However, it is necessary as one monotonic term may be associated with multiple simplex parameters, if interactions of multiple monotonic variables are included in the model. ```{r} summary(fit4) ``` We have used `sample_prior = TRUE` to also obtain draws from the prior distribution of `simo_moincome1` so that we can visualize it. 
```{r} plot(fit4, variable = "prior_simo", regex = TRUE, N = 3) ``` As is visible in the plots, `simo_moincome1[1]` was a-priori on average twice as high as `simo_moincome1[2]` and `simo_moincome1[3]` as a result of setting $\alpha_1$ to 2. ## Modeling interactions of monotonic variables Suppose, we have additionally asked participants for their age. ```{r} dat$age <- rnorm(100, mean = 40, sd = 10) ``` We are not only interested in the main effect of age but also in the interaction of income and age. Interactions with monotonic variables can be specified in the usual way using the `*` operator: ```{r, results='hide'} fit5 <- brm(ls ~ mo(income)*age, data = dat) ``` ```{r} summary(fit5) conditional_effects(fit5, "income:age") ``` ## Modelling Monotonic Group-Level Effects Suppose that the 100 people in our sample data were drawn from 10 different cities; 10 people per city. Thus, we add an identifier for `city` to the data and add some city-related variation to `ls`. ```{r} dat$city <- rep(1:10, each = 10) var_city <- rnorm(10, sd = 10) dat$ls <- dat$ls + var_city[dat$city] ``` With the following code, we fit a multilevel model assuming the intercept and the effect of `income` to vary by city: ```{r, results='hide'} fit6 <- brm(ls ~ mo(income)*age + (mo(income) | city), data = dat) ``` ```{r} summary(fit6) ``` reveals that the effect of `income` varies only little across cities. For the present data, this is not overly surprising given that, in the data simulations, we assumed `income` to have the same effect across cities. ## References Bürkner P. C. & Charpentier, E. (in review). [Monotonic Effects: A Principled Approach for Including Ordinal Predictors in Regression Models](https://psyarxiv.com/9qkhj/). *PsyArXiv preprint*. brms/inst/doc/brms_distreg.html0000644000176200001440000172756414504266211016340 0ustar liggesusers Estimating Distributional Models with brms

Estimating Distributional Models with brms

Paul Bürkner

2023-09-25

Introduction

This vignette provides an introduction on how to fit distributional regression models with brms. We use the term distributional model to refer to a model, in which we can specify predictor terms for all parameters of the assumed response distribution. In the vast majority of regression model implementations, only the location parameter (usually the mean) of the response distribution depends on the predictors and corresponding regression parameters. Other parameters (e.g., scale or shape parameters) are estimated as auxiliary parameters assuming them to be constant across observations. This assumption is so common that most researchers applying regression models are often (in my experience) not aware of the possibility of relaxing it. This is understandable insofar as relaxing this assumption drastically increase model complexity and thus makes models hard to fit. Fortunately, brms uses Stan on the backend, which is an incredibly flexible and powerful tool for estimating Bayesian models so that model complexity is much less of an issue.

Suppose we have a normally distributed response variable. Then, in basic linear regression, we specify a predictor term \(\eta_{\mu}\) for the mean parameter \(\mu\) of the normal distribution. The second parameter of the normal distribution – the residual standard deviation \(\sigma\) – is assumed to be constant across observations. We estimate \(\sigma\) but do not try to predict it. In a distributional model, however, we do exactly this by specifying a predictor term \(\eta_{\sigma}\) for \(\sigma\) in addition to the predictor term \(\eta_{\mu}\). Ignoring group-level effects for the moment, the linear predictor of a parameter \(\theta\) for observation \(n\) has the form

\[\eta_{\theta n} = \sum_{i = 1}^{K_{\theta}} b_{\theta i} x_{\theta i n}\] where \(x_{\theta i n}\) denotes the value of the \(i\)th predictor of parameter \(\theta\) for observation \(n\) and \(b_{\theta i}\) is the \(i\)th regression coefficient of parameter \(\theta\). A distributional normal model with response variable \(y\) can then be written as

\[y_n \sim \mathcal{N}\left(\eta_{\mu n}, \, \exp(\eta_{\sigma n}) \right)\] We used the exponential function around \(\eta_{\sigma}\) to reflect that \(\sigma\) constitutes a standard deviation and thus only takes on positive values, while a linear predictor can be any real number.

A simple distributional model

Unequal variance models are possibly the most simple, but nevertheless very important application of distributional models. Suppose we have two groups of patients: One group receives a treatment (e.g., an antidepressive drug) and another group receives placebo. Since the treatment may not work equally well for all patients, the symptom variance of the treatment group may be larger than the symptom variance of the placebo group after some weeks of treatment. For simplicity, assume that we only investigate the post-treatment values.

group <- rep(c("treat", "placebo"), each = 30)
symptom_post <- c(rnorm(30, mean = 1, sd = 2), rnorm(30, mean = 0, sd = 1))
dat1 <- data.frame(group, symptom_post)
head(dat1)
  group symptom_post
1 treat    3.5555564
2 treat   -0.7629582
3 treat    1.3273393
4 treat    3.6603350
5 treat   -0.8676630
6 treat   -1.4614449

The following model estimates the effect of group on both the mean and the residual standard deviation of the normal response distribution.

fit1 <- brm(bf(symptom_post ~ group, sigma ~ group),
            data = dat1, family = gaussian())

Useful summary statistics and plots can be obtained via

summary(fit1)
plot(fit1, N = 2, ask = FALSE)

plot(conditional_effects(fit1), points = TRUE)

The population-level effect sigma_grouptreat, which is the contrast of the two residual standard deviations on the log-scale, reveals that the variances of both groups are indeed different. This impression is confirmed when looking at the conditional_effects of group. Going one step further, we can compute the residual standard deviations on the original scale using the hypothesis method.

hyp <- c("exp(sigma_Intercept) = 0",
         "exp(sigma_Intercept + sigma_grouptreat) = 0")
hypothesis(fit1, hyp)
Hypothesis Tests for class b:
                Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
1 (exp(sigma_Interc... = 0     1.34      0.19     1.03     1.78         NA        NA    *
2 (exp(sigma_Interc... = 0     2.33      0.31     1.80     3.07         NA        NA    *
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.

We may also directly compare them and plot the posterior distribution of their difference.

hyp <- "exp(sigma_Intercept + sigma_grouptreat) > exp(sigma_Intercept)"
(hyp <- hypothesis(fit1, hyp))
Hypothesis Tests for class b:
                Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
1 (exp(sigma_Interc... > 0     0.99      0.37     0.41     1.61        399         1    *
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.
plot(hyp, chars = NULL)

Indeed, the residual standard deviation of the treatment group seems to larger than that of the placebo group. Moreover the magnitude of this difference is pretty similar to what we expected due to the values we put into the data simulations.

Zero-Inflated Models

Another important application of the distributional regression framework is so-called zero-inflated models. These models are helpful whenever there are more zeros in the response variable than one would naturally expect. For example, if one seeks to predict the number of cigarettes people smoke per day and also includes non-smokers, there will be a huge amount of zeros which, when not modeled appropriately, can seriously distort parameter estimates. Here, we consider an example dealing with the number of fish caught by various groups of people. On the UCLA website, the data are described as follows: “The state wildlife biologists want to model how many fish are being caught by fishermen at a state park. Visitors are asked how long they stayed, how many people were in the group, were there children in the group and how many fish were caught. Some visitors do not fish, but there is no data on whether a person fished or not. Some visitors who did fish did not catch any fish so there are excess zeros in the data because of the people that did not fish.”

zinb <- read.csv("https://paul-buerkner.github.io/data/fish.csv")
head(zinb)
  nofish livebait camper persons child         xb         zg count
1      1        0      0       1     0 -0.8963146  3.0504048     0
2      0        1      1       1     0 -0.5583450  1.7461489     0
3      0        1      0       1     0 -0.4017310  0.2799389     0
4      0        1      1       2     1 -0.9562981 -0.6015257     0
5      0        1      0       1     0  0.4368910  0.5277091     1
6      0        1      1       4     2  1.3944855 -0.7075348     0

As predictors we choose the number of people per group, the number of children, as well as whether the group consists of campers. Many groups may not even try catching any fish at all (thus leading to many zero responses) and so we fit a zero-inflated Poisson model to the data. For now, we assume a constant zero-inflation probability across observations.

fit_zinb1 <- brm(count ~ persons + child + camper,
                 data = zinb, family = zero_inflated_poisson())

Again, we summarize the results using the usual methods.

summary(fit_zinb1)
 Family: zero_inflated_poisson 
  Links: mu = log; zi = identity 
Formula: count ~ persons + child + camper 
   Data: zinb (Number of observations: 250) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    -1.01      0.18    -1.37    -0.66 1.00     2715     2520
persons       0.87      0.05     0.79     0.97 1.00     2873     2513
child        -1.36      0.09    -1.54    -1.18 1.00     3004     2756
camper        0.80      0.09     0.62     0.98 1.00     3329     2694

Family Specific Parameters: 
   Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
zi     0.41      0.04     0.32     0.49 1.00     2897     2191

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(fit_zinb1), ask = FALSE)

According to the parameter estimates, larger groups catch more fish, campers catch more fish than non-campers, and groups with more children catch fewer fish. The zero-inflation probability zi is pretty large with a mean of 41%. Please note that the probability of catching no fish is actually higher than 41%, but parts of this probability are already modeled by the Poisson distribution itself (hence the name zero-inflation). If you want to treat all zeros as originating from a separate process, you can use hurdle models instead (not shown here).

Now, we try to additionally predict the zero-inflation probability by the number of children. The underlying reasoning is that we expect groups with more children to not even try catching fish. Most children are just terribly bad at waiting for hours until something happens. From a purely statistical perspective, zero-inflated (and hurdle) distributions are a mixture of two processes and predicting both parts of the model is natural and often very reasonable to make full use of the data.

fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child),
                 data = zinb, family = zero_inflated_poisson())
summary(fit_zinb2)
 Family: zero_inflated_poisson 
  Links: mu = log; zi = logit 
Formula: count ~ persons + child + camper 
         zi ~ child
   Data: zinb (Number of observations: 250) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept       -1.08      0.18    -1.44    -0.73 1.00     3079     2805
zi_Intercept    -0.95      0.25    -1.47    -0.50 1.00     3943     2527
persons          0.89      0.05     0.80     0.99 1.00     3111     2915
child           -1.17      0.09    -1.36    -0.99 1.00     3386     3025
camper           0.78      0.09     0.60     0.95 1.00     4397     2853
zi_child         1.22      0.27     0.72     1.75 1.00     4220     3018

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(fit_zinb2), ask = FALSE)

To transform the linear predictor of zi into a probability, brms applies the logit-link:

\[logit(zi) = \log\left(\frac{zi}{1-zi}\right) = \eta_{zi}\]

The logit-link takes values within \((0, 1)\) and returns values on the real line. Thus, it allows the transition between probabilities and linear predictors.

According to the model, trying to fish with children not only decreases the overall number of fish caught (as implied by the Poisson part of the model) but also drastically increases your chance of catching no fish at all (as implied by the zero-inflation part), most likely because groups with more children are not even trying.

Additive Distributional Models

In the examples so far, we did not have multilevel data and thus did not fully use the capabilities of the distributional regression framework of brms. In the example presented below, we will not only show how to deal with multilevel data in distributional models, but also how to incorporate smooth terms (i.e., splines) into the model. In many applications, we have no or only a very vague idea what the relationship between a predictor and the response looks like. A very flexible approach to tackle this problem is to use splines and let them figure out the form of the relationship. For illustration purposes, we simulate some data with the mgcv package, which is also used in brms to prepare smooth terms.

dat_smooth <- mgcv::gamSim(eg = 6, n = 200, scale = 2, verbose = FALSE)
Gu & Wahba 4 term additive model
head(dat_smooth[, 1:6])
          y        x0        x1         x2        x3         f
1 14.891365 0.1903068 0.4984357 0.25244076 0.3443001 15.613500
2 10.338635 0.8504896 0.3393447 0.55035731 0.9019582 11.758805
3 10.514918 0.3776844 0.5620602 0.95473475 0.5267090 13.932690
4 19.897204 0.9622479 0.7628432 0.07152812 0.3633239 18.577226
5  9.969535 0.2932593 0.1140945 0.41305367 0.7297697  9.757228
6 14.048834 0.2929994 0.7510520 0.61085187 0.6794771 15.333578

The data contains the predictors x0 to x3 as well as the grouping factor fac indicating the nested structure of the data. We predict the response variable y using smooth terms of x1 and x2 and a varying intercept of fac. In addition, we assume the residual standard deviation sigma to vary by a smoothing term of x0 and a varying intercept of fac.

fit_smooth1 <- brm(
  bf(y ~ s(x1) + s(x2) + (1|fac), sigma ~ s(x0) + (1|fac)),
  data = dat_smooth, family = gaussian(),
  chains = 2, control = list(adapt_delta = 0.95)
)
summary(fit_smooth1)
 Family: gaussian 
  Links: mu = identity; sigma = log 
Formula: y ~ s(x1) + s(x2) + (1 | fac) 
         sigma ~ s(x0) + (1 | fac)
   Data: dat_smooth (Number of observations: 200) 
  Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 2000

Smooth Terms: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sds(sx1_1)           3.43      2.35     0.71    10.05 1.00      606      653
sds(sx2_1)          21.71      5.95    13.33    35.96 1.00      877     1180
sds(sigma_sx0_1)     0.87      0.84     0.03     3.16 1.00      720     1043

Group-Level Effects: 
~fac (Number of levels: 4) 
                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)           4.92      2.19     2.27    10.84 1.00     1111     1177
sd(sigma_Intercept)     0.17      0.19     0.01     0.67 1.00      595      959

Population-Level Effects: 
                Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept          16.16      2.28    11.44    20.77 1.01      857      999
sigma_Intercept     0.73      0.13     0.48     0.98 1.00     1062      813
sx1_1              16.70      6.69     4.49    31.34 1.00     1317      964
sx2_1              49.63     18.38    14.59    88.05 1.00     1782     1358
sigma_sx0_1        -0.16      1.78    -3.77     3.61 1.00     1032      826

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(fit_smooth1), points = TRUE, ask = FALSE)

This model is likely overkill for the data at hand, but nicely demonstrates the ease with which one can specify complex models with brms and fit them using Stan on the backend.

brms/inst/doc/brms_phylogenetics.R0000644000176200001440000001335414504270071016770 0ustar liggesusersparams <- list(EVAL = TRUE) ## ---- SETTINGS-knitr, include=FALSE----------------------------------------------------- stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ## --------------------------------------------------------------------------------------- phylo <- ape::read.nexus("https://paul-buerkner.github.io/data/phylo.nex") data_simple <- read.table( "https://paul-buerkner.github.io/data/data_simple.txt", header = TRUE ) head(data_simple) ## --------------------------------------------------------------------------------------- A <- ape::vcv.phylo(phylo) ## ---- results='hide'-------------------------------------------------------------------- model_simple <- brm( phen ~ cofactor + (1|gr(phylo, cov = A)), data = data_simple, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0, 10), "b"), prior(normal(0, 50), "Intercept"), prior(student_t(3, 0, 20), "sd"), prior(student_t(3, 0, 20), "sigma") ) ) ## --------------------------------------------------------------------------------------- summary(model_simple) plot(model_simple, N = 2, ask = FALSE) plot(conditional_effects(model_simple), points = TRUE) ## --------------------------------------------------------------------------------------- hyp <- "sd_phylo__Intercept^2 / (sd_phylo__Intercept^2 + sigma^2) = 0" (hyp <- hypothesis(model_simple, hyp, class = NULL)) plot(hyp) ## --------------------------------------------------------------------------------------- data_repeat <- read.table( "https://paul-buerkner.github.io/data/data_repeat.txt", header = TRUE ) data_repeat$spec_mean_cf <- with(data_repeat, sapply(split(cofactor, 
phylo), mean)[phylo]) head(data_repeat) ## ---- results='hide'-------------------------------------------------------------------- model_repeat1 <- brm( phen ~ spec_mean_cf + (1|gr(phylo, cov = A)) + (1|species), data = data_repeat, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0,10), "b"), prior(normal(0,50), "Intercept"), prior(student_t(3,0,20), "sd"), prior(student_t(3,0,20), "sigma") ), sample_prior = TRUE, chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ## --------------------------------------------------------------------------------------- summary(model_repeat1) ## --------------------------------------------------------------------------------------- hyp <- paste( "sd_phylo__Intercept^2 /", "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0" ) (hyp <- hypothesis(model_repeat1, hyp, class = NULL)) plot(hyp) ## --------------------------------------------------------------------------------------- data_repeat$within_spec_cf <- data_repeat$cofactor - data_repeat$spec_mean_cf ## ---- results='hide'-------------------------------------------------------------------- model_repeat2 <- update( model_repeat1, formula = ~ . 
+ within_spec_cf, newdata = data_repeat, chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ## --------------------------------------------------------------------------------------- summary(model_repeat2) ## --------------------------------------------------------------------------------------- hyp <- paste( "sd_phylo__Intercept^2 /", "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0" ) (hyp <- hypothesis(model_repeat2, hyp, class = NULL)) ## --------------------------------------------------------------------------------------- data_fisher <- read.table( "https://paul-buerkner.github.io/data/data_effect.txt", header = TRUE ) data_fisher$obs <- 1:nrow(data_fisher) head(data_fisher) ## ---- results='hide'-------------------------------------------------------------------- model_fisher <- brm( Zr | se(sqrt(1 / (N - 3))) ~ 1 + (1|gr(phylo, cov = A)) + (1|obs), data = data_fisher, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0, 10), "Intercept"), prior(student_t(3, 0, 10), "sd") ), control = list(adapt_delta = 0.95), chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ## --------------------------------------------------------------------------------------- summary(model_fisher) plot(model_fisher) ## --------------------------------------------------------------------------------------- data_pois <- read.table( "https://paul-buerkner.github.io/data/data_pois.txt", header = TRUE ) data_pois$obs <- 1:nrow(data_pois) head(data_pois) ## ---- results='hide'-------------------------------------------------------------------- model_pois <- brm( phen_pois ~ cofactor + (1|gr(phylo, cov = A)) + (1|obs), data = data_pois, family = poisson("log"), data2 = list(A = A), chains = 2, cores = 2, iter = 4000, control = list(adapt_delta = 0.95) ) ## --------------------------------------------------------------------------------------- summary(model_pois) plot(conditional_effects(model_pois), points = TRUE) ## ---- 
results='hide'-------------------------------------------------------------------- model_normal <- brm( phen_pois ~ cofactor + (1|gr(phylo, cov = A)), data = data_pois, family = gaussian(), data2 = list(A = A), chains = 2, cores = 2, iter = 4000, control = list(adapt_delta = 0.95) ) ## --------------------------------------------------------------------------------------- summary(model_normal) ## --------------------------------------------------------------------------------------- pp_check(model_pois) pp_check(model_normal) ## --------------------------------------------------------------------------------------- loo(model_pois, model_normal) brms/inst/doc/brms_multivariate.R0000644000176200001440000000473714504267210016627 0ustar liggesusersparams <- list(EVAL = TRUE) ## ---- SETTINGS-knitr, include=FALSE----------------------------------------------------- stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ## ----data------------------------------------------------------------------------------- data("BTdata", package = "MCMCglmm") head(BTdata) ## ----fit1, message=FALSE, warning=FALSE, results='hide'--------------------------------- bform1 <- bf(mvbind(tarsus, back) ~ sex + hatchdate + (1|p|fosternest) + (1|q|dam)) + set_rescor(TRUE) fit1 <- brm(bform1, data = BTdata, chains = 2, cores = 2) ## ----summary1, warning=FALSE------------------------------------------------------------ fit1 <- add_criterion(fit1, "loo") summary(fit1) ## ----pp_check1, message=FALSE----------------------------------------------------------- pp_check(fit1, resp = "tarsus") pp_check(fit1, resp = "back") ## ----R2_1------------------------------------------------------------------------------- bayes_R2(fit1) ## ----fit2, 
message=FALSE, warning=FALSE, results='hide'--------------------------------- bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) bf_back <- bf(back ~ hatchdate + (1|p|fosternest) + (1|q|dam)) fit2 <- brm(bf_tarsus + bf_back + set_rescor(TRUE), data = BTdata, chains = 2, cores = 2) ## ----summary2, warning=FALSE------------------------------------------------------------ fit2 <- add_criterion(fit2, "loo") summary(fit2) ## ----loo12------------------------------------------------------------------------------ loo(fit1, fit2) ## ----fit3, message=FALSE, warning=FALSE, results='hide'--------------------------------- bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) + lf(sigma ~ 0 + sex) + skew_normal() bf_back <- bf(back ~ s(hatchdate) + (1|p|fosternest) + (1|q|dam)) + gaussian() fit3 <- brm( bf_tarsus + bf_back + set_rescor(FALSE), data = BTdata, chains = 2, cores = 2, control = list(adapt_delta = 0.95) ) ## ----summary3, warning=FALSE------------------------------------------------------------ fit3 <- add_criterion(fit3, "loo") summary(fit3) ## ----me3-------------------------------------------------------------------------------- conditional_effects(fit3, "hatchdate", resp = "back") brms/inst/doc/brms_phylogenetics.html0000644000176200001440000146055414504270072017545 0ustar liggesusers Estimating Phylogenetic Multilevel Models with brms

Estimating Phylogenetic Multilevel Models with brms

Paul Bürkner

2023-09-25

Introduction

In the present vignette, we want to discuss how to specify phylogenetic multilevel models using brms. These models are relevant in evolutionary biology when data of many species are analyzed at the same time. The usual approach would be to model species as a grouping factor in a multilevel model and estimate varying intercepts (and possibly also varying slopes) over species. However, species are not independent as they come from the same phylogenetic tree and we thus have to adjust our model to incorporate this dependency. The examples discussed here are from chapter 11 of the book Modern Phylogenetic Comparative Methods and Their Application in Evolutionary Biology (de Villemereuil & Nakagawa, 2014). The necessary data can be downloaded from the corresponding website (https://www.mpcm-evolution.com/). Some of these models may take a few minutes to fit.

A Simple Phylogenetic Model

Assume we have measurements of a phenotype, phen (say the body size), and a cofactor variable (say the temperature of the environment). We prepare the data using the following code.

phylo <- ape::read.nexus("https://paul-buerkner.github.io/data/phylo.nex")
data_simple <- read.table(
  "https://paul-buerkner.github.io/data/data_simple.txt",
  header = TRUE
)
head(data_simple)
       phen  cofactor phylo
1 107.06595 10.309588  sp_1
2  79.61086  9.690507  sp_2
3 116.38186 15.007825  sp_3
4 143.28705 19.087673  sp_4
5 139.60993 15.658404  sp_5
6  68.50657  6.005236  sp_6

The phylo object contains information on the relationship between species. Using this information, we can construct a covariance matrix of species (Hadfield & Nakagawa, 2010).

A <- ape::vcv.phylo(phylo)

Now we are ready to fit our first phylogenetic multilevel model:

model_simple <- brm(
  phen ~ cofactor + (1|gr(phylo, cov = A)),
  data = data_simple,
  family = gaussian(),
  data2 = list(A = A),
  prior = c(
    prior(normal(0, 10), "b"),
    prior(normal(0, 50), "Intercept"),
    prior(student_t(3, 0, 20), "sd"),
    prior(student_t(3, 0, 20), "sigma")
  )
)

With the exception of (1|gr(phylo, cov = A)) instead of (1|phylo) this is a basic multilevel model with a varying intercept over species (phylo is an indicator of species in this data set). However, by using cov = A in the gr function, we make sure that species are correlated as specified by the covariance matrix A. We pass A itself via the data2 argument which can be used for any kind of data that does not fit into the regular structure of the data argument. Setting priors is not required for achieving good convergence for this model, but it improves sampling speed a bit. After fitting, the results can be investigated in detail.

summary(model_simple)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: phen ~ cofactor + (1 | gr(phylo, cov = A)) 
   Data: data_simple (Number of observations: 200) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)    14.40      2.17    10.28    18.76 1.00      992     1880

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    37.79      7.14    23.31    52.09 1.00     2559     2614
cofactor      5.18      0.14     4.90     5.45 1.00     7480     3298

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     9.24      0.74     7.83    10.73 1.00     1283     2158

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(model_simple, N = 2, ask = FALSE)

plot(conditional_effects(model_simple), points = TRUE)

The so called phylogenetic signal (often symbolize by \(\lambda\)) can be computed with the hypothesis method and is roughly \(\lambda = 0.7\) for this example.

hyp <- "sd_phylo__Intercept^2 / (sd_phylo__Intercept^2 + sigma^2) = 0"
(hyp <- hypothesis(model_simple, hyp, class = NULL))
Hypothesis Tests for class :
                Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
1 (sd_phylo__Interc... = 0      0.7      0.09      0.5     0.84         NA        NA    *
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.
plot(hyp)

Note that the phylogenetic signal is just a synonym of the intra-class correlation (ICC) used in the context of phylogenetic analysis.

A Phylogenetic Model with Repeated Measurements

Often, we have multiple observations per species and this allows us to fit more complicated phylogenetic models.

data_repeat <- read.table(
  "https://paul-buerkner.github.io/data/data_repeat.txt",
  header = TRUE
)
data_repeat$spec_mean_cf <-
  with(data_repeat, sapply(split(cofactor, phylo), mean)[phylo])
head(data_repeat)
       phen  cofactor species phylo spec_mean_cf
1 107.41919 11.223724    sp_1  sp_1    10.309588
2 109.16403  9.805934    sp_1  sp_1    10.309588
3  91.88672 10.308423    sp_1  sp_1    10.309588
4 121.54341  8.355349    sp_1  sp_1    10.309588
5 105.31638 11.854510    sp_1  sp_1    10.309588
6  64.99859  4.314015    sp_2  sp_2     3.673914

The variable spec_mean_cf just contains the mean of the cofactor for each species. The code for the repeated measurement phylogenetic model looks as follows:

model_repeat1 <- brm(
  phen ~ spec_mean_cf + (1|gr(phylo, cov = A)) + (1|species),
  data = data_repeat,
  family = gaussian(),
  data2 = list(A = A),
  prior = c(
    prior(normal(0,10), "b"),
    prior(normal(0,50), "Intercept"),
    prior(student_t(3,0,20), "sd"),
    prior(student_t(3,0,20), "sigma")
  ),
  sample_prior = TRUE, chains = 2, cores = 2,
  iter = 4000, warmup = 1000
)

The variables phylo and species are identical as they are both identifiers of the species. However, we model the phylogenetic covariance only for phylo and thus the species variable accounts for any specific effect that would be independent of the phylogenetic relationship between species (e.g., environmental or niche effects). Again we can obtain model summaries as well as estimates of the phylogenetic signal.

summary(model_repeat1)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: phen ~ spec_mean_cf + (1 | gr(phylo, cov = A)) + (1 | species) 
   Data: data_repeat (Number of observations: 1000) 
  Draws: 2 chains, each with iter = 4000; warmup = 1000; thin = 1;
         total post-warmup draws = 6000

Group-Level Effects: 
~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)    16.34      1.86    12.92    20.13 1.00     1625     2406

~species (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     5.02      0.83     3.32     6.56 1.00     1044     1343

Population-Level Effects: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept       36.37      7.92    20.67    52.08 1.00     4095     3220
spec_mean_cf     5.10      0.10     4.91     5.30 1.00     8582     5016

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     8.11      0.20     7.74     8.51 1.00     5145     3988

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
hyp <- paste(
  "sd_phylo__Intercept^2 /",
  "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0"
)
(hyp <- hypothesis(model_repeat1, hyp, class = NULL))
Hypothesis Tests for class :
                Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
1 (sd_phylo__Interc... = 0     0.74      0.06     0.62     0.83          0         0    *
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.
plot(hyp)

So far, we have completely ignored the variability of the cofactor within species. To incorporate this into the model, we define

data_repeat$within_spec_cf <- data_repeat$cofactor - data_repeat$spec_mean_cf

and then fit it again using within_spec_cf as an additional predictor.

model_repeat2 <- update(
  model_repeat1, formula = ~ . + within_spec_cf,
  newdata = data_repeat, chains = 2, cores = 2,
  iter = 4000, warmup = 1000
)

The results are almost unchanged, with apparently no relationship between the phenotype and the within species variance of cofactor.

summary(model_repeat2)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: phen ~ spec_mean_cf + (1 | gr(phylo, cov = A)) + (1 | species) + within_spec_cf 
   Data: data_repeat (Number of observations: 1000) 
  Draws: 2 chains, each with iter = 4000; warmup = 1000; thin = 1;
         total post-warmup draws = 6000

Group-Level Effects: 
~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)    16.53      1.89    13.10    20.48 1.00     1400     1969

~species (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     4.99      0.85     3.28     6.59 1.00      986     1402

Population-Level Effects: 
               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept         36.13      7.91    20.46    51.86 1.00     3419     3256
spec_mean_cf       5.10      0.10     4.90     5.30 1.00     7344     4643
within_spec_cf    -0.06      0.19    -0.43     0.30 1.00     8867     3379

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     8.11      0.20     7.73     8.50 1.00     5793     4052

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

Also, the phylogenetic signal remains more or less the same.

hyp <- paste(
  "sd_phylo__Intercept^2 /",
  "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0"
)
(hyp <- hypothesis(model_repeat2, hyp, class = NULL))
Hypothesis Tests for class :
                Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
1 (sd_phylo__Interc... = 0     0.74      0.06     0.62     0.84          0         0    *
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.

A Phylogenetic Meta-Analysis

Let’s say we have Fisher’s z-transformed correlation coefficients \(Zr\) per species along with corresponding sample sizes (e.g., correlations between male coloration and reproductive success):

data_fisher <- read.table(
  "https://paul-buerkner.github.io/data/data_effect.txt",
  header = TRUE
)
data_fisher$obs <- 1:nrow(data_fisher)
head(data_fisher)
          Zr  N phylo obs
1 0.28917549 13  sp_1   1
2 0.02415579 40  sp_2   2
3 0.19513651 39  sp_3   3
4 0.09831239 40  sp_4   4
5 0.13780152 66  sp_5   5
6 0.13710587 41  sp_6   6

We assume the sampling variance to be known, with \(V(Zr) = \frac{1}{N - 3}\) for Fisher’s values, where \(N\) is the sample size per species. Incorporating the known sampling variance into the model is straightforward. One has to keep in mind though, that brms requires the sampling standard deviation (square root of the variance) as input instead of the variance itself. The group-level effect of obs represents the residual variance, which we have to model explicitly in a meta-analytic model.

model_fisher <- brm(
  Zr | se(sqrt(1 / (N - 3))) ~ 1 + (1|gr(phylo, cov = A)) + (1|obs),
  data = data_fisher, family = gaussian(),
  data2 = list(A = A),
  prior = c(
    prior(normal(0, 10), "Intercept"),
    prior(student_t(3, 0, 10), "sd")
  ),
  control = list(adapt_delta = 0.95),
  chains = 2, cores = 2, iter = 4000, warmup = 1000
)

A summary of the fitted model is obtained via

summary(model_fisher)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: Zr | se(sqrt(1/(N - 3))) ~ 1 + (1 | gr(phylo, cov = A)) + (1 | obs) 
   Data: data_fisher (Number of observations: 200) 
  Draws: 2 chains, each with iter = 4000; warmup = 1000; thin = 1;
         total post-warmup draws = 6000

Group-Level Effects: 
~obs (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.05      0.03     0.00     0.10 1.00      907     1935

~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.06      0.04     0.00     0.15 1.00      855     1568

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     0.16      0.04     0.08     0.24 1.00     3598     2794

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     0.00      0.00     0.00     0.00   NA       NA       NA

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(model_fisher)

The meta-analytic mean (i.e., the model intercept) is \(0.16\) with a credible interval of \([0.08, 0.24]\). Thus the mean correlation across species is positive according to the model.

A phylogenetic count-data model

Suppose that we analyze a phenotype that consists of counts instead of being a continuous variable. In such a case, the normality assumption will likely not be justified and it is recommended to use a distribution explicitly suited for count data, for instance the Poisson distribution. The following data set (again retrieved from mpcm-evolution.org) provides an example.

data_pois <- read.table(
  "https://paul-buerkner.github.io/data/data_pois.txt",
  header = TRUE
)
data_pois$obs <- 1:nrow(data_pois)
head(data_pois)
  phen_pois   cofactor phylo obs
1         1  7.8702830  sp_1   1
2         0  3.4690529  sp_2   2
3         1  2.5478774  sp_3   3
4        14 18.2286628  sp_4   4
5         1  2.5302806  sp_5   5
6         1  0.5145559  sp_6   6

As the Poisson distribution does not have a natural overdispersion parameter, we model the residual variance via the group-level effects of obs (e.g., see Lawless, 1987).

model_pois <- brm(
  phen_pois ~ cofactor + (1|gr(phylo, cov = A)) + (1|obs),
  data = data_pois, family = poisson("log"),
  data2 = list(A = A),
  chains = 2, cores = 2, iter = 4000,
  control = list(adapt_delta = 0.95)
)

Again, we obtain a summary of the fitted model via

summary(model_pois)
 Family: poisson 
  Links: mu = log 
Formula: phen_pois ~ cofactor + (1 | gr(phylo, cov = A)) + (1 | obs) 
   Data: data_pois (Number of observations: 200) 
  Draws: 2 chains, each with iter = 4000; warmup = 2000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~obs (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.18      0.09     0.01     0.34 1.00      615      829

~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.18      0.10     0.02     0.42 1.01      729      932

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    -2.08      0.20    -2.49    -1.69 1.00     3694     2524
cofactor      0.25      0.01     0.23     0.27 1.00     4669     3079

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(model_pois), points = TRUE)

Now, assume we ignore the fact that the phenotype is count data and fit a linear normal model instead.

model_normal <- brm(
  phen_pois ~ cofactor + (1|gr(phylo, cov = A)),
  data = data_pois, family = gaussian(),
  data2 = list(A = A),
  chains = 2, cores = 2, iter = 4000,
  control = list(adapt_delta = 0.95)
)
summary(model_normal)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: phen_pois ~ cofactor + (1 | gr(phylo, cov = A)) 
   Data: data_pois (Number of observations: 200) 
  Draws: 2 chains, each with iter = 4000; warmup = 2000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.70      0.51     0.03     1.93 1.00      964     1522

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    -3.06      0.65    -4.37    -1.82 1.00     2926     2359
cofactor      0.68      0.04     0.60     0.76 1.00     5669     2637

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     3.43      0.18     3.10     3.82 1.00     4081     2665

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

We see that cofactor has a positive relationship with the phenotype in both models. One should keep in mind, though, that the estimates of the Poisson model are on the log-scale, as we applied the canonical log-link function in this example. Therefore, estimates are not comparable to a linear normal model even if applied to the same data. What we can compare, however, is the model fit, for instance graphically via posterior predictive checks.

pp_check(model_pois)

pp_check(model_normal)

Apparently, the distribution of the phenotype predicted by the Poisson model resembles the original distribution of the phenotype pretty closely, while the normal model fails to do so. We can also apply leave-one-out cross-validation for direct numerical comparison of model fit.

loo(model_pois, model_normal)
Output of model 'model_pois':

Computed from 4000 by 200 log-likelihood matrix

         Estimate   SE
elpd_loo   -348.4 17.0
p_loo        29.7  3.4
looic       696.8 34.0
------
Monte Carlo SE of elpd_loo is NA.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     167   83.5%   454       
 (0.5, 0.7]   (ok)        30   15.0%   189       
   (0.7, 1]   (bad)        3    1.5%   293       
   (1, Inf)   (very bad)   0    0.0%   <NA>      
See help('pareto-k-diagnostic') for details.

Output of model 'model_normal':

Computed from 4000 by 200 log-likelihood matrix

         Estimate   SE
elpd_loo   -536.0 15.9
p_loo        10.3  2.3
looic      1072.0 31.8
------
Monte Carlo SE of elpd_loo is 0.1.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     193   96.5%   555       
 (0.5, 0.7]   (ok)         7    3.5%   329       
   (0.7, 1]   (bad)        0    0.0%   <NA>      
   (1, Inf)   (very bad)   0    0.0%   <NA>      

All Pareto k estimates are ok (k < 0.7).
See help('pareto-k-diagnostic') for details.

Model comparisons:
             elpd_diff se_diff
model_pois      0.0       0.0 
model_normal -187.6      18.0 

Since smaller values of loo indicate better fit, it is again evident that the Poisson model fits the data better than the normal model. Of course, the Poisson model is not the only reasonable option here. For instance, you could use a negative binomial model (via family negative_binomial), which already contains an overdispersion parameter so that modeling a varying intercept of obs becomes obsolete.

Phylogenetic models with multiple group-level effects

In the above examples, we have only used a single group-level effect (i.e., a varying intercept) for the phylogenetic grouping factors. In brms, it is also possible to estimate multiple group-level effects (e.g., a varying intercept and a varying slope) for these grouping factors. However, it requires repeatedly computing Kronecker products of covariance matrices while fitting the model. This will be very slow especially when the grouping factors have many levels and matrices are thus large.

References

de Villemereuil P. & Nakagawa, S. (2014) General quantitative genetic methods for comparative biology. In: Modern phylogenetic comparative methods and their application in evolutionary biology: concepts and practice (ed. Garamszegi L.) Springer, New York. pp. 287-303.

Hadfield, J. D. & Nakagawa, S. (2010) General quantitative genetic methods for comparative biology: phylogenies, taxonomies, and multi-trait models for continuous and categorical characters. Journal of Evolutionary Biology. 23. 494-508.

Lawless, J. F. (1987). Negative binomial and mixed Poisson regression. Canadian Journal of Statistics, 15(3), 209-225.

brms/inst/doc/brms_distreg.Rmd0000644000176200001440000002521614224753311016077 0ustar liggesusers--- title: "Estimating Distributional Models with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Estimating Distributional Models with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction This vignette provides an introduction on how to fit distributional regression models with **brms**. We use the term *distributional model* to refer to a model, in which we can specify predictor terms for all parameters of the assumed response distribution. In the vast majority of regression model implementations, only the location parameter (usually the mean) of the response distribution depends on the predictors and corresponding regression parameters. Other parameters (e.g., scale or shape parameters) are estimated as auxiliary parameters assuming them to be constant across observations. This assumption is so common that most researchers applying regression models are often (in my experience) not aware of the possibility of relaxing it. This is understandable insofar as relaxing this assumption drastically increase model complexity and thus makes models hard to fit. Fortunately, **brms** uses **Stan** on the backend, which is an incredibly flexible and powerful tool for estimating Bayesian models so that model complexity is much less of an issue. Suppose we have a normally distributed response variable. 
Then, in basic linear regression, we specify a predictor term $\eta_{\mu}$ for the mean parameter $\mu$ of the normal distribution. The second parameter of the normal distribution -- the residual standard deviation $\sigma$ -- is assumed to be constant across observations. We estimate $\sigma$ but do not try to *predict* it. In a distributional model, however, we do exactly this by specifying a predictor term $\eta_{\sigma}$ for $\sigma$ in addition to the predictor term $\eta_{\mu}$. Ignoring group-level effects for the moment, the linear predictor of a parameter $\theta$ for observation $n$ has the form $$\eta_{\theta n} = \sum_{i = 1}^{K_{\theta}} b_{\theta i} x_{\theta i n}$$ where $x_{\theta i n}$ denotes the value of the $i$th predictor of parameter $\theta$ for observation $n$ and $b_{\theta i}$ is the $i$th regression coefficient of parameter $\theta$. A distributional normal model with response variable $y$ can then be written as $$y_n \sim \mathcal{N}\left(\eta_{\mu n}, \, \exp(\eta_{\sigma n}) \right)$$ We used the exponential function around $\eta_{\sigma}$ to reflect that $\sigma$ constitutes a standard deviation and thus only takes on positive values, while a linear predictor can be any real number. ## A simple distributional model Unequal variance models are possibly the most simple, but nevertheless very important application of distributional models. Suppose we have two groups of patients: One group receives a treatment (e.g., an antidepressive drug) and another group receives placebo. Since the treatment may not work equally well for all patients, the symptom variance of the treatment group may be larger than the symptom variance of the placebo group after some weeks of treatment. For simplicity, assume that we only investigate the post-treatment values. 
```{r} group <- rep(c("treat", "placebo"), each = 30) symptom_post <- c(rnorm(30, mean = 1, sd = 2), rnorm(30, mean = 0, sd = 1)) dat1 <- data.frame(group, symptom_post) head(dat1) ``` The following model estimates the effect of `group` on both the mean and the residual standard deviation of the normal response distribution. ```{r, results='hide'} fit1 <- brm(bf(symptom_post ~ group, sigma ~ group), data = dat1, family = gaussian()) ``` Useful summary statistics and plots can be obtained via ```{r, results='hide'} summary(fit1) plot(fit1, N = 2, ask = FALSE) plot(conditional_effects(fit1), points = TRUE) ``` The population-level effect `sigma_grouptreat`, which is the contrast of the two residual standard deviations on the log-scale, reveals that the variances of both groups are indeed different. This impression is confirmed when looking at the `conditional_effects` of `group`. Going one step further, we can compute the residual standard deviations on the original scale using the `hypothesis` method. ```{r} hyp <- c("exp(sigma_Intercept) = 0", "exp(sigma_Intercept + sigma_grouptreat) = 0") hypothesis(fit1, hyp) ``` We may also directly compare them and plot the posterior distribution of their difference. ```{r} hyp <- "exp(sigma_Intercept + sigma_grouptreat) > exp(sigma_Intercept)" (hyp <- hypothesis(fit1, hyp)) plot(hyp, chars = NULL) ``` Indeed, the residual standard deviation of the treatment group seems to be larger than that of the placebo group. Moreover, the magnitude of this difference is pretty similar to what we expected due to the values we put into the data simulations. 
For example, if one seeks to predict the number of cigarettes people smoke per day and also includes non-smokers, there will be a huge amount of zeros which, when not modeled appropriately, can seriously distort parameter estimates. Here, we consider an example dealing with the number of fish caught by various groups of people. On the UCLA website (\url{https://stats.idre.ucla.edu/stata/dae/zero-inflated-poisson-regression}), the data are described as follows: "The state wildlife biologists want to model how many fish are being caught by fishermen at a state park. Visitors are asked how long they stayed, how many people were in the group, were there children in the group and how many fish were caught. Some visitors do not fish, but there is no data on whether a person fished or not. Some visitors who did fish did not catch any fish so there are excess zeros in the data because of the people that did not fish." ```{r} zinb <- read.csv("https://paul-buerkner.github.io/data/fish.csv") head(zinb) ``` As predictors we choose the number of people per group, the number of children, as well as whether the group consists of campers. Many groups may not even try catching any fish at all (thus leading to many zero responses) and so we fit a zero-inflated Poisson model to the data. For now, we assume a constant zero-inflation probability across observations. ```{r, results='hide'} fit_zinb1 <- brm(count ~ persons + child + camper, data = zinb, family = zero_inflated_poisson()) ``` Again, we summarize the results using the usual methods. ```{r} summary(fit_zinb1) plot(conditional_effects(fit_zinb1), ask = FALSE) ``` According to the parameter estimates, larger groups catch more fish, campers catch more fish than non-campers, and groups with more children catch less fish. The zero-inflation probability `zi` is pretty large with a mean of 41%. 
Please note that the probability of catching no fish is actually higher than 41%, but parts of this probability are already modeled by the Poisson distribution itself (hence the name zero-*inflation*). If you want to treat all zeros as originating from a separate process, you can use hurdle models instead (not shown here). Now, we try to additionally predict the zero-inflation probability by the number of children. The underlying reasoning is that we expect groups with more children to not even try catching fish. Most children are just terribly bad at waiting for hours until something happens. From a purely statistical perspective, zero-inflated (and hurdle) distributions are a mixture of two processes and predicting both parts of the model is natural and often very reasonable to make full use of the data. ```{r, results='hide'} fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child), data = zinb, family = zero_inflated_poisson()) ``` ```{r} summary(fit_zinb2) plot(conditional_effects(fit_zinb2), ask = FALSE) ``` To transform the linear predictor of `zi` into a probability, **brms** applies the logit-link: $$logit(zi) = \log\left(\frac{zi}{1-zi}\right) = \eta_{zi}$$ The logit-link takes values within $[0, 1]$ and returns values on the real line. Thus, it allows the transition between probabilities and linear predictors. According to the model, trying to fish with children not only decreases the overall number of fish caught (as implied by the Poisson part of the model) but also drastically increases your chance of catching no fish at all (as implied by the zero-inflation part) most likely because groups with more children are not even trying. 
In the example presented below, we will not only show how to deal with multilevel data in distributional models, but also how to incorporate smooth terms (i.e., splines) into the model. In many applications, we have no or only a very vague idea how the relationship between a predictor and the response looks like. A very flexible approach to tackle this problems is to use splines and let them figure out the form of the relationship. For illustration purposes, we simulate some data with the **mgcv** package, which is also used in **brms** to prepare smooth terms. ```{r} dat_smooth <- mgcv::gamSim(eg = 6, n = 200, scale = 2, verbose = FALSE) head(dat_smooth[, 1:6]) ``` The data contains the predictors `x0` to `x3` as well as the grouping factor `fac` indicating the nested structure of the data. We predict the response variable `y` using smooth terms of `x1` and `x2` and a varying intercept of `fac`. In addition, we assume the residual standard deviation `sigma` to vary by a smoothing term of `x0` and a varying intercept of `fac`. ```{r, results='hide'} fit_smooth1 <- brm( bf(y ~ s(x1) + s(x2) + (1|fac), sigma ~ s(x0) + (1|fac)), data = dat_smooth, family = gaussian(), chains = 2, control = list(adapt_delta = 0.95) ) ``` ```{r} summary(fit_smooth1) plot(conditional_effects(fit_smooth1), points = TRUE, ask = FALSE) ``` This model is likely an overkill for the data at hand, but nicely demonstrates the ease with which one can specify complex models with **brms** and to fit them using **Stan** on the backend. 
brms/inst/doc/brms_multilevel.ltx0000644000176200001440000016555114213413565016716 0ustar liggesusers\documentclass[article, nojss]{jss} %\VignetteIndexEntry{Multilevel Models with brms} %\VignetteEngine{R.rsp::tex} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% almost as usual \author{Paul-Christian B\"urkner} \title{Advanced Bayesian Multilevel Modeling with the \proglang{R} Package \pkg{brms}} %% for pretty printing and a nice hypersummary also set: \Plainauthor{Paul-Christian B\"urkner} %% comma-separated \Plaintitle{Advanced Bayesian Multilevel Modeling with the R Package brms} %% without formatting \Shorttitle{Advanced Bayesian Multilevel Modeling with \pkg{brms}} %% a short title (if necessary) %% an abstract and keywords \Abstract{ The \pkg{brms} package allows R users to easily specify a wide range of Bayesian single-level and multilevel models, which are fitted with the probabilistic programming language \proglang{Stan} behind the scenes. Several response distributions are supported, of which all parameters (e.g., location, scale, and shape) can be predicted at the same time thus allowing for distributional regression. Non-linear relationships may be specified using non-linear predictor terms or semi-parametric approaches such as splines or Gaussian processes. Multivariate models, in which each response variable can be predicted using the above mentioned options, can be fitted as well. To make all of these modeling options possible in a multilevel framework, \pkg{brms} provides an intuitive and powerful formula syntax, which extends the well known formula syntax of \pkg{lme4}. The purpose of the present paper is to introduce this syntax in detail and to demonstrate its usefulness with four examples, each showing other relevant aspects of the syntax. If you use \pkg{brms}, please cite this article as published in the R Journal \citep{brms2}. 
} \Keywords{Bayesian inference, multilevel models, distributional regression, MCMC, \proglang{Stan}, \proglang{R}} \Plainkeywords{Bayesian inference, multilevel models, distributional regression, MCMC, Stan, R} %% without formatting %% at least one keyword must be supplied %% publication information %% NOTE: Typically, this can be left commented and will be filled out by the technical editor %% \Volume{50} %% \Issue{9} %% \Month{June} %% \Year{2012} %% \Submitdate{2012-06-04} %% \Acceptdate{2012-06-04} %% The address of (at least) one author should be given %% in the following format: \Address{ Paul-Christian B\"urkner\\ E-mail: \email{paul.buerkner@gmail.com}\\ URL: \url{https://paul-buerkner.github.io} } %% It is also possible to add a telephone and fax number %% before the e-mail in the following format: %% Telephone: +43/512/507-7103 %% Fax: +43/512/507-2851 %% for those who use Sweave please include the following line (with % symbols): %% need no \usepackage{Sweave.sty} %% end of declarations %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} %% include your article here, just as usual %% Note that you should use the \pkg{}, \proglang{} and \code{} commands. \section{Introduction} Multilevel models (MLMs) offer great flexibility for researchers across sciences \citep{brown2015, demidenko2013, gelmanMLM2006, pinheiro2006}. They allow modeling of data measured on different levels at the same time -- for instance data of students nested within classes and schools -- thus taking complex dependency structures into account. It is not surprising that many packages for R have been developed to fit MLMs. Usually, however, the functionality of these implementations is limited insofar as it is only possible to predict the mean of the response distribution. Other parameters of the response distribution, such as the residual standard deviation in linear models, are assumed constant across observations, which may be violated in many applications. 
Accordingly, it is desirable to allow for prediction of \emph{all} response parameters at the same time. Models doing exactly that are often referred to as \emph{distributional models} or more verbosely \emph{models for location, scale and shape} \citep{rigby2005}. Another limitation of basic MLMs is that they only allow for linear predictor terms. While linear predictor terms already offer good flexibility, they are of limited use when relationships are inherently non-linear. Such non-linearity can be handled in at least two ways: (1) by fully specifying a non-linear predictor term with corresponding parameters each of which can be predicted using MLMs \citep{lindstrom1990}, or (2) estimating the form of the non-linear relationship on the fly using splines \citep{wood2004} or Gaussian processes \citep{rasmussen2006}. The former are often simply called \emph{non-linear models}, while models applying splines are referred to as \emph{generalized additive models} (GAMs; \citeauthor{hastie1990}, \citeyear{hastie1990}). Combining all of these modeling options into one framework is a complex task, both conceptually and with regard to model fitting. Maximum likelihood methods, which are typically applied in classical 'frequentist' statistics, can reach their limits at some point and fully Bayesian methods become the go-to solutions to fit such complex models \citep{gelman2014}. In addition to being more flexible, the Bayesian framework comes with other advantages, for instance, the ability to derive probability statements for every quantity of interest or explicitly incorporating prior knowledge about parameters into the model. The former is particularly relevant in non-linear models, for which classical approaches struggle more often than not in propagating all the uncertainty in the parameter estimates to non-linear functions such as out-of-sample predictions. 
Possibly the most powerful program for performing full Bayesian inference available to date is Stan \citep{stanM2017, carpenter2017}. It implements Hamiltonian Monte Carlo \citep{duane1987, neal2011, betancourt2014} and its extension, the No-U-Turn (NUTS) Sampler \citep{hoffman2014}. These algorithms converge much more quickly than other Markov-Chain Monte-Carlo (MCMC) algorithms especially for high-dimensional models \citep{hoffman2014, betancourt2014, betancourt2017}. An excellent non-mathematical introduction to Hamiltonian Monte Carlo can be found in \citet{betancourt2017}. Stan comes with its own programming language, allowing for great modeling flexibility \citep{stanM2017, carpenter2017}. Many researchers may still be hesitant to use Stan directly, as every model has to be written, debugged and possibly also optimized. This may be a time-consuming and error-prone process even for researchers familiar with Bayesian inference. The \pkg{brms} package \citep{brms1}, presented in this paper, aims to remove these hurdles for a wide range of regression models by allowing the user to benefit from the merits of Stan by using extended \pkg{lme4}-like \citep{bates2015} formula syntax, with which many R users are familiar. It offers much more than writing efficient and human-readable Stan code: \pkg{brms} comes with many post-processing and visualization functions, for instance to perform posterior predictive checks, leave-one-out cross-validation, visualization of estimated effects, and prediction of new data. The overarching aim is to have one general framework for regression modeling, which offers everything required to successfully apply regression models to complex data. 
To date, it already replaces and extends the functionality of dozens of other R packages, each of which is restricted to specific regression models\footnote{Unfortunately, due to the implementation via Stan, it is not easily possible for users to define their own response distributions and run them via \pkg{brms}. If you feel that a response distribution is missing in \pkg{brms}, please open an issue on GitHub (\url{https://github.com/paul-buerkner/brms}).}. The purpose of the present article is to provide an introduction of the advanced multilevel formula syntax implemented in \pkg{brms}, which allows to fit a wide and growing range of non-linear distributional multilevel models. A general overview of the package is already given in \citet{brms1}. Accordingly, the present article focuses on more recent developments. We begin by explaining the underlying structure of distributional models. Next, the formula syntax of \pkg{lme4} and its extensions implemented in \pkg{brms} are explained. Four examples that demonstrate the use of the new syntax are discussed in detail. Afterwards, the functionality of \pkg{brms} is compared with that of \pkg{rstanarm} \citep{rstanarm2017} and \pkg{MCMCglmm} \citep{hadfield2010}. We end by describing future plans for extending the package. \section{Model description} \label{model} The core of models implemented in \pkg{brms} is the prediction of the response $y$ through predicting all parameters $\theta_p$ of the response distribution $D$, which is also called the model \code{family} in many R packages. We write $$y_i \sim D(\theta_{1i}, \theta_{2i}, ...)$$ to stress the dependency on the $i\textsuperscript{th}$ observation. Every parameter $\theta_p$ may be regressed on its own predictor term $\eta_p$ transformed by the inverse link function $f_p$ that is $\theta_{pi} = f_p(\eta_{pi})$\footnote{A parameter can also be assumed constant across observations so that a linear predictor is not required.}. 
Such models are typically refered to as \emph{distributional models}\footnote{The models described in \citet{brms1} are a sub-class of the here described models.}. Details about the parameterization of each \code{family} are given in \code{vignette("brms\_families")}. Suppressing the index $p$ for simplicity, a predictor term $\eta$ can generally be written as $$ \eta = \mathbf{X} \beta + \mathbf{Z} u + \sum_{k = 1}^K s_k(x_k) $$ In this equation, $\beta$ and $u$ are the coefficients at population-level and group-level respectively and $\mathbf{X}, \mathbf{Z}$ are the corresponding design matrices. The terms $s_k(x_k)$ symbolize optional smooth functions of unspecified form based on covariates $x_k$ fitted via splines (see \citet{wood2011} for the underlying implementation in the \pkg{mgcv} package) or Gaussian processes \citep{williams1996}. The response $y$ as well as $\mathbf{X}$, $\mathbf{Z}$, and $x_k$ make up the data, whereas $\beta$, $u$, and the smooth functions $s_k$ are the model parameters being estimated. The coefficients $\beta$ and $u$ may be more commonly known as fixed and random effects, but I avoid theses terms following the recommendations of \citet{gelmanMLM2006}. Details about prior distributions of $\beta$ and $u$ can be found in \citet{brms1} and under \code{help("set\_prior")}. As an alternative to the strictly additive formulation described above, predictor terms may also have any form specifiable in Stan. We call it a \emph{non-linear} predictor and write $$\eta = f(c_1, c_2, ..., \phi_1, \phi_2, ...)$$ The structure of the function $f$ is given by the user, $c_r$ are known or observed covariates, and $\phi_s$ are non-linear parameters each having its own linear predictor term $\eta_{\phi_s}$ of the form specified above. In fact, we should think of non-linear parameters as placeholders for linear predictor terms rather than as parameters themselves. 
A frequentist implementation of such models, which inspired the non-linear syntax in \pkg{brms}, can be found in the \pkg{nlme} package \citep{nlme2016}. \section{Extended multilevel formula syntax} \label{formula_syntax} The formula syntax applied in \pkg{brms} builds upon the syntax of the R package \pkg{lme4} \citep{bates2015}. First, we will briefly explain the \pkg{lme4} syntax used to specify multilevel models and then introduce certain extensions that allow to specify much more complicated models in \pkg{brms}. An \pkg{lme4} formula has the general form \begin{Sinput} response ~ pterms + (gterms | group) \end{Sinput} The \code{pterms} part contains the population-level effects that are assumed to be the same across obervations. The \code{gterms} part contains so called group-level effects that are assumed to vary accross grouping variables specified in \code{group}. Multiple grouping factors each with multiple group-level effects are possible. Usually, \code{group} contains only a single variable name pointing to a factor, but you may also use \code{g1:g2} or \code{g1/g2}, if both \code{g1} and \code{g2} are suitable grouping factors. The \code{:} operator creates a new grouping factor that consists of the combined levels of \code{g1} and \code{g2} (you could think of this as pasting the levels of both factors together). The \code{/} operator indicates nested grouping structures and expands one grouping factor into two or more when using multiple \code{/} within one term. If, for instance, you write \code{(1 | g1/g2)}, it will be expanded to \code{(1 | g1) + (1 | g1:g2)}. Instead of \code{|} you may use \code{||} in grouping terms to prevent group-level correlations from being modeled. This may be useful in particular when modeling so many group-level effects that convergence of the fitting algorithms becomes an issue due to model complexity. 
One limitation of the \code{||} operator in \pkg{lme4} is that it only splits up terms so that columns of the design matrix originating from the same term are still modeled as correlated (e.g., when coding a categorical predictor; see the \code{mixed} function of the \pkg{afex} package by \citet{afex2015} for a way to avoid this behavior). While intuitive and visually appealing, the classic \pkg{lme4} syntax is not flexible enough to allow for specifying the more complex models supported by \pkg{brms}. In non-linear or distributional models, for instance, multiple parameters are predicted, each having their own population and group-level effects. Hence, multiple formulas are necessary to specify such models\footnote{Actually, it is possible to specify multiple model parts within one formula using interactions terms for instance as implemented in \pkg{MCMCglmm} \citep{hadfield2010}. However, this syntax is limited in flexibility and requires a rather deep understanding of the way R parses formulas, thus often being confusing to users.}. Then, however, specifying group-level effects of the same grouping factor to be correlated \emph{across} formulas becomes complicated. The solution implemented in \pkg{brms} (and currently unique to it) is to expand the \code{|} operator into \code{|<ID>|}, where \code{<ID>} can be any value. Group-level terms with the same \code{ID} will then be modeled as correlated if they share the same grouping factor(s)\footnote{It might even be further extended to \code{|fun(<ID>)|}, where \code{fun} defines the type of correlation structure, defaulting to unstructured that is estimating the full correlation matrix. The \code{fun} argument is not yet supported by \pkg{brms} but could be supported in the future if other correlation structures, such as compound symmetry or Toeplitz, turn out to have reasonable practical applications and effective implementations in Stan.}. 
For instance, if the terms \code{(x1|ID|g1)} and \code{(x2|ID|g1)} appear somewhere in the same or different formulas passed to \pkg{brms}, they will be modeled as correlated. Further extensions of the classical \pkg{lme4} syntax refer to the \code{group} part. It is rather limited in its flexibility since only variable names combined by \code{:} or \code{/} are supported. We propose two extensions of this syntax: Firstly, \code{group} can generally be split up into its terms so that, say, \code{(1 | g1 + g2)} is expanded to \code{(1 | g1) + (1 | g2)}. This is fully consistent with the way \code{/} is handled so it provides a natural generalization to the existing syntax. Secondly, there are some special grouping structures that cannot be expressed by simply combining grouping variables. For instance, multi-membership models cannot be expressed this way. To overcome this limitation, we propose wrapping terms in \code{group} within special functions that allow specifying alternative grouping structures: \code{(gterms | fun(group))}. In \pkg{brms}, there are currently two such functions implemented, namely \code{gr} for the default behavior and \code{mm} for multi-membership terms. To be compatible with the original syntax and to keep formulas short, \code{gr} is automatically added internally if none of these functions is specified. While some non-linear relationships, such as quadratic relationships, can be expressed within the basic R formula syntax, other more complicated ones cannot. For this reason, it is possible in \pkg{brms} to fully specify non-linear predictor terms similar to how it is done in \pkg{nlme}, but fully compatible with the extended multilevel syntax described above. Suppose, for instance, we want to model the non-linear growth curve $$ y = b_1 (1 - \exp(-(x / b_2)^{b_3})) $$ between $y$ and $x$ with parameters $b_1$, $b_2$, and $b_3$ (see Example 3 in this paper for an implementation of this model with real data). 
Furthermore, we want all three parameters to vary by a grouping variable $g$ and model those group-level effects as correlated. Additionally $b_1$ should be predicted by a covariate $z$. We can express this in \pkg{brms} using multiple formulas, one for the non-linear model itself and one per non-linear parameter: \begin{Sinput} y ~ b1 * (1 - exp(-(x / b2) ^ b3)) b1 ~ z + (1|ID|g) b2 ~ (1|ID|g) b3 ~ (1|ID|g) \end{Sinput} The first formula will not be evaluated using standard R formula parsing, but instead taken literally. In contrast, the formulas for the non-linear parameters will be evaluated in the usual way and are compatible with all terms supported by \pkg{brms}. Note that we have used the above described ID-syntax to model group-level effects as correlated across formulas. There are other syntax extensions implemented in \pkg{brms} that do not directly target grouping terms. Firstly, there are terms formally included in the \code{pterms} part that are handled separately. The most prominent examples are smooth terms specified through the \code{s} and \code{t2} functions of the \pkg{mgcv} package \citep{wood2011}. Other examples are category specific effects \code{cs}, monotonic effects \code{mo}, noise-free effects \code{me}, or Gaussian process terms \code{gp}. The former is explained in \citet{brms1}, while the latter three are documented in \code{help(brmsformula)}. Internally, these terms are extracted from \code{pterms} and not included in the construction of the population-level design matrix. Secondly, making use of the fact that \code{|} is unused on the left-hand side of $\sim$ in formulas, additional information on the response variable may be specified via \begin{Sinput} response | aterms ~ \end{Sinput} The \code{aterms} part may contain multiple terms of the form \code{fun()} separated by \code{+} each providing special information on the response variable. 
This allows among others to weight observations, provide known standard errors for meta-analysis, or model censored or truncated data. As it is not the main topic of the present paper, we refer to \code{help("brmsformula")} and \code{help("addition-terms")} for more details. To set up the model formulas and combine them into one object, \pkg{brms} defines the \code{brmsformula} (or short \code{bf}) function. Its output can then be passed to the \code{parse\_bf} function, which splits up the formulas in separate parts and prepares them for the generation of design matrices and related data. Other packages may re-use these functions in their own routines making it easier to offer support for the above described multilevel syntax. \section{Examples} The idea of \pkg{brms} is to provide one unified framework for multilevel regression models in R. As such, the above described formula syntax in all of its variations can be applied in combination with all response distributions supported by \pkg{brms} (currently about 35 response distributions are supported; see \code{help("brmsfamily")} and \code{vignette("brms\_families")} for an overview). In this section, we will discuss four examples in detail, each focusing on certain aspects of the syntax. They are chosen to provide a broad overview of the modeling options. The first is about the number of fish caught by different groups of people. It does not actually contain any multilevel structure, but helps in understanding how to set up formulas for different model parts. The second example is about housing rents in Munich. We model the data using splines and a distributional regression approach. The third example is about cumulative insurance loss payments across several years, which is fitted using a rather complex non-linear multilevel model. Finally, the fourth example is about the performance of school children, who change school during the year, thus requiring a multi-membership model. 
Despite not being covered in the four examples, there are a few more modeling options that we want to briefly describe. First, \pkg{brms} allows fitting so called phylogenetic models. These models are relevant in evolutionary biology when data of many species are analyzed at the same time. Species are not independent as they come from the same phylogenetic tree, implying that different levels of the same grouping-factor (i.e., species) are likely correlated. There is a whole vignette dedicated to this topic, which can be found via \code{vignette("brms\_phylogenetics")}. Second, there is a canonical way to handle ordinal predictors, without falsely assuming they are either categorical or continuous. We call them monotonic effects and discuss them in \code{vignette("brms\_monotonic")}. Last but not least, it is possible to account for measurement error in both response and predictor variables. This is often ignored in applied regression modeling \citep{westfall2016}, although measurement error is very common in all scientific fields making use of observational data. There is no vignette yet covering this topic, but one will be added in the future. In the meantime, \code{help("brmsformula")} is the best place to start learning about such models as well as about other details of the \pkg{brms} formula syntax. \subsection{Example 1: Catching fish} An important application of the distributional regression framework of \pkg{brms} are so called zero-inflated and hurdle models. These models are helpful whenever there are more zeros in the response variable than one would naturally expect. Here, we consider an example dealing with the number of fish caught by various groups of people. On the UCLA website (\url{https://stats.idre.ucla.edu/stata/dae/zero-inflated-poisson-regression}), the data are described as follows: ``The state wildlife biologists want to model how many fish are being caught by fishermen at a state park. 
Visitors are asked how long they stayed, how many people were in the group, were there children in the group and how many fish were caught. Some visitors do not fish, but there is no data on whether a person fished or not. Some visitors who did fish did not catch any fish so there are excess zeros in the data because of the people that did not fish.'' \begin{Sinput} zinb <- read.csv("http://stats.idre.ucla.edu/stat/data/fish.csv") zinb$camper <- factor(zinb$camper, labels = c("no", "yes")) head(zinb) \end{Sinput} \begin{Sinput} nofish livebait camper persons child xb zg count 1 1 0 no 1 0 -0.8963146 3.0504048 0 2 0 1 yes 1 0 -0.5583450 1.7461489 0 3 0 1 no 1 0 -0.4017310 0.2799389 0 4 0 1 yes 2 1 -0.9562981 -0.6015257 0 5 0 1 no 1 0 0.4368910 0.5277091 1 6 0 1 yes 4 2 1.3944855 -0.7075348 0 \end{Sinput} As predictors we choose the number of people per group, the number of children, as well as whether or not the group consists of campers. Many groups may not catch any fish just because they do not try and so we fit a zero-inflated Poisson model. For now, we assume a constant zero-inflation probability across observations. \begin{Sinput} fit_zinb1 <- brm(count ~ persons + child + camper, data = zinb, family = zero_inflated_poisson("log")) \end{Sinput} The model is readily summarized via \begin{Sinput} summary(fit_zinb1) \end{Sinput} \begin{Sinput} Family: zero_inflated_poisson (log) Formula: count ~ persons + child + camper Data: zinb (Number of observations: 250) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept -1.01 0.17 -1.34 -0.67 2171 1 persons 0.87 0.04 0.79 0.96 2188 1 child -1.36 0.09 -1.55 -1.18 1790 1 camper 0.80 0.09 0.62 0.98 2950 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat zi 0.41 0.04 0.32 0.49 2409 1 Samples were drawn using sampling(NUTS). 
For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} A graphical summary is available through \begin{Sinput} conditional_effects(fit_zinb1) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_zinb1.pdf} \caption{Conditional effects plots of the \code{fit\_zinb1} model.} \label{me_zinb1} \end{figure} (see Figure \ref{me_zinb1}). In fact, the \code{conditional\_effects} method turned out to be so powerful in visualizing effects of predictors that I am using it almost as frequently as \code{summary}. According to the parameter estimates, larger groups catch more fish, campers catch more fish than non-campers, and groups with more children catch less fish. The zero-inflation probability \code{zi} is pretty large with a mean of 41\%. Please note that the probability of catching no fish is actually higher than 41\%, but parts of this probability are already modeled by the Poisson distribution itself (hence the name zero-\emph{inflation}). If you want to treat all zeros as originating from a separate process, you can use hurdle models instead (not shown here). Now, we try to additionally predict the zero-inflation probability by the number of children. The underlying reasoning is that we expect groups with more children to not even try catching fish, since children often lack the patience required for fishing. From a purely statistical perspective, zero-inflated (and hurdle) distributions are a mixture of two processes and predicting both parts of the model is natural and often very reasonable to make full use of the data. 
\begin{Sinput} fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child), data = zinb, family = zero_inflated_poisson()) \end{Sinput} To transform the linear predictor of \code{zi} into a probability, \pkg{brms} applies the logit-link, which takes values within $[0, 1]$ and returns values on the real line. Thus, it allows the transition between probabilities and linear predictors. \begin{Sinput} summary(fit_zinb2) \end{Sinput} \begin{Sinput} Family: zero_inflated_poisson (log) Formula: count ~ persons + child + camper zi ~ child Data: zinb (Number of observations: 250) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept -1.07 0.18 -1.43 -0.73 2322 1 persons 0.89 0.05 0.80 0.98 2481 1 child -1.17 0.10 -1.37 -1.00 2615 1 camper 0.78 0.10 0.60 0.96 3270 1 zi_Intercept -0.95 0.27 -1.52 -0.48 2341 1 zi_child 1.21 0.28 0.69 1.79 2492 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} According to the model, trying to fish with children not only decreases the overall number of fish caught (as implied by the Poisson part of the model) but also drastically increases your chance of catching no fish at all (as implied by the zero-inflation part), possibly because groups with more children spend less time or no time at all fishing. Comparing model fit via leave-one-out cross validation as implemented in the \pkg{loo} package \citep{loo2016, vehtari2016}. \begin{Sinput} LOO(fit_zinb1, fit_zinb2) \end{Sinput} \begin{Sinput} LOOIC SE fit_zinb1 1639.52 363.30 fit_zinb2 1621.35 362.39 fit_zinb1 - fit_zinb2 18.16 15.71 \end{Sinput} reveals that the second model using the number of children to predict both model parts has better fit. 
However, when considering the standard error of the \code{LOOIC} difference, improvement in model fit is apparently modest and not substantial. More examples of distributional models can be found in \code{vignette("brms\_distreg")}. \subsection{Example 2: Housing rents} In their book about regression modeling, \citet{fahrmeir2013} use an example about the housing rents in Munich from 1999. The data contains information about roughly 3000 apartments including among others the absolute rent (\code{rent}), rent per square meter (\code{rentsqm}), size of the apartment (\code{area}), construction year (\code{yearc}), and the district in Munich (\code{district}), where the apartment is located. The data can be found in the \pkg{gamlss.data} package \citep{gamlss.data}: \begin{Sinput} data("rent99", package = "gamlss.data") head(rent99) \end{Sinput} \begin{Sinput} rent rentsqm area yearc location bath kitchen cheating district 1 109.9487 4.228797 26 1918 2 0 0 0 916 2 243.2820 8.688646 28 1918 2 0 0 1 813 3 261.6410 8.721369 30 1918 1 0 0 1 611 4 106.4103 3.547009 30 1918 2 0 0 0 2025 5 133.3846 4.446154 30 1918 2 0 0 1 561 6 339.0256 11.300851 30 1918 2 0 0 1 541 \end{Sinput} Here, we aim at predicting the rent per square meter with the size of the apartment as well as the construction year, while taking the district of Munich into account. As the effect of both predictors on the rent is of unknown non-linear form, we model these variables using a bivariate tensor spline \citep{wood2013}. The district is accounted for via a varying intercept. \begin{Sinput} fit_rent1 <- brm(rentsqm ~ t2(area, yearc) + (1|district), data = rent99, chains = 2, cores = 2) \end{Sinput} We fit the model using just two chains (instead of the default of four chains) on two processor cores to reduce the model fitting time for the purpose of the present paper. In general, using the default option of four chains (or more) is recommended. 
\begin{Sinput} summary(fit_rent1) \end{Sinput} \begin{Sinput} Family: gaussian(identity) Formula: rentsqm ~ t2(area, yearc) + (1 | district) Data: rent99 (Number of observations: 3082) Samples: 2 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 2000 ICs: LOO = NA; WAIC = NA; R2 = NA Smooth Terms: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sds(t2areayearc_1) 4.93 2.32 1.61 10.77 1546 1.00 sds(t2areayearc_2) 5.78 2.87 1.58 13.15 1175 1.00 sds(t2areayearc_3) 8.09 3.19 3.66 16.22 1418 1.00 Group-Level Effects: ~district (Number of levels: 336) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 0.60 0.06 0.48 0.73 494 1.01 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept 7.80 0.11 7.59 8.02 2000 1.00 t2areayearc_1 -1.00 0.09 -1.15 -0.83 2000 1.00 t2areayearc_2 0.75 0.17 0.43 1.09 2000 1.00 t2areayearc_3 -0.07 0.16 -0.40 0.24 1579 1.00 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 1.95 0.03 1.90 2.01 2000 1.00 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} For models including splines, the output of \code{summary} is not tremendously helpful, but we get at least some information. Firstly, the credible intervals of the standard deviations of the coefficients forming the splines (under \code{'Smooth Terms'}) are sufficiently far away from zero to indicate non-linearity in the (combined) effect of \code{area} and \code{yearc}. Secondly, even after controlling for these predictors, districts still vary with respect to rent per square meter by a sizable amount as visible under \code{'Group-Level Effects'} in the output. 
To further understand the effect of the predictor, we apply graphical methods: \begin{Sinput} conditional_effects(fit_rent1, surface = TRUE) \end{Sinput} In Figure \ref{me_rent1}, the conditional effects of both predictors are displayed, while the respective other predictor is fixed at its mean. In Figure \ref{me_rent2}, the combined effect is shown, clearly demonstrating an interaction between the variables. In particular, housing rents appear to be highest for small and relatively new apartments. \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_rent1.pdf} \caption{Conditional effects plots of the \code{fit\_rent1} model for single predictors.} \label{me_rent1} \end{figure} \begin{figure}[ht] \centering \includegraphics[width=0.7\textwidth,keepaspectratio]{me_rent2.pdf} \caption{Surface plot of the \code{fit\_rent1} model for the combined effect of \code{area} and \code{yearc}.} \label{me_rent2} \end{figure} In the above example, we only considered the mean of the response distribution to vary by \code{area} and \code{yearc}, but this may not necessarily be a reasonable assumption, as the variation of the response might vary with these variables as well. Accordingly, we fit splines and effects of district for both the location and the scale parameter, which is called \code{sigma} in Gaussian models. \begin{Sinput} bform <- bf(rentsqm ~ t2(area, yearc) + (1|ID1|district), sigma ~ t2(area, yearc) + (1|ID1|district)) fit_rent2 <- brm(bform, data = rent99, chains = 2, cores = 2) \end{Sinput} If not otherwise specified, \code{sigma} is predicted on the log-scale to ensure it is positive no matter what the predictor term looks like. Instead of \code{(1|district)} as in the previous model, we now use \code{(1|ID1|district)} in both formulas. This results in modeling the varying intercepts of both model parts as correlated (see the description of the ID-syntax above). 
The group-level part of the \code{summary} output looks as follows: \begin{Sinput} Group-Level Effects: ~district (Number of levels: 336) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 0.60 0.06 0.49 0.73 744 1.00 sd(sigma_Intercept) 0.11 0.02 0.06 0.15 751 1.00 cor(Intercept,sigma_Intercept) 0.72 0.17 0.35 0.98 648 1.00 \end{Sinput} As visible from the positive correlation of the intercepts, districts with overall higher rent per square meter have higher variation at the same time. Lastly, we want to turn our attention to the splines. While \code{conditional\_effects} is used to visualize effects of predictors on the expected response, \code{conditional\_smooths} is used to show just the spline parts of the model: \begin{Sinput} conditional_smooths(fit_rent2) \end{Sinput} The plot on the left-hand side of Figure \ref{me_rent3} resembles the one in Figure \ref{me_rent2}, but the scale is different since only the spline is plotted. The right-hand side of Figure \ref{me_rent3} shows the spline for \code{sigma}. Since we apply the log-link on \code{sigma} by default the spline is on the log-scale as well. As visible in the plot, the variation in the rent per square meter is highest for relatively small and old apartments, while the variation is smallest for medium to large apartments built around the 1960s. \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_rent3.pdf} \caption{Plots showing the smooth terms of the \code{fit\_rent2} model.} \label{me_rent3} \end{figure} \subsection{Example 3: Insurance loss payments} On his blog, Markus Gesmann predicts the growth of cumulative insurance loss payments over time, originated from different origin years (see \url{http://www.magesblog.com/2015/11/loss-developments-via-growth-curves-and.html}). We will use a slightly simplified version of his model for demonstration purposes here. 
It looks as follows: $$cum_{AY, dev} \sim N(\mu_{AY, dev}, \sigma)$$ $$\mu_{AY, dev} = ult_{AY} \left(1 - \exp\left(- \left( \frac{dev}{\theta} \right)^\omega \right) \right)$$ The cumulative insurance payments $cum$ will grow over time, and we model this dependency using the variable $dev$. Further, $ult_{AY}$ is the (to be estimated) ultimate loss of each accident year. It constitutes a non-linear parameter in our framework along with the parameters $\theta$ and $\omega$, which are responsible for the growth of the cumulative loss and are for now assumed to be the same across years. We load the data \begin{Sinput} url <- paste0("https://raw.githubusercontent.com/mages/", "diesunddas/master/Data/ClarkTriangle.csv") loss <- read.csv(url) head(loss) \end{Sinput} \begin{Sinput} AY dev cum 1 1991 6 357.848 2 1991 18 1124.788 3 1991 30 1735.330 4 1991 42 2182.708 5 1991 54 2745.596 6 1991 66 3319.994 \end{Sinput} and translate the proposed model into a non-linear \pkg{brms} model. \begin{Sinput} nlform <- bf(cum ~ ult * (1 - exp(-(dev / theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE) nlprior <- c(prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta")) fit_loss1 <- brm(formula = nlform, data = loss, family = gaussian(), prior = nlprior, control = list(adapt_delta = 0.9)) \end{Sinput} In the above function calls, quite a few things are going on. The formulas are wrapped in \code{bf} to combine them into one object. The first formula specifies the non-linear model. We set argument \code{nl = TRUE} so that \pkg{brms} takes this formula literally instead of using standard R formula parsing. We specify one additional formula per non-linear parameter (a) to clarify what variables are covariates and what are parameters and (b) to specify the predictor term for the parameters. We estimate a group-level effect of accident year (variable \code{AY}) for the ultimate loss \code{ult}. 
This also shows nicely how a non-linear parameter is actually a placeholder for a linear predictor, which in the case of \code{ult}, contains only a varying intercept for year. Both \code{omega} and \code{theta} are assumed to be constant across observations so we just fit a population-level intercept. Priors on population-level effects are required and, for the present model, are actually mandatory to ensure identifiability. Otherwise, we may observe that different Markov chains converge to different parameter regions as multiple posterior distributions are equally plausible. Setting prior distributions is a difficult task especially in non-linear models. It requires some experience and knowledge both about the model that is being fitted and about the data at hand. Additionally, there is more to keep in mind to optimize the sampler's performance: Firstly, using non- or weakly informative priors in non-linear models often leads to problems even if the model is generally identified. For instance, if a zero-centered and reasonably wide prior such as \code{normal(0, 10000)} is set on \code{ult}, there is little information about \code{theta} and \code{omega} for samples of \code{ult} being close to zero, which may lead to convergence problems. Secondly, Stan works best when parameters are roughly on the same order of magnitude \citep{stan2017}. In the present example, \code{ult} is three orders of magnitude larger than \code{omega}. Still, the sampler seems to work quite well, but this may not be true for other models. One solution is to rescale parameters before model fitting. For instance, for the present example, one could have downscaled \code{ult} by replacing it with \code{ult * 1000} and correspondingly the \code{normal(5000, 1000)} prior with \code{normal(5, 1)}. In the \code{control} argument we increase \code{adapt\_delta} to get rid of a few divergent transitions (cf. \citeauthor{stan2017}, \citeyear{stan2017}; \citeauthor{brms1}, \citeyear{brms1}). 
Again the model is summarized via \begin{Sinput} summary(fit_loss1) \end{Sinput} \begin{Sinput} Family: gaussian (identity) Formula: cum ~ ult * (1 - exp(-(dev / theta)^omega)) ult ~ 1 + (1 | AY) omega ~ 1 theta ~ 1 Data: loss (Number of observations: 55) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Group-Level Effects: ~AY (Number of levels: 10) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(ult_Intercept) 745.74 231.31 421.05 1306.04 916 1 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat ult_Intercept 5273.70 292.34 4707.11 5852.28 798 1 omega_Intercept 1.34 0.05 1.24 1.43 2167 1 theta_Intercept 46.07 2.09 42.38 50.57 1896 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 139.93 15.52 113.6 175.33 2358 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} as well as \begin{Sinput} conditional_effects(fit_loss1) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.7\textwidth,keepaspectratio]{me_loss1.pdf} \caption{Conditional effects plots of the \code{fit\_loss1} model.} \label{me_loss1} \end{figure} (see Figure \ref{me_loss1}). We can also visualize the cumulative insurance loss over time separately for each year. 
\begin{Sinput} conditions <- data.frame(AY = unique(loss$AY)) rownames(conditions) <- unique(loss$AY) me_year <- conditional_effects(fit_loss1, conditions = conditions, re_formula = NULL, method = "predict") plot(me_year, ncol = 5, points = TRUE) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_loss1_year.pdf} \caption{Conditional effects plots of the \code{fit\_loss1} model separately for each accident year.} \label{me_loss1_year} \end{figure} (see Figure \ref{me_loss1_year}). It is evident that there is some variation in cumulative loss across accident years, for instance due to natural disasters happening only in certain years. Further, we see that the uncertainty in the predicted cumulative loss is larger for later years with fewer available data points. In the above model, we considered \code{omega} and \code{theta} to be constant across years, which may not necessarily be true. We can easily investigate this by fitting varying intercepts for all three non-linear parameters also estimating group-level correlation using the above introduced \code{ID} syntax. \begin{Sinput} nlform2 <- bf(cum ~ ult * (1 - exp(-(dev / theta)^omega)), ult ~ 1 + (1|ID1|AY), omega ~ 1 + (1|ID1|AY), theta ~ 1 + (1|ID1|AY), nl = TRUE) fit_loss2 <- update(fit_loss1, formula = nlform2, control = list(adapt_delta = 0.90)) \end{Sinput} We could have also specified all predictor terms more conveniently within one formula as \begin{Sinput} ult + omega + theta ~ 1 + (1|ID1|AY) \end{Sinput} because the structure of the predictor terms is identical. To compare model fit, we perform leave-one-out cross-validation. 
\begin{Sinput} LOO(fit_loss1, fit_loss2) \end{Sinput} \begin{Sinput} LOOIC SE fit_loss1 715.44 19.24 fit_loss2 720.60 19.85 fit_loss1 - fit_loss2 -5.15 5.34 \end{Sinput} Since smaller values indicate better expected out-of-sample predictions and thus better model fit, the simpler model that only has a varying intercept over parameter \code{ult} is preferred. This may not be overly surprising, given that three varying intercepts as well as three group-level correlations are probably overkill for data containing only 55 observations. Nevertheless, it nicely demonstrates how to apply the \code{ID} syntax in practice. More examples of non-linear models can be found in \code{vignette("brms\_nonlinear")}. \subsection{Example 4: Performance of school children} Suppose that we want to predict the performance of students in the final exams at the end of the year. There are many variables to consider, but one important factor will clearly be school membership. Schools might differ in the ratio of teachers and students, the general quality of teaching, in the cognitive ability of the students they draw, or other factors we are not aware of that induce dependency among students of the same school. Thus, it is advised to apply multilevel modeling techniques including school membership as a group-level term. Of course, we should account for class membership and other levels of the educational hierarchy as well, but for the purpose of the present example, we will focus on schools only. Usually, accounting for school membership is pretty straightforward by simply adding a varying intercept to the formula: \code{(1 | school)}. However, a non-negligible number of students might change schools during the year. This would result in a situation where one student is a member of multiple schools and so we need a multi-membership model. 
Setting up such a model not only requires information on the different schools students attend during the year, but also the amount of time spent at each school. The latter can be used to weight the influence each school has on its students, since more time attending a school will likely result in greater influence. For now, let us assume that students change schools maximally once a year and spend equal time at each school. We will later see how to relax these assumptions. Real educational data are usually relatively large and complex so that we simulate our own data for the purpose of this tutorial paper. We simulate 10 schools and 1000 students, with each school having the same expected number of 100 students. We model 10\% of students as changing schools. \begin{Sinput} data_mm <- sim_multi_mem(nschools = 10, nstudents = 1000, change = 0.1) head(data_mm) \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 1 8 9 0.5 0.5 16.27422 2 10 9 0.5 0.5 18.71387 3 5 3 0.5 0.5 23.65319 4 3 5 0.5 0.5 22.35204 5 5 3 0.5 0.5 16.38019 6 10 6 0.5 0.5 17.63494 \end{Sinput} The code of function \code{sim\_multi\_mem} can be found in the online supplement of the present paper. For reasons of better illustration, students changing schools appear in the first rows of the data. Data of students being only at a single school looks as follows: \begin{Sinput} data_mm[101:106, ] \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 101 2 2 0.5 0.5 27.247851 102 9 9 0.5 0.5 24.041427 103 4 4 0.5 0.5 12.575001 104 2 2 0.5 0.5 21.203644 105 4 4 0.5 0.5 12.856166 106 4 4 0.5 0.5 9.740174 \end{Sinput} Thus, school variables are identical, but we still have to specify both in order to pass the data appropriately. Incorporating no other predictors into the model for simplicity, a multi-membership model is specified as \begin{Sinput} fit_mm <- brm(y ~ 1 + (1 | mm(s1, s2)), data = data_mm) \end{Sinput} The only new syntax element is that multiple grouping factors (\code{s1} and \code{s2}) are wrapped in \code{mm}. 
Everything else remains exactly the same. Note that we did not specify the relative weights of schools for each student and thus, by default, equal weights are assumed. \begin{Sinput} summary(fit_mm) \end{Sinput} \begin{Sinput} Family: gaussian (identity) Formula: y ~ 1 + (1 | mm(s1, s2)) Data: data_mm (Number of observations: 1000) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Group-Level Effects: ~mms1s2 (Number of levels: 10) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 2.76 0.82 1.69 4.74 682 1.01 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept 19 0.93 17.06 20.8 610 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 3.58 0.08 3.43 3.75 2117 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} With regard to the assumptions made in the above example, it is unlikely that all children who change schools stay in both schools equally long. To relax this assumption, we have to specify weights. First, we amend the simulated data to contain non-equal weights for students changing schools. For all other students, weighting does of course not matter as they stay in the same school anyway. \begin{Sinput} data_mm[1:100, "w1"] <- runif(100, 0, 1) data_mm[1:100, "w2"] <- 1 - data_mm[1:100, "w1"] head(data_mm) \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 1 8 9 0.3403258 0.65967423 16.27422 2 10 9 0.1771435 0.82285652 18.71387 3 5 3 0.9059811 0.09401892 23.65319 4 3 5 0.4432007 0.55679930 22.35204 5 5 3 0.8052026 0.19479738 16.38019 6 10 6 0.5610243 0.43897567 17.63494 \end{Sinput} Incorporating these weights into the model is straight forward. 
\begin{Sinput} fit_mm2 <- brm(y ~ 1 + (1 | mm(s1, s2, weights = cbind(w1, w2))), data = data_mm) \end{Sinput} The summary output is similar to the previous, so we do not show it here. The second assumption that students change schools only once a year, may also easily be relaxed by providing more than two grouping factors, say, \code{mm(s1, s2, s3)}. \section{Comparison between packages} Over the years, many R packages have been developed that implement MLMs, each being more or less general in their supported models. In \cite{brms1}, I compared \pkg{brms} with \pkg{lme4} \citep{bates2015}, \pkg{MCMCglmm} \citep{hadfield2010}, \pkg{rstanarm} \citep{rstanarm2017}, and \pkg{rethinking} \citep{mcelreath2017}. Since then, quite a few new features have been added in particular to \pkg{brms} and \pkg{rstanarm}. Accordingly, in the present paper, I will update these comparisons, but focus on \pkg{brms}, \pkg{rstanarm}, and \pkg{MCMCglmm} as the possibly most important R packages implementing Bayesian MLMs. While \pkg{brms} and \pkg{rstanarm} are both based on the probabilistic programming language \pkg{Stan}, \pkg{MCMCglmm} implements its own custom MCMC algorithm. Modeling options and other important information of these packages are summarized in Table~\ref{comparison} and will be discussed in detail below. Regarding general model classes, all three packages support the most common types such as linear, count data and certain survival models. Currently, \pkg{brms} and \pkg{MCMCglmm} provide more flexibility when modeling categorical and ordinal data. Additionally, they offer support for zero-inflated and hurdle models to account for access zeros in the data (see Example 1 above). 
For survival / time-to-event models, \pkg{rstanarm} offers great flexibility via the \code{stan\_jm} function, which allows for complex association structures between time-to-event data and one or more models of longitudinal covariates (for details see \url{https://cran.r-project.org/web/packages/rstanarm/vignettes/jm.html}). Model classes currently specific to \pkg{brms} are robust linear models using Student's t-distribution (family \code{student}) as well as response times models via the exponentially modified Gaussian (family \code{exgaussian}) distribution or the Wiener diffusion model (family \code{wiener}). The latter allows to simultaneously model dichotomous decisions and their corresponding response times (for a detailed example see \url{http://singmann.org/wiener-model-analysis-with-brms-part-i/}). All three packages offer many additional modeling options, with \pkg{brms} currently having the greatest flexibility (see Table~\ref{comparison} for a summary). Moreover, the packages differ in the general framework, in which they are implemented: \pkg{brms} and \pkg{MCMCglmm} each come with a single model fitting function (\code{brm} and \code{MCMCglmm} respectively), through which all of their models can be specified. Further, their framework allows to seamlessly combine most modeling options with each other in the same model. In contrast, the approach of \pkg{rstanarm} is to emulate existing functions of other packages. This has the advantage of an easier transition between classical and Bayesian models, since the syntax used to specify models stays the same. However, it comes with the main disadvantage that many modeling options cannot be used in combination within the same model. Information criteria are available in all three packages. 
The advantages of WAIC and LOO implemented in \pkg{brms} and \pkg{rstanarm}, are their less restrictive assumptions and that their standard errors can be easily estimated to get a better sense of the uncertainty in the criteria. Comparing the prior options of the packages, \pkg{brms} offers a little more flexibility than \pkg{MCMCglmm} and \pkg{rstanarm}, as virtually any prior distribution can be applied on population-level effects as well as on the standard deviations of group-level effects. In addition, I believe that the way priors are specified in \pkg{brms} is more intuitive as it is directly evident what prior is actually applied. In \pkg{brms}, Bayes factors are available both via Savage-Dickey ratios \citep{wagenmakers2010} and bridge-sampling \citep{bridgesampling2017}, while \pkg{rstanarm} allows for the latter option. For a detailed comparison with respect to sampling efficiency, see \cite{brms1}. \begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{rstanarm}} & \parbox{2cm}{\pkg{MCMCglmm}} \\ \hline \\ [-1.5ex] \parbox{6cm}{\textbf{Model classes}} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & no & no \\ Count data models & yes & yes & yes \\ Survival models & yes & yes$^1$ & yes \\ Response times models & yes & no & no \\ Beta models & yes & yes & no \\ Categorical models & yes & yes$^2$ & yes \\ Multinomial models & no & no & yes \\ Ordinal models & various & cumulative$^2$ & cumulative \\ Zero-inflated and hurdle models & yes & no & yes \\ \hline \\ [-1.5ex] \parbox{5cm}{\textbf{Modeling options}} & & & \\ [1ex] Variable link functions & various & various & no \\ Multilevel structures & yes & yes & yes \\ Multi-membership & yes & no & yes \\ Multivariate responses & yes & yes$^3$ & yes \\ Non-linear predictors & yes & limited$^4$ & no \\ Distributional regression & yes & no & no \\ Finite mixtures & yes & no & no \\ Splines (additive models) & yes & yes & yes \\ Gaussian 
Processes & yes & no & no \\ Temporal autocorrelation & yes & yes$^{2, 5}$ & no \\ Spatial autocorrelation & yes & yes$^{2, 5}$ & no \\ Monotonic effects & yes & no & no \\ Category specific effects & yes & no & no \\ Measurement error & yes & no & no \\ Weights & yes & yes & no \\ Offset & yes & yes & using priors \\ Censored data & yes & yes$^1$ & yes \\ Truncated data & yes & no & no \\ Customized covariances & yes & no & yes \\ Missing value imputation & no & no & no \\ \hline \\ [-1.5ex] \textbf{Bayesian specifics} & & & \\ [1ex] Population-level priors & flexible & flexible & normal \\ Group-level priors & normal & normal & normal \\ Covariance priors & flexible & restricted$^6$ & restricted$^7$ \\ Bayes factors & yes & yes$^8$ & no \\ Parallelization & yes & yes & no \\ \hline \\ [-1.5ex] \textbf{Other} & & & \\ [1ex] Estimator & HMC, NUTS & HMC, NUTS & MH, Gibbs$^9$ \\ Information criterion & WAIC, LOO & WAIC, LOO & DIC \\ C++ compiler required & yes & no & no \\ \hline \end{tabular} \caption{ Comparison of the capabilities of the \pkg{brms}, \pkg{rstanarm} and \pkg{MCMCglmm} packages. Notes: (1) Advanced functionality available via \code{stan\_jm}. (2) No group-level terms and related functionality allowed. (3) Cannot be combined with other modeling options such as splines. (4) Functionality limited to linear Gaussian models and certein pre-specified non-linear functions. (5) Functionality available only on GitHub branches (\url{https://github.com/stan-dev/rstanarm}). (6) For details see \cite{hadfield2010}. (7) For details see \url{https://github.com/stan-dev/rstanarm/wiki/Prior-distributions}. (8) Available via the \pkg{bridgesampling} package \citep{bridgesampling2017}. (9) Estimator consists of a combination of both algorithms. } \label{comparison} \end{table} \section{Conclusion} The present paper is meant to introduce R users and developers to the extended \pkg{lme4} formula syntax applied in \pkg{brms}. 
Only a subset of modeling options were discussed in detail, which ensured the paper was not too broad. For some of the more basic models that \pkg{brms} can fit, see \citet{brms1}. Many more examples can be found in the growing number of vignettes accompanying the package (see \code{vignette(package = "brms")} for an overview). To date, \pkg{brms} is already one of the most flexible R packages when it comes to regression modeling. However, for the future, there are quite a few more features that I am planning to implement (see \url{https://github.com/paul-buerkner/brms/issues} for the current list of issues). In addition to smaller, incremental updates, I have two specific features in mind: (1) latent variables estimated via confirmatory factor analysis and (2) missing value imputation. I receive ideas and suggestions from users almost every day -- for which I am always grateful -- and so the list of features that will be implemented in the proceeding versions of \pkg{brms} will continue to grow. \section*{Acknowledgments} First of all, I would like to thank the Stan Development Team for creating the probabilistic programming language Stan, which is an incredibly powerful and flexible tool for performing full Bayesian inference. Without it, \pkg{brms} could not fit a single model. Furthermore, I want to thank Heinz Holling, Donald Williams and Ruben Arslan for valuable comments on earlier versions of the paper. I also would like to thank the many users who reported bugs or had ideas for new features, thus helping to continuously improve \pkg{brms}. \bibliography{citations_multilevel} \end{document} brms/inst/doc/brms_nonlinear.html0000644000176200001440000205140314504267376016660 0ustar liggesusers Estimating Non-Linear Models with brms

Estimating Non-Linear Models with brms

Paul Bürkner

2023-09-25

Introduction

This vignette provides an introduction on how to fit non-linear multilevel models with brms. Non-linear models are incredibly flexible and powerful, but require much more care with respect to model specification and priors than typical generalized linear models. Ignoring group-level effects for the moment, the predictor term \(\eta_n\) of a generalized linear model for observation \(n\) can be written as follows:

\[\eta_n = \sum_{i = 1}^K b_i x_{ni}\]

where \(b_i\) is the regression coefficient of predictor \(i\) and \(x_{ni}\) is the data of predictor \(i\) for observation \(n\). This also comprises interaction terms and various other data transformations. However, the structure of \(\eta_n\) is always linear in the sense that the regression coefficients \(b_i\) are multiplied by some predictor values and then summed up. This implies that the hypothetical predictor term

\[\eta_n = b_1 \exp(b_2 x_n)\]

would not be a linear predictor anymore and we could not fit it using classical techniques of generalized linear models. We thus need a more general model class, which we will call non-linear models. Note that the term ‘non-linear’ does not say anything about the assumed distribution of the response variable. In particular it does not mean ‘not normally distributed’ as we can apply non-linear predictor terms to all kinds of response distributions (for more details on response distributions available in brms see vignette("brms_families")).

A Simple Non-Linear Model

We begin with a simple example using simulated data.

b <- c(2, 0.75)
x <- rnorm(100)
y <- rnorm(100, mean = b[1] * exp(b[2] * x))
dat1 <- data.frame(x, y)

As stated above, we cannot use a generalized linear model to estimate \(b\) so we go ahead an specify a non-linear model.

prior1 <- prior(normal(1, 2), nlpar = "b1") +
  prior(normal(0, 2), nlpar = "b2")
fit1 <- brm(bf(y ~ b1 * exp(b2 * x), b1 + b2 ~ 1, nl = TRUE),
            data = dat1, prior = prior1)

When looking at the above code, the first thing that becomes obvious is that we changed the formula syntax to display the non-linear formula including predictors (i.e., x) and parameters (i.e., b1 and b2) wrapped in a call to bf. This stands in contrast to classical R formulas, where only predictors are given and parameters are implicit. The argument b1 + b2 ~ 1 serves two purposes. First, it provides information, which variables in formula are parameters, and second, it specifies the linear predictor terms for each parameter. In fact, we should think of non-linear parameters as placeholders for linear predictor terms rather than as parameters themselves (see also the following examples). In the present case, we have no further variables to predict b1 and b2 and thus we just fit intercepts that represent our estimates of \(b_1\) and \(b_2\) in the model equation above. The formula b1 + b2 ~ 1 is a short form of b1 ~ 1, b2 ~ 1 that can be used if multiple non-linear parameters share the same formula. Setting nl = TRUE tells brms that the formula should be treated as non-linear.

In contrast to generalized linear models, priors on population-level parameters (i.e., ‘fixed effects’) are often mandatory to identify a non-linear model. Thus, brms requires the user to explicitly specify these priors. In the present example, we used a normal(1, 2) prior on (the population-level intercept of) b1, while we used a normal(0, 2) prior on (the population-level intercept of) b2. Setting priors is a non-trivial task in all kinds of models, especially in non-linear models, so you should always invest some time to think of appropriate priors. Quite often, you may be forced to change your priors after fitting a non-linear model for the first time, when you observe different MCMC chains converging to different posterior regions. This is a clear sign of an identification problem and one solution is to set stronger (i.e., more narrow) priors.

To obtain summaries of the fitted model, we apply

summary(fit1)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: y ~ b1 * exp(b2 * x) 
         b1 ~ 1
         b2 ~ 1
   Data: dat1 (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
b1_Intercept     1.82      0.11     1.60     2.04 1.00     1587     1837
b2_Intercept     0.75      0.04     0.68     0.82 1.00     1574     1875

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     1.04      0.08     0.91     1.20 1.00     2326     2109

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(fit1)

plot(conditional_effects(fit1), points = TRUE)

The summary method reveals that we were able to recover the true parameter values pretty nicely. According to the plot method, our MCMC chains have converged well and to the same posterior. The conditional_effects method visualizes the model-implied (non-linear) regression line.

We might be also interested in comparing our non-linear model to a classical linear model.

fit2 <- brm(y ~ x, data = dat1)
summary(fit2)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: y ~ x 
   Data: dat1 (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     2.40      0.13     2.14     2.65 1.00     4305     2619
x             1.80      0.13     1.55     2.05 1.00     3844     2789

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     1.31      0.10     1.14     1.52 1.00     3479     2854

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

To investigate and compare model fit, we can apply graphical posterior predictive checks, which make use of the bayesplot package on the backend.

pp_check(fit1)

pp_check(fit2)

We can also easily compare model fit using leave-one-out cross-validation.

loo(fit1, fit2)
Output of model 'fit1':

Computed from 4000 by 100 log-likelihood matrix

         Estimate   SE
elpd_loo   -147.2  6.7
p_loo         3.0  0.8
looic       294.4 13.3
------
Monte Carlo SE of elpd_loo is 0.0.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     98    98.0%   1521      
 (0.5, 0.7]   (ok)        2     2.0%   963       
   (0.7, 1]   (bad)       0     0.0%   <NA>      
   (1, Inf)   (very bad)  0     0.0%   <NA>      

All Pareto k estimates are ok (k < 0.7).
See help('pareto-k-diagnostic') for details.

Output of model 'fit2':

Computed from 4000 by 100 log-likelihood matrix

         Estimate   SE
elpd_loo   -172.4 14.6
p_loo         6.4  4.3
looic       344.8 29.2
------
Monte Carlo SE of elpd_loo is NA.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     99    99.0%   2700      
 (0.5, 0.7]   (ok)        0     0.0%   <NA>      
   (0.7, 1]   (bad)       1     1.0%   27        
   (1, Inf)   (very bad)  0     0.0%   <NA>      
See help('pareto-k-diagnostic') for details.

Model comparisons:
     elpd_diff se_diff
fit1   0.0       0.0  
fit2 -25.2      15.0  

Since smaller LOOIC values indicate better model fit, it is immediately evident that the non-linear model fits the data better, which is of course not too surprising since we simulated the data from exactly that model.

A Real-World Non-Linear model

On his blog, Markus Gesmann predicts the growth of cumulative insurance loss payments over time, originated from different origin years (see https://www.magesblog.com/post/2015-11-03-loss-developments-via-growth-curves-and/). We will use a slightly simplified version of his model for demonstration purposes here. It looks as follows:

\[cum_{AY, dev} \sim N(\mu_{AY, dev}, \sigma)\] \[\mu_{AY, dev} = ult_{AY} \left(1 - \exp\left(- \left( \frac{dev}{\theta} \right)^\omega \right) \right)\]

The cumulative insurance payments \(cum\) will grow over time, and we model this dependency using the variable \(dev\). Further, \(ult_{AY}\) is the (to be estimated) ultimate loss of accident each year. It constitutes a non-linear parameter in our framework along with the parameters \(\theta\) and \(\omega\), which are responsible for the growth of the cumulative loss and are assumed to be the same across years. The data is already shipped with brms.

data(loss)
head(loss)
    AY dev      cum premium
1 1991   6  357.848   10000
2 1991  18 1124.788   10000
3 1991  30 1735.330   10000
4 1991  42 2182.708   10000
5 1991  54 2745.596   10000
6 1991  66 3319.994   10000

and translate the proposed model into a non-linear brms model.

fit_loss <- brm(
  bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)),
     ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1,
     nl = TRUE),
  data = loss, family = gaussian(),
  prior = c(
    prior(normal(5000, 1000), nlpar = "ult"),
    prior(normal(1, 2), nlpar = "omega"),
    prior(normal(45, 10), nlpar = "theta")
  ),
  control = list(adapt_delta = 0.9)
)

We estimate a group-level effect of accident year (variable AY) for the ultimate loss ult. This also shows nicely how a non-linear parameter is actually a placeholder for a linear predictor, which in case of ult, contains only an varying intercept over year. Again, priors on population-level effects are required and, for the present model, are actually mandatory to ensure identifiability. We summarize the model using well known methods.

summary(fit_loss)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: cum ~ ult * (1 - exp(-(dev/theta)^omega)) 
         ult ~ 1 + (1 | AY)
         omega ~ 1
         theta ~ 1
   Data: loss (Number of observations: 55) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~AY (Number of levels: 10) 
                  Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(ult_Intercept)   740.53    228.84   430.68  1299.73 1.00     1238     1833

Population-Level Effects: 
                Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
ult_Intercept    5282.42    291.01  4722.34  5858.59 1.00     1004     1677
omega_Intercept     1.33      0.05     1.24     1.43 1.00     2591     2302
theta_Intercept    46.23      2.15    42.42    50.86 1.00     2423     2022

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma   140.22     16.11   113.57   176.18 1.00     2125     1968

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(fit_loss, N = 3, ask = FALSE)

conditional_effects(fit_loss)

Next, we show marginal effects separately for each year.

conditions <- data.frame(AY = unique(loss$AY))
rownames(conditions) <- unique(loss$AY)
me_loss <- conditional_effects(
  fit_loss, conditions = conditions,
  re_formula = NULL, method = "predict"
)
plot(me_loss, ncol = 5, points = TRUE)

It is evident that there is some variation in cumulative loss across accident years, for instance due to natural disasters happening only in certain years. Further, we see that the uncertainty in the predicted cumulative loss is larger for later years with fewer available data points. For a more detailed discussion of this data set, see Section 4.5 in Gesmann & Morris (2020).

Advanced Item-Response Models

As a third example, we want to show how to model more advanced item-response models using the non-linear model framework of brms. For simplicity, suppose we have a single forced choice item with three alternatives of which only one is correct. Our response variable is whether a person answers the item correctly (1) or not (0). Person are assumed to vary in their ability to answer the item correctly. However, every person has a 33% chance of getting the item right just by guessing. We thus simulate some data to reflect this situation.

inv_logit <- function(x) 1 / (1 + exp(-x))
ability <- rnorm(300)
p <- 0.33 + 0.67 * inv_logit(ability)
answer <- ifelse(runif(300, 0, 1) < p, 1, 0)
dat_ir <- data.frame(ability, answer)

The most basic item-response model is equivalent to a simple logistic regression model.

fit_ir1 <- brm(answer ~ ability, data = dat_ir, family = bernoulli())

However, this model completely ignores the guessing probability and will thus likely come to biased estimates and predictions.

summary(fit_ir1)
 Family: bernoulli 
  Links: mu = logit 
Formula: answer ~ ability 
   Data: dat_ir (Number of observations: 300) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     0.93      0.13     0.67     1.19 1.00     3312     2606
ability       0.48      0.15     0.19     0.77 1.00     2847     2409

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(fit_ir1), points = TRUE)

A more sophisticated approach incorporating the guessing probability looks as follows:

fit_ir2 <- brm(
  bf(answer ~ 0.33 + 0.67 * inv_logit(eta),
     eta ~ ability, nl = TRUE),
  data = dat_ir, family = bernoulli("identity"),
  prior = prior(normal(0, 5), nlpar = "eta")
)

It is very important to set the link function of the bernoulli family to identity or else we will apply two link functions. This is because our non-linear predictor term already contains the desired link function (0.33 + 0.67 * inv_logit), but the bernoulli family applies the default logit link on top of it. This will of course lead to strange and uninterpretable results. Thus, please make sure that you set the link function to identity, whenever your non-linear predictor term already contains the desired link function.

summary(fit_ir2)
 Family: bernoulli 
  Links: mu = identity 
Formula: answer ~ 0.33 + 0.67 * inv_logit(eta) 
         eta ~ ability
   Data: dat_ir (Number of observations: 300) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
eta_Intercept     0.27      0.17    -0.07     0.60 1.00     3362     2632
eta_ability       0.68      0.22     0.28     1.13 1.00     3153     2688

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(fit_ir2), points = TRUE)

Comparing model fit via leave-one-out cross-validation

loo(fit_ir1, fit_ir2)
Output of model 'fit_ir1':

Computed from 4000 by 300 log-likelihood matrix

         Estimate   SE
elpd_loo   -178.8  7.4
p_loo         2.0  0.2
looic       357.7 14.7
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Output of model 'fit_ir2':

Computed from 4000 by 300 log-likelihood matrix

         Estimate   SE
elpd_loo   -178.6  7.4
p_loo         2.1  0.2
looic       357.2 14.8
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Model comparisons:
        elpd_diff se_diff
fit_ir2  0.0       0.0   
fit_ir1 -0.2       0.4   

shows that both models fit the data equally well

fit_ir3 <- brm(
  bf(answer ~ guess + (1 - guess) * inv_logit(eta),
    eta ~ 0 + ability, guess ~ 1, nl = TRUE),
  data = dat_ir, family = bernoulli("identity"),
  prior = c(
    prior(normal(0, 5), nlpar = "eta"),
    prior(beta(1, 1), nlpar = "guess", lb = 0, ub = 1)
  )
)

Here, we model the guessing probability as a non-linear parameter making sure that it cannot exceed the interval \([0, 1]\). We did not estimate an intercept for eta, as this will lead to a bias in the estimated guessing parameter (try it out; this is an excellent example of how careful one has to be in non-linear models).

summary(fit_ir3)
 Family: bernoulli 
  Links: mu = identity 
Formula: answer ~ guess + (1 - guess) * inv_logit(eta) 
         eta ~ 0 + ability
         guess ~ 1
   Data: dat_ir (Number of observations: 300) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
                Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
eta_ability         0.78      0.25     0.33     1.29 1.00     3026     2470
guess_Intercept     0.42      0.05     0.32     0.51 1.00     2967     2548

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(fit_ir3)

plot(conditional_effects(fit_ir3), points = TRUE)

The results show that we are able to recover the simulated model parameters with this non-linear model. Of course, real item-response data have multiple items so that accounting for item and person variability (e.g., using a multilevel model with varying intercepts) becomes necessary as we have multiple observations per item and person. Luckily, this can all be done within the non-linear framework of brms and I hope that this vignette serves as a good starting point.

References

Gesmann M. & Morris J. (2020). Hierarchical Compartmental Reserving Models. CAS Research Papers.

brms/inst/doc/brms_missings.R0000644000176200001440000000412414504266264015753 0ustar liggesusersparams <- list(EVAL = TRUE) ## ---- SETTINGS-knitr, include=FALSE----------------------------------------------------- stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ## --------------------------------------------------------------------------------------- data("nhanes", package = "mice") head(nhanes) ## --------------------------------------------------------------------------------------- library(mice) imp <- mice(nhanes, m = 5, print = FALSE) ## ---- results = 'hide', message = FALSE------------------------------------------------- fit_imp1 <- brm_multiple(bmi ~ age*chl, data = imp, chains = 2) ## --------------------------------------------------------------------------------------- summary(fit_imp1) ## --------------------------------------------------------------------------------------- plot(fit_imp1, variable = "^b", regex = TRUE) ## --------------------------------------------------------------------------------------- round(fit_imp1$rhats, 2) ## --------------------------------------------------------------------------------------- conditional_effects(fit_imp1, "age:chl") ## ---- results = 'hide', message = FALSE------------------------------------------------- bform <- bf(bmi | mi() ~ age * mi(chl)) + bf(chl | mi() ~ age) + set_rescor(FALSE) fit_imp2 <- brm(bform, data = nhanes) ## --------------------------------------------------------------------------------------- summary(fit_imp2) conditional_effects(fit_imp2, "age:chl", resp = "bmi") ## --------------------------------------------------------------------------------------- nhanes$se <- rexp(nrow(nhanes), 2) ## ---- results = 'hide', 
message = FALSE, eval = FALSE----------------------------------- # bform <- bf(bmi | mi() ~ age * mi(chl)) + # bf(chl | mi(se) ~ age) + set_rescor(FALSE) # fit_imp3 <- brm(bform, data = nhanes) brms/inst/doc/brms_distreg.R0000644000176200001440000000601014504266210015543 0ustar liggesusersparams <- list(EVAL = TRUE) ## ---- SETTINGS-knitr, include=FALSE----------------------------------------------------- stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ## --------------------------------------------------------------------------------------- group <- rep(c("treat", "placebo"), each = 30) symptom_post <- c(rnorm(30, mean = 1, sd = 2), rnorm(30, mean = 0, sd = 1)) dat1 <- data.frame(group, symptom_post) head(dat1) ## ---- results='hide'-------------------------------------------------------------------- fit1 <- brm(bf(symptom_post ~ group, sigma ~ group), data = dat1, family = gaussian()) ## ---- results='hide'-------------------------------------------------------------------- summary(fit1) plot(fit1, N = 2, ask = FALSE) plot(conditional_effects(fit1), points = TRUE) ## --------------------------------------------------------------------------------------- hyp <- c("exp(sigma_Intercept) = 0", "exp(sigma_Intercept + sigma_grouptreat) = 0") hypothesis(fit1, hyp) ## --------------------------------------------------------------------------------------- hyp <- "exp(sigma_Intercept + sigma_grouptreat) > exp(sigma_Intercept)" (hyp <- hypothesis(fit1, hyp)) plot(hyp, chars = NULL) ## --------------------------------------------------------------------------------------- zinb <- read.csv("https://paul-buerkner.github.io/data/fish.csv") head(zinb) ## ---- 
results='hide'-------------------------------------------------------------------- fit_zinb1 <- brm(count ~ persons + child + camper, data = zinb, family = zero_inflated_poisson()) ## --------------------------------------------------------------------------------------- summary(fit_zinb1) plot(conditional_effects(fit_zinb1), ask = FALSE) ## ---- results='hide'-------------------------------------------------------------------- fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child), data = zinb, family = zero_inflated_poisson()) ## --------------------------------------------------------------------------------------- summary(fit_zinb2) plot(conditional_effects(fit_zinb2), ask = FALSE) ## --------------------------------------------------------------------------------------- dat_smooth <- mgcv::gamSim(eg = 6, n = 200, scale = 2, verbose = FALSE) head(dat_smooth[, 1:6]) ## ---- results='hide'-------------------------------------------------------------------- fit_smooth1 <- brm( bf(y ~ s(x1) + s(x2) + (1|fac), sigma ~ s(x0) + (1|fac)), data = dat_smooth, family = gaussian(), chains = 2, control = list(adapt_delta = 0.95) ) ## --------------------------------------------------------------------------------------- summary(fit_smooth1) plot(conditional_effects(fit_smooth1), points = TRUE, ask = FALSE) brms/inst/doc/brms_multilevel.pdf0000644000176200001440000107025214504270212016643 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 4222 /Filter /FlateDecode /N 76 /First 637 >> stream x\[sF~_qj{7:u,Ŏx@1I($8; IM[5[ hq 2ʬt%L&F?CSȄ5BcϤQ3Y8ZgBeu)0dZge8,25hjb<)2[q9ڡNKL8ˊ  3z3y "Je9J=&Q ր q'qeL7X-[+3 Ƃ7`T> ! 
ƀ91AJwPBF֨Ltރ78IRܫ 8ekeA8:8e-^ʚVaAY /3,DhXP6i$`XP6-( B&)8΀d-VZmsqRX :`@Aq,I2lz^0g{5kոBCu;o.3v2{ 85W$ᢼp-,ls9\+8'z;i-1isOykN^S^SfN\b {S˿q8|Y5WpO&.?+ -^3i.+ m6SXb5}~kyNK/AcN"a snq{ڹtڧmϛvxC{ܶ'`/_?_fڟTMsr!g&7U3,{#"2|+3cO=Kui_tp.YIm҇<Ÿ2ɍz*q=7d|ٳ ;\i l5 @l]jaB[|l%mzK00i|22/u{>jDCڌnhY"ͤSk&ldjmSaҊevE6ѳMlD&zֶqSRd_DxPZ0irZ&}.I?+bOC<2kY,hTi%{S8YyRNwם#.?pH8_lR/_IztIڟk#i\Gn O;vihKy؛n黟޾7z8؏~=ƗkuW KafU0TIÖ1Y&LFՠ"[I( A7H,?0j#:|n?UۅcG!;f?fYa=fV vqQGH]1VWU]3D\U=`6ekXs5)KY rRN)d_ޗhػ3HV5 &>%dU#:7*jUh| K&SjЩYS+uv ӗGoۉNQ%r66/v|4V [f5\!vA)uGvӟ 7ݞ r۟FA#H9C 3S?10tT!m~\Qq[MW!gIx Kb@?mDd$ы(VfU oG.BW 竇m|GxfdQnq9i?p9ґ0d`0P展.MH=]ltb$[#T-/wmj_W]]vH66#kbb.'~AEv'ϟ?=9̺nZ]baׅfhi'o15E2eY[P&g3L#O{;F!I! y„k ɏӬx=Xj i 6mٛ,z>X{l,iυZi1oY3&f93s=es01 {~eYo8_F`„zӫ. _sd)7M<˜غl`oxᨊvq:$Vj<^(1nc7[M5}'']cyucsVSz_GJI Q"~-9=,0p0Ēa#Ho\SR΀h҃ m)u6H穞~BP8kJzT(/V kWDA,̶ҵ{W-%v 6=^(BqWu3hdA?GQ=X׾VڷU#vU>[ܢ|dPNR*9;. 0gT NBB)@)\ &n<">ZtVKTMTMuT]|q-*ľE9TMTMuTPqilҜذ&{59%KZC"la8OZ*T"RT}5x-U}||G*ԊԦ \֤6=Hb R=]"HfmhgV~~;yukSΆqs\ͳwOQZU U/w`h->uSNۉBw% m/ּ7XZW vѠWo%\+_ % wH(LM߼3kaJ$Z!Y7V#BnAAi I!A!yӛ1)z A֝Q`C%"tg|@!]5{.jvÿ͆g^|22Y9.<ؾJ00TM׈9./Ďbgɧc^O. 
E!:Gw?*Wn~HE8Ujp;>>=z޿)T^CI5,>+P䅣2p E?a0U(ˑVЧC@LFrHH[3CXz+9?ʽT7n#Nr"9%C4B:3I]ӂ8}- n+l!UGӦ7RpnsȄt_1ęP`!$,v5+ G1 >>kyAǛ6N< eo4Ns|/QA ёkDMᙲ~P~dú2b5bGoⵕW>F#!J_\ЛibHW@uTJ/룹sl$?3e endstream endobj 78 0 obj << /Subtype /XML /Type /Metadata /Length 1550 >> stream GPL Ghostscript 9.53.3 Bayesian inference, multilevel models, distributional regression, MCMC, Stan, R 2023-09-25T13:35:38+02:00 2023-09-25T13:35:38+02:00 LaTeX with hyperref Advanced Bayesian Multilevel Modeling with the R Package brmsPaul-Christian Bürkner endstream endobj 79 0 obj << /Type /ObjStm /Length 3367 /Filter /FlateDecode /N 76 /First 688 >> stream x[Ys6~_ǤJJI<凑DI,ϡ5HfHJmɳEQH t7&J&YThǢfX x-S%SF"J)6t'f$&Fd X8Y2`XDrP3T`hjXZrYPFI <0A2gHN"sjVb^Z$ Rn1kTWJ91gDzXjE7 2t P(C#~&BsatҘue4iPDtc2h4Ganc>#ԥZʣkG H# !\CUVTD)@e5aZC@Qe wro FYjIlFqTR %STD)W+VCUn>G6&fj$((?~iꧭ~gݞ۳u{nٺ=[glݞ۳u{j=wU1z6ߞx7ǰU3 5/PU=QGŒ%LyBCϻxQ|Z_EyAMf"/\]|v῟<ͧY5*Nד{q9;eu9;-ˋru 0 nXgM%$Nʏ8ę E :Qq2>]"˴H}w^WTW|Oʣ@\.Lmi +R[dGVnzoMOdŶdxRޭèv=lI9{yds-6.ܵыORHV6aoFCG nT{'n41P:6frn/C^+bwπu(p o|.l|mP*m6_齕t<Z"ʴgj"5Sfx(x.^b,ő8Ocq,0 Jq2лgϊ(1S1<9dUԛ8/X%b)G\&k>.E,[|?b$^Z-m MPKEԚBE\:qYy;z:Tĕj\vuEZ ˰%`k٨soZoMɝ~sWnK,][~rN}8^dgYOE['rYK{2^A;"c#fWo꫃ҳqm!LpDśZX;<䌪2MZ6߼}dcE5z2 bVy=x6~K l|&-;`tq:0Ӛ'X{%زڨ#TsOK_ $lJX{{dU ,ȗ/\E% D7I׊i(+$R\UdS0i-Ʈs)8n/yiH=bJ cifP\h^001M`jK[\R:)%`XmyXY<{Ԩ'9"e D w ; ( }y7};R<'y !B D_o F P=\; :Lk z5pm5JpLq3`Pժ/ L˱FH7Q NnީB}_d x8 }4Y'nĖS`*^YLk݌dx!p)b![oI 5˯(xJPΗ,z՗s-Z& P7\>q U/nFJrGMcNQ]u ]+~'P>)Oޡ2:k1crU^>)7ޟZOsD3oa:t<ǡY / "9%n5=ٽNnMiRxnjC \%~Ӷ 1$S&m._m}y s[s0i3bS=Uʩq`V-wc%?F*!#t9'tV_yUBw 4Tyǃgu?O%WGEm#=>vE5ڜ5>lsn8;-gkR%c%#wUnŷۗ8s2{>~[zya;G>Cjl.d zQ3am_D[8C:]E(O M {᷋꒓Uڒeo+{^=S8JV`-S8r=. 
-El'We[cС;hfg58˃gd|r<105OCuL {4]}pq~SKep F+˽O{:6z8ۋt1 ۵^f:N7Ǝt5ȍ0O8eɅm@Y#y>qdBuY ѐcDES&:PNq싯S057M1k &4tSavk]ޮ!dxN%O0ٜs7vm%du9)Up sNG%ygC :"IQ "Dyb(kDZ  Ɋ0ΥVyIϥTP]uԓZT蜃8U%PiTPF`;Fq(褹 i&M`awTWk0xJppK2EDTBs*=HFg||\(aPly~Htd1/~r)r[pY۰H^Ro A_iD>6 NmmfzM.9la6969z#&/L+w˓bQWaċKǵYQnS[&Sx;,OOٖ{.^1jd:''Kܓf9F 8ct @L׹7U *Бk9vMzLM_=L&6nendstream endobj 156 0 obj << /Type /ObjStm /Length 3079 /Filter /FlateDecode /N 76 /First 692 >> stream x[[s6}~阸:θqҦ4m-6'T.= R-ʮlM$nb3 Lgd:8&`i$ ߝbRJh&A g4#a8LDL9.0#ՊLE`ZJHF^1ia *c;fDD3 :GfAA0e =(f"40ì,稰c#_l h /Q a r""+* h]00 -v嘗4u#5cd`LJ2HH=Fb>*fA`%1b)hcJxR@K/EtJj,âS"WұSg1֝:( T J|xjLaDT$FR*LR41Ja%)P eCo}3@)x:W;gz v}i-㏪YSPZ6?/eC lP#3:jqQW% Jy/hqŧ-|=/ jhB Q'IBSuO= i0$Z Dv_ z_TE>־◄`R5y6b^LySVqY_N_3~Tm(ǟL#m3%)8}RN (;4 K>_9~gϟO]Jqp\/&mI9:O T˺.gqshK򓦘4B7 :AM5_y]i'ͫ϶ylT!?<)uCGxY4gyIc0 (vsK^WkW[ _)K|}FMl,\'gŀ۾h*µAִ}v*õ*Liܶ¾fݸuۯOiYu2gM_w.m.KQ]mSC[qCa q5~cZnnS6u`mz#??_c~_W5|hH]/IYUj_(l>jQOx~O?+? 8///;>ᓢ~/Vg!e1ӳr{~Q5tu1-[du}'^O7o>V|q1GռGyu⨕諷Iv0.nj7h_%zg%VcRJfQz j7ct1f6[L&TQI8M>sPxo{Y9{C?ˌ!S`{%/(Vl 7f}!29VJ`3XH#d&c2*#p3#g~1XNm~JYifE1bv%δR%(-U&tc8;ydp]AV.k?,uAN² ([:Gv|w|w|w|!1bo~OϞrծ9peI3jk!;KCJ E9df4!ׁ*B* 0Ie("C "TzDQ~HP͂V&,aVG"EV*LAz ZQJm׭(y]k !z`C4Yې¯`S( !%PFd5(0 v+PI06.ɛ:+"[&yVHlM.+hjv0/':Yg@0&T fv !︁ "Kq(wm.,$"1kG ި57^8xKx!VS $H+%}zIϏي;ᣜܪG/^7/iQ$St d!RCG+RVtnОki)S)+vിz==) ,i4d&{y˱ #>L+-8K?rK>S>-gzx QwU2 p}<^Ҥ4 kFj8i;gGhw<լD;DZrp=ˤ,3 9IFw_c,R| >v)u!kOv]c13e2/AxZg r6tԝyxFOq;1Xbvл&b lƒ d0jcFmZOYTt&Ae,%%(ryP[$ʂġ'ϤSeNb3'ȟ۫s;&e|.1y ,0r VсAvo:9`0uf@Y`p0DIvbFq8BeuvvؘtRC|R/kM~Z݃`lôZM]CJ';@&iKԽdCH3 Itef!@Zzvܶu5}6w`oתƫ Qᵱ0:p 7\@DIߔȺV\Qet<`f(W@O.p,da%x-?~HIup,O> TSj|^}l.F8l]4mbh IA4#mՊ66hTڑwׅ.P F+ p_1yȑ;HUHTېꄎNQ={ZlQq_"/4)_6ќA3+V:d~ :i!AkglM/B(B_g-̓D<6AclߠhFt쁲x.< s bOKbҐ GsD ]ܿ.npTPDy(ڸ.EҞ˹ۛ%endstream endobj 233 0 obj << /Type /ObjStm /Length 2648 /Filter /FlateDecode /N 74 /First 666 >> stream xZ[o~ﯘg~) F:mZIH@;3)Y2 CY;sfWiS2Znjph=~;6HF0DDG2EGaT SR/V ?W0ԉLi%:i1nVa3v Ak8zf}3Fbgxx"If<+vvYs3j#@59fmU`q ۠iUd6ҡ^0'M甦bN9iA9w dq ԺhUd.¦^h yx䕤!ͼ)d,(൏ $ &U,Q`I2Q"&GB 
J=(rEusgqJ7p8U2RB鄴65,S6(t:'q{j_-n{bz#  ?j64͔CSnp%*Āb)1F8aM w`hbz 4:CBĬra =lB7B73QBB֚bD$mk&A3vSE1vBRT[gbT^Wg 5iy3"K3v b ,Y3}6/6ZpzZ׫rV'SH cX\!6~tizC7  cCo"ۨ{b8y+^mh\3u͋|B*QBkMnmn]n}nCn;P(Zۼ_ļOnwC~zttmFXc>㣷:Jet60OXeLgxY|~t.n(JNBQن $^?z7oO3|8kz_W$_]NH:ٲi˔Dtr{&hepw欽$EG&S, %^MoQ7|Wf w5X!H/ך,7Ѝ5W}~^׻^5 mV+"!OIpuaiћw/ޮ9/n{໔nO;CEK|sJ?xS=XTj^aN*ِ'[xjCzS;YPN 9d@sbyV/ΉCӃ4G%FzRx7ȤQ2( .s/75E #LxeAd\p7-7(sFqYx**QP*ɝ&`A$qSd7HIerdMag &%"t`LJ*XcҎopaxU0iF{PLZEWOs\~+HjQ#6rIw!$cyɠ60Nt>\NCP2rI;&AXx: ~[X ߩDQ "ÀʡE?EׄXXzIW3ꐠ2b'O5Q"; i;Ja1i8]RL@džwwRFzL!cd1p=TP j'4pYH֬PekenUnunMnu71WәiVuOm.f-s,|~O7lv:.żn[5lOG/9BΦ[%!yF-H*79գ &վ^$"zr=;Zz^/'׋jBX5M{99]^&7e;iТj % VR }qS=tϻg-5R}:^@2|H&g^#$P8DŽHڄ : Ԉ>zDВ>L/S0ݲuDCW T-} =`X~kerr֬eszK[ N).$TU>y|q\&^z]> stream x[KsF󾧵#xDo70{Z3>H{|`ݢ eY 痙8Ju^:=Ǚv?\kU(r挧Z yЪ9؝-_x+Pjm-?UY[V:U<ѓk|7]Ǚ*_{gtqܰq[(7^ömMcw^O'ڕvpڋYzu'Xkn mUgLnZsŖ({ec*ޣP:ˊvWړ?>ĿAq/oQ kasS8+9E}s-ė{{ NοحPcmiA As7{>.~ߤq8-ҵܓ &(۰i$7^+݊Yk<0ɺFbKڹvAʃF1(S x,g28;A(W ;fn|(M{sa-`%~rEђwJX;^*{5`~.UP h7 mt#'-FN ^! 2i xcy }|?d:N썹fߥ{@Ӡ@,'憉ℸſ-I%jG3,} +i\^qlȓ:CS1:3BdĠ܌b6uI\|1RD!Mt8H-mSCHƁz宕q>a͐2(u}B{/. E8ea-J\-pfm,1pa^ |X9ŮJ$PPnOcj (?5g4>zQ:CL =XUk 4dpNO'^Dž:bh!0&adtzj [&"Wd6 O.z2.(-U-SXhTsLM7:6MTlJ#gh,mu ZGo3֌qq*\$FƠ^ԙ= IV4rgy{Ng@<"{ k9I&auaur$z{qF?nw%K͘"uNMJ.AcE@r8vxC>5"Bˀ@[acBѪ渍 ؍m#myyڏ;4lu >0Y(]~8cy<)gmE;|gcH E3V y2 F0*TE(k I%-#pxŸ? 
؂y"AMt8ߥ T r,"0{HtT`BHes4p嶵V=0Wf/h$nĩzzv{lpM+4Tc:OwYT8 hw/tNxl?PvDd&~T #ŽR^11V <KS-&+Ƞ]))ˊXQp v Tx~⍆PR>lPS94s19sKJrz<~]]v Cq@{Dx^鈤bdRQxK1nK%D7 u͒Wא4jk2~`)zIU+z3*|Bp4, Z_sUS ' 2o6'3w*o\[1DXGm< 1yDjL iAG=-e]iPAJTEy5EzγP&Pan/b1%W7R M2GP^"nmRP]󠇓 EJB_,"p  Uv[yh"{2~R ^DŽ<8 ZP/[gP!wqXȗȦ\ 4Vc/Ǫ)OR/{O4zl(5p,fST>B'Q]4"ƒZ[@ g= unT"Ѝ=ۄU]WBXo4UhÐy =X=V]ԯvFڞ"`TTK1:L![tTs1[q`N)LÐsp\Y=гCÉ@L!mE%S4plr v֚P 7kGZ穦pc!BJXb9Dд?R߅R/}vV%gQkF Q'!v#H hv;0Q_rZ,*c+TAn~"i3)60U|~Fi3_|tHqI$"uKq޽Qh@ ]M:RࠑVޖ{Srf3 yJ<=BA0ϴe/ 4HmC/ڇɽ 8@)7v@ձ"> s\8sELw3:|K7*CЩxklaE.)ppL?yU0x9caD+K ufEp)2={%UA G M}M7F#҃[b$UIMBWx @z*)ԇ:~wS'4Rȹ\HtS&ᕨ'Ma">G'\#{"p6Hkv¬c43֏"^ Q:KhqA.O: "FR$rlExo}ѽ`܋C`9j`(~l]wUG3.*OK 9Hvk^i()Bz';IH g@bK͸H8l`Z2gV.8.Cƛ5D$heb x@IJx=(9\ uH ,>. 3sL}c#ᙘP"9^ ~ E΍ #׎k ,_iTq0UWP&Eoxcyzi&>=68|v﷝#pF`2xBW*j}J&'qm[h3Λ n(!4H߰JW@4xE9MXU?ɤ4̕Ex8UUc9e*r$c+8~&5; ]G#҅tSѰ=8Ԅq:MAL.d=@WIYh@o8wQ*BܔF:m!k*~YKT h5z/w GLL}宍BPԌGl( Ho$ADsSͮV ƧECY;/czSuVV/_"BJpECjd|F k6&Z^fYM} yW|b*9{_P9uw{E澊W@f0:k)o@5EXpӚnx}?(2nAlDlKYLKLLi &~9HTcBbXA4U^@>@N p7Paoa 7LwyvvG]/W&>u'q#I8k$mmZK+'d1uC6o'slVL]f )%: N)L%i'r8 |R "҉HDNr6ZXWR[`*c|uM+>)eFE<J4F5&wbuk( O/\ʼ{a.ZMd GL'nc;(SV*E Ka6)LAaIm)NgR{tRwY8Co< $O.}IYPOpL?'M"HgWafHHᒧɬCIHnjoצ'VAs!I)?`t .l {PټWjcg}2&Ng9jJK *+ @`?[i'5wM%MmSoǾ=MD/ecqSX+"3!Rm]Kq͗ ԶeIi\.܌9I!)@D+qxL>&St?ZOendstream endobj 309 0 obj << /Filter /FlateDecode /Length 4951 >> stream x[KsFr$d؃1 ((F3ܐ#f|MKtFC^=83 B҈Tnˬ_N_mNӛ_N8==6/NO2/OҙW.RӋB,dagK,Tֲu],װwwۨz/[s#HxE0Vu:a/.~T.ά1o~Tr^7Wi[XBdyh2Z(r;uu 6܅Qbx ՅHwstc,B3'Cގ$D+$]Rkc$E\v69qt`PT9*ͪнPspQWp\cIx+ ~;jriDZ՚6$-awaig80~PO}'8xP \]Y+g|3Yg6þMU61G"X '$kIcL ι ^~C73>ז`e&ӝ6wwOsLqAP%mILۂx&ZyJϫğhAV:t $[e-Mp9vx7oSRqDn1вd!< )%SHަOA נeR=7:> ܢ $M ($t[ 6!J[|fU gߙ'kvY%^ &D=Mء2gSi#3 _igK@/WkL kXJԦD^Sm@Jwۉ$PG`_wN) R0 jw|u8P x3!+2jwGSO\%_v0F!)%AѤg'GEUnŏϔc'W&=gNFqc+1QBJNeh|]YLH+Hx'#Q,=_ZRV|縺qs$0XٵACQt;Is i%ӟ(zx*؟\xI O̐7۱ɠy`c< ҘM F  JGjC 3Pk1Hl er v$߄F2JFΛp"ąa8Z vwᖄF?ݑ0TvI0_S~VƛЄS-K痓#5s2CХLhuqrYOa/yănq&Q7E,*p[߀_0[΋hFl[ms*!)=%!#{TG+JMFSs_ Oʹ8XmP] t`4r4U<7b_w %wmFICEJOio0CvL@79DS'u\bm`N(xb#gj '7~TJ 
>EslhArĀg9)HW9JwF+NWW>S_rQǶ;&G ~B:ðjr[PdIbۉG=P$P'R=Z_Qz] pDW~TG }AVӢx[t># Qb7{;hȅWnZ&ϲQxðnדBfW#<,A|DȩrN9{6=sG13n;xutOP\zXށ'TDA⻺) xQX`GEX?rB-w^7\8@TvTx%(1Oߝ1fGĥfOဲHFlݖjHH ܫ%W M@N߷5[ϟ*[ϟ9܎xBlMs@Ũ'D?r)+`,opXB jj9/L@|Ne?Dӯ\N?;?ѥ1@ALCBRͣd]q'h0V(8V!U_u*u<>N4 O##331?Aj0ϓ<Z "qWwWTa0 >AdI!=nڳ/yIk_We?oitw6Cxרh`|Oq<InOr{5~ A>z¾f8*S=> stream xX TW֮YBwF'nc{DqKPӍ @lݗٕM6Ah\2DMtuffgIH-x̙cYwﶂ( ֆB ,x|i䢐$Tғ-7yk 6J D;0Q*w. 3`닞Y~1 .m  tՅnw]>eWâ@ a~;tavlr]x뒵{L0 ^weQhm60(8}MK{fx3c58ƇYǬg&2IFfyYȼ,b0/f 3Y,c1˙*UƖchƑqa>cBCwGƊI`;탖 zK9Qa5hOU[M̃Y h3az nC`ɔюNXFtJ8ïE2?,޾ D,eK&$5+gV +KXDg̊Z|QJUAoT2ch@;q*[Mf&h% Bٙx v W>d=g((=tZ)JJ;z@&Ʃ\ h (7_~Q 8TWCr r0UʖC>aB K+39LğDzpz!h-R 5K%60Р0!qMdS{ulwÑO>q"q cnZՖ0G\8^r7qE h -8pV Njѷ8=\SGs'\U؂5:x 7oFw0// E(nn_zyIiW9+ix^CJ5%AU}ǛY~9 `>q?p~frI2s58F&& {Q~sW΃ ܍P&!-6[ƭ:dBJ>195?s'p!dQ8l_mXlNʀ3 gAǽ Vv IǍiԽ"ahH,2AqDـKP Ηqx>:Ӵ+ZXZ';TZp(Myd4첐A8BƠ5N0!E>U=ѝ+wѕarZ=tRv?pI-lv &N{Xrզ'@9wóxu$nRm3ҳNvU"4˛Oxnb_y) fgώ Iofm`:aFӃHb%ui:N2Zv+-`z-t=Yig #I4V#?e?IV%'x &D^b ^cqQ}N) !۔sg? 2to]ݷA!bbsޤ/"G ,7c *~HlFBzRn$9BKлB蛮;<+&# (O?bǤ0gsgfS)q*Jz[nШh%쮒F):~26g2MiVA!sv,xƒy9Jټc%/io'rQfKqml(FqߑЏdඃe1K9t; 8[XC(h!eKJ)& 8}j1 !i?dѓR?>PbL|i47EG0eePJiv@;׶ˌۨa;od~A^`E%E%6\֟*<ReMVX9>Ce,c.XÀLfEQ % )?R.@ljj1U2kdJF QJzQ:z& MsR |h![u=DHHH{ʉ$J(#ic {Rb Qj妿5^v7 "=4Øet9'GE\G{S,a*SUOJj% S.OJby3쾒7hJFF\o_BKMFĊzU)v ȿOMFYX{jf94Ck ƣO5l˸p,/8IIž"}FcrEKjLI弨k9F/%{O|_8H@g IPӚJJNY0PRFwVl(=0+P94[yg"OA8a|=2{Bsh'Hn[]̉V0}  MٽA 332KNԿl'2s{9,C "Fd-ub:_Vo_`c\8#1R4FSSED =S=g ֽ88ׯ%P@X[_zG$ڻ9Kq%ݿ|+V47 9MH/; =W@c[hC\X]XQdJM=<}ıR오^>Ν[_|^[Dڔ4k/W_(BF?u}9{M_W&iflsz[R^"}Yok s?`+ E|lW \ qS4ͻjognj2F_t(BRpt6@0QCvD} 3zˈeqI54Zutn(3zd :F&z?8RP:Onh!q"I+&o,m(=\!9מQ \G뼅s7,ӊ$D'Ў_w ?)hizI#ք'ZD/ kջ݅׈Hfy~S%7g -7!%Oǰg2R!Rp\F+,WB Z't:G(??L/Ww̑MKe;-l1#<ٻZDM|K.t7kCՏ)-uv_W3ۗЧ  :̗[Rf> stream x}VyTSw}1$!GXľĭ:Upvf"ڢ( Jd a .,R}.cUpj[Xڎsm{tť֞?}+FPcY*h"Ƥ3|WiROf >ae) '? 
L.25SRpvXnx|4bKT" Mץ'%$ff͘1W|CZ Md$q@ ?r]ejN$Fƫt՚0՚U!%V ^W_R5FOK̎΍ '$&nCQ+`j" VSkPj!Q~TZBMCRseTr\)#bD9P `ЈY#)"-upt(vx,3*9CLsqwFnvbTNNS-u6[(P~*.rƇDB[d~LM_8%XM 2E(>t\Ci$WTHA;EYdx{5Tq}ZTiѿ[`Nv>zK[R ᰁzc}d<ES)k95aE\ҋp>S4UWP3yżn2+1 yE%ʵzZo #Ӊ;&8x|6z|%Y8 Szݮh'38KE=`r/4A]^C3WЇ2ۺ?ǩJATkGdjσ7'aRV@S/FJӦ]`CdX;'E .ѕFh0;@aAq[9Wt$@jK^O^`.t°+*oL9.h ;Ȕ{oǍ@:q 0gnP=Ǔ=x_m٠t2 ^FZTfI!9d1zCV6|tX=-"f!\bJ Zᰉk2dkUf&l|ၗi _-֯LJ`50IȋT$WqarG"`Q ?n\`:c8M~L!JzV|;Gmna8/Zy/Wo`+د2a_e]8&ŷw'(RӘ ee[ʸ+c9>DtF:9[$aws{جcdMN;t~7֎,9z#(!OkLA!Iñ*u>dTp_Tu=) Bm.]j%#0J&8´zW`;.熮N)k*wV ePȤko߳wGNSŋ)=;4 ^A븻ãqa&kxpW#?WgH~n j "Յ1F.g~S~0WxbGw\n8 ɶa$LٳdO/q'. ߋڢ)hkB?mO-rS{h'1RGs:F_Ԡľqr,Uo?.\Y\ZZ\׆=+M#'+{m׳Zz Wڳb9K-"QU*y>ɱZ;p\'%=y|tG*6oDjZ2Ѫ4g2 ~P_Sݲ3dn VEi`y= PZZaT4e*Q/0lyb/4[D-F/9[u5}C<Ի0pO~ =-F 77clȇf/p > stream xWyTwPd+Z6n-cEqWEe[4.Ҳ g4yNhF5:n9?ɫ&љ>_}~%,PvUqby{m0M$!( 0[!E[ڀX['1( dyqȨdto^^leZ7E8lSL\ZRL2,6Bx)eqia3.V9*leAʀV+Zb)/F͍OLJ WoZyydT6i3f#)j4CVR5TGͣS)?Q_P=@9RrjD9Sv,dd~Ȝ!-|,%R{F6MB CVV׆ vsý7`ͯ)qVTWW>t2XBj TB ?yoX:#Xeh 仁8Y=(;<m ՠal5I BhX9N}M>&G\cm9X" ",az\qDW肮QUѰ60Fyt7#~'eD*gN,C /0I.?-mV 2Du#*qq_x z7r20@%Q4뮁J|A50..# _%U|h[mv),,oZ$C)OmpL"fTYqP%~}L7w $DĆkb"zd0nh= /:dkLe^R4 rNݮOo[DOX2b#2TuR6,. אm,ʯ)V%6:.6="zK ›"vבbG6t/Gih>J^ 9\’ Aǩ#PZӕ)5Y0+9,+&2M-5" Gz^8vAh2giO( RfԐaPf@cK)[VTSv/f89tNIT+yB$Y"LQ 5;U/UUu#c|~$9Y 3}GL/0A}rXE9vEa[C!P8`O:u. }wOlyHqPU^!7'7 ;4>B]__QYˑh^zzǰhd^PΡ T| ?@@):e'/v RNl 4{c^j1JYkywTkhhTThhkTwwkk7>d^k}F:[$dn$=PͶ_1hVO&xS\ '32b l3kĪ4 a&j(N,UcE>0i#CgR >i| g[83t7O;Nl !8 p 5H}ȸUs(a}9')倒}M˭z4Fs)dQamNPT0nLc-EuuɰTOLSHXT|дQαJ b(uL4[oE ~0n͑|!Ua'Kfˠ+aЕFb o ıѮ9FĞ1Xψgl.Fš3}4$?Ei]anniR EY 4;VCfuY^BToq'0O`? IK_ SMyB&\>̗? 7X@MѮ8d7RlHp]"r'̇eU݅k^5`sḮ&92Gۙ蚸a7H2{iќmVߣ̓Ʒv4C?fо|=E h, -wr*KW~j(QɢH[VWRg3|&SS&y ' ;EiBVՕNK O݃0TmG{\,D,%{hO+~EVNB_֊")5(xш ϭB qO{E|I¸'ýW/^ ܓϕn|xwy'&{q Rշ@DZ>)x){uƪ.CӉƺ!7bGF+WumV${RM7V( \gmZF!/PXx5_UVPb< lNQOendstream endobj 313 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 8393 >> stream xyXT2 b`;X; bE齈PY3Cu`(.jcL1b \Kbb!~@Mrs}Ù3k (c#J [oo5cgKo!Go ݰWEW[Ԑ%ߢ.  
0ɫLyairJ ^AKxzZΜ1cԩ|K(%,׸Dtmfi"=-]ݽ\|=,<,7o[nkg~;(_}ieA˃W ]:|MHuQnns;>[NykdV3g͞3ݹ/xN|s( P6Xj#Gmޣަl5LMw-$j+5F-P۩TjFSө j%eEfRYj6CޥSs Vk:Zl*5|L:bgNGF[LDPdo2<ݓ1 Y6R5Y4T$-J!Yu89fPHN[%+z~NPӁwy-A'ăIXaOQ?4臧H˒'؇99:ogp*<|.C#4#ǎ#p8ʹ6;ӳ]N=vwYJ87MJ}関"6E!q9^N$iGYT.݀G{KO K^*9sXCc9<аfT,Iah1w׊oܸv+:ЛI#CVt&[ɉN~{(D I>_@ɣwBt=$+ɹ:nkzZ̓2ha<2YA޹rܔY@t~z3exf)\ehQ(nT=+~V"{ĵJR>dXWdUߪ 9A)#q$qBSU0T ))q:E>W5C iX 7ܗ i|_4#iF&9ȝi9 ݖfWnW^) 2Ga:>3mAkX=K.0 I6O!K@OnQJǝ_P / $ o~E." o[ *{ItY zB #4C*]RcglDT2Ȁf(#UoMPˌSPYrycګ{ *ӧ OL/mMFzQ#nJ.րR?@P"HCENLn!7Isv`;h:hhfϦۋv3jU5%eUyԲꆓu<{̃ަ\\xOVpbO;t/qKv9nQ~k~M"VOYqké[; ߥ7ү+=;)Zδ}6 f {H҇TIR\J#ո<#!;vK7JagǫJ>,Y4x$?xdeRx\͐ehbdbԬdɊ H0 s(,.S3|Ӕ2ԁԪ6ϰD`;7mL * Raڤ‘ٗYhu UXIz&fTWr]!V~Ww n;ϰhCT4>{[tn_i4aV%ƠJ*W-mBgdH#!;e$Di?} g9xsZ솅};,{V"S.{8{Ď8U^UqJHF* JG'H@^hR@1ls`. J,1UEa~?_ɢem"d 24M=oc!? ăp"&-9h3WU 2Wb,j)<6:40PTܫA5(k?kx(@/I>A$ㅯqO88^-4RQzl:_\f"U U5i?0C3"=?2ِNÝ0CtLٹ=Q,I"3~,mBFDD{PR!<&l)qCՆfI-m7z%-.z#/\eoxwNDkGeIQ!1DA|/#nlA*`P<@Rr`Yh}c^7~~ 3 pu/K(!O$x[+*2UBVA$]/p/ӓժ4?4;N2KeI`_'_9Z2,QHLeYAi}UQ\Т,o5E7W46dV%OIC7?(40%@Ͼĸ7`fju]^&&˪9d&ʐQ_v#Z29}yo]zsωEGGryO$r xws>rSh8<&۰kȮe LRd&~8650?R*/7eP>1+Qsp =>)acgu.rek} J \pdTѫ9Ho3-3=a?ʠr9tFN&,l- FrHH"- ]iԵKT޾ c=<b?8 DED69 ր[;#nlPvgIV,Y=DPy:mʀyS>Roa"K, G@[,ㅴ<4`QuFhH=2K7?HCDV?PVg{xZ]Ђk_C AWߝ[s˷{:<1U+@gѮR%^~-3 A =̒ہܔOV'O(ofsn:jT$vqۙjMn& M?IzY;OL)q/ 4#^N)HneƒvL69´f" 8ŕk߲]c "WNШGY(K/I" $왋9h2@䤖B){ f +j h!yFUA6$-WSM$zwZ6r 1qDƃ|(-nhzA_V;2ܛHZUIܕWS/Mxݴs_lY~ؿqh<1<яCbV[pA7z8ӫ%}'BK2;a t%»%.k: kOʃKT*RxmwEAϮ=! 
f,6A׺}ݿJ3#ӕP ߗ4U~bBkߝ/ɮI{cZ:qxp?$Lc%J?傹Fδ3_=H- cU7DBH*ųXUZj/@UR+ H 6.w!S\ex/Wi g<%&h4ih>.E 3z/^e@/G/hNԒk!-n^QAAŁ9 N J)LRzJzW_!}8paA+$G0OE )sWR,+âXB--Ox%Cxw6TCMBBJt<AT־bAA.*Z/M4C#  e "**}s&dcR,?$B2IuyvBnAZNEuI:xr|9L% ;PVwF7%p'>l`I4 =Xfղ',|Y~+f^J}Sla7lG]~ EYgKр/PQs`3LaȎ4q"(s!?wC[)sd4`sNW1݅$X|ÞAyҭ'ЊLOgL+oS/)q5W|f YxnDVw`lb=})7TEUy%JV|t0^0_&x3 +in@iIB$pww/|J;ly"k2])r'i0*.uSoe*Q$3L(f6|Z伸!A1?'iw TH#a8/&;  J_cJgT5nci3=}96?C$ȂdeDŽUDQn+ӡIGAbAF5N~WVTf71m^`#h!T3{/2ŋUZ*עX_YW㕴K&.ι> #ǿ#1ȗOhd-&L?\ 9J r2hd)IIZn,.컲Ur;DYek=Hc*:=7m}<2wq ̿cJm;#JΡm=:v8Vntb-OeYL!~7$Y%E:(:w]Q1Xϱ% v 4H,#i% )L-ʾqVsac< [xcmǎfM(xp @lԶ鉸k^zR2huo,Y{賦y< Ei[ܸ`Ms`"3U?#;~ǎ6^x)Q덛׶.e˻%[YzBݵY4daMq?u5U\cd?hJ~,0;YWPXa߼/^`}h;vצ쑬b.0,/aQG6dޞIɣa^jS`Mէ7Xؑ3TOں]#_< y~6#ZIkS?Z5\MV& Z'%A +rt@:sW.ңWL|Lf`\zӑᅄ-A-Nd\O>8_BVI*XCIѤfKByG7ze{O`jҭGgڬǰM7>` -a߬C "PδtztLOdtRs{g$FRyJ-D˾賗>de7vX01qWP ؛Ͼ6O< |#6W jma!AwckΓ]̼'g>xLPwx}i@&7ٽBi ,ɾDzyr3o'[PTiu]"3LNK[T:n(2k}!$iam4Вh ='EZm( ^}M.!͔Q4y|+J ڌLlА9HIQ(!4Jty< 0<2<HKy _dېQ½?W"{KpV-cܲ?e8lRl"SnIexEoTpXpyh5W`ъ|oZw+iiX'`G` ,IC0j4/)j& W,bX\EPz\ODBDu2D@dYP&{#;{<@\‡o!MU6OUBabnP1>uMU@4][ZQQVix6,`Iˌurmktiɢ]t 4z䞒7٥33[1oMǿ;O?cW+VF& upu2 4ރL'w[?:Y >=Ƌ n^4{K,gٺbC\ʶ3#?t|B;5!" 
,/$# ~beCr@5;8L/-F:N:m&Cզg DQ{:endstream endobj 314 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 8089 >> stream xzxga1`q脲%$4/ 䆛d˒5QoEM'HJvSܽ7nO!lfxsA!DPPГ6nLx~ޜ1i)qg'9_ eʆQ<5􍉳1l蓨q 23 ;%)97ty / -]17t]L(S#J Ɉ]7wMbbJ̌؄䘴Ј۷mzf>7A˗gX2kUXrV[N^ nca͉I[SE99s1 _X3gMbXJL&‰)b*1FL'"vb&I"v+Nb%1E"0įyjyb X@#/EFb1XB ~5ZlP$cRyZ,HiV 5M֔dl%ʛhL=܋hE1HKy]T`ځ@ͭB:G'/!^v7{MՠHOfn2X^nt5gP8O~ݵtfҜHi$J%qMnL^UuFPH^J/F0BQ6SeKY}ϭUЂTIs(u7 Uф9LA;iǜ/QlvG>9 bOvhIsska;ܢ㴘,f'} mq_ b~wk>+u4?q0G[6%]6/k몘Ştu8L8q7.н~UC}A+<`䪨-[$79Yc:${,*+GL^LKI>"WeLmUUfY¾$~ 9jZ_DslL&GQfl@b4w>!3oX:OꯒvI)řn:Zf**7Yz.m 5d#)wI|e*i:(1 yE&6~@لL^Y4z;[hW70-h[> d26Ї"X9.;`,PSm2 *PhG'+==߉fXY[hSf+qN/''!^/pyaF}=G}[-t8v9ۜη@X낪~ߍ nZ}ig߻am!f8޿wPC0jju w_\*?c߯јd$RMeuvУ.k2jף+J.df2JutiF;kq&O&s3a.RN_o0rYxz_ؾI.~iau&CQh2{%g}F9mZD؇q31-6(m=Ux(\ݭ^;k<ρ~ 5<"MHI*Dyn67mQoحGkO=:;j@-b{ZOtåޓxhhƠw}ĈVJ2;}8ͩv6A%4 Rɐ`I <"2c yAaT VEw)Qk((--I⩰{2΀_ 8dG7mt!QY/쌏RUrsNgN7q h YpgS[J2"RwIT]l o#L6郘-չq9ŻTt<6(,kul̷˳SZIl=hSh|@а7Q{tBJ(fIP3F$BÎ5J562XH.>=4 GtZ{먫iG=kZuwpBaHa/CQc%Sld$J4?#y`Pԝv:OA0L0:Kܣ`?N.{qW~D!\- Th&]R&qL9f/-r&Dl%MPAJ=X[qv[dw}4 tu}30 ljZo4*9@n9ύWOv`m1B4ƞ#kv\JqaXb `g<'"B!T T_l[BqAV Tvn:1Lj64B9Xn@B9Ŀ4 ,aܳ}o]t~Пpx빂7dž;<*ع;% u]V1@ :Y܌!\$sXKnPJ 9b,1Kl 5P* E*T-lؽgh YV TTeW% ?x'2罠^OGN^j={$X/PmzUn;$-B?~?S^)4 ufOb*Ơ6 Tܓ 8 &YD R$ e ģVBK!WYZLT9ȹ,/z 1)=q4xmpdoӸ/͢S,R|[<ԒR7_fMd1\ )"n7h"ڃQR `A'|m:7?|}Fh4}/߻G77s0 (^'ć|T ʾ.c/+8[;3{:ECsC-H)/W /ި )$PĢ(oC -pKQG;FUAx߆9;=g RiNb2JYPA=X]t5|A36K3=c%d F1hŸpzAE3&lÿqw7N[@"{IAurI8Wd-YxNrøܳuHwI x6?3E'G\9AYRSQz_A,>?xjJ"q2vhx1|jTo}ü&99=z"ϋ}ƋNư(7V8 vܨ+fm", LS[H[lG%Q&v#?~:$U 6&1<&<@G`K9t\E>-])cTmb^rlKnIfݾ==cgj UVa:mP ?A7/:wqc?DCYp2SScN zbА8'1P7~(t9 N_8C$Ǟf?zJ mbEt(MWkGkh/rR Eӽ-Wl,t6&?wi^|Qϣ&QC{ѨPQQYec~ Ez_;E\BrTxdvvwj<5gj&mµ & {OY,:FKumj,hz-:&.ںV*M.;;dVGLB{;uWa4gd58Ȭtb(v xWMSC)UQvAEB 9&Lf=p9Q3 !>I@r ~mM&eh_P0P1~Y(.;9CTXmT+;+zhUq"{9xdf\T7Ye MbX-B.)UQ+Q%0ml,Y"ne~C:-C:3W$鑴MF`ؚ}le4mS#T-?/:u0_4`Q$AmL|\ń|..v/X~"@/7huS#'TQ(_}R2e%Jk3E }[?-HT~cʥTk"wo+1#r;r~ŁY_AW/,DutiN=3kn4ihtWYky{vR]gt筳v _=8qqBDϯˡ$7Y&lvd }qjZXTL2%LYxo-?a8Fq#2"e:#=8B^)o8C=:nG5k 
Rk3ޕ l3ɻu{_^$ ѰϾXEۣ{>>FuU:ZЛ *b2#MJ[[(>pp**+&'&2lbՠԕ8(;B4  |yp֋Vx-G}5>w⠣ Q&i1WR~Ճt*򰐍9&&]POp/hgBv}r#CC|#%CR'rVq/ #*)wȝr9] P5Сza~]]ꩳSh86_-߅fDE[MCU~ŒDmx& NTk 5 Cܡ53hQ(uE 54QZnG؅_ypHAp}ᯬrݔbsª? 5zDJ}9 6Hj?S߁&^`! .+30@z 6a/rRnmPa͑-e@ڭV;|$['߉>/öPg:Apq.Z&wʐ+caI(4+9PIGCCY F2ed x**+<|j(&sB[-Ĕʃ_V 5Q$aC{{YsѴ<1][(c5I$'G\ GY[I(HdAZbv,f7Q4sO ^Fo|; Dm6Zzqcv{'Л&ف@!Yh 4{UȟQT*kԛvSl)R0jN(=0~ϭ&ļ2UL(tvJ\U}*JU%T%w%c짅~=(Moƅ~7kof #lM^VVjjN`qreVoU =7TץFbzr> stream xZKsF==5!3 JR(y"+>r=lw 8M`Pw=8MqQz8_t%)҂tdcD+v3yBi&i& O /gV⏜:}w+2\0:RêfGLJ̒\k0 F+$ .vsϰD`hD`oh[. ˒"`0°ih3? grsR0[nƻ*UlLjU3g"m座!76 \_f3|Y !3#_AS,\ Ѷ7EC. f#8 0D Wa3v}7j>4Q 4aֹkS$ X*l. BYe,4e^cI&Ͷ;h*tr5s`a"f3^q2P* C/B&x" En_n: cqB>)?\O`;MvfJI6nPzFjiv}@ ifS.8Ք Η+(&mu#HjGOAhFFNݱel?><%GޔRW' 8q.qڗ^̎&tr h Q!jJm9A2Q>u^W ɮt*'"9Lka^"iĤg2%'636`2r\9RRh>*୐qD]xx[W6| W,.Bzȫր O`8WA3 &N8lP2۽CäRSXB"퉁ߖ?>l A1ޟ6vфsl+" ֋J`%IEzA`wga#y" ݡыrq:aJ0#EcvvAbB خʎCxlR KZsvVOqy 9)jnV\[Az[,;gB%$kkj4zpi ,UGʎ\Wѱ/9 q qsBnh S]qFÉ jͅqth*%(Y( XSƊ8tʿr5) :s2vyb'`H v1e ~!䙇|ڝ̀S)w4pHr@! 6~qHPrE<ْ/XxzBMdm ..$ )j[;F$2NatC}w`#p erހI#rF@>((-`HY,*YhA4eC(? ELYK39 HÙ!ynqӠ Tt I35CY3k}j.b>E`N9LlIbb?ilũ0`DN= Zւ rpU=tYzt0Ð.9%Fΐڷdos03$ aIsM?u0Ca ܍mxRT^ `6QIHanm=W_uV׻ e"cd¿#}4b^&bh:ݩk#)""ratM$OJ(ĸ +/NA$a{E=)c˴HyPykRb /#TsN|IB*ۖUGUbr%Ez0iR/67X^ՠͪt<$la%ETH i9 |p Ś> ᜩOBB'@]۹0V ̪W}n'T2NvixGU\,?=~HWH);_>T_I͂sxIaճxVAMCR^rK=һ.E E+LZ(\Z5\i5жD,4 ȠDDQeYiSŮa K,̌K/jeah_7H5`~0os- $e!%k2^Kі+M\gdi#upmnuUf4شsM$v(kFsC|qYUqHjQȺD/Ե P`{[II5DO`lzW0m$xC x0ySc8Ԭ -CGG3m[GIT u]7xSP4|#hXb.k‰{L/X)ĵ)My^`+dWZ(Gxyf@;$"a/\^ ؍>0Xu{A ha Pw8_t}9!Y)х9"P3E,v}Zsޛ< 6]ļ1:%)dl$cΖ} o!}R8]+$ 4ݲ){P4Mlc~%VD '~#$h}rƥҝڍQ٧N9zDaAS8\<Džu5턛PѽtWTekU͜*ll!` 3xQBr_|;0ǔ^JwxY&֠\׼no? rZf?I}1LD @u9+::;na*=o7 waSyyc|1ZbVǎb>^]_ Qc;]y?m9-).`pЉoηOLj\?! uTXMzpz I^2j6)Cu9LR@T5QͥńʑYk8CL9 Ps=0 Q&JcA !hSE*ZU&@bo苢h_IoX)n \pҗT_CQ+I\Y?fnq^iBR_r/]!y}5CE%N' l>87vuç.EA W%̘`]BF3endstream endobj 316 0 obj << /Filter /FlateDecode /Length 4079 >> stream xZKs=kv+i4ˤ=&9vlP$[ͲAWWk{P45bu_&I]IoV'dqNN?7˫gR/=\? 
MնROV'L?ck_iɏT@ '$UǩVѬYg?Lg3LJQy&t){ Sy,| 3/e[l睲򪮽8 ޲]6oq s03+S k.f:4XS}kZn:WoqL6uMNC>3)|ŵ̤tG-/5-F:;300.^.iJ;8mXu ܇eR.lcưoo [(;&ZVәM)i]!f׭,kǿt5bo$'O\rspdC+` >M۩5m~: 8b- {> i6|&@nl%` Ɏas A>4yVUv? }EBU0]a R$g4R>g4W'=+MѴ;|P/ueaZƊJi ?>ŋ?W3$Zh?7}`hG3 L6kp.Z* wa_C,1̌*^~ Z6bkA$:G!ࡲVCyAC^-UpEFJN NO YGX 1~ _w EZ .`Er9܅@6܅( hpگG_!k.cL~a߁QkmsB o#Rzp-ݧ=]6bfJkiZӎa  H:v6`Y/v@ry+9!Hz|k1op+(QMR8r& XtΛ.);q9U"¬ (@?͸W/"Y|IJZқRuIjb*IUg!eoL(Mg8w^@NρRr̲hV9@:~eÊVJ h Shd8k."똭R}IRgVtҝL *_ 3mD7AM"A$$-[szSjٿQJ(󀻰Ѱ}ӵә=X4)|%%H|\g )^F,6r<y`/8`\葽瀙Ɉs/k"8/B'B+Gr|-4h+m_Gâ|5Eeo*FY2\^JJ Qtj07_ƌzT"o7 }i~(9 #,w ճ~i3z_\)}mGw`Ds ufik` K59tp3' yEUrEU<9eTKuCzM=( D&uD ?Ob$Th iŞya0ޣ]A[oOU8؏򧇛'lՐ]׆s9Q8F7]S؝>$yBoZJneL?`C>wuG]Dz93 D:8HQ[;Ƶ{ʝ|DJ(3*Iof*{"Mh #TDUj. K$0f{qq%N^8wo>DPc>{+ź0o ]qopwMw[x~ؽ*:n3&#H; rĨ:(Q^`%D3|2)H1Ia.d> LK! J5d[@A¼` |؋Ӕh9_WkSL MuwP3Py ǵߖ|* Xb|]Ϣ;- ,v&^ f8ؤz2іTkc']VHN5,s[V 5^oSHh-%Ks/+֣D}=FށnO ~ҠLw0?+3>RUEkm5$0Ǧ4$˻A`pp@e/~eF̫MOS~)}+ a2Yۓ4{$!#/lu*DܯK~ =^B ԰ rfSfab4vbIc S@~8_u;dռDP$c Rv3iϣ/~.GFѸcwy0V>5tU~*̧!wXX䰐N<6tkJ8ni^[cx17^6Lz$=rc,o <Ϭi&/sszn(KVݿ+6 TPu]S$x̣oC+9&L壔^Rz/|* :cg# ơpH/bbeY>( rq8r 44]i@5i&}0MZא+vM hT7O4d[qpG )҉o! 
j֠6eHwR푠nrc$Uf%QEmP$֋Tt&lT6>tM7+ϒgudN?V:*B GEo-hZr[ap\-g 3tE&m~Xƈi0 {=[5_|J {wQx&~SwsD  Oo3rNa~8{UNxxw="mSӨ6;ZCendstream endobj 317 0 obj << /Filter /FlateDecode /Length 6975 >> stream x]IwF/=0r}rgʞqu=#!P*Ē d$IC rb"WߜO,ޜ|1KZ]pբrU8ۜdҊo lukͩ6 g*U4kvt:VKctXiKcS]E^{3ٻ]4z[غ`g-ekn_(hߟ.qU6b .^Uh=t4ҫUEuu3lNdhMX īlivϫQwd5ZvN+ TmzY vxL{)ڲibPxdWw d6j~ȃqZQq500\42WiZ5!UJ]: mlg*]]l/&>23fU`IU7vĀu|_,ɝm49M?m([f߲߼+bqSru^vQu4*_Z1!#:EP[@A)4:;E+!YC FS'Kb Р_F]6*U5Mu=6*7;ͱåRyf4ln1A /l[ ئIeݖ,Pcg}Ǯx nds.~CM> 5nĘԼ)a}uw~҂۵C+4BvjQ+WM#.Ͽ%I;遺^prf=Ʃl+xQ<cW2m;Yq:?]&yy"6U@nBY PyUXVS7x @|H*5{麞{b341zl r##Ka`&{-"O9 "= iϗ1%߉ly5*^Am@RtV m;-%ֶe۪D7kQ7WdaӍw w`30q=ףP+hӡʨP ޮ|.w1G$<$6TDx1( +n6`)UB~ ڎ,Fr,!HlQ1o~{V>K鴥°.F /  3J.$\׾.ʌY=5 8+wr^X{6R![9ݢ\+KvXzVYjK0̱21kG+N6,ϞKȳ3$ J (MғQƲ.7e} *b,~많߸>7p!EM zrٟtG)p$ve|8uD3xo4.qn4ވ^( Q >8BӐ|3w[,S4~06S.TD׽,Iƻd%7݀3u[b71!)*+2yߵn=4{%+wvғ(|;uY €]N͋w xrέ٦p|'$G O6d4B`hdǻwcJP$O0aI,jL9y5(WJ.(ryyԋQ*W_DY"HNGqz#u6ٛ!oeYm&** %@uCm~yy`QUE^2Mץ  @p(]qpHVa: wtC@Ǡ엿,h7aP9Q<4m敢t\SKWS) eȵO͸m(z),0kT=I +%SDq|ݫ4J &G1:TuV8`h>@k.?MCz"@ 9G:灰 1L89AnNޑ oЩQlwC:F` @ce}-3O9lv'];$3p1K9ImC0Ѝ!x]ߥnpʌҢʏĨha酭ФX7i> SdpwL~wG=JB1ʽSB 1A^;Xog᫓.F(jY-,m/%:՞6uQ;KZCs6hVVIscThj!)pql @ZZXm"Q| w4gOUCSw$}JSnL}) z'8f (fgc_|UOڗ-N Miص`U̫ qTf}qƋԱˮ*ѿ ߟ#x'Co+ \1,N8"6'OV5R>!9%u?'p`.(݉4n^G9h@@67~)k$k?`J%>} P?'NzPu/Is6D/Հx ;P .d!z {!I~^RNL"JµעJ >:].(]$HBS(4eJS1K>.]#W@*̫ vqXQ#"C!iGDIBJ PvIJ'K{yv._)ƒE= y<<]ddhw%m*;x(%Y+oY;vQi5ZΘm?bq(\RM:X\CǔO@SPH\e8qdʎ 3zl[&мznCATRCiu NX@nPkqvJ|^)?Ak˱@GJBLP%ǦE,wk)S g5?KQ!|կR3D぀%Ց@{)ٳ;ۦk,fCzgN"OS5k+7M8Kx~\ɥn?C"2I+Mr>iJʗ JQV3.L%Ct;9?YaxGqyO扣k8!L|o<3k853  ^;U"jxI'l78@Vv|ڪgWbNFҠSʊc.m`wn3@leh.]["{%pٸSJcLNa=S/<pfU9n#,5_ϴ{"Ysq_r^Ky^c:cU >*s>!Lbgd׾rhy1{*iKvMv K r '-]]v0g7`>?8H"Wm?rJ?9wXO`$TВΑr:wMJSE D=(x8^o$*^J?I㾄-_L\D:O6n-I(\z-0[G PR癷 D%P)FVF7Mu1W?ȳDǫn)zB\R|QF}p' 8&\Lķ ;Iy=OJ_`Pt&j-ĸFS{eg>,%ҸDAU'G5+tHHS|\Xt]$T;RgK*0=k6XP\ڀ5F6JdGw@%y&,4w>plHYns@VQ[ysw4(A*qX?*n±c &Cu+yTpMu21 ^'\8q==t|csq]>0UIZ)ݟA\6XW#ag#Kl $ܼ͕V~O<5eq^hM %ʋ"F'#~Cb;W +TI &JDms1;A 7szcIqt)=rON.KܟM >ğ8f[yn)}نF:N9W!}|qfW}\{]!]z =KhËJfe{(:J' 
UڼN(C3UE蛯Vqvɲvpjϊ(* Udw[znRϠ|W:IVUĈ J0qǖJMEtHq{z=, ?E}uoO2}fCCosciB}z?(\ip1LF#gqT0w }QV69`Ϯ@1`'RIx Y!,PO=QO.OFD,|AՃIYy<]h.;CC9ޓ$Ճ7FЎ/"*8%5 YuEf7FYGcuNVqnad*XStHd W@(sVoOj(n9$_'?U5YUb& rg$kG@)UXI+K/[Pn ҆o|{G*TࠗKΕq/Bg G9` wmP5 t0,Gi$7,Q4]G12xVYʻ|}1] SUŗ۩]v !2wBdé4XOU$K4@0d8[e2e,1tT5)֙W+;%|'e v@ȁj ,/yo#0 - \X;2Sk ;-RpF)zۍT2wp]*/`::p<1^q__u<omw给 _]GIP`0buUendstream endobj 318 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3205 >> stream xWiTTWTyUD½!QIZc$gkZ!A4Έ!X8NQs\jHR;0jC©ר u*ZLP&j3Cݥ!5"Lz4xCxJ~!OZ88'M:{ji-)j+ʑE?mvIse[Ȫm^cR[!tB|elƞ0V>7BvA4G~,Y+^:F)$[NUȱRVO>@%J]j/~|F@L0Ph=+Sէk!,*ns÷,sUk$ۇjN@2y\![wm2Ǿiuk` + Gv #8а|QWU y+k>c׮#~CUH>Pke5:*geLѺM˰'yI ֳ2pG.ZoUmXHbf"@ofm1^_BK:yu)Mh܁Zx&IXg*44@S@ &$K O Q8t&ʫ.kO;;;zmQڣQ54_SJBIzDrcZD\K` Ў~4ЏQDU@h@:fbcѪN:(VmZ* x^q,Nj@_M@HtG=řrs[Ʊ(2 !(;`R;vq]:9Oq&zJ^q!xquz[-V򲥳=@PXKj f0k޻ʡ@өK(!šFekFFSij]ؕ@/!]}T E/se<}Y`h n;O4 ޫЍ>nBRڔ~{i)ɏYC3CL3PS&!>XX4WFjȋ$`t+{ ӿPuwzoV]^S:B]SL}ʬw2x_m[e`:k}' ^2E+ [{~W%_*ŗ]CbMP ]Y), ZEޚKxV6v#jՀ 37],!ΐb䘻Z546w#SA[\ X*̕h nl:=lx|Hvؚfl4V5_I+{s^//+`wj`-=pĝi/.q&%tb- F0='x_H~ߍ V.mT?^475 pPݟ9}tQ;*D[Z=CCfs? 
vZOYzRDԽ aUz6>Zql Ump ^5 )܊ehF/BoYY>k#4^2 g/LN[K 8p>o?7٥erŤ*<,ԛEkU#K$nt\M36 -dA~Ci37eA,ҦЄ7ʺVu !v_0ԁdp?w7~pg?Fףu&2(Dhi+Z6zTi^R^8 Xo>N3}Wwl!30$g $_i##tA٪ >/e-]>:qW2Źxx gCZ'Ov͑?_64"pB &nTdh?="*0,U۱iA`#zwBxz9\rNRc'DS0E"Vdu7SOľC2Xf5Dr9//ʣ^pe$h;s:d=i鮾:tBoAc-'LqDK=wԨuwQ× \ Űw+U [h&snр%Du3/F>py vz丘MTؿ,]cnÃN4kenQ#۲9">";tE+ețHWtӥK͒fcT@p7%Fr_c]#FUS"o} Io !ɅzM[9!L'J X\f; $i֜MyQXBkש)j>tj"Zi&m}s;l$̠0mȑݨ]ҭlgu 1#$K%^^DF p`Rm9p0L aȪm"]N />gb?YIT.;=CCǠU,0gȍO(0 m{ 5`j tmW^sxW_}0Bn _W5:.>\l"g>JVY2הѩxܹ{2xOu Fh0 W7!bFQ khendstream endobj 319 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1465 >> stream xTkLSgJ{U;PVt1gMT/sJaP-P2PxSK)7EeTEձdo6Ch1ƨ-qaf?{{{QUQ(3֭hّi^iڑha\0xIPQJp_Oaf&;~4g"ciN#JbKay$/w{)x%qqcXίdfXvY Ls6&~}$\)z ҭ_H:3 䵴/0KԖ}pcm#s/`gEa<(`;,m*& <PkJ9ZY G?\.ԯm,kjpl[;k*>$@|I[mՇ Oמ_Y_; Pr h(- X%qCw,# Pϒ~ݏq]q]P 4w/zva(<tU':rutI+^\Б3β/z6d1mղ;`ʓ1 g%فl,f$jk6IFcS9 vC76u|_F$/D8Mu7M3 [>@*G]m"YjpQuF0ɖ/\$1Sd*h(m/- H좳xBGuSOXgߏt,{>e2_=jJVfRsμkNLm'ܪ3>ucAzMԺ,Yk1qRN ]6 p۫ʲ [.Q`9y'Z.)| wgF΄+O= lOq]xVvC> stream xYXWמ̎q%EF@EA}e p ]h,jjLDS3.K=s"JGD^0;"M0M$%}}hm*%a2F:^; y~"DiD.Cb>f ,X4>y]ncfmnfklcpk6'8-,sVNf7mp䚢VA;V ]+ [ain&>N~[ߜoւ.Zd;˦[X99oXΝGQөMfʂzBͤYL͡Rۨ 5NYS;|j'2vQk(jzZOR(;j1ZBSK)j#5@M,IdʐCRF1eBiSӨW)b( 5ң^)ʆdҡ(A\ԡ5MR}ly{^F+ş1i &ZN<5cҥA1þM1e)SN[Õqg.o| mM&Y&M9-_O3WRkЀʅעD`sؤ-s<`bAz^GxPgrDPJe(g%'&U?5M# yKՅt ](؄j$?-]Wv'_,7A #PuHQ OOX7:g҃\ +XDWoJ\a-Ͷ{MJێMcX*pS 0#)2v mYP͍;}DJ'b@Jl[kvϣ1kxkw/[/x@5%0}zaH"bۂ'bm~ {Nn6bHl.ҵz!m-$::3,l.h$ AЬVWQIdL|h+F!N/۾!ޙ?,f?WYZ~-4WPAW*J`a2;sUG5#ꐦ>C\[psʌוΎV9Tv@Żֻhw×{[7Axy-Tz_g( E2z8$m/){/dp-|xV]3H'# h!N4c}oDs6XTjInlab_K(2#7it#4xOöWl7I"P:BԈ:rIBr`ՠzX>191%ģ$ У YXc$p&=_U֝I60Q؀/t(]{aVe/Ko[$@]<4sSjݡ?G٪R 189'PC2L>ZT)EMcvh b4j"۪H".l8"42:TLjQ#*1<3nKrA!G q`99tyFt.*)No_^Gp38/UgK[(fra(b:&mD^z>MhI8x,ZfJ&NL(&vj Gђד_ŃyGQWR#o~S.=x\~jYl *C=(** ׏ D U Dܒ Ry~}LdXTZғã✜܊rT> X#k3h&\ Z4>2_8DaX(uz遙cxvbgyN#s!m44KȡQtƍ(!լU?鶌h(VZΏҠIx6އqjD70qO  LpMޕ\D? 
^Ct$]]@tJ' %5I?]5n/ gH楹 >`*v [`^?6È5o&xǤd)ifυAtG~2D!; RM rΊfPAIl쉐ؘP/OqivjMw6-xk BqYQi|zBn+,]}뻳LR$9>&4WݙbMEL0Ѵ\UrK}+͈m "?/ĝ `(;,%t=kD-"(*Ln"bTm:!_@ ~@ xhtu윟P^YVZVQEqqE^PXv &TdF*Q.k7j(OiT?@Qٙ(ķDաJfo*!=V 4J ,ܫ-#ÓVdGav;]ޚMߺqzO HCXC42B: DdRc`!8rd^wD"65 CW? Sld1i|)Yt}-zÏx*֟7O=xO350õtŝ+B~ihݧC$k>p㑭;Dx}8}!8۔pIښ-Bsѷ#ԏn.Q6JdXdlwQܻYop1٤YT3OP;dw]cUpj/LeWKc,q"㧄ǿOlSRUUXu5ah+B/PllNBkؚ3wQq>{_ct__eQIq)ȧBZHXg^.toa Lk2C=1J(rkx1kTRsQ:J`II V:ދ'w> -Q{J[lk}YS~yNaHcAT7Iz9lu;sgo\:yËwo˜s7{剓5%u|sU AEK戈8 AKd2+Okln: ON2~20O F/\C)|b[xaV~UXk(ѫ~I#ڎ:φHfP`u`ka\kn- )>9y(}i/)p kOl]܏ǏAYu.Ӆ5!88""8!AnU~xNqF #6@ ]>X~4heE[g7:Ü={*yu#zɈ Lݻ%mbӶ$ e+[F-6uٷyECۡdzkklG +:KG`>}48݄(C8;/גx :`aъCi0 js@7`Ln|LCM]M};vA5%.T(ԭ=$OG[}n g6gbՎv'~˗>Ys=-Ҁ#J+]lV?fs5Qi <~6iqT5UMFG)}L0ZXgiƟhK0;ަsʕvs`;Kk&D 8B5$ sKRtV\vd f5 ,5? 缹?VP8UDM<.5 HAF2c\pSt{'2k}?0YbRֲ֤"3EkwnuP1%5=LQeFf$9^rÓAOA$5nb~%tk cleu +fSs{}B"$,,1zOod3"뎎 Bx'OI $'=h*Ra%FwtNOEWCHz 8u u4Hg]u:D.zlp~zG΃#y@Kp8w,uLb~5 PQ0q^WkA&x j~'Ck_a+A뾗מ я_i:+ⓛrPg_^Xo`pY.: {|NAEam塚V Ȑ_wuui_=0Wjrsx'?TNKX/Bye͖s.`289:Zlé~q.IuZƳl׾{r~־3"A"\'hZ]s"[Q3*ɩf2+lFtNi?ќjQ5RZT^jMR2Sd<) (i\MjQ˭˜!E>"b2/˜]J."StGL/芞a6-~).6<#m^=~1a!uEE9G)љW쑤f$g8&ZV ~G婥E4d &.:1vKrB+b\VT^j|Is[r>Fm&%&+Bi$*!&>4$BSQtIffۢ ՅJڲ?j^ubԕTJ])SBO\xX zT`L;TX] +KKiBq6 T( srjs^:h0ru%endstream endobj 321 0 obj << /Filter /FlateDecode /Length 6121 >> stream x\I$u7tOL t1dD6 CCȞNM-ZfohȊȌ1ꊌ["555/f}~bYz߾RƱ+qj?ej'f}5u#uS7Jָj5Ꞿ*'vеիwdz_, ]kaW7J\pKfsj5z]r+*%jì_uqt]?WjwExJCwoS0E;gs1jX.=GuhSBuz\:c/W^Bl7-LdXVkfKp<=I &jp:}~\K@H-^\[iHj EfZC8S'gVp?^Xn~׎ U2~WwKm\S[53Q+/!\Lck.R)"jlCMEkWԴ_q !$n)&hؔ^>'L.2eyٙ$wmTp-OU$:faզO{›@O/J }CZw흟Mszu}fLBjl`T4"ȯ+lv +5 d|1" _dfpYun)ښ96J\6ίx#D07GƐKc]a7xV Ⱈ}x8ҭVُ#bIr.,)XZIܦ.qXzZWJcyn.aA'v( Ln0-GaHX?l2MhV>a;6p6`SsjW A=L"ٷݘgcQ3=?S2g@5UnpK iCfE>K&֛!HX,iPJ; m fێt`y 哂rї2[}iS^UGP- "8~-<LbvHnuaX]w5E€-1s) (n wo{B~IȩJr;Eaw dhqDwn\'>T!m hALGtR XMGc/jŤŌq;3Ţi4D @E3q[tT$٧3f h^WYᜅ-zg{p#?g ^| ͵䬟Po r75Waw`c2ȯblpt,`x<OaƐU:59&mR g3@f&ӁTF=cxZi)6[ FѮ(7xS`% \q3p}H ~067m-d2-b7}ޕiFH)O vyQ M~[+,(U%N?u`>`x%;C>ɚ1=ӯ 0a#s Ӯ Z4w TYGlDև#H+Y/3La̢uG 8x,zay) 
(:j)$G*Z}P튎>j#-UM?u%F#/r2G Olj-e΄.dF%Y8)khLx)Z^t ZtTCrK@w>@SMȘi遴Kp"I.<y270iokh(}gm@BLmSt&P@bB4x`E58E4yưd`+JG|39,0SRzaNv= ko=1jFqqGe=3 7Rs41SH-(h)[1!Wd}ۧTu׽ѕ>S=-(WK0a ﻻpXX}y[F[/IĐHC< FI`4EweBTq5pW!oҌh~uZXy,b2 QaRR'Vą ײ.n&&_Vyb|'ц 2OH;2ݜ!TsƀWdVp9ب>1 讁JWj"gQk}wq-s8p;_+ jiwV7%gs8pQ]?Z&z6ïk" &|vj!CA%<߱2>K4.B^JT$<1کl 9pǩY X| ^M؏h'sHu^P0t %NE\X^sէ Ut MݧI\3r=4FȽ"M}"BZbq71.gC'!ϊ0!1Sw[BYe". 0ɉdqz(<($0*W='8X[+34N䒡 ܦ8-Pr$KR`W) B~RU3ɠw&2tjY``#z6PK]Ÿb;Lh - L0ñ}iag#hj ng#W7Cd2lpH)gjMS,vmpF#Mr֍+sϊ kJRr N7GQc6uޯ/H%YlH"M{*КJv}J?L(W\HA[?3 Ȣ@f! f,vh3Pr |56Ʉy f2tN{/awOY6tHLtHEhNߑ,"X2e}_rDJ4}_l%>< )q3h90MR[x2jڅn9['A0+SS>)6!ʀP3 X[vgkGzB<]N$< LlJRqM:@ԒE-ěpaJ] øbDV$1COo,v\1a"E1 [(} CluS\#7 LJY("G!>ǻ}NMVBwQؿn- paRqOE,4*ߏp5؏8&ҲG]bڧP\{wYS?-(`puaJ3c'Sջ cf`MXZMMzWajz,0qrS!0Sb2jvoDL՟]'9 -n~{c9{D2riObpuZ ø=:*dCdjۇ]LyI]3d1pMnT.u!ɇf=laUT8 7yyTKdr=X`v IpBGp TK3(6.5 {Θb~܊>pzzC}>Enm߹zпо :8>2eG~QZƽ#~<49Ze:4`ʿM2y3/ɌH6- Myu*Kˮ"NZŔrc~yDë92o@EI}Kŵ}M{!V0>cO]5TFM9UmZ__tR֤{Px妯Si3VʉjgRpS,>vL$.Ј4sy*F_^[ 6Jg.Sn (7MBnbZ^M?XPK5ƃȚ95|=ەalC9Lt!AӯBRmoD".dGuHv|*tWeQNbUtOR㘎XĻr3UgN%HnN،Oue?z Nɍ:~8P[Nw#vA "h*5=v5Ԣ9!7`Rc8t umrSw9yQGB*QO`oòN }a~'34Qq 2skc6=S>pY!s*?MA=+"t$@jxFhY]iJp.4Le1ĻzXf U~8ſANaAQ%R x]#u?|&<|t!p,^AYӠh\?8l"D/,NGKa>D`W M/rJI܎7bQ {HIGjN!kϜA7' 5e|FJ7鍕C1LݔB faM/O|煬w"C].yN=(*ybbHvY/ɧ9& 1&}X03b-$፪m6 u UgFujlu w>1 Nv݅/ܚ AlયU:QF^ ̩ MEitm9s)_#NeubߓԲCZHlߦ1ߥ`a6UOA$U`KJ?-]QV0A]A9t pV$ XHs$I]8:7q l,]om'zfs$*vS}B,y15~a} zF6f,߷.ͨɐ']Z:$۝W4WU;2~^[ȪA|? 
,WR|LVd[/Aäa!Zѽ%xOS9Φ_RbәS^4W"\sd+ 2,¦leu&S9l2 l(1& x%wY!y?ph gKn gMB!,U5PΊ~2;'kޮ[_tN_Ii5j!`"ۘ=1!n ~qrY%g eſ=0b.9n p6r%Z?l/ 82IHCŌvh2f>DM_QNOg͘YبwXV~޽,w⤀%Os%ZRzq<dP6OIu8vf{ qdÉϮ_I3 e qoxO3endstream endobj 322 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1106 >> stream xmQ{LSwWH{Wu{, b|dʣR*`R{CE'in6#EYC3ی풸|I9' "D6U,їmUh.IB ?hvJr$y5 2P X98 G2d]l,/3ZE񉱱[lgSش3gb.g*Jٴ86\[7Y ,g`cu+rrU9soFVc¢7DHRQ%A *C!}@P2(͓0\?5 xaSciw9}Zj?*EXi1W.q4/F 2Z8'͇bB~/bžKR7PBK=F"x"'Q~~baN/ rqTLu8 6'G`;Tb+W3hATr_ILȍD}-Ⓖ-1@}tF> ozx2IXK*E5;?iL ŕ <uy9\?xkEMeʥ le(ӹ|pGn^;|G.^$Q-Fȩ6G=쨜+08T2|t<ˠGe-O'T8$ K z:% oI2;pȀsk =t{?z'djv5 賊;ggHTc[|`HqP̛m%1O3y4ky2h[2hW<x\Z1U"7ϗͮ_F[M QN9o%90d^#)p O=z{OfߦolLK >a5z눎! *wo{;4ҏ :Ȇ daGnj.pwP PĒi~-I ׻]ms Tt憍 cXÙnas{< >40 Y)grE\R*nϽͽurB@Wendstream endobj 323 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 888 >> stream x]mLSw^Z:nؖMeɆS0[ FP)ҒBOK#/l 331 #:Yl~3#sqn1nQr&Ɨ$'9χw( Ei7 V6~imG,6Qҳ )G ^iq*dC-fZ ǿkQ5Yxh-QR!Sp4ڏǛl)4S};_Ɨe#MBl|Y|К<:x+-~Mou& _z-ёuDO:drP OG6b±J)^erl ]cbAhJn;6f Iq\F_.T- ٯ;"=S)MaJO>`r=]XȪFYu?p2.7:p[`xukrs  swwz4$+f{+42[InZ~3b?%{8/tSDKdžQڗwaؙؠp\{:7B`O7Gy(}ĝ{j F?qv3p7‚:̻>J5l$X/fZz;:9j vq.(*S, 'j51.ĵ-%AΙ^G࡬/;u7qItN'B[SC__V|6A}U2x *j`$y9>b Mnଯnݰg)ΰޏRZApi( nMZNuF< #}HZih8D7endstream endobj 324 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5116 >> stream xX TW֮Dl5Ռ1jJ:"74;ݷYEB#ADg4θD3ј9gנ&Oqм}o%YP"h5p߈3g E 51`]ߕ (eK^Δk1X:6qz0ilѦaX$pVD u3k3υN~;trS$Ą8Fprfӟ Ɋ'`_y"i}rw'nSf2WS҈eȨؕq'&O>S>3g͞3cFSXj-5rQLyRS Tj#E-6Qf]j ZAP++GjzZCJEP6-5XʁLYQp5DelEŎA}bdyL2F"|N3apUՙ!>CY/:ihMea=|,pւ.b`F$-]$-9eJ_b m- ؑ|E&G;䔪v'C2q؋/Bck9_,U6v G}=AROrʨ/ F^'S^ҁjb.V.hex8-]r FV]aFJOu,Z+k׾3VGh1OD$RFQXHD!Rd?D'<6"d+B Qlۖ ~T(I|;4v4|:_2x=S d{~ޱg' {\|<7ph9ltCTk'!kd/7K\åVIV}lLo)(DBƛZ;KI[$ x‰h:`Ehhp]2@pRNKt=$ldR "mf/%b.ݺz{z0j^i/Y7a0LwM,y&aⱿGuÀXz]%hpZIN&&8RĈjEQa A#p01.doPҲepiQ u6 K)*(JQCZ*Vgddk!f. +g8GUTiLfDHuX*;ex+()VI!m._ OTRq-P6x:.n ףQR9_#CR29%a<{h>#zpy_I'r_p10%PT!CKỷk7c(Lɞ }IO*<򠠕E%O@f~*&f?7I𠛱G9BK,ZyrΑFcjRXnו\x~B)J5?j7* e`W)XvSO.2h<\NٴyAN1}?WA5G M Y5f $6 |bto$M63_yp3? 
l'  uioqrI"t"t` "RO%\ *tDO{vAkԹq8KP@] XC8h"FɻUEPu{tӡpMk>KRC(@ 7ҔEBUYcDHZRz^@ݦ#''׷@$HY8n:*.'l~X %Cr0{4}z9C^I^S[7/=OY/+&5sd\Y(^298n#3r 7q2s7dx)k$gO뇟 Q Ԑ Lh$gM"u|kk-тU$IE!lK3I39٫vX8H#Cμ>@&?N-:LŃ7uWIjqM-DU{Ԭoh66_Pjezj٬t]dSl聱݈;E/!#~> vá #}~Pq P4fo ooiOY|hҞڒފb| %$/(yۙ&aO#*M͜3 TMXj&Vd T h !-nC+8FCRJǰOvkM[G5$VFL =US)[E)d?jW<{˷%QQ]S { 6mB'o 0$IEg\8Łldr5\Ȅ9]c[C{5A%PWYa&!dNFIqͼWapWGzӊ SQ.M%wמ/XH]ϱg-t$ocW C%͎`ǎ-bu1iA]ʪn_#Z}F#jk:s5s=;rIҪ?[E(f^Adj"aHEPNZuԻdѫsm`:a0E$6W]d뽊2ف}=`Ufdue5vŔ4bj<<&&<>ibI#wiZh;yhy{֞) yq5oUG GM,C&ԓiYMZ4tcRgÆ)&,ْ#;S(n&0yɬŘL RuPbO:2 Z+LO{齄ln~\6ZH0_׉\/xIP?iI#|XV7Da2+s!k,C.٧GދG f ڢb]#T٠: /d^ >f$0B )L(h1V<y>E#O'B@]c"6)195#qhնGb=,_6ؾ"nw8%ެM* ;V8>']-Rf| ["eƤuز a|[x"ġI׾ zr8`3-I*QW?iϿn=^3R^f6|dgscOs03m}ahgؐv|x 6ohIXG}xxE'?.K#ODQu?GNl3Wsg.7r’WϢ͖+__30[m&11m2b Na˟*X' R,$nuF]˥deg&Wdo@5gE%Qz8/On똴)H&:G8cQt*#2}* ;yN:X L [&]oq->+4_(+`槻ch⧝e)ClEn8^}|e+s U3K"Xr@ m'PUH>2m袹5^' Fc 2Ffraf g@O=E?3uj[jrv ^!y-@c~bzѬYrj+5sHsK[m l(ëfٶ: dDMpF ߕ>)IJUUB>Qj(,e%<#KJJ^ФZCa^ IN=9'FpUB,EV;J@΀^oFnhT^NLaN^Vlg,Lx8aqlu _F0?PQ0ʓӐF$vC3?M&]ѩ dmDx E-xVBe]Fcܡd`,9#͇ |v:!`ic|Mdd\|ddm\ccMm#V~f^kAL%j !{d##7[iLx}R]cMoM֞NytOA ZlTDw~D"=u] .q ^#=<2;}czD˳阱II'L' LImɝZ5Ʉ_j=(V[Y Zz0XȓW~dmmw yb.endstream endobj 325 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2452 >> stream xU PSg1ps,1ޛU m}lER¶Db$G@πX@CueV%hTSZN{{(ivg29;w/\&P"kmX:YCvmR.󼯈j?]d C]SO.>1xXƞߡvFyQbO+)5 Ly[,KHMR k #7ڰQ!<2by 8v(jz:%xWF&6.>A=QJ?9?.=©j=5HFERQ\*ZISP!jj J¨u'5Ptܨ+HCPYˢ*у bWV|%إӕv1}P2YY|6Tz9#˓oZm6a*=uʺR@nE}o[v$Aˁql Վv!֭'|6Y%Ww"X$ qGܑ8N\dË6dJL=J\g/ug.˝Ů=>t> d l?F(s@/|G\cY?%$Mw<0({S懮 v,{I"wa?d!;">c@?2\%XOG8 =. 
xx4gɓD9 <#k}rцitJqψuSFtXG-CNK])Da)eTm46OlXmsHeЗԑV֮255|eRkdP TMM"^*M}lB&pG-E>Z.$%'>._֨QFHYoTM<fA%m_G 'Y;؇5(uB^nsV q $I\UJF,]$N*Go==˟#y dGȒ4#j ZҲepOJ,5C#ۏkDBXBZK @K(sH+i7U7t:S [,n\ J[f̶Ӭvj:,O|a#  [; Ǭce|-mőH@:FڕMu#ekӀ 'D#(~l0qM"3a{1\zچW!KGtwyvH5ĖÑF顂@k3~ْ77]b~Vt_S=t6[Ҝ L!ӈH.nF xa eڎ(= VݨIEE13dB*6 =pOd-$lBAI6셼ZCվ:R-jw:>Euk/6S L¾ijlH("cb^=j=s7&U1%ktuBI9E"+v֗(VTI@:AC~܌C~f[}'QQV^5Ls 7QC06}׷Z[n/7WsDiBbΝ5w6}T`Eڂª=,> stream x]An@ E0 N$MɢU0Em.nj2[^s-Ʃ_>?֤yqʘc~kKV_{ tiQqʽ)ͽޗ6NW͚tǽK쩺(YSahH ;v U U<@Fl}tVXh !&^<@6u-PT A$Uw }dt$ dVI<@$hArIԬa%T 54 l v 6 e(kip+`.P$鱮:m>?>6#̋uH]endstream endobj 327 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4074 >> stream xWyxSe?!9`YF2 ud@B)PZII5moofk6M{K)ͶP(UQe`d^g:3' .I}m> c0nMLr6 3 ϭZ_^Y}\qFiՌ -y2!v [Q\dd0UT&kCIrժ+VпkⳤWƿ]P"p3s\2~[^/-)dŗM߽NYL[g ESzð "I4[-7/[XWO,z|`X K6`{װ}سFl&mbyab̰0 FĦcOKfNc*~~F?~ūf.y~Vs=gbsfۜUs>n^GP0P*!g^1iy RsfTf`ހFŹz-#=S8%ƵQL^;dHMyomzVqaqT ׫|BmRh ;a(5*ND>NW|4:梧ѓ=6]od#}Vڊ|~g0*38ts["YZZ0ԅzxmj^~NU?s?l@ǰvgi52{4㿼XtPS7;-NJ8`z.FHgM)UM;w/>E3ڣgzn3#rhZѨ K)T\ ;O9 =0PU[ZAe p[QSlj1蹳 C?s9SSD`;F^v]2j5B  u\&) cg-}>Sp@c0tD>QP]'KBu+E:Ys 4A&{ARKq7qb8PoG[j)*h,z>i; ^aFt?ٛҤ,K "UF :aofR_PO8)ە &w@ţu4*U'\ؕUQxZC1p o׮jZ.jD%^gEq #҃VXrA9r!jq?Ҩn0twňU{!/:2[V𲁭U$TzM@UfâBfՃ"-q6-JdY.XiNJŹ:?v1ʡMQ8 U Hk;OƄT9ŏ~bR,6,9}CVby"b_D{xemHi'Zr!ޥyc,V"B+2(VHNƬ"d[e i8fuAz%GOh .ls]db=nlQqzw"`Kg.qu<~W?#‘GXZ\I GOѬȶG}YAN2U Qhߝ T KKZYs#ADnmKjqԆΓ])c4"TPxA%"|ĝ~۲L! 
o%Il7~YRw¬RӼL *G9ǻ[}~a;7s'.1zP)ffWlBLZ.5#%0 =%eM2xn u$^NUҏ%:uȠC* rtliPpf%8mT\]觟>iJEd +LZãttHkX7?9*^b|/8={qKƍ/h(+,h..pssu"Kd 3kPO\VRw{}6Z,fz`1U# :pZg잏~fs"J*J.fD<*B1h uU`+\*;W%N/"׌Q{D(3@3tV 4AXl'P~.RONo=]_7y ]\C%G5N{ ~#B[/3><=@X5*~qgNq;O`Ga0WV} :ai+a?PZȥVNQqjAcl{w~|jGvVuzN.o$ Y[i@.BӀ|* Te:*W[X֨4*Fh@>dd.8 --6W SϻE6acP+㉈ÒJڙ lcgyw}Q|3"G,{mgG@=}찺:;:*I*fκXzu*BH'ߎo3#Q >LM3Hgu]4dznZS'ذrޯ̍ni-fXtz=SBԳۭz/l@7Pf]9D-m4l.u~е.t-D=Ⱦ5)EVRtEb~h ,*7)g?ؒW*XJ Xtы8s' iqsrN9-Tc&"fMߵ+#v&>4[V\gm1кư9Mendstream endobj 328 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2466 >> stream x{tu'}jlTf"" +"Ѻ*J6m6I$cILN&wi SW=gw\w*ݳgΙ?~sfsW`"h\UFUnEiiŵ'DE.U9}ZvBl~3 ?&BB,$gP2)nE m9yVQZ\zGWO䕲rZY^*˭*L]){AKe˫dy%E2y,pl{tن-۷/[A尺T^YXag_V(6?E܋a56,2s:l=mn9E"a"e1'c7Ǟ]AZV<2'(k8C-He8}467ՃT 2\'9TLF'45mt=! c!o $^>xaw4[YGmpw+DH>9$<+Foc/poE_{DY9!*4fs(\SQOmh7[~|Ax>l Re\G7Z4j$Zoow?sJuF]@eIt8qoJ4Z`udB3x\ 0$zc#ݯ}6\h%1싒} cmh@ŦsOmٕWYEԽ<*]*q'иP7{8"0~}nN4ٕ ]f)wXAx>ȋQga`M>.CTGh|zREʭ5w e"m6`Ca&}Tԡw bѴ { ?>9 xOVkXȺ5EJv*b r}#^!a~#u'zGtl >[)v 8m?&DuZPZ#42u&;+wNzLNtjWMM 52Hԁ6fXn@5g3)-JC'lbT4"*6JE1l+55:g!:A9{6M9%g\:E\^?xCfQAϬTU&f/ Z:ZsHg[H(1pQ~h 6s+'vyxؾ) 2dh+/+™U?QCIz0N]pZI4m+m~.HgPQ:rAq  mtv6]G̅˳ .H"nu;BMp[OֱlnWk!CS8“P&8֩.r6Vu;y[.q|=L_pe7H^\YW䫇{8D|sd]! aOrTԇM_I3qEӔX#\{ߔ<%_0G# NAmP ?_wߙ(=їAb b%!#B:־ҙ\}۱hZCתRVֶ:{ZIe5:&P=Ar|Ah?\^f1k}w`MnrMd^'5؄ijƋ~>dٴ/{fVs/gJk' k: 0~O'ɸ. :FFǦƎ@״ ~i#/nܛFlKS4_D'i ~R =RGFCA7q@l`shC6|로=5z f]+B[bQKZuBOKM%!oRWWМY=ԭeG"Z3j'O"KNZ~_K_ > stream xcd`ab`ddM,M)64 JM/I,f!CgfVY~'Y3?u0w%V{Fʢ#c]] iTध_^竧_TSHJHISOSIP v Vp N4L$1v1032_͌?ί讕|^L[f/U'y;a|]3#mӚ;T};>?/CWQxRi}'|總@-W=OmW|S7O)|B\帘Ey8yM;gb_}}Oӑhendstream endobj 330 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1266 >> stream xeLSWkJ?\,&1A~8CB((-aAki Wq3M!fngeYdm.=B,7&{K!QؘQR[K0PG@Id$ $Gg! 
A.PpA(Ιh ݒ˵o$$$ \Z]WXb 9,J|:3\QHͅ;bٟOJl ^tU3X;xX|Qro[#D& FD+|<LφjGyīO;7twfu2[QsG50Jm&j-IIOpz쬲d{#KiDm 8:|^xl 'Wp;]km@CUZ7Eʐu/(n|"9lC6u"6;U~KKtOd0¾Hȿb0C.bڭyGHԉ gdFO),8p)DŽ d:?FiC|K1x.AJ5I6$W~-\;_:A°o8N8C7Yuk2]B1 ہup/Tp׌$ yXKS 6wIw$ʽ=2( XyM}:0 ٗO$M8?f*vOYL8#K3YPHm4B _8XPKp?3 ˿{sz2u8+k|e^p[nD.'qG-`ek/endstream endobj 331 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1525 >> stream x]yPSW_yy"*kF}NE- .uC!P"d_R1J[qå8NXu}̵cO3f|;G<ObZ0ڨm간;;Na!i8oS03Xr<޶k2(F)"99|o}v6>%!&ZSxvuybyXDVD*tSbbVR R.*HxbkgpN,ٟ|!&hBB8R‰HH~^2G,L~UYƿ}+zJg-*Iv kLxD` PkwJC %[L?C@~qכB gR݊HԤA{Cz򷻽!(&%VKIK=7 1l|bOqyYNvnt@=;qr,Ǝ:/=d!6W;/~m|YZJ;v-G>::1w"F/K@)F v6D1(@Xdf+PH`i/샽i!\P;!#Eۅ)^oڟ= Pr04:'"A Mu*EwedG74i)qgwGB9P ! ejӃCH7tPJ[G*AhsHGy_k enPmԖ핇 tv q%/b,j;uwr5J B[CT)MiL=sZUu)W&I~)rp`lӈPr>h(d 49wUΘ1],BΜBCȹٝf`GQ#vx+:f`6 / \iM-^mMAj[˞v:UYSaJg]*._m e@JoS uiJ.8yL;do1zo4$ϒ]qz(\P-;ydXKѶ]9'^aa3)bɐ+XS 0gC9uY\Jۂ `iJkPNbԠX&&ex&A4A"SL(aj R[UFٚ\/0`GiV;s۳[je箳Hz>F'PA];9H_g9g e ^>eNl>~ GsᵅE)ʼnL8#m;6'n!Җg$Mo6ж*RvKr2(Uʔy*TlL~Qdkꎚ[hi*Y%-vvDPr; :n_ z0K|ԀL8 3&7it g4νcɸo//?sy5Di$}/йoO䢐2]A p}"_ЎB]'6V=`K meW,lۢ!+@_a ,O qZx)Ûs#\\}D  #$U$ӓGVl@4 8,D,@A?AӸqu5bFY"b%ˇ7ۧ~aAId6P:wYjMx0<} ]8UL9@ m2HD{YqP `7|ӖKx"ְ^g9%zJFJ(*A2s‡_Q'2#3)Ġhxk4"\Uq.S-{d8 k#cC\tNS B{m6P"oX9ˋ Z1CEp閁(5 psĉ+ꗦbkwg6E 5\`gZNA;F k ~@G$fdTln*t yŒ9fޖw D%iH_cLX_LDp.=$r4d8e\`i<_IO m.Ӏ+Sf^}f?#a;G͔(b M^hS-`?gj-~GG^,.X4oN qL c^y%Q-zJeY#Q͌\5&~{.fVaY͆H{ "N#{Ɋ5Zc j~f(ؔ,)/oHg8s>ujpo= cC}i3O`VS^ݹw4t:C2t8ܶ.icfpRiuTh;É qx Zp>p5֣!+]YFCFw7<3nD) U:.CuWqg1nm/JQU/gW"fWXe"Q,IC.e ?kǕ0w VeѾq6e̡9WQf{DˢPi9"] f!JyqZAaL{w h)~]V "atF %*Ƹd{Ċ)\ͱ%M HlR9ls'`X@Ku|ٛeM4쪀] (yLjuY:5Y9NYu7)J{Eu e?fݳ 4l9e R\rBqdIb \LX7jG-QEgjN?s@}!%fjHon>}^q:,,tOs\I.ד>#ys˥G`=ZDL)w6vDDo!gp@&hәLOC+a6S~z^7j>^ƽH]ϊV.;yLSߎOxf7o$ďn.hSnE+}PxJ؄u+:ǖ=A*Z֒k]6? 
?weBS~sLЊsV'VsZD|J1ч[p^TgًCql!")TPĜ-(rSeK(R]g \P{>V%-r昱b }Ex@l:TtXZc6ݓgT!v}XZXMIeb3/bcxY̮ƨHa zIrߓ9eFTآ{Hi>m`Æ# 2mN5/MF'TadQG IoYgoD؄WĽDn3gVwohre~) .)rBwnI Ir}1.bzU5@&~̛6+ jP hi@5 {1/=rJFC/?:S%e扻0KMWK' \;~6OH T$4w2"uh|V<2R̹?}Q͚\Pp=kiB"jN=HnP1&]Q5ڵ Fa've3_p&bRg<.+^Yc ٕ@'qQ)NRW˃!\db1* :E>ioWr rHu6_t B3`+F-)(1~p*ߑG-msh\j;Gs@E  W>l]:΅j>E~%YS&UiUM1Q٢[P3*Q^n>TJ'e9t$ y@()g=iyqaWrb]Z v~~QQq&Z5+Nã~)͝DU,SL Oti{Kr֋i)X El;1e8O^h u.tz<~Gw'7۟Ux֦$k^=9sfsnjTً=[s<6s3Ck>+3MI=4[#NaVz$ı >p{9"vEI EU| wcG﫿[}L]! ~>jZ/ N5ik4Ͳ!koGV>q15c{;4S&G *Ǎ FxC`eAX֐ \ìu6Т2ȣ8Kxe v'*Jf^Z 9=%$5GY k|?.da*I,D48UI pLwg\F!Lv=O=C:;?p'.H?-kVLf[uqߩz:ƹCzE`^U!KthJ˓{b-KU5U ?w3⾹t7P%k?pG{T}GLUDXdM֗'?mQ_>qk1Fq ㌿q.DF%#OYMUKqmDGgf$׏MF9bendstream endobj 334 0 obj << /Filter /FlateDecode /Length 3443 >> stream xZݏ) (*N,&C:[;_"$މDuoCgfw]J&pp{7{7K>KJgW8ݝV7W!Sp'1᳛+  r'F-n.$F8OT˜m7t* Շ.BJ fOM@[xQ"H$e(ȳ$Hubf~ %dR'E![ײ/?/C,J}s-%)sO ʾnç#$\<ᅘ-L2r{Sʾ)˂6eӪrz_U[_4]/qﯳǔnrFi(4* QoEOx@k, X_||M{lǾZdžy/w%,^U+wwժ˹JS/ 6we_^;ɋkC<Q/8^~){I,We^U{0rbI%) . %252H\{ )Hȉ?e;@@xra2S^H|W\TqўjSo!bz2S!h0LLLEb52D<2??4у"",%Չ g#CDP|QD'`N|=䇵ћibj(|;~p ?nΓ!'vTeW*0A2ςN\< Ҽ~TrhCu=U6Qxɱ 7Ho>wXqvWEqv8*`PuUU+8z|LrQpopB빹 jmKAl=F&-b*|Ac;A nBl{T(M{3{2DɁhTA=bLd+ŐauhKJ]@!#d^e$Xxh1h@px E !bVn+P y)mz鬟)rG!A2!$j*~C+VӒ4ZVXA"-i G,I,-O#Jg&|kĜA6RԻ+tm☋16u6q {-A4x} +1{| ,Qu-Gwx*!΅sFF<QmޔMlKg%c.'+LmTOϻj(SI茂޿m#2k9r!UQ{bN-.V}+_cvӮ=.0vE\LU([+kj}/e@_'(mbAkӴi16*]јq0ڧZelJ?v/6ڋ$`j&*owJ  !o&i =9WjH嫆?ꓠ{ rmcPmq[,2S.v`k;嶎c 5Rr F(4ݤbU7Pij j6* ]`> EDv˶f:= u`Lr*+FΡ/n+=.F&\jDJf;s'9.!$'_mgW6PPia_ GV)p2*Li*®qq)L+2>v$k<A/oo]{ HEO90f؛n;ባ~`3_d}gЬ*ii 0/]HsTl="0TAR'9H3uUxN; ojT3J CI9=KTXZP{g>Ѵ;\nF,9~Nͱc9Vhdt"Nƻ&MŽ31ey}y 6:k{l;ukk%P0r|6ΐz? E< ;;˯ qh;[*>/%d'aCG .[ݒ 5OK!Ŋcж$|\׿%"8,ڀ k9 0M5!Lk!!yX$ v;;>X4XruidGl$|t _.Ijk9ao S':C'UW^;o P?Eh2uMuP|jԛsP@kˀh1XSϠQ#HP * BS#dzTdʡ-Ò j-{vgd]PqY (]& 4! ܵNˋ8 dn?rOv7u = C-,nW5sI3I@uçI_6(#>Ay RwqhrJG`hꐡY>2v7l&0r^B&HnGCzm "2WQ'V4$eGM0W@%>zPYpsal1 (2qkg#E = ҔN? Fd!8d$#O- {3-Xag6k;@pXo?{4utP*vހ98q|gٗN(*k@CL Z]통mvt-Chl,0)}R?TfBsi͇/,Z Em! 
~ӷrמ<@~b} jҸ%LXq#*S(Y:k[8sM#-=UI$6Yk0]ھ; XJIC|Qy?۝_Gt&c%=PC|mxeCKp"`zQ㉓t=F1{>u^jdnuV(?C~as^]h- X7OWTRb2>N%唰a#YWLz8LOLle ;м[갢&+_lXAgACjGu,Kk`Gr7M's^ƀmIx %?FI hCsČce*EBo"@#!5 m?{hAۜ7)rP Oowyaخgt p hXylwg@φ1ئ*׷̞ h6>]endstream endobj 335 0 obj << /Filter /FlateDecode /Length 51389 >> stream xKϷ[rEH BBB n~z(9HRN۱;c]|&  0ALUUWZOp{U֟}MkOٗ,_hgc}ѷ䯥zc~e}w?Gu洿ÿgm~?Oz;;k?+qxEeQǹїr5?*};_SH㻟VǿOkK?_?eozKw~/~Ͽ/~_^+|_?'%x ? h煿|~ym&lc״!M^2{[_[m~zV_}oߔ;Kk܄Nǔ%/~znn_W&cvk{$%3;?a[n~|N[_s,ۯ_g_Y׻q ?8Ͽ2U)k_})5UR(0r^~ZgK\c3ҏ*:R>hۦ#H5\~m_lRvX|׾[xsأ#|>+,~F:[M =|>כ&4k|d}n>~ϮǞcN>}yγ32? >TOJx*#?Oe3%>?kUxw9OC,\|gzTIq^021Gû;O>iw|GIag.|M@f?2 +| FdB3is124YY56j+Yj&,k>_Xc_CY-S권3Y$3O[L܉ <|lءl6>+` %Fܶbom`Ҙngak7so^-HV\ʢhrӋ֌jaHt[>h]jo -%%qGEt9qFrS`]-usx[֭sc'vm69`%=L?o/ianEXe:Țsӥx,\Օzls4ynau;2|9sq9,m$F7693OF50ȹ:74,p_0Ș;yk0 AƤ>{LK/(z~3:IYO^gva]e.`x>o@+/H.||L2&eb곏n` L>xpFZT]g.PgUĔQUaUcJ BgDjw֑-p({u{VYHgN&!r:Μ熥zt3A{ꀬk䏳&yP\O<밉}#Ow><Ͽ[^O V+J+@:>wp0L<\ u b!ڈq<]U /- ءq:&~}QCx*{$G:wVn^^D$<,]q_ǝw泞z h>$mY+{59$x Vxɹ)Τ`saLWsL{31>0J?7131-#v#31_; y3BY b;1?zI2T0V;z|nFZ%f sC]0AiXsp)\ѥ' Eϼ&[BܸxC-DJ=ʜ= `]CWhuL&#R;BSBԾ*7.N3b*ZuWemB;EN&kW؟h(Sչ 5wC4 3dDxg,qS%bp.̥ B*2)\0"t9H.hd&\;qneɊZ]$Guf **~젲;TT}[&HONY!Tql 4K!\5#Ϥq !=9'S)dY A/8t99 B+C.6 `:ȏUUV@!]؍CrUל MCqÁrgɁ)i(1!G ,ΑAǒ8TB\LآS'yYuAsf3be-`1Q4p1z,Y1~-6O r_^]־h#~zPT<ދ7Íf !~4$` ~“oL"y(# lpc,8p VObH1mBt08vFĔm r8* !ZDFh 1цC0r9mhLϞ3#ɐIcfqن|v$,s1 ilyhD\"GșӘ'h|& 돓DgLp  E 4rj[g5p~k2Z4x<fpp%y0Ow#L|Ռi&͜f 2;o:D݌8sr&x|4*h挌ffNJ<.i= wHefE*L388 8^m,Q_d&q2@'21e꘥(Q8u3zgdH-rd 81F+P%?a]- @;Z+AlF|\n+%IP\ϩX`la!ˁI{]#@xo)LC!9[qH0C,`HYc bQpGpTk`MCƒ4\4!9i*cvx (." CC@cuiɫQ rHsnmeLx=niR6H0Nj֕M?98D.Urqx;##mF&'u,3$#=1BX뉡Yh1n˟<#u0f_AgHfpсˈ=#v݅FAu zͩ@ݹ! 
t3I9>8gI19֋BIy1r%6# aDf1S\8:ML􅣎(7S=q sZ&pOfj;\Ա#-4"$K<8]:ʃO؂k=SGdF|:!_>Rțp,a #q4BS^sc+K_br.rW]wtG>;:Fh n` s ؗ (..>*٬QwWqtm1GI4|zqs;y] YtT1oFU#{b1cVćGQtYC -JW dH`v@qt"#F&# d _Ġp&OT2t2R=0ӧ(?ڱQڗV?#6 %"^+_ڞtIҧbP.ij,B,>hPލ=Df lLIIFg~ V,CE3^PxL1zF,/@,-(B-j!C^$h{Fo"9fJЩ愛k ٩T4E25Ʃئ}aWlS5LJ:D 8+Bcٞt:cЁ#S*^.tOh%Rlo@Q&Q#JqHS2&/J-_EPI̸-sɞqNh.Ǎ:a9PxH9K^2Ã='FwDá|ED 1b9Ϲ$k.!YHO2ME88@ݺkeUC Crtܷa ;-h1JCd` uVCap^>"7c%mS:ce+JfGr,|*rhHU/j>A{KT$y .Q))ek*y `ن(mX$4E`''Ou1R#H);ȆlbMˆ#1]S4vYh""4v=ݞPIwhyfFAIuC#F<K S# vWάEΑ +88|n_`gElgAH펃cIQbeVtFDĬ$xd_r4JM3!#tL~QLo PTD>XImX-fwIQ`JJ<)y+?`E.ժI9k5IӴl5B,Br!a㒚nu)h6/ds K|..b@򲚓lJpmAmqlp֕-8jC[=tj]'L+%l됐QXg[@x4,/n%"/ȭ.֚#[+؈k#rk[啃j-_xеM]+H쉇ڭ^ H5ѵnno]"ɩ؊`e`k^YMr퓥7©#VR+#^l=-bKԍjRɽܻj &-#c> # hAd-LgRrmss^\cJ=3⹝A őXX"E33-,@ Gh릛%4 ˆVyLth܍vDŖ0m\@<^<[E D zb`5=.0/ Q]`ކ0_`P껈6QMђ ?[4x!0cdk 1bް'>Lj#&uPB.B;}Ryýq>#ou'h&S# hEW`(TR/5k`G<a9P] @5Rdh% 1{k6KB5"DhzV61i2Ę\tJnyxYKRaX '2 OHO M #:@uOFuz^1jr'K"wįfFo8[ |d3h3 ZLĬ9u}9ypr1z!3EjFGj<AڭXXSvU (""SW@Ź$1>A6$tAvQd)32)}@uiJĥ8ue& gcd>6{Ozh]W]waPQ(͇͇oRxXo2R3»è{Dl{ m`%I4;S8&3{'8:3QDvl>A%>m8*^T_9G+{`M.;vsl hůV -yK_wDuۃ*V$7]N[.lj41:֞(E1Q`m]4˶T .#6JrohN>r`u,¹4ȅk7!n5Jz™?󐶣? Aby$rWF$5+2(j`~: 0G4 *4 ؀G:@PЉ@kر_еTPg{Bmgd>ecXu?`&_].2'"* ~$؅A] iJU99l #[+į.HU.H 0-9\!tAMAN2k= h lW$([E$(%A*EFtU.h8ݨZ~AT5ɲ؊'odQ(A2. 
ԛELoW Ha~^4܃* dl#_@F`G`ʀ} P2|2FQc&#+LohZ3#dDFIc1YC ]4\R8)Afysr (-FeOyS5+Pq;T:Ta'hnWӾ"`W8uYG8߹ ي߂@馵~J+9Fc%Ǡt13*(9Puk"d ]1Yc2A%+1yc1%bEKEc_\i7Eѝ-r>̾4炾 4R[|*m=[EQMRhb5qzJ ϊl7]zkYT.SRxT"DŽ+)ԆnMWrŨ,3++ oAQ059ķ(s?qE1|7 X~:d}]P4oAcE Sx ثgsra H}Dӥ |%HHjLM` =r GNH|1 I#'ZfHꉋ~1.(䂟~-?GSI=(t :|"Y=/ۦ&0& SLMuʣxdv9Q('xh~PCSO[6[SzAvlYP3Dr],vC1z{m;!!i?YU3/V~vK^feET>5 &ЫfjevEHZY 12 儎'N- +ѫը^Bz8S oNߋ%=)^B[oK;XK63i2r͛ΐgY7IϚd#k59ٟd#oqMwGqCy7Y%)"?z 2eP%rcκk Tf1a>'d>)CWK>d% m~k%*X -;+'$}Ly\֎H:fx$џ kEBgJST8H?EtOjrN:ȭ\-v좧%&m(n$$xFFқ%&b6Ii ;!q l6/ ϳ/hk˷u5BGJo ,I]z9[^G -ICj'o">$A/i!ol']d2P[&^/t7_,߆H:6Q4߼EG7o& ug:]1ݺ^.H X/+~d ݱ3jZ v [ <>5zM'%cnz~m.{>-4&@Mtt?5ٽYr R{ zٯ5}򐙮=)=y0[,Lpoz7m0իܡ/*Lpo,ewןxV!8;l-B6a|r2"_qp {bb8Bs{״oQ֎d5nSǐVv;A8ʃoGNЁ5E2jGqcҪ cm0fEލ1bnԎIv.+1j%wA.>=-9{aO:RY[rc1vQ;Frv O y@1N:r9b-1M gwKEth鈎{AjgZ"'W^td[x^ "{g{jKtou2_ǧ'/G(r[cvz>}ڻ&#6ٵTv=H@A[3RA~`~=UaؾYT+=/>#|6n t6 ,[(-E 3m :M"s#;c$윖Vn^v6NVE{_S13Lo䎳uke皴uegbfQmvAgL~Ñ/nGg`љޛ$`@ni3N^jqh3T.F68ݔ?ssNdjշptrq-EpY%$ŨX#RkІHn#&Uz2\~&ta,NF`6 o##Òq‘l|IUlhޘ.531Ѽ(#3Ᵽy/^;ih>D|&/ƕ"͏16t6ߞ60k27srG_9_řD%;8sҸhޒl8Y_g,yglPG(;J;,6x>- lZ]ԧKy}VD^Onvh:?5On v%~X^>-5;Δ XZʩ21OIT&?/{/ \Ŀ2aAxC##cD+1~dk 5HZ M.uƣzn:s]y}O^UlSwjf?A4@]V^j;+Gðh pj 6i|ZGu1Tt36`Gv(Th#p8_&o*DI)/W+K{{W֕uQX? $x=[F?vQ٩UFt &=gvY@3?`Jod(fY_J͎Ccc3xrB7(fb5#󌃩jm&_v`W##rRDTs7[~)>YlMk;s^x o `m'd}G,i6ub4?SG;Yُ̲ݡ'&/B>0$Ғ',Afjtb;.t9޼kLD E6dgW&/EgS@҆% p3;/;7#3{WL]f"M qRA<5yi)MII1wsuH&Okq}Q ^Jvh~l.m;A; G##O6/QO(8>@ᮅǍqu4jhR{k&#\ W0^oNtN];Zucv|-`;0ʈ!d]:Ĩk׋ICD>=5O}}A6;c+{]SCG{X^&C{,=V›+)пTǜMy MKf;5U6.qݏ|mKe(M_⮒3!Ƴtz$I#}|ARu7!{IH<}a{>cDkvl8=ҧ 6QFHLeOX-U;/vyeLju9[f%| ץC糯PQҼPM\#R> [[ӗ]hB׷r%P:~u:UiKM;Xv^gG3y8wpl M^zF&/ah:;dS̆rl m.--y}}=#ô؀Dz"Cu~,b{c|͆pbv\"'cν[9!W1ޭ0'd9%W& y kc<a~Y)OJ8/Qj}hq.}xy`w 5wM`;O:HXzjW1ԱT46JmxTպo;ތϔl*1axwcu5%uN曎- ?)շ7$^=| TSIl$>ϊؚY{o~47Y}ZJeud[ uY}fKVSKѱRVY}+ /Vj:Ķ3i%s\|>{H\|hN|3w$>OS#4Yz8;hu'-MpAؽMf]<(8gDL@l{}i)=iޛKSOMoVs6[G1A{yi>\f Q*׆ x~KzP4̈|$Doi|Pq`ML@H{b`yrN mt]t.0R C?x~҇eB?'ź2ҲS.Gk׈Z8$"HuˏJ"-+:Liͧ\ܼⴖk0dMZWx&nVO:"FG 'O px,?{Axƙ a8cZg2I03֚j! 
;k^o uYG5:/Y8i1A<0b?/Xb/z-w #1Bv~v1ɝĬ+Kfad<{'䴣fa9KnmKC hz,PxcW}9!s# 7Xo ST!:i+7Hix|:BXO)2tz(FO8&%\ONhL&0nEɎips j ɍ7SA]n۫I PϹc9+'"[,gbOq54ܰAN-n |OB_F|੬|^ÔNv{97˲> $}Cj̉ s&3G0ӣ؊sf3 %ێ ˨_|s$H#|r~N =y~;ڰYz#h|D(5z>]~G4zzCSlո}ݿpb>=~=}G |Zk<wӞػ1XN? hy1O;6ln u΀ϹRWp3cߚ9;bgLEcc_GrG9j=[ͩU@Yѥn@mAUfni:#mnM6c} +=&csK_#m1驱`i*K42=}Z?]%@УhM2}~IΒM2N,lH&YEldLrI.Zs-2 "^F?Ɍ %Dfa%]=]`KɇgMTkLv&| ݜNG\ ?Mƞ1(P FْS Cv+<&Oִ2I$@fa5 nKD%|bct (01T^ǘ- vR\27Vmb= v"z Dot:\'FvZ8ٯ'x#q[hV{yjh "Fӌ0S4K8Z|-]نOf6r_=`Œ&QܑkSSw-ckb/.Q{"rG7Y!]{54i !JĞ|ˆ$MWA7{=YzƵ{f}鵄%^$]Mh.ceC4?Њ|ݣF[ZaʼvP>s{n`&}v\>s36B5rfKgfuW4앮}533| rv~MC66wynRO@|` o?BjhZ4Zae;Mr XiѾW+%2zSף‚12W%Jҳy XX#ڽ;BnzG4Fs*]kD;9}9+.b7BinxhеYhhR=8\Sd<~<8YV}-FidZ"Dy-kwh7[k5\tSԲD cW+#ZãDt:\]3tZ]ْ`t67< f{'3 {쯜aD EFIv+ca/f"2AěYƙ #E#ȟpZծK "KyeG;52!蚪 FeOÆ2"^.CJaݛj&Z=M╾ wkGfcvhXw2aJ73aC w%)m˼SԴ$ƥn֩pH ~L K\ڇyzѰzXUº6%]zr@qKv{;iP1nifOXP;VA #) @oT i|.(jVӓIco^-"Wȟesgw?oG}ťdܕd+f{֍DXYz@\G=hH"[HB6ISkº!qaa "k c).!l(9NvC(+.Z`h0zB_BhwXdXim#VauLjV%4殐Dli\d'&\HDBsa1hێ[|>Y|c;Mn-=#݋_ 2Z -D<#r]G_6؎%_3itɞLlQ?9_W#zAjϖ*a|E@:3.~c|7*_Gma Ge>@;?>|_H3;)m?_PQگG+*}F4* ~0Gw_uGN&)>,`2wg왾[&ք0g}27D[h4#A'_Ju)_O~>ꇈE4bϣFU|/?G75ow/??߾o?__|~\xo/_o_W}o?o_/_?WrV?oY͔VwNyL /:~ %zao &ߩ-z\3PYDJMd/aMRTQHѪb7i )m,c! H.u4zFc iM.{S )+|wm(zچPOɰ.-ȱlhXqaIoCĕ?16ԎXE,.0޳a T<#vaZ^ƃ4gk<28$cjjIw*ULmjףMUZK%4;o 4UzP^2ɃnEm2GP *03oA4BT^ .&Bs)D'2"Bvp)) F2HF %CEĐ(1D:3"_lYsgdP.D6SCPf$]ǎS#Guȉ䂆a"BH6SGftũzy2FrSG@lHʡU [iÎ r::CG0qWeF\WqUgn!טH5\"j覤0[HU2PEn 3Fd$Ԇ]~(]qEjƀ%5;b}TëD1Ue'0U9 4#"[Њ3f<Fް*L"V9bf g fMg/聣ֶ%qFQjm;cQ?i3߳wkM2_xʟcH٭0}9o 1HŎϮQKFz<g.MCR[,E2d)qKtAQ pHߡQϤG`e~#4ph@Pa.T䦩j=i0h'!̇s^$Q K? 
Dm!q!߸H\ ۤ#6K2{M_H֊BHgO 0e%GelD87]Iڵ'4hĻG@.uMo} /A͐HMP juI}'v/{ YCء։Yq2Q`3wAAt0Z2Եuow)#;ơ6kPA+Hliլ֚bov9_g+o9:IulӀW&A7 XK"=,8ج3fq3: S/N:Fjڰ!LӖnX5' K6Gܝ7@[:pup#慿Rch{24s"aH2ct ,V RT9o?J6qҐƼ;L+J_L=* vP/+BӀ$fb 26k[PjWF.֮_Xk]%:5F|+m=i?\:S'mt7y3cՑUDVrx|/esi,RϊGWTpOX3NҪaftuk@ a;]nWC<*my5jExC'@ ԝہ[K-[ϲKw<ϝW53 NZ/NdX،|<3:փG(/=aNryf&)gLRulCwyzNzo3"=ЄZ^y#-Ǿ@}s$Ϛ-y󓝟w'H>onHFIxHtC [#s'@Q5.|RAȚ/h$'Y MI -=4uVl,"?/fag-kq祧C^zovbZ3:zaGhX ѯ-tI$s`~2bG:;Be|$h]^. ۡ9tT/AW\jo_^$qbֆ@|q3$@;;@#H$z0$Y{Öxm! TWƑiۤ)<3C෍\oXZ. gdꓗSad[eC3nӊ~ vo\hw 5qbLEsb_7|t"k {5qbœQ ?1p p"LI8DohrןKN`N6/0D3.g):;1l(\7,dNIJp7j/IΫkeP֕u{ X}w'K\ϭ/ϭ݈.ϥʇ:cǼ.gڈ'ڽ=ExxKM/93SXƕG|~o}I#/Xs yN6 y1i:JNvlSrYZ}fqڼzȫ'5db, n^A:8I$+WOk''LrbN)'ez0`*a%qGS׳+5iW8+@7(\Wds!sQمÚuN 1Oݰ+ o֯پʅĭ0В lr'^qWZhZ1ڌUq dpu{T4YgT Hy e?c`{0 "2P7LJJqOq^W7T]݀k2vfaԦ]]B!cujp]8Co Q-] {t:9#ws0L7@xP}6 Aؘn@Lf?wpmRtNkm%uΎ3˹q=T /#Ä ŏ hHK;W# V?‡!7 +lvD(ܯʰQf(1Tq3 .|tk+Bzs\3DW$iITYjQ5̚U#3js; We+jx+XGw i;wKW㱿wpNP̩Hi~:ȤoY;"\u5c: .NTN>E6s Z#<u1J]D&𲥑=5wRhdP H|"m5^v̸> uCrٝT9gE~`ٛIGfE4% $xxpNJh#=mtj:ziĐ@EkeH}n :q+uF.mC X^uPj!*]P/S\~Э;G;|K!tSHSDw؈#0f͚jO,F" 4$1T;Ik68Gk6ˎUvKtۓE.>Q)鵢>89nsQFS<ǫ]TVvIU.sy($}GIdz\ B<w;/ugm[WKM%/|D聥K«nRQWe+kuOW8mmwU^aWzvEPSvC}WӚ2LfWJs)mt;Łj9fj8*!Wwk2tA-'ُнi}I\pQyދ/Eƥܱ[~WԀY@ΧIvek4XB|Yx=f${ܒ5r#ʮJٓSN $nC@B+2{7ZBHY8رBmpdXq [ȡr+i#Ю oY'È1/*8ڥ[[3JZj*&ޚtCyTom:tsQ !zOSE.&ԨMgʭ1uDNV)\9Әpk<.\pövҏ\65wgYnͫzr_tMr,Ea+{Fqg),rr(A:e *sл"cX9 %LkBn" ͷȜMy.2z0lkv()XS -`Q[d.wȄ@:sF)/V书Xr]f߷̞l2SܛK+N co5-J̮%>J/:2Ui-err"w(]p99=ٖr# (S!$H2r^9ҙш$?#p90LPU؞J(8#j2dIZmXhڱ0yA, K֮&e{lH-`V#cw<Envhyc'7jbǾE8 ʪe}.yj6d+D~Du \__L? 
+ARgS~oYKZ͇¥D1dK$r8v-)k<V*=&zGV1t,㢚읻ҭ؇MPt`!ƹRX%  et tr(ݕ[QDSUmJYI;-+zmZ>WJr?Ё[_)*Y_\썉sz;i3XRj!5& -v{D|XLT' V)9NPה{Tf/Æ\',y3ׇ5g3IV uTZuJ:\ؠyPl+^[}UqӼ6AVVjV *՞j/c~jS:+P9:t3o\QJkSJSRyVYS%YE]zFW8gvyڠܖn]9x*ke&UWSW_xD鹨ti\)N][ Q |7v֐ƽBNFbBFڏdKaG7:{8 V'Kwx0!z`v#m=b6-j(Ėf()zpuĮa.sePO WV' \O{ wp]mFVS{_- brR@b-pc mpx֨=].z`Yx4n)akG V(wAsև=T@ob[bu+z5& 8\; #vRu͵iw^;:RZrt[)[ 6a8H\&^Mgz`z4U]fO4޽cdSǻޅ;K =htfո v׻i)w촮?55iMgrwiꌷշ]l86:]R|o\wso-ǷEIT;S Bro*֘EK{dg7K Upxkq.yY}c.9#IU/Xjm6M uDES^HZWǥskTIKG .[fOָckW,%-c]\ua6;[\C:ToqmvԜZ u-]X8q+k5-,؞;GMڼl+bh;Kծc[Y).֩l+뒴r+R􍷲K9:UGm_}GH VH:d x(M򃰶-^;_]݇4kfC\f-*o6F#[N~L_EG\26Fkw3!aRS7՚įKvV{0{8STٔhu֝+vZLQ}.GՌj!7렭פ:Uϖ9[_ I/lW022kMCc`r}br'KZX!J5s5vu.G,~3z#fzݴ: rƹe}QX1rPp(P&9gQRCHy-Gg'n6QlJjLLDJ[FKBiz^za:p$C.^7+\#'NyѓbBK%})Q(|uNu⼵===Ң)x;LN#{ɡ4%u)9U09;{B'1F9]wzPn X;#HG cV C66${/oҥ:m(%+G*>1U0,K5ݿvv:$*|Bs/;v@.7)Hto-U l%H)v 1\CEk%[&՚ _-G.h<5ֶRg ljփPg"H.jk=B%^*VeLڼ1v%޳zaeǎ(7@^909Ƹ튼Zy1Q!ͯ!8զ&t`yb##?ܥyTe; o"pi^=;xښʜs[.2L.Q|v|LÚ7͐UWojfɚ4q5?E 4<1ci<3i{(vq^&w);fH?]x߷Iۦ֧[Ẅhfjϭc/5);fv5Ӝv@?]e~5#B .% ҵPC$2quG(e`Eُ79A>痁c]=A;AݟuZro'ٍBBF EvwʹfBɬݦMzDo7%2/)Qm| 6p}KC 쮺w0HafGN0}"RXl$1kTKq!dPs0]mT"vp1mht|9̹.||ޱ/Z|5d-ܨw;jӰ~*; ;6d֚8l@bEd "t 7%8|>d9}p>YbQ]dxݦ.W'6\xT*#~d[N8qDy]{ mv(@vPdMW6#])%jO|І4s t }S֧UzgS}4GArQI6H7+MVP0wfgauG=!␤2<= A 5)-xfb4*85A E=v.=B܌|ki~1t+{D.Q¤h X(3e 2*2۽ @͐ dbEMd4`H&hMR4Dsf2-=2ɴз H` J^51 Ljy/4wvޭ1y /Mۼ dc88pz+0)ˑ,Dh?I \q.jRw璘=T ;bsPS) O&s NF%0ڃùPn;qSi=tq iV$k}W9J}==1p7ӝ6银;}q9I"z"AxbD.br9@M.D;σ!Ѡj4h,;(_јJVVFn끝C17.rB#M}z>=i҈u!4S;@ t캅\epY\g!%uYCChz5PxhUP 4Ln6D< ?z9ؐ|F&AFl0b (}6̴ȣKc8D>.'6Y; rb/Z-9). 2vѺ !G4 )*,Ц_p:ThFiϷ:hnEMDA%n|DA$6 蚏pɑMiYM9*" "iV)b2M/ 9?) 
zbYIK0J4$ZBRr}"\Z.zy82s@쇐YflCD雏M97C4̇aVD-(TFS$A4Iʥ>#{U%bE$UhCd?:KLl5=Р,1iKHե}~#Ҡ 3Рbi$f;ɚ)V4.]}ӛv.Z%i uЯ' ,h #Fl {F :q6MVys>!n;!S!Q>g,OҞ1Ss& mʺS'빻ScEבԩZD<'77uqYN8,`A4 svꍝuet4-pƤm`Lp0#k7c*o]^gkHi ߳_2ci-76ʄnOSq'd27@.NL r_Q-#L2푾GJ,4!do4nR71Tb+41ӕ_Z2+ot6&er`v+dd "L2B2c.u6TX;P tԦn uD&k\?!Brk=x L3V61n0 ؤAr:=ZGl99/H #(ˆe|Z`rؾ1d0jdk0jyZ|FFa22{=#J =e$vTԲAS襔JTalB[e5Vڙ-:f7e(-ә-CN&OLUؘ9*b*1#pgs\83m%sS|A S2VPB'6hj*i1`{Ѿ!u-GMp381u9t=ۄQQ5JcQUȢz7fDfIHAŽҨ4e* !`Qp 6!9sFAd:}H9SxmFiU$)(4.-L3f+UsFbDY{Eעyi^RVwuE8Kb֍FFUiئ^DQa(Oԋ|O #I627B73#^ QfH,|9J@OL\S[&it. ܋xz;fɹTJ+ ܋t `WVE)5^׿^ɟ~s/JmFTo0*lbO2{GXFx_Q+m΅}\Ggps.HUZrjGvH?:Vd\)k-DNg]y@hoѲ+Z'YRtDej/'n6"`c iYzZXvoUdh=  ݢ%NZ.6Ok! "Qa9<4.i훸"mqG e0 ˁ$(X9vٵʳSb)#2(f1Zv6}jVf]ly :>e$>F(MqKMF L =2cG 90r*epT)2SG,YeU}1\r9sp\8.OWMc̿2R0^~demvA^FVá #W#W1 cH=HT|.3>EbV hPX-$_{L,рp }c ]o}&0] bdŖ/jJ;h(D>l02Ov;0L_諲)/`|UX;&_Y=2¤+H-1IFN"ח,0ZS!0_r=`aUF" +*ɟFV6YrM z89 +'A%S凫%t;{DDJ#&WDc,K~{%sXٱ# k1*>hٚh'I|:†+U;6 |-!]#>eXQ*5bf4^ƙ@ y2Ե~WURb"VzeGrZZr 60+ uA=fbNy3(̕h=23Afr!6՘j;cfP-Lmh^p)J'#BK0ZCmG5Rm\GVM"tvkTZ{ӴJu$hH[ik:vۑtR4G`Uw$hZ nޑj׷؜v:fiAqZ5[-:innP8m$`DuPa,?`stYL/91iZh .#NL.E,Di9j> H-m4p:i}1GhtN;!dqS9^hʙG1 <ѱPܐQƀQ?04 AhZVA-z:{B;rӻ)6C++!K "1=4Bџt^vZvN~͓1gY[4osTi9 r-/Y9 n7N2OA%G g`XmZ}Aw1t JP8~c:1״Nz2ƙosim,ηs ۝le+d];Ra}6y@A؇F֑B jq`c=B,XC bYI,kո'# F9se&P_G+;=8of>H1`Vc=B #k;s31#.bjYw PJs1_ǀzG()`%q ؼ9)` @?X["i@yd}]#-RN@6B׫u !ȓv?Zb͉rBxtj{Īā 0(|kQ띙q(;2ya`-fg*Ң<80cā+8q vPtD|09$D< p42B"`Pwk^‚X*t%;vk p,RBl;"K#%TWSJKmk)!=:1 RN<1edž>g {b,!;:lw\ 6U "pBJ=#"jX6fbD.&ّ^K~#l,PwتHAL{igf^Z -_/1 R-:  Bqn=@@0J= oͮ|j7̶=qcbvX2mza;0dC̦1;o:ly( E*>XGFim~ =$tg|6"$Dҡ[d)M`6ufm$`}Y2@G3-FF-iA93&KH$ "[S՞ u(Gnp#\h'$Bڶ< B1h:$$%WFcK֤)Ȧib76~ٸ^EGkT@TE$:&%,${W^b/"b>8]ݣ5DD@UL$p*6;>DnDK$C݀WҼ6|BeZGnH9urBRNݏ9>WY`@N=qAl7}}6@QrF#7y!`3+"@6.֛Kfjv:|8^_L1MobٙvIJ/z#J2’ sdVdy2b< sɟĔ=R91fDX!9`Ě|@Xz/ ~2Xvt`9xXJǤXNKt6ܤ/#NG_ulm E$~8SL]*$݊6LO3/VY`#귌`e ~ &0"4FL:`^4;+לivoN!#}䘦߇L "5ndsMG|6VZt'6̛digQSʱMOJyO?KxRĆ~3=N[Z?, \b{l$ws QF 9d&3ZQ{-zɾوĪ\ _*`@ʝUWgRPR2_p{$t M\av/8sm/6%즸~$\F~ce2%;\[-e\CoYe&*(p'DI 
wr2?tbye&kQf*lJ@8Lnwg,%6YG2%faG.ze̘c.eb+ަ Tg9L$@V#,(r fȝNՠ] W}ڈ)ۮ޾\ $X[JgwJМR"0ܹĪVe;%.X2 .,#չe錧"ٖ5GĦČTՙ炮qiBSU2KY#x}nJB9rf٭ՇSUCHaQP&RG U(`Bdj<*&r TB˚}=I4 *%؟FW֢P -UhKpT sz*v/[εm AJhm+0L j[Z༁)9fL9828dHRtndJh1T@ԁ[vcy4fMI2׫~)I&83Eh 3MoHqCS̒<1{S繸mJ&mҎm"5%^˗md3jJ9:L95u3@MnNU?)fe/a9mʦp-#%~nFJI?^ׁHr`uSf|#R%b+ $؄y9N6J[=E8,˓!%Ȅ#.nBj*jqi_ bRew.lhI4aNkP2m" ӺCi4%֬u"K(b\yx'qr8WmBsUg_Q9Y (%5ry]fkimk4 Tzx։@zKv`#v[ G{9) phƻ #xb7|BX‡DT'Yv:== Y! 1X@ՔeG#rwZ^ltF/ r8x:88ǐMRX/ˋ! NxsŔk@xDهXGH/RG^f9Q61P$p$_( _ZNklp7f:o6[1U C)'<&3.N.0G{(pO#iV{QJteJ?! d>dAd0p.+u80pȆzfey1 o%+v2_Mcmns䀘Ϩߢuț{s@ܛ. beƀ6Y|)s x% )s xrZJgV(#28lݤoLwyOQB˼]Eg3liQ6= 'e  #AQY) *Bzb0 3RXÀr1jH# /f:*L3T dS]gԮ|cMaYO)@b?s~5u4P ۫  ɀK%7T8 IB3vZpRA4 &]0Xޘn#{$[: ‚LkmTxaj=?V Znbۿ0* @)AXݯ^0#nHWdd rc cG% B? J@D݁J4[0@% %ͩ)dn5+D%(ڞ[gJPɥ(ˇوV,AVPh XB# +hɓѱIAg%0ryZ3Y3i 0ZP@Zg!D=|q 7FDI3-"aťx)jixYlFw!s(h;{U0ra۳k2c3$[;+Џz;}@rn OvI}<Ģ02:[V0/j^1 n;,J qL >ȁj5&0KcOWM̀{C`$^E%Et]L0m%GuFxWFn Fn4#FV&1jO3TclaK“R(\!uX`x1y`\t R Nrŭ} D9՜zdlDLύ)xCaTe*:ҁ9^3wme@fBaKH>`Djk]`&<“5g#OhY@Ô/Y*+r /,֒Gĕ$#-i_)1Ӽw̴}.Rqǜumkm3zlQ !FW.JK=jtmEi4A ;wMRpM $ld6egڈXQFim:k [/ t(ܧ F9AYE *uh̹vzߐ1Z]ISB7--)K{L%.)C̿RڒNp6m* pELmIvѷp-{HxѡZl;m:(gV#fPXc%3u2\jjAAs ]f!D qqNv5KaHPTy6`A%SE D+dW3kn6uo&1Hv-Q$v?1urc9a2Y=R}[ fQ&E fab:-fkNPTB1 6Ї ^yi{ ]Ɏ^>4>[Αn/kyu?l9}r~×s]u/= t;/{.0k5!цHф B빪m=hVo .\Qh}ىmMw0uj'DžGa+LDO*o+>+;opGe7_; p "Ibo06C ͎74*%c(/JwQ@G2 |ŎY'0 :4vF E2COQ{)a9<p]@F&JCaVW]wa@Z)wﰃ@L6"$ *.5 K:^?ד$6e6YȖ/f/2{![ҹEOvXBe^f dSy_y+)u괈~, FIW^Tk@xNGv5_p]W޽f{^UmU@vKǾLdž:i#RkerԒ,ze&zY=_A1K#-NZWKI; @VrW";)Kv^mqA8|\$bі ;F\n,$Qq#.زvsyXIKu43ň읊Kv`g5:c رʳWOk\ m-YN^ !"?zCK&ʑ7WӹrqҮ2Y:}'d!̾8RB.N#yjY&;fZJ$D^}a[}^q)Ҵ5ʘ`rU+V6ƥ ^`޴cX^5]qŬ랭Wi\_˞Nie4Fg/N`കW)e˃+zih'.g<ʱ:dk;ឳ:t0ꀙmF,Wŀs >ܟ[5.p,XX0 :/&ĀckbUbk>&z DoSy}k8jOEPDo3m|.;q7u6˩V yZ*y"l,Tȧo3N(cTIR];}4 mT=Z9qfO.70 cl 3{Off5x51WMQ ͥ(3/b KGJ(m8I{FBѺ߆a#])M֌* fD{ )D+i!zlv%9 4+\FUaB;/An| _HH;篱lg+N*zd=J'IE=(8qNO,{*KsliEϘATO/-Ess$k0d]) b3Hs˝7GST?N)e<3o:v?vʁ1"֞Sʥ˞Sj>`>X)5k=S H5i'@ؐvZgqRЧzO ER2sO %-q9] f;f *aG*A?rriDb)==a>)=;|< SEc[l,tET9& 
eƯ3ԽG6{ӊDWT=Wt5H3_:Ya? Tۅqb(lm^6  )V-%UDE;5팥 J|'|<4ccKG^F/r<мȰ^|>#t78ُC53n-K3!i_:38ҹ$ZG43~<4J=]4~^y&ӌ}lИb|t⥆ .JşulQ͵ e|x[G~T"@x;U>?g!#JM#h[kyjM}k뷖Bxkw>)sʾٴ0>lh}?fߚ~V$J6ug =ل[IE'{Rs!=ٲrRw`\x ڹ|fX } څÎdOgHKxp2V K32ճd.1d:{._|~ n?-<`¾ӟgO*Nma_\GG'|]xR>KVH7nxģFnb`NGY1E8F$t9Ж.ا/VòFDX?z(>貖!.G*Z<xH8u#[~2Vbp(g^)O ݮ hooxe"!̤w_@%_O^n=żȀw7dN*7~G>?=ϟy~?_>{?>w?7_||yK߿w ~g?k7_?_o__OuJ#Ӆ8EuS tXcg/X[N߾Wc\ߖî:w&?~֤m$)NG侵|pG"*%O}|>DD?obݱ>Vbz&|g/?{>1i̧;UڟzyV^s/mk uͧI@O߿yຮ_߼^~xџ>}F>>>ֆGnNCNo>^hy:YG rpKQYpF;B:ݟBlKLÿoR-7?{k)}8^e8/9GO_}oWg%?}[O㍾Ϸ>zys_4LUj?_SZ_p-g?߆Sޱ]<"hO}SiO|F}p;ȹ7=Û޼ P%?R/={MZO>긿^7s{#r}:^÷9że؁|a+sNxн ޹otYgo^}ۿ_r<1( Z:Sܷ·/_~_v?eO?3}߷}k?>}߼ۅF^>jR[tg&Msϐv7=(E[ 7xN{a_W7_}wyO^y؛{| 9/{ qyʅ,upq'_4'xh`+ŏWo=7tgu/2'Ky} m=goR*Ou_mţݳ{n~3.{ȣ=6_{O|eyԧ5߲/W^QU?6ZW>]\h߭w4x̊ܫac~7E-3O4tO;>tax?/Րmj~sK1=1i\Dž~4wO=ȶX kxK{=e>ro|F@/B/$CU%Q3T>ezGяGo~Jܭl8rnЏEO^p+a[:s8]bd޷}=_@+{y _g{)~ao±^d7Ae| ظ+k\+}Yc..gD[K~iSYt$ +rA1z9t/42=H!4D6n=yo^15(Q/|61 D/[W\"$j>[݇T?66h+Dϟ^%3&{dz}qր?'O_Yſ=/?퇱\|Cg^{c]Ǔi9!#u---$$q/=v\wx =?׃BqI꩖~iE} _"Jmb2ω&;|7]??Pſ{ͻ_v /TӤskY> Wu= _-]ps[_ywշ~/}~|/zT8ބp}W_?Ϲ?{E慜4"_X_u|Wu )WIߡS-Vũ7k="~5WVK'_~IO˙ֆ{ςendstream endobj 336 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 676 >> stream xuP]HSq}ܦΙ@nެ|P\(%~w6u:54sSfdhF/~A RГD->s9 IDUUT[6kQa~nZ"43"^%&6-JU( a \sy:hewO\ٻZFPXx1??VŖ&rV[YPUް9=oZX7e5lyڼhkonWgn[.;תZl4vG !$n8(x$A>JDušt1?=+*VZwL990ԁLاakx$, L%+]{۽u{޻˲iY)C|>π?eƅ^c5#O*w:;N vLBހ7hXبR <sA@P ==jGsF4 > ,'%Xob !W._#2S gI:K1/@{܃7gθQA.&&J FO4d -)8Xx(nωbu&Oy@?ñ|1 '.<?#I?endstream endobj 337 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 404 >> stream xcd`ab`ddM,,IL6 JM/I,f!CG_VY~'Y3u0w =G{&4fFJʢ#c]] iTध_^竧_TSHJHISOSIP v Vp HAb&FFk3y24ի۝tGw5Jg/O=9jJMw /۞H.zzբ+fn;gnZr6tGuU%tugtY7qβ jfńJwr|e ~8O6{\XBy8yW͚3gr?φYzzzLOendstream endobj 338 0 obj << /Filter /FlateDecode /Length 6986 >> stream x\[sFv~_XoFN*UuvԺRVj<@!9\ȟ$ns.@w EyK"w.ݿ^6l_fw\]z!7U)nV\OĥθUWoZmt6oV?WZ]5ucM[2~7WZkdjS}qJWWM#T^s2نLVۼ-pufkouů0O/mm]M;^'99Tm2nV0*Nȧ`&yUz\[\f9}jROL~SC|<3Fm]uL_l+EXNZ ĝ{]qcdS8+c\j7FUaB,C?9TUП 
:y]|Z1yB6o|'f(k']]˖kI] )QnT}i>45h ϟV:b[F8mi~ bCy;Q3a9+{wA-ɫRE qpC#5qំpZs_җ>۫Y?\Kqڪ0os2)B` C]"&[,sJ F2>~s _!ֻ:<$49oS.یNIQyT(1J0`m%4LK ZC ]uBdˍ52uR+ }چ; ozcs+Iv<@SܤܴpmXJ{&'3U䙪Cli)yIɛ9kBf #=u-3pq"Q*u~P# %m!T `!9nL@Y h|3,IcFcF"g ='4aIю- )" `WHZH4" Z7)dSC3ۀ ͕2JBK00vowvC݉/1_M>o'mfYx=Ff5Ι2_[e]b+ Z8 D^˧xnvLW]6>n.,ZǸDyKC Ac0HPaB4'˴zΩ &G$aX+  \;fu ͻ\IIbeGzu5ˁ1x'med/X7J1mՅh utw)l~OfO17ℬ|:ëWhyKfm­a뉕3q{ѳB_s:#%Lg;`9cTS5ŷwr^G p*)"sHka.9R۰ C tsâtvKh5/$o"_L+;B'Z9 Y8Q  ئ%!rraKA5ioµ?z5w G.3 :%%Z9lY09< OZ|–47Nɹpb^^-]d{ضw}Jm5 p2`4TynsH3@sj-G֌pT*4CGOD= DBғak=1;K!M-23Ddc (6#"ݡtR# w1W :{>ǠGA mt 7sʀU6bPxXA[@"l.qpKz(%r8q{ہ={ϹE?]:vUsk2a/EfY{T39kЄON200DPp &$$?,Z JxT:>nd L/zP iok~Qߠ@1\Er`wCUpF ZRA%NF$"Nև~szVeWBrBHΪєYv0cS(,DoӚ\|q"a +F5;%3kN c^1xDDIpiYCV]\v}^Ļ:39fzLY#dH GڋzV&澪Ozl3?@gQ DVL%~C?(TQȍh<-$wO>oWCne3.ćLDr\I98 l/ ᅩbF/30 CuI}| T}-%ϰxs 3KsRk=U\2rX"_K_ةE % C(Ъ@?˺nm4KUJN b,vP)p”O0I/ i\Ftd)O>7oO@r!3j1| &i efp8, o힪c9)3&86,P8\n=זr6K#X1f <#NfS{Ex}Xsx3kEjE{ˡFx6{rP Lq_g6#'\饟Xć}hCQc=dܞpQ`nZQC]$3ɒ3^'i&Փx(D#=9e5[9M1^G/ 8 `Uix,9-@,0+E1W!ـ\Ry-oi &-$-#!D9☧Ás I<")x؞HmPg 犕 (cwP/ph\)-Tkj1VfJ&e|nMj hD \VnLIUOa6't 8M]hʫdL1v Ĵ~~ٕN:-$Foc{@ZY N.8L x>Fj`]Y,Q]m @2u}iQ@S% rM`> ׇezz[S. 1425Q/]I?n2e4s'HKy RP&9Kڀ57>ؤ}ROy#qה9܌G*3y1̕a($MKJ73-Ac3)ekO+p96IC%hHOHND%|Fʹπq o?Q9ȌXzEIJ&Hs뱨naѿ̱.S>RTKbzzl73uڃmj3^,|HP%'fgЪF WM7I2KGcY-U`qUjN!)=VPo?)uiT[qJwYb bӶs?,)@sxXOS%|i:Gǘhſ7t sl[W[gOun Ӂ:7Na!(}b>0 gn)̨1'jrS39K~:OiRMCwr mF:",\ndTg6|G:AaRw_xS\`B1g3gru@3(q a8aD '25ïJ)jD!W%{. 
y bp f?Ec.1Ƿ ShDqwv֭Um<6SOlP \1vD(oA؇ E^d1b+c`j@)R?nlteIX:+?C}IJ}eM,m`0ܤ0eջHǽh*ӫxXז:s$G6D$[ylVcr srb)OYr9/@Z꧳/_ZXfV@.]֚*.FN@Wgf>ӢڠOӘ2sm-Z:  I.>U`IfFu ^۪ɓVl]YiGk&eXBp AhL b0t+F8zkI63H1 Nnyb=?6VoGA|wz(ǜ^ 6!?L-*o^Ń(&=.ʁ&\ĿʕD `b)>/3O*hZhF/[OTwgGrzj6gL񗰖t 2ĸǚzdGcj3Ƶ@ vv9K$m܈a 4] =J!D]7r<%*c*-O+k\n\FI{7OB@z8&>\ +:oi39ąI(-a| 52-iC< ~[K8@,I텎R>y`#pX:>2Ǹ/rȾ-FR"p**Tôꛮ7:ԁ@.Vb& Q66_w>yTB/Ftܶ 6`}=by{8H"_.jmPendstream endobj 339 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 7199 >> stream xY \Le[ZnZkEuAEQQqw !$!@H}/V[[n %m}}U[k{w}w"Я{$oss5t0n[EĽ?_4$IoL>ZGU`^Ђ6~6:YMBEaT! Nw85; "@)8Q% q4Mx jdl!8O-4aMx kJax]9 G6n7򱹰|QC6dPJO,ZȰ-z"`ޥ}qE4.&J6Xᓮ1K}CS[gVP} aZg! |D̚\qlwv(_^p+ec?*juFJ[974V0m" "/`9-TW@M1$@ 0[ FPk?}nFo肨`Y_l-ZcAJdUD bXk-^^׻|p| Y&h}M%pHUZ]rLDþŀ]3+q?b৻+zɊ>c{ӦB\3+wya ;j4BQCfQs5iVE5 \n?ݎ;l r$[%bXjl*e wǎU\||ux#啾9ܳP,qW :#Gl@ӱTы^DiBЂ|m";aG`fзT4AH1A׭ΟڐVk8;\7T6YX3 F F?S %11'Vn^h߻ev hyMoEBO찅rbH 3A+p}ŋ گC%9}, L.t!Wn&fƷ cs%GÈvl7| t+_eAP_]iq~X_y{!lILmkk6!=򥽢,"⦞-#vIUӾ+=媍-֒.1Z"|Pؐ;/jqIxVM8[ Ҿ(Dѵkv׎Y~' 9[QՑCcťc+Ì\ -C]G/`ϊ%g%7Co`l\=cAUDK"!K?'M=q3MM7d삭򛌮L =M紲̘&0CIvY;Nͅw2yVt|׆lxƦ׏04vafRCEzzw_NY(u] Vc˻qa^l0}ޱm`صPz.XfWHJ{F eM )@V|1a~8]h-ÑO~\h!jsPF& =K 8jrLO.% לSHHRb\wݢ@W~gG2ޏ1Kyʽρ49 &VҌ]+`l\mމ)J2-RjJSkHVk``wXǧ̓⑟MЗ!W&$[WPgME^_*1ۋ}:ꐌݜ6QKйx8C,6Yj DWDU?mw`R?Qxndc@^` HI>W.8^ȰԺ#O=fѫb}{k+Jݒ2\(}4ˑy~ k饏"I͜kU(Z;ti_KHQ6#mnPIVHPo:L|z(>IԦ'oߢw|_5NҕͦrDA35;1l-j "!}(tݵO^Sp>&cŗmY^oDA}=ɬ1WJJ'̅yPdmO߫Zp n@c%wWlKJ6[mlhmv?_ϟ[>XBN}E:Qf2 Shv)-B&m#| o%k+<'6T6o%xgo0~|0?5|,x=ujoAH~b,-^Q)RmzSE5u(U(rV *:@B^,Wm0, uރ^%,mll7TT(Rmv$Bqih9c\@.rUNc3\#RC){b㿝4 ݄@'s<|$ԥO7]@ m[k3g&w|}EQ s e`2DB{;0ehL_h-z-<`lZSJ\DPO7rx>B)&(a/^r3S⌝>ɫ45uڼ32HoKх{~O"ţ%ĉd"/3#5e uU=g/?_bm¶ZXl0Ht{ٲ2@Ah{qs'Yò-^W^ iA9¼) k=?EQ\s)/ۼC A [P;,4,WcW1 EE-e;=b8ؔ8gf7g6ɰ#;H¯r V:[ vyK_h̠mh"zJwEuų"Aco<8^-&:j0 7 lKEw]*%NL`C{x?D@j1ZbC 7dxp%ތKrs6rs>6RtA#|9GcD@rb1x A=C~$3]hPe|]ڃ@NvbRt. 
.,k#bݝnqnGt](@x&LXx y3=*֗KDd=j.]Yڤ "y-y9KI6d#U\^!}7|] =C*6_-×ݹ EjД\'LxjoRgFrK_Ρo=ArwVTǰ6YVNS)&m+- 괆~i/숎#UG:9_䂳'FAGQoLp`%IDxծs @#ًRDՕr tUn> -NXNhS5av8'\B;;K.ބ<W/lh~LzG2?jMHUj}_cCNQEh"'pPMxBPx|`tnSf h"zq͚7]7${~q1(>CMJE !B1)Д7!'@.=†4)mqNDFJZ2+ +! d1q)!$@Fˇ\"@2_VFRJ_[k)+,AW]]1DѤZy$44}ViY:Tj2푄)AY;4Y2D:֋YchԆ! J'XC6KzQoA94R9mRf\ h+;CᕺdW Yxd4(-I&Bꃩ 2k, r؋!hh7ZDYMUL:]0zQ@) F&K}եƦΨ3ӉG=p샣_L: ;ӭSMN%6m`%?OLDrڒ$Ȁ`Ȍ&q=+RV$Wt` ӗ.Vnq#a-MNsM9O3vL:"N߼߃k2ts ~J_&%V^e!t,Vo.ݮї_[A")2?Kݼ2C)h_sEݨ '[͏I+Ї#H=6S R<*z Ζ:̜6ܠdҗ YM%jZ)HEd4d>+}ؼþ *5qҺDFȅu(D/Uu[c \anQ<`l<x뜩#cc"-$KHExxG*BeImE]ziTV*D4**rOMDR*D5ll|S\qq_,/)pk>b[_cI!'#={{$f~ӄ}쌲/ᄐ|]X#֏hOYM@lńOHJDV[:\"R(0>@D=&ʊ||,ߐO[F<7|Oa0rSXa0ٕ]m9cEQ> stream x\Yqsއƾ%MQ̓ICaa֪=jZ6kGX$#3kCyFőeU EuyÅo/ÿwWjlF\^].RjS*xUl]+s%]YFiUQb=F;WеZ)}mgYp:_aEJW$k[ZeaUW¬zZ\)a$%LYզ %Ej-`Ÿ_G!6 f.+iXY 8,V6׍'umq>q=hj'`vO5ChnEq7oo# aڦqgovi c7opط9S]HҊR:GGsͻ7NmؾņMy7I{zkn8W<֍ThW>q~3nG(8фvgcb N50 ^w~%nc,GLl3D;%GcaMqr BJ'omx|'Y|ꆇi`:YzMq1wkD ft^(sC4JGHa܇+>xb ?Xi O2<@zmgm+2p7yQPy)mF!⫴C-9;̏ݒQSnf" *]t߄=*Utw=S+Ӌ]˥{L4EE, D%upbtNkmkaKuYW65قGzێ=3m{?0p^l2N/V+0 Ǥ} g֌zπD N-c Kh~O>ÊRl4OPԉl[6Q> 3ߣ 76Hc :вH'\$pB\9T8lnwqZ%Lt.DtinAMci=+Hwjџ& T6.Ic25.vF d<#} &%_sЋ\0ѱP@TMly^94E"us9ke@ mɨvg#1#@h?~cYo@5H݆mOmgUg.`ǩJYK^ik`<(kIVk&I jgD[5e 5CY8_< q~K$Q;:aOq[mIw-4\@pQ1Tߑs@ts6jrP:zAD4ZH@͛Fa(ױ NF蠟|U]%늚:Y쀦";.kM-OmmbUчxc6 袰9$| NѣrdO~uPU]՜P,VdėZK8TRm/.uyi%08-Tg rJ83{(ZnxA.rT3e—$WYNײ4vdϹ5%rdLV7/M f llQ>Ƿ> EezM˅lq^QW7Lx RE_*]Օ$:B6ڇEX E% Mg&=K' S>ND.miL6vKZ51sj{"i)"$%n*?&9DPQ5ginq=wG ¿o1Srh^Bbye>JT'(/˜IDKl+d-|aeDR]L'iBfD̆; wqv#9fDG`mN@tM F$9orIl=JG,,\{*1#sJb1C<egb K I`(s_1c׳W0t* TiU8SطaV-=уѹ%hFVGmOuZؑ h`yS4:/-3,hbĮn =,S&;25u0~<C |!aIjާƗaY^4d3<Ц0 D toerG/Zd}͹r1tv aY9)y2[0pK%R:~!PKӞ)VBwHuAN:Xjw t~;b29EI˪eLOL\4Y,Ws’ cX#h aэGe/Qr5ibUdzue6>`>.eѯ, PyqZK!dsp[dJ#32_|wH K(NXsFpܱlpv,xGwˬ~~~H~)4jnvLQj@>M>˓ M[5~Cݎxgփ$?|vcH]Jv"Lx:ngdBvZTmU 8|س?E; gJW|'nٸ_ȁU4Woʮ)dC+C2_|eTU>Ű9L&*N;Xe|s,LHՔv{|_\X!//GA^Nڹ96o+%wTb-;ϸ-Aq*y jp;_PMq@ЯsDM54e\{** ȧX7F/SVI n5I_NUŬ0a^HZ6LH|Miqv'muECr;UG[K4ì<iP.~Vw 
NN=ڼ'c)"Ԟ=2+Z7S磡q{}p2^e6rYaX3#|F~&9ؔ1Y5c*Rݘ)zg_!6T` F88EZpCEa?VTII#l#}͐b35tBn8uΣe=3`²صqoۘGc?{0KnPA=.7Yw 9;E$u$J/eCv 4v-1ier(hv- s( TwOů>~hQy3Tɢ&5)#Q _ф,Ę740q1ӺCiQSGA<]륀>Ohx }FDLjyY$\ U1:镩 XVGV)ajr˧i-4݇:lzxZs+#1_?xCk4joQЦXHf vq j& }0~ڇ&ڽ\@ Hf$\a7I,g^TS8'ꬊ c(l⡟AGV^ƲI?X=qKQ|6**dE#Ñ=M{\VAK9F } R*2'7Srp]Z(Y© S[\5zTW'~OC~_xggp S~nQB^j{+H&k}0M46x{[b+6I %͇q@ [^g?~& cv|,>eوH;0s^azS> stream xcd`ab`dd M3 JM/I, f!C N 'fNaa9S{( ̌E% Fƺ@R!RIO+19;8;S!1/EKWO/(>јAe5|֟]~M_zw| ?w|7[KOq[oߜF k/]6wݪҹr?ͱt CwHz+Æʂ&Ʈis䎲=83ک@e8Mv/X[( *Q;Sw^+[ym-ýwRO<<z7O;}i=endstream endobj 342 0 obj << /Filter /FlateDecode /Length 2121 >> stream xX͏۸{0۱a7MEqCh[3V<̡y}$Exv-9$>~|a 2onÌf?z5NrRh|u;GȜrQ0XRBj?/hY`2.``{%~adYW![-$1jD,$j;Cj~̗LG4*DIe`޸(UIPNp3^`6]!]hHz"1DbAMb4ާ~=ևb)(/wd̶7n }Qo8mSTۓv5f +4Ug1D1nfhtոi̺jpR߇%6}y\Fj`3B .D9FZCBM}_M=E9ʍ*T]D]@rkl=908 &i  xDk800OR²TV]vpT8Qj%#K)1/^+_ 8Zl5d %ȤP\^K"gq3k9& f1=*YdZHK|@Z*zt )C >gԉ*W;I&$#h`R`5Ma%HĴ9^_g&b.˟y.ܤK HN`HfAm)i5jLO^7HV, \`+T1 .ШE0W.Aj\ep cIx]מz\7CL&s3,G81"g3)6^pCyT@ҝ&@)2Sg4lKnf_,-sjQV(D Sgp>B,mHzQ,!ʄp!<63#J(3Bpa^xJiGIe(zP)a+$O7((d0vzތa exV=) |֘p4&GonGl(uA&!)- ^ 34nƥ[<ȖUBص`!lTCv mۨo 6g_NY5ڷDLlcIz(m6K.EҮ(?\0`jx4!25NI WI A6|8q#ދ1_wݎ;Gڬ 4VO,LΆ\>u~/B_K1;x/*~jm=|nhM ߌ]h(͓O:Ƨ+yΐ bgV+gV0R=vŎOᶁt04]l8FƬF?Au1fT.]/y7],,sԟ71nRܷΩ~•ntr )onj̫"D:NYP)I֜M7~=i ʖa"$u_u3MxKS<ٙ'fybO`NjW;]=E}0 |Yv0ޱeGQ- pw|Ns O)G߷G j]6D`cYCw2Do܍&~Yqz~ vfSU&;@7l-[4laz܈VsYHj$cD\bei(qݔgMtT@.?gШ:3Nb;&J㋲RpPggJ)S-w۬(Xm[;_7W^Y 1i]L~uFgiMp.Qpۙ䧾>ܝy/7boʳ,y 1 T'?}lfLӶZ+ӟURK'dИb*eۨ NL} öc;T6ͥo -_ E4j`*:67I/?q 2Y=W]uDkrae} s,endstream endobj 343 0 obj << /Filter /FlateDecode /Length 7979 >> stream x\]o$u}_H %p+Yud! 
bÆc<[lهs%lIlblCVח]N}}s1]I^\C@vw݅^.1o/_\lՋ?oӼ7_nմbnZ6\)nv]k~(yxsu.sN ӕ/ira|Ǩ0SCwiqKL82q^?9\wu۸ kuWoݟ>\ůh˻eTxs>lӒ/C gV-nA7ǒͲK7;Y_C21OtLG&~6aJO˒74Oisp{{a tc'__l^ݾٟwWo߽{trn+w&y/1`w[۹ ]n.o/b&$s.msA fmΗu- f--3 sl–ɸE` Ƒ팆e;iY=lS:{E^` ӴiA–2'.-iu`Ol B:; <3Eȸ새|y0m4H!)l&' Cm̖ղ[`m-.-Kȵa 2 $X MV;c4 4mыW&a[%m9Jq!3زD( Ɠ6Z9% Dmz>~؂hD\%/[`"#cFm<5m؂%qX.M3/^I)l蕄j'E0M 1vel) шyMƄ>wz,Xe<"} 6ڂ9ܺ0e[xkFyH,AKZ8YR!!e~D245˂(K]V,N@'.Xܬ}P r&F,AA=YZ.ੁ$G$TA a^IH X&DiY6f.4Il$nj]E{Bhz"|^ philA0RqGDg^EiAhH0)LL6qA༵8D#*@ HL҂PI30W!<46*&)F,3t0@b5Ek3ؚAZu'ٮ3O,A!2*Xx%J'$),5M(%tV 2L:$BǠ슥}ũLLK=)(wpoeBbbmQ3Ԗ`5nq7R~=|ڥB<P*Z*XRVMV#kIHIZ۬n%6@?_Z5»[Z 7XEԺjN=?Z[ D)VaD$RtDĂ *O2Xʍ)#ԉ#7oUr147LoFi0U2Hr-⼸gSBq $T @ 3U]YkDyu6ƥ\^sV'+'a3X1ɘ 7+au3V3Aۛdb$`ozU䘤!Ǒ4Ճ̨ #H3'8UVX,=PC6E5!k0A÷7O^]%^apk`OfæQ4?@*fdd+ jL.nAڸ0GM@*(!!Jk&C 5ajVtA i(XM>#ph.T" @od @_kLLư VsƧ Q` Pǔ8DꪈfˎoCTtF=R~GGj%,P-/(`f&] &-5 ug@L$J<@Ro&ckQ$ZM`Ci:8F_ җQ/dJ*ΓZ+/ʀeNFN F<-UNRq,M/RW| mI`UVVYR< !Y)ɲ\NI. z$YWfW-LO/)Ul8 ` $сSMN<d0tD>zz@`F]&jM:ȔTT)tSX>FKSaz)5brR-ij. <à}vJXh-$3q+Cgr+E5=ڷZ@/h$f? p%1:{/ \ģG e 3^ظMoZ)IH(&k`XrX*֤-\AE1"lhvְKZmQ9k5VY_ݬ-UdjK߫bګtjZKŁPfpQK | _‰2?Q=Zg93[P~{V,ȻǷ{ .wy; 'EM뻇SDG`2YQ@9-hs>;M48aIDAƋP$DL*'b!7W0T{:L/yinI"AI<'XB͞D\$;#eX;i6{3%c3'2LZ z2-5R[2-57Y=([Ŷg [ш[@qH&My#t(S#,-YNV%bmݬI"\]&}~ay I.p1Ya^`7 y$e¢LVwj_9fwOzU`^I-H3i-^( 6&j*h;Y>O"8#RC&H-&8Db^l?0YI{@s$b^Lɋ UVk6ܷrMUV@eUUҸ'WUYڅ2+R*lJt.zYS^N3MSRJKC4I SOHC4%{іb1%)NY.HIJ; FdQf:1K&()Jg+l)t.oa|F4f;i!n%xy2.D<¸ҋmQIl{H ?49U(JK txBF9.F> Hs1]tVeѫ Ȏ%y "ZEWXj2qp>ur;\d.S6 ^*%C BLtTKnkpHށxlxm`KES8[i[oz`5Ed;YJcP&̓Т ֌ʱ-Vπn^^ U ȥR"uƆx 8t(v-–! 
ؚ=>JSMA[)d3qq_z@`8CCv0]qcML a~8faqEcrr  oa3ɓQ>"Who_ġ$s sj^wi6ֺš6 3j5e(,=iK m97SoϽS5`m1nJ:7v1+ë[+E3ҥ1zZ3OlC2=r2/RFb$uE w;#Q$iF#`+>]dDRq)MNP;z;Rt b5s-I\c뤾j#ƓT3A=# FjA.Ty}62&AǢdJF(1)+JcMSDeVtdM I+pb R&)9 MSsSqq*d{ф弱~vuN `˖P\NuXtso-iFUSKsd+Cap0ܠ5r{,„t$´>,v}S}TRiY To`()M0ʤJCBLnHz e >P#+]n +D++<$HCj*uMLG!U9K3oc;_;#7jDY:xr5ԔrTFג.a-k*h-}vVGRcj xgbi=aZ=3DwbJ@PlqUae1hLG="Pkԧ >WU4TU|oxS|OsŐKv[Bm+;)v]/k{)E=h氖e-O!9Y:h 3 䢶bv-kD*N5N-Z<@vQď9EڈApQ= O>|} 0G@dsS2oHnO`J^Nj͡ꇎH$J|JTFrz|41񉞙;5sn-^պ^%.IֱE*gɅϳĖ>>drSXh7k>hefJqB5n- w7X#*$9Tao,:hX̅~?vlo|滉~w;kou.f4xżxӏ{Wߴs_^\_w3G9p={/6ڛo6 ho_P@_^e^ 5.\OI.*6"l(Zw[BuaW[$A|]/k!QcC͛x)?%wFo>T$oN>f^ e,~spktgOyF͝ SǵU[}_7{|pXjy``X|U~|cuw׿ۺ]:E}O~DY2y&+2oښ/6;$ 𺻱NfQ˱%LNu:3 nĩ.!'F\ đә~x.*n8 |Td'bWyߒDP_2~m74"Tlx ; C/y\{>yY_ifwzkRvc+/;Ӳ>|g ͛b%H#|WS8{Xo7%͹rhWnD"@YZ|ů/^mRy2zx> @"#" =jTp]|b@RnygPp iŇ /7u|/?)awVǻW^ퟮ}nVOu40#쾘|?`ŏzGF~?a,d$'nvtn0uۻS6دJ dvs鬩deT 7]lvQ|JХ&z'Q*d@t vBear9@*, iuג6z8<; uΚb>7+\r>%q-FjzA`3B95 uFOold+quߊ{#t]iTEW-y,:\J7\=7*p䝿9eGWQtCLQxZf%Y~4¼>E9S/O-\x$k]~?o};ݬ?޲)yϩЩxNM`Ws yāB83sZq[|F5}}N~D M;Te7fy_+hwV7)ԟ&?nKKw~K;* ,4gR{ ׭R_02ukA!,kmqQԛu0OrC ?e]#\ݮQtN$f"LK/>goǾ#?s% . Cnһۧ/[ ȔF_x>Ҍh(#s8Q}hApNl>f(1 GDzXC=j:g*\:.L9ٙx" 3>.RlNj#ǛT43Vc9zbH[ED۟ij3;O\y?@ruJpƤŃU\z)#=>>G& Ռ #խƑ&Je?Oqsj%"$֍-6Î+ʪ2`q2zl~Zsշ/frgȨq[@T6:fS?ֱvM#0@2$5ӄ:FnkB#z[͙ZK;> ,È,0J#$xB0Տ#o9$_Յg}S\>GZ dPTḫߕDlhrշqdZY{a?F..ݚ Afo'hj"pp$ڔ͛igڐ5f D67/7޽ck-'VNl OU~FN߾X׻ut#BB|$I .ծYnSow_o@oi%l861g:W%( g8 3YGe.E@GnO |9E$5(ː=OIP{1#mU*z--<ot7d1~t̖Y ]?ɺć˼ p~,{?i!B-[9ͣóQ/Q21>KocȗPLmu哓?id 3T["`9Зe[kmWǃFY~-ȍf};V*9;E=/:59fq evx*0d8;?[$-VoF. Gy9=~ W<¨ayͫЕI'r iBȏFd#ϙXOtqsAԖ]qv<ڻ_'z_>hE ?2).` \Mx( jyZVJ==u9wc|.a]GQendstream endobj 344 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3564 >> stream xViTS1Q nm9U[GZ8T)2O @S$9  fAP V-*+zZo{Y?=kݻwW#ɷ>0ŲDsq,Glli=ަ\eav(; 6S""]n7(*"e-!+yqQ0}^o^8\cI@ J'K=߸uϷe_~3 ;yb^7b~?}cn!0v;Ǿ<2sĖc$[0'[0[`b:kV *d{llnm,%QJLeo{PuE*ox=Pz6 fWH='H+l[1d=F!:) #=\(+W02rLlډ#ܧ"? 
%J%2@K҃)՞~4_$fȳĩљ Kkb{\oD.s̴vXӳP͗T (-Jre\}zyN jsk!@|0#2V2)DC$BZ\T˘82 zkɕL=2~d+d)eʼn"@8c9)j,=ZbCb#&r%ȵ+-g9rUM1װɫ֓ӍbmZϴ"-Ibڟ\ NJɏK)J4*]l2r3,1pO7ݦ`ouFc`eӢd'8_QB7h[~+\AT*c3vOZ)~EU)՜"P!g0Qgw݅:ʮU:3YzE3"*cA OQX̫%b]KQ`:ޥ>)_UhB[҂"wpʩ~YˌS>=F;FO`FD{%Ӥ9HTT^`sҍO@l:Xl4 ԅj*Qa 8䑚'K@43-!{k'{W.ma6#KL )h p]Юo7KO0HHfʒ䪕y j=jB8,RVVR/!L7Vˊ ʅuTҁ$#-6=!)E C;8;zd{+Ë^e*J@su (ܲd9C8\LڱO~1P+TZ6ُrM)p6t\u K~$p̻DA\\WԔXG,v9ZYzpsEx\S K1x\'~K,aJrfBwj$e TknH)>|V}JXi!gx(< /%@pᅛ{Q&?p3v 3~oy?h,Pɐb * kJ/ p}6RuK4"ch~hV)c~ RZd&! JbZ-|*ԍ,X uH_pNC8@v-hEΐk1p Uw xœ ycG 0.oR FͱI ԃw<;uzڒ]rOS[i)|9Ǧ#20K8vF{AJo%fS_i輾ol IN:L[n 9:"zߝCYr Ayٞm+[:zD T7:ѧ,HO1 9@w씥KOR2xBr YY*t+2.q!>Nߓ,1FqI" ހhCӵkT(S\G kUKʹ,x#J:a{=U`󾠮[K~\H|B{eF)(wz qm>m|-b}yxM[?#֮ë>{rrD_rS?!#q澫: ͐"1{~_aGZ5h3oq1 m*n}z.p5wB;-ڶFK~ z冡6pΓ 97K%LI#chfm#Umh(kO;2?X!F_ ?122\?O?=r{/\kzO5cʫ-NM[ǘ%», ɻ4F]959_py?6_`ɔ8=s7EkobzK)<[Z|Qib 8Cendstream endobj 345 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 @UJ. 2D! }Ik6|/`,@[ 4ZebY8)/dwSj"<՗*:MWHAH)1Oٝ57h*q\B in X3 rTendstream endobj 346 0 obj << /Filter /FlateDecode /Length 2547 >> stream xYK/^RqV|e)QJcytSY;6ɡC6oO7]IN\:3طSZ)vBדN__ o*K-^\M65lZBM/gXeJI.V7ɌYhEay7SjE]z9aFVR/RHr;㺲԰,&gI;cVpɷLfh7A [K2Nfsn৶O6:&f6j??Ǚ4#~~{-|έjcs*~fsjv;+ς4'UvrvHrـ_ay%iviê= }s!+%ΙܛȢN0o,~Z"+Mmk:}b\rJ h0ڻ ?%иLQ|kv8?.E I1Zh &da™zEW )֜ m^AeW362^W_$-Q ŞkQ!2::)!4ItZ*AQXP%r[ZX#G-ZOV)9A#;a}%4ꊂ{ ̱`MGFRWAFH?AXDgqʀ v3$EOBy>ŦT /,,9.N4 ;"( =\(IjM?l] ݭQ=29 au6T H>ny:Y_!~1vY(P>2)MRx,B/ݬZQ3 BAvE[6R`WNv07^t+]xm`5)u,qn\SɞDk~7$ڢkTkw 5\Jerv=Z.!kL"R`ae^,~uJy.M?6D~}P3{'X խd^|g$ c." 
%4}\n yui9::2u;!p˜wK8Tw]em)i:ӳ βg&~Ws}3%cSUJ0#zf V@S,~B8)Ə%,}QmJ * qXaZ´|f,Nb߹ @B4L_C`Tk]ⱘyLvgaHPz V*}哉!k}BEI|^ǛpRuIL1M{w \e."Ϟhxdy?A͒oA.~H/yQ܁g15KZ x|Ya<3 UOW>r:ܞ`i!*YЮ~}H>s|@^kW!:.F!AQ ^3yap{0b%1["/ ql,d]u>.$|eB %}f }{%H/)_$wxs{DVcs΍ihgn$;\r/ȅFuQyUb(+-Ld:s31p0Jo' n%# f~%~1vY%lZ?8eIW6 z#k"%ݱwmTvw ?l' }JرP(\Q܄秗>}-4E~`Ο&Ͷ~us<fTp|_07͗A޹0M>kzzgb{yE \WhQ> ļzún10P #?6f}ȐȮ}ĭev-"S;xS)S+W7;G=pf!wa^=NZOL #PA_<q_~}$|qzɋgA)V)Ś` NJ"mΣbo:XTPw'~vwhxS=ṋcBdOϮ*FQPM^4*&:,\)\ T -$wjtF`W)SqZ>0ZOjԊ^bT =& &:S+ΣR7wZwzZ/'%6?a(ΰ( z~ ̍Q10a-a4M !EVOwT8!ſ=tnÊu{6#f| ГsKՕʲ2=#ُLA scSqHN$`0ʲ/(a,̷ӃԹK& endstream endobj 347 0 obj << /Filter /FlateDecode /Length 7849 >> stream x\ێ\u}/$%Q2;!vfQ`-nMu2|y$Y=kYylfl޼{Eϛ/7As^6/>*8*o^}7If;۸y:{Ki:Z +|P7~D&mzylZ/28-oS,t6w ľ"Ƶyvo܌m8 O /r;-yQsڼճ?|b1qR1׻x}>T[Gb̋zKh;_ ~y:ݞve_^Cm; 0cۿ;V,:UA%~7 ZgfהG`h;}>uc:>ȗ:;V}ۦhY7>TQeW % ڒj[:J39_]븎׻{Z7mJ:ZzXZ/+ebn7Cݛ]c:]_c7|ޜUG:eRp޼o֢6"tӻ7QgU1˴pmssɠn8HP./ Χؑ-tcecZ+Nol}_Jʸ䒔_xWJ/" 3hqىd5L@;>iQ3:$(#xxw\agcF<vk`B0q:|꼽?J6W7}|d.-n9 V&8t4nq*4ͅ=?ܞŧ!%^9c\Ծ#Fp(9o$C?^^w?y4P&4ZsFT.G˲daEqiow ۉ;,@Tcۣ 9;?;얝C(m7`%alr'$@SiUo$QY1o(\u VZ ih\%!-%O=\S`OX v$іo-32B$`pnsu !"6>1E]3i}hE !<; A|0>9bt{8<)@Llr'IzPZа>4h B^wyWcy85!{AFOU 礤:9`|a:4t4ѱ.o)rξX) =5DZBⓄqGL6?^&9Z/ `Eiul|~w4ۖ|>x+RX9`rkRkA"jSq6L{3@2n_*o O … \ q{t2iXK=10&G#~u.Ui^KG8nAcp,ޝv=Byd'Cim4n0] t{Ұǁ]v)ͽ{5ݟw]Xi1gGao~%ᓤ0/o8(aĝc[_|g77.,lz-P bՆ|Y7&DWMbLR: zpIҙ;hv I~jk zm2T=JPdT)^E)%He<daX_pqRY#V;˟ۯtR$nAVRܪ^0TřXzu&)5 %8DG+|D]LS_NY Ւyˋ=Ϋ>W7vo\ex,ph0$?.n?,F~rj@z7@lEk=m(h%ي'k:JJJs B݀qCwR<_z",G֔9JwdV[Haw:cu@CÆ:zΌ2XF "$z&Qjvqݹ݈]ʇޞ۳w(s:j˗+JV@,p_aA,$ͻ;iZ<6KJyk 2+Rj?77m@@Ynl2y &9lbg$ ,"=R+!@" <~6\% 7ZHT ^O( Xpk+ "m#$H9fE\ϓu"As6J6/[ ]0bLA#lREFK* TbƋ$&hYZ &Gۙ1J$vZ["SdDbc8 @ VzX#&.T,ZBu+wӔD?9LJ@N,% 0Yv" DDp˄ ̀}_2azT0 :vMX5ЈE] K2Rvb ̠Jƨɬ2kPln]j_Q{ءL5xmfֆsV%cI E׉+(}$TD6o0piYUJxb)@rT6OI4EŘVBP{5c*eƸR)bB 6ä!7Q" t+;]HJK!\h4 VUqSf USU)j%խa䘃U;1.D2%!Qr,u0cj٠,z$#B5"RIZܙFF^bY Xq3K^cT8(*6:8eB0R+-XUw} 0U:j\JJ->k& ҕ:s`3N:3*LDs ;efZpiL%eQaG I3̭j$&ΐV ] =+&k=άMBmo&czҹkdT5W/$(+I,zw]]d,$QJ &#`JxLM %NH6#1>o}hR45:c.:{%kly>ӂ>϶` 
fbfP}B-ʩIg@}쪛[֮72A491O3YCͶ3w |& ΨI{|S;XF95vWhY^"fhp`dCXf&iQlWQLRIAF륧f+4 `(A!ar0(nxϚN*&dN8@,FvC]bag'pjd66jlX]!kӲw!ë rWΝ[ZreN̢ȭֈM;\!|*|RAIG+Ѕr_#`G \Th-YA4SX@0Z +9@r[JKϖͿ߃ ԲJہ2šg%$Ptyb"4=2 0?:nօiד@j;"ֆHڤ!!5cCQj$>FH(m5l63?7 ВZ[!8@(@ͦ@[ѥFqphװkq{bl((TmkPkQq倰΅ L>P9 #|.- d[zjnccJueT 2$bE$VxKXY7dk!-ڄ0?:@btVŸRKd ~;X *)V]L?\I]!(FPncW\\*XbQ:X񆲌RdTGGTQG\5o_؟ 66VfW_TfWᢴ@V7s7RW߼T,Ub%A`!GWR%`) (Z=xoViҭDT;^Z6f^vJѪü${"sn~zldY=[XhGm&6]o:g!mCA3ˑ;ϙ!E(S'LnR*Zc_HR[:uPGJ K~\BD= o΍ ԖN=v֒gA]&YK!lH]KTT>,K\+A;^H`4\Gs('@ϻ7@8Q}̓M^! $dxpp,!`cq' HnU d@Ϥ3:jD2!k, bU&(N'BPQǨ25Z8E :ε `t$n1ZݷvBg; 2tKP1JJwSQ,7Y Љv4jkh#+В%kN>K:1NH6f[:mB b iBIERzϺbSXwz0J[-~%Z8H%8]:d8]?af;dDlLNH)x͋ H pL7=TNZ+xؤ#D[;2 }#R9&FF3ZD7H?-b'@2tL. vLGS=p$q&!XrvJGؓ˴SPZXKf}.[:h$`*rH}Վ̾C4\f ht5rÅ/GA-Xyvk2O+ zR;vKK9`{'k}%Swh {;iḜ)xiz$9ih'M]8uԖ_89,C{T+AjU OߌriJoU#J哫.y,y_'\cYVh\n0K;#mBj j5N ]*1Qcd;: ^C&SM K~B6\C4Vac~'O-:*Z!4AsQ |Ӭ`(Dsm1x`}<9bX%iXюƝ/#n%ZIMW 2d-Gĩ+JRidIsZn9X2O8 tOyG \>W)b_s -'pk" yX Ǯqm8abL j?qpՏVܳ=~,< =)twH^ifԲ('0;Mr :H\3W}˽Uz~x:Z;Z_ Ƨ\~K`iI'BWoyE:i}24ixG.(9=#wKA-Iךv7k1#rV q[xtʫ Tʁӧ'XoOJnM=u><Տ0hdՇ{A$-:$ۧR{Rg}wPp9wS/}{sF<>x-GBjqF+8nG^ё N[1<] wـ7|qigNXk\#⧏Otp=rA!m^}L=4Kc9@yi"jfz~=W}?H*]Te׀>G}_Oj~6Os+IMO8UW_'- d3[2> stream xK[v A.4}w= +O~$w??-e_f}޿yY)av|R4;Gq[WOuq?;wBm9xG},Ǿ3_W}jW_s_N_CW4^'O򡖻V|wi]Zx(˻{$ӭߟOՓ!}jeO=ujm-߽|#a==/e3 R 6鉞›IxȋOF:f|Ss|xGkψJ#s%zW4xH}ڭaRе*~$Yc_;lt Z_:*溿]ǹY7KK:p#4 p$w3 ͊Qrd;̾60!_[cJNj#J|;sϼ6>w\~i^43ѝcd 4fdՔ~k2d02W j</}F]oFL¯1<~u]"U DV20g?=/f疣 n-W+:5tc`R#'o[L\j[ITkkOދ푽 'p}bzc`BoZ lG=ޯ_ymrҿ_=2}z3>1{s~}>*F z:Nړ1XeRi?\[:Mg?L3oD#].޷p3-h KAhVR{${NlvVh#~Ҥ} Ǟ+ٱgY[.^tcK}7yC"Qy]P4A-u/~p C_O4!^(-H'bfaڷ+9Ao[~2Ir7ܷ ,# JX( =Ys(eSt&J ptcd=ℰ^ U4 =L' MSoy?! 
CUfݷڻ# =d"սw!GL~ߎ97sde"@0YwlјWa9D푅)/䗗gt[$Y v{f C Jq~q0[ aIBh;~n9f/Dx CYx KmQ ѝSu@<1[]N<&kOz)6͡Ee4PXax U)8=u`:bk1|0k>C fG@)NE2hA-)-Tו`{rJ`Ϲ5n2B]e}{H8Խh2sld7%#F;*Ka3&Lho@hICi F}Yh(NdE7^a:1Pt^P˽ 2FŁZJ jyNW7PRXwm?fqR^5JXLeZ  4W@E_z&=.VOwaӢ7HY>E%.@;W/G[L˽,#,E^Qe%AVd.Eh_qɠ [NF!.%x-{(Dr dҶc8ed3W vN(V:|,F+#2_\8cp26*,dΰ+{:\E0hLu!*c,EcX~I < `ʾ5zbpc-M,A L5&h{Hs;E(w`7AvG/!"Pi/vAA<@@YFxj $MI55cZ:%jP&!름KGzQ§`'hk#CDQPY042i殰 Hn$[ܑ&H]ArC K"+#{I"vCY2#Ƽ)䘡>W‰yiW98Y8=('Gb!.%B݈`!#(Z&'uH, C2'^ (mIW'}A0eգA+%YBޙG4MIi#Y)V1`6-wlj#Q87=P܉<p@Y3PbY5fHqYGbzbAJM6CL˳^F<|ᝎ F_fm _}WOcVFck8W0a/yIQ˃c3a HuKzJk1KDq.<2_8BL#U_țL` \I$J(O0}h/Y*8`j&2V Cj)"bjR>C-N󕐵GNE-eQƘ&N[HʘMwSB(R}SHP tq@B[wq B`Q5a2Y4A*<(N@B*1OZEHK^y3s)6Gmb/'L:M@#BXȐ\S&:))iɰV Ew9"FrEAţ)TĈ$h *CEL3:=bًJ JZJS `T8DzBCx O5$pYv!RS:7w: E:O kJ peQ8Ŀ[NrSgࢠ ~- od71b\2YX!zXk|/>=},AJC4v5X ܔ^X¹W+8F(XeVNyGi9H#?˾9P? '~IuacUh(㐳ǪEi.0+ƍ.h0Axъ@#`z N9!& E)Ɓ%UdOOpEF͞ /JThȹ)Efs`da0q4߁!OBZL44p,fυDBsa=X鄝Μ>l2.j$ :8..(sᑣ@45rQOkp˸"A)P@ҋ3$h 3?@&1mi5 tՇ dK.?܊|@TH>񤬗À`NV=7RkDF)ؽ)~{8S$ijBȎB*%YAt4CR<#*#YH1e%`av)4n!RSOriHڴ^ؘHaR=YRTr1)f .L 9$pt]j҂U *b.I=em{|QcPĈT b$:,1R!(j!J`ר@(+@(FfH J*3lPPZFzL!,NF*@(^A8Q,%ܝRmKa Ru# 2C 54mSXA%G*#qZZ+LܓLY<\ѵ?Z=ٲ  W J=֐uBx,l>yv5`i!劧YġV 4{I_]9Qꂇ8l?ߩ'rdE,(lOwUX;{G<m ]A[Dکԑks\8W.ÁR֕9Bi`E"BQ3/9?*5\^*4J9{9+!sB2԰y.]FUB-e 4OnnB$JN Ifs$f蛑b%@ )p7F1ȸ>Hp"2hWFgmFN4Mj(#oI"iK˨ !"$/v:[#HF Ĕ:/J0l%J~f"[nIIb3Yʏ6c[4B3l7h܌`@P~1=%iZ%i{_FnDH##sϙs'vl}KٚS;5FFH9=oPI?RA$(P#\AiDkH'*DҰKH'E){ҬO+:6TH _ %i$"2m+J-(nUZTx*yҐG94Jdc(${{KZ Bt}Cٷ=KXޗj&Bt) B(._5h@߶HA:HP}Mۆ9md:MU%hBD t}t}qH]Ғ!#9{L Oי' uaD?_QփS8  A!? 
CH&΀j "C4G_雏.?$v6W('$xAGHtHj,f(E,A0~ߟińAmFzd+ɧe؎s}8YBj|p&xyǑ}jx,*F#S#bx G5$}Qۍ 3*F rD@'ȡo1H `G`No%he{  <kfs])yo8ͥb<_b{q<^|x\lw, ; .).L̇3k:fgil%.Vm?8gJ!r$ c^;VGN S~@?ߌlAm@l>ywGˁhc|m!IN(VXZe)t=UW6CE۵!>B9[ htM!,'f{uiA|Cݘ[Ѭ ÙCv!#LA QoMp)ᷦ}Я*7 6ķ(7-Nd:J3osb^ZV 7ͦ5ė7x:!#?|-o~A#VY9C[&ޭ~~F++ڿkjڽ;C!`?/tCyY@m:m%]C];kJA`d™ #$|^m|$MVijȚi1a+EB9sz\ HƢk|φ499{0!8dt$2ߨ/q=GX1#qN<B*ҔfLFjP_*z2gB/LWH&W('u62hjug>oQcms* g3 иCA;,ݩ ` )M@A|S@tf[~hN0mogf Ld^1 `֌zg9)bĠ_%E &HMM@&Kڐ29Y{om6Xr$ '[y^O, :ڈԹ# <9Fx5Q2xt'׊+8Aw{(.yw`{5N 96${9$ћIP/.6@+Bz\jkঢq삺[룖 z@ $/_glu5 SHu!٪&W X-_Z: @Z- o6GKjћ|(9c8W5:dt^ޫkFܨR(F"A˚^m/)1d _ ?F}0,7118pM]ftbұ Ʒֱ:F2c,mUVԊG1$jAc%=ZKX{Xkńpc*3;BgZȾwq<@l jcǥ=fl 6Fb Fz#`Ӟ8gVW/ƠL,X!#< ^ 7aC@ wx[ҿV {fP`$fQ@5@Ю(Lr1(h24Y!ۃAF%nb1[rj? fzΫ{@V{:4pD={=1J1P ,[ ☎:j#ŀ_E^EQ5xbEZh@gM3Ҕ(6n0K%ȱ(-=;GahC8jB^e4NF FSa= 5;.˦kIF]qa\C}A#{r^-*-3z Ó}^-]NGLT6Aͨ_ilWqq_M(UEEb opyb4$nny`Cyc*ݯ䷋7!$P&mI7i䈶F$6ZjϿC{%(_M|of!rDR|#zN oq0<ɑ帟?28PZ2P~OMJ勅%M@1bl8O |/`  p@ߝph2ЪcVK 6fCk֤fC lhqY yIo43Z6hh&h@j5;.+iY3^Ɏ̆;иuϱ3ޞadS 2b5,fe#ZTn f5'AеʹY ]{հ)9#Q3T4/ aiØ_N ~}5Da[̄0maͳYcêC p:-Un P tLڏ '0m@u|{)ˁRd:Pwl>B$ .Ύ.<3,h&%ٖ@`5P'S 85/F(Ί)wuXhv̉ӽy1=̋վ@3I">%=oЋ}1uUdt>[Y[NŸXYgn|hPţy1.)+Wndٔ 1̀wcq4ϻ 9Kb6@_2=$Bb=ZאX"'K+VmEjJ&W(K ϳ%ݑd.y/q퉾r#z5%Mq7~p AJ҄̀^m=E*P@KNJ?eTӁLtK!q{49WKZj PC/ X -һ1ryP$vxj(7k\t3I:nbC*@F([*0hXj' /!Eǡ MDY}i([@jKGxߍNk! 
Wc qdf-P`AO fz bfZBځPmvW(VletY %X-lz'@|FXA]Tֱ@lnvoF!b)@rNf6'pdve"|jW;kIի4L>.vfʒ\hVt1VGFȰW;alf>&qC4;gX`+hPY;m-!:oY-N@FGDuyXgCaúȅ{|qz Bb'$[섰AyV JH"5bYI\A]r<_+g2yL(M ]4p`3Gb8V'F&w6XoTKa]͂%у´lO34SZ-ۊVGnVAoZ(Y]lIXhnTCz !n\bݮt M4s78F\+PAjVJ5BblV =#$ƨ )9[͑ h&3ڗQiCjϡ'+Iγ#Cb@9s#j~NAGr9 |Po߆EN@ic?ej Lfc4 j ϭ ' +P1\bǾ4kfT1S#݀k!1f: 5fm~0t/Ȏrx#wP9@j4M6C & !iƦMԪj*g6>j4mѣv@v<'S6b` F=+!FA !K!Pv?f8@hڜ {f4dM_ٵ?aġN,O#1eځ!,2@\#o܏J4g݊'5f:`Y+,9nLBCK)tٔfyYkǰ%6wcNh= ޻Vq\iQ@ KadL`oFŔi_'OY;4^q03$@w;4.JOsM{ 4jZ ~Yq = bQ&1Cʼ3zqd䎀b < 5a.5JD=8u+8Ӂ˶ a~&\ƅ ; edIXb tG,GLyj0B5sJ7Mxp\(5hŽKq}`> uؤdVxYӤ:3sv(njZ+* 4ot"-hd'OB؍=PEFJ(#&Ncp qP}5|O><|+KA=97j)@y+ӟlG 鐷R(I;~tFSCJ][Io*ѯT+?6a!2>|+0>܉Afa|l/CUϾ ݥvcsWmU?WNsC>Hc5tyN;aؿG}҅H(]*%=׵tL|\{(寤U|PF1??Qʧ`Ɲ؏~_W^FRB>J HJ@>uL ZTPQ+xψB> pq7(H Xǻa|/+wh*W~9GFI)0>& 㯬Wpn&= ܵ=R%r U4{H~%ɲyDV0>a#}e|X+~!Ue7CUOV/ 湝Pi? ?W@o|BAF A zh>{ƞ.~1T1Tֳw{Bޅǿw=<{+K=xDZ5n-"4ccb1ecBD}s50`HLA[?}{O=nl?zRh6G6O1x򾴎9ӷШ t@y3Co4A\ȵwo%}nr B-!m<3S+'}g?\?EMdGJR[=]/lm\FK:f+~ÁϾorlAݍj -\tc/FlFAki9 gPV6_ & X𯔵E \JoMG=:CDZQ_Umf Ljem$ڐ4G/]Kv x`ߵ}| >o[R(`d߭Ր=VJh/NK1M!jx%WkdRS+$cZaH %&dvȾeTѾI'W=^vD&٘ψ=@=1oSwRItl |@K!|7|HA=J2Ċ|nh? A{%I:kC@~LS9~J0pƸ ?ļCP7 ?!iI:(56_C-µ@Ezzچw 5'VG9mǃ5C.t&Pރ5` ߯EnyDvd- [?cm0|,Um9K6Vd_hdpu9WAkxj7ohM')=u.#߮HZkOzc3D#o KqiE_FtK ~|҈?C&lVhEȣa&qfBwaf!Si٘}i|YC2;,WҖHJC292BL &2Pؘ]1ksp(~^4fvA15(~JՋ1ٍ ,,\O#a׍֞}@jW3x\705rHoL=MȒhBa΁0ә0-3 h|worlO#y.{dU~G=} v ﯵ4~V~d#M 1@؃ALQ#Eb#gVtŰO#}V]H:Cɾ>@{@J蟾CHա`ɐGd;Ce}=2c>>->. 
?QYFAϨOT:V*tt&mz_brhy\i-x7D$yC/Ki\ysDNO@^K }J#R[: W(KK{s[Bjѯ7"cDFȕƘ!,PUx=+r?_oQS?<]FcT8@:7pH.MH~yh9!:w @쿬6cyD]VɩC虠`O#_n?9:T4X>:@y&BAZ!0L1yI`\0&$`\vTv4k] w47CwBK 4\8GwqS:M{#1=X e%Rf)q[~r16HAofbVR!?CFK6im/ilCR QlA| SZ #-/gl%|DRP!~֌.9-p '@on֒{FZaPXUńh/E@{DoY\U2 $Ӄ.޺M,<{R'PWX685;[>1DK2{h2*{9VҦfLm\iVҠS)}HH^%!TTk=o:' 0$Bw=Lft&@do3uU4Ŭ1f;jchכ-\+3`13Ou_Fo!9"]ьg~{ve&ҡY/̬fЖV@c8FH<4"aڔ/jV@[Zvq[;>䝺KICl3?6꧲udyZ_( YAh72S @@#ȼuxBJbh.VFzIjHN 9R+(q.7ˍq6NGNUbqi*E9]7 EsPh7( ɚoCuj\yblC8c^9ZJ;n>ThbF4q.YQ--p&,|߶n4wp.ԏp91{\ov\W)˖];İI2w03:WGʉ6vΙVhG{@6#`%MUbgOnQv $B=byߐD `bS'u^u56+`hz1jʷcplVL|e~RqRl ż݋#YghҴk ?4TMǚ3$ͧēWj$bAԴ"?|_,uh=B?P/W+(CuK. + ?sȏw>$|#˴3G#"?31"?u,X J\ei|t ΅^ZM.#ti;%~X_;@[Uȉ=V={k@F zH)>S|p;G+Fq3/>7OZw` =)cx/-B6`do#F`xH~ oI8gC%}lcpmpqi08CE;x19 <A^,? v m[H55o5) p S%(TU9Y;K[A~e59m#m?"!q;_m7pƦN|@m{,Kx88(eVG>pN =7nh[l>=8w6n+Ƿ4m8[ 8~i2kss~hR%Kng4-;X[)~ZJRopXKQl>^h6=JqCp|wJֳ0g8HBeK"Yχ mI),6 Jjc9At_w*ÔQ%iZ0"2{O6L 9kٴX:-^L[Fgm5<%,P4;Ah~v o!Ahz=t8 IcO3iC@.s1eW^yz6 ch%p0vf:[(Q_+tm BwnxV$J,P1NmrPY6Vb,U2y e d S]b2?!o mS3Vml^&;&!k0Bd>MU 'tMO,G--Zփp!E8'4%?nk67f-c) pÁ6H֘ŘјA)Akn`V2;mOk$! ef|M}el:|-qC4mE6q8:D4I~Cw':BSZ]G&^]mL9 C&y<)Mۭ;f=)ANNk]&AٯWe桍{{JSYw?xh~ShhZYePoimgo$aY68췺z'~삃87 ,Fk(P HׅOcmc{ݤ(ăԍyd+z @yxS>JH! LvW;z%iGiD"Pm8ƴ ^5EM 2=(c6N35^fk5dJO)58D SJM6XJO,r [e95ۀ;tN[`!R({'ڔ6mzT:JOG陂Y=zЦ9$~=5qhS=uqxiBT Di]iJ|HWDNxn天<4(B*SJ”Hw+:HTZ CG.v&!)W,NAWo[W׬IƝ~UJY$JTGq1X҈Eh3R_5)5+rt'QSf[5O^jRlK[zl_g[!EĚT*VNtɑ9jM\ j+ˍ~2j(W+:^{\ P%4e'&4?D=Ŗ2Nyd;zQħmBЅنU ="7$Vy>]ix B{ &\GS 1mPIާsvBV !ͦ<31%ߎ27rVJO+tVGct%;D UB`8J[[P# SB [QxU4(%lK"TEn`:dENYXN5RJn9JˬҞ( fPnMfP;puFj4QqUK,} HHZl{`ZZ7!Ɯf8lzIi+J^^/P?L:߫Kp,&P嘪caB귚``Oh29BTA.V D]w99Nu^n6(isi䉶qy v/>ETc9-P'S_+$b̈́#04مJܧ]^aK.LTF1iMtR$ژyIrZ>Hck&( on ?Z!XCW(S寻vA =$GB6I&En!PŢh#1QPlR rE͂WǗ9K)!10ɚ~|W`D h\}KN<][n[I7$&9##ʷ EݺTz[q;'0'C5d ҥo`vbg*ID*%#Yu盚kёɛqgߝavGC64{{”[=ǾؗӅ,-^zp==8BMz%/TCۡ aJ&ɐt)܂Ջ/$DPd5"2"zE”ܳռ%#*L!J";,Ra  \AR]IQV YՑQ[̢x׫)G8%hjj>& dZHH"utJAj{)}H q2z|) .JBs6^!٢q8rk01`Ԃi>5JMiutv#A jH_U&9B4 /AjȃP6^AhB b;y!4\!" 
:½v"PGjG&,XnMj hҲӒikUYq>T oF}n^44ՎRsŕ$фHMYG\d.MIuؙˆ%MR=M%g /809yV,LTqAg C\0-YX2/nYEL}Weɯ4eM[X,@Aŝg \QCǽ$jְx+猟Z'kR4X9KUdɤhmPY*IBEWcyERIChK.:`#j hKSEɚ&D5 nml~m^yIf^~m&I#:k3K9%g^RfRT C^IO.ND(ٻ{4xR7չh댋;B6ŞL :NePY\X~r%$_+[ A[X GbEUT#n$NtdcRUt uͲ;$ )Lˤcnm}΢U8PFf\&˺eR[uJ?;3xXd6~Hg&`ೄeDedSQ`ܚݚYOI>\⡔u?6D*o*B+L0, ٥۸4X D RWly t v bPXJ3 O}0'@7}duU%ɒ[ Oa\t\awd[I݁._~M!"c'd'_P׶ʾ|zFG{c'ΐF~2׶8xF} 6EzRGvlᎲ6Q[PcsEN+w]u>'?XA/-d}~P"n6b?/l*-P16p]w!U{Ry=ağ!kݧ=嵮{OŗwM_- e'#kkyg8.F2H w_\>w`)*'F-~?y싟O}CO/ғ/~gx7|{?|Gş{޾Kmw߼?}'t7?'wO?o~ooo'?ݟƽJa8Лbt!2-nih}t}|XJ=Cce:7}ɽ ! 3r9aґp?1Hz:B( Gur^xdtӣ |zy>=?uOϧGA>?['F~b=dǯ~gos̴g*}Oa5A,Wg>3gytDBE=)Ў|wpQ$i޿ 8:^NoKe)Cm_>{Q[5z~N𝻷w^_ч=⣉T-o#7$Z]sN΀|yb;^ )/M$F}ˢ!A\GP>w^_pkwwpU_5?뇭}'ǫ<!'_E?g:W.G|j}ʦRۍogw SL+}{$H~OgʤhMQ'x~`}ko ';tV0~^Q{V&?ŸzUpN}zͼJMyL?lt廕Sl_{eD!W;pIP7{[s' )=[> u>>7.+1U?C`qg7h)'ߋ|&8Em~ &MBnwnNk?tdTӿE3|^:{ -1JRLgE![J qϿMWfE5nO!6i/~wxWs|xGLYjȫް'%ׯԺ@J´P^E&'Kv.lW#{{W{SDfx~׿z'/0֏ʪ{wv q}RonVxryѻwo)M?>[ퟜß?y׿zso9FZw_']~{rw= wF½Q̼ytru.:kUl3פW`촌G,mM^'nG @#-n*HW_*>l&`/Wbõ+}k0v~PowQK>>gh` a[nM7ۯE|=X& ^aX~Q!o_@_rZnI?vh/)I=' ٿQP9>M3 g/%vf!O-BqF|_^cux/[wID}vfb^CX >cWobo:ݛs{3XY\=^Gy|bٸ[@|pД1>ؑb͋Wxߧ:;6q7._|[{rQÓ̍tw6vϗޭwO!ǭ[jj֨a?oK?g[&~㽝t7"֨s!q11ԟrlSLO43MWDy6 >;Qt4%'ԺI"f_o5i=.+ϓQ~FW]}K٬F n}i爟^Og_7v^I%"|dMO+(fUDzaO N"m|xU}ntAoo Ёš䅍}>`p `]c]Ϝ.|?W c+^uH z޿&shѽ?÷= 9Dm ƊV 8VڶRG3*_9aO,9}AVچ4e1{qQĊ",j'Q$oy4ǸqNYϭnq\^2;; yonyQ\|WmQ~s Iqᅱ mɔ~'j/cK]sM~Z>>ϞUNIg?Qf/+Amo W3q)~? Z1E)$mww qTbrO_\?O \X?*_}x⩚ǔdnygf?ՋҌ?mCN> stream x[[oƑ~_^l$GE`FϹο87MS6ZW4Ɲ_ eW`o8`ٷ+ 3jmU)`@Yx]VkQMz6Fo/[&il~4uVERlU`Ucva!غΐxc\KO...~5_j62)J|mtQN@iG*]hFCZFŵ (+x}>I'j <`'+vhD"빥Q_ ѿ :c6!H⛦*h AMֶFꔵ'K:`?ƹ/96`&X 췷~_Vſ'_^q;āuUpY/zv7w [g5k]3b'5/Hq|DDD=v_!/W$z V6X}w)?\ߵ•e9.k ?'J~6as/k4[k)tX"q,d@iƽs%Qn8a#}]vq\^2Wy7Ѹmwy2H2}<vYD4x#%pa; Ie^_o4JFMf=?@cjk!"[SW̩>*G:WܦiC@Ĉ x\݊dڲ'|nLPp'A) 1}h6;8J6uUsU\D[CV W*?\Է1jtCSb5-qdXurS. suaPaV-WY}8/Ga z}U‚ @5+ʃ-k-X.d= g?&^d.~./XUZJ+>cƤǰdQFU@)xFz3N\Q'M!#DuKԌ+N! 
q'^ECDzg KySFMš#<&vXe LE-~We1IUPQɷMA!=fҰ rbPD!]z6r U^&A.|=` 7#"=I$3h$o{@we6`QK lpHB:"|^9ìnDYRn@<> QTwPXUNN͜ dC*yo+I'M9Hv8P$B<3xÀJD3 ص"al-s@r;S ԎaNQnmS;]y)bD{+Y]a*kr!|L8bf/2HRQF-B'M)3!7m_ѐ  + ث|lfk8f$"jBquw!~~ś;lu: A I5eȑ{ȖKCcY!ڀ獛j;Hdz⋿dࠂʍi=Y7 ͡-"}u ے.^MYnV+pfcmHFKwKȻN61w8ѬOk6ۼf}BtAzdZg:<2`࿌P%TX|ضQz#z oam8H@ΙL6&v6SFtPx܇Ubx8=ӨD\`ݵ/7w}? NqZ4p+XŢI㘷Y!Ožx'u!cz92.qWUݿcOǪI2G{U">md٧kZq*\ ri9p /52BD)aKlT@\')IVN9$ UC$f(PZ*Մ4\4'Sf!z-m7yUcPƨKuM!k%ԉh!7+assMl{P*E~=\ ]y 5wB:AA_ ^zaxLQ+6+r1LdUܷԓm+s-@Uwѝd#)]Gi"Fafyn b!aZ}<iOɌ5xH{ތ% l.:оJ8SǙa2phe/,-G6 Dҹ%B`zX;ioeAN p| Tg vnb:[(/v,kka?V NbQQE {T6+lӱ[cnЁ@L~a|Q$L•"aȄ .$Lϝ24/mNEōj $ &"qpBŠcG|9x-NJkNJ7(̙ XIcB^xt9-~XOɔq銔ĒJ'vL( _J?ݳu]fO0e-wQ{1RyJ լU'[*n$iE')CkrIYt\ه@OG y?;t~@ P?P:ya'@|X^G6+S$KdܠEpmcdIn#WOui`L\*r\:n}ňlJn%3jU~t(_OymԤQ}Dm$\O=Oޏ~8eb+3s+Vx])Dd.ͪds8TdWy)|8uq }1L?ĥ,oJaMTs ٲ~sEF|}\HX>H 9_G-\6Fv&|'S@䖺DݽXF 4gwDڰ5L~zD8.93P y'ńpΐU<}:!]7wUt, |~-t?ю{pN0z:}4wQݽX(7NW7]NPnLAzEL*GUS0މG6xYwPX-u^oV}/meE#$LBN*"];H uf:914 QL]NWPw/!"ő(?_NBj*΅endstream endobj 350 0 obj << /Filter /FlateDecode /Length 23043 >> stream x[&I|()KJH%r=D=uͪ ;f<,ʚ /.瘙>:o}u*+Ϸ^׫ܯcW_ݒ^iqJ{(=?ZXwom9?~z|xM)Ew ?_?f]n|߾[yR{V_]_W|Pҫ6GU>UʽWǫqLm|'%UϽg%u_zּobV򫯟{7=1cW=Ka@X/^Zq/k]1vLx.W?7k1Ӱ "Z^$4v/%H_FԴ.kDX{Q12 h*T)k,h\3'>#[jUG/#:UCe`b^1)"* lde%z?͓L-㥦 H%E/4}t.F޼,mkD#ik0@Va 6UIJ@)0"j4!W'(Q,3>Cq$?FcLtڈٰ!vnC! dԃs>]Fr錎QU1b1DRH눈^өCluk\Lz)Va TLfRi[Ļ9'~)ɗ+P+hUbq6N"Ymp)|)ެ`2ErEDij"_f]ZJX%3 .0\E Ŀ/UɂPFMQaHmr\catEa~=J40&O%O9dۅ|񝶀9"'jKh&10U[s|eJ'X')x6("v@% ˯d6! 
w #E ?ۏ%_wZl'KI6猯8ٌݳc^.3 P/-o 3%3W]tq]DG/{-*$S.ǀ %z>{Ԧ{2#E0{q2=|=aq>-Xc=ve|f7NcK'G4pn[KؚZ8 obi#meMijbBiܬ ܬO^~e} l֗Mv'8թ&}欶ӓU,W(hB='W E#GhW勛߱VuFXOÍf}fl.ХɌMCGk7mW*%r>_Nd7)Y_& 1GB2lh7mxUV$1mo㸷7ccYQbAa8ܩ8RaFuyC9#%EgS $6(Q̦1sn#hB lLbIcV-7țzƥ on9z4OFJ%P236N,`X`D| f`ݙm:}buՀo2ojp*]ଂ`Tp1^mECrKS"0S"vR#O<@8B]8!P9Nbn7StG#:0#~)y !Mb:DJB8U#.w|SȤYP܍Fˉ0ME:1C1-!&M]Godd1`F}#/Z.⛺4 5܀߈47f8oEfߴqM; id.߈ HM; F MC*SaGFTWVA&n8 ː,%|5 pf'p( Ȉϩ&Rq822ɴ .?Ng$4i #T?|pKs NCɤGp."d"T - pL4% % I6#@4D3|## 4doZoB%i9"i033eģ7Ml޴=Fx##Cx##KGxӐcMCQ72R;otaiv722=>n鴋FGL(odMChHCp'|z  Z5jr6ĻVZqJ{^Q&eO )Zq솎eL4%F(:PpDFġǸ_)oƣheOse<]a,J sk`.FڼɧhcѻE[c\!Bb\[Fn @Gq`Tč-gwdc=M◇d_Nq2R}?=`R@E*#;6a>WD/˥|nD!RNӮ:5Ɯ'f l,Gf`NhGDP3;s5e@l" k#kO0<7g`>^2gՃyĎ,/Lh:18^x1SxAt%עhYu eM3p2 eZH`h$TXYWf*4:òT 1 Y Pd8&Tqdq͞1,Ɉj"[`lEl@kj&e[Q<=^fLfclFrB"YmX7̰̒}@6#QdYE, ]fnڱ> ɈZ~- T Z_,{XfbɣtR_ZzCd5YQ"~ψI6+CbEBe -ZՐ=THnZҟ55 SX b 2+tY ngK.[/J`gl!ڥL0rZcA0zӝ C&K(tK\$yiCPb3X,Շ\fddrŋ>~,\_QC' diK؃+PP*xHE$tei>sOCE22THaR5 чf䟚MË62AtjH`x`@ $(G}0mk9HU5(6!#z  OHbJd3B]z2F>e (G^ǑQ)uPZ> 8<1bJe`RPLmudՕv=4 R ,:f52?}9Xm!#S05 *z5 ekPyGFgIktqJ!&4?atTqžbtasOv$M2p񆸞\EڭYQ]]EGaJc(- @?VN OB}ͨd =&#qV֑Xzl2]p0L綣#}o3u$bL(F oD;&즮 !(vL)§nm)ZdP8| lA7u?Kuߢo#saCI&m߈C6FnZFڌzoF#fOuLd6,4$ JG@0v9Bx:RRS51 9 5nK7+T%FDl2_pd*CĚr[39?t0|/86m.'K,""mf_qixh?'@{2ArSq-Q&1e ƝƦcupSԉqǤ8x&e` EJ /87` `czk[2Ǔl?^mB_rq/ۓ%&-&Гe`>w7]SF`Lx)ͅf\[ f ӦptnhF Tw= ؈tfMIEsH%G@*) ڂ";HT`eh=!h֕+}#$cd)IπA-?'0LRp}PRg>.%J*b=9 :༁-2HP"fs J*(Kl71AJe% OAI;Jr8aDk-~oDgH7eH]AI;Ƽ-n1"μ AIKh"݄vPReAəvP"Z]J4}DPF%s&0PrhC%V=*iyg JڄD˪ 4T@>R@% 8CE+핉Jʍw,uT5˭T=ƤnȈ؈JE38"QIòJVՄ@IêJdx@H6;4,;CHd&4J 5cHȉ"1I"ݮ>H.l'(i <IC\H(!fXG$ ݙf ©ӆ\FH xӈˈ⑆ CƆGl iG2j;j rd4x.ם q>3{=ٻ/0KSZ1ⴋ >epqAoc$E@V U(Ô%6\C2ꛋr@'Gޖڣ3n=!vEM}EeFf) i{VsRiI0:+2쇛aghlP#mohO EA IrhQDΩ&$ˈN%mH4ʖ9zOLiu9cC%z AƝ(=g|ٟc[0SW\b\}dʒ-|$P1!x+]:Q2gdHk9m9l-fm¯X1P5{i.Np6x\# ŎVLk?s 4En_tJBKu# kcp9b&nrP969s2/\b#XA82K,H 8I!a"B{0+F+߈ܱ?9(-?)*-_cZ.PKGvxls49sBQsyY oS#$)`>\F2rfJ\lvDq6)K!f/ч]Ptu\ў-@'-G3bPlcҺ+ėX-ֈ=Z\d=[ѠCTP1j72sFUgG#0r0 eun?Ĺ m4iMmU';XT2?{tL:cmwd6@ Y&.~( Px=/ܔh1YH8P/_n'1邪g G+ 
gۉ߆.abXHPw "6QubpF&^u0ߞ!6a2݂.ox2@mptz=9֑4lݩQd h7zHl"2݁o$4+9Yq҃$6/H LB7vX$"Nx8 fG7Ő4=`+_N"v$Hq`$e(tiw.门/h'_i ڄMk+Ne-Ew$-]o%ˢZg܆A<"wmtj}8S{LFwH%ϥ<\g*^ݿubN뾡WFuZ. Fֽ_X'dZȵg;괚]ċyپrmmnouX4dtmaѰsvӘ/gQR}<uX^J|-R{Z  h-RֺrXkءY_}R LnlX笝UbE7k6 ,8}Ya?dgrXIXS+6 % dE+Mk^ [\(֮, 6c|,*/*^:t.} `V%V].\wA7o|=lkK:G`%l ,dqhuAB~V"hJX{l5'r, ͛eRchXp>)Uq jGsf::6MejmtS/g.hVn}%mGP3Ŗj)hzj75, Jmyׇ#bR(~Vr𭔪qpQ쮦Zrǹi#0&_-BM7v4Y9f7M}tćݵ +̳ dDkeT+%-5iN# DG`okH vk^]?6+L7 D:>s0R0>s/(x*ɪЛ=dو}(64֥Mԙ:EٟEN[X1 uYN <@HĖف9QwчIMT1*4|Va㢫wo{B_ mQDBV)`p ] ԁ 14&%>"^w⮃66F^|YpX"Nd%FüB,JBdb[D, -ZJԜy`cCmd6 D@=K۷)9-@L ߘCJa>'T&cq"s6't3ȡ1;9F[ȼf˨YϜ,bg6{W@P+۞:d oW>o'+r\!h#Kp' >m>h5cgQ|h(dTʜjO,+- 0mA-MЃ6g6$@X >45$7C+;J`A˚ >hղE> >4T,=%|h9|hX>hqY@MP+>q#*,@uЀ[ o=P‡ǘ/ZG qޣڡC):4݉g >wvh8`'C;:OаO6a;4j H_7P Cg۠C`ΠCCC':thJ 5aslqN 1ڴR AF<@Ck;;@# (ʌ䖭-c #XҰŞ1i}bŏR?. =a%Nj>xT0~9. QV#rY5ʸa8rv]BvE~#ZC 46Ji),6ع^_~vL;g&$3X-m*ymhDn7@HۙtC2cDKLgpKNVe^w2?rҖx;ґ=_Jv,r7<};KęPQ\j+Nw!FAd4g&֘_ֺrzOZW>ZZWnlD8h$q!4Oodʪa鶾KG@ *k)+zL3cm2Q6nD:48COD0,emvLVHwTߑh,wĬ qCD}7tLu #CPMnty$J#8h޹y$,z,vݱ%ƑDx3xJ.d.ɵ1U?}`5BhgtE'G+S ͔[9<;yXT,fӶᎃ˰:AFJCqczvD5V^L8ubH(,|A ETWN7Z c1K=Z72)qfKwsVO/΢}#ìYe5dTijpL`6,jE^5#xqCÒ,;9[@Ԯ~X:;> 7Z;ʑ ee $ BS;K^Em+M Zx~SAEm pXv7Z3-5^.l&HVcCisai4;nwQAPkC rvق5Z d+ToTQQ3$ҶV$Go8Jx(w1cghEB5+ՖMjᨫ;a=C2ҵIfwӑQlEib{*"H,T]2 UK٥j֞t-@Cߴ=X7#z+TęwV$wڻT5 ͂I5lM+wڷaZnUZؚYk{b#ͮٚ&4rZp?o"k=oEK -Cݡ='j֍ }K7yeͺEJNd= OY,Zvd U 55?P@K/x v.*,2961*?5[5N=Eig^CױG .mH zo:?boCH-qS|!0pic'{y|F dwG܂ZQFcb $^l.\fq}[?g!^gz*vEh0ɻӰEEm4hh>m ʷ15s;5<mUCq 78ACzgۭi^0֭6UѰáZ~=1EM{m 9:Y/ mPN/8n:Dˌmrͷ=o_A_NB? 
9 *S"~ÓGoS]7Aվɑ$62Վ@S X8!w`M#vh8%k;$!>bR1;nɌfoշ/F#㠤rњ3\OkXU;G+CxђiN_&)R#ddD@y,lF,HtY7Z+8-8Nc]58wb:u&mՀyH ~eUڟ<.?ss&pތ>_oZąis\; CUWnwF-~)ӷmцQ5N#ZsɮjeA[g;vmw n-^J_eI1 N3'/qK5pk+~%v?fvDdB{3ӈw$pQi7KAs]cFS;xhWG.}Ip`rD:~̒Mp&̯e3“`NW52@BPHR qyA.y*t7؎ĀQ',αtbĤт.ڟ(|ڹ_FҒD$e/7'"N2&+_!*m6nc@Ɗf6͜`ʮjzf`8Iڬ"Öd$=yc_CG>͗oYU^ 5G_P ^-:S{V_{t8[*U+Dqފ蝑 zڽ×{ i~vJ垾,KEOݷA)z kKzz-.WX~WW(W_^Y^+>Fd^EO_7hzzmiYpzz-Og%鄹z-=4.}0_p̊Zm_oQ3zMՆ9yv۽g33 Lvkif{ ^iK7$ٟgFo G{ekYS猃?[rÚ.c oܽVC2~`Fae~*LGpD؅v^1ېݽbܽ&ʼnEk{܃o%1^F}qVd{ٌ͊~[@eg+twߐ.g}+{Rp^%3 ^iwU5>?%x{y{j7xDoU팦y{M\EwRSq ^!sba9V<, Zr1Qo{ bXD@3KQJD!]'RF,bBbcz23(~R hjbDGjsn[k_[Vzԭ ގ|hژr:fg!zA*-"8n%J0ERGFo͂7}%cHͨv,0fɻo޲!zr/ȕqz(t!nFL\_{ŭhpqsw/pjtCݶ %9@%c&K$n;WLW7fl^V| OTЯ}kzqn<ح Tf94TY@@cg+]advCkzCFz`EΊc=_>?H Cg„=,=*c\@bWF!@4*vG Q<8!F]"XJ4ayϴ<[˅q\@*`Ի&beT3RLS1T_j~q3V4Tuf#^}؟O8*֖XI;*{oQ"/Q]uPHMW/qr%vHV~' C Lx0uǸ ع!Ҧ=޺6 =uBd@R۟u&#= ?0?[hZ@nVu֮QШ$""}}UE\$ ʰG=[Vwq vT~֓ݣhZφ*~)'h,.3g] \G X~uZiY[k1yk~*6?<\`oDc=ZKzg>Ӻ@n~HG9m7!7#-9[3lgcRn[&,|'꯯R,yU~x+>j3@i<[^eg(ɰ743#8Uy{4iʭzȎv?%klmY)&~v^`v?Cw $e+Bh3P{ۅS37RquqMٶKE{#3viDό7/乃G|(+>3k oE hGR=MEaƫYbH:#,_TQXFjū;6+}#>m"_mǑ#HlG(u&d&~rFkH6{(| X n$G g9BGpA O*G ̩h d* )bkvߤ!#-0$2My8צ?("1 1F(-9'hW$kaHV9F[&#<fO" NN"{ϯlL lMSC'Ç =7ƽ^{J ;&"H m} &v:`&8` Ԗƽ {S7֟f'`KփqwY& Yɧ{RO:l7;Fd{A94`MwBfߊ40[Xh 7 ,vF:ؘl`ܵ o&Axw#[fOx N8{vt^YO6F ᚧno WVۚcә[N\#gŷp)Wr4)k39e)WlBDVDO %Y.Y+KjS5, =6hZdY,p`Y+VDb7KY醺菵7r&lstȚjTsȚ-èY9d$=CN=8BC7r5jG<#s5e5n)`^^)kJoDr擵ev-FfV>YOe4|h's5kot at [&d-5e2lݭKfV1pu)!ݥ!Z:d-]#7`%(d#eC֒ %C-)=  P1=*!k_;7T0c4c_Nzdώ0hX/3\U|qC{Cz [22=lY#8d&t2@YLrCU؊ϳϴ;d!cH!ϓ?n=YbDV/i3Gk] q`BqKxA67M3|5LE|͖vD8rIב'+ ~0>|VDA$/Z/;U?n+T10ZVi"i,v!CgbUQϺ³tŐ.=O^?sIVvS>;VYoހd Q5_VE#,0*&a[BPQ!jҊ #]kf&Dž!GoBZ8VTiD2Ð^JsĀnE7 8oH1wYBy#_!Cy3uhP#)ӊٌ*+zhwr1oFbhmfq,8<`?WH?`.1rcs&7',lHX@jgfGIߜHKan!"ƅhQ P@ sΞܛ Ls@d| `#D s4L0.0FtӖ| 2$.Ԉ#8U @dI4\H`C6 QH`"1#Gh9\QL$O-Lg p) X!KY {9 ($TFU{ŘV`'!4R֦Ic!3XzRq!$+㯲*V ! 
oZj|'}Saf ~W<Sq}q}t8KWWح"т__a]^aE^pWY?.8pV%q$N8^^ǛW(ϊ_f\o^>+WpqyvyEpSلfL W3+LFX\=4\1$JD.dwAs+5k#*\/#U'!z'ء w]9^IXnkszN&5$$n?vks+ =B;9<#Umr#bILN)hZnN~U`hZSyWI WpE?Znm =G"fEoe=?eFe ּsϝ=<ϝۜ?li6>?eL9ysI0Ҵl\mz>v`?*xK2 xn $h[H6*,qg^f4?G*.+G.W=\őU{# Z2V&<̄W.S|$Nv+-^őxU=*\NJ2yQgG54 `^CN 0=zjتwSop vY5d|˻+BK%59F_E-g{Y}M=?|x/.o4?Zm+k)Vяe?Y݇ݧ#0S2т㧊"?*mxծw^Ac:$~g8~燧O~}Ү=|*Ruyg'-7<^_. uɧG_zWzzS33�>=xw{xOv!^w(C"Z>0o=:vrk5 ?%n_,H]/?/7xr~wg}D&@B?\oo z[z|5!m>~x>jN=T~^V%oT_}?ݿ76,(:/ 2>v$z+|ڮp-E^˝%_vG vŒDxE~}|#_h6(r#OKhU`}݇׾"an)jJ}eI|zC$x,/o?=g2=~pqs:_WZ lcsQv)v"~1`ʡ"uLSY'Q+U*ކ^.x(4?6~ϯ%PেO|%sTEL^qs|aqn4./|.cmo+ENϫ֬K{?>9~~rN|84*Mͨ]4F>܂ &Lm]7\ίzUOQgO7=V~:n<z&pXOFٯ^pї!ȓI`׻0_*DwqqVǓ.w ]1ʃ2KN7",k꟡eL*<FD>[9W!:* ,osVZgj2%(2/R*M)]J?~0cчz6jQl&q[r"no>D@g~aCﴡ7#CW4;u+pezsWh|7<O_?hp,}o*,WAf8ܯR+48T狜 \eA8 u_^+g}yLJw{L%Wi?: 5\dQe.e}x;e Hw?=#7Hym8F G&Ù|/j?/a\`7W/Mj-[!7W0o 6}8s:#%N*R{7mNϴ..~=<kygv@vvA @׷l!dNgx# n4k⎏|VM* [}|f}(TL o7c)[OZw?ܘs@}| iްA}8ۋv><6/Q|n768e^/kxjJ(Z9{[I <|Kh=WOPߚ|E ԃB#m+vw ` $!Jw ցgHPnE_6"i/o +EO"{>nQ^(Zlky(A0Ï?9e_O~#z򢔸h9kȌ*8> ~ψ!h?4P(N u&UZOqmK·'Qlfh,wmiy؜cuSԭNƗ@`ޟ\pBmIf/ؙ/ؗ/v˘wndu&/T;9onlߵӰ]~cdlñj Y/Q x_94co#p=^ -` Qy`'""n/o?=^}(gnI?1xcRpB̉]$r?vvy{Y[~'-b{\s3uX$G{qvކk>YͳѢm?bʧW<76=?X7+- %U .w,OPz}+ ףO[M>1nXۇ} G-,P7_/J~DT14,G^Hߵj/!˼j xbKOWb?), =o>-8,\>G2#*7|܊5b4ץ1Y& 0\<5%DX˚iIOHJdo#_2d~oŽ4)"ahz..mC)ͅbWOL%#|B3b\-[JqkDH.pRhz}`lBk^_)OONtRW'xn?j᷏[{Sz>!z,- ]I>>~kU:jo 2c3(sq YŹPmy7M2l:!Ç٠%:8~؁v^L14H7MvVOߨ4.~|ZG 5RNER|/ gi5oGEcYFɺt@,!~gA#1F"*2T7Zendstream endobj 351 0 obj << /Filter /FlateDecode /Length 6268 >> stream x\Isȕ/ۀ&=&Zᰛ>&& E-^UT r}-^T_Uuq꟯"awWn lF]\|]E. 
ecU_qՔ:pys6*+g4(>]: h?‡Zվ.n+ctXW!-\j_6M1ZP_SPthC5 ootpyk'پ6.rӪ)}φwK"TN_\W0YDZLPbk/ Hۘ ^' آ;qnm۞ p +[ .){:9)>u06 L$vYj+N& 8½#T#|;+*ovkDTS߂d̘i7ߝ-8AU/p[u忒F'YCmR igi avsӼfN =5pA>ek!uCjPeվT42p5aVhn+YJXf]ov)6ea9uKKgF4# '>&!kz Kq4$>NVL;!⭕|LH(?\I!NĘ-â;,^5s`OHe`67a̡9.fhd$(fN0Ѧ][/FIaUg LT * jPUh2X9/1*"k =})\(ufHSNKnj4Vp;%hE8 'VT1 x )MFDfA7"PY䝖pۂQ:Pas]PFݨ `^:_X!?6?OFѕqD'b3Ϭjlj5,ؗ(kx_1OEr*t^2))4 aAvX,^@c^u X:&}LE58N- Jjfk8i|+QjxSRy .MC48ydt #*X;qj)\Cvy9t=1#5+"(-.~^आxL3hظcntqv6gbn{xOe|f#p [Ddow,+4kbLfwICD:;4Rjk e/#W+_y]5 4W_b^i?c# ƒI6m'L T>~CUW12 :S_jʅk62q=^L$J0E䟩) <,?a3o'i j&! KAAoBT9saLb*G NR4mN$MgU٥T]=nx4'BB,>%Xn1+=[v)pjn h8gbB6y@`io\T='죡`uF;mypZXlr >;9 ER]s:P.D;{o(\*QP&-?k%'/XzK&œ5Ebd/CЂTY_wWe[xo\;|3^j&{֋pq U딶X|aw^i:|h2J;="٧d&X/*6t熮Q$V*`'}2ZU (Dd@!y/['۵ =ۛهȘ-O4>bwa$VMot~Ka=}+ןf~Z Y2_Ӄm ㍊ ǘBvӯBTxr"!U,L2|QrڿYWN5m2 9֑JR˙)um(e, ˞}dЀ3^rWjܣuz)j|#^/qيq35kȇc+!SSZ I^5G#"n᚜qy+ ĕ_Q7|߅;$-i_1:ˆ#YUzQ[p/~KX>-PD>6E4Y|k :6g|jny;C :~SWU`ADBkCq8fZm:6 }o=̍nA#_MHendstream endobj 352 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 379 >> stream xcd`ab`dd N+64uIf!CO/VY~'Y3yyX}-=\{@fFʢ#c]] iTध_^竧_TSHJHISOSIP v Vp Cq`R} }'}?}{CgU|./.SqoNz߹qr[؄7yŞ>~nSg>;3wߛgO^-ymԆ-w| 2wn9.p{n&Nnt FVendstream endobj 353 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 713 >> stream x]mHSqm隙(J~B/ٗh/N73L֮˜m6/(]226lf%dkT DlJ:$(+7뇾t8x89? q}aN+H]U4&XAv8zFH{7վ(@v,9~"Jȡͨ UA +է+ Ҳj"A*MadV_["{:8eM edE /!\EZHʕ+cFBY(P"JB< qQԀX< D>'~ ;%79tzLQ|~V|>Y7_wjqf/4axg!X@prYKK$`Y&N0AIL/?˚68 r lrL\q35%~5Rt\3ڙ JW?L/gR~SuAS/B ΫF]p kl %gc<$`~i x g\ԓrg"<8;q:sNJqヂ;q}ֆ^?wkxrH#Č$):Jm%GHɨj޴R{VN ia|a1r=%е_#mll<*(Hw[&3]B[lѬF/Ŧ9Qendstream endobj 354 0 obj << /Filter /FlateDecode /Length 2130 >> stream xXmܶb$(7UWIIqυ p5hVwuCgHI$u:AAGry8/ 22ƿvaAwׯ(3v ]2!#KLxG^XFZ{c kk5/k/tt.44K8`/8:,}w`ɏ쓮䈧N1B])tW8Lt4A L~>}"\(!5E7+8w}MeW! 
po!,CXӈS`8ƱQXE1l7$ vCdUU}6üqA*tOFz4_ ^H(Q# g/hX{T΀IN 8Z#@2qHQũ0`8FL#AR%09!NiOP{t:1dҫi6 l7"/e\J7T>T y:q2T*!1ۓvyF9X[AcIEaD{oq B&)fʹEa5zivB0vfFK=SP΃;\x&r@ګeWX\B!'ʻJ}qE-*ܵts\m_!ҝع9چ@m{)v)̕λq%-splw[VmQ !8f,Jt ڮ8V _a8x%ba؞>i{w%ZCoo-ٗ So_%D(BYy{1KQ^o/D.!A9<1ؒ*|wD"&`U0|+&$=b^2 (Gk" &RR4pW"},3 8D=Q ]' /CHD,+݊)HQ!Fk$a߫2""J,?zHT?J{ &[`K #ھ׊؄ ?>un NɡkBJd0L)3i,;kjEx- ʝ_=[U&y)FEǑOѫIXdȁi@b@)̬@V.gdW5ggdQ 63rP.@P&I2S"9i}_@41- Ů;USM_&7BPW W'E$$xAsA39[W^qi'f&c ՃA.K*E'\1!R**Ş^l)0zQaOPb+;sA`a 81p'c e0A 1fKGtn<ѱO3@4ƀk9t6&'ũ`> :4IQ]*uZ$9Q2v\6' H{%I{?Oނvy> stream xZ[o[~_hx؆'{Cn \$ KP"Qq ffwϮLHa"{7ߜ=?ԬW/g󳏾9W/d^%>٤' nDŽ˳紐3Zۈ/k ~t }I:4]_.k͜>ʒMo&9#Li^N.N.o1?N_=t;}g4ų {:7&cÙ\zv?~y*V-sw})}y:ի{ais+sĢY͙`b3i ܩ5Q9U*[Dd &YcF(zHH 4e g{&93{.GJ䋹Rʳ:^+"= D{C' "Q<5%[b ٖ%=ȵ$DcX9 CzrՆS#D$ ԉ؀auHdk <#ԉc4,_m-t!f!!4+ $x'z` $QDtBV >UC+BHrd"K5u2pD)c&dO#mdMĄ!ވ,HR7JCK pi\ >L,g@C7Dٍ@!@ E`GmY@rC%"PTG;" A2@:dm؞v tqIiD$B#, @Lu(^r" \SHiR r=ҖE( X?IL,\%H1pA*7 B$!B1A!<@kjSH^q $SpH]A&˲% т%֐WH.!0/Kd jH,!eLIH\SI\)I4 ("yw II( L:VhCcߡ$6* P 0QN^P'8\ԩpIe;RPMENK% C !Jsժ VW-Rja U>9#>uk$V n%9ʰØR2j|IV|hc l@0]X$'kc VMR,z"CtT !*d"@LT&##V ]x v")x # W ae+[$LA!`@lSWPVbJ"J^ɲyJYy>$n, S4bi UYV2v]- '1 a,4\ڇrZ %  Gi 1Iwf ]u@kW.sB,ɲtWլ`Fbң9y!sȼ4_Oh :~oWs0~Uփryh|OJ~&cQOĵҷZ}~'λTreYbZzK" XGC6q;?U K秨  @-C FY]`mSukrlxAP[`Ʌ7T8RODac9j SƒR+.Y_PCՀޞc<(Z<T+ZM<1HWKhU\$ľV.XU8z-n8:Q+h 5}D+1!Z#y_ѣǮE?ERxAYC=I[=pT'25IخdfםMTYUˠE * j6 =E1_yC&CVOA>UZnwP,@2ltyu~i䆦@U@!JxnQ`xrwd^=ŊCHz*w)!N88dO72q7mw9qm:ޗ9>N|EeŅ_A.woɉ۝p"m4 `_4{ku*^ i?S6 Æ*h~z7_>zK^?ʕe:Z曰6EU lڷ-D ]i(3őִ~ޅ :6p}j{ؖiގtre'|y?> 9(ӏţ1/ c=m#Tdj;}чaQK4ݜɃ5wÄvzzo[LY>]oy`44`F:eon !2)r?*ۿ=*><-g&C5OL;Bzc6ut}CY)NY~txw|/!?ՑKo~p<`yUB;YR&Zow7f]#8`݉pWtŚR x@;:H$*68D- :Wt_Cf9G^g -u:0M^_LYW^L ԃ_yhḽX b!OOQ7%ƝBzAߞ}ߴd9)}}O.Sӓ vdyI޳XSfoZH5E].Er%Sg̋Ru;enh 9?9}?^LEaI.(ۮV6r$0L; emYiyTqN!b7ۭ=zStRe/q}M{S"d16E`PΟ7kG*()o!7ՒP !.bla\*.`]%Lo: D$VE*; G!ńh;H#6- -᜴_AKUgzDSҢJqb@ODL331\oV;{:f|p$NT:gDIA ?ҹ_^E0e4}یu,/e.F(l흶e6wC}迏z/qBMXR>5jNϥx,m.DMwDnX>`dHWtjRZd`DV^M?k/=UzֿkjǨߜ9AJendstream endobj 356 0 obj << /Filter /FlateDecode /Length 3359 
>> stream xZ[o~_h (=AES>AErco̐4'W9 EqyBfwŗk o6"L,m^^.VWΰXca񧕂b*6% W[gEY񡒕aZk6XuHj#VmJ U)EZ*0_R|wMTpĽq|6/VkUOٞf4[<6jXǫT~J+$2 S~Zrr4s{ تd#|` 2k؍jmD9+69W(u8}r>xQhX~MK;KQa?Ya-@h|}>ld| $DKlؤMMnTD DwTdp"bSsls7oWoe8'_kf[8 6bh0a&X*=Az&lHbߕU1*lrD3&rF}fjQ p(kd|xAu ,E*PmЧX)qc2Wpn K71fwv\o>w[bJ~cr )9F&D/J9>xJ@8mrM(a|{ ?z)j Z'a+m­w~Al%@R]ߠ[h9cK%2Ep#qC#NqVH"$Pr~WӂL;MqPUPhYhe#J0\Me))dS,iʿ10/w#RV>Oͮn)+YOK ,X$mz$+v$%uDk~,&5o~c Vd690%z1S^Hm#*}OӸ6!3&=Dp|;LjxFnMUh{.u4,FIsمB˶5^6*n,kmOvR~tpUhq,ޥhC"7 ƑRyZZE<6|X+/ְKy}(p-zG9 YFbÜJTO8Uw l(Z譩^butM)uUGSg!^B(M迂f)䩫"ܲݫZ_~grE| dYJbA %,V,!#Fhs_ '|;e{My$uH%%wi'b8HPbhSۃHj^Xȋ>xhw9/(dP~&ze]N{H1(X%LjQ8k\SfD'VבCexۓWƇ)юJIwPTuކ6Hh_uiClWg5ǻ ?<#̮)):%5 *Pt&SCdҘe{eH1X!Ӯ!Mxϙ!{'xpO y>VR o])Rې}UTse)(m٭76x|c޵6O3uQmS-l_dmz^id}JuԶ!dZ_%A+YĀ H!̘\El,!ĢPc(TFfA,xSʢgcTeGʓ*ު-HC@r 팙:܄{;AҀw\8=&aw %t~'!L|\A +遼ysrCP46/WbC7 |aX* m4bц.5?Z(qȅdXiHf tP}~Darݾ (^CH%O %!a{jK%S|؊w>)5v,_R;lh 7#f ିEvˑfZ\]ayendstream endobj 357 0 obj << /Filter /FlateDecode /Length 2221 >> stream xXKϯJF ݽf06^>+qFEQKR#O琪~\aD_U}Uo˂ޖoڛ uooÿM{*o [Zz[-IK]X.oW{vLQrIuZE)%˝{VCfXra"_;[p֋%>jl\| XynӪP\U vsB%QʭNm[OkrߌoB3e풉ˆu6d()'iZ#Ț406B#( Q]ߞՋ)eO?=n_=Oz^ lP]PÂ6xj6HrnM9>VJC?Vc Ze<(wU{CHovUsJ+ɹf/&|`\DŽs[㮉QB?wH {ȱG r{f0COo^|Ӎ3C6]{3lW.@}mn(&X^Oa1b,#͐Ȣ?mkoW[W_jWR`Ѭ5[awaI}bHAwuZvFl@aS m\o}>Q2c27tM{>l stuфFȿsݑ!I%ad1߆oB KAphp4|ޚ=-?)L 7 Vj}ugoC2)nphK(ƁΧCr7+̱,ƅC'炷 o~p)Dlm ˸tQ> d`LSklV959JAa;0ҀL'ghȆYXpLOP*=-|snK |Xm~r}tpйB\U̩ PbeOsrS!3A;!LEC{02 ؕTLBX?Cku3rF&hvŔbjhGO 3eJ>K;8@߻jyac%Nܓ<2`nJ M8i)PD3hv.N\g l1MF(rEh 4;Z~4} <ę,pc #c#}C0Q`??ұ2a ¼ Sv>CD0D޻hM}U+To> stream x[KsF/$q !{&{Vd'vSeJ=3 A"%o[:ُ~/Nҿk)IK''b>q|b-ԓbz+464.}'o7S=bZߦZSYu=>8vΤW=RSa ưh!,~))/K@ -;fyH{ ;^NgGɞh:Uۛ^u7'/Li[e4i"|e/wԅvg:âR>Mތrdf1]Tz:mKɱ';>:>/hߝ|vȞEbb,t)DLo "/MK5hoV$Y..Q];-y be+ĞۂkXɏ'y~b?uUnI4[59e^\y&&k-UQ'/F#,KpD/^ϿP'kExP 0 X3p;)L^)x%``Cipɡj On$K *U7U=اҭތj4X5om؜廬G_sYN`n[F0BOCwp1NP3e!Y]˷gZj2[9~MK6v-oFM2>Um7}C)@=S?þDFIϖjEZ69yrCZ>c 5bEǻ`scv\.UC&KDH:UK]:B"T v?Vzo)!`9Aq Ypc\Tjj%/$D Q}8?@ H$n(l":̀w`T/,f3R  @ [.3l< j% %$Z_nQ\ޮȻQt[dԡ 
6]IOw^n4]ոŕƇ!HࣲҰp91ǘ9c{>eGI6d8^+jXk)r$06K`bQ2cBxyfRQ@v^r3>)fF 1bS=)6<'4'#w"2N'ϊ/)=)viԥlB$D:͢nHL޳`0(JC\hв''mCz< EȢj}/5w`{8XiڬW-.?.d.;<^]ZU) ׀],JLG2 B};b7Tym(+mE”ԎoU u嘓i7 jlٴtcJ[}weeP VZ%X[7Tjnb}55cqJLᐖDn"PGlsN;- -BOϻ>#bQtDR|l.AD՟mo)EBHIR4BqD-w܃lmKܢ7VWyu5+6Ͳ" C_B꫘j[XvffqsUm sb]ign!}k0G=d#k^Ro+Tyʧy.64pEyax[birQgԏ>q *<*yrs\4U-p",1aKՋ[[r8JgR)= PcVzj<$)gqB{?T">b%J!ѻsYG T4|̺SI ,dfX-I~6]{'{k.J|ƾ GuTڡEflG7Ubu۝WƢ{#ٷfKmq0Ե'-cXܾǤu)x~QT<$ b^s  Z"ĵk뇴2U⇖~dYvhY 0ni]FFk$k+!^2=px v ۙ{!ԑ4J% B!OyHo .3n<93JKVח͐zuzQr!%g/jij?yL ;1]8>~U?!u*`~v?l%3Fc($55IMhj5_j 懸]䵆vkGCї_v%A 볊/z(ٲG>(Roho܌t)r>jcƓG32e(Q"K`?$~wi "xJzx*7^u۝-Uy t][}х ^ρzts ,Í݌]t| Վ=Ҹ+FmF|?-(?h ܷ#Bx%@ga)3/]Ҍ/rx&jx%SƱ:ՙS<[r>3eaL*3LGZj^b*^,pz^Q ya]GmHzA@ZD?e"K> $ZpvF+UIPA mCLJ<1a{f (6ꈠ׹4V9ȹ?J4Ol :=Hy52Q[@yj'46ez |XELY}3w`.|`r ii'(R'(IIPi7W Zi h.:pս_̲F$Eb{D}?uᦧ^^:> Xn˧a8+la?a 5E|1 hOAx8E}PYprN~*5Q]`,aY@bSKfڇz:$x |MC@Yq\bl!B<Ļ`6%8q{JD@%@!呜NQJHL:~ck.13WwZs9%EoNVё.>S'=ӡx6T)|\HV۞17@24j?ڏ C:'a P=xe @|dE(|:e.p$΃> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 360 /ID [<7949009edefb8b7bc37c5f8211fb3121>] >> stream xcb&F~0 $8Ja?@6{(N_ӡOgPwv'3ni94 6ہlQAP /B D* R @Q}"D@x;0kA$%)"ރH`\""&Ea/Ȯ 6?z) R D^3H `׺H`lAg/~`7pl2؜9`wN!,6gT33 endstream endobj startxref 290452 %%EOF brms/inst/doc/brms_missings.Rmd0000644000176200001440000002441014224753343016272 0ustar liggesusers--- title: "Handle Missing Values with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Handle Missing Values with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## 
Introduction Many real world data sets contain missing values for various reasons. Generally, we have quite a few options to handle those missing values. The easiest solution is to remove all rows from the data set, where one or more variables are missing. However, if values are not missing completely at random, this will likely lead to bias in our analysis. Accordingly, we usually want to impute missing values in one way or the other. Here, we will consider two very general approaches using **brms**: (1) Impute missing values *before* the model fitting with multiple imputation, and (2) impute missing values on the fly *during* model fitting[^1]. As a simple example, we will use the `nhanes` data set, which contains information on participants' `age`, `bmi` (body mass index), `hyp` (hypertensive), and `chl` (total serum cholesterol). For the purpose of the present vignette, we are primarily interested in predicting `bmi` by `age` and `chl`. ```{r} data("nhanes", package = "mice") head(nhanes) ``` ## Imputation before model fitting There are many approaches allowing us to impute missing data before the actual model fitting takes place. From a statistical perspective, multiple imputation is one of the best solutions. Each missing value is not imputed once but `m` times leading to a total of `m` fully imputed data sets. The model can then be fitted to each of those data sets separately and results are pooled across models, afterwards. One widely applied package for multiple imputation is **mice** (Buuren & Groothuis-Oudshoorn, 2010) and we will use it in the following in combination with **brms**. Here, we apply the default settings of **mice**, which means that all variables will be used to impute missing values in all other variables and imputation functions automatically chosen based on the variables' characteristics. ```{r} library(mice) imp <- mice(nhanes, m = 5, print = FALSE) ``` Now, we have `m = 5` imputed data sets stored within the `imp` object. 
In practice, we will likely need more than `5` of those to accurately account for the uncertainty induced by the missingness, perhaps even in the area of `100` imputed data sets (Zhou & Reiter, 2010). Of course, this increases the computational burden by a lot and so we stick to `m = 5` for the purpose of this vignette. Regardless of the value of `m`, we can either extract those data sets and then pass them to the actual model fitting function as a list of data frames, or pass `imp` directly. The latter works because **brms** offers special support for data imputed by **mice**. We will go with the latter approach, since it is less typing. Fitting our model of interest with **brms** to the multiple imputed data sets is straightforward.

```{r, results = 'hide', message = FALSE}
fit_imp1 <- brm_multiple(bmi ~ age*chl, data = imp, chains = 2)
```

The returned fitted model is an ordinary `brmsfit` object containing the posterior draws of all `m` submodels. While pooling across models is not necessarily straightforward in classical statistics, it is trivial in a Bayesian framework. Here, pooling results of multiple imputed data sets is simply achieved by combining the posterior draws of the submodels. Accordingly, all post-processing methods can be used out of the box without having to worry about pooling at all.

```{r}
summary(fit_imp1)
```

In the summary output, we notice that some `Rhat` values are higher than $1.1$ indicating possible convergence problems. For models based on multiple imputed data sets, this is often a **false positive**: Chains of different submodels may not overlay each other exactly, since they were fitted to different data. We can see the chains on the right-hand side of
Accordingly, we have to investigate the convergence of the submodels separately, which we can do by looking at ```{r} round(fit_imp1$rhats, 2) ``` The convergence of each of the submodels looks good. Accordingly, we can proceed with further post-processing and interpretation of the results. For instance, we could investigate the combined effect of `age` and `chl`. ```{r} conditional_effects(fit_imp1, "age:chl") ``` To summarize, the advantages of multiple imputation are obvious: One can apply it to all kinds of models, since model fitting functions do not need to know that the data sets were imputed, beforehand. Also, we do not need to worry about pooling across submodels when using fully Bayesian methods. The only drawback is the amount of time required for model fitting. Estimating Bayesian models is already quite slow with just a single data set and it only gets worse when working with multiple imputation. ### Compatibility with other multiple imputation packages **brms** offers built-in support for **mice** mainly because I use the latter in some of my own research projects. Nevertheless, `brm_multiple` supports all kinds of multiple imputation packages as it also accepts a *list* of data frames as input for its `data` argument. Thus, you just need to extract the imputed data frames in the form of a list, which can then be passed to `brm_multiple`. Most multiple imputation packages have some built-in functionality for this task. When using the **mi** package, for instance, you simply need to call the `mi::complete` function to get the desired output. ## Imputation during model fitting Imputation during model fitting is generally thought to be more complex than imputation before model fitting, because one has to take care of everything within one step. This remains true when imputing missing values with **brms**, but possibly to a somewhat smaller degree. Consider again the `nhanes` data with the goal to predict `bmi` by `age`, and `chl`. 
Since `age` contains no missing values, we only have to take special care of `bmi` and `chl`. We need to tell the model two things. (1) Which variables contain missing values and how they should be predicted, as well as (2) which of these imputed variables should be used as predictors. In **brms** we can do this as follows: ```{r, results = 'hide', message = FALSE} bform <- bf(bmi | mi() ~ age * mi(chl)) + bf(chl | mi() ~ age) + set_rescor(FALSE) fit_imp2 <- brm(bform, data = nhanes) ``` The model has become multivariate, as we no longer only predict `bmi` but also `chl` (see `vignette("brms_multivariate")` for details about the multivariate syntax of **brms**). We ensure that missings in both variables will be modeled rather than excluded by adding `| mi()` on the left-hand side of the formulas[^2]. We write `mi(chl)` on the right-hand side of the formula for `bmi` to ensure that the estimated missing values of `chl` will be used in the prediction of `bmi`. The summary is a bit more cluttered as we get coefficients for both response variables, but apart from that we can interpret coefficients in the usual way. ```{r} summary(fit_imp2) conditional_effects(fit_imp2, "age:chl", resp = "bmi") ``` The results look pretty similar to those obtained from multiple imputation, but be aware that this may not be generally the case. In multiple imputation, the default is to impute all variables based on all other variables, while in the 'one-step' approach, we have to explicitly specify the variables used in the imputation. Thus, arguably, multiple imputation is easier to apply. An obvious advantage of the 'one-step' approach is that the model needs to be fitted only once instead of `m` times. Also, within the **brms** framework, we can use multilevel structure and complex non-linear relationships for the imputation of missing values, which is not achieved as easily in standard multiple imputation software. 
On the downside, it is currently not possible to impute discrete variables, because **Stan** (the engine behind **brms**) does not allow estimating discrete parameters. ### Combining measurement error and missing values Missing value terms in **brms** can not only handle missing values but also measurement error, or arbitrary combinations of the two. In fact, we can think of a missing value as a value with infinite measurement error. Thus, `mi` terms are a natural (and somewhat more verbose) generalization of the now soft deprecated `me` terms. Suppose we had measured the variable `chl` with some known error: ```{r} nhanes$se <- rexp(nrow(nhanes), 2) ``` Then we can go ahead and include this information into the model as follows: ```{r, results = 'hide', message = FALSE, eval = FALSE} bform <- bf(bmi | mi() ~ age * mi(chl)) + bf(chl | mi(se) ~ age) + set_rescor(FALSE) fit_imp3 <- brm(bform, data = nhanes) ``` Summarizing and post-processing the model continues to work as usual. [^1]: Actually, there is a third approach that only applies to missings in response variables. If we want to impute missing responses, we just fit the model using the observed responses and then impute the missings *after* fitting the model by means of posterior prediction. That is, we supply the predictor values corresponding to missing responses to the `predict` method. [^2]: We don't really need this for `bmi`, since `bmi` is not used as a predictor for another variable. Accordingly, we could also -- and equivalently -- impute missing values of `bmi` *after* model fitting by means of posterior prediction. ## References Buuren, S. V. & Groothuis-Oudshoorn, K. (2010). mice: Multivariate imputation by chained equations in R. *Journal of Statistical Software*, 1-68. doi.org/10.18637/jss.v045.i03 Zhou, X. & Reiter, J. P. (2010). A Note on Bayesian Inference After Multiple Imputation. *The American Statistician*, 64(2), 159-163. 
doi.org/10.1198/tast.2010.09109 brms/inst/doc/brms_customfamilies.Rmd0000644000176200001440000003230314224753323017460 0ustar liggesusers--- title: "Define Custom Response Distributions with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Define Custom Response Distributions with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction The **brms** package comes with a lot of built-in response distributions -- usually called *families* in R -- to specify among others linear, count data, survival, response times, or ordinal models (see `help(brmsfamily)` for an overview). Despite supporting over two dozen families, there is still a long list of distributions, which are not natively supported. The present vignette will explain how to specify such *custom families* in **brms**. By doing that, users can benefit from the modeling flexibility and post-processing options of **brms** even when using self-defined response distributions. If you have built a custom family that you want to make available to other users, you can submit a pull request to this [GitHub repository](https://github.com/paul-buerkner/custom-brms-families). ## A Case Study As a case study, we will use the `cbpp` data of the **lme4** package, which describes the development of the CBPP disease of cattle in Africa. 
The data set contains four variables: `period` (the time period), `herd` (a factor identifying the cattle herd), `incidence` (number of new disease cases for a given herd and time period), as well as `size` (the herd size at the beginning of a given time period). ```{r cbpp} data("cbpp", package = "lme4") head(cbpp) ``` In a first step, we will be predicting `incidence` using a simple binomial model, which will serve as our baseline model. For observed number of events $y$ (`incidence` in our case) and total number of trials $T$ (`size`), the probability mass function of the binomial distribution is defined as $$ P(y | T, p) = \binom{T}{y} p^{y} (1 - p)^{T-y} $$ where $p$ is the event probability. In the classical binomial model, we will directly predict $p$ on the logit-scale, which means that for each observation $i$ we compute the success probability $p_i$ as $$ p_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)} $$ where $\eta_i$ is the linear predictor term of observation $i$ (see `vignette("brms_overview")` for more details on linear predictors in **brms**). Predicting `incidence` by `period` and a varying intercept of `herd` is straightforward in **brms**: ```{r fit1, results='hide'} fit1 <- brm(incidence | trials(size) ~ period + (1|herd), data = cbpp, family = binomial()) ``` In the summary output, we see that the incidence probability varies substantially over herds, but reduces over the course of the time as indicated by the negative coefficients of `period`. ```{r fit1_summary} summary(fit1) ``` A drawback of the binomial model is that -- after taking into account the linear predictor -- its variance is fixed to $\text{Var}(y_i) = T_i p_i (1 - p_i)$. All variance exceeding this value cannot be taken into account by the model. There are multiple ways of dealing with this so called *overdispersion* and the solution described below will serve as an illustrative example of how to define custom families in **brms**. 
## The Beta-Binomial Distribution The *beta-binomial* model is a generalization of the *binomial* model with an additional parameter to account for overdispersion. In the beta-binomial model, we do not predict the binomial probability $p_i$ directly, but assume it to be beta distributed with hyperparameters $\alpha > 0$ and $\beta > 0$: $$ p_i \sim \text{Beta}(\alpha_i, \beta_i) $$ The $\alpha$ and $\beta$ parameters are both hard to interpret and generally not recommended for use in regression models. Thus, we will apply a different parameterization with parameters $\mu \in [0, 1]$ and $\phi > 0$, which we will call $\text{Beta2}$: $$ \text{Beta2}(\mu, \phi) = \text{Beta}(\mu \phi, (1-\mu) \phi) $$ The parameters $\mu$ and $\phi$ specify the mean and precision parameter, respectively. By defining $$ \mu_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)} $$ we still predict the expected probability by means of our transformed linear predictor (as in the original binomial model), but account for potential overdispersion via the parameter $\phi$. ## Fitting Custom Family Models The beta-binomial distribution is natively supported in **brms** nowadays, but we will still use it as an example to define it ourselves via the `custom_family` function. This function requires the family's name, the names of its parameters (`mu` and `phi` in our case), corresponding link functions (only applied if parameters are predicted), their theoretical lower and upper bounds (only applied if parameters are not predicted), information on whether the distribution is discrete or continuous, and finally, whether additional non-parameter variables need to be passed to the distribution. 
For our beta-binomial example, this results in the following custom family: ```{r beta_binomial2} beta_binomial2 <- custom_family( "beta_binomial2", dpars = c("mu", "phi"), links = c("logit", "log"), lb = c(0, 0), ub = c(1, NA), type = "int", vars = "vint1[n]" ) ``` The name `vint1` for the variable containing the number of trials is not chosen arbitrarily as we will see below. Next, we have to provide the relevant **Stan** functions if the distribution is not defined in **Stan** itself. For the `beta_binomial2` distribution, this is straightforward since the ordinal `beta_binomial` distribution is already implemented. ```{r stan_funs} stan_funs <- " real beta_binomial2_lpmf(int y, real mu, real phi, int T) { return beta_binomial_lpmf(y | T, mu * phi, (1 - mu) * phi); } int beta_binomial2_rng(real mu, real phi, int T) { return beta_binomial_rng(T, mu * phi, (1 - mu) * phi); } " ``` For the model fitting, we will only need `beta_binomial2_lpmf`, but `beta_binomial2_rng` will come in handy when it comes to post-processing. We define: ```{r stanvars} stanvars <- stanvar(scode = stan_funs, block = "functions") ``` To provide information about the number of trials (an integer variable), we are going to use the addition argument `vint()`, which can only be used in custom families. Similarly, if we needed to include additional vectors of real data, we would use `vreal()`. Actually, for this particular example, we could more elegantly apply the addition argument `trials()` instead of `vint()` as in the basic binomial model. However, since the present vignette is meant to give a general overview of the topic, we will go with the more general method. 
We now have all components together to fit our custom beta-binomial model: ```{r fit2, results='hide'} fit2 <- brm( incidence | vint(size) ~ period + (1|herd), data = cbpp, family = beta_binomial2, stanvars = stanvars ) ``` The summary output reveals that the uncertainty in the coefficients of `period` is somewhat larger than in the basic binomial model, which is the result of including the overdispersion parameter `phi` in the model. Apart from that, the results look pretty similar. ```{r summary_fit2} summary(fit2) ``` ## Post-Processing Custom Family Models Some post-processing methods such as `summary` or `plot` work out of the box for custom family models. However, there are three particularly important methods, which require additional input by the user. These are `posterior_epred`, `posterior_predict` and `log_lik` computing predicted mean values, predicted response values, and log-likelihood values, respectively. They are not only relevant for their own sake, but also provide the basis of many other post-processing methods. For instance, we may be interested in comparing the fit of the binomial model with that of the beta-binomial model by means of approximate leave-one-out cross-validation implemented in method `loo`, which in turn requires `log_lik` to be working. The `log_lik` function of a family should be named `log_lik_` and have the two arguments `i` (indicating observations) and `prep`. You don't have to worry too much about how `prep` is created (if you are interested, check out the `prepare_predictions` function). Instead, all you need to know is that parameters are stored in slot `dpars` and data are stored in slot `data`. Generally, parameters take on the form of a $S \times N$ matrix (with $S =$ number of posterior draws and $N =$ number of observations) if they are predicted (as is `mu` in our example) and a vector of size $N$ if they are not predicted (as is `phi`). 
We could define the complete log-likelihood function in R directly, or we can expose the self-defined **Stan** functions and apply them. The latter approach is usually more convenient, but the former is more stable and the only option when implementing custom families in other R packages building upon **brms**. For the purpose of the present vignette, we will go with the latter approach. ```{r} expose_functions(fit2, vectorize = TRUE) ``` and define the required `log_lik` functions with a few lines of code. ```{r log_lik} log_lik_beta_binomial2 <- function(i, prep) { mu <- brms::get_dpar(prep, "mu", i = i) phi <- brms::get_dpar(prep, "phi", i = i) trials <- prep$data$vint1[i] y <- prep$data$Y[i] beta_binomial2_lpmf(y, mu, phi, trials) } ``` The `get_dpar` function will do the necessary transformations to handle both the case when the distributional parameters are predicted separately for each row and when they are the same for the whole fit. With that being done, all of the post-processing methods requiring `log_lik` will work as well. For instance, model comparison can simply be performed via ```{r loo} loo(fit1, fit2) ``` Since larger `ELPD` values indicate better fit, we see that the beta-binomial model fits somewhat better, although the corresponding standard error reveals that the difference is not that substantial. Next, we will define the function necessary for the `posterior_predict` method: ```{r posterior_predict} posterior_predict_beta_binomial2 <- function(i, prep, ...) { mu <- brms::get_dpar(prep, "mu", i = i) phi <- brms::get_dpar(prep, "phi", i = i) trials <- prep$data$vint1[i] beta_binomial2_rng(mu, phi, trials) } ``` The `posterior_predict` function looks pretty similar to the corresponding `log_lik` function, except that we are now creating random draws of the response instead of log-likelihood values. Again, we are using an exposed **Stan** function for convenience. 
Make sure to add a `...` argument to your `posterior_predict` function even if you are not using it, since some families require additional arguments. With `posterior_predict` working, we can engage for instance in posterior-predictive checking: ```{r pp_check} pp_check(fit2) ``` When defining the `posterior_epred` function, you have to keep in mind that it has only a `prep` argument and should compute the mean response values for all observations at once. Since the mean of the beta-binomial distribution is $\text{E}(y) = \mu T$, the definition of the corresponding `posterior_epred` function is not too complicated, but we need to get the dimension of parameters and data in line. ```{r posterior_epred} posterior_epred_beta_binomial2 <- function(prep) { mu <- brms::get_dpar(prep, "mu") trials <- prep$data$vint1 trials <- matrix(trials, nrow = nrow(mu), ncol = ncol(mu), byrow = TRUE) mu * trials } ``` A post-processing method relying directly on `posterior_epred` is `conditional_effects`, which allows us to visualize effects of predictors. ```{r conditional_effects} conditional_effects(fit2, conditions = data.frame(size = 1)) ``` For ease of interpretation we have set `size` to 1 so that the y-axis of the above plot indicates probabilities. ## Turning a Custom Family into a Native Family Family functions built natively into **brms** are safer to use and more convenient, as they require much less user input. If you think that your custom family is general enough to be useful to other users, please feel free to open an issue on [GitHub](https://github.com/paul-buerkner/brms/issues) so that we can discuss all the details. Provided that we agree it makes sense to implement your family natively in brms, the following steps are required (`foo` is a placeholder for the family name): * In `family-lists.R`, add function `.family_foo` which should contain basic information about your family (you will find lots of examples for other families there). 
* In `families.R`, add family function `foo` which should be a simple wrapper around `.brmsfamily`. * In `stan-likelihood.R`, add function `stan_log_lik_foo` which provides the likelihood of the family in Stan language. * If necessary, add self-defined Stan functions in separate files under `inst/chunks`. * Add functions `posterior_predict_foo`, `posterior_epred_foo` and `log_lik_foo` to `posterior_predict.R`, `posterior_epred.R` and `log_lik.R`, respectively. * If necessary, add distribution functions to `distributions.R`. brms/inst/doc/brms_families.Rmd0000644000176200001440000003401714275414730016233 0ustar liggesusers--- title: "Parameterization of Response Distributions in brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Parameterization of Response Distributions in brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- The purpose of this vignette is to discuss the parameterizations of the families (i.e., response distributions) used in brms. For a more general overview of the package see `vignette("brms_overview")`. ## Notation Throughout this vignette, we denote values of the response variable as $y$, a density function as $f$, and use $\mu$ to refer to the main model parameter, which is usually the mean of the response distribution or some closely related quantity. In a regression framework, $\mu$ is not estimated directly but computed as $\mu = g(\eta)$, where $\eta$ is a predictor term (see `help(brmsformula)` for details) and $g$ is the response function (i.e., inverse of the link function). ## Location shift models The density of the **gaussian** family is given by $$ f(y) = \frac{1}{\sqrt{2\pi}\sigma} \exp\left(-\frac{1}{2}\left(\frac{y - \mu}{\sigma}\right)^2\right) $$ where $\sigma$ is the residual standard deviation. 
The density of the **student** family is given by $$ f(y) = \frac{\Gamma((\nu + 1)/2)}{\Gamma(\nu/2)} \frac{1}{\sqrt{\nu\pi}\sigma}\left(1 + \frac{1}{\nu} \left(\frac{y - \mu}{\sigma}\right)^2\right)^{-(\nu+1)/2} $$ $\Gamma$ denotes the gamma function and $\nu > 1$ are the degrees of freedom. As $\nu \rightarrow \infty$, the student distribution becomes the gaussian distribution. The density of the **skew_normal** family is given by $$ f(y) = \frac{1}{\sqrt{2\pi}\omega} \exp\left(-\frac{1}{2} \left(\frac{y - \xi}{\omega}\right)^2 \right) \left(1 + \text{erf} \left( \alpha \left(\frac{y - \xi}{\omega \sqrt{2}} \right) \right) \right) $$ where $\xi$ is the location parameter, $\omega$ is the positive scale parameter, $\alpha$ the skewness parameter, and $\text{erf}$ denotes the error function of the gaussian distribution. To parameterize the skew-normal distribution in terms of the mean $\mu$ and standard deviation $\sigma$, $\omega$ and $\xi$ are computed as $$ \omega = \frac{\sigma}{\sqrt{1 - \frac{2}{\pi} \frac{\alpha^2}{1 + \alpha^2}}} $$ $$ \xi = \mu - \omega \frac{\alpha}{\sqrt{1 + \alpha^2}} \sqrt{\frac{2}{\pi}} $$ If $\alpha = 0$, the skew-normal distribution becomes the gaussian distribution. For location shift models, $y$ can be any real value. ## Binary and count data models The density of the **binomial** family is given by $$ f(y) = {N \choose y} \mu^{y} (1-\mu)^{N - y} $$ where $N$ is the number of trials and $y \in \{0, ... , N\}$. When all $N$ are $1$ (i.e., $y \in \{0,1\}$), the **bernoulli** distribution for binary data arises. For $y \in \mathbb{N}_0$, the density of the **poisson** family is given by $$ f(y) = \frac{\mu^{y}}{y!} \exp(-\mu) $$ The density of the **negbinomial** (negative binomial) family is $$ f(y) = {y + \phi - 1 \choose y} \left(\frac{\mu}{\mu + \phi}\right)^{y} \left(\frac{\phi}{\mu + \phi}\right)^\phi $$ where $\phi$ is a positive precision parameter. 
For $\phi \rightarrow \infty$, the negative binomial distribution becomes the poisson distribution. The density of the **geometric** family arises if $\phi$ is set to $1$. ## Time-to-event models With time-to-event models we mean all models that are defined on the positive reals only, that is $y \in \mathbb{R}^+$. The density of the **lognormal** family is given by $$ f(y) = \frac{1}{\sqrt{2\pi}\sigma y} \exp\left(-\frac{1}{2}\left(\frac{\log(y) - \mu}{\sigma}\right)^2\right) $$ where $\sigma$ is the residual standard deviation on the log-scale. The density of the **Gamma** family is given by $$ f(y) = \frac{(\alpha / \mu)^\alpha}{\Gamma(\alpha)} y^{\alpha-1} \exp\left(-\frac{\alpha y}{\mu}\right) $$ where $\alpha$ is a positive shape parameter. The density of the **weibull** family is given by $$ f(y) = \frac{\alpha}{s} \left(\frac{y}{s}\right)^{\alpha-1} \exp\left(-\left(\frac{y}{s}\right)^\alpha\right) $$ where $\alpha$ is again a positive shape parameter and $s = \mu / \Gamma(1 + 1 / \alpha)$ is the scale parameter so that $\mu$ is the mean of the distribution. The **exponential** family arises if $\alpha$ is set to $1$ for either the gamma or Weibull distribution. The density of the **inverse.gaussian** family is given by $$ f(y) = \left(\frac{\alpha}{2 \pi y^3}\right)^{1/2} \exp \left(\frac{-\alpha (y - \mu)^2}{2 \mu^2 y} \right) $$ where $\alpha$ is a positive shape parameter. The **cox** family implements Cox proportional hazards model which assumes a hazard function of the form $h(y) = h_0(y) \mu$ with baseline hazard $h_0(y)$ expressed via M-splines (which integrate to I-splines) in order to ensure monotonicity. The density of the cox model is then given by $$ f(y) = h(y) S(y) $$ where $S(y)$ is the survival function implied by $h(y)$. ## Extreme value models Modeling extremes requires special distributions. 
One may use the **weibull** distribution (see above) or the **frechet** distribution with density $$ f(y) = \frac{\nu}{s} \left(\frac{y}{s}\right)^{-1-\nu} \exp\left(-\left(\frac{y}{s}\right)^{-\nu}\right) $$ where $s = \mu / \Gamma(1 - 1 / \nu)$ is a positive scale parameter and $\nu > 1$ is a shape parameter so that $\mu$ predicts the mean of the Frechet distribution. A generalization of both distributions is the generalized extreme value distribution (family **gen_extreme_value**) with density $$ f(y) = \frac{1}{\sigma} t(y)^{\xi + 1} \exp(-t(y)) $$ where $$ t(y) = \left(1 + \xi \left(\frac{y - \mu}{\sigma} \right)\right)^{-1 / \xi} $$ with positive scale parameter $\sigma$ and shape parameter $\xi$. ## Response time models One family that is especially suited to model reaction times is the **exgaussian** ('exponentially modified Gaussian') family. Its density is given by $$ f(y) = \frac{1}{2 \beta} \exp\left(\frac{1}{2 \beta} \left(2\xi + \sigma^2 / \beta - 2 y \right) \right) \text{erfc}\left(\frac{\xi + \sigma^2 / \beta - y}{\sqrt{2} \sigma} \right) $$ where $\beta$ is the scale (inverse rate) of the exponential component, $\xi$ is the mean of the Gaussian component, $\sigma$ is the standard deviation of the Gaussian component, and $\text{erfc}$ is the complementary error function. We parameterize $\mu = \xi + \beta$ so that the main predictor term equals the mean of the distribution. Another family well suited for modeling response times is the **shifted_lognormal** distribution. Its density equals that of the **lognormal** distribution except that the whole distribution is shifted to the right by a positive parameter called *ndt* (for consistency with the **wiener** diffusion model explained below). A family concerned with the combined modeling of reaction times and corresponding binary responses is the **wiener** diffusion model. It has four model parameters each with a natural interpretation. 
The parameter $\alpha > 0$ describes the separation between two boundaries of the diffusion process, $\tau > 0$ describes the non-decision time (e.g., due to image or motor processing), $\beta \in [0, 1]$ describes the initial bias in favor of the upper alternative, and $\delta \in \mathbb{R}$ describes the drift rate to the boundaries (a positive value indicates a drift towards the upper boundary). The density for the reaction time at the upper boundary is given by $$ f(y) = \frac{\alpha}{(y-\tau)^{3/2}} \exp \! \left(- \delta \alpha \beta - \frac{\delta^2(y-\tau)}{2}\right) \sum_{k = - \infty}^{\infty} (2k + \beta) \phi \! \left(\frac{2k + \alpha \beta}{\sqrt{y - \tau}}\right) $$ where $\phi(x)$ denotes the standard normal density function. The density at the lower boundary can be obtained by substituting $1 - \beta$ for $\beta$ and $-\delta$ for $\delta$ in the above equation. In brms the parameters $\alpha$, $\tau$, and $\beta$ are modeled as auxiliary parameters named *bs* ('boundary separation'), *ndt* ('non-decision time'), and *bias* respectively, whereas the drift rate $\delta$ is modeled via the ordinary model formula that is as $\delta = \mu$. ## Quantile regression Quantile regression is implemented via family **asym_laplace** (asymmetric Laplace distribution) with density $$ f(y) = \frac{p (1 - p)}{\sigma} \exp\left(-\rho_p\left(\frac{y - \mu}{\sigma}\right)\right) $$ where $\rho_p$ is given by $\rho_p(x) = x (p - I_{x < 0})$ and $I_A$ is the indicator function of set $A$. The parameter $\sigma$ is a positive scale parameter and $p$ is the *quantile* parameter taking on values in $(0, 1)$. For this distribution, we have $P(Y < g(\eta)) = p$. Thus, quantile regression can be performed by fixing $p$ to the quantile of interest. 
## Probability models The density of the **Beta** family for $y \in (0,1)$ is given by $$ f(y) = \frac{y^{\mu \phi - 1} (1-y)^{(1-\mu) \phi-1}}{B(\mu \phi, (1-\mu) \phi)} $$ where $B$ is the beta function and $\phi$ is a positive precision parameter. A multivariate generalization of the **Beta** family is the **dirichlet** family with density $$ f(y) = \frac{1}{B((\mu_{1}, \ldots, \mu_{K}) \phi)} \prod_{k=1}^K y_{k}^{\mu_{k} \phi - 1}. $$ The **dirichlet** family is implemented with the multivariate logit link function so that $$ \mu_{j} = \frac{\exp(\eta_{j})}{\sum_{k = 1}^{K} \exp(\eta_{k})} $$ For reasons of identifiability, $\eta_{\rm ref}$ is set to $0$, where ${\rm ref}$ is one of the response categories chosen as reference. An alternative to the **dirichlet** family is the **logistic_normal** family with density $$ f(y) = \frac{1}{\prod_{k=1}^K y_k} \times \text{multivariate_normal}(\tilde{y} \, | \, \mu, \sigma, \Omega) $$ where $\tilde{y}$ is the multivariate logit transformed response $$ \tilde{y} = (\log(y_1 / y_{\rm ref}), \ldots, \log(y_{\rm ref-1} / y_{\rm ref}), \log(y_{\rm ref+1} / y_{\rm ref}), \ldots, \log(y_K / y_{\rm ref})) $$ of dimension $K-1$ (excluding the reference category), which is modeled as multivariate normally distributed with latent mean and standard deviation vectors $\mu$ and $\sigma$, as well as correlation matrix $\Omega$. ## Circular models The density of the **von_mises** family for $y \in (-\pi,\pi)$ is given by $$ f(y) = \frac{\exp(\kappa \cos(y - \mu))}{2\pi I_0(\kappa)} $$ where $I_0$ is the modified Bessel function of order 0 and $\kappa$ is a positive precision parameter. ## Ordinal and categorical models For ordinal and categorical models, $y$ is one of the categories $1, ..., K$. The intercepts of ordinal models are called thresholds and are denoted as $\tau_k$, with $k \in \{1, ..., K-1\}$, whereas $\eta$ does not contain a fixed effects intercept. 
Note that the applied link functions $h$ are technically distribution functions $\mathbb{R} \rightarrow [0,1]$. The density of the **cumulative** family (implementing the most basic ordinal model) is given by $$ f(y) = g(\tau_{y + 1} - \eta) - g(\tau_{y} - \eta) $$ The densities of the **sratio** (stopping ratio) and **cratio** (continuation ratio) families are given by $$ f(y) = g(\tau_{y + 1} - \eta) \prod_{k = 1}^{y} (1 - g(\tau_{k} - \eta)) $$ and $$ f(y) = (1 - g(\eta - \tau_{y + 1})) \prod_{k = 1}^{y} g(\eta - \tau_{k}) $$ respectively. Note that both families are equivalent for symmetric link functions such as logit or probit. The density of the **acat** (adjacent category) family is given by $$ f(y) = \frac{\prod_{k=1}^{y} g(\eta - \tau_{k}) \prod_{k=y+1}^K(1-g(\eta - \tau_{k}))}{\sum_{k=0}^K\prod_{j=1}^k g(\eta-\tau_{j}) \prod_{j=k+1}^K(1-g(\eta - \tau_{j}))} $$ For the logit link, this can be simplified to $$ f(y) = \frac{\exp \left(\sum_{k=1}^{y} (\eta - \tau_{k}) \right)} {\sum_{k=0}^K \exp\left(\sum_{j=1}^k (\eta - \tau_{j}) \right)} $$ The linear predictor $\eta$ can be generalized to also depend on the category $k$ for a subset of predictors. This leads to category specific effects (for details on how to specify them see `help(brm)`). Note that **cumulative** and **sratio** models use $\tau - \eta$, whereas **cratio** and **acat** use $\eta - \tau$. This is done to ensure that larger values of $\eta$ increase the probability of *higher* response categories. The **categorical** family is currently only implemented with the multivariate logit link function and has density $$ f(y) = \mu_{y} = \frac{\exp(\eta_{y})}{\sum_{k = 1}^{K} \exp(\eta_{k})} $$ Note that $\eta$ does also depend on the category $k$. For reasons of identifiability, $\eta_{1}$ is set to $0$. 
A generalization of the **categorical** family to more than one trial is the **multinomial** family with density $$ f(y) = {N \choose y_{1}, y_{2}, \ldots, y_{K}} \prod_{k=1}^K \mu_{k}^{y_{k}} $$ where, for each category, $\mu_{k}$ is estimated via the multivariate logit link function shown above. ## Zero-inflated and hurdle models **Zero-inflated** and **hurdle** families extend existing families by adding special processes for responses that are zero. The density of a **zero-inflated** family is given by $$ f_z(y) = z + (1 - z) f(0) \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) \quad \text{if } y > 0 $$ where $z$ denotes the zero-inflation probability. Currently implemented families are **zero_inflated_poisson**, **zero_inflated_binomial**, **zero_inflated_negbinomial**, and **zero_inflated_beta**. The density of a **hurdle** family is given by $$ f_z(y) = z \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) / (1 - f(0)) \quad \text{if } y > 0 $$ Currently implemented families are **hurdle_poisson**, **hurdle_negbinomial**, **hurdle_gamma**, and **hurdle_lognormal**. The density of a **zero-one-inflated** family is given by $$ f_{\alpha, \gamma}(y) = \alpha (1 - \gamma) \quad \text{if } y = 0 \\ f_{\alpha, \gamma}(y) = \alpha \gamma \quad \text{if } y = 1 \\ f_{\alpha, \gamma}(y) = (1 - \alpha) f(y) \quad \text{if } y \notin \{0, 1\} $$ where $\alpha$ is the zero-one-inflation probability (i.e. the probability that zero or one occurs) and $\gamma$ is the conditional one-inflation probability (i.e. the probability that one occurs rather than zero). Currently implemented families are **zero_one_inflated_beta**. 
brms/inst/doc/brms_nonlinear.Rmd0000644000176200001440000003016714224753370016431 0ustar liggesusers--- title: "Estimating Non-Linear Models with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Estimating Non-Linear Models with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction This vignette provides an introduction on how to fit non-linear multilevel models with **brms**. Non-linear models are incredibly flexible and powerful, but require much more care with respect to model specification and priors than typical generalized linear models. Ignoring group-level effects for the moment, the predictor term $\eta_n$ of a generalized linear model for observation $n$ can be written as follows: $$\eta_n = \sum_{i = 1}^K b_i x_{ni}$$ where $b_i$ is the regression coefficient of predictor $i$ and $x_{ni}$ is the data of predictor $i$ for observation $n$. This also comprises interaction terms and various other data transformations. However, the structure of $\eta_n$ is always linear in the sense that the regression coefficients $b_i$ are multiplied by some predictor values and then summed up. This implies that the hypothetical predictor term $$\eta_n = b_1 \exp(b_2 x_n)$$ would *not* be a *linear* predictor anymore and we could not fit it using classical techniques of generalized linear models. We thus need a more general model class, which we will call *non-linear* models. 
Note that the term 'non-linear' does not say anything about the assumed distribution of the response variable. In particular it does not mean 'not normally distributed' as we can apply non-linear predictor terms to all kinds of response distributions (for more details on response distributions available in **brms** see `vignette("brms_families")`). ## A Simple Non-Linear Model We begin with a simple example using simulated data. ```{r} b <- c(2, 0.75) x <- rnorm(100) y <- rnorm(100, mean = b[1] * exp(b[2] * x)) dat1 <- data.frame(x, y) ``` As stated above, we cannot use a generalized linear model to estimate $b$ so we go ahead an specify a non-linear model. ```{r, results='hide'} prior1 <- prior(normal(1, 2), nlpar = "b1") + prior(normal(0, 2), nlpar = "b2") fit1 <- brm(bf(y ~ b1 * exp(b2 * x), b1 + b2 ~ 1, nl = TRUE), data = dat1, prior = prior1) ``` When looking at the above code, the first thing that becomes obvious is that we changed the `formula` syntax to display the non-linear formula including predictors (i.e., `x`) and parameters (i.e., `b1` and `b2`) wrapped in a call to `bf`. This stands in contrast to classical **R** formulas, where only predictors are given and parameters are implicit. The argument `b1 + b2 ~ 1` serves two purposes. First, it provides information, which variables in `formula` are parameters, and second, it specifies the linear predictor terms for each parameter. In fact, we should think of non-linear parameters as placeholders for linear predictor terms rather than as parameters themselves (see also the following examples). In the present case, we have no further variables to predict `b1` and `b2` and thus we just fit intercepts that represent our estimates of $b_1$ and $b_2$ in the model equation above. The formula `b1 + b2 ~ 1` is a short form of `b1 ~ 1, b2 ~ 1` that can be used if multiple non-linear parameters share the same formula. Setting `nl = TRUE` tells **brms** that the formula should be treated as non-linear. 
In contrast to generalized linear models, priors on population-level parameters (i.e., 'fixed effects') are often mandatory to identify a non-linear model. Thus, **brms** requires the user to explicitly specify these priors. In the present example, we used a `normal(1, 2)` prior on (the population-level intercept of) `b1`, while we used a `normal(0, 2)` prior on (the population-level intercept of) `b2`. Setting priors is a non-trivial task in all kinds of models, especially in non-linear models, so you should always invest some time to think of appropriate priors. Quite often, you may be forced to change your priors after fitting a non-linear model for the first time, when you observe different MCMC chains converging to different posterior regions. This is a clear sign of an identification problem and one solution is to set stronger (i.e., more narrow) priors. To obtain summaries of the fitted model, we apply ```{r} summary(fit1) plot(fit1) plot(conditional_effects(fit1), points = TRUE) ``` The `summary` method reveals that we were able to recover the true parameter values pretty nicely. According to the `plot` method, our MCMC chains have converged well and to the same posterior. The `conditional_effects` method visualizes the model-implied (non-linear) regression line. We might be also interested in comparing our non-linear model to a classical linear model. ```{r, results='hide'} fit2 <- brm(y ~ x, data = dat1) ``` ```{r} summary(fit2) ``` To investigate and compare model fit, we can apply graphical posterior predictive checks, which make use of the **bayesplot** package on the backend. ```{r} pp_check(fit1) pp_check(fit2) ``` We can also easily compare model fit using leave-one-out cross-validation. ```{r} loo(fit1, fit2) ``` Since smaller `LOOIC` values indicate better model fit, it is immediately evident that the non-linear model fits the data better, which is of course not too surprising since we simulated the data from exactly that model. 
## A Real-World Non-Linear model On his blog, Markus Gesmann predicts the growth of cumulative insurance loss payments over time, originated from different origin years (see https://www.magesblog.com/post/2015-11-03-loss-developments-via-growth-curves-and/). We will use a slightly simplified version of his model for demonstration purposes here. It looks as follows: $$cum_{AY, dev} \sim N(\mu_{AY, dev}, \sigma)$$ $$\mu_{AY, dev} = ult_{AY} \left(1 - \exp\left(- \left( \frac{dev}{\theta} \right)^\omega \right) \right)$$ The cumulative insurance payments $cum$ will grow over time, and we model this dependency using the variable $dev$. Further, $ult_{AY}$ is the (to be estimated) ultimate loss of accident each year. It constitutes a non-linear parameter in our framework along with the parameters $\theta$ and $\omega$, which are responsible for the growth of the cumulative loss and are assumed to be the same across years. The data is already shipped with brms. ```{r} data(loss) head(loss) ``` and translate the proposed model into a non-linear **brms** model. ```{r, results='hide'} fit_loss <- brm( bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE), data = loss, family = gaussian(), prior = c( prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta") ), control = list(adapt_delta = 0.9) ) ``` We estimate a group-level effect of accident year (variable `AY`) for the ultimate loss `ult`. This also shows nicely how a non-linear parameter is actually a placeholder for a linear predictor, which in case of `ult`, contains only an varying intercept over year. Again, priors on population-level effects are required and, for the present model, are actually mandatory to ensure identifiability. We summarize the model using well known methods. 
```{r} summary(fit_loss) plot(fit_loss, N = 3, ask = FALSE) conditional_effects(fit_loss) ``` Next, we show marginal effects separately for each year. ```{r} conditions <- data.frame(AY = unique(loss$AY)) rownames(conditions) <- unique(loss$AY) me_loss <- conditional_effects( fit_loss, conditions = conditions, re_formula = NULL, method = "predict" ) plot(me_loss, ncol = 5, points = TRUE) ``` It is evident that there is some variation in cumulative loss across accident years, for instance due to natural disasters happening only in certain years. Further, we see that the uncertainty in the predicted cumulative loss is larger for later years with fewer available data points. For a more detailed discussion of this data set, see Section 4.5 in Gesmann & Morris (2020). ## Advanced Item-Response Models As a third example, we want to show how to model more advanced item-response models using the non-linear model framework of **brms**. For simplicity, suppose we have a single forced choice item with three alternatives of which only one is correct. Our response variable is whether a person answers the item correctly (1) or not (0). Person are assumed to vary in their ability to answer the item correctly. However, every person has a 33% chance of getting the item right just by guessing. We thus simulate some data to reflect this situation. ```{r} inv_logit <- function(x) 1 / (1 + exp(-x)) ability <- rnorm(300) p <- 0.33 + 0.67 * inv_logit(ability) answer <- ifelse(runif(300, 0, 1) < p, 1, 0) dat_ir <- data.frame(ability, answer) ``` The most basic item-response model is equivalent to a simple logistic regression model. ```{r, results='hide'} fit_ir1 <- brm(answer ~ ability, data = dat_ir, family = bernoulli()) ``` However, this model completely ignores the guessing probability and will thus likely come to biased estimates and predictions. 
```{r} summary(fit_ir1) plot(conditional_effects(fit_ir1), points = TRUE) ``` A more sophisticated approach incorporating the guessing probability looks as follows: ```{r, results='hide'} fit_ir2 <- brm( bf(answer ~ 0.33 + 0.67 * inv_logit(eta), eta ~ ability, nl = TRUE), data = dat_ir, family = bernoulli("identity"), prior = prior(normal(0, 5), nlpar = "eta") ) ``` It is very important to set the link function of the `bernoulli` family to `identity` or else we will apply two link functions. This is because our non-linear predictor term already contains the desired link function (`0.33 + 0.67 * inv_logit`), but the `bernoulli` family applies the default `logit` link on top of it. This will of course lead to strange and uninterpretable results. Thus, please make sure that you set the link function to `identity`, whenever your non-linear predictor term already contains the desired link function. ```{r} summary(fit_ir2) plot(conditional_effects(fit_ir2), points = TRUE) ``` Comparing model fit via leave-one-out cross-validation ```{r} loo(fit_ir1, fit_ir2) ``` shows that both model fit the data equally well, but remember that predictions of the first model might still be misleading as they may well be below the guessing probability for low ability values. Now, suppose that we don't know the guessing probability and want to estimate it from the data. This can easily be done changing the previous model just a bit. ```{r, results='hide'} fit_ir3 <- brm( bf(answer ~ guess + (1 - guess) * inv_logit(eta), eta ~ 0 + ability, guess ~ 1, nl = TRUE), data = dat_ir, family = bernoulli("identity"), prior = c( prior(normal(0, 5), nlpar = "eta"), prior(beta(1, 1), nlpar = "guess", lb = 0, ub = 1) ) ) ``` Here, we model the guessing probability as a non-linear parameter making sure that it cannot exceed the interval $[0, 1]$. 
We did not estimate an intercept for `eta`, as this will lead to a bias in the estimated guessing parameter (try it out; this is an excellent example of how careful one has to be in non-linear models). ```{r} summary(fit_ir3) plot(fit_ir3) plot(conditional_effects(fit_ir3), points = TRUE) ``` The results show that we are able to recover the simulated model parameters with this non-linear model. Of course, real item-response data have multiple items so that accounting for item and person variability (e.g., using a multilevel model with varying intercepts) becomes necessary as we have multiple observations per item and person. Luckily, this can all be done within the non-linear framework of **brms** and I hope that this vignette serves as a good starting point. ## References Gesmann M. & Morris J. (2020). Hierarchical Compartmental Reserving Models. *CAS Research Papers*. brms/inst/doc/brms_customfamilies.html0000644000176200001440000023276014504266026017713 0ustar liggesusers Define Custom Response Distributions with brms

Define Custom Response Distributions with brms

Paul Bürkner

2023-09-25

Introduction

The brms package comes with a lot of built-in response distributions – usually called families in R – to specify among others linear, count data, survival, response times, or ordinal models (see help(brmsfamily) for an overview). Despite supporting over two dozen families, there is still a long list of distributions, which are not natively supported. The present vignette will explain how to specify such custom families in brms. By doing that, users can benefit from the modeling flexibility and post-processing options of brms even when using self-defined response distributions. If you have built a custom family that you want to make available to other users, you can submit a pull request to this GitHub repository.

A Case Study

As a case study, we will use the cbpp data of the lme4 package, which describes the development of the CBPP disease of cattle in Africa. The data set contains four variables: period (the time period), herd (a factor identifying the cattle herd), incidence (number of new disease cases for a given herd and time period), as well as size (the herd size at the beginning of a given time period).

data("cbpp", package = "lme4")
head(cbpp)
  herd incidence size period
1    1         2   14      1
2    1         3   12      2
3    1         4    9      3
4    1         0    5      4
5    2         3   22      1
6    2         1   18      2

In a first step, we will be predicting incidence using a simple binomial model, which will serve as our baseline model. For observed number of events \(y\) (incidence in our case) and total number of trials \(T\) (size), the probability mass function of the binomial distribution is defined as

\[ P(y | T, p) = \binom{T}{y} p^{y} (1 - p)^{T-y} \]

where \(p\) is the event probability. In the classical binomial model, we will directly predict \(p\) on the logit-scale, which means that for each observation \(i\) we compute the success probability \(p_i\) as

\[ p_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)} \]

where \(\eta_i\) is the linear predictor term of observation \(i\) (see vignette("brms_overview") for more details on linear predictors in brms). Predicting incidence by period and a varying intercept of herd is straight forward in brms:

fit1 <- brm(incidence | trials(size) ~ period + (1|herd),
            data = cbpp, family = binomial())

In the summary output, we see that the incidence probability varies substantially over herds, but reduces over the course of the time as indicated by the negative coefficients of period.

summary(fit1)
 Family: binomial 
  Links: mu = logit 
Formula: incidence | trials(size) ~ period + (1 | herd) 
   Data: cbpp (Number of observations: 56) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~herd (Number of levels: 15) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.76      0.23     0.39     1.31 1.01      939     2021

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    -1.40      0.27    -1.93    -0.88 1.00     1952     2142
period2      -1.00      0.31    -1.61    -0.41 1.00     4513     3074
period3      -1.14      0.34    -1.84    -0.51 1.00     4241     2341
period4      -1.62      0.44    -2.55    -0.83 1.00     4149     2765

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

A drawback of the binomial model is that – after taking into account the linear predictor – its variance is fixed to \(\text{Var}(y_i) = T_i p_i (1 - p_i)\). All variance exceeding this value cannot be taken into account by the model. There are multiple ways of dealing with this so-called overdispersion and the solution described below will serve as an illustrative example of how to define custom families in brms.

The Beta-Binomial Distribution

The beta-binomial model is a generalization of the binomial model with an additional parameter to account for overdispersion. In the beta-binomial model, we do not predict the binomial probability \(p_i\) directly, but assume it to be beta distributed with hyperparameters \(\alpha > 0\) and \(\beta > 0\):

\[ p_i \sim \text{Beta}(\alpha_i, \beta_i) \]

The \(\alpha\) and \(\beta\) parameters are both hard to interpret and generally not recommended for use in regression models. Thus, we will apply a different parameterization with parameters \(\mu \in [0, 1]\) and \(\phi > 0\), which we will call \(\text{Beta2}\):

\[ \text{Beta2}(\mu, \phi) = \text{Beta}(\mu \phi, (1-\mu) \phi) \]

The parameters \(\mu\) and \(\phi\) specify the mean and precision parameter, respectively. By defining

\[ \mu_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)} \]

we still predict the expected probability by means of our transformed linear predictor (as in the original binomial model), but account for potential overdispersion via the parameter \(\phi\).

Fitting Custom Family Models

The beta-binomial distribution is natively supported in brms nowadays, but we will still use it as an example to define it ourselves via the custom_family function. This function requires the family’s name, the names of its parameters (mu and phi in our case), corresponding link functions (only applied if parameters are predicted), their theoretical lower and upper bounds (only applied if parameters are not predicted), information on whether the distribution is discrete or continuous, and finally, whether additional non-parameter variables need to be passed to the distribution. For our beta-binomial example, this results in the following custom family:

beta_binomial2 <- custom_family(
  "beta_binomial2", dpars = c("mu", "phi"),
  links = c("logit", "log"),
  lb = c(0, 0), ub = c(1, NA),
  type = "int", vars = "vint1[n]"
)

The name vint1 for the variable containing the number of trials is not chosen arbitrarily as we will see below. Next, we have to provide the relevant Stan functions if the distribution is not defined in Stan itself. For the beta_binomial2 distribution, this is straight forward since the ordinary beta_binomial distribution is already implemented.

stan_funs <- "
  real beta_binomial2_lpmf(int y, real mu, real phi, int T) {
    return beta_binomial_lpmf(y | T, mu * phi, (1 - mu) * phi);
  }
  int beta_binomial2_rng(real mu, real phi, int T) {
    return beta_binomial_rng(T, mu * phi, (1 - mu) * phi);
  }
"

For the model fitting, we will only need beta_binomial2_lpmf, but beta_binomial2_rng will come in handy when it comes to post-processing. We define:

stanvars <- stanvar(scode = stan_funs, block = "functions")

To provide information about the number of trials (an integer variable), we are going to use the addition argument vint(), which can only be used in custom families. Similarly, if we needed to include additional vectors of real data, we would use vreal(). Actually, for this particular example, we could more elegantly apply the addition argument trials() instead of vint() as in the basic binomial model. However, since the present vignette is meant to give a general overview of the topic, we will go with the more general method.

We now have all components together to fit our custom beta-binomial model:

fit2 <- brm(
  incidence | vint(size) ~ period + (1|herd), data = cbpp,
  family = beta_binomial2, stanvars = stanvars
)

The summary output reveals that the uncertainty in the coefficients of period is somewhat larger than in the basic binomial model, which is the result of including the overdispersion parameter phi in the model. Apart from that, the results looks pretty similar.

summary(fit2)
 Family: beta_binomial2 
  Links: mu = logit; phi = identity 
Formula: incidence | vint(size) ~ period + (1 | herd) 
   Data: cbpp (Number of observations: 56) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~herd (Number of levels: 15) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.39      0.26     0.02     0.98 1.00     1154     1913

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    -1.34      0.26    -1.86    -0.84 1.00     3490     2601
period2      -1.01      0.40    -1.84    -0.23 1.00     3651     2834
period3      -1.26      0.46    -2.22    -0.41 1.00     3671     2579
period4      -1.54      0.52    -2.63    -0.58 1.00     3636     2399

Family Specific Parameters: 
    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
phi    17.31     14.87     5.62    52.42 1.00     1845     1699

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

Post-Processing Custom Family Models

Some post-processing methods such as summary or plot work out of the box for custom family models. However, there are three particularly important methods, which require additional input by the user. These are posterior_epred, posterior_predict and log_lik computing predicted mean values, predicted response values, and log-likelihood values, respectively. They are not only relevant for their own sake, but also provide the basis of many other post-processing methods. For instance, we may be interested in comparing the fit of the binomial model with that of the beta-binomial model by means of approximate leave-one-out cross-validation implemented in method loo, which in turn requires log_lik to be working.

The log_lik function of a family should be named log_lik_<family-name> and have the two arguments i (indicating observations) and prep. You don’t have to worry too much about how prep is created (if you are interested, check out the prepare_predictions function). Instead, all you need to know is that parameters are stored in slot dpars and data are stored in slot data. Generally, parameters take on the form of a \(S \times N\) matrix (with \(S =\) number of posterior draws and \(N =\) number of observations) if they are predicted (as is mu in our example) and a vector of size \(N\) if they are not predicted (as is phi).

We could define the complete log-likelihood function in R directly, or we can expose the self-defined Stan functions and apply them. The latter approach is usually more convenient, but the former is more stable and the only option when implementing custom families in other R packages building upon brms. For the purpose of the present vignette, we will go with the latter approach.

expose_functions(fit2, vectorize = TRUE)

and define the required log_lik functions with a few lines of code.

log_lik_beta_binomial2 <- function(i, prep) {
  mu <- brms::get_dpar(prep, "mu", i = i)
  phi <- brms::get_dpar(prep, "phi", i = i)
  trials <- prep$data$vint1[i]
  y <- prep$data$Y[i]
  beta_binomial2_lpmf(y, mu, phi, trials)
}

The get_dpar function will do the necessary transformations to handle both the case when the distributional parameters are predicted separately for each row and when they are the same for the whole fit.

With that being done, all of the post-processing methods requiring log_lik will work as well. For instance, model comparison can simply be performed via

loo(fit1, fit2)
Output of model 'fit1':

Computed from 4000 by 56 log-likelihood matrix

         Estimate   SE
elpd_loo   -100.3 10.2
p_loo        22.5  4.4
looic       200.6 20.4
------
Monte Carlo SE of elpd_loo is NA.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     43    76.8%   556       
 (0.5, 0.7]   (ok)        7    12.5%   349       
   (0.7, 1]   (bad)       5     8.9%   46        
   (1, Inf)   (very bad)  1     1.8%   16        
See help('pareto-k-diagnostic') for details.

Output of model 'fit2':

Computed from 4000 by 56 log-likelihood matrix

         Estimate   SE
elpd_loo    -94.9  8.2
p_loo        10.8  1.9
looic       189.7 16.5
------
Monte Carlo SE of elpd_loo is NA.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     47    83.9%   906       
 (0.5, 0.7]   (ok)        7    12.5%   204       
   (0.7, 1]   (bad)       2     3.6%   145       
   (1, Inf)   (very bad)  0     0.0%   <NA>      
See help('pareto-k-diagnostic') for details.

Model comparisons:
     elpd_diff se_diff
fit2  0.0       0.0   
fit1 -5.4       4.2   

Since larger ELPD values indicate better fit, we see that the beta-binomial model fits somewhat better, although the corresponding standard error reveals that the difference is not that substantial.

Next, we will define the function necessary for the posterior_predict method:

posterior_predict_beta_binomial2 <- function(i, prep, ...) {
  mu <- brms::get_dpar(prep, "mu", i = i)
  phi <- brms::get_dpar(prep, "phi", i = i)
  trials <- prep$data$vint1[i]
  beta_binomial2_rng(mu, phi, trials)
}

The posterior_predict function looks pretty similar to the corresponding log_lik function, except that we are now creating random draws of the response instead of log-likelihood values. Again, we are using an exposed Stan function for convenience. Make sure to add a ... argument to your posterior_predict function even if you are not using it, since some families require additional arguments. With posterior_predict to be working, we can engage for instance in posterior-predictive checking:

pp_check(fit2)

When defining the posterior_epred function, you have to keep in mind that it has only a prep argument and should compute the mean response values for all observations at once. Since the mean of the beta-binomial distribution is \(\text{E}(y) = \mu T\), the definition of the corresponding posterior_epred function is not too complicated, but we need to get the dimension of parameters and data in line.

posterior_epred_beta_binomial2 <- function(prep) {
  mu <- brms::get_dpar(prep, "mu")
  trials <- prep$data$vint1
  trials <- matrix(trials, nrow = nrow(mu), ncol = ncol(mu), byrow = TRUE)
  mu * trials
}

A post-processing method relying directly on posterior_epred is conditional_effects, which allows to visualize effects of predictors.

conditional_effects(fit2, conditions = data.frame(size = 1))

For ease of interpretation we have set size to 1 so that the y-axis of the above plot indicates probabilities.

Turning a Custom Family into a Native Family

Family functions built natively into brms are safer to use and more convenient, as they require much less user input. If you think that your custom family is general enough to be useful to other users, please feel free to open an issue on GitHub so that we can discuss all the details. Provided that we agree it makes sense to implement your family natively in brms, the following steps are required (foo is a placeholder for the family name):

  • In family-lists.R, add function .family_foo which should contain basic information about your family (you will find lots of examples for other families there).
  • In families.R, add family function foo which should be a simple wrapper around .brmsfamily.
  • In stan-likelihood.R, add function stan_log_lik_foo which provides the likelihood of the family in Stan language.
  • If necessary, add self-defined Stan functions in separate files under inst/chunks.
  • Add functions posterior_predict_foo, posterior_epred_foo and log_lik_foo to posterior_predict.R, posterior_epred.R and log_lik.R, respectively.
  • If necessary, add distribution functions to distributions.R.
brms/inst/doc/brms_monotonic.html0000644000176200001440000066656314504266474016717 0ustar liggesusers Estimating Monotonic Effects with brms

Estimating Monotonic Effects with brms

Paul Bürkner

2023-09-25

Introduction

This vignette is about monotonic effects, a special way of handling discrete predictors that are on an ordinal or higher scale (Bürkner & Charpentier, in review). A predictor, which we want to model as monotonic (i.e., having a monotonically increasing or decreasing relationship with the response), must either be integer valued or an ordered factor. As opposed to a continuous predictor, predictor categories (or integers) are not assumed to be equidistant with respect to their effect on the response variable. Instead, the distance between adjacent predictor categories (or integers) is estimated from the data and may vary across categories. This is realized by parameterizing as follows: One parameter, \(b\), takes care of the direction and size of the effect similar to an ordinary regression parameter. If the monotonic effect is used in a linear model, \(b\) can be interpreted as the expected average difference between two adjacent categories of the ordinal predictor. An additional parameter vector, \(\zeta\), estimates the normalized distances between consecutive predictor categories which thus defines the shape of the monotonic effect. For a single monotonic predictor, \(x\), the linear predictor term of observation \(n\) looks as follows:

\[\eta_n = b D \sum_{i = 1}^{x_n} \zeta_i\]

The parameter \(b\) can take on any real value, while \(\zeta\) is a simplex, which means that it satisfies \(\zeta_i \in [0,1]\) and \(\sum_{i = 1}^D \zeta_i = 1\) with \(D\) being the number of elements of \(\zeta\). Equivalently, \(D\) is the number of categories (or highest integer in the data) minus 1, since we start counting categories from zero to simplify the notation.

A Simple Monotonic Model

A main application of monotonic effects are ordinal predictors that can be modeled this way without falsely treating them either as continuous or as unordered categorical predictors. In Psychology, for instance, this kind of data is omnipresent in the form of Likert scale items, which are often treated as being continuous for convenience without ever testing this assumption. As an example, suppose we are interested in the relationship of yearly income (in $) and life satisfaction measured on an arbitrary scale from 0 to 100. Usually, people are not asked for the exact income. Instead, they are asked to rank themselves in one of certain classes, say: ‘below 20k’, ‘between 20k and 40k’, ‘between 40k and 100k’ and ‘above 100k’. We use some simulated data for illustration purposes.

income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100")
income <- factor(sample(income_options, 100, TRUE),
                 levels = income_options, ordered = TRUE)
mean_ls <- c(30, 60, 70, 75)
ls <- mean_ls[income] + rnorm(100, sd = 7)
dat <- data.frame(income, ls)

We now proceed with analyzing the data modeling income as a monotonic effect.

fit1 <- brm(ls ~ mo(income), data = dat)

The summary methods yield

summary(fit1)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ mo(income) 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    28.89      1.46    26.04    31.77 1.00     2822     2453
moincome     15.17      0.62    13.95    16.39 1.00     2784     2735

Simplex Parameters: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
moincome1[1]     0.73      0.04     0.66     0.81 1.00     3111     2137
moincome1[2]     0.18      0.04     0.09     0.27 1.00     3577     2485
moincome1[3]     0.09      0.04     0.02     0.16 1.00     2593     1236

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     6.69      0.50     5.83     7.77 1.00     3090     2356

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(fit1, variable = "simo", regex = TRUE)

plot(conditional_effects(fit1))

The distributions of the simplex parameter of income, as shown in the plot method, demonstrate that the largest difference (about 70% of the difference between minimum and maximum category) is between the first two categories.

Now, let’s compare the monotonic model with two common alternative models. (a) Assume income to be continuous:

dat$income_num <- as.numeric(dat$income)
fit2 <- brm(ls ~ income_num, data = dat)
summary(fit2)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ income_num 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     23.59      2.55    18.68    28.73 1.00     3445     2910
income_num    13.98      0.89    12.20    15.70 1.00     3636     3066

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     9.97      0.73     8.72    11.48 1.00     3447     2661

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

or (b) Assume income to be an unordered factor:

contrasts(dat$income) <- contr.treatment(4)
fit3 <- brm(ls ~ income, data = dat)
summary(fit3)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ income 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    28.65      1.46    25.80    31.58 1.00     2617     2525
income2      33.51      2.07    29.39    37.49 1.00     3166     3080
income3      41.78      1.90    38.02    45.44 1.00     2840     3062
income4      45.82      1.87    42.16    49.43 1.00     2819     3103

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     6.70      0.49     5.81     7.74 1.00     3656     2964

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

We can easily compare the fit of the three models using leave-one-out cross-validation.

loo(fit1, fit2, fit3)
Output of model 'fit1':

Computed from 4000 by 100 log-likelihood matrix

         Estimate   SE
elpd_loo   -334.0  7.2
p_loo         4.8  0.8
looic       668.0 14.4
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Output of model 'fit2':

Computed from 4000 by 100 log-likelihood matrix

         Estimate   SE
elpd_loo   -373.1  6.8
p_loo         2.9  0.5
looic       746.2 13.6
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Output of model 'fit3':

Computed from 4000 by 100 log-likelihood matrix

         Estimate   SE
elpd_loo   -333.9  7.2
p_loo         4.7  0.8
looic       667.8 14.3
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Model comparisons:
     elpd_diff se_diff
fit3   0.0       0.0  
fit1  -0.1       0.2  
fit2 -39.2       5.7  

The monotonic model fits better than the continuous model, which is not surprising given that the relationship between income and ls is non-linear. The monotonic and the unordered factor model have almost identical fit in this example, but this may not be the case for other data sets.

Setting Prior Distributions

In the previous monotonic model, we have implicitly assumed that all differences between adjacent categories were a-priori the same, or formulated correctly, had the same prior distribution. In the following, we want to show how to change this assumption. The canonical prior distribution of a simplex parameter is the Dirichlet distribution, a multivariate generalization of the beta distribution. It is non-zero for all valid simplexes (i.e., \(\zeta_i \in [0,1]\) and \(\sum_{i = 1}^D \zeta_i = 1\)) and zero otherwise. The Dirichlet prior has a single parameter \(\alpha\) of the same length as \(\zeta\). The higher \(\alpha_i\) the higher the a-priori probability of higher values of \(\zeta_i\). Suppose that, before looking at the data, we expected that the same amount of additional money matters more for people who generally have less money. This translates into higher a-priori values of \(\zeta_1\) (difference between ‘below_20’ and ‘20_to_40’) and hence into higher values of \(\alpha_1\). We choose \(\alpha_1 = 2\) and \(\alpha_2 = \alpha_3 = 1\), the latter being the default value of \(\alpha\). To fit the model we write:

prior4 <- prior(dirichlet(c(2, 1, 1)), class = "simo", coef = "moincome1")
fit4 <- brm(ls ~ mo(income), data = dat,
            prior = prior4, sample_prior = TRUE)

The 1 at the end of "moincome1" may appear strange when first working with monotonic effects. However, it is necessary as one monotonic term may be associated with multiple simplex parameters, if interactions of multiple monotonic variables are included in the model.

summary(fit4)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ mo(income) 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    28.91      1.51    25.99    31.95 1.00     2706     2268
moincome     15.16      0.64    13.88    16.41 1.00     2576     2295

Simplex Parameters: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
moincome1[1]     0.73      0.04     0.66     0.81 1.00     3625     2657
moincome1[2]     0.18      0.04     0.10     0.27 1.00     3712     2342
moincome1[3]     0.09      0.04     0.02     0.16 1.00     3043     1623

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     6.69      0.50     5.82     7.77 1.00     3549     2039

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

We have used sample_prior = TRUE to also obtain draws from the prior distribution of simo_moincome1 so that we can visualize it.

plot(fit4, variable = "prior_simo", regex = TRUE, N = 3)

As is visible in the plots, simo_moincome1[1] was a-priori on average twice as high as simo_moincome1[2] and simo_moincome1[3] as a result of setting \(\alpha_1\) to 2.

Modeling interactions of monotonic variables

Suppose, we have additionally asked participants for their age.

dat$age <- rnorm(100, mean = 40, sd = 10)

We are not only interested in the main effect of age but also in the interaction of income and age. Interactions with monotonic variables can be specified in the usual way using the * operator:

fit5 <- brm(ls ~ mo(income)*age, data = dat)
summary(fit5)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ mo(income) * age 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept       29.69      4.26    21.10    37.70 1.00     1483     1965
age             -0.02      0.10    -0.21     0.19 1.00     1301     1661
moincome        14.94      2.00    11.45    19.11 1.00     1014     1851
moincome:age     0.01      0.05    -0.09     0.10 1.00      965     1680

Simplex Parameters: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
moincome1[1]         0.75      0.07     0.62     0.89 1.00     1235     1521
moincome1[2]         0.17      0.06     0.05     0.28 1.00     1950     1669
moincome1[3]         0.08      0.04     0.01     0.17 1.00     1788     1418
moincome:age1[1]     0.35      0.24     0.02     0.84 1.00     2109     2000
moincome:age1[2]     0.33      0.23     0.01     0.83 1.00     2683     2585
moincome:age1[3]     0.32      0.22     0.01     0.81 1.00     2594     2322

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     6.76      0.49     5.89     7.77 1.00     3069     2762

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
conditional_effects(fit5, "income:age")

Modelling Monotonic Group-Level Effects

Suppose that the 100 people in our sample data were drawn from 10 different cities; 10 people per city. Thus, we add an identifier for city to the data and add some city-related variation to ls.

dat$city <- rep(1:10, each = 10)
var_city <- rnorm(10, sd = 10)
dat$ls <- dat$ls + var_city[dat$city]

With the following code, we fit a multilevel model assuming the intercept and the effect of income to vary by city:

fit6 <- brm(ls ~ mo(income)*age + (mo(income) | city), data = dat)
summary(fit6)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ mo(income) * age + (mo(income) | city) 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~city (Number of levels: 10) 
                        Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)              10.53      3.19     5.96    18.07 1.00     1572     2153
sd(moincome)                0.91      0.75     0.03     2.65 1.00     1804     1985
cor(Intercept,moincome)    -0.21      0.53    -0.96     0.88 1.00     4953     2758

Population-Level Effects: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept       35.76      5.86    24.09    47.34 1.00     1897     2323
age             -0.02      0.11    -0.24     0.22 1.00     2246     2608
moincome        15.38      2.17    11.53    19.97 1.00     1644     2142
moincome:age    -0.00      0.05    -0.11     0.09 1.00     1513     2118

Simplex Parameters: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
moincome1[1]         0.72      0.07     0.59     0.87 1.00     1940     1530
moincome1[2]         0.20      0.06     0.07     0.31 1.00     2474     1720
moincome1[3]         0.08      0.04     0.01     0.17 1.00     2602     2009
moincome:age1[1]     0.36      0.24     0.02     0.85 1.00     4208     2518
moincome:age1[2]     0.33      0.23     0.02     0.82 1.00     4694     3185
moincome:age1[3]     0.31      0.22     0.01     0.79 1.00     3981     3067

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     6.68      0.52     5.77     7.80 1.00     3835     3033

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

reveals that the effect of income varies only a little across cities. For the present data, this is not overly surprising given that, in the data simulations, we assumed income to have the same effect across cities.

References

Bürkner P. C. & Charpentier, E. (in review). Monotonic Effects: A Principled Approach for Including Ordinal Predictors in Regression Models. PsyArXiv preprint.

brms/inst/doc/brms_overview.pdf0000644000176200001440000274156114504270214016342 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 4513 /Filter /FlateDecode /N 86 /First 726 >> stream x\[s۶~?o;N;@Lb9Nڴ>mŕ&ٿ| eZɕ=D‡uHLee:+,L&י̈́EP|&D& RfJ(<2Ȅδf9g3#8τˌ5Qd*>FϬDkh5ץ_E\}>kt1iH57 Eqy   T,Ɓ(MǑ9%f mil:RHXںں ~oA2L 5դHLJeL*8TBA*^DHD/7U$z*uJ=|=DO|(-wZEPú|1ƛ9PNVٞTb6o/tˤgiՀ{s=Q}j@ٳnܔC ldľj,唽WSrxd16eDk!d/n | ko %QRK+uWVRJ=B Tt#4v"H%D*&R1TLb"4y:>a5zR],F,c zrɼ>w 2Ē x4 -z\C8 ?UC:zAgltpOq^kKhcr} ˠ bWd&yRYOObTóQ=dԓ:֘[,[HƛM%9ӛ*"ZcE*ڈokۀt/Pi9[)d`U׶?nVۥ͖ZeoQ:c-.AM%jBn| JF@qE\Uq؅J lė/_ߏ1uo.28SdwVPfoQȕs!BpQ}BX"䢭bNj7hBzd)ɢk)14mGtw`:#&ā]:!& m~hO:H.ݶILjBLaT|Lm@r8s&a{:tv,`׸ws5,]:iV?MUIٰMsD6 +r ݸNGC .zy7XMvM)eTwIi^#HJc؏,؉7S;%S;S;%S;SfZRwm\m_4kp׌ӟ{alTd<;d$۬cWsڌ#=6-r9Hy6˸cjjX\jS8 ᲠbJwuGG/I{F܊m{N'{L듘,SI6 8_$%TZ|Xo?6 0.&y(>ɞFCwAj7Z>^^WFh/Aq[ ѫ&4(rE񺔹J2Ƿp9@6|c`'.A{i̮<8n7ũ `sCiA)+PWk6[PK9LPIέcMp[.F`ܫ( >ͽ=PR; P9wKPh棂kԽĄѠx LF^ h)RT+.5l\xn*@$IwEE҄KR\B<&$]Vt 9k3y{E)乕oȷLS+ކ6/gV冋 %:ƻI S蠢D֘jI,EH`ȐjtuNS+ I"#)Mn, *kHtנ_$]Jh\WYx>bM,dY"\X{/VJ I"i߫S+rrs61tBޙ@G;()̶1zw[{h J 8-^ JS(rr,ïN_d~ `ZzB/e:L?lT.#9(tڶ7<Cljs4ߟf烺nѰ.͸?ag쬜Y9P5a>)Գb|>>mZ2׮[` ^5/UʅH v[ 7*궵SBۮ,i7ڽe gZA s{wjudAT0}Ya _~gn3Icun4_8W:.|5QXyG[s j8OdiVuPlJ/"ܸmK7v^bMEL-o\^7iMSr()wPIu%x_F{n7ʆ2ʭFhTa"ZZu|NK$>R*CXǑ 'ϿO^ͦrzW^T='M9i7md;3%JzTC)'/T.K8e p.%Pt=+aROPxay\'rCzv?|#Y _lLAn.gƃjD%U N fћy4;X##-Et3= d4v)kPz_,)}6ŝƮ1E),->f?=O sIDJs[FK8Xص iYor0+'l 줙NGK΃k+5ksvН}ǏW${Q@&W^C q} nBK x8] G>e$x<<=/A9A #3'@%wWqs]JfbaH}w^qtd'Lt{V{{+ dQf<͉U!!$ۻ%SoB(@%<ϫY5ؿנϱ%z>y$qzNiO4)l|ے˟fvZ 7endstream endobj 88 0 obj << /Subtype /XML /Type /Metadata /Length 1535 >> stream GPL Ghostscript 9.53.3 Bayesian inference, multilevel model, ordinal data, MCMC, Stan, R 2023-09-25T13:35:38+02:00 2023-09-25T13:35:38+02:00 LaTeX with hyperref brms: An R Package for Bayesian Multilevel Models using StanPaul-Christian Bürkner endstream endobj 89 0 obj << /Type /ObjStm /Length 3917 /Filter /FlateDecode /N 86 /First 800 >> stream x[mo7~8d|'Eǎqi*K^.w%J: BrÙ!r Ɍ,(46a27xmxI/ 
=2%g"ϙ*Ǎ`zTF1-QVi+n >'KRfnj64؜>fe38F!+bViYPča**e9r O!oߟԵ<|D}T &C1EBXU?)ftX!=(zt2<- ďY|?uA. <x^%OX3t :CЉNtLSD N2q&5Τƙ8gRljMl,TΧr! \HB*p#$z! =)))U))5))u))MD'=DO$z"p]&ztt{>̊X?<;:\ B>x81h<㋨Z"r:_ c.}P̆z>lHH4?]Ǫ qT.$^!헂~R=~y<:C01EL`^VAoVv ka@8 =BĘ/}G#9t9 "%/F͆4ze#;>? ?LJ|8M<cL ~/?]_c^?_1ɸ3>3>+#Ӭ|~9- >0 8+i1+g=?OAB⇣F5Vs d#烫b}2bT?*g38`鼸z2;~DEGeE^Nc|EI4,]%Qå ۢ$&K-JnV> :ξ44mC#@:% !W]K?H_UsՋҡJ, -I< qrDg 9?|0(?+Fs~1-bZb6E=fC * a͚c,BU_=8Zq{[_{zV xV._?c*7I*L]NȬ0zl9txigxL1͘'_ddˏ_[ջhEP~tME௯oW9gpzl87,Qq'~U|ʫ]e(4A/Mܾ FsOv>zyOoYl3#6#û-NR䞫臬r[p`P-;ܬYhęO}Ge9T"՚J>LApB]ӴMgt¬ݦ9'|V`I:ZPct=%ъAWA&zMȷ F|[tIP]ן_?]Z-SWV\Yniƅ!52ߪ 3zr |.< T4 yru6L7eN[ Іٿ(Fxݞ08L3Arl5^zљ9r kPf6Yb;֋T"O!+& J{=@*.$ ـ6 J[^% @uLUc3y{XoCt:ᣇG)APզO[{:FU# >nmYc O5lΚ֎s}w0R^7bظP/,} eM/DOx"\ֻ'bZ'Կ<=o@vn:-m!6I_BVWd` 'ջrg bt5i1+%_F8_(2ήοoX`BC :-U)zfKDk"C>ZN5C.Zu 냳= ^-`IxK2eS&I=-o 8ugd/2 g4YqZAy~^`=|=?/rE%h3rNw[ݬڌ{Gt`< 4X!:h/=%c> stream x[rF}R}MR#/Uc{R~$X˜"2_?6(Sh֔BhOߥng">d2Hc5EfLׂT8⧼F?.a3e-]Xm []+ы=?_>͋sbDz''EQ2&)_\)yxǜ3~6x/?IX >b%$9˪5'Nj_x|✗<9/?l̿'#!ktk6HlPƷKvdu]z-02c jE%)PĢ6*n'64KFRUKpFmݜZn(ރ:ўTМTМTPayC##tJ즍Mw^Q5mŪO/t'?>z> Yk㔲T^A Cv]}?E]Eڶi[B$9QS}G}=aFNWf$S&ؗp4^7d] B>BSk̏Wgy&FF?O+wu׬F!pn1}htS&z#0'zbo0˗+r{SC_{]y\PGt{Qz>64R@7b=1Ceƶl%I o龅MD0VuۡuKWLWCg\&Mq1ܜ7.S&={*_)~˼>J&.xLBdIEzaɺUJox>}vw vt1__&p͟KC77}iGX)A$k3pt+9.C;4ޔb~sH[=;^[29ʎDh`prUfƆ60>g*E)T_ɴ) U1RFs7SrRo&˘|9t9l>2jTu~p.-Y! YRB_>vbӯ:"ۑ ,bŲ$rӂYt޾dZoZncxltk JF\pVcYp3ʗ7Vw2f1+/G/]l2XCR/ |UC61[{,i+؋ȡja^uu+PSEL_pc_pq4jvԒ ׊{ 2*K/@ \ŨB_q1Ƃ*u H{$Fj4*7Q]v Fk`rJ-鳎շtP% ^ ї,p;Ums &|+:Jiam;Q5LчJ٧6n6[/`loQXudyL~EtH|Qܳg݂^\\ tq5+ysebdjxST@e-}XFRHAYl21KncV1➥1i3Ð4Vt U+H[4PL+r\]J~Ñ>U+Pe/QA2iP M1*RPchtk8M ;$H&}r?ۋq1)g?]&*U&'K% }>BI3}eu /$,i&EWkїÍl%U-!x>SP}_\*mڃ~ř endstream endobj 263 0 obj << /Type /ObjStm /Length 3092 /Filter /FlateDecode /N 86 /First 791 >> stream x[[s~c;vpdƗvɮd@S+L$KR\J5],ᜃshZIe((HgG-# ? LDPqD"w YoKzQ]?fto 2zrxd z^oj5•a6A X&2CNx]x(Lғ AxOV>PYnLZ@@EŊ4?""v"$tOڋ7"̍q&a(cZȓ1 L0d-fiH"yˍH(Ӑ0#W@wF hP"{08GA։WA˭`j}r_^D $Dcp "F; 2c1 a"J1a1,C%J\ |cX2\ñ;YGp451 4\zsPs|cP! 
z8Ζ kv-%ҕ7Ϧf {es:=}37Fs q]BO[Y>|6>iH=y*=_.C-Y s+\gWKciI`}$Xul'tWZZZZZ4?y 8s>OyTh´Owd %#V+$]Z`zy~-qi铄xJ-Y 圖+N./ ™["=D&g [٤#V2EdH+HIW4-0ͪTyd]VCgl¢fc*І{hV؃U/ ]݋o.:&`f'!CS0[ MRD̦[]ZԾ{dqMGpAB%|dD XP`ڋuV.b%9{BiwN][ ?8m;eVg+ɶiHz({ie0=@`Fe'+ k+9>@yMqPEQICdk0@#(zh:\Z\$N!\f b`e~ ˀd`A0,:,AY0s сxOC .::vL7S޶ ߞɌWMfb2&4 4Ri ~;۴ve;!mN<{3lkId7Vw JXz+gHxy_-K!9UmհT?)L]9vflf67"F*$8:! 5_m;%bA):,3&ƀ;5F"q @qh se_rxPluQ&`%ۏs&oi7(@u5L}rA!PvV58SJ<S6%]ہ0y2 jWtxƉv&(BЕ*W0(v bCeYHOkP{hwC0H}iN-ɱkXasʭAE4v ^j<+Ta=i e~*Dxv4V΍ ԣȫR5K%ٶTPxbCne|JR\\x3=s\sUCog` ּue=` {a%4!LXwPN&@c`HM9D #Le8v5%;xe -hO~yT7*>+އjpϾZbqm[#\}hmێ\95x򗂂#WQ9Bds~eZɑlr{&?TL=W?WH{~4W9[v9j<;MթjTQm?j'i2\D`UY:,;W hhj1q䶺TSx6ogE_My4nJywJy~.Owo¼U'(=>.Y.k*Ffmcr#MR:z~"F X5 -ݭڛjetq}?%T*Ջ)ٛ6uaHԱ:e2dܞ5y~ ,i s%oʛidGdX\Sk6$ݰdrv@+BڼkA.!N /U?$endstream endobj 350 0 obj << /Type /ObjStm /Length 2825 /Filter /FlateDecode /N 82 /First 753 >> stream x[]۶}c2SߝLfn3u̺mt &k#I{.HIjՇ5!=q2kQ&\xbS1%\"E,Q5)ZQI1y8oUNwa#n( &+ Vh FYUk NyL&-rJ%LX@Z9tBDV.hKF w=앗1 y9F}ד: xM"^6*XGV'&Bx0JZ[QE:  x)Z(jR,1iX X Hg4r@#-T ΤUc57HK-CҲh|ϡ=1 L5Z"0aFBbN199[( &˘ F2`0 "`Nk K+?|}ޫ'f_/w{Wj4S%XUj~%ߋywC%/_TXoꋛ͍vCZk`p_M+xl-T_~ٗƴq>T0090 a;Analkmb|PJnH Lj b=5pQqg{4iX̱hX(=*2~#YGѶ>Ynmb8lvʥ|ug4ҹڭs\}0T\8;b "vy*j8+>>Cr# 1w3B%(cҦ\JðFJn;!AS7=A7` Yy#|Ř8Kō*WFTW.9$]ZGair40?kUsC̈́퍨/J҇E4 F9:ޱO.Wiq<]7IM)I\;+Ic{LCo7'\rJhBEQP{m>l;%3>n3˥02E~thRzp'U[%KCUr#םN$te?˳ۗgۅy}YoznBII4f\ڄ}@BH=-hr' uzĨBg3x7;坃u ɭf?]]?]ߘn*2r$r|/B_+v6t6šzݙ*ȦEM]~pKAUH+d il,[_?pS~?&JJfX(Ǖ3\Je)y@"a&0و(`f`be1Q{d< %8;mG,W6pɰd8UgO,x 6RHs(*5"2AN&[R=GbI>h ",}",Q1E0YyP֙tL.Q%8 P`.YX80\ F`bwˇeƠ9V |ĢEs6(JNZPd,ƾg8|z_Scd`H0nmxs55yz,<ȉFS,t٘*,墘$b&9~19…KoJޮ]I1 j\q6B@.D8Mpg;]?; N̮2v|7J}˝Mȶ܏?O犴,I%_gH;щ!CьwA0£b༭h yV'x\ < ts.[$ R. /J;^,S0OYfyfzڬ[4Y~x]]ovbl++xj}X2sy5ȁu+I#i> ZAi`V!C*&@;H׾*endstream endobj 433 0 obj << /Filter /FlateDecode /Length 5401 >> stream x\MsGs*)a=,?^bvK"7r!Y9gfwzҶr= +zQn/~*ޯ^^^O:fu"N W^U_պjkeS#|.. 
P z:ޘZxqXklqi}'eTM&zi-VĠiz&ޮ@ߋ4ZZ[/ڵH͊D6^<If:V;oUUgkWie+k-J9WDd<Inm]9}Chi>/d*eCjcT{i&Jإ=6 ̭c(p3~y ]5a4ۋtm+V xj)W S+WJZJ+zzLzii]]m@w*&2A$yo¤*t#LAjD[,`@wYxNj]oj\eSG7>kJ&]ۡipMص#|nHP@nt͐Mj9MGjC-xߡMP6h~fVvǡ:]xL-Xaƃup]ʭi`气Fp6ࣣH[`<@Oؑt<F_ ]t*V]V6J(=;bxL@=~ F|9ϵ װvOtM?ޝ'f,ȶnH@2{N;1D+C]*%C\]xq  R^Xqx׽[[ʍ!C /s@jhsb^qahghoG :q4xL<};|FuPT&Zܯed2ȥم-҈C؜29؀p}@LJ^DhG9z}0Dl#^:X2ڐ#2g~iCr|G oDr3y0a9n 6&kq$Nx,tA8 q>aPpuA)ٗ\Hcl;1ZD4;`,|`/8KdH\4\Z=')l۱M2%cJ5nn|Fž z$_n GkQZT!h z/eKd}J3l+n)n|îKil+q׀֒q {BkhU|D, o˜ Lh*akL(` W?0>m\\'ƻ5 q W y3SۥN|Im|SD9{Nx=4O~4 ,d,c:0Ustǟ O{Ly))ƵlО8R-vzߝ6wOX-J0?_h7~,lqa;r 1J2eq ƀ˯b.R] Oy2**Ҫ*u%U@hN/1MQ`%qeRK_U,XQ5u#c;Xt@ i!6mדFFT~@5Fc6UFX[:26Q|1=W*8V}9Ay3ȯ '<߫o^}b.U!:U,2O9@&FUyٜ!Eޤ e!"XȘ-27)W 6C])QH5c=r!L)O9vGTp}U{QOR/l 6L{3ozfyS^99 MoR\D7`T䄇 YZ / 4deMviKMGgYG(t}H0cJBC#mҏWI7n˞W()?e `~*+%܃1K{,?~aMjm>e^<}j;q>I Ū>.mԭ +rI* K GiY8% `] @V+pysrQAӸ!8{w,䒭Ri )ģD6'ޅAj>fd攊D*jcXnH֩zњƢ? 1KO'\2t1)#IcD㱔3Ax燤)]A4SU7]1ii1MC]\zקiI6,&*?~}Yetji"ړI0+*Ty1NiJ4a l{NiLo`n`ȼ8w #z[!)N/V3> C&LtY+-Kl G 4vs6ow$o~` nJAbS6ESRDTUNHdzj׎QwQeUӁt?OLe!4V~z>t+LaHM#ߑknY୶ ݳI ׾i؉}&%{D}1u P(ȚZ? 
j8VƁ6%u<:93M5VI%M WY8)tK}hGIܺuh{<)",˄(Oltȣ.ՖP;`XK,PRi预2j3)ֲ89ՋĂ9QR$d¶3dgR}RMμ=ACW^:{M;VPԢTc> }-/D_EqD#aF˹!O$ҿ3\10.q@)Cv9y^BzEz7%0zX< ?-IlvQtH`PM>ofi<01clIso"tf&tm3lߝYZ5WpQ>t(I4α2@sqGg7φӺ uAN~BGvt!j#[ eHcf(@qȬ.w9g&X:OR?C5͘ j+viCa'R|o)Qq9ʽ=ĩQlסv%^Cfj#sFyע߈+JOEn@vQciT(ⱻ퇩BLKzT_z*" `gK`Ԑ}f`f U6m~Mae.昸{L`<*Er!2uRu)}Д[*~^t+hhQn^-"34S0>/oFl:r8b7U§ZBH$6S$E'޲ehstlA"SVT4׉Bܹ rg/کS1+cXp*}DЈ։g{7wx}1Q&%ckqXQ}ޖ]x>5\;v=W˳` X -r;#8i{c#nt˟axy{Iu9a }Pf?p>ۣe r YW, 8"(=ю<(q&9v|hMgG(z]Εk /_bӢ&T&43Vf%-cx,Eqj{~A5Noeю0NN[)%InÓ.$߮J[W҆R*R\o:?fi/_MR^>s5 E;zFYli?%\lt%uSkRO[jv%e`cٴ3%Q,^5+qZiet--Q$?3f7LNX3chK?|7BTӜ;OsSJߵ/ U*N?F~֠S8ƟPsVGVVɩM>_9uh=$ZK#%N.98f<( 6!Fj`n-eC&*~9$ڒ/X9?_>'9FnKE`=S8QxAo݈w5->ћaQr8墽3?R }tǮu9fS~Zҿ Z;cצBL^$O}VEf#W>S %۽?Kc .Jlo:M)]YgOw!̀rsJ_v> stream x\YFr~enmA{*+ڵwdrx}@C6[hRwfV,fc"Iά<< ?,Z,^m_5W?".>x_k ԡ bqq*v /θ:(ؾ{h mm0bmul('P\vQ.RB rFUKi_=-߄+ o}ص˕R2To6nښ~\JX|Ж00՛rmU ?x~6-ʻ66"-8WB*X)S{k#IV-FHn )Xxѣ4"* ZVm px}vU&}֮zHwJ8Y6[-o;䮦Z{~yr_}W/_^E6CZ7/0h@~{z+bUaAYAW_6=q]nH p+% -d%TmI<<$cJC:OiÎ?Ob5㇣(A7@᫖1`/\J6k@D0j|vzhҨ:hp$Mj籂v2F}[MtH6ȱ†\ )\e&HoCp公./GOoaB’_Fzaz,[\9Mb`P[%!V xeP%wzt2_霎 @bCsW%~7\$2,FL{O'5 Q` ?MFZtHʾdeP uEi$Ft&^ {K{s I2@ݱ f7Ww݈̫K9v#)#$oQSfvYQP(2Mm!|hvrL/@74f┴&)/b| 1 `L4~<,' אKR y0{ Kjoxn/gKom;[<5>ru+.Ss)J48uԿ.=|u-gzNaPn|P$-"#Gm K9t\]O.jK"jS(BO1:%;96lf8=XM.-%X jQi/z֊;DuzXX;䠫߂ "[![2ԱgR)T{H*Z+PҜabR2x4 1rxzC0ȏz9©b(I)Y7D~jFTtT\$ce|LN.u)'U > t9}P "CR%`@tܧ=@>J{#!FqA'D<nr]rjqI;!@_POgnI\(Ac-M +QUmSg8-Fu&\eX?#PiT3Wa@.}]uYϡ2T@XW~7>x {c 8If<*#\*(ÛZD8b9?zb^(QwDGq9=:M./IܕF;JUI)#(xxbȘ{VȰ15#_3*!TÜ:yjg0$ ۝"uEP'EM\8:GE.N j*ŴTQ7%+`i,jN/CLF+;M -@_$DK[f>wo W>$ģ6R4nHg@EkfT-W!(0zV *HxnC5A o8XjSZ3ԯ_KO #C-ta}'1vV_F&t714R9@G{*nSO9wyꔯc|  IP Z~9Mh>AC Bq*-DajPTx'+ߴi`ީTHOH ]0LCrҐٶ+4lCcÏ81X4=+P0ևjZ821k1@rIAE`0\cIc L%i%KO2VSqM*awڎ(#*5rb`T]K  qD;gt,-}Ъg` 6C #9 HS(oLM$)=`XVO"]#E`U+<%1#ZbUP;l)9l~ bQt9@e]C:Jy5 D01Bu$rN 8@N4%hu0WBE%V&;UutT;bI~"\N2S^,ZU1R#9姌&5alQ:J0334sB93@I[:Y$\L98(fJ4rj3UE%i_E(\o~BS &>/EںI#Swm[fY((!jQ85ٻMk80e1q1uFTEi$xhMOْ6gT Aϑ.`&'s  99G;vIIw+iC>4:WEGєO hl~XlDJX;{4)Tj~sM&AxoO,I }'nHC6bȡgH 
Ca`7eJ$gHO{`/)O/(@e{NM!ϱ>8MUKu>e;cJrDiOΥGqSɤ;&~|@H2[$,fa;H1#MŊO=xp}]FVR"ԈcT-FBXufH\&ƒͣXąNsDF?6f"omwϼk 򌠆,wy˃IS:UdeڟS#tvCfa%FL˹Z.;bX ®O*f &$wZ t\hvj!7j5CaYqnJZ>]Lg`<#ƻn}R%iѶuJ8w F Syī//h/51=۟,28v"Ð.(pK<&mʩGz'Oħ{tH0`K]֯X>oÈ[Vr }Yhݎ_& b8=a\h59! kZVc՜ZbڟX׌q lt]V&k׏xXWW}pt iFPoFTBnœ-AaQb`&/Ԍ $ dR|&CرMeߋ Q1p .: X4'XV t<@%BDv@Ej_0M~5b dhϸ\x)VTk^F*_ے=nH)]rʓٌc$lKٻt)nDALdbf\ůyK]r %09ߠ9Qz|:lKHYT.AF]QKu)Xg=pzv3f͸ 9|4-S;`q}ܼ݁mjE4sp פ;%]jx47]zsFpTt1Gc9dpq,M~Y"hD%B1!Mx˨ ђVN5Ο-XaR!0>_ވ4̍K.6Ш:wR.eHj"lcݤփFpb|6)+OXq#]Ah܏Ӹk6w3ŋ:3Ɣ/7%>- UVPV [z-8eRKJ}\RO`N(_S;aAihm5;t5esNj}Kb8ޒm"2g?zzHϪ$<&\8_=# \ӕu\YLrG.p n.HʹrV愆CݗvK.-3qnw}^7Nn ӥuw'#v$ ^^!>">qшC (S{T D=STkF-bC38k&A+o3}n3okj*MU{q 60?}LM{n&Iy0m}W_ǐ$:AM,V+a.xX-uV Kpp~_2 IEQH1*s7MmU>ڨȦ K^ڦ ")KQ/Jiendstream endobj 435 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4039 >> stream x}Wy\Sֽ1$7"r@7ZCe'bf˼aK2ef93Q2(| |,1~DIm ee/Yobqγm%y9Lrtژkc'bF]1&C2>^x%4r@a׌z@2*[_B/@?X "`,zo~uB8E9r'xbk]𣜻K4 W 5,:ёn#X ׅd}`=.I-lq5/uFA 65>׽[H2NK L+nhnw@9ˮY^PW)RL5Cw<4T9pʕ;UU3~y2Q-ژ@ƪɧJؔ٠ ؇3heFMS|/9d?= 'P sd2i`>r-TdN%h`m^L^xdjDBD/p@U;jy BgBU\U,As *8 NAP6s!K_i?G-pĕG|V&F.ݣP #}؃)]ۻA?}Udx^ac<6ܸC}.!sV \n!s 8qh|Ͽ-(xsi9bb\{}Pv2B;2h ILk;~t^jU)J g۷vFUVjv섷?4|I7JA[T CZ+wj+#@5'y†}I5Uq{ oe ld (JU &ivU{GBa?@Fh;LLDw ;qރ)T76Ra63`#m; fnB >bpޅ{/y(|ɳM37WZ .DCf$#HS@ .Sa0K,ezqb :&5הmW[B;p@Fj kxYKϔ(X"''̍K|H"ǂmoZkBvY!Dɒ߮>^d$,wOs)h﷽ ya,FphAM7rPpC`Ztqbu |uYO#%ǻZb#Ò<7 >@%ZJaH|R1uL: ФxEgC,q*+s_{Gӑ*MAq-WŒIzqw5 DX{UBSwti3YtnD/D!8ngmyg@84Ԑaw4Ȏ rbXcEsqd2y[f,5 {~; lĔ[ymoY0UQf[Yp)#RHV~" I᱇}(4t8{\hNM8 .88jQIY!'"'&6% i\cRΣ=:Sj"jba5)L4 كT4u&&!MFzWخ*}$OGGkuLsřC Rh !J%ƺupHљ7F9+B>bpE"J+&acܵ rJ KŒ%7gd ե|~n+*Emg5z[;Tupn5Ǔb u~2^ $Mdz95Y-dܺbs+xNi`H+8>- ]34,t8*vECИl#4+CFSnɸMPV[^ơO=nB+ϡk15k+SXn-Ogğ 5P75eJf|:J? em]Tj65Zsu,u^8YUz/Cr*QK;qőwNϢL(.-V̧,_F{Vfykq(IMR9{𩾟T3)%Y!1; SU#^U@dM7pd+ي(D?|1'7+ii`jXAh>NֿRG8 Fm[j\ֲf6&Z$R]TznR(ϬԜ!O20y۲!I+`GaFl^ދDvlCHQ`5~j]D/|im1oS>{# R(H;0ygA}@)y_tp)+z[sr~PYp6GƉkņ2q ˝&L8_PY6i"6wE:{Y7<*y>Kܨsǒm>^R"RWSM8L(9奻+\0 i|bbxgpJI!6MŪz,_(};~=*6d&vP]%. 
a\_̆hvfTfx{ :67Acw}g-l4rWT6}U & w'eWV:Eb ҿHW8x ϔ7XLjee?++1KLwg@P(LGM? _g~G.4a:Mߦ?78oo32Q [fH1D_G܇8)br_GȘtb?Fo-o]Pcl+8qh(/4ÎB!ڱT#> stream xU{PW{0M&"j@|Dc^ $"y<90S| ( QQ$FhE$K&){rRU}w~9GA9M Ŵ7Ów';~ TH&IJ(|*ojZpλ9.g &0~FRJbk\Þ]%Ks6Ơ ׾ߑ'j__>9KL&'ic&nFFGhC7Dn(m՞t}̎K.{aWR*LERjj F)rxj:A͠<)/J-WM9QMEI>W<sJT8ډnd0լtyK@DQz7K)c/>1"K:=1Iy] )'C|0Bǘkbw\!h_~5xiDp.mEplc8ct=,&2\'É=Ǻ>8lWw{GI`֘\+\/}aZ ;pQ=HƳ(Mr|i@wtEw.E4=~$sJJ CB ɘ44jja>?5>t956)f[a"@}Yd u.@c}:K*MUЭ'y`./RɎh睹8fAm6ey(a7 \ ]EG\~YEYhmWmAltWJG&?fbb$F3)[NrЈ%@yyayaD):0܄߿Q/%y'̆de)("J#aM-," ,]+OF XL^vQ} j,Yj=6eWRh @kY] 5[!Y&`0GCn<`g.{ sZX%l:`Z֢,GrEWum]:C(ʁy.Cn]} mփۧrS]&';GWWgYd ꕚ bWi744dV|8nC,]yj3c-gnJi lӁ̫ʫ̬4הղ::XU"]UCCeE],+J23  j2/rĪ g YŚ>EnFmT@Eˡ>Frx@j0Hh yea;Šᰊ%SB}N8۩ Id95p{lLd /'|~Gwi;p`LxsE}hGmy^7o>2%E_pZha2.ژ7vk*v[N7 {*xllZj\Gᨔw{X#_Թ+[}˾J= Kamg"<9y28O9iWT[>qv?h[Rutzendstream endobj 437 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 8445 >> stream xyw\T2 b`K4 vA@^D`(ì:0D^5&D,bIL>yИrsw?9s^^Yϳ25v~ƏsqN=+־VśG;==0f9j6ZyA<5H}LO-S "j15ZBMRSerj*F?ՋMP}(sJL$T?*%՟P"X.Pۨ.TWutt.RT$JPg2ӤD8DXd:4]E:ɸvteQ]˺uk{TAwė.txOe!KY˨GYul5jUTg^=KVɾg}yҁ\?CM}0lְ/>hW϶x0ns͹! .f&MKɈ(P*ku;TJ3>H_Ug5 N( X^{, !St۬?MFk)0pf\Cn,5\m?@A4<O}tW* /g~@}=3q юrXD̴Ȕ_"9e/_qcq;Onmv6~ɰ0@I{hvwm@c6҇ޫ҅Bĥ$ʱϒb rH&eB% ni>%)(I,G˰ApÀ p ۫9hr8AM%Ćnc]#H@;@@CWu`d-ӹ/,dhM s\ 3޿&>[HpDd;Kn,T]C,-zZYn_;}6d쒡J>oH֓SxlrK{>G.Ǐϐ)pu ut[[,`~'+Wip., 7Q\NaX-Ɇ9(Tll= YFqv$h4Ѡe2GA<9b6뎎KlIx vE h #(ƀ8KC5Tb!# zMg9;WiS҇dsAp?0\oWkJ;?48&;`0sW"ukxjxC  h* r@+ڠGZ𸐹xѲJOs?Qqhg;%vDBB,c"j~->eo +H6eWW_=[sGRSeǢ-,2TWl*CPTmS/Chͭ_fh@aXdv, T}$4յNk [_by"Z[wczb1W'Co5EPtQ/,V*4%]i ߝ^[33sY!2Ia9AZGc&@l LOVU5={WjE3⛺{Qt5̃VJ&#eXJoĜ4h@S\}H^{" P#|*l FI$ qOt>tjZl)M8 Pg$<~u89 3'!w+u& 2 dh.ݘv kJGZn5FPoəմ"E4؀vrH5zd2J p6Flh@;8sK?C0U\]ɹVDJ:`xj ژ%8G1o ~NHu~௟ YNn8l@ >o8! !ZLГ7.;5qV=Mwor gx45m*D9wJ>$0Y%}GR> W}tXvAPE 4nI?UL[c*'" >%9Y>gJ)UA*ee#pUj <4#iz&)ȓYݖf?) 
2u@:.`ބL" ܌K0 H S־~Vr4SE.>~G+IϤUШڙbU.4s:L!PvfmŢtt3RKzbAHFSK=1#xT7bPӧ2+VUh(h3ex87%zW;F!MT|~"Ѵ ߳ "xp :wX|*X)uA9K;g>2 [JIUR,DTRy P>:LDҠ,K$" :(d*# CCc|=^j:sEٕ y.87]Oo!7 udǁp"$5)hYTu2bҢnyS7|{6_TY7Ee] QW¿kx(@K/w&^$XYoMCxEz}qj)$-&eh3SsT~C-'w>rs}/Ȗt<c랕c$dM"էSI2! E$µ=Id{PNyڀN%䪅؍x+iq; }h-,~6.]-&E$ )팸qc+ܕ{g 4k: uм*k8^M$EG]x~ di+U#Ԉ TU*WU ܋$*Ui*͊U2ɇwC/ٷ [p K$ŧ(RX! փΐpT+iSRwCEBHX"[,%8NFe6*ʶiz^&:'&~q ^؅qxE>myY'tN_:24ZksGp_>_[llrd 1!U5[]2׽ڗ6SN!y6k_fѨ;z!oʜĊ0;QMv)Gvg#(-7G /BL˹Hn(vkřc.ggmո >rҹd1;"| ۽+3juc5ի`!-N 3@-WEM`vVCTCP~w}Z1m 9@hU$92b K2Y<SPyJ wf2(ReV`x w8_?r؂c'iدeω"S"{;Q7B զmB?L:Pѿ?M С[ dKCNUbgdlcܕF^ްkuӊ6Y; Q͛|HG4]HTݶ=` 61cM;nݫPndA <%}'W含fkj4% $;J§ ^?ua6=F'g5ѕ,a6MuP[e)~TTn  (ܹZv q{lQ-8}he!VH{SKq_ty֓ğ^?y6P9j(r: *lx*Ul"i[4Pp꤁N̽ I>{}{Ujl/b毟7ovڑ5쪓˪zUv(MFd]' .{.3d" 悍>9b}x ɭ,>mi)KB /-H-͇*2?2&Op&' cũ9*". 񔷂g@j^]j#$vBJNVٙ5kx駌kyYʖC G<r!JE&o%}ԭi=95Q1j.u/ o]W%441pW=c>F@`VD?Xm)d^ݤwctZO1'?<؄!*O6 nxAx\UHTPRKä؞( ,]?~WnhGkiFD2@Oy*qÑ[&#M!a S0!Po0/~l!^ZaD7G doI蟨JW(R]x2JMI'wY4"G'x(\R,kS8JK/>&庣MmRm6.AsiR4=eUF< sn4lxNrx%@uB䤖\nr  , /Oa53W8mR(T*HfӒrA4 J6#JA2QK0E2K|R ,,âK =3/n{!3_o͂*O@Qr ٞb!!,.^E!B.L=9Wh IMp_p_EpMMEE ˏȰ;9 !g'ɉϒG')Y?&@IuyV|N~jvE߽F8˱C3j^R#<:N`U))*RSk3>tQˏ0LFRj|lնq_]qU%P]T'hlA-Bn )Iw=?Bg.$͂aNꄦf ?'˲^K wiB KwuR<rS ʜwM|$=v)yJdf֛W~`/{du7zz -G_X_sh.5INq;f"ѫhd2/m0!'NbxD7HR\3c^,Z>~_ӆ}s$iM0,Aݦ-Q,ZK2EZ+7yʴD D_$)7/"ׄr]gD$D:34T6Wp3# 5[o` 3۝~ϙjU9V^ўe2k)b~ F>Gth)|-o84?;H*F͗&:;q޲XJ#=|hMgF$A-՛7?F6[Il plMkc͊m:v+8Tj"(y螼:=wm+rd 띺ls7w6 ;m.Nk-IMifs4DZj[~v1lZ!O_Yf\Dvz m;V\Rb]pQ\W( + ={Tl7ϴ"|8v\vhR=| j&7lMjN&iY-[W>ģcj ᕼlmwn~9.1f *Pμeϥ4/ztDq89J)߃ J)[=q51D?dmVtNW7?qs`8XNsv5lE&!_:yH K9s#8c{;WZL Z)ߗ*ς< =Wny?0Bjw2ݦ}K63!˽h^鶱(腤4D^2;QA SrohGǽ|(ש1&we |̣% #Xzcg ?ݩ Iqdq\xlQ$zfed@%.-coiD3vjg>N^ so X@sd"nB,^ɮ&XH̬6;{=\ Lz6C(fm9٢^SHNV(!$Rl< 0|tl"59=H|=LrWOZ4GsjW?DmUl&cvIEXyOdPhPYHX_΢tݔDIB53d؉999(cNvr0wLh^bNc46R)gJh~۝^YRʺ(!))//ynu8mLMfĸp?Mvm -Y\1m$W:%'~ iZL[oʹDЭa3D)BTxe0!3+G#}gPOvFI V堄4-h-\wD!uΥ`/]:>f%ٚ U~_> stream x]mL[eǟۖcs]B)_U"Mlvd+ p*fu^쥴#3ȤC2 / b]b%W2/I~'9@""b+UF@6[:6M sk5sF.ڢ{1ecDkOs+@HHR[kAjV[穦.JfmTiW 
ߴQ;fPd}3:1OcQzRk܉P5R:CEɐ hM!b ~ُz@} 7gè/]@ؽ/9PkoDK*{(eER{}W&VI(w܎.^,xv4 |O{+ xxBv=kg=Eru a \:a?Rl2IN APA2mGiqc>@߶_u}Z5{yI7/]m_&O.:Ty;!K¢m;,w.tEڱYZi6f%{JVM r ]&TR f$kV+n!!<0SK> stream xzxe /ETH}#D`)*JtB50fɴL3IϤ@ PWuoug7?ߺ]\p0s9!F |||ܼeGjrxʋ m O{{3 S |5Ʊ`ȷ?=IT8 M[pE?D1<21q[Sy٩)qI11cgA3'|o VJY&mmz`go ď؜%/jktP̶ظ;w%Nz#9x/,xqK_9k))Jӈmtb;1A$v]lb71xx&'DG%}D :XGH'6bwfb XJl%lj'' !($Fc;D b$QO#Gd<[ǞF2ɗѵcvygcƙ9;ǣeOɏ'zꉧDO{ibFO-r&MLIxG z*FsM:iii_1vFS3f϶i׻ p.~9c,rN jk NA\ s3TqV @uFQ.hV7i* d:ZՃ lB[ 4%_^8L" AAu+B(Hah+o2BtsΖ_`?G8vEWQ9ہі5tN4v9.QMa ,A2?RYjSN4Q+N@W iB5Wbͯ,,TXhi29E'AWX\}C 9[0c(^]9䁆ߴ9& ԗD\>I__߳Ɔ|Dvl zSF%e :yڠFT4 NBw9K^)Ɵ}l-\4}۾.|yiy`יdc]:='  fa۵ ̜~i : <āA)2*u7DÁmsG\6.<z.sҳ3`$Ak$Ua׶`lF,tD`w)r= Mˀ\,z)A_랏ӕ uteFjZ.4w5AtA{bS(q_ha#By*ͰCeI2u[A+7)- t3;Rl3LK^m)4Fshqf=G;VΠvkOn}Gbx36e=ޣcf,O{#M{/ b<*LγvţlzY0AKfݱ6e ?D{mmٷ> e&a?W*˽YEoҙ)(kՒظT%nLo$b0sKzov7м:" Ӫ6iI>fYmin3@ ݂+y0*ˑ%m}yez]htBOo'XZ[jSjJ 1 ZyYSV62?tk.E&Rhb;}~ ]P *s-?% _b_@Zk@ГlD&MӈԙV-5)vXg,S(PDz+@e d|˼3ٕYJ~Ruz>O砯>)`sHx=6$[@INGU$٭hSWPЍ8_ P)AIlrW!/>B=0/by震#8L1u7B\8 `-2XB_B&Wliy*>FU wfǨ3ͲHURk"V/f͙z΢ߟ#Z%潷?Eb?&r?^Ts5UW?oF9D, ĜWSVWx'ft^6(.ivi/On:)N BT/=\߃[ ]ChCETD* B a@D8L[R[ jjB$95,j8ȹAܾb+^T$W$ES~e:c`8v. 
Xq 72ׅyγ{HŠ͕Q"W2 jK-Pji?@wenGŠlj3I39XͣEKy){ sXR @ i<'uг\tV/],O ,rm j,dwj*;zytH0DI2_})~sd` NCy1^Lj}Q={S󊩿kX_PrV;L8@z3!+%Sdj5ӭMOMI>dhAW˅j*S1T ql w苍U> uz4ME`rq_ʙӭMM5T hWyGAAz۽Up ee01f}@9Uj# $6kp}̔NYBl=ps WbXh'FkEBChpֈ5sݯzcts6Ů#lTBqҭSojT)haqr{aʮ|)}~5YY]!\ʍY1tƜL+nj122rdoh**wI4 #>h( [C?:>'1Zm+.RTUSebD |rZ-jv?UK# ȺH4=|FB.[[#bӖЗT6ha3ӈ̥qcyF"-JX˩ąa ,ˇ\wl\`J@_ ob 7D> &JIsه(+;Jo&Yj2EjΟܔάfDmXjhf!@v@hJ*#3;;wQ;II`o~c9d*V*V[rwFw^mڭ)Q@ƞ!qvk4>K.Z1k(3 鿱E2ȣ٠I.ME>Se7{P3Z$\oj:G\rޡA l,E\w};}K[&/3_E;p?0 É x3Y@oOrCŇ-HldZU h`#q+p͋TR ĥy՘hWX4BD[P 2%W+2f]Va-6 HWwwR^ g5& ZZPb4G(ߕVsH*>7tP-1Ƴp'x}"T>&h3?.Sjub|)P4_/#z7A.GM:M1pq+3Ug :P+4'zRv(yPROb74=16Med_<eg@小eᦖ<\eƿ:ϲZ4+wѽ#3 g")<?MAQ F j,eo,Ȱn[ 䂝=(:O@~|#B*>u]hUN"ֲ=?ш9r*lFJ }x4BÕŋcA |T\3.Ņi`Ua|?cهHb-;N4Ed0xdvz0C\>dyR͜恧aی H^` h_hzQ=xsE0''7 LDlrzޘh,N8Y%lLk}*3f]rl͓G; 霥ENj<7r$"H4G $aTd{~·cu.ݚǛct홮n|2Z-%!7?x}\\r>~D`fjV-aUCC`Avq:ĉ>D\ܹȧ~eد9Y$*j*ogr"du}sPGM]Pg)GS^ vyb~~q2 ᝬӇ~ WP/"GN dJ|үsՅK~Rs1Pޝșlɪ}3TyO0oSg5#t3<ΐL+j)j/z|[ѵ19ʟ Cʔth.RnTMĩ-9#PT;Ks] Y]E0P.anߔdSK򚡓߹߰nUVʦBߟ&PCsV+)BwFV8 vїFNETU]:!\b•+~_m78u`ExCDkZ56 lVQXl/j}9vYh04Tq5p2 杩Pv6E6f?/I!F0(^C6T!jP0[ 0 N_ tNҧ!|:-$R J#c}XBm^XXuV}=}p yhd"obr52Wo 簷9MS]YmiЙhSoUWUջN; dU t\VPQFXxd(ʼ rNpŚFh;9W? _bca]bdF\Jba4Ql)*Rm[14ֻ*>O>_ 3 >lv[&T"B.LPN] LɆ¡aK*51!9!<41Sp6L/Q1@HvPb]?8|E; -t~Ӂ-=pּaQ{1k`! &s͸B=?dO0avoAl$,t_k|Hxb3v%q)=m{BNcPP`'ܻkRfbyV(}r fѩofB4i>8*jTRGxl7&UrrfS9&F TR*W1*Pԩy**yiu](΍qaURiQ52pԼD"+MN%0O`ƭeWrubHEd5Zjr6ɹ,f;Tqsʔ"GjG3Z+. 
nÕҡpks=c}tw6B쎉n6KԭUS^]ս!V+S3aXԸpD-bn_W$/K-M)UA)-3 '咲CV+B >)4ZQ[W&4RuL|&tq }̩TR~Jy fp/Dʏο9/ܺ5C mhV@9p>7k.4wcUyvANz^2PRq_?/yopRXQ]Sg)S8I^Gv@8ϔ2 =v՝AJ9䩮4glr8;6|w-+W{z /Ĝy=Yf3K-+qZA_"!ݛ#HgNlZtL2^Jً:L1~|eG{rG†fSv~& F턇xeFrdw:)ܣtᨗ)f!Ͷ> stream x[K۸9iԜRPC/>f{xlvsH w(QKQO~o~FnƻTS$_r&<,=_r&o>quMR<9s]y!s'W3ōJʬW,MRs[f踋2Tr/ʒH 0SZ,g(EV]]mfZàh֖(Ļٗ&"I+.Ibw՛Ou_F$B/M,s*7+TJ I Ԍ-B]y痯-ڦ67j@aH fSEFa̤7Ak u}^85Qrmun*;K܄ Kg.|lR)Q=LE8Hv=xr_9{M$ZP,U*@|AT.QrֹKu=YRC%( O?YL.?4$v%FS; n֚_-Kh3 c/W{a͊aJ |K?.N)j|$h06ppueA)J M >Ge _2bDs'؄+a7@Dn^S$t#Dunʋ¸u۲sd%!f]e Μ-_X0-?̳aȓT #TfyT6` s҈=y Maݲ*YօUAy${W Wf#"rn E*y"H!wWf4Z!0b5z±AX<cB2-E?vrsQ gSE_,>;G 2zGxÍcB Uө2d?lcRnx @郷^b)^wmLWI.Yi&l݄qK>o}`GA`w QAxA3%׬v2_MS#(uah8 e92FCʵ9c#2&bvlVmX;x\')jxbIԬ %89k]!Ъ0?&v?!_U1G+F+$hڧveWTrĄliRC3?}xy멥fxY8S!?@r~2&TBIu2<[ Ú܇悟 VGI +ha!E; ,G9Pۇ(y>+{B_2lŜ2W+J(ǕX !qn/?*uRaCp.s,LUS9 ~e4Ê@P,t+Kxdstu9"PVmwtRb~m j[XpG,{؁nW پLƤuYo| 2^']=Lw RiAEA*6X- @sBHwJ qvfiTȁ.IqPj嗀X~һ%8ʊǽ!&;K $ zNpS?9nY'ܔ'\a]+RϯU^R69~v㐧zÁV ();$ !Q2^% rTԁy ֚vٳgenssr+dޮ3l/bA{ M#/Ζ2OB_T<sKSD1;"K,ŋs*k݋0*?Cw> ofpƳ}2'_ֽ=W~ytJ sl"JhEvk 5VhQyu5]$f4SQ0˲.Jy!=P0DtfrBDȹ4#1LGcmYK"JS1* vkߖӅ: B>L#pm ݬ0OYJG&Z,c1;IwƷOw ޜذL'Atzn@J9)_#y?yJ^یᦧSKюZ!/_IxaXk(zzse;5>yO, wb!AdU~WNݚ%7?yYx;ݧf*:3&x`l)"Ga;lb$ mEE[ at.yiRw0g!8?U*˹ @֏#;(D%⦸vG9>pl r9Ȑո`O _[xaeN蛙+HѡSzDW9ֳlE[q߸;?xĤ[> stream xZrȱs%/^p娎`C'P8p,S!yv1݋UThF^y,$Ils,yo99=z}c%G~"ΤQ'.2+#{?eK+Y0ɳ\˂疭+V%k}[WTJ vqe]LxSܾ)H&3nGLGST4.Dڒj~. MY%>rlOg%n-ܰF?5eކf}b"I6VѠMX[u~8}{f=n~+:BjYj^Үܦa(X$vM= :V2EVd*dSx)3 Z\@X`avȅuo@[3Dd+vD6 `dnWb5]I!pdׁMҨ7Uka рTOFI}}x0L_eIs^6TH?CKkCKJnzcn< 8+d+VZ m؎wd27WG&dRi삽{"dO͸. 
w7  k26l,_GįWƯ/;jڃ?\J~+z~H!SBERSvS9H5*Mũ)I Աb1JvkExHjަېvE~k.Ν/JG x9lA(̲9ɕqw2GL"6SՆytD[[rպW_pNvʏ on/R F`+a@ i;m5N€Eu2MIV>CBTE 8a䕯]I/Tn ?Ṕc#.5>Sة04AƊr_n>1irD:"ZL̕Gy)ig,IYQDƹŌu?8ϤDG[:N`^i${߂c)h ]4 Vxv_8D aoK8L,u TzosK`6j}@¢Ұq݄IPu6ahЬrR+gz04nCs39PPE7>i|߂vr# {1M{F3iuYwYm' "d.SI`yJR2-z#uїIl݋X>`!K"wwa5SW{Ȇ} IϒWxK8tM:vzD_ 4#úL=dqbD X\Yפ0ѭa:1޿掆 _J~JWM9$z[&<3Y^1.ޭoWbN=4.o)Z7!=Ƀkkײ)*hnQvAՋ6 042{'c_/ZKknKPZX>12Ԧ(+􉠔X*0xBS7|H=<`6`o<bs<6aoxC7Lhhg~S~ dB@v@FhW&=pEش8㍆YVAZ Ü7Ox["t xb0- ^ i2z>'SИqN"C#?T!(:U!ce/YB:XP%u\D4AH h&M@s-;fׅo['5f$ATP4_zk깛$ t P:{o" ~(F~pGm]P@;x5QUQf0^TDTn272 z7 $IsP.w<-/ipKAUPPjpJBpij~*Ĩm{?#'S=wN!c&Y-T囀8-:V2XDmhKV6DZW^(@92, Nyqb * g^7 '{kxJ+Y!`MLkD̕޹KLn3U ?m A0Q9 & ~ vYGZs_1^YWb3R %VY49Lbu^ӗCx{x [) lz\n}~#{5krCUn$ [bvX+BS{R<'$wY^J;dw/LcmA33Ɲ + QO);&g9ArTϒVg:'Zh]ܜLaL<YX)=|5ej,0KKT+r)m8`A%W|(5| gPc@U|bL& h2hEyճH8~`A}E:<y)Q7ѠCX8i!/ ,mW;Ͻκ%x/1!7EUn>@(`YITB:RՉuH%y@W4%RC~{5 U^'9}D\F,4^)É@᳭POYlAB)ut}Xa0j|mupa5u %lk5ع bpw3b^dZZ2o^TI(fK.e8U 5}5闛YSendstream endobj 442 0 obj << /Filter /FlateDecode /Length 4149 >> stream xZw8=i/:Bo" {{$8=hYHs*gC(BW_}o竣rrwuы /--\.t„,-ua\>)3E-'vZ䚖,g'M]5*rqI.3aQqWoaFB3bZ+a|zh&̸,R}j:c0]#$hCEI x%B>3_ +i1IcP"V~6Iyl>,49n\%ٔ]_Iβv#Pe:@ rv4bJ1MjY}0ۛ`!+ɻ:XApNfRX,NX}}aD23@1h#yeIwmj Cv`[r*jsV^n z<DŽ} (Y[w.m3L-DB@ijC+ClƵJǯ>a˲0%/(e6Qt-TXV}qj%1]LF?LlYM[a4%Yj;EEi+J$bg :1 wJEaLAoCDpA.Z`VfT`3Y{RhVJѬcV0M멄FsRKOD al 9Og;IjŪv _0 ,bC5԰(vF|bFhDd`*2.&'P%DcVr6?&ܧ8 r\UR@=wQ|0sDHE)}uiKTorY,f%iX7 Mc9f?/L) fp )a[Pǜ*sc4V B(Ddq0Șdvo_1 8r8e-%W iA* 6@ c'_ B`>ݣ@jl·ɣdIcwR^z@'zIy 9}NFL.)S# Rؙ2r @m:ElNNZ Ma`dXb{Q:piJ]p}Ii2 ޘ+X?Jy'VH&T:AoG#YeVP[hؗ2| >!Gs.Kף`I~5Z]hC&H>~']n,`\eWU/޹:QYHWt $t B ~7ZUAÏ/^\|_\`v]q1mw"2P}, Ol`01JWIE q^`f,K;nWY;1,π̞pQqgo-#]aBs^7~tлv5JSߧlIAYR o0Ajr.EV~wxCA\@|KmJ]&en!m-L- -qML^]kXi 6I\czi6&}o`5ozj2R7vy/|2Թt؉.6? 7>`&ܭ +VUN.Wj.&X4^͋/us~YشY/ ˑܦ đ!E(<7 HBG|yB:(v˕qEK|DыUsu˷Auբķ,ȅ! 
_rU(P=&QDL@H"\|D߄0Eu6}h؍B )@ ދ  yӘmF"+Ht0[~'b~cI(7#or6 b@x|",jut(ڛ*؏0W0VN8sF}1ՑI؀M/eF5NHR"nΊ\pҰIl2rÔZ *9L~Ab o&#e&a!깇xy/:AN߷8*ɠCSpCNsE >]Ro[jB`?Hq.?]{eNJcyH9Kׁ!u`@h65-O dg:n];IK̚zZy_)ˬȫ)iոgށђQZe(t|!iJ3HAm`y~Dm'eAQKm yLPBd(Wd( moeA " ?).?%02itI*Ԯ(Ijp`sGB@xxi x47ɹX|10 -{mouz}s:jd,q(j֙b揭 h!&k]cgU1p4 :PA@h.RCU35j=*?_żZռ~zq['s oGÛ'ȸ]8}x{d|=ZU\q3yƞ6·AL /ÝO蹋󩡋R2wgV `I'[wEN¤֣لF@~޿y[MxQI$ A1RŦ@NsRSo@w>4x3/zBT~&n *k;ߌ+u!  /hg4vGXY@q xd6,uXn u ;3h;nk.<ЦDx&mmd^/6_c/#6 hS?/:m~TGĩ'@&-cQƳܭp'߮`C !X5:p8s/4Qf^׋;`垒?&}$Q]m0uMhF8$>>t?DȻeu_IX9z٨]7YB黇Hߝ%j}_"_2c&E`Nޭ%w 3Wm{{l*iZA7r>Wa6@G28F%71@:2K7彺]\d/2f#_jxQu@Ċ+9j $^#]Rĕ۳ (js&?W7s%D! i.i5z2!xaDk~&5)8rFyRMxM`koQ&Ƕ/ȝTvs!dgTdEׄVN-VD޵]] ]KвAU͗EsmklEw[Dq(~ݙW.՛pQo<4C Olv,"cu"B/gɄbd7"X 1e9dڲ#.:ƒ٠~%ub=>fendstream endobj 443 0 obj << /Filter /FlateDecode /Length 4093 >> stream xZv6kYhYh5}&k;n#+xTfMQvd PM)9ZM]UnU<9A~|{|}|zuRxyɏ.BY) 1CYiJpusv=ɳ\˂%kֻaw*w]LRYKvZMJ[8q)i5j3J Vmmue/IXak6N&Jh.t`tzn+׈Frg+Rgw$o e$[qB,gNS]!q`eAAAя+[r']['KVAYާ\Vg<+QbH~d+]Bd44* (غbJAqVct"Is|T*RJMG6XolY362<(7bejpۍ[AMEήdoˎ Jv^'Rl>jCbw*Vmnj4g*w7"iZF.őhNaǥo_0l-{n̙j.\N}^\\6qդ)X3q)vܟ!cն^HDhv] BZv0w2f|Yo׷dZJ]iY￾%+##V&8 @ IN+ZLbҟtdr,f#:(xp%;Q Y#Ԍ 䟦|Atnwf`fP_~pT(ag)8a) TmTUj(?Uyu0%bnIP>.Hpη]=-ΖU6St#'2'ǚ]w<:;>-rQE0]B@OAA *#RM~AUQ \תo@wd}2;ZFq E֮"|C%&۹;1"~D>{k6nMFBBB4c ,Y/2m _x5Iy$Hٌ(e_vopE V/sNH ;emT IjrSNm*f$[`KZ$ <%G!q#Igmw-!dX>h ^9>QEz>ITHFf¾ȠzJ2Q#C ^I٦*Gso`AeJe>;쟾qܣ))5D&ži5Gݢ)Vi$ OXne"!ODmچؗH=oY"W=&8vԴ OM8Ș)]jX?i`'./^g`@C"ȯP@%)z) mȖΉ!ADb}@, o zz0w,ٿOX4KWc^# XX9 ަPKgi q` R9>m^~`ABԠ* i$ C:| zaΐ.VV!]JxgLHWON~)#Ɔqf?&h[J,b ;+ǝyk]^+ 4M.],`;t6/w=cZ5)Y(GϕFa?p|jXRi -$- 5+BnOY &צUSAB ?F%όܓ+4䰪 'ܧƥ}Zq 3FȵTH |L|f%Z!h@h=cyãR*E~B#J*,5j 5429;OhOw IvZVQ$Q<z8ٗ<8gqs<ӛ>ypa*fXT-^M_)!.@-|։دdZHTζk2@Sk T5;AW%õȨ#E4Wu]yR/лFaFtgGUT40pyJ*sVo3c57տ"uW{>dD@?,ƭ~6H6Tp"o>u:w( DZ/ 0uۀϜ)%&רlE(gw҆ HOp p#D âO|>_v992S2j x@9vqUB=#y,UvʑXT#lf 4Rb.W8gjE?ҥGuŏKov茅Ƶ#"㯡f9FŀE}]:ٝR PJ%nLn;4)X䅢l4 ϗiW?䙦G#}+,]{\ׯovyͷz=x"er勖cm2)%}yF;+_,a M=8+zpv#I]ZK{G[Wq n 0[5̐ѕㅌk%"hi4-X)KP@d\իc ~YH݀}J\C0dަHA}LiF+Q &6X(|?WBI 
0wO/繪2HuQlC3+#^'%ɝ_nW1E.]Y]%2/a ,o7LWxu,REyPX^?vhDqsZGEte݇JXDx* j*VU*]@p=bI0TCǯ8Ko1B ]pތ E.;Q>­DLF%֡>C 泃zMbLiJ&:ΐlFn-CtЋendstream endobj 444 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3543 >> stream xWiXglA)ZgZh4q3@ 1WnOwa7&"I1c$L&&DɘĐb>f >{g~{ONad2v% _~0z2!qIi_2m8&}tԗ8z:7n{wf68[#FdldqY YёQ-[AQd7&dFԛn[ޞAF'īC£" =ᯫvoص[k>y0g^ )>3CBwDDwp/ø3;5|Ɨf0?f żx3f#aYlfV2/3ۘjf.EeeSvM9mm2mx}Vv't{fXf|P":j3da+: YʂBO"ƾSjsTm'tV-lcTsmXL:+)| aI%f)vb#Kdk r 9CTF Gl@ ;Cv\D<ڀ!0Cqu2Djpn0+ѦsE8r(o~rTY:5S`+~a :oNBo7:8Ι,*qб:@P~8^)P9de?8\ !|KxMvKnDґX Jp`,:U5 "!Z>e&Q; ACi$ʬ`FK,,FwT"¡ \8u?"S;24TQ]B9p,{z/b *=79kېNfc'yqir lW],L3@}@]fOF JE"c y?"s(@b YB]<~rW)ڽ99a!Q3ު kmoi8/Ėh!YqEYc#G3م)^E.ZwB ǛO+d#eh2UN-͐L5$,Qۅ:Sz AȉT>y]gIq~][҉#-Ls]- H~L2.,*.?%fsW,K=. eBw9CWC%!2qꓘ |#}wlD]Pe[Uq#m)KJ j*#,BZa^qNURE8xy}IUGcԞ hnҭ%Ձ )/)M"2.LD1Bs$%z@GA0-ɍ)UF̢VqGw?$:{QF!T npǛO~x]cp͚շ+i␜_Uvs34B';-K4.5PV)^cJx$58%uHKGD3ԁV󞽸S23؊3p1r: UNKB-9Q =]@%JM."@G]u]"m7Shi=V#WtC5/o 3jT <!#f0wy`l0J ƐT_S~B;am1TgAɚ^4HRk_I\ F*vЬ1Vh`-W#Q./< b>ehh)4vuh/}:K,cXեk^ 䧸fv9U佴1-B 礩N95FqMXeb>;Tt5HlY#T"KB=G&tHm1Nfٙ1QTO7 +Tߡ-dh/L.lPοE.9bΧ$ vDDxGKOh{c%o:]6_2Qc1(Kobu@g)]: W2N3Nohɠ| [PVP$$y{YBQGl)\`l^AZtU6Pjh0כNz3ڣ!b2K´݁pN+ʪF|$@gPt? 
β}Naˣ/˅R}IAaFOiյ VjzqHS*9 e,\F]7,Nb7jӱRj|HGc :2zB 4A+7ceoX_*2䄑IfѩSdI"|ȭ;Bw9s5Oj>~fxُ:pG64l-R%u\&Z~D dFƅOu|*7&d.WʝR7]V Ju9A) f+ƛ6y7b,'ݪ4 ]_LF 2 :AkSzp簛e4ǖn+ڪ/֢| _SY0yEƂ d.qcZ,PS^bLP?eN#.ql^\77$O&q(q> stream xWyTT~0S hQiI.I]r4ѨFBXdaffc~d70Z պDcbﳗ.Ę6̙3;s}FDP"(pڍY 9gnLNUC[s 0I|%D]:)n?tt"ҍG;&PbhKƞYԴ๳gϛ9/ ޖ:+xuB⎬;҃2WZ;+x]VYL-9-A)#WnZ9cUЬ劝] kקG\4HQ4j=BER(j3 Y *CSkj-OQw)̢"4FV9ߙ#iezG1Ico_鸹>X) W+/-E6su߰U5뺛]Kfc.bYrZBpEߋ@0A d zKXT+՟Lg{ 4#%uTz ;օL.aL֚X 4029ڬ RJ2կW- !g&JctF2_B,u8/ lH2xGa.P2 5f:+kyP*A7oB^KʪW64y ܣZ^vBbCvᮩd+b0iJ̲`6׻-ݙc}Un>n@oR @o^l8ݰ!Y >Bj6$5G@Htے _[2`;:M3yt١`2Ņѫ])Ò_*v  E@f;!Dfs2Ln;M\nLc4ŮFjp.V;bc6$3,sW/ VKVdWW r{ZnU G#-Q'L'Dlёn[ކbXGWfw`!,Qg4B3x-U`oe+ޤ¹.eqqe(=}6fԋgD㾄Aw-1W ECAשsoEipx8>N skě TUĆ#oY6RNpFz JEi C.kUy-5Ƃ\ȃ #S}}7˽ CDD4enྵbBNᑼ/gO?LSI> Bk$7sd;ʻ`KcB}DCu Mتl.G%8,}eS$\|C^(G~oՏ̓)Ccգzkԫ;}zOǞrIgӁpR.ZKcuhf4Ֆͪ ŠT6&ٳpv]^Ln[SK$#\&8i\ .HALUkVVo_{a7Vt*돴wXMʠUZO͘u;%D<ӌhor*3(!>18Vzc x "zX<| @J^MY?CT26I/$̃2 [u_F-SOA4 `V. b !J\h.{)vٚ3 Y>۝sڡ0Jћ%-j}~E,4uڇ>> stream xYX]fh,(DQT^.\HD[,jZcLbĨ1јϘKwy//Cofr3(!cgo/ ̝3+M9& ۏ#l ޣFYYH<!C&Fh;FSz6S)j J9RӨԇ6j:Dmj5rVS;5G55ZKͥQ6|ʖZ@Rv"ʞ@-FP+(ʂMݔKR 52L(GR MGR,%Rè2ʐDrFO;=dR=S=w , nv7&6|pCCC7#,FifN#O'e›c?2%;b\5~xq[&9I"'S7'ǟ7A>{ռ5blxlYhU#忔 6q *J9bU$g@=*9 Ei (VPطty-jn@^"fWEtY=j尝&gfeV:*~RXxofZ(u'sX*ZR2]=*ݮW0.&?)v5#/K68b_3A=nfXr\F? FSx8,ejM'/n iy0 C^yκ0 <υf:Zw*|q޷i,-E~T.Ny?cfX4<  C`ȏcf[XZ?x͛wްjɕ38+  tu_2aM3w@>(ʃ jrw џ< aȭ5Ub笓y8yKϞV=zpjW ČUTH ;vWBQtaT]{bcp\H$a{1j\O+'CZu0TB pp2#|ᗚ w^A1]b0)c/lFHYDTW6-Ur\=.Kf7tw޵ WE`>ѯۮbLWW̆İ{K& ? Ǜk*a(B+XƯA-}v<`Ĕ*o(6!! 
V ubHŏ遪\@Ku$?Oq*=׽_au؛ ZI9y`&u\]7itHur_0qwQ]FPU0X巨#.g]1)Ȗ-k b#{Y.*T>]Z;CkRKqrtqd@1s}D3N=SY87Lɫa} !:xF̃::fEO%B T Z/Mb 8ΗS&3!(|:2,S*ĝ| _{:#zj7J KUA: :#"\=١Y1ǐ/W=(Ϊd9NxhvRaT9*D(44W(9DVq$sD'rX-`ddP][ҘߞE( ZפQ; 03¿lDz!f)|_@X傒ۻ>&g洧 \=.*?7=/#&0 >!nBr|u:{ KTm^EMWo1a _P0Y@ahۨ#8_$X%諷4 8ctqC 7 b)+1x0 OãBn> W8?Q=x$C5}%(yDr  7@ᄟ*T~ YZ+I}V'Zɻ^4^gHK )7۩4xcBU'39R/JS*AaRla"~R!$8V**b('H,,AQnv9&biЬCgj-=xLc JNHPɱ]Hh6:Z$*k7 `x|lr2eTA2;W=U7q:Ahu-|$;w}>EV颫-ԖXXl,` 8sm)^bvAwOѯA0R#?G}m5}FN H#G ,0šͽ3F {^ضz>N:aMk`ykZ5+2 ֩Wmb,Mq S#k +vbў2XȣLw4?@w Zo 0+4C7UE5tEI1( ӫ),>v$ ߻#γ@͉MQUEA5HPVS$EVx3ɓ.ߵ cbԩ'N:T֕{Hhao3# 7oYҷk q~6*7U%Q!ҍ[&yFIс6)J^ Y{xo!D :"[PW} ~v^+y |ž'7`o%a> D6xN pU3aIXDRX])n H.ܴUQXqX9nx+r?ܼĭУ <{e +]I Hb۶JԳXhioB24&iϦe솽[e~}] 6Ѵ%չŇcDhw ! u4ɛ5$+Yk a\,E!` Q\fL&шL'h -  d+F00 fp6HZ\RtZT ;tז:m!1&6p9ҵEcprOaA5h4ߴ۱T^Nӛ:M?Gkh 0n=PA<9ictl^?^6}cKfgƣ͛(^7~yOH\?I8Ҩ$ 2ۨBmj6J j_>UYi2-|7RȽe=JQ5 &\'U;׾N!yB cIؘÒr_!ai(r={1Smn$bXvt)e/맯C4 TDX /j[2֢] t O]XIH J'IP\rt 2'oD noKZT8>Yy 6i].ȌucޭŸ$?j胈zp΋y_ڼɉ?Ww9qaߪDWz@ $Dw0}]&aq"-}' /w <#eKy6ƗUՔ5%%ϴ|=fG1S\;Ya<+!8+Nl)ad \(X1ή6]4^F>|lfoR`q,厺7u% ;(L,MJsP@]HiZxu7lwE`qNN&JihX߼66 u88[))ONIVh9YR~lalQD BШ0)..-cKABb+XZSVYTL0#M$ةrnUa%tJI2Ib,J#0W){۲n@={+l`) Rxkm*yOUN_Vy*YpλjW'Hz<;PGSVKWoXy$xM1ag5)0a ;_9`+<)݅-_>~LRQ_WYui:)]p[fπ`HlBU:lBmf-*t_;Y?]QN.k_K#Q0حT4PFcKl烃"#߶&6aX)Vgf*U I/W-5D=ñN*ޱ_t0?] 
{W+VjeBٯ^$.Gk-Yb._۠ApljLH\IFLVx#C9eAtC;I jc,$&XO5MYLN?C.\(8&˧ B^҈դg1!/ǭͷ}~;]wO4F ˗624 44u/yxK|$eޝlVYLRr\5ZTk&vCj7L$F#xpv:{lX~t#W {Qp/sJ+j Yhx6`f]yuMΝkz=U&5(o.** kU "wNh(,rЏ = yEPFV(gc7196 D<'ä81+PaiEaF28,,|[[+EvjrַJ-_t1(ΈiDd״ܓbZ [.mj%5]ާjiFEI y xS3 Z*#wb8m09"4?+#ǰ7?Ws:V, hm2DYGDћ# J Q@Jep ރMp>~znՃ-HPSy@OM%m,B@SIb+ Io791 I)(FQ{H=ks^ d]0v#QRQBC'j:P0>6ԈS3ՎRv!6o|u,o"ƫ<^|-"0{&c]rӟ?urz |T!-[\ U¼> stream x]RmLSW>Xn\lqq ghQR -JCȡE&(嫀NG` *mB$|L2?:E 33?j|%}vmW)pޣw:`-̢0}hUCLňɣo.iRPiJKP,wC]{;2x&+Q7_&\GұtX{X_> 1YtBeߙe X́- <3G jhtTR)0*+q8=`r;Pmn&N^jV[} l1%NuR&lf9I(NKԘF;q}=\oK6EM=OhͽT [MVhbZieESendstream endobj 448 0 obj << /Filter /FlateDecode /Length 5245 >> stream x\IsHv'Ǧ7";|۞阅㉰(XbQd6':($ryV ݟ5g?)ߑî.g\pva:0n__Wu&  6?QW7VwpWUG.vN1q[-O`}!xN/Ϣ4y]A*9wviw:4,U;e1Qj mFas}ݵ pmuxx,K akC'bȏumuŠ;ư:^{šr:x609P4]U' bٛ Q6v["r-ޜ_(S;ު(U;Ȃ\8s)^dMkޥ=6Z@%E@e M{RS,W;RιK qZשGdF;5u@G:P4ʗ6WkI0i5:KTS)t"`KŵN@z_'O92t3o,,)MdIةaU1 n8&ZI;j9#(H zDcle3MQ^iE1cb#(DA u(h>t( c#4hAO X;R*M Q'sTnQ#@,+zGkEO/t`n"Cm59Ihi-<u?F_gzy|oxRx*pIamGv@}3o42 >q#>2E> {(G+oѷ_NWHoB7ۑ{5trmm`T[&<.4uaCҥ&6w ~}\T*ZΟPֆpmK "K0LJMa6Á ɷ(`;wc)8/'@4 ,bt:\;?`xU&2gF(>O<ܥBR }d(2e)"pQѧxqN3%?hb0rjy0C2~nB$StYzՉ%9dDž< KIYbںK_>C"D=ZɓZo9Հ0\ QL1hB?:8yfPɡVRz*`a,rwGǣ| rK`h(i<(7;Zk!]8u{n` p)PI'#k,|+nd@X@(] c;|qde~ӎ$'S"Z[N2KE<<ׅqHGLqF(x#h)W"ldJ*QSYecnQ'UIQ$=8$7"F^f`5"oM^&h'iԳm<laM&rVYAH~]6_(lJ=V15ɬ~!U.˻4}82 ]G%J{f6+AO#Lo II8aԹ)pc%%)X#0FJ+Cw蟖Z L`Z:K(M$46;%{%'SѤ1ofiZi"1֭mJٺ9)l[%O3yJ{ u g)EbKR/qCi]|4Kr& pu y:')Ez4::3%3kDseq' x|?Qd[MPl3eۼ}mfc;MG5(7^er90ύ tIskyf*B>U׭OAf3`Mv%T"~v c@~QS[a6!Ҙ L?-1w #ГE[ W`ġ>([[vT>.]ȅiBvnAgBP<(O&ྲ.dNMp@Z=id ɬoѴuW(ک$B-l[ōdy^(p\BF"[zrE,!Y#UB3v5}j'3\E`.@rDZDˈܼ,o'-\V<~UM3׻LV*j?fVط+\U̱v#˕5:sϫOwTlS۰FM4>(9_k<3@}?z , IfD<ƎPD"qB.Pn[Qc哮}7##"Lob7kjtp\n2/#e 5b+V0Mc̔lS<\";F9OeuS{ صHB#ƈCf AW"G/R+~A|1*1$j J ߟݰ^~Ƒ<:@=V?AӺgS u(Fy2zjbPຜ2OaȦئRtjA\Sވfڔ{b4R{Lr.㮛tfU>x0\{#]f>mL},ò,4:*U[mSg˙ -058l6DK3X/R}+i*KYSD^.%J90%j~3s=sP,Q=)>B uo0Ge! 
XKendstream endobj 449 0 obj << /Filter /FlateDecode /Length 5812 >> stream x\K7r0aGH178³#$zKkC {ӏQu)83T#Q3$D"QiS_O_O==Z~y~o-<}I"N8mM[{eN'>;;1XFEZH!Ҕ?-uӨLڻUoϤgBUg 鄳lOr}cU$0$V8i{1ЪZf]PM:2yx߅AD)Ȃz! ejgmɛA@E4&'5V_ ;)_[Ὄc^V4]K08=9 TʻӅlx;p-F< CZ(']҈%mWHgl78&iMXSo ںie]o# }[u8Ij7UG2¾ٷ i&GOBjSK>$x 䳁/2o$Imn_JEA*l̂e[sƢ||P)mIFcv lO \_m =c6ҲЭp`N&BVx࠹Eu !se9.דe"ȫ~ JWqe1zg' VI:۬ᐾۤf/8L<"BsƁ=sm/QM3W =(=S$KB϶C4q*NԘN.2tM7lDswX4m?`S7sbpn_"M-+6dߛTz>6f7@G<Oan|Jpc%xq =w x=kpu|cs T,P l=`RQ8x 9ՠ qB}WE1oщ`?:nśpe4%CN]%S%>wi|U[:y\ߏ6mZߏ( wc˨}N$*#\bnd|]E:O 5LIףZk9\+δ28ᖌ0v .n~@  UNUcä>S@/J NG&,vqVD$DPG@4;/atc G~!1T%H^$ #*o$(bFuJ'~W`Pj`D‚CLD٢AObFFn. ¸| 8?sjăixO^pHJz-<lUMnuy&1ٿ0T)r2ZQ \%U9_Rx2ʱp*s4e Zȳ5dj%*xӷ>QU_rPD Y7=a=ZDz\W5Q TE8?c1V@#bp!K3иCCㅡsT-' PE0E &z<#~ߦřc <[Kt&UI2uQěHn&>qʖ>UgufNu$p-[`O76 ) Kғ)xH7`]eUjQ M1m(۽ .jVDM) &$!_'1 ؄%vH^FQ@<1BT7*q|_|bh,n虔ŎU\3WkRoX%1eC wt5Gu2Z![~uΜy7$ת)Ÿj_Ȅ~BGX&biPHl8dyrurUfR 4ʄp%Hr6ցM0hشJߜuq* )b9]FOmu+&tj$SbL9f!ͩ7;V;΁ؖxYB,Hޙ|~ wPG|[Ԃz}3?ŏ\=K/4*6_&CRŠ!1%_Ā\eFm ёU 9}?MѴƚ4jcyV2|Ad ^­4.]]3µ.]QjӑaKo݇qx\5p1w$_Xo]QȆ+0a.4|IxJܙoBCN>M pb0o|Ì*X< +"1ECǡ^L&58IR~WjKáġ'tQbA& EL8LM S`Ό(aϻΊխnl>/E<;).́ꓳD4JRcO M,Lp첳5"Sp#tU!m‡e|2lڢkzFcK@h i<0%'F4c'\3?A&Lﶢ28Y2ӊkOۺc߆`d)i$;kH~ŻaI_>`2gFѢ.!8KC'tuCbFY,bmŨ!f{<{Kdf3!j^M\8WŁ J P4~g#g85u|7b^CH)΄c~pSr1fmBj{-vOGR|tck~4.ۻdcAri=el6xU|{(ŭuC"t2D@"m{k!p} -iuZZ Y7~ZJ"^Бe<)Cl9l/:'P'zQd1NԫK].!~d |W9|NGkrf?JRm-eJHOVj\c\~ڃUDBTC!#hhүh"TmZ(WC~^Ix6-~.v~D loD2`{ PT Shf>;WiV>iTTuZXcqq)B6Q+2TP rE@KrɤqnDkB,h V3j̡hS]+m)"(S]¤"R:&) /ɾ֌}~M,uXg|?<[V Xv~ݸώn;=bG5X5`UCPP!4|)>uɧF@2=})MSq۬dmZ7 b0# ␭nEJb?d_FaOK@C6xY[Um݊bQoZ&xuK0~bBbYA(^/Hw@iʵ̓Դ2HS>/d+`sd+ +)\I }w,LV¢>q 5zcTr,|`/Skn}oRE!5MzS?!?\cJl*k]2)WWh.?M[\(xŪ[u#Pl5z͊Z D^()ݡfKkb-rob@;OP]2CK0mCuhyi$nxH\mNz`]%Bnц6\(u"Pb&a85ZHѥBv6y]/P s62 '=Ƥ0k&F>0$n {ቆI`@sPQX,'5&XkgtGq-f=qq7-{ &Wٺ!gFg򴮵-C7Z7ݡ"xqvSGjj6SM2ס7T<%V}TXr揬Ȼ>:1ӂrbׄiPcU]*c\7}K-@\z`}`ͻ`t!f<'^/8""P^Os3襤Uxkc ;hv\ (7'] t}" ?8bS}D6^Հj[_Vu[d4-@3UfB߁ n/f&3`#2(>zv]]n ]20vbyx]-z*X.ӫT4I :/6S"Bꫥ-W%1E)+lprTpL ^1UI%F8OUӠ>ŽEH п"T s ]!uYXMG endstream endobj 450 0 obj << /Filter 
/FlateDecode /Length 6537 >> stream x\YFr~ GǾe>QE!Lρe#4gfU)r&iά<< -\, ڜ۳=]6.^<ɫ3E,6z+e_eRY}8/('*[/_OSf5|6FQ%(Ei_H񊤳UVu:?ΖZRず-̮aBfv65Цt6Uh`diT)-kٯDj8f{U^Xgf':@tK D<ߴ@Zaz8ޔ.CjlU}=_sXeK m"]Er"EQRp|_ Tvy ,axL"'l4un:(ņby5۲Qh X`Ӝ Ѭ*+lvK^BFW3};_JȮ¬N70a ?U7c۵Eaَ~Yc6^k6R'TbXK- 3Ql.0RĐD5:۸2kիQ݈)7F]5 J)dwTJfa28 ՛_~D bϧ#B %b,9ǹJqɺݦCE[k )~On6ͶW=9;&ncs"L' /Px$f@KM":Q/a5lد[I`l⼅l<2%?_ۉҋ?n{`-:XU(R|ۇ 6Fq u u{_ ltv< R֥ȵP kt6qAtQPKS~÷Gc;~ +ʭ 9/2月"lnq*?\K \W7}x"+UZq51zzo~H Jũz/PG׬(r[ؕDNr<iai.S EY\jQJPM]S{/a#KB#J|w);Ka_3 ZQ9X.lWd$|n7p4grTR)htnN<.DO; ex*9A *='Gɷ:fxvE %m:CV2R.n)0րPFnᏕ:P.*VcD~VVͫ9X4lP»mꛇӡ5*Wzrۤ;ѷenR,KqktԌ /q;v6ʏhmDF'c=Я!cs¯Lc-Þuh"\|pX)ԅM@6x`1L  xr]&x.D*-AɿqBp/A !*9pދ@IUv3x21Xr: }%l(NIdpj*l4hTocnRGP/ѭ#F~٪CݓkiجS@Ipv @tn\H < HX; cdCG.t.ae`M7!|8裬7$ R4`͢:'}|m(sI(:EQ GZC3J-NņǠMBP;J"jjS9ka!w}ORc>jO1^w;͎B'Ћ"i`B$%o@$eȍ CkXChr |МЈaSkf¯sf/ps/|eL G>Say8.wdzY2hQv‰ùA!1+լ"~|1Тjibdop ۂ_Ws!}I|G&XI` h$T??ߢ꒑$}?:V=E1% ε2w7(쵔uvE YA̓ʵr #\hwg_;upt9;#_w14fy?Z>sH&G>Me*Ph aӨ<`'X.@<^qƋhGvҖ$)GZ C|r|2dHfci⛚  L~ UBATW5Mo6DD)5z7}wIBV٩îmn& R> #ʷUM<˭cX]c[nub.^$h֯TЯ{N"{]wpi̦'"=ࡼImlN0>4XC->ȃXP &.VQ {4́X䉠<ߺ8NGlC4$Éz2dOhқ$ImS}EO2p=Q Cۮ=S/p(i<+)t@x >]Uaشٞ!kߍ9f.{XdzKC )/pXJ]3RzZu$ N=Oxv6 Gg:Č̻Yp-Z0U2~ M>B#? 0NaVmu/bȴ+ cpB@TuL-,:ЇCxZ!BMΝa7bohIh'oaL.K)p<8|F}3Ʋ(7ϟ]l"rx*@">3[/6.BJ[IY9 BDEΓPcb@o1{GO^0PC;Ń>*٢2QTz 校/8$x?O.WF\1嫠@Q O3Q9{9[˱/ǡ]?C#S?KA-r>`%tߗ-@z_}1)B#ᝇ\(q-$ ̦s\4eE+7[^RSm+fe{e9kA cKһ<-q&p (8}y|y,Ył2 C9:Xӟfg[U[%#K^nPJ~; /Ube+~8Cf<ߓppl;oŷbqmu?xDTLDSÙOO % lI7Mla<0g E_B{γiizY/箜,CUoJ|OU4M\.$ޥ"02O(*1ހrCO,HU%@j!"]|G徫KS%F^Cy>%UwoC,Ei@]u],wූ>ţt 4/I(QXQ>/Ąe㎙;M%rrU r%DWc*>|#L~ŝfS?߻$7Ms;J':R-&J*ēnz6U[Mãh]e!N;;[6'[5jEuasf>J}*1. vĭR=Ws?i*'S_h TZ 9ܠk*;L< ~vizӬg+++;ĝƳN(wPcSMpk峍ؾnW{ GM?s\Ik^oXLʇJ'_"VϘZ(' HJ,V$O_a\ƐI.=(UHG@`ߜ`r͙4\d}8)0ٱ FQfP-ZIsa |tCk+%[b@s8b\vuߣYS%6u3|7r8pzwbPD-5ĖA P&XQ9 J<ga3U? 
KNMw\{@XYbKx٣ 'mӊ%/w@kԂۇf=ĭF[]oVGGDꄫl*#Tʾq2EJ~p&\UYj®b[dYؕ/%^$=*+endstream endobj 451 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5084 >> stream xXXTWھ+`:0x,[{DQA bi3i"e, bYXMHwYSvm}{#c̺12ׂKBG1=$ptjh/&/'dv3>`%+>ƿ^7#<6oh1#F|t. $",;xü G:|AO; vt2YKf/Y}__ øL 2cPװGxGD[}_~K79jpg17f ĸ3C2f9x0tf3YdF2^,ƕ͌b0s1az2kg2JƆeTc[Ng{Ui,Ec0I\!{lcΒ=](HJ+UЅ$&'64/8 6@HNj i-h"Eb>B6BTOPLT] G{6CMFk נ̖[mL~)6cTAdns {!֫ɗpѣs򷐹rHQ\9i$A 寷B%|7:鑅݇ܳqwX}e{FT{OW@9k ] ð*^M^>D)O$@gEX@56Z$ԊeUhh%1D?QdĆ>pZ V@8$6 kcMpj7%-f1a@cIvŔFXT$쌍^#Éc̃1/['^<$]If 01-m'N;$/at:tE{HpQL!mt{0L'3"`p[ɴleۚBzʆ 9_)>(`=y%y7ᕯI[)ߗlJ 'ñq't!8AKq(E|}*s68x _)\ DI#,"Jmk{- NYe!]wQfu2̼CWUk,c ݅] "gGHTHi*ӈ!2c۟5d*#/-"tC+:EF:DtǴجO7%Ū:s&E΍1N<{OtZ\jBv$&ŨńB鮭 ,h_ v94^~KMiW ؾQef|.ͮ[G~F 1XY)%Ϧ_ "qwuTyeEQ]a91 vt:&c ric5 ͊L(V1.*`_aЉͻ7[6Y,.goV3κ`@NQLt:aCy02z YB|Ḫ@PA?o ߓ`T?Gs{sopD%{\&#.kb9:thOI?"}8<ɾ+kd 4/?m)|vR[K|Vl=Dtç7co \ug$*x$+FO(Ja4!nA1c΂$m!28cXQP`X&/t@@;}_Z%)&Է*9-,W ;D!>rgӎÏOA[8KNW''GPλf{YQ ΢E%CpTrr-CNmt76&Gq$ċ(i{U-IQ}p[5Wҵ :K Nӧ@1'Iamzl@z|M"#E jz1jEK rW*k\X)*rv)UhTFEwM(TqZ:~]DDKENJaMl("5µ4!dݙzO}eƗўPkR\:{>BSqٲETѽ?6ZKg3xfIF ; –ysý@hVKҡ2jFEk?,P;) GSj dQJGW:cQmO(nlB' MN~ |]7taglvUG,k3n [:@Gb}x٠T;b~oPG08 D-V3LjN5n%N~ߖQ,G 5S>ӞK?G^S[І%,ܺ=spN|xΟW ^a ZHLdQL{rYȓ̟lVW䗦Ep{y^^^BM}jV%D)&h@م\<ׅOR -N fǫm.n/>Y'hVw8`("Sj.)C m[oON1Cԕ8xo[KIrr}~d]|a]r9kn̑QŐ+ >]'i8aO|jl [|!oO)'F]{p $fsěT&w@hSv&jIBrsK`.|a VP&NJH#E _~"g.-ޏ==^A}PW'Ͱ%Rw҅0)\`=͟j!I(p+*;VnNӽϖfPg>[ ^5;`x, :G~ eN]~wpee_߆VJiHYY,19$NciQ:#E귓Gb<$x#**!o˿(6YcC/[\!iGl6o>h,hb>ddo< U?|j§(¤COg& p8r0iHWdѢB.*%٨2lO!4MPIeANVpq8WA=c@77ōܜf,۷7#C؛O&t٪ljK(Oje'wt ?|Ulq$mE$29KL솜Zc` Z5*;D.S{7-sOzS7`,,]&w~}}k{ePp"gFj9g}rnA.\+eٹ , !Vʲ"5%55Ősʪ1xCVF!+ͪ Kendstream endobj 452 0 obj << /Filter /FlateDecode /Length 294 >> stream x]Mn0F7c7R4dE"1Eoo.Y|)fuΟq*f6=_R S~M|KKU^AEyk*Wq2 5^яԞv4N7L;m}hu@hC E4D1܊bAY:6Y",+PzO^j="kH6ȝÁA4MhUVC2(v@;Q Qe}XW.nY(˛ ˼HTendstream endobj 453 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3384 >> stream xW pWna[i@E"mb7CB66!0Y3W!lٖCeٺ~ՒZmYn'`%&@B`7v2!ld7k-HvfT*wO",s &nܴI*Z*]dT^) 
|3&r`VZPp>g&dg@v3144 EŲEeu&CM5jea*e˖/Y"_9Y4gZl=ۈf` 0R#hOĈ&2([tv?gTg>y30G^|ȿ%S*˦Zv*;_a3!MVKP&3L5B"w#dO n2&|u%ZH[1G{Q! > )+_ggvCy 4x8Z:%e G|]H93q)YڭГ7erU\myf6:uOA"l\<`Ǜ!=U.%bYXh]~ЩsHqM]‰v-<N׵ev;)~va].r:Ov"n eL24$ԋ׳G[i%VμA<:xGnn&-h"9xwCCMS@<7_嚉dU\mx&$aXÞ?Mꦝi!5q|t@Qkm55D!Ӆی1TMr.+S1tn]kIo(^BgM957d@9FϢpTu"+;䶘L1Gh&88Oq3*W/F2}1/^<`\g7XZiVHrd{L'O5_K6!h7xJU6-P r箖shmܗJ(/ vk("!6Uu,NAeOjd%" %ŀ"L!a52)>srJXn d<"\\ ۜ/yMnqKc"z$ a0ڥ>;3N~* i aZeN44CLӒF^8vTeZY}"#uvQɩʍ2)=ZUgeAjO4I]D[U^8p ʼnWh'0>~/KnW5va[[GEwi.qC=և.F 48%I7*+*1}[;M t7j %eG_ӇڦvIk;dj(5Wt ZĦ^Ru5|ʯf koQ4dGǑBο++4~Q޿wѶOKh|ۀN29ɩWܶdv04dj.W מ˱7 РG8T0"B'޻eFRNZ چ@/k"}!xwc ѣɶ3cN?x뎸${ޤ,`cݪ+JE~_iV[]Umr +f&( hA6FÎM>7ul.&|p[M~2|A $|' cLUV^^SS^VE<R߉:.HW/TWT&z#Q"h#L'l7I!\@ႩԴ^>I m?!4%"aYš(s|:(cvҨ+3f0C"VLC1iC@ b1Y{F83)\6,2k BE0<?v3RKvn(E)@])|*;BCZ!`3zv_to~DtWo(c=o7c导bkg 9- ,^)Њ&R^i0wNf*6KNGӰyPcloԇC~>|OG<1uʣx.ٰ_K'y#fG{|iSyEЙs1|#1/Rj9&opGhH:뛪X-ԹES[mš (9*.pvND͡@>ڧ2[9 I U/$z)x|4 mBTZg&N\ώod$hc܍FipE ia,˄' Β:i!9@{IP]C`ꮧ zKdᮞ [Ee{)o]@l)wï•0% }ָ8w:o%ƆGڎ$zX!MAΣ=TQ>@anģ(k)0q: y#V]TC8.ʻ~[t:e7goYj԰Q 2]㍜s9?$`ski'By$/N+:nqzF%C"nI9Za"HxZ͎,9߰muOSDbگ~Ahb!]_/.6bJ >kg(!m5o$Vt15!-~f58%lۯAO{7T *q1Њǘ$Ac~JO]sFwkM^2ڎ Jm~AUe$&l~ ᴙO(Ʈv:JA-+S1| >w܁ϬRk(*ܴĴ;fOKӬ)idg{6fl!endstream endobj 454 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 761 >> stream xm[HSqǭ8ٲ =dXHŕSJiSer(&E=T>fH92CasbMGF,)Iڵ2歷ݍ9l!ǐif+ h¶Oy;3L6z\SI;4䡲%%Y=J㥤P%+_˚Rrltem4u5'E5i 5ťDq] Ytn"Vb~cYd80̮pce[;?IUmltMvBsp IAr)4:O-L뽆 908l!/?%xrM[8$ߪ}#,Q*2q~,r؃Y>VʕH"̻|9A|aP 'N=;\}%aAvPɠ;xDG_Q:%iիLRg<3g;l@L#JK8FS~Ϥr7 é,= 4#:3\X"(I$"El. 
dOF11y9tRN?u3D;y;I9)+@yx[[endstream endobj 455 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 575 >> stream xcd`ab`ddM,M)64 JM/I,f!Cg۬ Ni[EyyX#_{fVFIE% Fƺ@R!RIO+19;8;S!1/EKWO/(áhəE99%9ř 0003pI00,cbA5|9O0|{Z?[j+uϔmuK~YxxɏK]o{[wߵ{^.;w)v]]w[jՂ~u_쾽?VNq I=usVn=Ie1ݎݎ$><0?.9肶iMՍ]rm*~>Iz͡쫨S> stream xe PTg0ü"8u*,Hyf \p <I YDUFc$Ivk~캏eGU}_ׯ?%D"}`PHjrtkHlq,gX3@8sr&e=3A4k k0#m)HJKݙڥ\*|)e+}ݔk'3)15nAnʏRՂ3A钚NS)7nT U (v@wE,y˼5Z@Rv=5N9P%P,5LYQvB唄i,,,-~wIHZ-X]&J9Y~v%a(m3MQ%?=rEfD&F [ӥfTYYۥG%e\49VKlRk^`v4ƺa1NhߴmnOyΛ{÷}rµy B7C 3f^gp`Vr]0 ^'1]Wp1d){eؠ6!O~:^S&| =9^á"t{&LGu.>8WC6g"!5lӶ(7%hҫJ2*K ZhaOH#3>LٲջJqdcLζ-W>}SZ?t^*PS۟y95Gdl0eoik*pZƴuv:ͭ:Rlr*`dvl/*1"(Ut64j9] o91.9N茘(s9WwU靻Z9(qߠ(upiѾ 98|Bղ٧`wy|L8_uU&^4gBJI ,R} n YAwv*'O— Y.plztb1ı~0+':lci?3;9t~ۗ՞۰0<[x}rlHV_ГnY(\_]g|9!$fS>p:\qZk7n]cre/w^6+eJYF5㟌|CIμڜ"vJ2nj/?^K$2w. Ne+nĝ)oIWS]87=P TPРױdAk]IO^YL RSɒ XM)jmV[]r׬9Vq^]Tw##endstream endobj 457 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1773 >> stream xmPSW_{EDD}J؀늖(Z"- ! bBr RP@*:UKqY]yθa{f=s|\fmX,SsI1E/!z [L2qT`l[yH$P=f+7( RK,^n׊b"DezJb(Z_ch2ݒLT*D1qhDD!Aw6k>elB8rBri!  Elyu뼗=Fg"_><m}- H7Ǣu|%5YU͉S+w g y}]6*3p> T~ϮBc* RkB_:1Ir;?O#N@c:7/C (J;gPX ֮T2.r1 Ǭ늛p?3)BÙ{Ci6!æt[sTԷSń/XeVBB9@ G 8ZMe@2]4eEETC$eTƁXy(lmm٧5 MUgĜ}"rx3ڡ}0c1>T[dϜaQ|$X9Ű7WQᆞmנt %Tg=S_r5'#d3N9x[f(2F Zy|Ru+3i8(@$v=1iaaIt;9 KsBRWFȹ LX]RPN k$ g  -z /vET0cN]EyJ9k#?޻~_Zti%e_&5LsfmFv0țRwal0wBT:"zWCcE>8a0rnPڸ1ShrDKgW9" U3t_v&aj7U\P`8VgH'* F> stream xU}LSWϥ ئ 7suP8H"~T+ ҂BR-ThmN:(N!7e~%[&3e,ޜ:way9}s8@8K~gZWù$'bX9en2O~u\]G V F8=/dt"zXll.t{TU,j#d 嵮VeָfvTV;v\;ueEk˄%Kf*GYMUbߴ )!/\G^&˹vH^.;Pۇ瓞* Ș!+&N6U^d`ݪw 6⿙EmICp.K'=|3teE1\KDÖL9ʸVfΨߧ ,O l $it2}325 c lfU x = l>n֏Oiw,TZA}b=lٱC`V`QJ(\|CpE^6xcH2c _~F&4Y7 \U~R+T ^HQ?`I<2_:!YNQL͗(4RDYЛQ U*eB0*s0SğS-%-^`C`ĦR܌0wP 0λ־xٻk4f[0]hpzw. u~7:2:uYn:xAxO~ssؼܖ9\0h,?ʹX/aE:#mH[fo^t7L !Ed@kY<i&bJi7ODcTtFu.408/IPG8tp(G.DbRW#&g?'endstream endobj 459 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2628 >> stream xVyp]YHq;jvh2id! 
>0b!ɗdSuٲ- d|m6˘&$\!?ah24Y2HJNif4;]iXج(b=))UR]bE✙+XԼ(j>[J3}'3{f(<{$E'n氻1>xI3l2{V{T%ʫؖOhn[w4o@[MIY 2\ޢko#b- |Xwޢxu` O J-(5nh9 :,L1CZA |C/80@a:" "9˹=ﭖTf5ir~q9^ ֘*Ke:l]G߶)3夥,`ga/}rtqC/Wcl)]ײ'mahx}8"/tBLU zFs61AGCV5uKVQ15qܵ\Sz)џb b{I6+]YWw3l&/ 'V/%=Tp {JY\zg5W`uZzب:'<3m=X%ljKJ ~7yj%:YXKWd狲ߥ>{;r§kRsZcF)i=^B/X9pKфM:g(ݼ: ˕]hN152~𳮕z\É$3ǠW0OǏQ̼/gFI/c">+IKυpg[K y.SYK($M~vph{; ҹ fU#2֮d)9@{9 0 2kpYoOAm&d!j瑱?Xuu;Hq{@q#[^z݂{\<2rfV} >ȠwQRZdzaCQTC+- zuxz)N\d! uQk/3b[ oҶ+dyU}EE荺W+Å2@8p[X-cQSŘ)] 8FA= .+uW-mT**tjAOw~/g SBEDR"[d4ᓳn1܁^1Mz008Mfm*TDi[ɸ x*B35VT8#՘,,Z=ўPt/ޝs;Ll].$ Q$srZc+"FQR}[6$(dV)߫t+ӂ\>WCGOѧzfUU8b0(03rC9| 40~0h`5/~u3'֓<CY8V*1Tm>H苦h.4YWRdHT`4h9)\c!pFmav 2[m7VrUAn1& 9G "&ǓzQu/7ubάtIL4<di9[LLI6[3 EWendstream endobj 460 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 419 >> stream xcd`ab`dd M̳uIf!C'O/VY~'ٴۭe|<<,0 }=P1<%9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C5P0000f300012,?S[C/~\;w9Zs &=~o>Μ:ynݑ1)U!}쳺:[V.W3k8I)6{[ս{k}==;o6ɖfp}swMm}{u_Ӿϝ:m?w>n9.|nM}=}'89 &Θ?Ceendstream endobj 461 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1527 >> stream x]{PTu%iY<ԻPZa6HcqavYALjrvADq] oJPjJ5hfq~Q~bJ$)VJ2yqI"Ir6?YOS%P_/J&hf vO "Sau4%ԛK cfώ&ρ]lNv>= bw'p7 ߁k,rΊ hFh?vwvwhT5F9qW8~sg.;쑳'ϞWv’*RI55`eb꼂rTys:y4!F$X9U%_'7>|OE2/[i8[+h7VnK Uz6q۫>8'm-PJoi.r>*m ghA&lbFip_z<܇=s,dgo*4斦LАg5ܭs綩\:op1?p#,D=0, ڷj˭{,j w *Ų?LZiK>! 
`;q—ۃ='uBvl޼ޥos=K%XՕQ] U~ *yFGHc(P=gH`kN}vt 8y9:#UE͛C=ZT8}c#$)y+3G1BGAw 20&!Sv܆Sֿ[!SaΒV#<1nn6F ՘ vARVRa:Qqo\j6 OhjB_܄7nx<;o){6kVg jm OQ߹endstream endobj 462 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5740 >> stream xX XSWھ!{U 5PZRZTP\Q@6 |IX"{ ph[Nvjj]99l;Ngg<OwcCD+VVϞ: (Vepe2oU/Rx#`԰aDhJ,y)ݔ;Bc\N8e$m.'2|2FtQxsYdUk<KyUf~/n]݃2^xd4 b/ؗ؂AۤH#T+de]9,(F1^3XHW ͌;iߕIJ¡]wdckPly@% P\}n;ʐQr IvHNA6#`*bC7l-A MUxՉUY,Ь: /s@z[&+ ۀGl r\IoVdv}(bI #Я-قr{Vq8lCk)7V iE_ȉe:RxS,62G_:_`(j$@yW5Qb6 wkeK'rx ʖ494X~U:t#9,l7Q-zfmr#-Je3nbK "OfGAx1#nz᭨-͛}a=bGiO·YY8ZR[stS"`>Bd ")&Wg\=B>rk[u'~Mr%exzH 71Ei]lZaSBDAtE]/\vJ 92ӵ\f~$ڃ_>q!Q+קAnл 5ӳu CgF3^g9x*Bs~@/rAV@L\%$]E71=H!Ec֬ .3<Ɗ+yY[]t]dA}$>Kܧ ͥT*Am>]6sS0%뽊,#;Cm)Zh ~폡H"4A1{-vw[AF`Bw[W[n0ƊwNI+f曨"BDK+^+=L0mh?2ˊ[;'L;dKf0;BC)“#r*^TܥfTLOU&^ G( 5e؅1K(ֶ_WK[F[WomϮ<Ɖ 65]iJxI}8 `ڼT`!c!ݘUs9yEŹm~Gx!áO>$ZЊxǟ*Z U)7jD< Pڙ#L~z 7Hp/bNr͇{/]uL(ҁ.v;#A!wg[E׸A{fx/?+3ɝ!wvn^-RYlD=V(6Bcrh‚uu[jtwWzYv,d݁y׆ɒ9;\ ](?VKל}MgJp:k'R~e A6Ӳ3FtJb85CbHCvѲ(ص9 y=>ޥ3jRP#)§-=]ˡ-s^ғ&:3j`ȰزU@,\OXz\)MT<.;>™…%A]@,}:&B3?-&e ZM*)@b4}%Bu4 9W ϲ5K򞔷"[b}$xoNB pWҍ6Pm6nj&X~wyki}4ZsrفX,zPU C!>85lQlNk#"i#n;Ywp@\}1 `  EC>4>3[˅ >y19hc>Iu.T&ӏ/#{dY lIQfpWY|+9l1Oĝ;зROF3*L3|&y\TU*EX>mh,!*w5AV@BlNHMt_/=?orL?t1livНcqB(XDV0՗Wq##ɭ-ftQp<: N%Kh-,0,a $i8L,w({ƜɭQ}9{} M>ɰ_=ZqF=ʬwl8}- v;d[^V$ >`.z[/0_]TS( Cʂw0}պ$MC^{ B?v0Zs[0Hk(d rK MINEǔ)%-`5*?1%qS8 ͻk奕f`:`\>9vjeܔ+iB99ܜ/ OFKC۲Н\hgr&G<;w"ٳxuKYUg:ҽ eyxyZ6F׆EGF76֒hnkV h mv<{Kh%'։R8vt>EKUVF^O] IO]tjcwV&Mvfj\ʎމL U3]GkߘRF_$h07}Bh RsMW5tW\ąUӎѳgxD]'˲NOZӒxKDͮl@㈕5jgAB`pnBb@6!B֔uʜB}=2f'+#{'$b-UbC.(nњ_l}@B&뻅R1䱄 /DҬAb󶽠yL_:;Jd4>UaBAw~Vڒ-[ڿGGvkizߍh,$pHicH 7 {vMzC^~a;d)e#uW,o!`Ґ[vZU׋D}ιۖ;6:?8k 녿C9V]FJR8gEmMĀZĮ]A2<˒j˺hs!YXk)דP& SXRnJ/u*3-;wIzѿ|,TBq&fa5*tz,q^myx۾m0/pn93#KEVlރ ׍+o_*o}p1bR̃ yp)^⯉- BItghVŭ|c2QR 8.򩓸P܆鰊Jd7ГO([-!AOᑒ8JC'3'1ZA^t '{',5Fo9~oPY`όDM˦xǧBpKÉTdMRGXqCmnLԂu' r$b6g.g抴\ ID9nYTrr ێ֞IA XG&l#в {|M~eCq[Lwl2sBр޷ 8!oINB.oRoZ F^BCrre9Sܰh q|HKN~aG(~Hᯩ=äOSIdj=)a7Qx&Uax}F$:_ ;;@X}aFiʮ];b( 'i5Z" G6ٗmeg) M0:^^[]Z<9y:]vvfiWcͳ{~7)endstream endobj 463 0 obj << /Filter 
/FlateDecode /Length 2706 >> stream xYMs|m)|),;vI]kWbRIIA;C~yy=3 1fS>zzf^^B$MO>pÓke&Γ'n e4Υ 'LhOC+,D 㓏.JD˔'9wDvsK^D}T$,"aVp1Cჱ;Ҍ;qeu3B'(P:0@H$Nw քę_j\`eT&f՜1,gU^i9r5jx dzS[Αɲ,7jwX܁R] c& ηLF,L@􅒱ѢhK )eC~'5 !AeWd_"78>tim)ESs/=v)w*9+겸O0_.rQNR6njY,lw Md8\l#s v8ⰷKtVݼnΔ~g:g,S{gl[H6su0P: g .)|D(Wu H}6 mܫӋFJ+JDAbM{Ɣ] X<Ggt5vI[_{=jۈ)JǁSeU mCd"W(bwaU.\sR3@q&M*TeVq/,#W2 1-]\FnVvl*<%#>%~^6wܯg.q&w:)๯kônzL(vq1<$%q $702ib5qYӬ^}5^#Q5,"7gN(GzRԣZ1>W?R#&̢eߓ;of8iG,GQ4PA&E(C$Cv1vilʫaJ=@q%EjJَ_&0+mw[HsVQJ"F!8y([Qch Ro-tP:^]~+Hhf01d%@َ]CbbdE5|*I1<p0I 68A(JFRΩ٦8-ٸ8/AĂhO֣8|AJ(j@N΁ Ї |p9+Puf,Ln;#\I\Kr USrkmHJT leP۝MPOn=l0y+#͍q@;zr'cm`{[,u5".Z"iuلd}eVTr9|^~stz[ R=:_uwr'pGH68o3}jR@Uzi*z[%WhV.'Gș* A4W(KK X)7&;O]/&?hZ-~R IҮ1Yc!BEnҦ{eE!2TmtA4ZՄ(ۙ@^ңzu& Q3s[pӸ2S_FH$6޾58j*GHo!9$v $4&U&3]6X}ږ.-p~*{ {3:h%^Fdn:Jܒ֪[[Eir|Rw{^x5/ BBLLۆV :(s_S>qm&tOSډ10Ċ iώw&O?nbTub-hjendstream endobj 464 0 obj << /Filter /FlateDecode /Length 4690 >> stream x[ݏ$7nOC㞪J[uA/CCtL1z#%Jg`J(T~YtXto/7".~?zھ&N /θWfqi' cXZ# ]oqٵQNt}3ONQ|ԭB)^mשa)m7/KOf\!M/i;+d˾-;ow4ץ{mk; |\`L'j֌T=&4|ZĔw͏>`isXA: N6x&1닃Api&p@ľ͖qE<D>ܱܱI?eC] 55Ti{L/n8'ŵV v:X yOCjF:ʁ+k =Ox6eIZ(J&i0/AoED[s0O:Q S62(ADs[|q,66@zLbz8Ǘ2~Or3 9iIVts-)lJkALjZV E}W@bZJg`לB=t.M~וx"Ly?ܭ#Uh6Cv5rHݜ4TStf`*p޿tang{|2OP׼)'xގwJG{VgHgtdzaFY |`IJ x_gy{ 2L¾za3@Z;@-Vf GZr=$kveya7 a!Dڀn^!hL/݁v+pl`4ďBY 70_J:U E #A·K=cmuMPb`'zT(c>М*8Wn`]Y,=/Ljj)_0^mãg}%1/ Va(os$O̜F'0ܟ3%7cR4q _tzؓχ !o*% .U&.'m=jh#CQtjUp^=yd]@E>c5JOG_IXL.0w0kY0b<8EZfAqȬ~KTP㑆 _U4!ػp',FXP2aeϕq\ak^= yKF!Η؉6v|=1*HoҁDIJ³42T9\˸C"$';9/5!_e8 doq&&ɵR+F x$ l'z`L3QtWژ9`+1 f>\ڟ3;%lN0mㄐ18e[p6Oy3JJg7,NA#j |Yu9eR!g/Z7qn[+YЎVMZㅛo^.+Lu ^>4w D]PK +`}Z/!< @ij!-@Yp#Y^|ϰ,@ $zv.=Rl kwM1@#X 0hFU}W8x-dQfiLz뉼g-?kA<¢]a8B.?PZJcP϶oX= ȣsWtȹU KOGEB vph~q6> .<2[QG!`K1NOL5hQ fS$$=D^ h C;K׌͂Q:$z v'r)ҮAŏJ"}HzuksW`f:su2Ґ sa襍z+V:ik[;E>l:w|!iSɊ ug $%$XI Vt_}k_q}`9YB#;/1uZS{?c2 Eԁ:u IA^ q<"53 x u 2Vrd4 [D ۆDiEfXD= 'C mZqx4ioSyTpD zLX{ do\EAudx<# fLx5oFT(ԜumkjWBxׅʻVBukt0!2_`:K# 8!b w-z`e35߄uz>uFC8\c6`#a 
"ց1xN[=ú2"`]j&֕ʂױ/:zQO}HϚf8'scQ8#JȕROԓ0+,*9d\d:ci*qi7]G8Nߋq-d9=PhًkR%FˍxkgE yYEtA|J+ ҵ$W".D]|br 8Nj=Ȋ$iUt ׸",h"$\a W+̸&D\ W򂌟WqE95ÕSp%d{s%Sօ [-Jw6i۠fB= ['c ,miPqV%iަ}2=2kV/GwWL0@ѡ8ejs)&TL_Pd~!YOe~QG rHp׮ݦ>+XץѭW<.OyNxyuVGz™'x_hgHSjwd@-r<=+c&f+3?_-w*9=[Ne RWow`\y,u ƕ>B q a0 `\9Ќ0U0 ˈ㩙`O0^G/ 2~B1xwX⚍z>}?DɻHޛPk5 5]*VJ^B`^/1}u o_\2-j,#Z:]Wr~Uh}־i33hw^>cbqb` _j>-C"?/!-Fwgo#01(,cS} O,t)y#K~PU?C'g@K8a9K:S7mŇo6;tp :[@#] 㭞UǛяa!;* o^H ٨\ 4/-w/@X-I}B=p::\jxo< }[ =mSm6+0 ҍH7~p0Jh>0Rgg0d,bGUeTi#/>mD4kV%. + (fendstream endobj 465 0 obj << /Filter /FlateDecode /Length 5312 >> stream x[[o8v~/E.E",f/AdFlk.*z s!%Ų ?$Q!y\㢩Ţ?~{,~two\:wj8yWBVF/j7۫O\K[7ʩ?]7uӪN4,DV8zwMEwQ/^%2 o~Zj-Kk+kgQƏ'[xG9Iɭ>U9K%,q4<1ƋnpIa4P ;Dg`D4]7WWmy E#a&ADǹ[T!Ӭ'q1827l1dIh. v V+Bgfm5Щ4r9S $ে,jaC"&q{|( M]~5(R7tALfJ!'R=] ) XGCn>U:`Dwm+i#HvͧJւ"{h߈V, >  V ؉Ι]ZA˯ u\eG0=qLhelXYTf纝SϒI>³J Bm&lͻöH3ByNx1 TIG&Qm&&elH?=Gl! oy{>TW├Rvu43ABO6;)#j#:W_"*|~3LW $U'ӭ! 7{^ LFywp2L"3 iaGe[USN6( @ClDjdjFr(δt;hPwz鬮6eJI )BӖ)'81р]J)on%J^us#2|V#;'e,:xxηd݇1nYc?)a[~y,/*Ģm3*<V‹0 M<AU>aZ$*qxWim>p[=y -Ҿq.^*4ǹa\ {*Ft, *pߍfnImկg]^ᰵ8͢+ ʫ4:EkU3J4jx 6iV6xᚬr#[EA;^wGl_>#@%yPsѣҜpOƵ{-]{iD&A'U/Dmaa]g`|Г.4r Y)pz700PmJd 6Or!i{Si Oٰ{i6gv/9ğY-YgҊ,Jv4=|Cu=7ْ}P+vٹ:j9]N8v{:_ Oz8nTL@xP;اv<`NN R&5@Cee} Uڈ̫Oj$(`$id^91ͳ3g6G 8%GxUJsC]y9O:(iAINB(QֿOtL=g' 8%3-RV%Zufg¹k7A!TO"c=$9-ew]YvG}RA75;-왾h P cF&!ٴGؤfmT'o 1~NGxG>-jlaaKQ*]aj>) h|) `iV]/i,Vm4>jJ/+j> ّnVU8:rVFs xxHvn)*(l fŠ/) uq]  ^ !^sQ)MJw?|Cl(|DCUdϪ-M$qkAUYl=؀=M.3p}2񀦣T:@#j=Wec21DoΈ50R2' MO>.2qHݵ1mB]]z70C)gekТց} @adP!HI&'\krU35I2 ٩k+n/?X g 2+,p%(}_DsL.,reLoPy:!4 =PE,jPŘ omS:EX-QԦ^;ϒҒByK|Y sSfqc"V6U'ztі=6"^ M}4R*".r~)Ľ8 ǻ+wɼJQ3IWQRSؚܿI!}aTS^ۥAdp.&=(B"lBGw&m)eAm|p7UD>GG4&>c˳?oqU)گD/>^ɇ $(Gƿ"K8!S5<9\ -.<6Ri>6oVŚ98_Qk_cAgtyW˱^WC#X2%G׻}]~ȣL 1rz*:P>MOq"ss"KsPcyM0a~oдLfCr294ŷWo|;#mˌS.v ϾӁk#c @K.pRvB+YQO).Gӣ.di+ލS0Uc:tFH'$))hT}s)hN|ľSR\\7طO,h_L]^Q, 5F"^eXݩ漞ygGpgƆkTol>s\@CL/lLN&htr3qoriQxgN_@XBzLx)̀ؗS>c)|aa׷QbjLw՗sф/sgLQR*,*|}#t_cZ!eEpYw:bP qPj;C]f ygel%𒟛 g0`QPuIH۞5o|Lie4DQrxNCxt?ȒSY߯獫@Э/WWV4 !H^ɶgtgs]-ftrpBtz n뉡_ZMgKߞ8o`c 
L*Nݟ|:ВDԛh ÏmI a΄zN&mu7w>0c5> stream x\[Fv~-n_BmF Nz`7ٍȃ;08-Č$)̴s(JG$r\smv7Aoo?ۯn7yUT񆻈R:JۻMV.Bp߭n/0ʉaw]TE\H!BY,yQz!m^E,d ʚ,EiR)WMڭok*]Dž+m!&sXBBh!;i*]]z{nRh[eo䥵%~~hF.qK";Ĥz+/) P째Mҡ|dw 5UN&nhMjQɄk ֳ}50?o lwOU&UYq^|(?S?A"e\$(~طXI8 >a@#&eFTY{/``fd خ7{fV,xD@fIDV֢iF]fQ76YpCЭO3DTat`;XoeI 2A .{~/OTܑmL64`~+zmw=W^"痮ʺ!vH>TzJ>r2$QGWi+Zp|Uu)!b0uqdz\6u_g=[\IU&vr $)PuqNNn 86D #ۓu1A0RBUp$7ѧ> *e2 bWXQ:Bwj~#vFV vjO^`Xi JWs&dNBr'^FK("`aD}jT[r@4ѯUA-B9th؇ւN|@+uSr()Jevj&׬ @RmdHĹ&Ag)F[3{H=%2+4F5QL{VOT$QLRB8[Q{78lީ* Hp{ϙ `0` Ydx^5av0KGtrI|Oo605nxqC;̚O^;}@fQ' ]<g'-H`tqm} UP+rN y2p }3Fu4G GZX @OeaaE°` xmĺܔȎWY+o=Sσp'^}@ d~0D[RBč~@) a9 I#:E8jk .6PMAl|ۉcA>hz5)h θH'({áo+sgrf}q/L ^%:g TL\ pd6i~`W@$W UQVU^Z?czh?Ɯ@61uN2CYA|Th;\(b(ƚuܫBnZ2{$ S YJeL/8 F @~jvӬ~`M].@0I Wl&˪䶲cD1qatB]鸛~m.tY\ip1#_bbjPZ׉:?6JN>gYSɳFJN|&o)$+PݺdaJnLr6Lф;b'C\kϭf:dUI%Iܘw~#H_., i8o(xR!%" 7&Fy?yiM.% )`ggRB ^X.Sm_8PCsHq!qell,?3AB(ۿ@,UmOUC@x̅9ן5!+qBUk2ɔ(P?*͢RL gt>3U>3e^z$@F@$B]|bVmA'^!yoUƒl05M67d͘L^KPǺ%dL8QULcQ:,Shc>tR5tن '1Q1PqUM`xN0V0G WHAf6d1DM#sl44u娙TpY@AVGbAп*+>=]*s'vr,vK&_cWMwgo ֟gX.MSM/*=1&DETS.Ͼw'&Gc _Y|e DX©x (5}wڊ)RI|JfGNC0 `3 D&Uwq`TEG"Px 4"ji"N31<FÆvq}HuNbbc{\T\ɈMŵGߡEe/75 >ei#vD]GS#t%;CڪCC84eGBFx%O`UQǒ9򑣓BGHyɳĉC-0D 4q5="S#D4o7euIZg!"϶ v;@7!/10@aa2 $ST\O\M|WnRm>?6;X3KKq+۪[Z*5_P ;(X]&*6<ѵ4G_f Jd4wvO܍T%w32TIwg_x'W| nXi-c_v*Oq3ůܭ)ED<L"4qH[sw~{CFDQdw2 gZM19 Sms!WJW\m&ۺ8qBq!CaRDrI;#Z\W] BL0 "s.J/-ׂ !hFC{ƆJ]yU\%W$\\$ qݦG|b\B'TUqNxr) AIk1W>u䩙: ǻ* TpQcC3I/4yQkSV˷ hxũ/: M<_[ H 3 3z c &0KCkN\IUpqxs? 
JQtD!ƷΨ4#_Tϟ1Fu8c;Vt!\V`}S*б/bi;SӔA]Тp'M%(BP$W {gD4=AT?@4b IB;gul-NtYQF~=Á·p cx5)zn=5hvm -?sRK{+ų}rIt" 󺉢K؅zρVj:^`}zH0W|"Ed4o%ʂ]_J)YX,31v,)B~9V*Sn[v9ŘQUR!d!v{ӹBW$:fڴRq<=Lb40Yhu̥t< *$ap: n@!=І@@%HA+.WE"L;YU=v52 hBB<6Qe+Læ18e{tƛs4Pд;INCjf!#D=eT6t7a4E\GA7B(]Rv/V8cTNJ~q?}¢NUfQpиN}Q:(%rho?M JNTUy(|w>d-hl4s >mWM1\*TyY!A tAY!|냿9@+U㐦MBLmkf'XpZe%l6!EnC$ *1tJV 0̈=Uu}SU2&z]n@g1Z3|];(,~h׼0+|O#T] vf7w<2s[)so frLԫ"]4vdO2B+2ssdU d\D(0%gASp؄$`b ۄP 6C}Q+0pᔱ!fN:ŷn1KyЇ::3%c0pe:X?xb>r&)r o`&|v0 L7}SDN}k\9 mEF+(@(3.$Uvv/>4j7lϿYU{:B[{Iq(6.txta60l';#:JŜU^[sH48!;pj^1J.!Qyf ϦIo`mBs~W#5_Uqd=x}R??JpNjgOxh2 돁/o{/t!)9 FO|{^~mWR rTLC^%= _9O1d'%6?.h*% U'.1+hOC"˘ VbD+~kVއBO>!>?TXG*.XOQU:. TbOm[_('7׽i`*ƸxMhs;%Hߊ?}2Qef`Էrc|%@)]3Lcy$TxCHٲĵrv̶Vun>AJӦ%_H<0}ނOF?J9xTтLɖ q|&9Tv iu2Wǿ%<  Zn<#KMW̭5ݕJa0m͒|NS h}L{QI9Jz5=՘G.Y}ȁE9Nf|O ?H9~~쇐^J@)0^uqC7/c PL*u6i5z`#CMF+;aAO8endstream endobj 467 0 obj << /Filter /FlateDecode /Length 6484 >> stream x\KsqmMè7mM~PhpfQU@UC̈́cC4UEUEwjp+A.wwWߍ_ʦjԦTV/qezwc_KYjT>/2ʉ)4;]_Rqa?iYP"liWEJkX)S?w~-s (\q\\+>Q{ jQ$.i;i[°Zv7a6+aj q)m H*%+Nh/:4@fM?nM!n:;U)jrcQ.WNi)[\?NbJLǁN@_{2T-eM(Giydz!i_AC»dUV~#aׄs%p@@1m+48(5Qáxj*>3@T*v'ac0ӾR:X jp.!HoGp!yꏾMEaloN|DVᰠ&Zyn I-~{:QJ۩O6ӷ*Pm#.<]}ֆLu+[]_ $''=fI;?uw7YJQ{B{12Ln'FA8(*bM)Ο}xV@{CB}Sr8XH5Ʒ6"=Z`e4lC)ф,&aS5@t&-Pir; ;Tq%ovnO(n8|6h&rɛ^ `EdN9p\dOq"?]13<Z]:Qu@LWXT~ce*UNɅdIkej .Tbw7~GLaS,-{2XJ}\рX?5tmKn@[*CG -٨v̷ҧ\[7ʄq㺕ӥ4M2" \'CrJu%K?TT_@)'8o%J+zK]˪[.CU&@4ҤzdG6'za] 6=H*0} 7OdueMEl}{7iif)41=Sn3=;+8Uy"|3 MB n#(y o3(a.B®Ǵ y43zk<oBΒ $nM]l']۩plHf{4qnÓ(O4i['-j8'NB6h 0?N\9@磡;U-#@ݫI˃R8&T*.y~O?s0gHڧ /ݞWU. +}!cHHp'H964vޝ"Ԅ4>(A`Pp@6j Y i }@)ݶM ixE{He'wjd;)7璞luI@]F×+#)0ظ/pdJY녱Mx%[J|&+ >I]Z0Btd`CC~9W=_A5,h-M\萩gNb1 =3{:n%ɁBpe1 ?aȞfדTNR48o&yS,B8Hȳ1Bs8=2"տi_(37Oy7h\4\o# h†q$źKiJHq7hyLc+ڢΤٮ 3 ۳cp:@Չ?)9":35],i(sm( cC+tda]e?X3ҝ,e5ۢPAȶ4uNk6CfY:̶keUͷ !! 
ѯ/ LSZ' _Ey#%xG,i`IwLGrx$a~MG:^?q 7*`~ 1 yF4% { SGAɄzJkz,Il*YǓLA܆^U,BS;NC۵'DWV GO5 JteLKUm> #$&9I_.)N.îƗT/hUspUHtCGQ C6)d+vh8o(Z ySDhfodR's8pCkx pZaP.1YRijc|o 5RJ b6)iW !pU2LNwKH(5xNoeO7[82^U7pivlr"} >XmgrIVq1폏W˥)p)@ C_MV{:g„cßonPl ؖ 2uh)*fs%hv軟`>!=.3\ bJ^)}hEoݡ-;E:.Sx7,wgwW*n?qJU%#mRE1z9AJ\@@ms^d`\ɂ?k|Lu( 鐭[̚Sgk;{S>[OypWЄNNݍK}OL&,事_ą3BU]." 15Qg5n8s۩8Ma9jfow?N pۻ=VK?2^p`]_.J' $ʜN-8zm iV!nV'e5jI{M7>HqWH qŐdzDGͫdƸ|Z|ڇ`KO?[ڐ '6p xw\nt~S,%4bB.Zc?QO_'C}y)/b346(3uM3f: t1w*=zؿLǀ>ֲ^pM_ئ5LH ^PE+=P\5584AZr C+k(V")N6MXq>G&t)k|fqP2,>a L/CPN/eXmQ#)iMř<*hJ %&z6>N`n2QQ`9>t,n>[ԥ#wo=LRmmzwk%4NRA^WTAQGpm<~erXpB9|aVO9Yf"çJH&8RXf_B-ӳZǸEvϰ:u(&ۘIk7yE)2ݵ;=+Ft+p0*ƿf XuXw= ;lx㌗|.0G\rR>2AVv 7s*T>tK~Oqhn?૴FTUnpVQV|=l!L2w)O@]zЉW{5 lTT2\]hަkEiG jŗ%'w^R419LD:fnl!$Zmk*,$=y9 H6J>[J/a'q?" Մ(hj*lbF3+}FPYB%}BHڤ \GSCz˙4Ը7:BRёv(Y%m,]+')9{#X[r7|A_?4QAVc'_eH¨& JO ,VH@o'Ϟijiߕmh4z61"ֆ_w/&z+?Dk=遼(_ h{8XhY ST$ c9MH0}қvDUMB% tUk.XA4OO?ZX]Rw`8O^?ʺvq7p5^J=X oFD[ XƛkJKT2…䍌 Q7SLgj 0,ߤq*NZKnӷ= K?`;^GrCv)Qt+kh<5,SI~l64+xI"|O$/,7wMzJ9W 3S@ rwlZ1q4yqɜ^9`%ij*&ojş-[u5|ʅ\Cz@z 8I6όc/%;mFJv5\GMdbC 0")zPw_72r\oe`'G2u%|;]! 
z}:^ZgX53C B!IPvP=|o{O?AJVqu=Oݷ[_x`/c̕Vʅuj;|F)_7a$U]?J`k~kPeDqf뎰>yan.4_Y\m=ޡ3"X]#DCݰ$Д\'rwtA%F:qQtí~{  1k9RWzCdThwMj|'u%ASp;x>lίkY`Ĕw6|ty}fendstream endobj 468 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5334 >> stream xX XS׶>9q@8FBӁAS֩^[J T<ɔ $$$$LqJh[cAi}wvݝ|k:.ˤUI&<{茄'/sBS8|Tc]aȓO>'7ջ KhB֛;Rcc}̞=wL{϶,Y>['c}& 8'(YLc}&'lٚu֮Yvu/f(jARΔei"m\66.$~}B⌙"^tW=տ,ymӦSj*ZC&Sj=FRoQ~FMj&F-Q˩ j%OͥRU|* Q)wʗ@yPKM"(>5rS3hʍCS#Ky#z]#\ϛKݡu c>׍:kLI㦎;=˽{\qFnQK^c:x Bu{ Pxl-O; {-\y+Ԗ `"HZCāJx+6#a&Cv-TP'P|1|ͥˡ L˵e<~=Dž':;_|mk NeCpu{x$8."!g\C̊"t][']+p2@:۱Nl㸗wJ(ι#aZLPԜxZURm&rb!{NAwm[+}[Xf$֏.y5`p o^3%N^C~W{v 1VqPåa]}]c0 c@hjZ "pcqE8qm4}V|d5`{.C|C.@-d5062}HC p2?6/Y?$7Y{e04а}mX#~.*x_* qY h+oZS )ߥa4,](PCnlL(ofؕuP-JW:.LAʅ:x?X $y$=|xkDkmt>xp5xLX :>ckk:/F0ɦ6P7(ܕ½@YdQtc-is ihRR?_Xyy9j*RZtw]NӡA+q ,63: [ `,G e's` xy+(LJ;(APzyy/OX#Eb)tr]'p8A/`Ft/Q N1oBՑ=fdK<'&@5Lޱik9z`dv'03 yW9 :ӓ< G`|< i==YCsgHy$}x*l˰Gh DD=2(9 d58wsµ?̖q ;PkjW&N ի .x$]LDcH&h/?sN5+X#r(ʙZ=4 (vVAnǙ amFB^ԫ h}:A+ Ȓ%8^+aXjRiv"5=( ;7a=D51OȚNѽC^`ǂȐ(Ȣ/#pT!Ng(~$0" N&Tuh uLU. Uz9 'sx^@c:W&/8qN<'%r xcR8Zxڴ- {DE l:EׄK!rE_w7?u~'gD ;9@L|k"|Kk=x 1d3l7/|2 }ETmzk覫O]p:5ynrWLJt+iƁ@@8~r'  v&~^lxx]hQ]1[SJ/j8aС07|ABv 7-gB8$?mPҪ&>kqq8m_i7#Iɹ|S{P*0_ P|LW[lE˿ n gFјJAqt QAr+(y:GϰΠviQ\%鐚] ЃƖ~H?5l@U] $$ Nŷ^0끐E*a5S dc}_ ǺG5لBr "bOr%J16g(^`|m&Tg M`< y8-=4?& ?ΰ*AM Ѭ/k"wVRat;x_yT4mHiNX_CuIp*?zdXw d?|i '_;4G^7+:XL"C@[cýC}4qyGj"H-0%7f3:!>'9aoĐ%2ti _b!9QlL3)%&Sk* uhmC]1 #νLHǬQtAi+i][>/NiHHLP'(Ra /& łjA/SH-=O`oyB(dYkIsPRQ\ƭeR!H=)G,F^x;~"$OH@ R3+Kx|춄T蛑[# M5"m'_LS6ކƋ=_]n fӷ* w&˔b-%": J]Aw,}`c`@r%Wt0_^[vH+o 2N[ #%4H76 ~U"AMfxQ׏e֢s@dk֢&y{z,ɶSm\5!؄|ZNhtR!܆0>])6qfǷ^:z,] G Keq9}M2iHp0::Q֮):iѬ[&/hѾYͦb3ivIkSJ9 w@^ÝN4)-RV)!Tb$9D5,Y;!A.di%V ٯ]͕PERYin^mMU֠lSDc 5YB6wIhil1އs *X(VTY>E'fE%m|JN#?(m64cW=z lVnhGkHاz*F*Hoj0,%OC/trzSf-J0RWv,̑Yj7a._r882!A^!DĄjcangk9Mh~9Z-gv@O^rU_TPu8"ҡZJ>7֞ߺKbdVV*.}i X9DYfznT. 
?#φ ,[l_H+S!R:֑xCro%RfȐ䱣a=fTk4ۣ/Yt%wGQ7?endstream endobj 469 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3050 >> stream x}V TSg~1ip#űGzܰ.mmTq)JVb"HKx*J(":VfZƊ5j[[Omc;]_Ϝ3H$8iNNw~WDyD"фuAAZ%ߍ; 񑉎 SDS8E-P2|S z(u< @EוVju*Ϣ ߗ|R}FFiIqjHMO>FgVWh>c}B7ڸgАMW#EQOhuIȔXe]-42k5 B &j3Jͦ( j% VQ5Bj-H=OXj9EIhj %+Q`g\+.Q],QMKw'ܟ<hL0?o/](hK DyW0${'QG3K4>w ?uʟ8kcG tQg|q㺤ЭlT~3\btU}mK'TGWZt+DEJk`Z0}#^w y)/=&PboMd髕~m`a O74sT~ZYy{- ^iZ ;T˽l%2 &^>kSF z#@j]GMuu?4^ny7zv h<BBѕƂ}lh`kP Z,F/EC_EiEDODu+.u< E(P)IqtzB 1(2hHJܷ`]ے3`G dGtMzu-Y^m,jF1{9f#|rcD;h~:eG0BTt ݔ e),!4SLXiJmm/=ZݹK wf~WT݀nAk t:R3m[gJHTF,# hG =#9>\8ĭ;I 11Tf4g /DX洣 b$!g.h/w=@AMZ^K|+:bN#V3K3jp%a }>[odym3  y5I5pJ UB C0HuNT0\u_-ř@>&!ZXPV]ЕX 65Eݾ̀Wm-l8V() ajG߉%)M 09x\ څ_e9)X^tSTqC,ĸt5Ȇ m}bxܼX~@:yC(p]G=xlC*V\z L^,\={t{&GK_h9?\j LNG/_\{KAJR E=LmFjԕ*`|W/VK9 ‚5{,~_F:аL.dBnQ:lk(VS׾0[sMU5Ֆ҄j2Z8E÷lOnebAj'z;N.f/w^^H⢰pN{x$F=mC LQa7kzV۬Z"xa;|![qөOjs8KM SJJ(E] . ɐkP.pIyx U7N `$qljyk3M2*B+ ww~sirksw>`~ye$NVHG (N way6W[:Tk4ݛbCZ'. T:K`a"̲B3L6SJrި"o䔚򒢢QA79T]]еu_pTH 5tJF!QjB:qp09CDW:9])DTYh~]Ǝج cŖ== J k-6{xQv@endstream endobj 470 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 344 >> stream xcd`ab`dd N+ JM/I,f!CǗ^ Ni[fu0wS{p! 
̌iE% Fƺ@R!RIO+19;8;S!1/EKWO/(<(1%L33,(~DŽ޼0Oqo R~s,{_7a^ |4endstream endobj 471 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3050 >> stream xW{X`%}Dw!G%,5/x7w]DP`"cS` ml2DPTK-Cay23/?{y7y<{n{s}!ËZld߻)q'"p;$O&}]p1ЯP((r82e~<=KNMN>}$ዓ'OΑxpQ*E,Wr(UNR Re\8}W]WKA UtI.+v?:7}$sz6Vnvz mc0,%- k8P:c3Tl:tOpȼisDb[g &v7tF$Cs#Gf#9`P$yҗ;`43zcfdPrGEB8"qx_ ku),h%Vݷ!Z"9-؅MX\(p2 1\S/hgwO#rAZKbI| r낊c;Qe̘K TXjjء)4@^.mō (*^uҔ|CP/߀ᖜm>-;#O/XhL{g v \XwM5/=E b9Tդ- ZOBt$'vZߔڲkl六 m=z%(H"{ӿ-XkW'HS{*@IQ1kuM~O2,8PRpTla"F L^q&SΆGUٵqÁ0 [fBYl T(Z0Y:_nd8 E  Kd1%hd͈"az@vWخsh /2MHIcrJ!ʨlU7Yx 1[kƶಲg50ȅyI@+_ڋ BLAF.MzPΜ)q̚d]=#ycnV8 zL$`ɵ+[q($t4/٠(%OݟbJr/żNc0N70daÇ; Bk-ާ$ 03XB)leR5"W7lQ"f*bu;@]hܮxvy,5lvXȨj ʮȫ*QWגMg[@EΟ?+!vmnJ?DB\!DTԵ(yl[Սƥ6xXn#p?ݾHoLܛ{Z; (K7L%b xXC*3(7~ͿT6IrGQ l@}uϹUp A0L-NCm0t׋].Qg;2܍.{ J;*.Z$ɷ$1S*TAESYl T}M.XuT~G~`o(2IɔJ̩ljr:dnGs9*J\ܡȡ]ۄA.N4=5,e4XhS!iF$u C+"|R*v⩔l*E!P]bC!}I?s1o =Dq}#}6Q;|kO);E)Y鞪%ed2sޒ/]/jFQɊd-9α.jBKf)y[<}٧C;/ߑ ui/a G ޵=5{lLA+TZ}nm]]U+D⑫1?/PUdN('#G2#قqۂC_`6endstream endobj 472 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1426 >> stream xT{LSw-w\;] SpNy.(. XGK嶴/@_}3RFT"S/jbQp,2'P'\Ne,ɽ'9}$$"$IiXdLO'i٧w-vpJOMzPƋw≐ 1Ij3 ´L|*Y/0S>'(hrc>YZSe|6]/_(C<}^aZͦLGj|:A.Z^W'$*4E|ED1"#2b%MHD9lE b/qd䚓^jD 1+̶_=*ً/a^WT5gήEtTp6i9jt; 8KB4\{,c?T ч~Gw@t22Yה-֢jDC@fGb%mTlb 21w3u U @AcEec{YS lqmzD/FW xs7U^t`*ŧ]]jkxضoO>j5iP%̢fsRw?^Q/& f,chDj魎-"=2$Tٱ(ёgZ5l,8 uLìe1б~}2"CC=L܅u@^1ߠXS$Zov] Ef7;mۓUœmEpsֆDx\ RA9s*DWY(5տ>~w%% e;rc(# PL򍞘 d_%m4IJc/vǭa%^7`@^A#h Zf9Z?nص']Y\5۵(_yDoJ;_@WP;_28 $p pSØR9a5Rx>qCۊ[˻c;=ԃ.p~N*0 WSc.@k8<L鲢#2!:p [)KUxŀT,2-}_> stream xkLSgÁJs]DDm*U\Xc2Z@Ok{Jm"-T. 
BHC+ M@"3KL.dٷl+>˛M<}a`&%GEU6Džruq5) CE^BPA:"11VtR&/svQQ1}-m(*IZ%Q ͖R䳬>n^#tv66҆bZM%:L՛X@%Դ*UZA*x  {%Z0\ ~B8W%KƇ<.KW%3w:@!oj)?Zgm g-Dy-Dcˎ$?67 -B1wyS"1Pd XJ!CFCC#Nݔ~y<qNڦ8  &k]ܶlt%-h{W[K͕5emhCXґ*OYDdUd2X #e<&,U6Mڛ:[ 24S4i`4YբhKk7kϲbi9 R1j8ދ9G/ҹ0 (U!xuDޗO|;,yG&L+gR̕պP0Q ڷF\ˑ|H!*{d`x91bXIgS*$fǥyBz~ piؤ,?#'v8gJ: ^@QǑzK.tB}V%2-^1zD{—xLk%7ܝ sfTc.̅3nU]A+ZZ'YeXRWfv[MǢyg@#wU/zϻ]hi 񸣼hQ5MTשZWz 5n߶/+ -2\*[r5-Mfvh;Xaov<ڬjP\}JO,l,2bɣZE.e9. f7 (,!pMlIendstream endobj 474 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 497 >> stream x%Ohaǟg{shCyp3bT%s4TH_y  :ceAmvbt{i|`d@cK4Hld9:ѓ{N4tsڑJL(,BJi-W\&oj# r=/T@bI-+Z_ ^jZ߯/_)jfrt9-Kf%Y]Rȝd^&ǙcD|*)iY- %<2 c챽wo #-{{p<00}}oLV0<y,Yooaڀw~/0t^}ڥ\؁qc!tW/'نfabN L+n-FNoR^57Mвaze/5Yo7Eұ l!> stream x}S{LSW>zi)&z{-D`ɬ`U^!Sj!JyH.D靈>f|MZ\s]lqْ;}_}@ XR1:&"r:+TnLnsu_SfYC(_($8 @Ze+a}!7Gk#̟ϟѴ 3\:]I'̥CҦdL6NM~_L+ՉIo8D 1zє^[P@"H+A2H8,JT`jTAAOO/; +l)NJ *VvυR\\uSxhƬRp6ePˈ"u&Qx*4h]/ bުا#bѯu._. Q@C 0Xv'td=$vCUwϝxܲ0j Xɢ+vQ$E^[b;E&ΞG]D*^ U*fn]ٸj{(t=|3pR5 S_kU{rn񰿆 4[:hS;wCc<%Z۶5/PDu߁m Q94:󠱄v"\S;QFuMdB땉[35rPH K`_7Dh$vH{"cs` u~rx…Vv҅y4n\[WN!zυRnm~>2PTZe^SQTO8s-O93?hdo6 NsCo|,\f\&+;vF mf˻؇x`'`xqZl!vV C '{ޫ@4~U"˘ C:Llu{gvfԘՓOHnG\fP3T]q.T޶ Xi,ARVmK'v4(vX-pMmbru FEX:<Ia+h:+@ GC{iXxS$I=+lCn)_?q"`}foF ݖ!^N5 vcc.K\0fanow8ڟSp> stream xcd`ab`ddM,M) JM/I,If!CgVY~'ٴ-yyX|(\{ BF&ʢ#c]] iTध_^竧_TSHJHISOSIP v Vp LTL%YR,]wGVdY jZ~K%uvǬ?|[7oz5&O>G~wKtޕ~4p Yg!8uB7-dƵ[9\Ğ{IOoߴމ{,;cڄI}=< Jendstream endobj 477 0 obj << /Filter /FlateDecode /Length 4980 >> stream x[Yoȑ~_ -.dxm`s,<6#AJjUE 6 vGDF,Z] 0"󈌌㋃?R*]T.=]?wWN[xRʋE"VNjS^_a0 JKYza*+jQ.qJH] ))_\^i˪RWͥw+>]Jτ*6Wκ6+`SvۡXsŇK {m%81ЪX'KqBnwhɯ& hWB26d;Rz$_{"GW;Jk`2I^^E6 \v= o.?]U8liva.t 4"pJqcXVZjWxX$](pLx+vA|nO"yUQΚP(ޏ\JGJ۵)vP-j_b)n8վzq)(ի+J0DNDڪC8הFz\T~tY27uF]vK )Pimf }35F[psɳ+)h.Yɳk ]ni9~ZiY"q GhADLqdD00`&8؟n?,6_7w]h?>ަ&nX[ٸ eJDņ'?3Vqv"՟sIƠg Q($şϴ B;".y_"xk$ Šx_m>w׿*<#=U]a}wܮi mO<pAƴv7 n2iVm>+}:\2};sFMkn7ۡY2@klq}S5a5@E,voϺLQUp|柅Bܶ&;I=~ X%a0x2O\JWtgF^bJ.X.3(pJށw:o7A2$9 Pw(pqju g;@ɹٟ 
]Fl,9*)#nr}NA'{'g>&X[Á`S3>srU3(&#RԱ.6s%?ؾtμPTfb!T"A~4\3 י 'yB_SNk8/DX*Tk= FD|5"J~}e*SuXish2itAXo˚n)29zfzN-2l a! 9%G;d[K,6ÁqБt3Zx"w0 »d0hrc(GCrss> >eC$qdoպF5{A0g9mljk(VMl")q'ך\<&Xpu-ic /ᚩH;X!,> 2yq Ƈb3r [%)E!Q58N(3odlw]RUK*1T+xZ%C,Jag}`;/jSU䗾7A<)')Y&@8| )@UeqEc\9bDR:R hd lR6E};,FPZy&H[|8Ei2 y-j ay P D(sxAICh i%X"y ϛɪ2&Ǫ9Qؚt`ϵ,~g^3nVd7x@6=-( `|,y䙆[\$koQ 턿Kx(ȴyEMMMx &X f؁-Vk(n {x6ζ= #^_"ͪ?_,Z-b1"m ч cJ>񪯭=kJ|TQ|R ]pCW`isUȖަgLPc$ZtPkmKG=!-G6WxzvŠ e72W6'p@l$Sm ͩ@z}0 e3WE;SL߁Dk[}ƅ]X]ZAph> D99CE4Xǣ1 yR3 a$ܜnny 5 ҙbr$dӤWg%{|0il!NH(v:;Xv[^* J 6r ƺhzV!4ct ҥ:R@Jz2BdnX=%4{9 BbE)._%YDZLy%-o!1cA ,{ %fWz-pM^XBDSȓź뒎`̐ͣy#kpVs#5]"ՋfFTCE5S#;HY=nd@X;4c2^QŠ F| 3rWѿ,#zrblC=,d.s)OUs3d %gxTߏCds9XO`]7;]vv=u vTعz,]q&x[ݤc(yGX=F#wi҈)wcS^)J!;ŗ0ҨjRr7֕Xi2P5%ST72^QE]hI.~9FP0o;De7:Ɠ?}-܋eE5?}Ծ}m$}vk_kekw6_ęH?pCs` ND <í)6>=_MsLw7s\{czzŬc_ׁq~ij)~ݎ]MJܾ=l!~L8_l1D!NÛiMs~|Ȣ_-/p">XKۦ#E>rxh(s+: ur; Wn.6hP-`](B|E:66iɲ1]s K\ p[@iW'۾;+WDodx;lvs| IaKB|߰ڦKW66JMEGWjlU #1%~jEMu:`(zN>7aWC]+#03= 5Eu}tQ2HVq?Zʔ=6=(P!uk$v?)Pah Ӝ6*zj MTB㶽빎K␬bM"}R@Iuۃ HNr}/3endstream endobj 478 0 obj << /Filter /FlateDecode /Length 4290 >> stream x[m䶑>!lgZ_ė ؃ohG=3tj_UHz=ö$X*V=U~U|U\=|*w_}qwo7+_=)|eTpZo0(,Dᴃ ww,J^:OtP|F)Ud_k gK>w\z4-ڲS[)aQ_r݅Sֲ9·xҭ70J&!? ,]5PAD"+X~odUXHq1Rlx"~Tes$w"la eO='|}M?*@qҬ:. ά694zSwm4{0lq*Ueޗot J~7 {}j@Dž>C= ߭׋"ZW7w4tdn{<믲wj粨J{s}=Wx#ec"87mS|t&[JԤJ0 ;- NrQYCp)x*!>X^(kQl٬?5H^j`ۙr`{3|,б\&~)n)m&<׻vlqH'?-Xή&Oy8vl~Ыm^v*VfMP@ ǹ;,7ՁM$'=[|D}=aR1pz'qI]KZ a .jXڏRO:*磻Eqag37@ִrpW'dfz}N{0st~ߌǫ0JKܢ?,c^Rb_d:qFa}_\M}O!^{Aqg$\āN¤CA,\p6LWssj&N_Jǽ:FWN5}?3'"a Oq*u3^{g7x;waШŠQzemɣo\DN:lxz%1dZ0ap\/GNl%zbf֑ǰ2q셩x{"V^Ax79LXdpKiX<2JN!D6 oS,´] FsrV%(;\5@ 9 2\ &Cy?>G kmBN\ai0YTɦ{#O=턐B`30i b9gR[-}sDR @T_Qqx L+G=~ 0h0x0w=>LƥV[(9[ͨ%*/+H@)횣FVqTSy StwKx2:i~d$!.hL r@g\H= |! >e~ޠ"܇."8 ˣܐZ$$ BovbA;_sfeC&ǧ0GΆax?k)YO#+:4P'f~b̭[{7bzSLo. 
6fWBHM.E⻵օp򸈸f-v<]gldd_la{G|tiNSKaa@@B *[q\rb]*J,䀂E>UYnT(G\-y'!ɽq?ɑ>g$# `Q_sPJri||fs\ Tj8^ԏ*}\Cjλ&爇6{vr)]!c&|a!.Rbڡ v<!tB spJ C>~qcp)RO3im Ú0 AjBp.b$TSwnRO0X#rHVicsp)`eN; o7ӗ6VCf?b$mO?gRgyUVWha!/gN#fa~psᔒ2p|n նI!t#`*V2TE TO1T>Y g3ƪ]Gܣt܊.UBKmH5Mt$@J믃2~*\a`8CpYIm %=8 eA`8+!~Vf[W>Ak+ srN3y z6U -=rf.)LeGT81_sYt;Sw"p\k]2S*o߾bq-$eɴ{vG^ n9q (#cᩬJa;n2MR e!ctcBdK~+ B ؤHzTiyBMnj;sH?c S usMD4DaZs94m^3sw =9Lĝ __T9ѓ}GGPr!bl^ס+H*qq4ᜅLVlx}dݩԆL!)."$DꤹC3Iok0 :Ej`c9S~H i`ʒ]b2 ]Ҿ}`ه`ӘIk5yv̦bO0;I$"MhPwur]E>iN %cn#cB Bah>72pP2 Pq@ w3_&?ѷTeJQªe ._FDɫHJzƄU}>]VHJcyc* 'yސ>b+!$³]|HoR~?D̏ 8.BX'H-7zس-S=&Y_KAIowԭ9j%=Q@=\X,2"ԣI]; I^MWM[ V7bU B XzNVf*dݾ 29~[݃V'%{pM=q-^.Ά۹~L1B(E!j#*of|z]CUƂE+1*)ksZQ\R_#lrs8M}101%lLH&PC(nȊOx 8iiH}:4% 4l'G>~zAE`\xv-vZ$DЩH-o\ ] \;tV d8߄'$7 ) 2,P){9S?Cӕ{$>諯)p,f{B *TƋehiH ?l!]n­FR@c*W9U>(r8]dP̭uSb_/endstream endobj 479 0 obj << /Filter /FlateDecode /Length 6275 >> stream x\[sFv~_]ŷ)A߁ʓl޲ʻ6PCX hyȹt ̐XzsΥiU N?N==۝pqo6 MՈӋ'EJmJe3l9؝-z&R*rji]ݍ]?;WJA_[Q>j{uv?jQ[xiH:[ZeaUBk-Nϕ)kk\JkpLiTY麸~'l<^nB`QixDt *Yce?Y6,|Dۦq&< ]k`x?t<.1]Bᕭa Q7.I#Dv|aZx>VZ+akvZVSt{k1i7|<3n=5jUtwn{%Jyzſ-vp8w9oQcrŶRܵʚ'hʈ=] +J7~" q+?a#;xָQ)kHF򫵙}J>ch,ukTU;`«dZrE;"pt7I j$28H`n^n:>_c /\UxyA~NH\&lްP.9v VLG?>R7BM]YtgڀBinڱ*czZ%^ E?MM9_X],E%o߅ UNޡ8iaEI܁u^悋/-C?6,'q6& ܶCLK$Lnp2iU#L&In*hݾ׆-@KMmO#q)&Z\Q8 ̠qcZ"IZD鵈aFiR&o7;H!s/R7d\uv KFT,a5ư^g@I~W$8S)[]CS`tQڑxQ+)]{ R!J /5UQ. `P囕 e]9,LSj?hԺa0! 
&($D7(ޑSBggcc!+j wgJy?x h^RF ۶Gtё'63VB|`:vrӊAۮe3|׉lTc`n=g)Zl% D$ Ġ=YQ-J 9(%yJ2RpI fw6lݟpě߰yӁh$Q%Qʷ_Wsp-?nR̽_Qs^ NBnTH1z2pɏw}3cO}t&<Ϗ"%0:1$hTKjM4d2A}vvXn{J\{r6Q{dlZN  =Mة77tKo /:7hȒ: G?li~5q84F)Ms역 uCHNֱ E,ynSH< nqM|;GI"tqgHcbȷ˷ UvwW-ZVO';92C7>5VNpk[jS%*]#m.Wh*z\5xȊ~~W_ a&g*w"YWUynv]Yo1BGR@@N~n եBO"1+3)4l%}* CM17ala!r]2>^,x\ zU5pPxYa=YJ1vvX3uF xR9ܕ0ǬX֨3@{Ǡ!Ϋ[p{dZi4aiJ  KJuE!> [ KV c{IQ)NlaWس@0;58 "=5h4l&X'8\7ջ>$WȲn< ՠM&6M&dމWS[[SZ(29: $_EO iw~ ܈t7E17 5u#RI88O|f4FΊN~$ +PX~NE4hnӶ[Ñ3TSPyG;ͪAv9vl;Sޒ?IB1&c, MQmkN,sc7BC 3B>1ԩb%( CjJfK+IȖdKL0x}5UReiO -XnQߤIvl!,raM&nW%͑ xOn+K p\"zao}6a(fyԪ#Gb[q}2]=CJw }~| WBmO!N }]G~X0O+Y;1mg`Kͱ3琿ք2(?z}s!Ң:W,a;: ]= WudSq{VK,BAk4 ѕ";7yݧnmC yI?쐼+1!x JڃU.*nVN tCwTLgI?1'%m?Sؑ[&2:ީDH*bCg{?lh*~?'%&-gX>R7S)7g9Q'UZ@i?s#R͡cͺJ=mTO+Gd-ey"UTͶwf4wD5ռDCG=/RR\V uwVq$>)UQ M L5Zh/璙m(p>/vKE(-z n~IhX99<3~՚JA<׶=-1WĨu5ҺBJ 1[(@4Yܤ)!K RE ϻwR;i`i!٨:)+X[ h` ) v%'`2#81<%]!R>4D.>,Gőu|p8EI6@#?wqustI=.Rx@>J,9;cdnKhF Jjv56~c+DU Y[Ғj^AgfPJavl8)HZfh֚pm(I_J> :5G&5().&, |MUisIҘyS, S~JEPaਫ਼^p ,&O8yeb[*u92ZwCx8DԊC7><A&#-]~t-:1]ЅW~UFx*3Q?T®6ncI4_Mhcy)>2MZʱ/괰tTIU ؅U;L@ދpn^i!znS7'p9TU/pt] $穤Zþ-8aJ+σ_UbaSRlCGl7MzC;_#)K69cXȀoҰķ<5ٲJQS9Vx#ώ:97{3"J%1 + !1oK/Ljt|9$MPDZVL8qd !è̒ӛ1OPyM\й=*u9:adZZEEג F! I8SlqW2&mE4l7ByLlj&v&)OatC5c~_g\JvSبʹkpxF@G5PW7qtd|5v*ĝ,jŖXʲ7~(w=Y=fȐRNey=58Q SP ݎXѵA{2rrx֎SoS tXp.X-WvX\›O] ׄw"-W鍿9#פ*Hf \5+% #E kA j_5K7FcA% ^o25L@pia1L<;w-%X>oއ>A̽@#P"$>ROMlRsks(Ue1*}Bc!rx~3y2 YK9#o\='b/|k1OO3aZzl9ٚ~_D`ŽÌf䭌=VEafsD21\&k?nۤv9Nho Tv|߸7mJa= fuɑԒ N^S+f ^Olɻǜ!x+a%Ռ` ^ _.p׀>[h3Ў䊼_勁r%c3P|%1T=Hl\҇۹—[٬He9b9rb'?1~23JlCc[g8AݬJDbO}|5z?!G|#ZTew}~'r2 yJjT4E3?,_׼pd+?g_~YJWTLWy+Gu|n7k`dWʭ\ik.6QO*"[.@Ը)_*kb  +>AhT-HM@fDE i(gbr}5xC`L!|3ɕ*Zb֮]:Ƙ#e-+)O0/o.9f{?x'ϡoYU-al8>zTy|%a=+ʀ%zD'\1+czq;mV<3JmCA+V|P(5S2#jL-xתz1=cG60& ,W t+"8`RGgj_ֽ͘/aLY+vzEVAew>=!?n7tG&y(k>=?}ec Jaj.\lRNM׸.>\#}+24||/@P|y=vg:={> stream x[ݏFr. 
44 : ܙ]ʺ=&Hւ@;$Wխ_nRV/]nǛ_nVۯn+xSʋۻ0EJmJe3n~*_ȺWEsYTee/OhԞ/J)k'|y,PpmΑtWwBe>h]*S⸟]vmNͱ{X,4UQƄ$RZK_S%'AO:SpD!KJoo9hZ.n6p8횠"#NkF'.Thc %9>Tی[+.7OQ+s롰W+dVsnXSIJ >&NM#6+ P]AAREQQqޑK0[{>w!APHhQ'.6qȠ[[tWo_. ׮_x<%3&["iB ~A\H6WVќa3f2M`]|AF (iY47*&\,m! TVo= L3AmI7ePu D _ nǛ㞿;/ f(bsZ9Z$Oynɠϑi#,`7f#HD9^ugylJ$ccbsh$X>ìg2 ! ',4>M,txB`1"ɐ=)"e+i%CviM@>YIwdO蓵,{nD/ .&A PR8Êd'W?@V5("y6"c*WtYykЬ7 f ƆR2&s!כDtpYVy#ƍ#c}Fy)YAeXWlb2JYJ d'1/Q@ Q:$Ac4٨cU)j쌺?56\SZ>ZR2NQ#x% %pQ:SCbE Ap⑙3Q -  'oQ JRυe&ЄM3IP;7G!tgr'Sց%B%V1CM>2 0ŬeJLu_Ψ[rm h!5cؑ5fME 'e,[dQgEѧK]beg hí4f \BAEQx!Vu4;%U9ݩgQ2۠ 33233Ñ1lv}Y axԺ&cf2O s<*,5kFʺZ^KC[!sզu߰H~) B/MUDMr0OZ8*I8E ZeLVnPZK2Kf\Rb1K{ 8H ;b~xxh"N ǛIV*[gF" ssHD5&Zni\ȺܣzHS LCE8N VXBGt e@ЄjiQS7">ҪK*)ܸY$uEZb(HN͵4L;0rsa+Msu9^_YeьZ51ϼ]Z+YM$q$@ `oY|hy_%Z5V&X mM"JyM2ih!ھ9f]ߚi_fAY!ZiӞ#~b|<]RsąD -S`9"$N2 !)_^uZ>Nmc)[Cy1ݨ{1Pypvj{Q?o!>뫊"xcv PV7=׵!Tjpe2iVI#yb} >7ᆂ=]n&`kD[i4.&]Km?rϸT6XINx)3rxFSIJ|K{AH|EJq*CW=ȖXMDlF:9'3!>{?DRךiXK5}L!{/Fے͉F0i:Q*a?Ƃl(~H©5JY*~1^#l49Cn~!SC E >?t d1:fWF1!0Ex.k3.ShIWzt {|ng(ׇaEy>?jb~ <^1KdbKY_eb&vW9FV_q0ƈw‡*$@YW7)Ef+ Sth o>/f95b+1Z~eHBT[sUyH}ben?!äG]izhgi.joLS``sU0ђBciϗ%-PaOiz+*@p,H00l~S0RK'.z_7d =>d 0g*ˀ,]UB@dKE};2 z LӢ.Xۙxe@8]2aM`?o:O /\}K 2\M><ԍ 6)]ˏdz1i;Bq+f$h>Z<o'V!|L,t'A~JEBgZKUπ_Xc9&$;#ƜMYG3V5ԢRXMUXئ$8<s縭1؆A&`͹ ,C_r).EmFp{mlhKu?/.V(ζ%(C+}mltO2T _eH:ukHub'#7C ޼p-%tmpgq|CG5 Ց#C(GXN"+ v!`Do^ElKZIO.0ASkqK uo3/'K='3 + Pc&mH3of?pQiQ[Ǜ\ WP!RM{mc|#s8N_Rkb`<+t{jvkN()])E)\/JA/=ア Ͷv0,W P-uZ=,Ekm: e:D t=;鞉p0vP҅P2rI0ӇF"tAf/i W-A\IQ.p׮$`oJ jK3SO2_8(endstream endobj 481 0 obj << /Filter /FlateDecode /Length 278969 >> stream xKmMiV5Y+/-$e/ğAF1""|V .š1wu?_Ϳy"?/W_=_O?}{~>o|f~?w?/_W}^h\>?]zϵ>_m?O?/}~1?u6_z]>o_~?ǟ"Xx+\뗝|; 2>$sNî|LjAE֒ŏ?B{ úxyH/2_Aa8L\ ]+׏?!y@NtN1q0I֪@v\;Zgǵ.k'x=lߝxõ/^g- ZQo'V˛֒:Iy֒:l^A׊ڝJ6yJ/ҸoGH^_ `ZP}-+vDzHK.Dpö$VNJ9 m.Y7 G~Wǂ־?YW1{WoS վ>Uwr1pzE$u7F/0u ¤urk9Mb/4p!&Y$y'Zxt=N{\Fk#BLOktr`8Y ^g-XzpkQ| ۝t.@c- I=+".'wtg%jGw57gc/o\_DzH#[\ 5X_p~} ?&!zzIcX7netumY عи-ݲ֕l - L}W̧򺞶E^Ls-B:tgLK\ [_mr^]W둷;Vmsq'C_zT'J: Z,2f`%VDyHܞ;kz5niky]AH ZqTe ,\u /'Z+Z]E 
#9}kUfzwg=ogrr8?Fż\̶u8ɋWu}ÕǶ$'g^5']d) EEn{V\kQHZWoǐ{=+p7?q-^p≃v'-yN/lNCb%fPZVXdrȗ " ֽ/y;Ip-O hoܖe7`W߹+?7Vͅ^&F/TiČZW 2XȗEbi;"8?xkibF-A6AXֈ˃/ UEmYݫRV"͐mkyCBYbuȺ8_L=LU^ZB|u#Q۵L( ,2Q W/CK~뭡6us'[Bmj0?(Cܝ׫ۑrW^!/עu`q!.Tj}-h^Z{KZ!.V"[_KKת)yQZqmQ+UHKu=0.Vj"/Sv{Y8W+|qgm/ HoG,2,Bcۉ#/|/t0.nm&:@uJETceqjȅP vu#ke}H^g7c[@q1y\E\ƺJ^Z7_w |u+gha\+=.l#EtKSb=aLC FK~e͵~XB@kfE΂W6~-e_61؏XdFK""hsbeEZ*xxm7b\߹K!@k"Otp U—ZѨݱ6:6;h /A.qm%8mx^keH\ke"6^ke$U?l@bý_qu5еohs\b)e m 5YyI֚rfmVB:B;;N^1X 7B""| r$߆s/I9D!9D\=+Ax[x'=>@qp[x'$x X_Fm ! yC ?{E!l'y?! ! yCĥx> {E!WuzkAde9zyDD<#(FAXcMg'/J#c=d$쏵QY Er,5Gv[Wِ@&%ξr=D&u㭕Rg7~>k؃!+"(db˸b퓃H"cc;d\$yw4E\ ?og$Y畤<"zhaG-0Ab Ju*ֲ %њ$$\DxAb?HҎ}%$ڟ{aI}7Iߟؼp{O Hd II$Q$2$ZDfDOeHA"JL+HdcI%QV'_~FfIe&Q&$$X2 qeA"N,<2 'QD<6$@ǎ<KyETRAU<"mIt+B>GP)44ZOTDBx8Nt!8'a_<"JIT^(%B>ǾJFm&T )`DTTR@;N!%Q~Qn*d[FUH?҉(r[FڒW8҉(%Q鯐vE G:u Q,a5BND4꫅#:m!H'ޛD5BƑNDi9υ#6]8҉('QɼvQz/dD_H?b#S!#F@ iBёIM!'S8"%Q~lѰ*d_9~DlGdYWz? #`p#'؄C G Uy;+~lndA&q%h$odѕgq}723[腰˾vod[@~ldAAB%2l%@5{>Fxly&"ǦAI%l -!L%l 9{B!aO%ld DD@hDžJ{B!S%Fmq!FPHU \{B!]FPH_V5h{B!-[%лmۆds{B!]%mAFT VUFPH'8VFQHMk(ʬ͍#4-*э}t#c5%Z֍#4(걁GJ6ЎdFƱq¥Lli62|C9`pg#'8y$FG>B8شv62|sV`k# tmdg*FOpm#'8Vf G>sI4_WH;b# Glļ_ҎM#F G>#I4XH?b#! G>SI4yYH?,d E&8r͘8MBڑPpk!㈍MABue9ʄ2-e+CYW>BM/y.]~+;EA$vRߙ:5`6kdl|fF&={9ykˮܛCx9(KwfsپNF y:z # @t1ro< yǃ H,z͢|t~3B#+x 48%ܾFv4; Kix5؎x4tH6Hp6 aEc=Ct7rm#'PtZ`a5,j/犢a7K#4 b[Mz.6*5F$| "P2Ew4A ʴ2XT2«R\eиg(F(W%6%l͡{Z&o^Ɔkc~g&[l&\l4An \E| OX&;aPwqx>u)ôNz}9ޛVhꈎu/l2Y\& 9pYM=i]|@`[\q/KY%GCΪ,!+4Lg^0&4w@B1͚=Y);4I1lS%\0Oc>|GTKgRN L|:AQS)-)q"s9y[Kt3$*mͼ^WI3oN44֡gf@fIcj/X|+2LK4/Xi`N:'#vfL-J/eDv0ť/̫' <)Ui7p&ޥhҵJQ/$|a8]47%J-ǕLc^!]<.muEJ E} ihmiZ\םBM}O5nFOP*O9[V盖<*pByׇg_M<;n HK[m MFێ(xcʸm(Q>Q1>WMm6/EuH>Kn;| H R~6JKoċOMG)۟vxוߒ~=d8"`;X"EXKqŃi!`x0<빘T3J#0cTnXXN,8b<^8cxe0c-}8cG LkHƒW?cg\^so";>kȎXҘ9Obrt$Ԙ bj찉YYq -;~H0e']Rbv` fP

ekW5礮#h7j1u/9UGᆥbu8uטRlL2G6 ^s"/MM0uTcB*մݛ M[u6#77oy:&Ͻt@iwNm sq GO#CCjht_}GoG%IF h5@4ŒhXWx#߃;@chyB`!za}0j^9T~`BTˇD!46cTB#hNE%/ZzeQMaQ :&#ZaR C* 2)$ $29F(?'~*L o)`2(d+B4S h Q89~Mo -Nd8f>w6S$_X ޚ ?@||u")R$HrDƒ{/!DRL$Q$u3E2XU$z 0'Iھp$$$Zh! ]-AbL27H,I'B$6$툍ThI)HQ<#6rL26 &vn  $J2D""g(I rBZ(H2Cy$ qH{#0"o,e!?(G- ]IRZ2O͘_I$< BQ@?+#(ܓDgBz(AU<"jIt+'B>GPɱ8@ !DgBxN3s!8{'_<"ITR(UB>ǾEm&T)`DTQR@;2C* *jC#S!2VDBNDQ2DBƑND!/}#,hX8҉(>d!s2t"ʥITR-Df G:%$*2t"ITr.u%H'T/eJڑN^< ai~ĆZc"*G>~F!yTҎCJj6M%'dFTI?5*G>.W!UҏPGyD:sIԽd||BJڑOXCmG]`Aj!lVҏ}CJj ®q%'}.JZIdg:u̓^I; u+ơ&!Tҏ|BrJ%BB%'$(2JZT0|z訤y!cӐ¤P*{BjJi**g*{B JP}*{BB,q JƞPҩ*{\'*JP '!'K"Y%cO(BVm}\%cO(BmWy#6$d!}`!V"=+{FJ=f!TjV2А3D}qi%c5SD:B-d$"%=H)pɅ {F$R&"΅=4$N@!u[pyDE(H]HST2z) {J$Ҏq}!:.iH_HS(d+HуBRC!:b"HK". 9Q҉I1R8gIBOL!'b't G>>cψ$-*dD(1B1U8JѫB1U<KaBOY!'bx- G>rI4LWH;b# Glp_ ҎM# G>Ibf1I?b#G# 1fO\k&GBSAb248r͜0 SIڑP4kqF$:r6֧u(/B; i2pԠ7+N0Ht  $WYlNLX^4"xeV&4-;L6Bu>i+Y@vQ,YG\ ,iiz:~8@ϴ,.: \Oh MWjJm~9`V4v"-E ~6=dM!°M7gM⭼EjtbKp1˶>&ms@Q lJ8 OK>?|Q"G,q| fcCX >o>a'+wHx(5 }>!2BA*)EnM4FJ )8Pi7|Be> J}b&\zO{9 /Gye`­=ReuLRe࡭aaI(3İhF^0jVAl$m—>"%3l6:qyI`H\2<+Nm>:D5?#4*0GA#7B#c1B ^ZP7.>TDkDv y%4˜4-҂ZrQ$.:Z<rUگh#RO%)c z#  lAưD<4́P9ڢA ~ca Q/> mş/s d"e[lKlnζ’Hh[f; Dӟ!՚ߋk3 鿄޾W,bpN0Nxn9ԫ\8}Z ȿr7rox)_uܱ%Mf'ƏŐ; җC6X p`.; <}́Oqp9i tؙDh^'W4r Ve!zU?N1aы2WuSu1^,)r4ďQN'2"FBVplK^X<9Z&0>Z&TЂƇXQ7JS,):(m 5i[uxTG,yjI:N1*ži5edtqZ N4gAXSY4d4 c\u5%kMj OLs{.@Z£Z,k} o0h7.|s#ȏ=!\Kxd'I O8!XPNl&+ɮ!jt^l_'k>b#7[ʆb#.,*E6eQIxfFUc#=&TW 9"p!%)H>"d[4Dra!' 
9 %b$$\xD x-Ab#HҎ %$ڗ=0$lIņ[zA"3H!'3$$i&2IFhyD>!)I Id_IfhD!Ob>^~FfIU&Q$$X2 pJFdj=IەkyHSBx(M'BqICQ!}᪐yEҒ W{|3c!mٳ DBzF!l{TҎCJjVM%'8 *A{i#:ئ< *AKl# 62`62~A>G>FڑO󸑱ڗ,zxst X+Av#7SJ"9ɔAQIƑOdUJҎ!$'r,H̋%G>sgIOZqKҏ|"g<]vF%Glt_LҎM#' #ȁ 1ÏIOeLҏ'.6#ȩ 1dfN)$H(r58b#bl~l9dfo~F 'GlqPN22  EU'юsNo'h{B<As~lu`]dQQIlH?:a/rQQI(ȟXiâ?C2o*a=ß< +/?&N??-+oc|N ?m6ӮoXza5ȟ!Z ?J82BRlSD}EdEuq!)N/"툊#2^:IS t N%ů$Ű  ,tx6B:<nJg4]&`ͲFUZ02V,?Һ@% #s31KRih4rHrRȃwgYCߜuU1]PX1@Vy wX '&.?ULi|UzwVЗPʤU`⋪Rf]ePMC@ٓz*gҎ/fPA "Fh72ˈ}^(4΄1^ sj8tаm(P]1Ϛnjoym$wKbWoE+Ͱ6m,ƪ_= kܾ3ޟM3\Შg蛲.o#Ơvߑn~OG4LaϾWV(7rkȞD/?u//7FƇCjVɌ=cЀ1im5ilƞF 6jؚ 5LF#5l백d3Ѱ4l\d^Ma"3a3l7fEfI5l; s&Cm3}@2Wh?h{CҬ*j]ڛФ-P!ҤmRzhVI[u6V5dGi;i_d4M7'Q bEDwE4M&_aD#a˰m!"ƇXp-?79hldC'1mWz#X)¬-,`'w 0GKߗ$#<$S0B/JLGr#"baB-)+,V#'X60[,0&t0Gj#XliCUyrIy24#{S2^yIc2F KOcA)r<vB1r@d( Xh`Vz##s$-,O">/SFM"] JW4Z.{Sewx,r (/C z djud R{ y.2񌇬JzgG&2`]qa\me -䂶Nz@R8O@v8bZ!ʐ/ K8b,W*H[_:͸Pr\@J9&ZyR1badsLC9 HrvĆDumO^JUEutYxHj:Yd2jֶVd<=HSLGJy@kg̛2PZrJj`K`Bh_} -k ǖ8'K\/CPl)tЖM@+|\]g dvFqϩRoK<"wȭ:'%axBɺtRg|)I- 7޲2P[cl&27;О~+`{v / ;׼BeSI; W8`K<1 3v6{I07a ;aaء T0j2nfNs~QY@9/hѻy9hʇ2/+S+'F% _^Ok G?`sb YaJF0XkZ 6nGW#pm3nIGGEt+#ZQ <7a[Q=#:R˨BJGR=K* 7>)(CuKnR\*6C8T2_Upu} 5H$e]$'A%ѽ ?b$(HZEcHt}x6Ig,HL!lTҎ|B-JƑOsTKcPyvŽX% u*GdCD]JO?'1=4ؼ,`d, Fl%7ЭdAp!lW2|BMBjf†w%8өmDJڑOA_<65 ~T26*G>!D!JTРࢂm@]G%3dt&PRIдT2M#1P?SIT2"UI4UH;" G>aI43VH?=+dD%ј[!'b\.F iGld^!㈍K)Bڱi,a!'b&1 Glc!'b2f- 3#$-df&$j!H(bqFL& :b y1$&~ Glp!󈍜@SIƱֵ\&#$:|mO(Z?~GTy5Oҏ#g֓#*r=H'GBcI#*r\?HL'GBAξsIƱw|Ͼs;Hҏ"}#*!Hx4$GB^Iw|#CϞPoEvdEh9;ŏ#Hxv$Ǯ9;#SI?B>2#L!d|d L% T2B>2|#*-%}d*GF! #S}dGRb#3ۗaՅ)>LT+\ FNmf%xV(݈GF<+j*6Oh%x7Oz%X 6+F|U)+F|u0wY6``mXE7FTu5ވؕ`U߈y#*o]DGTpG+]o#3s#.!•`vs o`Y 2syKvS@Y$2؃d z9AaOdU#wT.Ε ̞z; Sfa ʸrͺf)C??I[/c -k0~zlh: ̱8A^ShNKc9VFNOgtFhqZazliɓSl`\4k4w%ŖVLodaK+SҷF{ZKޟ{>$;7WF^/\Tf duqB9Rf_'Kt`'ȁecK&Уt7 57NJ}";r>1+e#rn:,G^g'X R F6rN%hmr('J%p`)ߋ.-ɥ\ leh5\g:T.ni6x-nN ddXg#l`3?y̯˗%UgP3nZ~h?ca^=*ȇS?~>|pl"Xgh) J ֳZbU 0k@,B.|,h? -G(q @_TWh=>ܨgmPB@9@ZHP@)@dA( D.@Ved/J " %*3UHfpDJ`Z#Bq>F?oAO#F8Y&A N:%msUp8%h!bA1)'r:&ݗ8&= zGCx8i?  
>Bܶm]Eylz5?I"QoHTI}]Ş澲bE4Un%`*%`x _ Wm*O&`몃0}],0}]e,pcUdN2tmU.`J X5/w_|?_@7vP@Wx" WG#}cW뤀oj$`0]l){TsjoPw7vu hƮ~]c~aby]]l\]~DM>kcխMns iHj?]=澾^o / ("(m7:6v g* h{J'Dc_%H@]F}CQЈ$!F)`n;{(ZPRv$0=T> (*ow>DmgMRʖ 涳*TVmgVj hƶ,gKVDp (+o;{ .^LM_}CXm/L@ bc2ֱ,`li].PY@oK&Jm!jM@kcC=ڶL|*}gH5t$$.`]rT,з=tK=L;K^WxPx^@0QP&_@0^Rз=f _1{P~u_p(?kJ/LHqHGs5#}cJs_c%F]}cI9 ƾ|O%hmAsc)&}cscz*}c1J` ~c,}mcc.ǰ]o1@3} d`1_@# ƞ c:&h ~sR F3}sOrL 'IF),aN|m"X& h*[˘qku@,F0[7#վadn[n>m2Rɵs͡wԭqnF[d#׶m;yp.L:*?̷fޡ50B!F0GT #?TNdJZP"Vu .6ޗ6Ð@a~hdl)<2 안` S0m(mΌךbURlF.̛ܛIK%b䡏 R_^?e yhCk F0ӄ(rX$Fhc2dc+FZemv2X:#g18֡ȻнƛulxJ7fK \#b!<6#0*q/?Nmn-ֶŨ-\t_ߞ,AܪЦܑTc0+2u'^6bx.a%?K"mjjb /#/XP{ix_ ѧ7)Z{a;eu;!b+>vLLYl]LkŮ~,޶1Dױϲ:??aȦ"xvL7+eҭ-[k#v\4LWa֛ٝ xynL}YmqhCJ4 MtŎ{ȖԳٝil|XnW(-چ 25a է5M[ajb$ɐF+)KFł *]$hoު:̰I+WV{+v+a_ޝ=}paeTNB]ue%R{/#OPI|#.bA #8Iv!aE)^0W#C[#8I#b}g(#1Z!Cjҫ瀊{:y(z1Ж.2tƞrCRJ@M~Itltm䯄>chla꿊ZȲdgKPd]\ 9Ϡ EI&E([%[2+UOLSe+1MJ%*lƴW2"᲍C3lh!X{GXx 9YI8%g ŚNHl;koeޛ?KЙ&e dv6lzۥQ |vf&}Qe n2Bb@ZC;1ޛ?*P턎OH%%EKZDO);H)G7nPMw_bOڱ5T)$*8}m;f`HHS-qh% e^ %Ѵ>T4(Rq<Z>hɮe|wzf@𷙻^&͐7ÝA^XN=$BlZPlR VݗfNtkڏV^[q.4fQ|JBx۹Chx2v4K˩v\};K>RDZ7v4 yЎz&.Z%ҷ;>_R( 4Ji` X4S`WLcV[q0f@eT Fdkª*7ZyYziFStXE BcFj2*S8׳6y|BMX+d1?⨋ri#XX50c*?2gjͨJdāw|Wy8ʘ*&=k8HddnV-FGq$[px+0d_#MFá(j*jAMZ `)\ gleL^,xZ L-/cP8mHbw̬yA'k3uGoQE(V1Ӌ E SFifuV!b01yNTs##!Ĝ,2~-ei`XA4ghd|iaOub;PUO*FJShjb5zq1_F0FmӸcu3܌@c2F3J.vpY]u.aoK\ -}deJݎBJD=& {ċ +˚ͯN` J``k| @%p0rm6-гBJ`P  ł8Ѕ5 C%{(zSB-qD6)iBJ`Q;(cJQA*Gld$#J>$ p %~)J#2",$1H /HPH^'r-Ez=!)M$=Hq#־7????6~uZc=}X?uZ$r\Iʞ Ҏب{H?"Gh+a!3 ᾚD{o!}޴ I'\I´%2B(rBg%Q.VB~F䏅0,yh媅c7B;'Q~]sB'Q*_$<#CQ% tn)dA$:#sT8j2#[ ѯq,$:j!<&ѩ\<"NItR/$dA$*.D!cPg0l" *IXs)OYn ayLTH?҉(72-#VIT*D -#*mIT+dDTWH;"*#JdVҏg!H'zDBNDyQMp!H'DBQ.dDēh^H;҉(2t"IT/ B@BOD# c^J!㈌$Qߦv)dDTH?ȈV!#P~Dl#2ԯK^%U9 +iG>6d%c 2 YB @y)[I? 5w+GPI\ɕ#PC6+Ac+ǙN=$WҎ|BJq_#Ơy PPI? 
" v=j<*iG)H%4$8)JPo)2JP'!)ʟJƞP*i{\Z'!z*¨Jd E ' d Eh˒H~VB[%m6B2WBy^% *GlH.X=eb%sO(o,JQk(BYe#4L"h%mo6+BRDBu|͊'T/fE(H_p Hםm  qu4?(oEx(2Y^ IPH}(dn{ǹ}K0~l8ր"<, i[^-O${99ȿ^qNB6+sEa]D&-$R)m"Y [D.IRHfE2"hDұ& =zMCRrӭRmP>="A+88B|<,c^ S]%NpΓJ0@2ߜ@9H![m' _#NZrRͯr;:B,w5]J:82 wY,=Nzy—FA HNfPsrE]}؜@H͢[dP@M 319mo_lNYR'sힱ˖dٶlMۿDBy6'<3pb4A{F,Nh[ڥ]g?9G9Ü/ 8=p<…D,`qq, e٭ 6׏8G4 \ճ#a;` Vɻ%p0nwI[ hpȻZtKaq#2⑥[o(xH>N^[C1#/Q[~ <}$pwόcG(pѰ[6򡋏G̏Lrqn0}eӽ=Ӻ+kr3S~d]_d #Wi[5HvkSDbF=ݚ[饃d=Zb]zs/5J~vk`[NLvEv<݊5vkٽmOv sC݊Bv,!ܟS/q26R&uc?n=Ƚ|dpZ3 Y%»ӭ ݪcմ[4v?͖ 2ӽB}I|.iԗ>=⛢@խJ԰ d`#/.$Ka.,9ĒZ~y0.m],uàz7}$|~Ptۀnbΰ,,J~P.;VO,:yEnUjLORغ/Ȗb u]9pUvrVpADjz(hex5o\)|dƆu'ZT:OZ}ez'9u7~ ``#(=Pn[9e~ce/4AO/,/ĜBkfrU(c|x}"t({I]/7%.@IHubJlm/-2)`hFMBO_# -;%խǷ-lÇ A#{ی>r2`+ҷɾ2BGnN?4Knk; oZG'floCk<}폈ЫgOCQzOZCcv0`lsƠϋ1 [?B/)VDh!^]~Xy}b[;ުM3O3ayVW?фțayV'}~ء7Ϟ>+?z|?"9Rp=~^ z c %In-ޠُti/'UI{ࡑR{nk>1:=peеY H ~Ot{vcVω H,f7:/+tgF]t 7Gs}W(ћ=NYا) Fߢ4N:d,[M{6LI72Pn2:8մɉׁf|UdO '2q}H8>[A~Exr!iXPv{)f> OthQðX={j'0 f2U9NɅ<( -|\I\)t4/ȳ/\հɨ=2㕡|rB3<7MU毥r Ioj9奞3I!77œbB:)x˜hB y}8ɸLP n=5b9'oDѝ=k{^w~Y0 :$GTS󲢥[,HUi#cև̪tYy2ScFIKͬzI֙1I?x&uhפ -*ӬITɇ xۖr/ڥؿ,dr&$y9j-ƏϏE 91h|@ oMjm}c]mwKcuy=5K$.|͌$:94h<; YwOwwwwo???&?8*rJJjO+ϫO;ϻo4?F?X?j?|;UFSlsssNEBA"TJX*H[I%Q&]M (L03HIeH$$ DD!vA"TOp>"<$X@gUy-T5>BثUI_hdE%Q{Y%c (MUm#*k@mu U2VB.XI_h;d `c%}(Qk(߲dV2$GtV2X3Q aj%}|dnSC ɶFѬ[\#-}5B؃\I[#hedS]erݺ mJRDx%sj5/5B^IfZ+֡VBخ_I[ChdPBA%} )BPܶ)HQI_CPWTrۏߡߨ`,D@ LM M*&J!TTx"2-Ҧq*[_ \v#]w+OݙгLpF]>;N5?Oݔ @SFWg`dl1G7Qan51Ybtw k >vGFp!B٬i,#m ?]; @h&0:X!=+;ubyo/ѩDF-'ߞ7)!FF]zQx ?EF8Tލt807r`. 
]B8L ˡĿ:Xujjnx1p,<=5t2Bw?ytf3r,:[Va|ho9i5-x\U#m9cұ:v Zm60<Q t>,t7r -`Y#X^hds1B2l©tG ,Dji#yV$H4#EcAL4#$tP/RFM:H+YäRO )#ٕ²[߂ifqeZ)[aFyߕukSEfɲOɻ\kkVW ֗7g0#Gݚ'JHZI 7Wn$?m9e* L?YmQwf"}ӟs$sma~^"ئxQfpPPbSJMFU6 :0͠Iipza~Ly*ŌΫӸ M ~ wݭ*A7WpUl숀aqWNA,X[yPdiR!6DX밠?xaǂId8`Vk ce59թhޢE3OkS[efwP*hiawTɭ٩e9h΃];=5!W b4˄wa#{ F064txB$004/: \IkЕQ[引@BYp%WpNDJ+ɒ@]>!eӕLI^A(.CwI+ %< %t$+b(JC$ MI+rRPR_`0f4E~%qP\*ca`"ؠD$T2Lh!*!SJ$ 4drH2dYE%7 4dyMF%7 ,'W2M$yM4F2Bh.RɈ!0B#J.n"2D3Hr3BJ&$o Tr3BØJf 7 i*b#AYc{c֋E' })W2iQ _ [/_HIKo&$庉%)_$H#2Dr>Z$>I?D܋"y{Z${\H!k IYDry".Sdns$eɵ\${'mnԽE$ܣH>&ҷQC3Er_%){H_ﷲ /QID2ɈD%"F$# HJ$%ID2jF"}5HS$Qm/IE2nf%dNR"zI` G!r~ۤ rVH=smR-H&E%$ItlMm8&9HmRq‰͍$ QHҷx"KAm[GUmfdq&Hp-BPYP E$}:xdnDD,IfFܒmfdN${XȱQH,mD$ PU3z˪*u6)DBoDM'QUQN v!r'Q%颢$-}!s8 : [<mRDB57ҷx"$N uc0xB QH)dlF$QJ!} (ߥn7D55BPdO2ր"ۊ΋la*dEvB%QT!}tU\lJBPdX!s (,H42ր"{ےnGWXKB67͍!L>BPdb!s (1:# kD핅\kDmIYئFBںyDi%c5VҷJ65y[s+kD]5nBQ\X#L.˕5&J:5H]Z+X&z+۬Pw! !EW2Y>B^I_C/]mVmj/m5@%s=ׅJ*kH JuH)DjJR*k}~Gc 'B;R%mRHRؖ7i] dn9PSI *۞!9Q!U2xBҥ$R7UҶI!T%c'*J6)dnbPEVI *[Bm'r5q 97XeY?}wsh;4!MYF6Zf`7}[':9WY`HY,=ou)KYM,``ˮ*벓 ;{zlrp-7`OZ[`o; :"{-[c|Ns>i oC8 '2)SJN 4FFN=}DRVFXNhe(IHyx ,FP ,/=dojqNnv`Jҭc{#xD< C5ջA/E|}yʼn|Ά&{z6r|uXN?}@j՜L:q n:|!ǩxrteK'G#_R'Y'<:'b'<5UOe o?r=N2w<3NpU ̃: ;Qv@f0>$81⭊` l 0:4O~LZ}0L%5ˡ3tG1BJʛ)1 #eQ=d-q32?㇞ ~l p T]vC&ɍm[1,L zI&&wa:2,ߏIG({URF*#5ӝ霎Lzw(의686CYmVH_pr6"?|]:@\dO3}f6Ǝ`"lo $~dG:0nVxhb"݊otq29V_#/wn&ٝb"YdGn4L{M$?2ic@ PCP?`Ef3h9(z4;k?"Pt?"F}^, D-,775r"_Py9]P#:"-ۘ묚g=֛݂X Y| YwrNj )5f@>M/T# Cm}^h@MsT'y#pl CLΦ 'QbF66wx7;S689{Gn.fl$i4f''f=ދlǺ~';BOzL̊~[5 9ս R!}"jUrELN:z |UQ8" ؁lh]ZN/(=P{~(ef[1\(ؘ~l^!'lsuC?-IwfH4ˆ;'p aS^/{9 W .ŠebygaP=N`r¦ŏ\`ce}:#tUTL6c6 hM'4ASG?䤡o'.:8c m8:ިB*a4:k:=xZ'0Saӭz1/ٹr<!L@t;i0AX#0Fq\r$t;N0l~v'6H;euڽ Ɵ hv⋼H +4;d: ɼD2 hgwi?Xx8_g5qrT㞏 L6='hЦg"|8(prѦqR )BЉ8wM 7qB HR4'4 M ivjmE,8i6NmhT8FUT9D 8d,% AB#Dʨ|rZȍ" Z x/pf*̉|.m-sB? 
-/[}|6= Tp  C3PӁ8ஂɽB u{N@mB\:Iu#tj +Qz_CO]B\+ @Cku@ W^s!υ^>pYP+Vu!/5,|-g+C.e]+|+ \*ĤgB\\ quB\] T 4B\m]  cR{!*Ҽ[Bos#$3h_ {oo'@Q@(H#HD%I&"$D$DVȒ(jKN$$}E&QDhXm7HDIA(;"$փDDDQ `=p /I6)$JIt3WM<]ۤȳd7ę4έIt $:#stM<3{ ٿM  ad"*9} M0%RX U AꥂeT,O%lQ%} 'udn[W0UI e*-CBdlᄲ{0XIۦ-PB2mZ(Y %Q aV2pByBdlr̅0]Iٕ-PZ+i[8|%s '/J67T6d7K8QI 9 am*T2L!,TҶxB%JOTMmP٪*Y%}U2]U*A寒c'T>mʐujYz˲ZmRZ˲mPyM a)*I²u%(mw%c;өDJOd_6 a{@%}'fP& aKC%}'Q'*i`Fc'Q[=*i[Jƶi6T׀"\*2јS\)=@5^BoTI[E-U2ր"ڟ aT%}jU\h*m]5JPDYU2ր": a[%m6uтW*iP_%sj,5JPѱBV׈"Z*+ֈ"Z3 af%cjMVJںyDi%c5uVҷ6J65n[[r+kD5BF\X#hG.-˕5J:5?]:+X&Z+۬Pw!l!EW2YB^I_Ch/mVmj/-5@%s=ׅ*kHJuH&DRJRk}~Gc 'B6R%mRHRؖ7 ] !dn9PSI *۞!1Q!U2xB$6UҶI!T%c'$*ZJ6)dnaP>VI *[IQ" YjWÚwT2]+aRIfG*[D!BnR" B'J6+dR" BJ6+!S" Y BhhD70!ٳix[L{Bk՗#8Cf4 w8Urߞ|u9^ tO2,zu]X.6ovB9,T[kޞ:i+z[|yƷ}ta{nT,}Tq5~{.u6p~V6v.=iA뜒DkƳXؿ[yI~ #bc|^>3󺩹n8%U+Ѭi!v}Հ伻LxX#?] MQVU9G^:0 'ǜzbw~9mhÐ|&JV~|n,Se%(=Oa8v ^]RonWhWi{o9j=B%8ѤcI;<xtiIG ?:&+RV|1,%:*6hNg3C<"~Y&j5JGVѥ 5ޝ:6:-X7Vn7Fto&d_ߤ~@f:r}0`ݒtaM2Z_;`1SNPx[1K ͻr4F%i+cG.f=*+d)JJLXeyQs0_=A.B >kT!A2V%͙dï4w%7$H՜FZ*j@ȵ苋T0cOIĎ:cNY8$Vf5ROXm|S^f @r9 ::7RM¯"yP0M4D*tQŧ˙ RLN:k aI}*Ai(ɍR.M,>6_V\TaT,Dʌ$kJnF֫,!ދ;[ahKrJZذI#tAviA,~~DR#"rTh>%*AO밨k c8l|P@6m{s FgZE6=^6`Lo xRu=UJoa X7?bb <—bӀXsz x!Tz!3B9 멧$A߅7#Ao|n4TVHG&#U_]q&Sh1Dm2ߪ 6%ǻh惶#O?Fd!'xۈ`UjG2r.=FĶE(#a U Y~4lŊSCk'Z']_v 31;j3<F؉jѫZjKρCuc7e#lj5;5ZH͹Y{ 7-^a#P?eeqsl$K\N.N>=Ж%UQzRKl62H=ۑn#|.f"p#b®ŶH_g/կ5ݲn {7;UF*yKaIcz,&EFź-&?Q#x (B$)Cp^xg,/-@agCiW:T"݅yk6g5FŷrҊ$y9Ԝg.:+Pbw40#ƼN*iiNCL+ Cݎqw,gH!KÔE:"#>=J|Y2*,c~/=F :ܑ2p.=V PuS.#ρj 4C2ȁO mˢ2! ~j26s NKhҕZ8ۮ%;;!twFHuŻKNp ? 
¯HzB#czPhVi$}„JN4B3E-6I2`[NIAO -^\CRC^LOs @Ez>FCIW#.=:][k\zkPk0Dk( ŵBd79GiJ=Id#K$]BnJɛ& 8P%m!"?FuJCmTV6EJ#6Q qW#JEJs#݊b=Jj?9yJn2٤7P 3\褲c7UtgOԱ%?^yd `#A/5X49vD =, /RX5c ?c~m:&X[\@<9/XS FX`G8DAEXVx9JD_BNXxrcX{.`@.^[_{sۉ4\#9rXGFűu|1)r$bt1K$=6;ޗqӹ>OU)ޟpEe'!XTL'ڵ2 t1%\/uQ0ǥc ڔŌbs\Wqիǚ2`HAhs\1,09~ GK `鎑IYEck<_,'%'5˔W2.v·/4p%;I> [-aN>V\<h}I%(WZp 0\~,&Ҝw- zcH$7ڏǦ^n{&7]c O] VHpD 6 )cfH,z Z%Jjư MH N ח˳q((Ԑy(`]<#N eY*[8V^v /OXΗ x-<0ˮp :(Xw&lLEc֕H|Gy֛FIW)}])溲+O+!UJk%`櫀ʟ%`^x% 뾮t_e W^N&`J0}]̷0}]YL֫pcוaN$tmו.` 2/W_r0w/j XF(+ګUںvRX7v_BS@_Wxy ƮbQ֓ WU^{նU*dƮ2[mU+`,|}c^v$lY@_w? eW 5ucW6zRY9*ucW廒'`n*0ˮ} Ʈ(- X6vv0c8֐NumWEdЗ=; ! AR@_vC)`.;{t$`Kc٣m&;k h˕ƲGOЗ+Degl[*/;{4?0=Z˪ѫ\euƲGgY6֫zreg河k/lXlb]⫗1 hM%,_-Q4{I ek|-`,[{&`mmڣQ\|6ctuꨏ^wu#'`r}ۣ빀^wN'`ku}ۣA;{ huW'xc]O-eor"l//{{40^_@_p-ڏ_7(GC򸭗]ʈƺ0I_n쒃$bn쒝0ץ]Է0֍]*.9Nc%I@OmR0׍] )׍]R溱K0Kz%+`W_BԚֵ]ƺKz+`4z (+7Āug0U5v1ugH^}I-PY@_Wxi: kX'ehG ՗^} YPZXW1CT[@[wvIs  Bq[vGsR 'H:0.tj .evz%N@xc%#O0Czƺğ[ 3ug^wP_@_wv0%\?cceiP@[#:#0֭] e4$Cs]܏zvn2H@ƺ%#4 huGc#|?{HnAB6%d"f' X~rj9ĩ{fS:81}x'~?' !p88yS9r8|WI/=xٝ:3 tn_ٮ22I2@ƙfpޣZ"9i6ɨ6-Nd缽?"gN,qu ~K`5:* ?,'iN DŪˢ˦?VU?WYu.;ĒKw8d]jg"\WE>8N{e%v;GdgMﴒ/Ԉ?rQvͭ jA rzny8%GV]^} ~B{!H(3^8l}T .u%0Em2c%@ #%cyp`FMXJ;>&}uAG[ ;_A?_5^[R$F;*0=3],ުOGdVSGFj:xL|f9iÓG%.>(8|Q (րO1yH9!9Yȓ#>".>2eC%X չ#lIͳ24A#Rίq<+*4=#}Ӽ^zyƻH{گtc{/~=y䦂N<ɉ}pux4<$zl[RKyYEО G,W$͋3}^>^Lg'm[yw"Nk Nݙx?W;yKA/(k ⥄W68bÂD}/,AY f9+ xW'AߝK,Nn,Å $OsV@N䀾+(%Ujl,H5JPZWL'~愮/(9B*&' uNQ9ծ "% }fc5bYI˃m♼,NC/^-7&ƯQ[!(uTбԪkTc}e@0͂iXYU__XQX:*.`L?}uĩ5jp.n_qX fI=7}ua]}-,H/=҂mc"Em 57-L[3['@82LFz<̄8=?BĶ !r"2<|΃{CA/3as y#i WNR@G$ge?GhGƮ.?6{w7P}ytdBj6C65?լz!OLPTEr!L]u~: A;`RVB?BnCi iCE?BʶF?CG1ڳHD\C)H|jӏ֡g,!W@j!3_c8ͦp#Qв {./C0eMB"''X ߸Npq]Z nTtUї}u겅ܬo:#B&ˆnN0G)3[,IO ]֎$ss$iT91 Y˒гc(3tR[L-&$gtrV`!Q$v/nEb+lF\|z! 
[<[`JC)i;%2dY5RYjZ|:(bd+Ѭ&.~_eW[ &ԹY7K.ht|R8K/A!I^ȅdO Kѕ@A䨞-@m=8H /HP@$ljHH^rDڒ/sD$,r-A 9 RH}r=$IE~%)H"2C$2%KIYDrI$)˦F]~I-˸H.$e;iܨۊHn="=-Losn"]JR]oeiww $QDFB(P LmnD}p*2$PQHG!m:vRfF`NSH≨2x"FITZ*o[G [<$*ҷeB638$ xWȱQ,,mD P2z˪򙀵6)NX [H(2Ie$*2x"JIT.$*w23]̓DiQ/dnGUP&Et'$QC!}'"% iPEc'ضQ;;*i[JƶiӤvT׀"Z*1чS\h)-?5֡B^TI[E)U2ր"ڝ aKT%}ZU\h*m\5vJPD[YuU2ր": a[%m6[tW*iP_%sj,̈́5JP?V׈"(+ֈ"1 af%cLJںyDi%c5UVҷJ65][;p+kD5B5\X#>.ʕ5FJ:5-]+X&+۬Pcw!l!E4W2YfBذ^I_Ch|/mVɾmj/ 5a@%s=ׅ*kHbJuHD‰JRk}~Gc 'B'R$mRHRؖ7 [ !dn8PSI i~*۞aN *[#IQ_I4I2"Q{J͊taI2"\K͊4I2" hHIM0I a$LsH~6FҬ߇ {/??/w'6LwǿwP?BG ߖng????}dsq'Qyi=GS qOc7k<0gz6*-Jσ!ɿ:=|볂䳾H1$|%]C͞ LqGx`֟k)IyUY$ E wM؞7a㴇74cs;swRּ,/o_ Kt~O9 9o}R|VYIY,=]ɇqy}#v}{^ o7"9%H,$꼇ʳ䳾s/h$Y֒}/ м,EǓշ&^Ü*}0/ks9;j9"b0;lZ1 N%V,,ejgY=ʳDʳ_\i~FiBuW 3] &o߲msk;XH /糒g]Tgg5Q_l׿V]lѾY[&c[ ܐ~-TwkrY@8 P|'gc- RZKY} YilF{gk=~bcZ{?-;r\FOLT.,$=YAY-JRun-R$N˓{vσ3Ż;V9Hz~ 4Y~l'Ib$<1O}VKjyJ{>+&{zpyRf}V<=ˎN ߅:Lݬzws_w3ܖAnд[p~8^8~|ķu.FGVz"O|<|Ϲ9^^7w;e53. tN|6߾W\r[]p[}_ |OW6nu}91?ub4zoQtX| /n.hLC/ċW#>nkh.NoRƨ&=ݪާÖnGu~ 'F]#jQ& +l(ƥ0ShnГH IE'U{7A+yf$>ƻDS~wGӼ1rv4&O[OX,un \+c~]n10&1?`ukj)a5._asR̎Q$p͋U:ޫU&è+fD|ĨSs$uQAϙ~X5KWqe/L4׋Qƫ6 j;L|q!бsYȾ oe+?®6w,sůC21kۮ}4}e~Kuk]3KKZ_ra̱4?Yq⼲Fo3@Nw1SѝOhp{\y0˿ǗVt Y~jt_$_%&L ԗB{y`(WJxaøM+ ͦWŞk}ýL!1qOd>|}"'J5Āk%iuט]Ŗn׋bܘ]WB2^pcnRHByĈgMPqx^C>x$םmΫ>#Ƀ1W|a2Aۑ֙͌ .?$ݺυ1>5s p]'OfCݣ'.k@<7X]ωTɿzVy/.lTϒ 彛@ky=e)}(W(o`oi^ 6.s?pe0}ۭ3A9,Ɖ!0fpgc'6<hs*A1:K_ɲ+GW~:s.!,rPpo~ '&x?-^ WdzVJ,q*cҡ'~ˉo}]vtaѹyw9]gE39O~>.Ud{`ͻq_,°͡=11 Ű;[f?އǤY)U"VK~I>/>Ǽo<^B01~v>fYYdخ?Izs.[/|ynk|=D–caچnϺpGǰ}=e)|O2Gnvf~Ln<;cjQ oβj߰-*'ՓϹ0~{5ns|a7( ;ul+ܭcc2ߠU.u 99fW奄ݤ'/'f e^)G0sj>O8 cV6w11Ko=k5FM.^cM|qLp}؁ɑՔysfHӢ\xS'|Kɜ3"4C[m/Bwi*6g v5sp "hk^Ő놏^hݦW;_JV /kw ~b^jmoW/J ;S|@y< yHPQ*]t+sg~n$(.NR7~JL:~ܪdrz+c~Y-\x'ؒQ&Q9p=}_aE2IC9˚ٓD&,`54A?h:O|O~ve LrҺݡIԲlO;?mbŔ4=)Jz=fuϸ0Q*1Kcd3ybi,~q#^rPԒxqo/SfRYP"ƞ{ >dHٵ<9Y>=tQ^ rO/{!ۏxهS]}8|.4d|:~kٯn19thqњF|:w?ЧЎ.kpsR[MKd˄sH^u&6ܚfeSvwM[q1O.gڴ9|=GsRyz?>G4, JU_S6uox&4ZS}Kk#@axZbFQtM9$g}w7>|~osIKoc>w-c7s`+"}u3ru7昫W~U_T;Tϧ?kcXg鈛bpw ʘudWTW1^qa1/cK~okO̘ 
O;N6S^=Rqz6:uTzދ)GG]SmN;"c^DŽc8TtIc#tc;pMsQּnY9.5GS4 Ukޏ.V|t}<ʝ{u~7k6=ruçqGV:y珵Lkm(I\$m:rv{umLmYgJ^e!'֭i)sީA6#{/ VN2[}瀾(q|{>snygj߹еn:v:#WCeG9uK+s11>esC)ܯչO(o~Wpȫ%f窇Gs>ys^D1~^E#Agϱ.Kť#}ϲs3J>ws/K٠~.=E>;^eھhg{iSyGePoywܡҢN8 D1K|;SGG7奴stV 6υ=/ҺT~k1/޵,~ͭy/QզŎ1~>=to_y]SG,9rctu{OΧ3fՏ~ =Zu]"v\v̪O_}KU+"§}p-)cT;|Y㋽poq׍C[dMqR9 Zk@t1~>4zΡ?ƣ"3Y*S~>TՌ5Nײņ>(#bl>Ng;0~:~$zY)cZW! fjN0[qmVᾋfϩA`:rw~GpvuDԑOBD?s2o>[͎2_G~nHO1wuPwrfϧfzJ=ⳙ?1K{wQ#"(CXzYs3m֖W@߹=c|36dfjJ{xnw*qБΠg:❕=u"cVl֨X]QG2V_gB.x^=:gmϦd~p?YeluSW|Ux,֪Q;0j֫׌Cэ~p*\*r2_\s7jKyckИ=X,puWE{!zw\uuO {2 ]kgϣA]GV={Nܪ:[[P,d#G/\ON.jz<dѕ 2fO{OgϦNyVb;zrW߫+]ԯsW.~8'y?S ƤzяYh؏Ysr*Ҭy<լQǢsv槗P<>1qȷ)g {QZm)'ٛcVt٧Qٔ gϣCGn-&3w8ulzۭ{.=k|8v׹c8櫯|Їqeo,y,QZt.' c{Ů|cʜe?Jq'";1?،κ )BU. q&⑟uSBt܀UHEWy\폮ʍFWwP*^YtU46Bv4UHB' 2 fͫQ扦0ë5z*īSGīx^:TBʀiW}W*RS£ں]azcEp☧uz[9Q'M2 R4`W2zlWy*MlP΋2N/U.'bx)),o UrWVADigSh%WQJ @q'&UVن*Ze-L `Vqr< 'BZ%kW۔} *Lx 'nmHWq(g]hU*Sab/DW!]j*WqmUnqݕ*~WWO(*DW ԃjj]%(U9@Wqd;i ^E!*6W"\տxI; QttW^Rq w}"Z#^(ʫOR8:q1W~U'UE`l#*{Uϫ5O_:FeleVvfVlfes)ZYq*̊fţ 3+.Y,@҄Y02܂fVf0+bV񇘕Nbocf 700fV fũaVPg @{$!VhUd@+N[ 2\4)h'VLYZq ^C+cʡ5iA+]fzIBK>R^Z VQ#h<~t@+^`0rB!y< ܪFZVPV?lhOZh%\VA+Y(+N< kh⁴퀴t% 6VDGZ9 eie'VNGZAZ}qRgKii~nlIZyR/ja(b|rCZlj RLpL3¤j'VyAtVBD=MT9Q+Ǧ1*  > B) RgߣX+X+RTDZYfGVꔋQQ>q4RS+NږP+{ㅷ*JSbV)AZy1O?K+ƒV`k)!F@+Xx(00ʬ ejej62A2rmFIL\[Lθl}l2x[xlVz7[[aBal $Jί}7cll#05 lqWZ\Z)[mmFjk+8?  Ch+#Vp[qcx@&`+ock؋%-Ԋ#V0~j%j26LVe*/YñVxbPԌB]>顈>V^3X+akzX+Ȉm< akMq#զVztX+:VkE"ZDzX+YX+旱VHVcl=[+ˑ˭lkLgZa6@֊V8* !ڊ##V~ܻVEuVfp9 W \E8yJi5ggi%d; W?ŕe7 +5Q ,%"D:jq%9W W`WXoA\y^WO ĕ'" ӋB8%J+ίh3+N cfWva֞pW]9-we]Y&+x+* ҫ+9  aT6+^)Pvxԕ Y1"z u%v&PWfWxĹ]aBZh YЕKЕͷ02/Mylti AWP@WPAW]^]aItvJ+@W6+AWWlu+0c|e{y $|Y+"¯Y+Tb¯Di%!U,V/[܆ ⯜n6+rWs+-+g_2{0(TDJHW0Uï18~-CwW> } 6W܆.,6B2 5XKXn,FXbSWx|x|%egWDN'}e 0$_}W8Wb_9'aD H[kLWU⯜+ަ+m,_^eÈ` bW`*W⯘_Z_ͯ<+_|#|.𕳷ZFW}gn |E9Wja܌2ڃ0_0_ ұ+u !ݦl3\ /HOeTp V8]k ps(+W|;PI?#'} +0W<^9-XRى, '^9? 
,GS#fy2 W>"8)$NH^},x ܕ ]e]>Z쮜1TFxT~J" C^RUr8+JxŹ8W0!mxwDĬ^W+e^Yz*+K|x%gsPxJD^q}j.+0WllX%WW:+sNț)weK WԎ{'Z"y%}yJ_蕘#WW^a c+Hl;|-W-gug0+u)v BE5 Q+0?kW7o|$\b|5r(e '~a'݅_M/_q]8 Wp`_ќve2c]Wl"ʳtuJHe3_a.~erD_aB~>~ CWfXOP&'EI02,+*lEi_I2퐿[%Y_ 4  b,tbXZ#6,mT?uKW%lFӁXb.` B,?R,N6 &9,ޥ2 u%ߗzl 3b= 6 c/ R,dTck`a(22Xh~!E (6X?K~ z, `1XE?E˒ū,§r64;?KM,I1X(T`a,6XCPwQ|xm,}R X,`veA( yc,.F`!8|rDt; K* X`yOe #l[[D`[9H?2\XF`y>%I`踏(H %,CAx\ ,Ⱥ#ξ`9X,号81EXc>a,\2,31k!K<&Xv.[,`9RLlX&X`9\I,0 ,DO,Ov`1 b' s-!X %dB,sڂw#) y,.`bXő,A`Q@Wؚ:MW\o_q_|]ŘW^>+lh%fX),X_q47rhu_9Pri^J S?KLJX L ,˓GD`} X't#X؎W _IB["W`W(̍\u=֞zh,z X.o 2mXn$hy(j,X~B% xY'E ,^l,; `φ`peO &XEkE<)`&X!/Ga `}ãiXaڂa| ҡOea-G(OL߭ԕ~ix?),hb(,N >0,^6R3_ b ]bX$ r\fX&"Eh&0,@0,!t̰800, d. uG, 7,֗)򩟝pԏ}gey^`X0X ,*+SQ, ,4 -GH#0A`q[|c%ojc,g^#5X ]J,Ç`YeI,B? e,A`ar+u5 ["32T+v& ,^4RF8k- XzF?\ w ԛ`#`Xn_IX"f`zL |%3 ,u_ hei <dz|,˹㳿V4 '@_ 4E W,O<"+0HSb.GJ6D'RirĻ_ɦW:/[D,E+gW΋-(Lt{8g^Jh(+ip+ި4 & Cl͠aCk:WEp6 X]! "+ץ\g. i+ xAWo#R#+65Btڱ[P9 J= u}]A,]9+aPg󅍮O ]Y/3+eҠ+u  X.C;+Р+AW@HAWǰՕ9%<=+ޅvQWZԕy@Wt%L+]AWx+Jw+T8cx* !2+谘+X6W\&asEi-!W#Cx돐+ა+/GD ?G W. P+!WX \ rhJx+ LE$> K'+!W C +bp5AByW!SmAfp2Q% ,' 2 +,~8 \A~WWX]i[Yxy+'7qg[ɵennE)V@VPV\7mPbMVze_rH[VNh+[9tn­81 O'0=co4+\;\\y\ ʲ peQWepGW⇀+hpp%0Wpp/+믰qOoq%E\ټg9J+Ī WEi+u!?tg??dor3 +% <IB2ȭW,C\a B%H\aш+ޜqŝ*?,\A+B + #PGGW؂qś,7+)Wĕ(WU.] ,5+xvnqKWq%S\^[ cn VTxRdBi; h+?l? 
> ٱWN\l+Ux+8o*v\p/+uq 1ZF+\qXW\ۈ=W\lu+,Dsgqn\ p%fP+ ,jVȻ\aWru$+㲵R~DJ!Wer򮂐+fd2r)|r1R+݅)#D\aq#A,qBU y+]D\M\9 T+7G\rҪ\Ӣ6f1W<[ tu:QWn,uevO+d\Yvg\y+ERs q%6W`dTWW;CvG}efP+ \q^ J8++nW(q}SB\al BZBx.re`\aOW)1W6áz*WvEݕ(vWL]bwZwe4]pW6hvW5]aw® q)bW_u9+U&rbx_+RWBӇ]vٕ֩կ2*+ܻͮOsqW@qWNTrWwr]u)^!ryuv+^ ^~z+m2BPo r‡^ζW_ℏ+@[ `rᯀt0_ -MXWEC'+W>n~%xp_}_ul~IW/ïʆ_A1b}B+՛2r_991+X%_yJ~E@%WW-/HI[j VRꝙ:l_0p+FͯJ+6yYqʹ늃_9,<_JZ¼CYJ̯g¯, 7J+`WR3a}QW~ ۚ6W|eE_1Qa]TätRP F +D炯_|0GY+T+/ 靕TT}rhJa}}iB5b%0W+~‰_l=D'_Q:¯ Ř_9r0+].3EiF=TPaKWP W ܏<+֣M2A^q \+!&rE^ь O,+u,)r%WLh> aefr+t+N[TDlW+Pxs+A^Yf+8lyF+A`^ 1  $q9N.C\[ {蕅UQ:4h^} ʬ%a]F|WyŕّWBW,0C+ðġD^%jyVJo}X^^1B !w\W`Wy !Wlt^q x x} ,J+~+ +\pW6FpW~55kݕZɳ+1`rWs+b]_dA+JNoF̮8ʳ@OlNZ\J/kɿ`+Z8`+ c+3mo?A[z,_am+VmV(Cr’@߿xU'ҁE[E[ bΗBlߎBmh+^0 #'E < ɠV~_j\Eah+Jl]V<㴶GI̒BFPVl>[5<2,q+K5*}0 .[y`bͭ\RPB0ڊWWmp+[w񊸕[? RʶV[ⅈh+Ȣw6#VV$BQʜK[q_:VTX[HomzB ڊW(@[mڊ_; IVq4RmV:ѪOgep+{_q+6Ngl_`n;g[q'܊fVTտN0BVmOi08h+'Lf;lOs+d)E[ٽ)Ph+J~:_m!oz?DhrkSp+Nz[lo:}i[oŻ[ϔRinZxp) [íMV[h;܊̭tGHD~$lFP:[9/6$VfAyPS0%B!Sk+xVڄogm2؊/Zq5# eN%k%VAf_.@~03,*OCmУ `+qԶfS6E*hc+wleLP U ؐ1DZZYYW."PrXZarEck212St}? yDJRJ8jeCD,GLBh}J=DiZ9Bh$;K+Q" 5H+H8) 'ҊEZx-VnaסJZyҊ R"܆%t t*_nVKV[h+ VX[gC[qflG)PnQGC\x+f x} 0&\6+7 d+V0W bx+=͕Zjs%;\\!dœ'抋W\I 7tř8AWP@W?Cʊ4XaVطh2rfZ!h༠e'Њk9+Xpr{k pVe 5˕rqVg%AǼ+*Ί8+3YhYY*[bb3?)V;ΊBLaVfkaVoZZE[+&b2֊cV\iBhjeM(5Ҋsb ieH+s0oliT*%.PBᨰ?,f %VIdˆm|a+˾WbbpC,\;[\>ӈ+S#0@\1aq1Y$x#B}+KqJ+T\YRC>7&W< yPGBI\c+bf0WA:`p\Ɖ1WbbT\ZJO5~r\qv1W|*wv\` m<$cs{sN\s-Vbx EP+vsuy6W؛s?+5Ƽ.J?o+`0Wlc*抃)1W f\ara\csw+K4Mȕ4ɕGw\77ɕޡqWeBv\a+H1W\È]bl?-sI- uy++EeTp?S1W+\z+Fcx˜+ WȕLE,+Q\ re~5̵\s_ms 抷֋roz U+ ̕(`s%0W@ ++6W5+D+c6ԁ\ن2Wn~sś\?u+L00(]BHЕP l.]YR+Ճx۾]y%\=rѢ6W`ܻ  B`\l8DosaO++͕k!^EYWxЗ!x+F\_Wv?WX\B\VbqūW'X\ɥ%qxq   ʈ+LN$54\9Wэiĕ%-H\aqe"qX+w5 W{-hA%ʮT?VytJWiq%)W(rmrE WPvf7D\9@E 0\9_4lb/q=ʵ[1Bi+I$\q%?8ۗ[JMXř\a-pGRˠ_ipe3peBpp%*aOV?c:"p+ Wq oLÞV\[._O 0([A[a#o%B[IP&V1݁M#87sRB8r J7;KWNDWX\Y ++H2 %,. vB ,rFd F+c+̅1W\y5WAp@ +rœc+O~ȕ&WA\9-\ q%j仈+hW\q&;i&r++hWV|R%F\ -cq%jFWqQ+ a;ĕg\ }OS"x} qU$dUfWkr(Yȕ}4 0 5+SJbrśT\a൚+׿ӻ+eހxetv? 
hte) rl`*3hdd+,;<Ӟo*U_iӑE]+AV1 W]1&C3!̮]鸆^"wȸ+aW 8 hu%[E]zD]qhtu(AW樢]QQ̕*C1W0YhueŮGnc{NלjvkϞ۲O,d=+/Ȗ{J(< d]flfm*|hͶ5Yٖ{Vۋie|ͶawUol=G0b(mkl,eFOsYnfm爪 O?VcB}C-s%j[9ּ.ksDnK}=v[ KiidrϪo+-giNq-,~ufwڜC*.&5E-tJܖ{m]>h-oZmpj-j=g 6Z*

3Ce@[GcJl鑋 שбɖ{}X&[dtR?:L,+d l&[q-}zediB}!R׫] в鳆6[5 2cm~KdcniB[nXjKi-KCR#-5K-N켌/KmV[0F7ؖ 5RC<Ͷُ5-՟~zj{]Zb[f7˦ٖ`AI-#V*\V[r5S5'y`(zŶ;T򞗲v[a$j: s05([oh^ [~Cr= B&ZnY6[7gܲ $T?&{z&\iݖhH^qW}Գc-u`i9 OSЅ.V{?>MPHG-ˠ_dKhleĉl-sI@M-K/垏!&[I"ɖR^AWl-u?-tQn=ƂH 8,C[-W nhWpKEt56RW-՝i{/ TwC-%,Q)-=ŶUn-ltLlK%m{nU[/,\^7R[,]іm}Ve雟F[xuЖKMiAmYB[H$B[ЖjtV[iF7~G7Rmn֥PyKie m]-esh)o[$$cehR[u: eydKmB_Gj|gեl)ZjK]6Zm.ʢՖeo%3Z-KՖ7ymF[5zw4On"[ e-d2%R\;/>jɖzu:v_bK}l,̦:Ml駾竦{l.ec.[j۟-R-ױnnXTM-_ i|._î 9+5I^q 3R"]xZj֟,e>ŵ,g%̋D}V{-Ջngy#̉y-h@GZ?I+g}kkK|P\KvZjuYٺwk-:>ċkY"wZ~}32R'eFo'N.|TH߶Ȗ5@ْǟȖ5WoTL$ 0lVE-5rfK}.c͖ 6m_G-uu:TT3#xr[hKJG/.D͖%B)eQl3,eB}i:Mq-Ʒ 2[i,UuYW- IlYO[@L-ʗܗ?Ж|Z3[:γKhK5F[*<~믆7 mt ߸Ֆ>vE|\ڲV[\juR[ԖڲD,ak-՗I3i垹ʶjf[;?:m+d[ Kj7ng^FbBb[j1^ٖg5Rnf[Rcy/'F ʶ,-5#V宕b[*EM-KVn˭eܖԋd1tN3ؖM~=,4RQG-KlCr[nXL-~[Vr=ܲZHwnQrKq$ ;nY&[ۼjs6Kn5`Të׉( P1 -smSE- ;CbPZn#eh1[na҄i-RtKbYNE?"Mdk%# -7l?tKTEdStK.Hƹ-9Vb/:):30ru!:m$l3CձGn,,ni9[n~g-[ꮲRv]9:vvK]o/vK[2̱imdvK]$^t{gZKfI-DXoI.a}_zs[14޲^lԕ+z &[glep]J{iZӑ#͞5[oIzno{c,O5SdjgQj4ޒE -Kޢ-痁xK[cnO}V-o4xK4[mޒzKaqc}ZoY:-˥,%# -TAxKźŜ4޲<2d}zK&[i]xC|Y`j%[c7-ޒ'G?~Rrxr{oak%cT-ub?xl J-$oY4޲[wAjuY[~QxK[[ T +8$`n[z`jyѥoxKҚd}xKN-Y_5rϴ2Q;$xK[xK=?}mw6R.񖌒DIc-utFoż/KZ۰ e|F$@xKtY˭5-"o;@D-u-Kh%+[.NxK&/[kPxK<oM돾_1[j:#:$@n향KROŃ]&%+[2k'6Sݧnֿ`3MOVnIt@ݲz?Ed#e$$ fFuennYF[溔XAJ$-m^GwGNK-IT1Rn[j_tK?a.]=}:2Gne'{e-\MtTTsi]薄([V9oi5ߥ(!)EtKghO2=nW+{7mLQ >%ފ]tK܏nYw[3mSnK/C7oZؔUTl25ےU-Y0Rv ԽԖ垗ڒXՖ]Ֆ:"Ԗ$[mC#CF˩ri-R+2;Zm›ՖEHmIhjKGӥ\rqN5D`Z:G˫sH$lY:(-=ϓrjuʩ2/gg+k1ײ̏ĵd\KItD%YaRB}%Y,%Te%Y[IlYF%[ObK=m*dbK=1k}Sߎz uiXL-uHZ]"kYZlf\K ͵1lȒk^c}X6͵d\z5גT&s-Ih4Rgk^_9^CdTv,/kIʪ[?x-ג?{-Q#b(b ײt>c]RUb>U Zjjt!y-KhG^KCkI\ OZ)׿Z^L'$#rm\Ku˟p%ԃka\KmkY&sZkNɻWCn.5a-%VUٔSHoaZf3:$Xj]YZ=R=gZJV˲`[-YՒ%'TZjD"/jzI-,FTK;jYͫෛa7-eyDKjIF*dy_*]iZ4%,AUI-5o`S8PK1B}R g-;-*%)e jY&Zz¥#oYTjQw 9~JԲ$xL$e i"eͭ,")-5`?Dl,!-)0rSj~d]}ҒUx#-IZiITH2Ҳ^"CuWYi1[̡}#-PHKާ#Jmm iOQۍ$HPRIU6zPҢT -U*FB|n -Y@R[̈$JgCjZzBKƓ @Mѳha-JX@KOdj[Z**kfonaʱ;,K>K2|K+5<ڑϒ\:,K>rgI}eB>[2dlegYZjlyZxQE<ҹ8>-TOY%BK} ؝z:%%2 -Ցɡie'hI$hYVh!AD2Ѳ+ dedYY̦# ^Y =?J++Nl(+ ̭T^G 
ojeuW@颬XYneꡬ^1jS++7,eDCì<y},3+ 8+Z!l Zi{"Pki%wRi;AZV,zBZi%*OVUAZq}_V!⥵EZPǾ^c+;Gc+IO[!JэTAphxZ kšX+b;V\kj% lP+Z|m[U\6li%BrV/heN"g7l@+^ZE(Ί8+ķgpYJ{ 8+ᬐlf%ufVR=2 *)+ZYYn)+,FỴXY=n++˼ì@$YUC )h<`yXb1綕tm++)۳fe=KK3+!fVg'YI"5ٱ2+j9+$%%+%Zw=-,xyu0N$ uIVf[jЦV-Ԋ7S+RyJ ZylZarܾ&d#ke93Vw /Zy\$FJjV^?V_rZmV$ZmkV `+FD rVlC[jIc+:H,`j%"Vu+rjńy| j%w< gVkAYZ7ݮB]9V*CbaBZN--dSTaB}AZcj%eVXZ=.E  YS+ZZ6j%*V荡Vv~+jI 萵iO6M UX+c,؊{`+E[[[JNJub^@\=)ySqArh%p_W"\CT;WX\IpIWrW \ݽ_bt+f\9t6,ʑR ~|[IE<ފVV2q[9 NV\o;<#[ hop (  C0ĕmc+\ pŹ#W~WLaWrxxqmlq%'I,6JJ^*'"q+q)X\9]*B,r%% r!WH\#:&WLPcr%+) Pז\r0^mrzMȕ>1Wjt_oW \#+gXLZvW<дR%yD(kWV*̭ܔkk+k+nι@[IlmeҔ2>3VjW[almeV )h+.EJ<#k+VBF[cTr1BlVkC[J}ćV>v*VL­<'׳Ss+íŌ[AyUtrf=y+BX+ape0\q\!CpeA xpWhR+VV V,БVj 3ss{[ɷWĭl-B [OjIx+6s+7 np+LD~$ Yx+[YHy+[a]( ~Yq+8S/3mm% &8vhmeFta[[D[m'ܣKhl%&`+@kV*[9._VaHJc+5CБ`x*+jR5RPvm @e\!oo%{+5۴o~[IJ{+Ad0J y+WV! V[IJVHZn WFh!VJeg\OݞWBm8ip%$\!QjW|2|\y@pEW\aJ 12,"l"p-x+!Kފ<{+"oL[D[I7W% iVN,) q1k+*m%? V<[]~כ9ͭ­BytaC }E֊rakemøرVȃ[ Frǻ,ꨱ[yF[ێ| k3֊Vbxi[+sq诚kZ.keW"VVXZ  TP(dV\ikP+jE{`A$j솮? 
gH+K$,gX ;ɅZDjdyhjŻ,@8*RE"RDP+l04Vp_,C}6ubB1Ҋ%EZ1M䈤2ni6l RVR@BJ4V0nYq̬/_ʯ0+8"Bm3+fV?B=J3dfe{QYY #rYYy7(ʊgu(+ZVVR.felPcee5Uxe%R++ Oۏ3+^ɋLUYW X O&OgAVrGY1 @+23pYw,JK#+ػAVO?2 ꃔX-RV*bUVƏPV€ O AVd_dܡYO ed%*'p6VrEV"xdie=bi HH+D|iŽ@O-$J#lE)?`+sV#l[ NX+Z~ak%V"ZIʆ"khVظ2: %V#V- /؊VX58 ޑ|q \EY V |V#VܴJBuVسoex+j.ފVBψ[[FCs+lFUJq+I6BU<0s+xV0yXrc+$*B,wm@jmV2k+uHNim%h+A[q] 94M2/KPVBnn([V 4 aVO)s+coNx+V쭰MoEW6+A!q&EV: S&+Tԍe(eIG7ѕѕAW]:芣1VW2~ՕEnͮPrl~e] fWӷٕsdvY * d&GJwfWp®8vgmk Bݕټ{vW20sxSwp|+zaW-]Qpj=xwᕰWɵ„yU-++ x<BA+<2aK˸Ls~xxx ] v0R T>WA?L장^Œ ze*oz%pW'^a:+bD(ڼ+#>J,+WM2n=Wn.FJb+^a{V%+/\$+G2 sAqWο pW Sr+md.UŠ8 ۆ^# i(Wc+JT+ӆW~{uEЕcjяlt%[X]9wMମ,XP+NE]Kb.u l_y+TAWv+\+dBue9)RWsUN`Y;wƕ2۰+$a Jޭd_+)TRW+C~' u۾]W'iE\% G\Y + ,'q%{$J\I"ʠWE\?H ~~Wp{\I9 Z\Ypȕˢɕ.5kM+x擉{ĕzoG\ lepĕ\[\{W /Jg^,J}D\ͿM`БuMZ\bTp{+qI⭈[aU ov!dgy+.4{+VSVREaoeUZ%{++8%}j,y+, ʈ'Vnͭ0[!⹀+8W npmMW6_W\~qŵtWi[\Y+)@\OCrݔrE ԫObqJQb]p+WVV ,_+b\CJ:w'cx,̕ra@yWЕI<]t/1y),}sЕY׿a+dJ67lnI.te=]PO:̔?*迧# ڐЕxgӎb7 S0A]!B|QW4B, t%.FWғZ]Y<+1`||e+ .PWRG z9ue~K!VW(%RG6Wˡ+uj+ܬ$CJAD+3L! QVWf] 3atqFW\]js%& 抷\!ԋBB̕+ +!WF\aa;+"qE\3 WtqeqZqśP!ǿcLW(/.W=Qaq(Jo++ԯN ?05B\ar穆\ bdrȕlE[Wfws"qĕEA\gP&Wm 2ɕ@&W*#/r &W mDD brxMt\Q|0ʆ`qq=W6BŻb/dopeA>J12WiH\q dmpEFVQ0C[qA x`+ m ӽ؊z ,?؊@JVBZLnK+aG|RS6eӴ'K+#`K+3?z7^1gi\d%x cie"YZd i8V` VV#IZ ؈rl꼑VJZQ+U_m/ԊC).YbU hŻZave@+H:@+T\ZA)1B rgs 0+YZ4 qVvrvV3Yv֭0+yYMeV6*XYFחB+TdXNV'ZXY+GQEniBs;C#? 
vAVBJVV0Pgg%ޖb\8+s'hVŹTSy&V žP+:V`L@HZ$XZrEZaKJH+&H+<˅%GJVCZa~rZsFJrVXXZTiTD*~rˠVDGLVM7RQ.D93: J4[+=V42~P+!VvBx(cj%3$lPS+˧Y+Vf"fle[; مVR ]:7J[qY7B.¼[1k=J_%}eg$[+;`d+p>V(Z6 Mcl*MVJl<<,*l Ɲ[ {6$a+clŵTV坭dZI噭ZIrRZa 'c+l@R`a+3E O`|bj~̘ZqJojűzBV -  `VZb$e1BV蘌,ʌ؊rl&N}mK[o{ R=>,27#VѡJ Wc+ ble3cl%Ԥ[I YVEVR:hke9VRrCl2 OVf[+.WHJeX[nAk+X[!* o[[ÒNڊ!.-X[qmeqZ[a; *[anšp+/_+K׺DF!@hweSmT3v*[1ըV[qT 9p+=no}dSoy{+͸ଡ଼ FR=Cx+np͈ҏ|Cz+3ȭ8[qnVVNo[鯣BbFJJ,ʝ"gbmoVE[a2x+3,9Sɠ1o4]oNފ-&8Cu ܊[s+gAnMͭP&GwS[YXtҺWx.+ȯ+ t R0[aEIkfzV@o]㭌學qr>*pł\yc/\Ip- P 2f=X=+J+ -WΑԫBJ+ r;ɕ4\ykc@|N+fI4yҳ̕{s ̕Y&S W(nr%&W"!WG[ȕG%s!W& \s1+͕"8aNb Rs%MX ңvT\GqZy/p8rppeW (r \Y|6'\b+m 4`pe[n`x+3%A oԆVV[y%_g;mW̉ipE p;V⭌Ro ފ匋b[YLVQIoo(JX+%peʑS++b\a#pŕ[\9]jSW5 CpER+.2J\Z{ĕT\9n8 c MpBX\1v*rh+M\q^r/fI, VJ72K J8jmr re^?M JJ Wz(䊫+/ lb+\!} \Y裘+jP2]wm}2 brWK[Sᭌ˩›x+m魸ІҎ\{+EWz*R? j6/er(aJ[ĕ#>gj>\ics%-JңrJ<[B)B4%+&J4(2ԁ+A\iB q3Wz䠸ž-܇J\[]4DvdW {?ؕ@ؕy)Ձ+Vu<'Օ4ueqy슻7B®$%\+Fw ϸ+\]Y;ZٕdK6OaWD]K!BL ۤLmCkH%`ȫkݚ+k4hͶ+W`"J8]1(ҩAW:/Jہv!Wd\@\= WFSr*cM!,ŕ2D\Yĕ;+ ` ̬-G^K\WLq[ʗ Wrŭ(%WܒrCr#Rһ魰ފ{px+ 議+WwWkqNP\y{2EoqEDqzyĕ.FF\qiWH V\!қI+Pk+U.%"ҙ+pe%XW(opeO XV+7HYjoz}"2 VVzhmoZV[qe= ʻF"m19B0 [EmT͈20BÏ$' ub+>;V~;=hKbH(Y`+hw[l9؊Eb+^GVJTkŢ?sbt.JX+!JoYQ QZke\K2r\V [qjGVV,R[Y:`+4EAl%'b"Z)KX .;b}>JEb+7[*[kl*Vܹ:JWl%,J,[[yRc+f [t7S!0Le 6Vl4rhT,ޢ[ql Vf$-tN֊[b+=[];JK>Bd8BX+Ҿ ؊c VHm|D[qV4dh+ly+m.4/i[68!M[4rv̬ZXleHlIV\+meh+ ^meVYV>\#h+j+r+}[>Zq*2WcffW rZi`jJCS+'_7J]Z鍜V>r VKfhX+atO"#'RJM$Q VwV QjeE" bZ)kedu/cG2yߊ Kͭ[z,JV⭌%v[n.?8nƊ qsD@\yQJ+sRĕ.U\qnĕE\x{\O|ޙҤ2Ox4W$Wz#,ĕqeQnW؏W2\/+W4W2vipb I+=WS V23"AnGx+;M2ב@q+cvn[<1_oe̩V<neVLneԎon-Vz0$k[q+m.\!Ivoq+]M Қ6WnE tOt|>*SMuOWWqqt&UBW]e!u eG*8F*#ն30[erky ђ-K *h, BYh*'| Z %*iAEL5U5U0Uzצ fMhT R1;HwmR`҄Tސ*c RLv+GThҧQe;DgQ߾ɒ{Kս~^Wg,p,G}1rZ?L>90/!012*%&1ҥErt %3(GfLG*Jd2cc[DcKU]DycʔYdNEd9F~Ys,Ȳ53<2kEY~_e΅Ϛ(1Rk6~$u*(1rr"e9fP\I6ẻ12e/ S,3e8 H,ͪt,}ȔRe:pe9F:^\ߎHU\ GO0Z=(O,H1<|ɷTPcƈ(lp,=?+rW֑!NW˲rY^]Wg4e\]yEך1r2+0._eK\:UyY5jxT|`eh-et,t^!WeaY~?IvXyuUv^Yf2k2JfwhY~?"娆?Ƀ/8,ן\_? 
fyxY;)617!T4F,u^9R g?tfÕṖŹ,Vev= f)D,HOBS1.|I@YfC`c$k+2}Y_},G2|M},V%g c f9a,?ϯ.eʻ#3J0{œ̒Y}d2vUeUf dy2X(%,,ZQR,bŲ[](Z~tYQ,ow1eW3l,#".2 +e>WrOl N,~,s\,-,C.=iT,˼?Z>²,s,?R22Su3jCh ͲLC}LWbf=f]ZYᝦP Ͳ~v,"4|b9s?/2˜Dl^Od.5d2JyO92loRji+0rD,78l)QsYfwpGgy񙂳{:Q?'"Le fs]^YE|>9yggCk=HJ-(cI7в<}Ԁ!d{~Z0Z%e6zPZ5![v5@2вpUiJ$gCYf/3u\}βף>2 \:2Fβ ,Yf=y*,ˍY8|8K=_(eQYT坸#6q̳t97P,VWe^~Q$ gY"Y&=RpY湭zd 2aDJfY_YfGrayW?0#2A̲#,Qd%e>BC,˅GqY*,e O\%e']-F`c0,\ 3o`2?rV_̧߾,s4d,ۍ42aee,,420˜_}Ve 68J4_z,ǐ 2Wu20h t@-0˲n,*R:p2'PV(K<_l,?.1 ̙R\ZigYfbiq YɍYEeBK#[ f fDJP2ˌʽ"GfV#{dDb9i,K&48{lc2 e_le5le"6fKhWͲ8^ ³64˼ҫ 2Xi%fY暡Yq|VeyfK+rh Tp١kdeesGfY"̎8dցs!0˼R_M,ƱZ>9"S%h&9^fLsed+",^*[Q:QHhcfgtsR}h^ f2D,=fs0ETxY\r@f·?!/AIdW{-(eYp6nF,}B̞fIYh+ٲ\YmYG(ewf4F,6-S6ҋ, Q8KYzC5,g|V,+?9>K/s SYhqžB⯼.Zz= eQeT-Y@Z7'aQihb2;O63i6ɅSwy;*ev\9"]ʭ8 i0 HK@Zc`tdAZznRNWHK |W# -=i#G [f iWA܏RZ;G(`Z2WbZzҳu ԁ>k(e;k23@#Jˌn+2)[$>f+0-=Hi1e*c̋Ki<>9TjF^s;ےe6|JK,r/J˼^, ô)Hؑɴ̫2dG1-(Y0-s.S/qZ37r9-5;?i&NӲ\MŴ_)-@Zz ҲUaJhY1Z{8FK}R"^\E7DK-}?7)e>ʔ2Cgwb `95hY2Zf䯮-bT"-pҲ `'up%+WR-y;!뺌ctFU?"FrV'e1̓Vg-}#t,BK!h喿:!ғ>ly*<&kf>3̇;GjDi0ZjFwe6qPBZ:e+Od¤lI",$2{05;aw̖dI(+Z 6?TfdF -s3vɞlz2!Z:$9/EtTe~s-j q$:DKøT: XDK/Zh5oyrce&)eyhd`"Z:!Zfг-~%!)ebTN*r`FU2y͝-Fz3ޕQXMiY;2_UH |J1Bmqz~]N:J|(V?ɧ'Ye~T'+ - -QJ9 D~ս"-)c2:AZ: ҲIwem$!W@2YuiAhO%;)eFeӢ[JZȕ_#i Q]J3Ӳ.,8-BRY̠t=,8-5]ޅiYV9-h*ns9-2+cDd1,O TNKkI'`Z .WaZvqZp/GUrZG!4A@-v[ 2[ L>RNKO´lbZRmQL2Z 80-rTK {Y1zCEO FW]29Weһ.WJz?]Iw,e=WNEiO=tJ2};WɽiDgp3BiYFQZ:e)Di:eu< ,gCxHOr)Jafne\]J˼bH۲M^ɣXeoJvIty?\9VߓRܒFI_^saWee:-KeDjrudZ%H-?Ra}ÊH-5 Ò'V.8VdmE*Sbћފe,Vf: ʷbފ[z+[<7Ny}{+ [1AYoŴCO&$Qosq*V(ĕ[!8 9߁b ފ(P{+$BY,­deVǃ8܊r+֟ʭi@a }J&p+H ܊er+;r+;1˭ɭʭ~*rqʭr4r+nr+wc*TDd_5nqVXs NJBA{+uVHil@rc+oiyK+'J+H+M +XʪbNԊIX+g`+j+~E[9Gm!FQB8pV`VX[lFG[y\`+#%'Y<& VģV'dh+AJkNh+&쫭 N&q> /G­ V,[Ym;AmŚ[ bLioeywZlGzVtVP[9/XrY୰ފz+D,[9)[qРBVGoEoBt+/lJWW-gOp壟Rv8 V%#QpUpU+pW"\i%V lo%(4X"&ZL+IP(bQ2ȕ~ɕ;bɕ{ WHmr$WX 8Ers$RlfΚ+wJ] lڂ++*!(K @[Q~@[ ڊ͠B؊Rb+,, /rlF/ClJmeH[Cne{+0[0nV譌 oL˜O* VKVlHr5rh­hoegrmoEJoVcܖx+M`258r2$W-TJBŗ q q- \!~PrR2u J3+7+('B%K+g!ȽIqř~G)pe=E,x+-[ao [o 
[A/[aq/p+K!F[[inaVȭP\r+md8[쿹 V%ꭜ7 ފ2ʔVX[9GXp';:ފSg6d[. ,JV[ ܋ͭp5x+7\ ܊\s+ J` [q@ nEHnܣVZ[ʭP窶roD[V[٩V[QnmEmYbns[9ɭh8\ Lp\!0"ʽqWdW\!ΌB˗%܊p8r+V˭x r+oMq/bĕ}+bG8@rEwrL+ }@Llȕ=ɕ5VrESNr&W|\!(h're \hdW>dWLdW~ꝿ aW#O+ٕk <uŘ dAQW.++.Ȯa"&"h "%-aH ٕKvfWx8֔]٘'ʮ\H+}ˮ+}QWrT]!%dVW 󪮐߼+ TWV]i]uEQՕ\v,fW?^9au#ɹ+sy} +b^y!Jn +sgaWeW ʮR]A2]zQW̅TW=7eՕCuEQ u<Օv'PW\ Еÿ]q´+1j@W\]iu!Za+vSU]NWUWouQW.vve;9++b7++}v DK]Kîɮ]iRw/[+7q1ݕxyub nC.WZ[^k{$W\I^ymS+5_|9+̫WHOm镃ЄJX+TIX/bMg+Cb5>9MlWWWXHozJ+IizH={^C{kW6\mlZ 񭼢߱%Wly,+䴼x=^Sb5Hɨ7^P0#nwrEjwUܕ>+JAӾfW|_͕u+,bޭ 6W4WĠ\XsTbx*1W`L@\~[M[no%J#+V/)$aWvrŢ&W0J#@+1+ B0Fr ^rOvhrŇ kMorAq++w" {!X2R\$ɕ\.muAXK,k$WZM\rŷ\Vrգ6WS%抔Ń\t%q6W44W6*j5W @5WFU_s7\A_2EW>x%HB? W6B~B+_ E+$WB9K7 M<7'r=!&WG$Wx$K*+ɕWҴFĕ2D\\Jql)%ފ+d{/<+NKRO0AbA惸buf+ɟYjgZ\y|gĕ”ߎb҇45ʈHZB\WھA\IRC+gVgVوdW(lpecHp0T+ x )mpe2-())}WHD\^RQqEpř}+T!("ؠ]BBP%(L&3jrECr9Hre[rʼn䊔r$b+D2\8w+@hJ_%s+#yrɭ  0bBseQX0W_0W\޺ C})D\ȕ W_šl+7ͦ++D%TW[]“ +{|k]ՕԺ,J+FD4WK \'a/kػԓ)"HQ8h%\0 kr%`k+ȕǔ\a'+;$W^7t B!W-̀bv+;(ʛ_ b Eט+\>\T 5W"Gi.B+_P\\4J;]TKbW 4W+@:[ioUoXnŮGn9Z q+aViV4V nneL@^`j+q"|bEmӫBʘA׿ɡ'F nڊ"ZbCb+# U_ lRVXmm[asB >rnX+nv2u a|ViVE[i8^i2VV$[Zi&Ҋ["4 J&-G_K+'l[:ʸr eފz+P具rxU[y.n%;XĥbSk+g.6ilpVwV|XqbJwU[qmgA[aKE[kjV`([[!ϤH4V!ڊ%mj+>dߊV~YnEHmip[Hl:8V 7b*BL9؊ WVc+# R؊b+ \' Z+sM(YHБItMȟ@X,2u򚄇+qgV5B PvS+duJ%Y$ sV8Z ʉ2#dԫk[ZAZSZyP+)4R+'ڂLVYYܘYh@8+K b^1+nfjr$Q2+YynIFaVv`#PfVXoffVji}ZZiUiqҊiecw VҊ`J+>ĔVZB̵)WPmEmq)ڊ]ڊʃ fڊP Ѱ-AQk%kM0i jEPjVȰlj&Z+ωyU1֊c^Zqh sFkaԊe4R+,6hԊV: ҉7MsKS+|ςZqqzg 綵<|X+>VX+G"f."+T J_ZX+k8Z[+$εJb0ZqBJ*,}U[;<^pL= Ka+ϕ lEF"Gb+%`"[$ Vx\!ҴJB8>Z+V ʖ7rV*VZEjE jͷVtjeQI/Zb[F;R+dpKh K? I -.+7yaH0/ZaVHlk P+ҴMdT"8ZaP+;tԊAVn_jQԊ.VvPVZ+pV j2hm-h]b>jXe¾况B֊j֊f֊kEkoB؊CFpVW\%`+4`+jb+}]P؊b+39gVr6ZWkVSbdk|.V>gX+%Sk֊iVFI8ZZq&L`ʈ lЊ! 
OV$VȀjheďsb( ; %4"gh&ҊfCZV\QZaFhAЊ%B+F$ZyӞqV@=b2+VffE1EgkYq8+ରejC+oVVؼJ+$|⢌J5H+ZJ+ۻH+XJ+CVޯtJ+> iERiV-Za7P+ǥV@KԊ#KvKVJ}QZDZy!V@ZqKiWi# 1(-SZQTZ!9Y3+ hʬ YQy(oo rifP̊qz2+2+lˬ2UzPV:B$}-(RgETRg=DvVXWjglO+YY]U#ΊIi:+nb^bJ +&tȊ"V,b QFV]Dd\_YxYdYEVYy2ZY1ȊJXQXPGX"byNήJ7'Ȋ*+;x*+'eXrS^*+n)QVRY!]l2+V8؃"fckvdV]Y1GVf#}eVXBlfŜN4fVqYasfV~YPJflfV|'tVa]g-uVqVXlgb @tVxY-'Bi}AZY>*!ފ-qʝ V†o \gŭuVeV6DϑUeuGeM̊[^YG3+nM) fV^ 0+ˬwujTVdVv*zeV 799`V<72+dt6B,ͬX^*> B ʴP,aV|2 9+:+}EZh R+ ZٯHHX7-BhVDV|iX ؄֊;okB"͠kTMZkjA#tljMZZj7V>'JLR+IVZ+i c 6bɶ sZyۺtVx̊;¬o3+T̊>ͬX/("jbA Lz;+lt}'t I2ͬP):Q++LX++b0+V_UqVnFe8+pˬX B J(VHgohnUhEZ9Q_V]ZqssV,Zq!yC+wv Y][gboB+X|]#aVXUmffV ϩ~~я\}6.Y!Y.Hd;YYDVYaFVMDVkd)Ȋ+idEMd>PHZȬ̊D2+0+ rw~{^fr{kYY͑YÑYakYReEa4r ܮ*+0+C*+Abʉ@Y9:hiܑYY9˂gxY!l!r00 +""+VjlLdEV>^QV"ZTVТxEY!]e) ;F-ʡĒDzVYifx¬ ɬ쏟c@KfOfNPfnRfăfVY1x% MfE&LfE6Xf@eVtVpVЊ,Њ 6hhLEZ2Z=P+vjyY@(VڵVokEkő ܶV.ZјZZ!*2BdÌV jp JZiVj[KޕVDZZG .8+ 4BDZ貥V_=}J7JP+R++cM7J.E.  ;5!SZ!-^f PЊB+Us@+ʇ"VMZAZ1Kh "*+TZli-VnvTZiie'sKiehAZY͏PK;VZSrImjZ!VizR˫U+rJ!΀-VdNhexD;pVYgeXᩳb,Gކ̶ *Vp6:+F:+A 7bҊJ+)XJ+; %H@++b}GC+6"VA X%r]+V]he Vxm|m򒄇oXv T++FʥB;J6QY1%c++䛊Y9=aVV^Ȋ &}!+@V9DVWdFV.%acDV ]c?`C -AlbENj%V 3&VPBdha4Vn™BmPҠƊ(\+X<+,.ʝ~|+9+k/Us, 36V܍]/VV8TY6VY~ (+UVȉme r;& 1 +-lejy =TVAʬ|^`=0+ϖYҒYYxFf? S%\3+}ìml\@+:WЊYV.V"t +QV6 iJi|VHkie,RVK 8Sjiiek!% іV)hh$ΊDΊ7Ί|%YavVogV,Z@\iċVܑ]j~NjuZZJVj ΥVXMzVQ_ZbGX+!QVJ]$R+m`o m*VOk`֊ԊR+ eR+wV ke:X+vV)J_LV,GZ]k˶VDRV[ifl2Y+/bgr**VN&+Wi% ^ ""})|NB+RWꆅ8AVZZa*҆Ҋ%2WVޯ/VPgVens+^ͭ`ȭn{+9ފފگފSm-bE퀽\.Wnp% V]^n.wVZ [!!70[akV[qMoe_2s$[Qˑ[9X6p+}?­'HVȭܲAp+mE­r+vVp+}%íT4@peW "upe0>W`kqm6[\QT\iqEzSq%p^Cpx 59yǭ/i'OVVOoV.s!W*D\a~++O9P\Av[80T[qrfj+Ab+j+V(ViV ?A[9PW؊ bQlŕb+/V [8x+Tgr{+Da[q-ouVVTEoe{ssckoEpp)  lPJ2W\\a\ B4Brn+$4bA:x+T㼾_\A] \y@0WKu2#UB R%k? 
2 @ĕf;WXkq"-4R 9?b"s+$xJ0ms}6+sJ9 #6W 6WH\9\ b}K [K\O w\ِYrren% Jh+]N]+o|'|+;ml^W$W$W,]\q;<\9>I\d\ x+\Ow[EkM2Zke2xbXOrqKP R+G`ͥÜ4[r9jZPj NV(Ool Bl c|BXBVܗOm88j+Vݩ0lmm!]mKmōVUU[|gh"9J3p+w[inns+JGͭz,rp+˭$ QV]meっbJnQ[ G7Q[Zm,VA[immezomE[!\mG[9Vٲr~V(!Q["NmźIV4V V.Kj+op+[P*p+lr+Vp+n-6z+zx+bv袼9 tJwno%q\IR\فv-[u>+!$WHjry%WErJrejz+TX1b ̠ʉ#h Sp}xypeHqS+͗@lS+r>Qrj&W\qh'Ӓ+Gs*_J5 &WH< q6W8hTqb|+AWbb PJC+r4b" bu ͮg+,3]UuRt+EWmtEW0W&G+4W,\Wose6W|䵹B` ]XHsH~s`U+@5WhH]%WZc\c\oWpE@per$ʸ ddNӓj i\hpNp9ʮ2~o/l*Vv$V/ Ob+lJK/`+ۋ ҘJ{V؟Rls"z_-s6nV:7RM_b,7WZqbJZIyV"0QZVZ/".ʋ< J+RJ+oB"d mC+NB+vaB+heۤW.+O. aZZȷIJ+>V6NviiŘ'˟J+ ZYoiVT#- V(0VPZaVZ¬[ogŞXgI;%RY!嬝tV VxQT"87WZTZ!۩/cxc[1B+2`Ź V@9c ("H@Y2TPy2E+vˌ) jJ#T * e|vNT,_2wTx`LӢZ&dTXyOX-#L 2R(+M+KߐeLcDk!h-ݷ$L#J%k (\ZFBT|`N徴!v< ֲWAOc-Kk*#㧱qҦZХ* 22lL. kߧRZF{sq|{-# &^΀Pe\OuG`wڝ=[FVNb/W[$bhlUd1b/lߴTyMx-eoe|[FV[F[_G2GDlfO-#Jr-#ԥbۖ2|x-,Fg)eQ$x-m_^zܩ( 2=RM-'_`˸[FY lR1fdckה2W`*?ZhtZF,1NLkFk=Plh-[~ZkGײU꧹~µ̧2@&Zf T`ֿse)+\w ZfߛTly;J k잿 n&X¥T`~%@,(@t`-#h}*P0nyާ"@r߷`-.RqT`<"jIX-re-/8AeZjt(P-vo: !xKQ-a/HP-sT`y2YF"O 2Wlrb}k,GeTTu'PVV]Xj__TDu-,KFƻ>iwuqZspg*r'NDe^5e^/?0N˸uN{\BqZƃrZUr㴌aL.񇆠2~dJu/e 2^ LK?aZs1 +B 2W̸AiP8-sq5J9-˥"l9RNK8-}J봌Q8-s_te ȃvi+K1-a%LKOzaZ]ŴN-G1-48L8Ue|v\bZ#L˘k/2.qZi'3NKOqZe/&⧝9O2/r@-cWBP3e( @-c~#P˘F #{8-/+f|yute_N˸r e?HⴌQs8-_jjP-֫ TKORZ,#LQgH-c%T~HD#T\yi -#ÃBZƣҲ -8h(e 6}h1Z:2J_a0I*&O-OFO2wԖjbt es2yv<c-62./1ZƧ-sh@FtVe_q!-f ,etޟ@.'ieiBZƃ-N!-cRHriw0;x胴,7Cq9 2'UT, iY>+HK AZfSWq7 2|!-&e'}S -sל[&e9PHK"AZҫ -.ҽHH iAZ<H@oegYe٦AZƹC\Fiw(J!-lï늴HS$ -[H-#Dk%Bˌ僟^Eh'&2N1V}e="< eQ 2/etpX+%\)Yfo#GF,a>rg)B,[ϒԇY2te,q28/Zz erl0w@K?Zzl2g݉wY2Fw4Y:Dϲ9n9eg>r 'U,c,B)g?>˸Ar_ 2.\mYn^կYF*fjlQeTb#2bbV6 :(miW)YFBP,#w;Ihq-~hPU"E,lc#,cR8 %mqT 6&–fD,,"29r%ڞ=LY-#6,>fف[JfãDfwה23">?fdt9YfG^S2'[,[Jf*eL#e/%SY#n'ŲTK#V#aYۄ ˲\Ų +:²U:eq/ez²̢@-W|BLX,?BlF١[8es9qYfE˗bLe,ɫxő69 Y*76q,YfJopѤ82g1UJe2x;KJ/,^8tYiL,yM,}q³RgM},Fϲiiq[e>"or#%0/eAu"8Afòr$|fcuJD).U~=.zqeqU^@%0y,θY渺N\`,t`,#]+;0U¼08QR,;_;,r2ʞڿa2 X+f0xbMYǟefn~&,kB b&0K_,oh⪫6,c!6,bLeNGsp:Ũ8,3Uޥl1_ ?M̴I:p%N2h&Yƈ5GdEFxw2ˈ=y͑EC:,=vCf1AeF 
Efq:Ί e[,|,fh2AA f11aYdd1atfeODfoe==21e7Y_1̲PeHЏ#efqRe6K,3 JГb8 9d,h>U,ɴYKeNî\iu*xY\1f1^e,x_4˘FCbl_e\p-I'OefKՁ'qf İY\f)6xxyvgwp܅,$)e@LeT\כ:Kc,,JRgJe:+,^ot9R),egqYf-RYfQ%WY!zBYw9tW'Yzҗ:KWYZmxGqC#gh>Ue,=L2ųtesdZlIK&˸y+9]7]e[ցt,G#$29 ,]֑Y%[eaUb0KψYCKf@2˴%W[}CqY9pY:Ȁ2n0qY:be.xh},nʲt<C,s^ BX,bE&K_ژ,dNe\{y t,cұLX0YƹOkd1PO,e-,K?`Yf=n-1l0pD/2z$Y90g,2ıY,40 ғ~h^fqS,=āf1Qe1Box~,fzl:Y+%6Cfwj0O,i(eW}*EڴŔ*%[!ʜ^Zl)*-*ْKb$\VyYeq-6f)` be7-Cd2FVlF*'rHdGV* clIڣRY]Vx@XdFaUYXʬp0UiX6RX"V&a ڀU|*/Xe$V٨bV#3K$ |BL7VlAf.(ȇ V/OVeKA+5/ª,QXqi})c|.P*]-(`*a&a0TU4TTUPUf~eU0@X[\6#Ve˰Y+DdUv`Uŷb0J bvv*Ti(<Jǥ_Ye\JU%iF,dCj% 9*$) 8 VC:+U'7J_^Ua UFQRVՑ$< UVi7ZNiDÛWmqRUsb J3tҵO*FUX]x+)7c,ʈ i*]-%tMuz"[pBR#\0[a"\d!W?Dk=NFUHFW1]MtB*cTWP]ei*uX_4B),<)*:$$KpjU5_]e[a€«8~WٷC:0Eu>eʫƅWʀWW^ʟ*PW橮UR %a`uvH*jUH#[yOsyH,썾NhB_hqy6:Xy#4FqU.5N|%6hg0+ja ަRFar +6:Š##v*_b@XiQs!q&4W1J/2$V\Yf:+XQX@XIXa&Wj+?Rʓ?,E8Je*8rD=abbŝ,8Vn+ci V*lwV,XVѬVXUX!<+RargZ$+!MasXb+|JX';X1M{órJK V1XXUXDX%c["H"XQXX1[ +LYb8+e rsX;XKAXQ o4W|<+VYr.o%+`sq;X1U`ő+k RI`wH? pBV2Vx *UU&Jp$V U[etj$ӼJ*U8V1I[f Τ#"xNU:V,-WyLxJ˫d&҂~x^8 5U ˫-)x^e|Ux*U&)_<||USRF_V_$ j*Uc"J'+]X~CrWګ"("JP$S=J)5MWή KQ+k!X"ҕu+++Z%V+BXY V`Jbe\B(XGXTDarV6(`q!$_E `e Zj+ibJqwXAWyhޯU^j*{]y'JUc|O|ZbxP_mu_NrIU_Q|FUAZ}*d꫘U__3| V^`E[`"+1V(XLX_b"cd+̧!VXXir$Ɗ+JG+f, y+w+r&wco1Vު+U#bho.ƊƊ_cŴ+yxXcS/2s_bPG ٵ c"+D#,>LGVl, ✱'9+O qm&a⫘ע"X_|}i*Unj^TF _ZI+`e!W&b~ԕfg3YY B4AdEY͠lf`JKReVاJ}0+,M`Vz_ìllJaVܯ ̊+eV0eV\ (rȬ1iFxHSLh?*cVorbi|,U^L?U$;[ec?qlڡ*e*6U#$Sn4 ^*yW!*dɫؽW_"S2灤S52 UPt{*T/U3 *Waou7uZt&J^ARb- ]=o~7Qu&*}suUXz*vTWT_i9dsA'bIu^!it쭭2x?2id*=+&Y#eU:uQVq؃\sy HP ,u[ pZel#M/8e*Atbbi!hvUU:V)RUZ婮#RlpycU 7Lqfd*dWYtfAWqW!-貸 U~PU鼸DV5p{YVXI]E T]Gy\UX]e\DWcU.n1 {&gX+' Uv,$NNJ l!)mUmi*lQ[ lm+*8W Wى͈*U *lWU^ | \~A<«ԴdUqWŹ U*Ȫ+Uܲ*c`UF`^ӄ|:رɪ, @VeYX~*M<-ikɘgVWp2 ݧcWHuUNBfJ*bԜUHpU VXEIYeDV4 {-J+[F(/ټB0)rbV X WeU1EXYki0W+ɚ UqU"|R%Bw K`m\`grt`QV?mUlw(. 
%7*;\*c~G%ϕ|>e7VV!he`UW؈z \,?!JU,TVq.d+lo)4 YJ]U䟲+9*NBZ SI R:'JwVyS9⼹u* baJ|>UHY)/g-J@*#0[*W]Uz\Ut>*nɇ"qu}* \5* b.ʢ!z* \DR!,L'y*HV/RnZE *UʔVWZզ*n,VZ"i :Aޣ^-* DV_4ƔUF3ow/Ba)iiHu 灴7$sU,Ң"h Th:s$j<+<~jRI>Ʒ ҂^p9Uڙ[F*.VUJtX[Mm5*FU>>bR| * bJŤ*nQ_EL_E`*y>dUr㫸" :>wc}p,i+\XAXY}x*` *0B*w_y۪yxjD*=Wb{ʫ؛x*__e5Wa*ʫ,X: {'bdzJe*MWW. ]2puvr|ao8:؋ '%QWG]Bu u6cy6*f,WׂD*4pă+ı x**;U (-*8UhV9Z[e󊎭Ri [d*U>/l*!U VVH[pUʥVWn`@*/V1bQGI[e2y J*-PV^ܦ*/ p=*_l&ct?JD*%U~GxU<ʓ3*OdyU U!%*.mWyq>⫌6c}X Wq}ŧbX}r2WK_EL_NFH} b*,找f$s[Ut[U6Lm|iU𰾊#WyEwRhKb%VSVV S"TXq}b,ĊJqXIg__a8|W)ob !x7>W)z?*nyS_ech<|g5*WW𿾊UOWi _UFk |K*5VU૔Uy7yxUë@W=}qUz&U#t&+;o "^7Gy xU «T]WH ůUH"ʩb,N_Ź6*tQJW1U_e0}}VdVL[PXiWanIa a+ Vj+M*>qU:r<#hc ʳӀS_J2QULVʵ)\l9$#Θ)s XLrV:mX[`5+՚UUy`*#c]"zJU*'bɕ*ë* *yѩ¢ku$UZ.@Wq_Nt衫D^xohhyx*f5ʫU,W)B9WWx iU`/Uع^e~O m}GAX1QRae'/ZaxREb-KrJӗVħ ~=/kbϿȊY +VOd k$Lt%Vzʍ]$QC;X )$VbEs!V^~ĊPhC4ϳ BcEh+Sc#-+sb Ғ7zceXi:2J_4V^+d&XnXctWVf#+#&6JHs[ͯ rA( V>l1 2fgKPYbXa$_b} -Ò5VwCJR Db1Ab=!VJċ+Ϊ%V_jC a+cc;C#Š`j SXP?ɕbmKJ*V*tw^ +°+ݖ`eGJٲFXqSF+W^M_W[xX}Wa_]î*-]=UYҧ-{DW+|rJt*cU2/ 'ʓ@ W\"mkQҁ\mi*BDWY̠/[\3WY&7^^^N^s)W1ʍ_ei*p_R, U#*J1|+ULU W1O - p>U=\eFUrUF##ZmW^ڰ qEXVF!yFaEk S捓@J R`ecae}UR1*o+GX9YV0U꫼NU૴⫘K_s:UFF_T\]_y3JG*bDQWQpp+?j}/6[IWf⫰H_}{W1B-p#t|Vp :+:+$YIU.kF2+]݀YQ'V{eVAìX?"0+<¬tO7P^~Е~y&Ȭˬ7 KxewS|xY4J_WeVU0+5{¬,WE aVJx fŵ,ǁ2+T, |qB+wV߀3qVFr:8+VTӫJCc]9̦YaxLM¬|'X<ﻧf\Jk `Vʹɬ`V]~gK8+tV>uVpZq QB+AIVfUHi +O (WЊ_W4'LqW)BЊ[ 8fЊ:{K J uVpV4,b^7a9ᬌd\@+!8[DV Q(lyF`+DZ+%VVٮjb#JP+k^dVޔ@7ZnhR+7?j\jjP+c$$P jxZَ VqSjJdV:?j4AP+mR+XJ+JSZZa9Qjyv?P+#(JKV%Hl_Z٣!okP-J'VNK{$J VgV,,BԤʗ#x+κ,Pk"V\V[P\!Rr +%WX/&+,s*+А+$W^;MsŢ\A\1Ts9_"urߢ+薠+&tR tB%ѕ'f|DW_芓+mvyЕ@q~Azt2SJF]ئR!$Ԫ+x$ʿȕ^R&oEox+dK JCz+w㭸\oEoʬGn@3 yVDVQ[YTh+gڊڊ5r+mp+#Q?;2V*Ln'm,KY~ WL\qĕEC$+-y+"/ RxW*{U4pŢ  hpNpy{(GW<΀+f V\12bXqg&Ҿ sDB|r$h;JҐ\)re W:$ފ{d8[qio3Y׿v [+[1sz[.W\!\QqkŕE++pʛYJy+5W/pQĕW+ci=WQS\Y91)lbHqW)S\ap6j+gW~2\\qs\ fUr&bE{~˟\aCCi+iԈ"͕׍qs̚+'  WL\a_ɕ`At<KWHQ\1XqŊ6ȕbjP#s0WsEDt-ty+i2&#c+vBEW޺2+=AWʑWyH+ {tFtxf+5te1uؒZuu{ʮ|+9dW+eWxnɮE]\3+I*QG]1t+nA]] K芁Ǣ+c Q\.+AWh+eEWȍ]afAW4M@WH[ЕC% 
p M0+bQ AՕRs)P]yaD]qJ3 uEWi]yrYo](sE5Ktkw#(Jϲh++ޞ%W0 W[sEsbm W(?\y&C) Ғ+~+㖎r4Ȓ+;27)b13J(1WYSsgɕ,Krb(e!WQ\Yv ͈E]Yx+-]7Ti<%芏lЕ'0]iݼX 8J ̕'\A\yqWE(rz$uA<1yɃIdMQreЎ "Lĕ_"노9\aI!+#2ʖZeZm+ntu[q4#]oU+VɭX [UnnA +fY(ܼsĕ!W$ʓB]Lc D*XĢBpHq%:\A?:\iJKfW{to@x+b z+{_oV+{RWܺRp]a+U\"<1 Y^'+Wj& }J!↹<sb ! J귓s0+c+|DWGuegxԕkߪt_u%+b+1Tؑ$ y&%WzƑW:ky J WF~PBTY b)J'+t/b;`JRWXӓ^1Fz^! C{傯7_qd'r]9ۂؠWZpV|E|w{)/Y^yGRb^mX0+K^ Ry +ޥ+O~W:IxJy+U+FWDsWNԟ+˷RTyTRɧ"<+Vڊ;IU[y@2tR[Dm@VFadPA![9[Vg|noBE`+Olb+'b+ڊ]V>VʧG[Cڊ1m-j+ ҡ4 rP[yR o {V [ m3_b+`+Yl(4F,"\ģS[qXGJ﫶2_2_?=an3hgUד?~z*~ĖBO"qɸ <1VN2D<1ݟ$oЕ+ fs9ee4LeғLer;LeGe|eSjYc ?+]߿"Ph. Mc@-+#ۼ7#&?9g/Ej8M̞ZT7@}z2W ۉM2q7l!<0ŖVq_yͳ1@. h0-c63;0-ہ eUɴOf|ɴMb2-E# e̙g䔡Y&2rpxpb*-}C2yQ=51b2W3[ -S~^~iIDZ%1 -AS!H>l|rCQee?y,(-^9rV?#_<0|g,l1/H˸Le\nNvvAZc 嚍L2^sNyVӇ2zh4Ci8 8 e SrzҲL 2NPZ~/&h 2q&eGF_x -KOeLeN5WhZ::2.ќ᳌&wYƹU=}q+2Z<2q~r-?us0 j9kU1{meٜ>s=N7^|h]}z[u) eyMe᳓2:Ig#,K3>~ZOe|T22^c,5>h\FgYYƩ2}97Me4ז#gYY`aޓϲĴⳌ, J1}%Pe Ley\L濮^v&eRA&βRYq{pq8現332l1HͲĭc'go,kb3x2V2˙-6Ve4_X)쏟rdct,K <*ʳ-y<³mHɳlHɳ_w,K1>32rF6h7t` MyF:&2#hym~ϲtY,c,co3|qV3'29ZR/@ϰ.W,hi6>˼5}ee|=GR&2uՉhdv/ZF\svJZ֮c-r 2d&2ɟ|e$e-vWdA@2i{NeVGg \gY|Y~(>20/߄ڙgGVLeMLe܍{4w^gGޡV>,-,2/l7A2sh7>O-K9@˘Ou-!c-3`-s-3~0 㳌^(,KYt\y1ki 3 Ⳍ!Vo~YR|ee鳴 ϲN³,K)Y,#~5`dOSg{Yg&2UdβdhDgy`qKMe FgYJ;(q,pV.FgYY*NC4e^s] 2ge48)&2zO=W)Ͳ6ĤYƑeLk6,ef:Yz1e\!abH`'YCpY2nf7̲p3٫c,+I4fSh%1i>@Yқe\R%'2;ʖ+v2ZÏX˙`;ȔYFH| GfGRY 7?2D>4}Y3ӴYdi#LͲ\n3#hƀYn~YOsl,y6K׫YFDi,3['ʞ6țу2FEYQlETyf߿i,`4Llѵ%[de,]ʅff﹂#ehqxOA 7 ĤY!NäYct6Xhg|6N 2W.W#4e\n0i,saxgvl2#vZQ_?j,@t2(+Y-c,3-ʖӴYzʾ,]gJY˼WeXUK,ÃLŽ2~6hh)c<g~,j 2ysetii=i,͆ D,ctےHO8egK Gtrg?ړI-6˸9bIƌb@%W\qؓD2.KqY85]ee.D]9fP2Z*ɀY߄YƁsYƬ@哞|2eUYOȏ4KוYFGr4V'2bʁ$ɤY{h1]9(4ܻfȑd Gfp${L4Yn Ґ=08q'̲)reEe,r}ee+ˑ6geyZe٭F 2gCeY,}Lei&<*K3PY X!oX4r,K:,˘5:ړ> 2H8_ok,sȶ,s.>aV灑F*>)1IZ/p#˲aYU37PVXmρ u2~qF$\Ek+2nٔ:Rڙ3*ݐYe;pfWv,c<2fib>2AsJRpE.:4KhYZ;hq{=5,Mf= uu[92@ڀdhfA,,#eUFfsW3kfd|de0eDfYFGSfiYd%id,kRn&$UOhxqh%efIyM2)fi;4+2^4KʑY*2KbYbd,ʹ2[sI7Pemc%Jc@72KYHC%%W~,]p%pew'x%.Igwko2O,3s5J2䊈YY Ҋp&,#SB%bn+EWlpϺRu0;8Ks8Y*KIg!I|2?Yv YFJξ,cNe4O9g'sfUEl5 Me 
!US׃ytsLV]/t=U$AaJUnDBY+V X.rʑ  (R%`eQ2VƴgU4U+WT@_(73.|R磮4g< VUHSWyN<\g\\*PUPb*ON9%aFUnt*<* F]?^e{ͬ*UkUJA9 *zi\_"֍jlW[Rm jxUdoz^m*U,W0jU9oBv'X\^pU}QUCq%s|=$}DPfB}U^3Ҍ&F|'뫐dR_EI_% UVWQWZX^_^Gx{NZ~^U`3*(UfgUɗWl«d-˫+bo-1[*f˫l^*'*uR7`eHC`^`JV` +oaqVoyV74 +_e+Oyb %!VX(,]Ix`HdUc+,Xa[c:0&|Bʇ4o_5VX)#bNZb|CHPVC -|AF_`c#Jt1VZ&šnϝvYYQY[d"+ފ"+\-ȊK0"+ dYdEsJd؍bJ6ʊ#*+3y%V+ ܀ +ŠU +VX|U+,QX1]`:19VfV`%ZJ2 NbAx>\E[,}m/r(IVX[,R[%xU&!r)WyWf2SDWnio]*s7}@T8 +>Sxk]ŁʗrctbUJ(*Ǘs*_tw [*DiH\.Y\0qWRv]ŮT]+2q"*uIUV|9l**g*hqiU0U[@PVVATVqYYEPYeOY`RebѥUNAhHh;*^|UMU>7 uO*dd0e;e:e>efUvdme edoY㈬rWYYe{avDV>\~UY*|VV9K"*s* UT5UvT@M4Uܿ OM*q* EUN10%JP0^Uq".rWU9wqq@Q^4E:Que:" °Qe#I*TtT1tb\T'T#WU1E@TUUY$QFEUO/U+JD[hQElLRHB) U D灏PQ8*="XIBfEO<UXbEJD Ъ *v*WSʹ 3*}T0* t*y*)b'ղx7{~ "YE~IZzCip*yuUd7h%c3(cHm+ i)>q#Uir&3^I#G] ,GU>XU QX*7pM`)?*X+`FXx*QU $ 5@xP ` X*,jV^`f* *e?U0pUcqUU5UXE=@X«PUtUUg SQR)%UyֻAUkgU۠*TUxV˪ْ ";RrUYB΂ nU SU'PU Ȫ[V9 0 G]YU\g^lUR3TWŒi\'A/J &uU(eUVbEYJ d/ueJ*r`*U Yl.UUNh#*V*$J0YV!f/UcU6,m Q7%B**\VyV1g®UNs#pc nU'UdUWa"WAW9N]edGWp|GW*Uj!u&SW)U]^]EL]E 衺 ۵W^BNZySz^]UWao*"c*/X9+2^+y}fS\=h+#<W^0˭K`YD?qo7p v )V#*錯$7r+> S,^l*b*Uxx᫰_ J|WYW9U>pU2Wq*U*2*"m*~*οU4UvVUU$e ! *΂UlUaȫAQ^őJyO:Jx'tgu=OuUj5USrEWaW)u׌U+3uU$aiqV⳶ U6JpUqkQ*$WrP[_sW1_^_kI_eWa8}-FU|,G⫼UJUvoUb)’uu*WWqyqyB7*U'I2V~O߿,UW:]tUDસGJ\*)˺*nrRWo]!*7모X5+ع *lJQWeVv7aLUVV1XYUVhUHTV*XZe&,?*%rwJ: \vqULAW/W٨bWq/*X%}q(πXSUܔV]qMuUH,†s*K\[[sLh|̈h+[BʶSmz"mrU,VcXۈ@X(bEs*o Z7Y 7 TBViݐʖziʽ-ZEiK ZɅ)PC[ CRm|*>U:/ +rVyn`*BX.v!" {CXn.rC;UVIs+VygK@pV9O%a@*UVQV.[Xkaamk뀫Cj\I'_[Ubl ʇ|mk.VV!!nBbdQZ%UUש$ߙ$ pt;m~ɝM5%MSY=fU.lTeA*s*D$Uvj4UTVI"Ujz`fi-V J0,ƒZʎ&B%Ki[lZL@iҌKiUxI|*_yhbUibV1R% u+rN"X%BQq)sV1*""J!X+_ UW!,bUʽ9/"+b 2*wU rjELR\ef'J$*O{ * Jr q@gIVa"|RY>WncTV9i*lPYeTU X)ة*X뭬ʓ9*\U> 5UT.㧨TLڢ*uU\Ih]*W Y]*=KXY*aUaU2_%r#U? "s'" )SV%9UUԀUU Uq|bSY>WV*lȡR|'%`BWXyU(`^`aX*ry*. 
; QxaU6\ `ta*dlVqqPXEXnJrU>?aS^\U (@VVn&S}mXVa|+e~]XEX%NUjUUU2UA0(⚖mpU V!ɡag*l.\6`;*& *UUYʙ: VaV*Vq aHa4*JJU WdUBX\-BEq* *Y*^U9RWqNt󞪫XvVzXi-BV*OV\+ x]`k\WqY}v("C&B Q`庡IV.=l d}O+N ʩ_-U`Ek*z\*[;%d}}3}}KSVVZ.b5 }ljQ_~U_>gWB_ (* UJr|W*lOU]@Wqgbyo*U$U(ëorHDVqzm*UFU U*>jlUdUɫU\E[\A q*[|Pmɟ 6hVfUdUJcVV!﫴 CemMe6Ǫ"_b-ʞu*]**U.dSe{SYuU4 H=Vd_Y.PYE)PY**TU. L*Ҿ*NU2{GW(+UF<ꩱJUWqS/}}OUc*UW9&6*&\ɫ)QWb*/6WqUyUs^]x/W1IK^dMy*aWs 3**V@ȫ^ y<|+|$8*jp rWE.Q*,*6˫T bnJ*;JJdVJBZ-iJVVz[ZdV/Ydd$J앸*7*qUb+MTaMQKP_UGU*VVȪ\K.bbuU>U>**!h*^VdpYYϝ5C*OB'ukA]ktURX DaV7rdJU,*UȐBV!dSYLmomU `J[ \ULVWn'U(WVѭVw[ZbrJ>*|Uܣ *(JNmvkVy/*WSł$]Χ%*nk8VQV1E{*}d_ZL ZbDuUʾG.J|p\pԹi3'J {*w*z*[j0ծTocGIX4iKU>arU "ʇҧ\ Wa6X\tU,W!I\t*ŋSWax)ӽʦ1UHPWQUVyVDyY 7 5*_Q dΠGY;AVxb(?WYŪi;*m?=J ]G]xCYhUUqҥrX*W PYT UI4*FTT/TQxTmTlZSzp``Ty' 7yPXf^T%osQ噞!|þʦa)3,̶U/PTْ\PEP4KPEQ2*JJpu"\N*Y^DU!C`HAͽbuzI*!U RBKI6*쟰*TkX[b|IS8S1UF <0IDHq> UלH.s> U!U#!Ur 1U#JZUdѢ**O KUa!bq EU,*U?)3&0hU(U9o$*Lh誐XWEDWj*_{\ *L? T.ʝ^KVeF~VeW$HU KXT#+ U/l 0ɪ8Uy aUeU`UW< \_XWS2ouUdޥU)6 **nΡBOlshdVI=c+? $PYeW"삀r* cQV-:]Yűse K9uUҽaUB|C=,J!X{gYaYYXceUXVU9CU^EUfo*DUرATũ HUU1%JU=ثhbSOB@HRRD*lHrJJ&„JJHGC2vDTyI FTTg*}11Ud.P΢*Ё0.BUTMSEUNTz QkUU@ͫ%T/Ս)*5|PUCXUp C*-*/x`3=UlpU΃^W*n⣷*&C YXyanU=PWE*_RuUR*nEWй骼B]gcy0 ]d&JLK4UET1KWSTagL4U#JTa*Xj\TP2͋(.ۢ*ۍoOEY*+UdUSUK&'r *-YXeVRFXe+Xl*qX7➿*6V!^*{*IEFaa*+Š2 -𷰢pCaz +? + ŠŠ} JX+9+; b;jmXb/XibĊ MX-ʥ}.-Ձ y-DKVǼ +Jw+c>\@Xqay&JV|VՐWWqyE^OR]eS!Yt3Jt**F=U.,; RUP? 
By5M8Vq"/Zdra4B'rT-TVqBYphRʩrt}eWɢwܲJ YŶ|CdVٳ}_?eykߛ{>~~o;ZU=BPkEY /e`Tn*56Vr+$Ż2fG޹b5U)ʲ$t0Q ,_岌ő _U`yľbte,#0tDT>[ ,S2#ri?6oyr3reg;8I\[e:LG\hW+a|,}'qeU``|`c7e3J,y2lާb 2mhh+J 2NwlY70x#vKGLc2Na}vhqm*eyM2ZJ},Tx`\YQښI[2H,UB 8rKDg -<: x5|V듶 2o|"@>U 2Fl3@8w^SPa]r67Dg1x 0ogR:WL2zϗ2>YRkB?,e8+tqFyYƍ!:_:˸2&[-Gtm$Ձ1/gg~D,s#/;{PYfoyḅxky笿YZCYv~YBL,witq,qWg+:XJ2NLtEpA2;:PびuY2\Yg|$L;2Ⳍ'Nh,W/>˼@J^R˸,q@Ⳍ'v,By tgD-4![J Fo rӅ'\j~hg-@/eGZ}XoS>Vu@?Yh$2NjC)9Lʑ15%@˜Jh?)FJdEh8 MhC(ZF|5>|M:RՑnI)TѶq;@˸ {9we-dt=pT^uao~ՅDg9:H~:DgY,,Tt1t}e5 DgKgσ::h Y#UTFu|eC g9O,ch#>[^SNUM||oJ4Yvv!g7/0,3"Gg8.s,}㠳tBgN,uBgYqx,)c(e {dDh;B˸*%Le'G&2Ëhy6SweO .GF~S62'u8ez)eD.[1F˘DѲGU` ^m>hYJKYPZFǂRJ0QZƏ*RJ׿gd(-Th(-Wded_JVҲU৕1|e22bto ҁ}.MKZZix-ޫ ^KZzekٿe,7Yꃵ&`-}^''h-sUBeZF"ZK;Zzap- ҝ?\XCetY2Oej-isge. q-Db)ë_ZK~ZkD{$Zze&6Ny2Z\2hϜ\-3p%*H+=7HHK?@Z4(n (AZ\-ih.f9BZ ǁ&H͝>C S(DZ<*H |q'V2?*)ŕ7-efkbAn0-c,uD\9rUe~坯˿*e:-38- 2,J<(eDk㴸-Բ=_JCi QZF(-2W+'}4 -ήDZFybHà ŴQ"x"- 6 edgPZBl2S@)-˗⢆Jqz -3u|h1+We Thh䔷1ZF2ZBB^yOe˜dHYzbTGe68/a,g峌{|,f,$U Dֈb2gܡU^Y50e|곌VR>K`--ݻTZHL9Xq$'2X5tqla^GBX1!23CRmeV^|--yMCThIF{mbDhr8=D'8-3߿$y}|qVgz"ೌ{6H|ೌ Gc9tŌe|ח26Tj 3Y*g1&bQeM,2³*+ xyPUuAg1B`TetR9YFyªlIeUkd?Iy.,Fe9Ycg>ltYLg1B8>K1,fL*>,R>8; ̡M $YgrD#],NYfrU}Rx ƒ+[YLZg/eL,0)iuΓJ/xYƅ Q>Ke,=g=>y,&t|Yƥ_gm, x,,) 9X^SB|Lw>ɡ?SމgH2>*|M1Њ`R|ZɛV%#i&!Z/>HqI 2np0!Z%DKB Zp#ңN*JR|Z4 + Z\lWhX$:ѓWwiK#Z("2.lRVْ>F-cZ}@KJ? X!):>7\ tX W]^hei4.>9z+J_eYƍe<ehGWꭘd᲌ y~,r$벌A>;.x2Nr`,9j䊫㱸,ԤYkdY׋[9,RX1>ʼn,~GaY':RXeY,ޕeA,K/"teyCWT͊UhYz e2Zy 8K~̑[,3#t(e{,p 68), g&:Id,!́P$w~Pi5[/IOA[-2:3GͲ,{* lY9~WA`9o+{l p+Xa?q$p_ p|SSU"W#,5QUƼ.GxN[Ws# J{Gy5۳ּp*T,;$K&U"rJdvJM^ٽʈ]9^xUH^%%*`'Vqj߶XH TRSU1_T9-*kX-UEsUeVUDUWUq82>*VHTU!تJS*,b *TU`Uetu=UUx . yn*UV!5a_ *gB^|MXͪ/bUxU$V7r$.lJaU?<ªt;⪌U_U!}LWe bZ8*}uUțUSU!CEWeWJd"${]WRvUpU*i*/tU%|+1U'aa* ARVi? 
jVK)9r'3:`,8,J7/`qvL` WVUjTBUs*}E`1H 6UWhX$U|41rP:ܰ ̆U\ۙ7N\x!8RULy͞3N=JVU\n "!ZĊOa3$V|H4bż&VVaVaŪVFX}V̺PX8Y5KDXih--,QfV}(4aEXjaE(MaZXi bEXb[:ʦbSJ#!VLnb݅XLXi!~+X V(ibřĊa+3?vKw-VXUXKXˉ2r#DX>D +=PDXGXyX6bpF_Bb']_}b*Y^m^vͫ0BWi O]*Hbb^]eTR ^EU&P[Jw* jBn*4uf* 5X*GSۆVQ|;h *eUZŀU*  UV0VAyYEV9%Q,a=Z+E4޴ *&9HzYhסU#,w. 7aվUQU4⤯aFV*Vy(Z*4 b K*}ȖA^ 1J*OUzUڜVو~P*D^UfUq4%.2@\PUZ(WeO>ICUi Xe=?Vt6*c*ɑ*"*M Ұ Y }JUdX-U[-f*EWjtUf*Uvv PUԋdUl3p0Uqȓ%,[ W-8@SVqۗU(U"mYU!IV%UU?ͪz%qWqUdpUZPS\FXeDP>!R^ -J;и*˪~gaU(U! ʖ?YL*EtW(}@o*YH UdT!RK $U6 H0ToaZؤE* T^ʫFlŊ2E(QU6BUq UQUU6TثbTUqبʈyׁ*)*6G,(+*T8* 5*U]qZUiU8{QUfJ䫪Z~}*fK=H"Px*zTOex*c2TZ, OSP+gTl]P2AR=Pe TnPDXAmU:öEU0U#+t=IHvYHM%I R2*R$U~yP*7G E* HWQ8ʢU@܏J;"+btN 2IT1YRNRetA9Bg|9UUUQ"2V7!آ Á&UlI*] RP,_Oegw}OOOeͫXUPńiA&*c2AQjtR~qUT930UiS`TI)ʦ#łTIY*7/BBLM$4U i,=&c,`J*W*[++1U,TaM*ng*0U0Tq1UhtT5oS &UF(;w *}(dVQUaJcx*j@N:sEۥEJUXLKXڞ WOeLU>* tW**=* 8CT^D(D.VT!ߢ沓=φƈ*]*!KRe*]Ĺ*T4U `,SU*oTi uSAUpUv*2U)b ʢ !49YevRW"! UvKe$e6jY奇YeNr#ZViPY!JWݲ*oNp<- \o^\9޸\;paJ%y>W{W9X*}""X *vq%*>:U,WL\b,&qَLJkUQ]ed@WZXu)*LZWa>yuv*ݗG[tmZWAm^W &A*j *Tl*x/WI|VR߼* 2* #ijUm]|uf3UmWJc 2VٞUVqV[e׉`LpC*]2K*۩/s>s\tWWqqFn bƂP8ӸhQR5U*=fVcN]m&b鳶 {N̰m-Kh&UVi6[eܼA>۸ lmH*vVkaV{5 (U|@D`6ʄU#J 6UGU<xbتgT'J@U2hYQ &J~*-XQ`̽7r}2~bILf)0UFRlOSex4Uoa̡^Lu*AUVU1Q*W!.Q]VUf%JF*l֪ [BU*DUs:wߓ=ObM*]aUqUڇUq"W ɆU6 `-Y]C*6eU,kU940%6>*YcmXeN b2H`dVI2F*We Bd*d)yFV1ae1 S,ezddUY"e*ZV!Ie_hY=Y*U} i\8hUƑ -UNuUWUrBDW84*b**=}l^ȫLD^c*eU|WDZ7O(tvU6DnyUHij^MU4U@a{Bm0V*NUdIUt۸N8\MqC*?HUX#J~*5¦*4Jy*GF*N UJETlLNw*‚UdFU(Vq [#j*dm[[*Һ Ny2ԚWȫ`u*&4Ҫ *&P«e*GߏUj*$EWxU$*dn~**;*@WaѼ `Jg˫g/_e>@]˫N* WmXUZWqLx xSUbk*>Uoٿyty8҉*N" T j`eK\`EOaE{-YJV.eqK&6hhŭ(VdʙGZCgmpVY(vYgwQYvVpV,YlaV4eVƲ0%m2JTYa!!bXdice5<+)V|XHEXq6V &rb%V,X@Ad䧉L|K!VPer"G73PXqcV"8%綰b +dr",ԇηضsX=MR Bx VԔXq[&VP$Vg V@Ub|$V%XDĊQyE,JJNXa%bFbXVc-TAVJ5(+nD E +7!dP YejkdIbV(U"g'*3qUFWe Ua'vUU}WvUrtU^rULUiWe4*Ok,UHCPV'0?<ʌVBV!e7k*@ViXeRUCU㬸4QUo]b`Uʸ{]]PqU:XBћJ8pU]4~UcXqUoWJ W*vU؂UWg]fU~U(GVi^YlVk(vUW$#*Y'ʞمUIz*Ӳ*08UU1һ48&U!gYe3 L7@TrMc* !U@AUZUUqx G*{*1U>U 
PUJG0EUjX\P$Ҙm\KtU:D\>S,-] DJX*=mY7)0FVIi,!2ÆWH䬬XYer{߲q$Ye9UYetbAe$۰ &*L\UfFp*ۓ"dN3WViq$JW*X@V? SiʐVy V~VDPBLd*]ٴhhcUoU %CZҥ*ɗ[vJ7[lUV1^$UF^DVCX1FaeָEa9!n +*_a;UXi&LarNXyX7o \_X(};_P_n_%*? 21P*fʫaaxW1T^Emo*7+c<%Xg֓>WoM%b J*fʫ+ɫXxWW!پ {B«4$r  tL +JV apV,X1wV>Qr.F&dX)`eL &VZbA]+$H쯼I|oh|&W!]_eKd*,M.J(= pd*^#blT\EX\X\e˕@\E X\%Z[*TA[7%k(B]7Jov mUv6*cN_VOVq_mmVVN%{4"'B^|*o} iTlQ7u*3-&Jlvj[lp*5j0]]l [e W`Xîb]qV9*PUWqW*]/ҧ4JUܗK\]Wُ\ rt}*ʀݸJ%UPK\n(ewY*WQm\]UUB\tC\2p7;Ui\WIŹhmhYVRkmB(;*LmP-BۡblVVX*sCd~>NJyt+UQiX*M[l`"/*]ЄҡeEDy*T4"B(bu:$w-D tݥ t! pUVy{FUXDjZE Ze1QUHj[{J[]NULk[EN[fb.BޢoS.*4b9ʸ)9o[**~[LqՁ*/GUKHz@n״EU9"9A4^ZeU 0(Ju? 躄VY:*n A2i*lVY R\Ul*,bQ[ś[YжUim*oll7`WY4*&b>05l_p*JW㫌b |,lW!կ}*ª*/J~/: Ċ&VD+_+a,J2i4V&QNI.JI+:Xi)ce"XY~ BVYؚȊ"+4&/<+>4V{7ƊIl:++)$VJ.E<Ċ+ibŞNtt+-Ċh+L%V[ V, Xa3As$V 4 B )*FSD1Xdy%v}U1};VW3Wyë4V 2> qyGUz$1y~˫WW!*2/_%sUƑ|Nʹ[WiS_%E "JJU,+Xx~ [ǴS]V1Z(h@ZesVivVو 2dSU(b: DVy%Ce*sRy?-thYYOd*UzUߐUƳOBV ҷ*)*xo#۶BĿe̫ UҰca(*s媾kGcIּ IlM\Be`}V]U)jX-.а_*'UFQ)VjWcU!vqUWtUȶ] &]vUe)UqAXš(oU Vial.bJX**H** #b(b abbU~G#1\w 0XYylo X7V1/Xe=Q*&*4$⎅*n-yaUH;U!YU1We{sb ⪼WjnUa[vUUqU:MWWuUUq?j\b*ԥ*(Vp^Weub^\*UlUON2ʑaUz԰*]A.bl*nᨪҭ U% ,,Kx*n_TUUO27UPU:LVnb*>U ]#]Z*f5B.{*D*.6bׄbR̾DVqWItURת"p m,)mUZVYЖ*"ݶ CUm*n!GpUm^UFlUdzT Y5ҖUDY%"-X7KVUZUVɡVV!Ge&> J,J:U,3JGvU9!㪴턫l* ɣb@oSo*v:WeNJUYE%haU~]*..ʘ|*;,Vx¦C*].0۴ #iI@@*q# [u(tUcdesCVUdV8(UF*e4[e0OaTV[JLmPTҧ"Joޭ ^[|V 'l'mPyS%B^/\ݻUR$EmV J_lE@lP`E[AhJ^k8\yW^Cgo**c:b 6yɫ]e>U2Wټ2*mUve$lib̼UgU7TEln X[[EW[E"H[`D_bC*@XU͆UAUHVy3qVUQ&ViXeCm0Ya#UZDV9E*/f.*-KX -q,BX*jU3@4 }T}=hJ [mhU-O*G*CU_[ŜUnl2[ ’Jmd!B%BXU^P_*ַ5"-)"`&Ҙ ʮZ ҞPhhVѶhZ1iH8ZtUW*=8V:|iC=*oZŁ h@V*c$ᴭb;ʂ\X`VK⽀2UU(i^]ZWy]GWCU #xnU4O^EOX_WQ舯2#UXn_E_E4Jc +VJj`'v+"9DxU$}gU bૼl:ʐX9]VpV?+U+4+oO뫌_UWI`C^gʈׁ*J+ ;aŭVYO$V,lbʾ b^N">5+NVz؆!V ?-4`ż,rb"/ uIUt*R5ɫW醥Bi+ۉ`ŬV$V<`e}RؒVfmkXÔ Va؊}+Mg*6 aVYKde}+B@*QV|+F Vz g+aXJC]Ċ "X+XqĊ+=X9.3C+" b" V|KlbTbe} VL3QYZGYi=eP_#+z4 +EY&JUV$Let.uŬI̊ì2+'Y0+[=Vfy-ƈϢ~d(+Pʊ*+mŨ 2+t15]@zק¬{̊ìt+̊1fV82+Ȭ*Olsʬe2+ì0+l3;FXႲ2A QnJWYVF!%J aaeӾ2s|#'<Ċ9Z+⃶dV |XXY+% g]dTY!^ce!U>Rq2V% 
X`+Va}T9&ka#+ AVdYaqAV(`Y][YMUYne(+S.J`##PV̊ ̊ jI¬t99Y r1 QvV⬰L"V :"Њt YjELRjEUjk¤VtZqZQImmHעPme҅nr+؇ yfǭVGV:nV:eZoXps{mWƅ$2;í|>p+c5+'neUd :܊w ␳X fV^VVJnEKAV,kR[a]ު4|(_xpJ[?VΗm h+sǯ(tV4-V^'me}BtGp+u˭T1˭xAV ͭ0|kn`s+P_J Vy[aOn-PWq"7V>"AVr v8VG%Y%r+Cn6q=V|[a r+V-neO 0Vnj+7<VHU[ole3[q[]Z!4RcVxhEhdfE!J+$I*K,V ;h&ȅ] 1֊P+n&bBZijŲ<P+ j,2VF_[ R+6rQA<'akVNM~ܵHlH҅Z+;'V\?BrbZ'V7sVxa{r謂:Zi޵LV!ʨdRR+(6}(iix8ZY@+=ZZ#/h$xr$ 遲WʊhFzQV,PYYlA-G*+QVw-enI|)+=WVYkenƪY!I7MEˬxa66̊̊ͬtYy_3+ΒY\ʬ<+Xqd+kY! 0+r0Y1f 2>'X/DZi]iŰJfDZZVi%;J+nH+K2V$٤VHljMVm<;8qV( 2'ZVe/ kЁ֊cy`=[k-hJRɯZI!J,VDWjj%2U'%:45EDZ9O#P@T]h Њ VV:d0 ҊumYۆ)O2g44ӳx!O,!O,bBuȏ@[_(J?"Y˿:sÒT!nτ<̓ϲ2OFhiONMIaYU_ZB|M{Mol6#˫>NJh}QBlp6^+7M/ h_.WyR7в-UR2{⼔2&,D-mJmYR9}Zx"rcm8 hRU,8@OLe(]_n1D-}n,›2c̳5gY:YfUKo-ep䝳%{?DEA"Z z۹hYJ--jP-RDˬSJK.eV")eveɖ2->o(巡~$YF`t2ZfS SFl9hьs_ieTjuH˼5$2R>Y_JˬDW!YS*e^Wj k:R~W)-Ï2lѓ i= ZYv{I!-VKU'c̆9# RՌ\0WRau9?oXBZfUTFˬ: ?6"Zf?Yg=FV_vQM~-UFWSFn^25H-U Z:SDrh"E-+)e>%Le޻/yŌ}`2ZfI-U]|Ձ9ɟ(WjE#hY1Z&Fr1hOw$Nv٦øu}LhYFe!Z昇7hO;G޹O)\SjOyfG2~AQ hi}Z}{/Q:<;teyDg00ONky纾pJgY&2!! 
hYn-rd - RBK.sܻL"TAvX ^W>=廔2׈NX}%(eOϽ'@pe2AYf[_#gYf[Y&ڰU<52F̟}>u?iYj8yr7_,|t*sEg9dOtY(<~1{gY@,>LCteJer}f,#,5i2/_u,K#:tFWIk:z&,t'<ҙϲh(Y:o\,7U->KVegY,K/>Kg)u?/W8-˜2B˜%G -BEBQU@ٶhY"!Z_PB|,RBCkDe%Eh#%,s-Lx׆(e^#\ yKhʑ)>ON:A-%|ԁN~2O:'c-D!)e![ )] ,SZ|bhΜ,@K( hYFZjjSy{h>ˀY梠2N3rOѲv!ZgULe^h%W CaQZֻywZҲX;GnKi q\RZRji4BZf[J˼i7,(-qD)eRZ۷y*BF2/G}Y%BѲt!Zfdy9w\B^7rTUh#>)D}7Zs - %GPB˼Sg}] ?~=Jhk Z -faHyW2h'4jyNM|y$:G-ϲ{嬓~ϲ̎;?YNa:R>eҧRfYF)eO蓓Qlw/eCG4Wg>j娘~TaJ hYZ*z\ȑZczԁNSm<)eP>ShY&%V[|V2j Ѳ"ZIjeeChey6D2Eҡ-K2BѲBW-%D˜_X^9HC'ei'eYS(eqR+smJiX>ᦥ evLRZ慸cu~ViXG_2ZOܔwȖ X.F|ZWX-F˼h}N hY1Z;4)e啐Dc{-R%e]e_f"-%e68I~2Zj$~s.i~穏zIiY'⦏2W>zTyݲntiό4ŴaW@bZ4`R1- TL2Rp/D'2@E1-5(OWӲn´a2-5*Αv6bZPLzFi#G:'miXٰy;@i3s*-/a*-2|8JiL~´[߷ ׇ2Ai#uۯTJ OԤ޸J]$ܐU<-E(g1ZjWo.x-=hC넏H/O"F -38qBD)'k-")y+Aw/J -H(oC) Z:D R:EH͓뜿4Zj0]k{.l!-~`sO>^FiM:7B!-ݥ)G -=i!L#Ҳ޸gb uht`# e=N9 -= y;RH6̡KR!-=iwJ|}4;׼Ba̻aˑ>FK1Z xA'f"ZYPDlV^}w[YeO:wZzh-2O*gHq&57B|Tz wcɰDh -sW߯h˷s.XGh'9W~t"@Y@K2Yf R:l!xJg7'Ǔ \:!R#Sg5:˼C2 ų㝷9|"ugƕ<kNe]t~:|VOtD٢||9/e ,eyFgnwIVBgMUEg?+LYvoS:-Į)e0y:H2۪,WtuUgY/D,_+R)Ge~҃H|Zgk.Ur6T>rT.a4-goa,̍ґd|X{P@zUeZzu-H؊ZI%QBE-s9\eh@ZDv ǒ+@2.~CJzZ|e2T$R|g,r,|~ZiBɩaJhB|UHDhZg -!Zz=!DK-{ԟҳjj*D-sɑO}my"t eN¨$G,&\8&76P,s0{%:|2ɨϢt(-:KEgU>A*:Pt:22ts[<_:<}t:O=*uYK`YLgϊ"QfȑYjʻc]2J̔)̞ CܶeK*}-,=_tДR|o;dF,̃ԓ,bujYՍmR:\mVeqH=Lcty!G,i,k(eggYjy~Y* o\X̞jr{&ޑ\%*|^ᓊ,1Z:RP2{O*F$_hY"S!ZDy2b]murdY% J*=1-= RTRN*=-UQ|Jϼ+H2 + -ˬ*H J -V2'TzKAZq|&5sAZP8BDZvsAZ*%5YJ6e7eiAZfөeTaF$HKg, -Eh?m)sqZb&в 2h\z= $`J9WOdO9'K?rhoW_H2M-F˜|q\LFK=>ɦ⊇h$ghi%&ѲDh!GeF8#Ք2{OhX=lO:Xs~|Jo_e"dJީy$+b(Z˨]BnB22'G^pQeэJhB˼uXhY-/:s'u'Ri~yAU_Չ.IJh4Q=)^I'Z:g`Z q= S c,sqQI ,1>oϲL 3\Zzm-l]fZ*$#ɤ[Շh:>KFhYbZ-Z&Bc2@{2OX-Raeb=H2N1ZgЖN-5F!Z\CBn BKUR|2[^X3Wol-eh=NeQ'>bBTl' IG!Zzy'$~j? 
-@]HO1J~guJ1YT`GiYև&y5 ujܨ1,϶@-˺N.ja@- j- ԲL jc tR*P׼r$G ~jYz@-*%ԗ!,2RԲ,:DjL =ʪU<|7,Ƕ{:+0Wʳr+íX4%BYZ\BqŌWRe!.m\ Op~B'BH; (brj+$JG|E\y}W(\@p$€+&R+ )_"+Χ\1J BfJ8VV0Z[ime]o$ފyQz+oo&{C=o(U{+LoR_+7Vf|^sq}HV[tV*Wlv,):9`+KZ%Mmc+Qke U kbX+wj/= NVol./4؊j+VQ[9Jsh+ X,bI}s+ 6,&ͭ")rr+$VA[iEFoRVzt VdV[aݲm#VlY& 2 fQp+[7[[9* ڊj+ͭP$b%܊ p+neqpV [,rf]ͭ<ڒ܊nEAnVv[aJ.V[H)p+ ɭ:meV_gj+ E[9O.܊OI6[V!bJ'b'BA_JQ&r+7OVb_:+B E·E8+NFpEi6]\aŕSq]kW% ew+f X "l+T+lq}ĕLqr&W WWObX@X.BBx%W4W^ As6WH[Z̕>se8;kܠ&+ +Js3+ޯ+7u+7z_+G&ݑPW,/u?va^ nٕ],E]!$uCYþ+WڄWZ\O+5+FւspWX8z]QwRώB]+D vWX#]dPܕ qW+ B؏H4PB:r+ax~WxyH@V+W -ݕ)pWsr5UL _ݕ: K+Tq X)rѹt3^q"Dr{C7R^yW4W4WF^Kp+R+gc,[-Bnq+aWZBL+&|+W4"WP R_+W<^9>W> f 2^ԕW=J׃(r=I[aꫡWg^9Z cNd޵2VW@nꮐ>S+++Th5B)ʩ"Bx@xX + Wt&WMg%QdU26k:\\V蕃I 1WJWXMi5++\yelYKjr-=[++Jt+>WȠڕWU^Q#^q6"Bx{^^qń^XzgJL^16'zezWzEUJz JlW$^BB,^92U^q"@my+'fԉ oCze,5Wl6rQW z}ɳ^顼rƻ M(++C%_q..)rChl/~ TN}_Q_WF@xW8'K,pW w9wWYs$J]wS+RQQE + bP@wvW]Wv 볐WWy%ys WD$W4W J^a}e9WWׄ^!\zyRWR:JjyWM++. =j+a+++yix+Š+y2WW7g0Ye(>+W y^u-(e*mD^2+&B`+2:+Cc+FuW>dWiDyEH6c+OiyLWȼW^50IԄ)J` kxR܆W3^p+W,^ixBۆWNyz%啃RKWX[^*srW{W^!3_ b)ʇDZsWV_cG+8[_U}aC#Wt!WR_e+g^jW%m+^Yp*%W}Lٲ !W,_agE꣕Wؕe +}s#|h~+qW|4pWTꮈ]®(24"0bJDؕc5aWV]<i޽–ʭϵB" m+!25ΑF[4ri#|+.IJQO u$ԕ>PWƞ9idc6hA+S+kt~+4pAWP]T]ium+ VW>TX!b J3 W^ٮ^2W.^TxJH]^wwC $ZxWͅWn ^>/ mwR-)ڰ+7#ؕj b=~+o~ kͮ\J¿ͮ.bʘgf ~++fˮ<I{]IvtvW]aiݕPfH]Aa +^ g+V΋4r<0+]a!62y8,QWR]Bku:jO K1XZ+9+VaWv4a'rX!6qWl8+9+Y>\t#\2Qs<5WHZ8 δr`ila٬#ʌ WЦ\WrH2s>\9\\!?ѕGGtEHFu^!tW踄W-5+l)Ҁ p ^ '鮌}+{r$J72D]/uv7dՕaԕ;WrZFŕ=-xWᵸB0V Z\q !-:\IrV+ +v$P/Ȯѕ/t98]FW v3{]EW:XJ!<BV-?tFW#+<+3X)&BtzЕte1ch抃6WjXB _+lJv^ȕrErBr?r"#r+;ʬA! ս+{-@€bLc~Mtӂ\1JM\\|$Wp%W+8rV\q `b?J%ĕA\q<+n3+Yjp 7aRJSW\y3Wed{+޾z+?ӟ[[([[\ g+4WZ\񉍹Bz%Z +QW\Q\\Y<~` 94W.6W&J#+" \qz 6W5W\aCɕ Hr%dފb{+ʞEx+YR[Ѣ[)[1,"@r+` ne, ? pE\pECpE Zpe|+{.-!ʞWXnqECqQ\:XA\i)+ɐoq2WWi(HY+R\iqa {e- 䊝jJ+@m KN+z+h9+7ʳ\ak&WFR HqE_qmWFB'I/reH&W$b W.'7>ɕ>Ò+ %WWtq%WFm\qhIcѕ"? M+w䊡f$ɕE\9t\QjrJ>++Yzosb8sx +>]5WƵ^\ 4W>MseqJ"? 
B&|+c5@t1{AhɕGA<,KcR\ŕqZqEOqZW]JmKqWnt:1o++᷸" {(9q+c[:pO+WpKoo\+؂+>WX[9@W } 9/ / 0s[)VlEMOl[ѐ[qj+„x`+߮ZbZqKOVZP V>J`+4򹓲*b^؊e$#Z+nZ+N^S>90Vr'B+"g+.пɕ}\JX EZN BBޣqhqŕ C\aŕCWȱQ\g\cnpApV,[Yx+hb2 VNW:[pW9qWb ٰ+>fW +(+n`-99+ X# [ XªPߦJ ފvz+J;Ax+۝VVERp$? \OZ>ͳ[pLpKIpu/9z5A!%npp#p0`w<}o2޵Y>S[~y$ F<|W#WHhpźWg6IqawW\ٰzWVY VW,Q\qK7V\akWEVV[q[;Z˜V[ [q+OpIW 2"<,(ppŜrBb|V!V%YoefxxB2 /o[ɅoeOx+O=,T魰ފ315P'[fn%d˽&h\Bpt31: 7HE?)M 7ԊcL6VZ Di 8SZ!V@f[Z@+7 R@OpV gΊثI=IVBpVɣEpVؖ YJYaCDf̊ O;+:+>tVYaRoxS%&@4guvVN6QY!pvVY947pVtVY]Ying̊b3+W!(12+q(+ZY ۟EY!Ve?B YwtYy?H6 +7o.w+@f,2<"+}ÀΩZ +}$lJ de@_Pb%]++hjͬ̊rΊ Qh?l ;.JF =ge9g;pV,J,. ẘ#1 fCdVZYrYq3tjfAYafVD=eVVYazaVc鬐N NЊ=ЊDJ[6@+v|/~!¦χЊԲЊ!bkB+mB+Ws-i `UYd-YynQ\ !B aZ g X.b$ڥb`WeAWY@YlUYѺAYϢPVVYt+n VJ-fBXXZcEAcE![ce.4 r%__C|D|By^YH]UUUPW~Wh*.諐Ғ g6bbkN (Y*U^_+P+@R+ZI?G#"or]K!R04jh' ٠-ņbXin̊Zy>LV!cj+VXom%KS`+th+ƂV ,j4Z[a6,b(HlZ[U[4w쉶2irV^&j+g t2[a-=K xͼgZVZZS+֊a|Z+Y6 mxj$2hEXh8ʘr?3 [5-MhC+̻@+ V, Y-oTZ!ɩ R+VİVz Q+OmҖVXQZy^H+"B+)Y\ﯳr J#@+\Gj@+$}ZzZ9VZZaZ᥵ڢ֊iZ+, j+젪r\IV[1DBZ[+ n VFZ5jKBފKbBȪSXfWd6VyhV ؊mb+nQ#I؊x)B V@[!GNkmWVЅZa0J~( aZI* 1ư "I’nS+ $qjlVt˅N Ԋ)R+ J}8Ҋ J+fb+hi% Bʊivʊ*+_֚YyY2gYZtVgˤG% 6ͬ4H'aVXof2+ˬX'/ ʻIYA0YY0+Ȭ1 !*+;22+kdVHYqaN~'⬐g2+}aVȮhfh¬Sft.ˬX$O:+>qV0uV0+;JfVXhfŔ~KdVPUfrYY(D*tV(Y/{fVܿXfJSͬ?0+lfBY PY+eVVXdhe.SeŬaゾ@YfHe嫧Bc++d*+WY(oejAeEAeEBeEfŬbV-ʓY  +Qfe XyW5VhAS+4VXjciwX- ]$a2XX1d%;^Xoc:\bXubK+T i<21VGcl.JX bs1õA1VjXіXvX u ĊJX!w qZ#ŬXw2ߘ*cMe=QUVƢP㫲xowȢke;ee{3nY^ dV,Y>f9Z3+ 춳RRg0$:+ɬ|Ya_rc}?Xjobڞ&VNB\B8Rkb ѹ&Vc% pbLp+}@XA8X\+b +vvu.2Jh)eZ:Pq`!VF#B,GB-I+#%ShVca +,jYncłRv0VX9\+nbDCcK]c^Jc?6d .J8WW#y^CUWU' Q'ysͫ0h^s^@-ҲVyU `rry$f u +v+`*W*{,UX$JK0+r2+*dVrଐʻYپ8+.;ꬰ7⬼|:ΊK:+EBr;+[}ZyxC+ȼ@+zfB+5YavVCVJ-I \ 98+g)qV\\mgEDhԜZSlhZiTCi]-s%AZq [j1 +M`8*VZaVj8'Z!WV\ okeB}S+$V\Zqt gR+nk#D}mR+R+@ZѺljgXjEvOjLjEJjN[k{ra8Z1J zZ+JX+d"YQ+V"0- 9~R++ jS+c%J%~Y}ZoX &M 5}Vr7h Kh=uVn?:T\X_Z! 
QC+& iЊB+-o8#Bʗ,2p+B;+ߔ lhmnFB+tgB+9BJyV,Z@hl ,G5̺nTZa`2|QRP\Tp^@+V k t#)&YRgqNffV`YY*fELfŪ7Y-̊2+60+d2+Y7/ Yxg EYa^Yyn,8+#[Ί[㬰;ꬠ$^HgEwNgMeuV4g*pVYvV(qY!v!mhgJ@+-ޥ8ΊJ {謌`=Ke[ iUK+ X;B"H \*CH+>V#PZWiVؕVCkZq]tΛZQhj;Tj݆V.mMP+R+>V r~6P+0VȐY Ԋ`ԊUR+4BȠ싸H+J+K- hK)QK+,?RZi$ҊE-PBvcK+c%ҊC}B"|H+c*xJV0ZZeJVAVRU "dVtV bԊR[+'֊!}7Vj]֊Q[+V2VI:=y4r'TP+61 K7M6Z᭵b) m,HJoVVHiktVڔ[q" kEWknkQ֊[q"2rۈ܊`+`+VDVf[q'Ҕ؊3V 5)R7\!)JV\Yhm , rڊڊ~Vk[acVV~[釆 qx+nފJSx+dBrg{+4ފP@y+z+Kͭhl7BtVA7\UnŘ^s+p+}í^ӈ+ۊ+c+d5r=霚\9 \pəJ7+8HpJvs]Bz+l[v)$>B؊ahb+#/JW~%X; ;5BQfc+EVv|h0Ǣj+ͭɭW"VBZ]k+<_[[1"Φ `o"Ak+J4h+}V)-Jr7Zi&ԊVgS+R+NޚZa"B]iEGiiePY珴2ni޺*M?yAZyZZaZ0J+NVo8F,spncKK???F2__: ݿ2-c 㵙pRLت"i Wx -o5LXR!Jv}cGijO#-ȹb5ʝhN"FX.*<[G*WO-s-J6)o#;"Jw 2_eBo3c[ieqBw j-s٩mVe1[*009ZZH-cQ RA|ϊ2¯TPyij 2.-#hٞG*<@qgEhht-(.3H?B(R1qޘ-o>L(BҀZ{#5QZcTHy 2UZ4@X;CK2ZSZoȖ "B_"wNDhx&B'r>k2CTLy,m\*(0>2PZ=S#HX; VHx/AZƂ#Uw@ZTTy|V(-˧Gi?l2^2P띣2Eqy2-#rDi]kl(-?Lh&L˸پ?LP2iSrZƢ8, + 2 *[92]y(- TXy__UX aZƯ<2z+]i\ܹ%`Zf@5F]e/K'J6UiM1-֥c ǥ4=*Ѳ>h.R]4F|^5eU$FV)]iWeevnj 2=z,E""iKi-Q eGh?2Z9Z{| (ZY* <˸#Pg2:2x;U"!AZJ#! -fe킴4rvh1AZFS2'lK!-tJHs)o{\RB1+!J KiW`O_9PHKw -#Xg^:`HKi'捜~ 22AZ_+ҲxU/򧑖~,e|l˖F'2iU>蟣j?+EsjGpY> %R˸E4R|dŸq}b$R+PKҁZƍ RPs 2uщw)e\w^&݋wNx܅`9aZFt9bQqVri#i%ui}!,_8RxxħR'فSR8){Iedv 2>)_8T@I-H-c&Rx厏2\ėRz@jGާԲ#!~^;@jY2s)e;7Z:eL>jcp@-=j]@-cd"PKOZFP\ke\lGZz 2'+P7Bj_NܑNx7ZJ9-nx23 jfYT>egT2+rIq˔e<2Si`8Nxn?Nxi 8->wBMrZ|f´t`eN´e9/aZpf9qZƤ!i7tbZ$9Wz1-`)ce8-s4X;N,M1qmte7㴌h K9-K,~bZ: ҝ1L\ I,Ŵt/PtetibZflګQӲ%2|eŏ,@1 PKGZ!'_HZ9ұQT28Z:ti@-2*KY~&Px; ˄Ze.XdmcGji5RKOZFP-72n6T' $TVjCسzBUgP-Zz&ԲH-0aSYY_0RKԐZ~DjH-=FjẸ,{gn#te<5rZ&P j}ʑ(ose%%)8-.$Leia4E}NuZ>OEN˘:Le7iw'')LqZe>`sqlo̱GO*8-399(-2fkJiWqН(-n(q@YM4i1PL˜׿j'O(-^2-#$RV0-eZƽ0-#5_,ŴΒiO:PJh#o\JҲ񧕖卣8xSiSH.WSN}3Ye\G jqqU:!P7q`.)/O@-9qRNxLUZ:NHר^ItZ˴$LK:-MtZ\i1ܡ2`:-V͵4(12{e?r&{2*s8iÙϏ2nO8-W-#7@ j1F5B-7EiH-qZFoi1 eܩw^QLriwBlF# iqʛiٞ*Lq31eƠ@1-aZ| JhO#[PZ5%J˜ց; !HqRQ{ؖ4M#bYHE 2Ư!zBlY;y -㌇b>H4@rDZLiq|#b`LH{ϐ,izi"-W4ZoSŤ#'4ZFוb Le㸔2zLK? 
´?i,鴸Ӳ]7HS uZDeyL_,+eT2z8-ḟA}xy]A RL#9SdZ\iq eibZF 20-㥹L´lie6Wdșu PZ&)e ´:aZ۟ey.´lweZ\;iӣ:qZ%봘='wZƥu9>rk[/RqPԲ``g?,@h13OeP+@;9Ə2:4$gHly~g1,2m³P!2NP)eK,ϯ2Ϗ2ʊbWe,=h1Ge УhqMu0qhHH(@ -͛ҟG-ʉE-. t/ 2~f-ˍe;w-rFM;BP9]~!ev"'$U~! |Z2Lt-chrqZzв&B˜9eZhf]-=h1Mż<1e>x1Zl J-iu[S_pOR=LK%ZƉ yeC.kh1Ne ԁԟO*Z:ZRƙtN},-s#,E9{e|zBxyb?6:KRY5k5:˸^X, 䂳+b%8 Q,vKg1YeVegxyMiY,?g&_%8˵,gO/t gʣw\e&gQʑ#>2Lg*vgG|,sg$>,yI,xg1XeM|s'MZVygY:Ć2:0*{*SZZ bɹ@˜w Bڪ>K,.W̌:P:T,sY gT,ֻteYz8XtrjEt壾 L2~RNIz6<%',.˳t8e\,f1źC}{eO—%>,}/ .~DxóŴ,}Ke4?|92j#gq%yYkó{:xqrU*,"2>7"΁7 _:-fJ HS8eO>KYfJ܉K?Yz, \y >KY Y,g>|.,=bg!>KYNg'k/(,bM%DKOlB-Z:bB8Uu"!UDV̧[9o܊%"!LbM/᭭BtFmE le;u2> ֊UWX+Sf5rNu–p+F[^hnYnAVajn.n"r+7r+;[­tj1ʜl[* wneV68ɭHGɭL6@/ yC\VC\1Kteri'<'te -6]YЕʋrh0<\5W\!W. F@>'aՉ+-.i:p$Br*1oCȕc\iLIryrI q&WXmr%&W^*"$W\y$W86& VxWm+\AE\a: ,ʄ@KVfW5SQ,~tts+5O[ymp+[=OVF,EJF[񗗶HCLm\E[V[aCmeXW}rV,nmEmX +Vᭌ̚:nH­μ3V[Y!J3m+6, Wx+Xmb ̾+_ĕ("t?ʬRxՈte+++]qx4AWbKtE-Ktw|ALl_t庐'@WD\!1p'-zpݑ*WH3Wf&G&~>b*W*+εW\"lE*)8yG\q 'JW lGa‡.1W$0W+D%WK8W\Hɕo#M5B,rRn,-!Qu+T-+Xr!W`rRWH6hqŀJ+KWq+[@\*WD WLmqQ%ĕWR z+vx+c"JV[aIV\1x2oD\1mUqZ\ajmqeKԿ \flpj-W$Bbu0*⭴H5D‚xVNx[킷b o nE䵹(>[yw~8ʍtBŌ 8[y74_AZlO4+'ɴŕOq/⊈1Jw+P͈+a-$;lxjpGpE_ p&2* 2z88+ܭv+֪*< Tp0+p+ 2+Wh V\XU[T[q-[VVMV+}!ʘ滀/"2 Y[6[yW؊J^V4VF*l%t! 
2.zP+jP+Zje<|V+ mlE_lŰ؊11H[s[[1Gma4h4 P 5 C+B fb+ɖV\!mlp^H`+%lqk>2ENI٣ց3KWjâR+*R+VӒZZ3jeɁH+߄"'VKVfĦ~eV4heA{V΍Z9ntgBH;+AgYq"gel8Y?|8+fe\)`VDdV5:dV 4LY^VJY Gn© ɬ0SYل/̀o^iz2Tnd0 hz,Ȋ?WAVYF{K|Cg+c~+^X=V}Md3DVAY)ϢF4r|}P|#ìXR)j3+_0+?HYQ&YNWg:EpV̊ĥ̊R0+cmYB¬9|3+ìhY+_&J 6\G4V"H׈2(0V0VFy2+oEX!TcedTb4Ȋ/ʸ%8Rzj!7-$IeW#JO^EV!'*DV'Yɼ-'id g"+ Ȋ x|@VX2mdRDV>AVz/&~On +EVZ]YZgOgYi eZYa ݲJY!|@VYdce<+1Vhv Xh5+IawD+Ic#*"+/dI+]c9++mkX 6V 5VXYq)gYid%rg5]cE:&6Xfg^Þԑ6"S8yI +WABV>"JBC`EUa\`a U+wGXQXWNvc޴J5VX+?J@4VGk'+l6\JQVz\;(+#q,MG[}0+w0+`VYbd/ЊZ~6ƒ^hş96YwYiK'Ja:)f0b ge@2 P:+`(⬴r2YyKmU:2z@94AMI&x皲*XKQşɡBSQUدAQeETv*"#,*ƵU""օbz*Ί0UTqK6UQE*Ef"x=AT ʢt4Pe52'PG@UQ S!ݜ [ٜ ۨ-TF/8Y( {gtPO%8TBLzI( T'Rqb \Z*]}¶mRIaʍ|ٖ p[*; <1 01JImlZ*RZK&R9oKC'P*/Rqe)RR*bZ*њRJIT/_.A !xH=TRaJHL%3ZRPR韍RgrbJLrT+A,ST:ʸl>+23񂴔楧"RJ| S@U_PUHaTٞ@~;DTٷtLbԦ bQLLTfmSkb(jצ*v*5U(mS] T1F& *}$߭Mg*w&URעJHTiR қTaO&UrTk-*!QTqb1"%41I9>&!RefwTqUReDAPTy>$JuADTzW JK=z*<UnȪU:,mPeN*T!^2&UfQ9UڒWTyuX$-ӏQe?r9SUƥt䍯sgQ@REMRE˵IMIYBL_,KHCR*Wkn S%{L^97*h\H*֨6 ȇsQ+Ѭ } V)** Uaw6UFe*[0UPU1P[)BcᏦ\H5UGhoS a*h#* w*2 oI!U%U9I5b %J']@XpפE.I*-|r/b/r}{zP\l"HUe9U*)hTEWU(J'ЩXrR㩪Bw[_I{cүɪX*H(bJ)_,YiU% *͉xHXV,ʪU9, \wjWR\^9 qU#媌 \(qUF7EI(*{ݟWe5XECa1xWULV3U .'XU#o*}<G* 2?WlP# FX?* k-d+T`*Q~h3*LV Z=U o[E [,mJl3XkIXstIe*nP.bIJêؓVQXD֭ZTɸEkoU>;>DH!UX,bʝUzGFDET(PʦB慢-CcPᨨbFU*>D|͝ +ʨrx**]dsAQjDcTq{_EDTURVRe[V"\ӑR8DͩWmO =z* Kq*! 
bup* zRi|r$AU49zTx(詰J#z*P @~0 PeLr%Z b_PQ*=*dTQWkPB@TQikPyTN@F t*ėPL{AiQ.U(P|֢ ;Y*tm\D@P==UTK 0jP<, M*xATLUSkIOk*OcͨxUDSgmL]0SYp0kOTn/0[EL'x0Yb*XSASRTNNV`*N̑`*v`*,NHgd>=LRI)KEIJŊ)WJGJp% TشIeURQmIűCIJ|ԖTU@ILI%TJIa%#P(b Z[UrmH*lђ:4  ?JHe\%ITIT3~s*2 T9+ɿr* NS&Nhbs*]p*8~9/8Nj*ۋMT4aJSnwHjt n, C4JJa )}J*TՒJ*\H*m!SUI/5$-$w]hIŪT%=L$ P#{ ''eB*RRavT hX5 X㉤ƃJSIP*ͽ*"L;%1I+[RQ2lȐT|8%nP*JEkITR2QRRRYjGRfQ3ʭxIe1DRq'A%MR@PJA(W8?BSYI/GRqE:(JTܻHMe?ت4H)6iJRy7R&\%b*`*.=5ҟb=,4:b*SSg}KEKT+Ta$]UX*PRwǏ"yUP*ҭ ~R ?S|8VH'Rq}?K9ʘ~{l "ߔ\yQ*.IRqU)R鰬2 P*R*NT4BJe'ҁl>ږ zmڪұTo¨^NE[NtSY<@US$N©Xs*rp*=S門SSq)T6Sa?TF9AB9r*ב'=/ҟ9vSa59Ǐp*l^tɝSu}eU+BdPQoDEӈfJ_Lj*G% r*ԋr {*}]*t@̳NP/* V*U\lP6LP%b9UPzA:Uس@PDTTxj,JLTq6Uƴ$ZIf$UOCP2֤C}Iq4IO$Ub%Ut%U$U$4U\4lSGMN,1U{VMzRMT1^m,Tٲ!Jǿ0Uf^ Q3M}MlMTqSMMiM=0M}CL/ʅbu[*)hR0U>N=HTTAmS% mf*B'*b#{k-mTUQU &*&at*isJz'hRTe)eOoc 5mSw TUqfجJ*wқtvr*굫PbQ6J$SWaNl_yMFEL?qwWa U*nݷ* %Mp "%u*HtQZ]ec_>tU \e#TʼnU r>r+W U;?Nx* WqQpdIcTo[e{[ťU4mVQ*Z쮹*]bꎸω|(pWyHW+zOpMpBP%۴WEQl2!q'tժ۹W*UnM lm02 3h ʜ5w' J_K*w)"ضJ7UVOHeqIy <[VaMZZefXԁ`d N b⺰ m*S OFmoBׅ *wk+q1ULWa Bt^U^^5G:4GZ]u0UzTuwWt'&¾*cn*}*-`~ {-'D[vlѼ10U|nj$iEE!N|ȯdo k*l{R`UVqɦUԴ*#! RZJ*i.+i_U -Ztoi7S}\U~ 0VX7mVa7a"/Xy/St2*c7N%Xewt,XY*W'Ci ^*+J2smVm[EeA[}UƸ}UV؈'`e,[e<`qb*xn?JٵUm[BV5*кU* ¦8X*y*AXMbbU*nN*\*n*m:}塯~F < $VZXiMA`$ވ`E _ W9Ҿ Z*wbAMJl\+5AX[ ԁ"V,Y bߟbeDA[`++Bt++UVk[e et+@VZY YkQK`Vn1 fe9geJtVM,EtV4,qV+YFÉR+Ҋ >vZaZage@pVFQć/, ΊΊ!:+UCV lSZaEZN"Xg*PK+'ҊXJV&VD"tiZZ!kVb*4ၴ:<ҊMw뭴bҊy5OPZYxP+KM @4 6¼Ok1֊ofV,U[yuV,hKCH kBOgh+X[Q[q&~ʕVZ ͭ`ʭr+wp+VɭH{ɭn O D[@PP+R+MEZ">DZe欑VʔV"T={VlH+ci3Ҋn%B"Prg VVRЊ($m7 ЊzCK+; i <(J=P+|;J+J+VHqolE~[[aw[#[9X|il%bkdgH+f+9"XP+tdR+>sV[Q6j+ˑh+V3meZVP̯bbck+Uh+8"Q'b+4b$ l`V[1 le8 [Y(]acmE+rW+4+u+] һ6 !r+T( tJ,b E+3_ɕZVK#\1CM\a,2WzQqŐ JM%{6. 
(p+e+݁)PpE:Zp>AEW]^<W1ofd7sQpEUpWWصCpVz~bhHz+Ux+n JBFn[ VFQn>nEne,ŭU{+VzoS[ybAoUx+#vmފ Qx+JvOnkq]V\SZZp֊AxvX+#k!ޏAkܶVt1VZKk[[KGJ?ŤV^=MIB'd'"} 2ekF\lYԊțԊ4Ԋ% ¤C>.???F&w_2{k.N>>g.pqZQ~GAiGPrZWe'=(E1-jQL> Ӳ͑9GUUWiMsϺ.3.;-K)-g>Rf\(eE`J̟}TYFT`|L랍1e}7VkE+3$; W>> /e~OZ{Eim³">xȝ ,⯾˳9,H YsrϲcKzYQ]>oHRd|}QggqTv?gGY۵,-"2 زU/̓8Yt?,TS:> /e&F,x2Oq_gG%hNh,nCS<> ?C1>ON_x}(gG_%bg|<>zUgG[?=< ųeų)TdB,"U>+C^x}oQ_³ՈʧnAׁ9̘2>ty(e_ՑG4{[~wtɧMKg'4G,ҍdR:K5X}YQi>f,)e~'ʧ!:x-Gqq-2 Y~Qe>a|cvj,(c…g]7Y>W5e6WZyGI hY-;Oeޠwn!Zg+2[!}׼;E+e~vtZfN+e6gZVB|kZku0[f`yhY: -w^HK= sdfWyVS7usRㅸ-G*eo6)N ӲIy?7B1-{V+2qQiG-'Gj0-H]'aZjiRL>߀*{5v52GX)izN˼.0n&l !N˼٪SV?DBi?쳌φQL|^䨓tqZ@'̮"zF9-ȏ2&H޳}\΢Zo[ыjYď%ǤZf[ģ^\FϡZ-ϑ{Tkc#1v*+eVey ȑ+Uى㹼DZۡ}@CV{+2oU9"]R>B%TH-k»|")oTLC[~ZYC}+/ߺdG Un7e+ ̛f 2 WFyC_*e^au k-5#Qn*=Ka-s}"ފkY/Zf Ve kCv)XaWA+G5!PVK}:pUo E,ьP-=#h2;N`|S(.ҕjgo NeySmR-2Zf9W;2Gl5(e~r8Zf5)e^iZ,X(eva7 k̿ GQXxkX R^khg9fTV[l_Z U /OE/Px-v?!Zuk p\{7W} ew@ڡ9YI;ֲ_v(e k˞פ% NkR[Z:[Z*ֲt?Z*e^AsJkY^i-K'ZmsU3?h-s\́BSZ>:\֥KXZK^]/lboO}u`&/e8rpY-WKղb&Ffy*ּ}R j7лZ-e j#-׿r˜T<}<0)4:P-˽:p.D_0ZfhN^w2A7jYwbˢ5Z^9}kWf?*evm$JkYZfQշb}ݔ2QSMd(^K w2H#kY&Z8Qy-K2^/e^O~CGkgZ批kZOܗ␣"-3G 2}w{z-s֡fP^KobykYZ5/y ޡX>H]sTD%ersqZ[3\K]Z8WEE `B̫ڲe%$RzEjOZag90m%eyDj͗}L+) R˼b2OJjC~Le4Rrb:6EA-sj+}fEm΄[9-󓢈e8( PKGuZW!O%j([ *r9sd#nɠ[1-K忟"´sMaQe.PVCݢ2x%߈rV(=N˼2>,5Wwy V}sǣaZfsƬ\U ?´8LKswWfO* 0-o_TŹ5re k5*n0-s ;n 2n]9-Xi g9-]WLY(-|G+)(-AZjXW^U7'B/'2;BRk->9LW9? 
\FK%;Z1|I-ek2^h򡲠gEh˝xgyRq -h?*F-l21Heaޜ]ⳬ_xOSK/0A=-p|,˝Z*-?9ڷ- ho^ e6+ h<+u^Y\?8-?F`܂[ń{#e)Qd232≭E-3hyn/3pyGA6>s栺PIJ @o4қ_hyOh @K_!oŕ/>DYGô#,#[0ceRoe-ϭ"?cl7H?TmY,[lc>gvk3-L52*0-󸙖J LV/iHn|;K|?PAZHKp6RHs.T6ydӝFZgh5 />7SKYhe -ύGtFZ~mmA߮ fAZz/p>VFZWheRr@Z6˕S-/Fk29D-N?jm=.Ş|z|-ϲ`S2ˍ7gpv VZ ?,JOr=>)Ziy{sQ=ζJu7,?+{ے=}`Z%yle|fZ|fZs0'y;L$;hiyy/IiZ|DiyƊeʹ-nZ -H[-݁$ԟqZ&Nsʲ'Wʹt!K?,QZ %QoeN Z+(u#G-[%;bi hy 1Zmtc?&].gWJ2-c'}` Fr%2*C !ZA.O-M 22-IW'F>hyNla"s v,!Z _<Z83 |(P'*Bk]eTZfkZz¥˯#< 0;=h\,<@3X76Ž,95L@sX~h1e# Z&e !Z^7-~%W|CL<4Bw"J DԒ@̸5Fhy(Il#<3ZȖ$ ׽TDOf/DKaɡ_P%-Lh @< -^2b-ύ*߼g tkIa]>=#$|LI?hy-6LL@3Y=S{E@k!BKY@5 2,SׇZgy3ϯg>Ο8<Ԏtm;WL>so$}DrɈhy9 lY `Z+!CV-ъMn(!Y:}>Lɣ"Y6t) Bgy-FL :ˌ5Y^Y^Z:)nyNOK]">kC%+Y^crp'h 2uU,eYc<|=ۂ8F]-6@ϳbL*=6dNal*,flCYI,cnl|&6k:6kZfy͝bTmbRYe+e ."AeR+)+KVΫ r XVN`Jĕ@X!n6ʶ%Obe\ c}^bEeeOcibF=TĿV,Z9|ЊB+s(VLlZNp) QVV*AFZ!7 i"#i͡V4V\+LJVÄYԛGΊ:+Ty9+TBI} g>ЊB+d J]AT^wV]h%kVuZ{A+w<@+fs bŃ4 6̊ykofesr(rQzN+\tV{Y]wΔ:+f:d< VׁVFpZ1^h@++ B++3tf#uVX9( +X+ "$bmʜ!VXB-tKXaf +" +JG +V)XOr~9+2 +BXqFu1ʘ+Я?+ wXbŌ~-AceפX,Ns6V]c\c`2̊0+}aVRңrn)D)6Y Y!fmʙ~/fَéBcaV Jv_GYD AVdDVdE`@V\VYqrVYɞ +gY!jDVd r< T*مaVHaRV43V^ T*8QX #f X`e;3aXIWW9  o`~+}{+2+.V1V9`1V`lV@XwM`Ū0׫++g.1 `JvJ_XWYKUUұHxhxUUWNxLW|U*;UEUVzxuUU*U,WP"(ro) FX)s")דX (,7#$V;%V,mX9m V>B=+Z$VRJjOVVҥ`}XaEa,,Xٟ`e*%yK`ԕVBXe`eE\XB`ŪeX.E`) 1VGXQaŊ1 VvV>ae<k7SaE$RaVq;JI>NqIqu #H)̵";0Šʝ`e+VyN>}VBX{+G¬G`%E{*V*뫐6*S*U"$rR"zN|1XU*YZc^glHV2VqA,bha*FU0V<[${*;y*VUijjlom:}Uޏ$^;[rJV0.UOEW/**L^e9*ë5cx/6*gU0)UZũbi-'mUk]EO]η)]ő_\e9*dЊ$"&r`p !\i*;DqaPqD%VJV#*3[(UK`|VKW4Aq Ry,-`gR9? 
: 8K^JrIKy*DZtUHZWlKi\OUSWeUqaBWqUحWy)~\3J3`sM BJV  kr0J3*: *H*Ԙr UoUaJ,V 6!b* *bU$UF݈B$D[{ 'c#VjZU =iV9 K{*LU2ZXVR&R UՠU4d2WK;Ip}Q\e=UUR[U,CVq/"""Y)WW*Zk*]*^`trFWQQWqY"B/U\ҫfU^ǂsttz*XU%˫,!U${Uؔ^ ëW!%h|q&}+}+hn͒@`e*B'Vh).9=p |Vsi"'+d "l*BB+#)ʐXaNah y#ay(BV X7PXBX#Սhr&B+ +N++Ox*WXdyV܋TXa +zD+ԑ+VcP6Ɗ1Vzw++މ5VHcŨ+kF^]+$uPkweVHHK |أZh5" 8+ dSfsg:Ί ΊS,MgŎ:+A\:+do 2V JM8Z QWϙWu>?Ǽ:KdVT +ו!G@V蹡R_'` 7QVlbu(+,TY1EFen*+DGYaARVY9tW oIDY9IAYCbʊ*+Z*+pʣ2*ʊTV,PYAʊUV6UVeGN{!3HŊQY%Qa3UVTYS qVfgf79)qVF"Ya5AΊctxaV#f +Wd:s[F{v4V^XMĊ٦*+6SYYPV dGYje.NxA,%v+b+ԓy+7El + 7 T(bʕnl)8W[*/ %+VrGX&ŠŠ#Š=>V#X(b 0FXa~*KJesVl .b?EZ*V`RAXB|T Bk}qV_Ū}U, W*8:@TydQ2ʩ5W#_$Y_*W!VV+0uCI:5 `}sWa|T xV_(*d5˫1 2jOvGX9alV`vS`{ ^+޴VX 4)G5h VWQ_Eh_śeV/rz$45ʕ&*sa*U9LUAUfUTBU3UeªCdUʪxUI*먪X‹R56M ը4NVGYkAU!'tX eϕQQYŒ:e]et ")24 +ma$V|3\;Xp VYe :dAU4ʋtw;*-BǚUOb%*i9:*gʱ XmB**^*4RVqI JO³@ooR+U$VY@IU(tXeJVdJUDFVIXEUXe0d*J**#UbzbOsaX{XA* * v*ץ2v̔*rC8*ԮJ .^?JQUUU: 薪$?ʊr,J=W*cgTUU@UPsTWUgQUߪʪ}bq]}%]YBGWeUY?.qU"˪`XUaaXc&|QV9^y |*1SrBVWeUUqNVW E/VUU Uq 2 QUMvTq*΁TU`WBC@1ŗTmH쏌2?-J TaUh=鵋*E&BA΀*..U P4AP#1JN==5;8詬x*#Si T1J¼-THNrTT(dN8a-TDwT,SQS9N@J&'a@Å*53#Xr98*FU=AG}A*8*DQE@QsMοT99 b\RN%#GS`"S'TN@9SqS|(qDT ȩX?B Je^ASY[r @)Sa ݦ=Qp*?N`%EVNuT O x#0GEO%9m<o_0SaCw<7=-=Sx*x*|GKOTSQSq$S1'Ba p*Pz*cUx* |vʐ5x*1=_S2 NéܨRp*/N<'uVu8Dr*T ɩS*r*br*4O% zcTTPb*H Mǿ0{1TtT+a 0c1[! ̹̉RTIzTԇTT S$LEL{@؀ RSMVS12 ģ,wX9[9919$"v%Bp*&r*΍TV8t éP6"B9<Cz*IOŨJoL%?& lZ*+Q-^ -O:5 uiLBTza*.KϭS錩bNj*pR4v4.Tl# bB`* rRK$0l MRK'BqZ*Ya\PJDž:e<IL%"Bg$)b=H*ՍBJ/%`בT,IJT(HVRH*4TRaZTOb1 E|J*=gʹcЯRnU=qTQmw[QHr#**bn%F0*őQj$zkXpTB $u:*4H !ST@H@**8B*E}B*VThɧbαT6T@S<ɪB@%KCdJ;EZ*Rag,@ݱT>)%X*~kkTKEUK$ȱTn~,jpKŲ$-`eb#V-/K%cغQKEGJCJP*hɆF`*IS?@( )80 `*vya*T6+ro;San0TTb{w,c܍béCO.épTmB^SI⮜? Sy)Weop*r*4NɝTHeBpEOnt8x9 S!&=JZ*%'X*\Z*R&rXBK%`.TT$TДS!p< yUUB7Uz +0U쯿DOTTgR:I5Jֵ7xIA8ATUEIUĨ*wP/#w-PFUUTe UV}Pvaq 7ID :庒*}ɡTR=!Ut$U\HXO*!b'?$ʱ$0< *$jJ3>qDK* &*F*VU Pޢ? 
1Th8 9+Th@4 $=gd[h<o*z*+z*VasˌFO%r*pS!gg<vHSٹ+ROx*8Bx*TT ޜJT<Tz(5'rP)bѴB rR$r쉛D!Rq0RѼRW0(y') ȩwE%DJ*H̼("R*tRsJ:B)cp1*F|(FH*SbtKIECWI{ -ȆR.RGJ囄TZZIT;ʽT> JA8z$ʽ%b>ᯃY^K^X*]7T,)֥TGBlP*ƷTPÇR!n( BԡTT|S*0)DQCT#TCTN(NR*TN(u R*Ð@X},r@I P*׮Ҕʼ =8R͖RaeB}H*J*T(=F1F4⠫8*4T$RI@*T †@*T.ƁT }%yT31 !yBX*Tv -Z*tzKT=T(SyB1Yh슩 YԪ5SSh*c3dv4Rj*h*uTR%3(??H\`*#G MEZMŅzSQSq&0oLO _bML}1L%Z*ߴTjhJŜTH<Je(9%T:)U,J`P*(: QJ:p)Ol)o/'2 TV$ >RqR!z 'B*'m!OHe #!4qT8CaTAS¨VQqJF,qTHG%8* 2R<HʵrRQ9#騜8*ׁ"rBQ!2Q)MI吉BRuH*,YTLWRQRR; CP*'JT5H]RYi>#J UBR,RKЍJeUT1RYW7ag9 8WL45h*;j*Mj*n\Bh*T8ߜRp*,TvTȩDS!x4R^ 1,5U75ף\ɋ\K5j*eMj*Z8`*IKū J'BR\)1q}-T$۱TR5 CLR!nr{ 8)\ #8A4ʡ= IEOIroVR-T֔RK DR*,=FM[\KDF-KT Kel -j.T/K2%-1'5 5)Z*wR֥T_HLR*&J`"bg)|T$ SLeNILb*l r(r;ʦyMe~46GS/B|4TT4wCS>b4u5 Tbʐ0W)bաx;x*Tɩ1Th4&baʩp*X"nS_KZ,-TSa0 =SL%gc$<{%rRAuQyQz= (*Pu!. QQQ2CDE;Ce % *:%TNG5Tӏr'`5?-`YqkTpP9Q8S%ͧăO!q S)ROErSh-bzӃ7$ xvO7<S"SDFODBO)i~1|SS>wnr)v=%y*G2GBQ^ʜk*P9h= I**~PPN(3X *Q!>gkQ;ʹ`Q *D9 *~ B݉ LLDJ<]BF8*ΐTPRA2H@*gEc [ׅTH $Ge㣽GŢ-8qT2~a਌b[DItTjuT2}ߎJ:*E2*0èQOC$Jmy{R{?qTG6^HZWHTlT UR{Jm{U0泌RWW-$1FT0TH`IBn%:+X^H**P-zBC* HV[ Y8m@*w&!]HTRR+ 9 eG6ob_!rRp{  T6 = 0R8*Y2N P:*劣GŒ4!\TO+!= SRq.RH%QETQ)-"L "#BD}ATBQd**i9ʪ!]TB% c$T*0KaBɍJCP?PGゆ )*Vʘ*r*TɨrHQ~9* :*U_GCH"*$ C}VCePao *JZkcSSS^=ȅz FOqƉaO4NB) );Z[2v jN9xTOєO3<)B)킜&)#!ShSGNR:SSSINn:Cz `/=z -FO_)Vꧬ")S֌))SlGɧYB[S+>&r/>|ʽReE) {)t?E7Y?AW?~ Ə<}S"S ȧ8k>eM*c@'Js0TN+gus TCMq@U2GP2yWP&2Ž sV'APQΓPq+#8 ?#e> ⧐7~ʹrb>,~ -0OqOY=)i2|Z ZX>=QL)aSp.L@B=Yz ?V`))5<(#xʰ1B9 OIX;̞S|N!rm).v .JC)$J#;P;p1vLٰSDNoN`N<%t EC:s)'3ډBcSnF^{3'_|&Ӓ| I㧐SSOqO$O!wTxP1TBkW=J::GNۡ@{Ó\̨TNOunT5c~St S?eCOqNr^~JOt# Mq)OFz գ(WرC=ՄzE AO=(z hFOq^=EgnAO)P*.7T%Q  *|Bѯln[թRPSAeE):G_5ҸZuXq?W.|_|OπWjly+&iCͯï,_ҒWj)I$1ڠu@e R<6D#ɵ$jII8&[`+M`r׭aS:0PDgܔ&9z^W5I^ /W+-yA+y^Xqu}e$JmF_&Wj+'Wj!=~uu2P_ 3t6L4NB_yvt0x+Fj'\ {^ԡJ2~v?yQ#OB'6s]a'2⯬+.6RA~+m /J R[j௬/p ϩ `@X-R1nz ,u !YY%GJ8 RcK+{ا J0ÈR'W>Lqc3D_]]PW `sGzQ,J%˺RRE7<:#tHy[_:/r w6_<$ J%|BtDy>W+u4J 1hZ^uk䕚g R'y+u_ 23SZTiw;6Kg݈2S/ܕ]y&>WVZmh#*zATJe:za`B]! 
RW*HJ LuW\ ԋl^2Js?Qr.A蕪gizƹyF_5&JR}qIim/LD}fPJ)z>epD_>P+lW*FZ>AS_Bh+kEu±4W+Dci{e0^YnV>+5Z͉R%i{~*`,qzŰ%sGkZ^ʬWj!I^]WfxԤG1u׬ۡW*Q#JT镧$lyٽD^#D蕚aU\S\W8&/jye+C^ym"< (-by4RW+'0+5_yoTWf{O=G=-W^gN앚y;;+yB^&JH+mL{=aP (J K WU+$J{Ry$Wf}qy* ^|e?+[-P RF}ՃT^+i_~e ïs'*BRu.+KO :8Wj=S%J/~S<5_W*ï0K/J̄_q~rlgR%W9<34WVW2XfU R?'lK,yO,5ȜyXS7K]# ``Y>XjtERdDO_ JFRӴ'J2ʄoXe骚Xf_e6 X֍}JC/F(2!Xjm}`K p Rivf|X*{T)N,5'Gja'd6K]F9\X&Rk~,'',!Xj?'z D`*^@ ̳&X*zvɑ*,ԋ?3 ²G`yXj;jϊ`%J,R# g V' 2 Uw噅W6X͛42g'DoVA,gL RK-ԻV_y֑yoB%'N_qF~&WïԠיL+uvR{+B9t~_W5RN_3>K+bqD_YF|%ןJ rR<}Κwn}O+u R<סRoWj!J]ƁRw;WjYbl S |tW*H!u R1?Rܕs䍻R1Iܕ4R F!g]IMl 5aW.}ve͑~j={b+uc#AWAbTR81W:.bVsDaPc \TRȕ4,}O\yt,!WjFɕZwEiuY<8H+GLindd+n$R#T"Cơ+#_JeZDR7lOm߸+*+sW;G+_xWܹV^!+Ix{WjP G{V?J}Wsn?JqL+p+\yaxN+e}e]_W~c*J]h+n7?z#+k_F_|ŔMi+u E R!}-xp9<~ж{en+4W z^Y{]'ʑ}㈴2+Xtn߸+Lhww^G*$U]q/R'ZT+6® vNfWv슩I+5%vI cv5 ]y޹ighlU :+k) RK+3P@7ʳʆd-Rg+ʝ z.`'WZUm#RɞL@x31(B&O^(ʳ ?oiueu/ՕmyN'o4R]yR؛k#JsɷR=F]yO®Bv 9aW~Uؕ'$O~_|Ցp]![uFŜ3QW 櫮WL^1xe7Wfb] Œ4#VW_y_yVlyNYVڃ?ef+5MetQiNNѠ,]iUCNΜ,2%X,,`wN,k >⩜BDX֎ b^M6",aAW:g'V$ lX",f(ycbqX\°L ua}N0,+ Z\AXLaZR8-ɕ?bVdM⯘2i+3J¯-bJ9¯ԩ%ʓ~e__wn~LB j]?ʺ$W^6(~5OCMl{w++3ïZ'+RQ5#5s_g$+ÉeM,(2W\⯸⮝z>Qh)ɖ?1XdjS $[{ a\ax&#lZe *d .M#L e{)K>B RCgʟG,,OdK !Xjʒg2GU`y?Ld1X,raXhR['KMh)S0$1XnL-L.~J(Oß~#)k L2@Ы]>ei{@+٠S>_g씺FLL8.J씭s^v9);e()5)%)+I)ft_O9@ ʣdaO祧,@ )-)O5*;zTV`WP J@JP9:.bp*kh'?~E6PPOVgbPiCzx"~AY*&T?O+SLB>L7MWJiA@OH7)Ώ=e.,/獞R$92)nzZ)fj+$M){"wTN% WoX 9e[WN9/9er5v)۟9ri#̰2zKS4SH%0T P1,'L-H㧼ԕ)*3j2Tn1g* $*!T `ܾI -* . ',ʁl@w_ s'rOBԗ~cX7R~ )̴?Ť(3?QOso?eDH?S,K)ٓ~CJ )9K^+Bx|3?l(v*u3'X rcPqX@Ú~EV?pe*&T@Ś[/R:Pr? 
*u*TU+R-I @Ǜ#D'8R*3Py.'T)*йm{ *T;ނJ*OR1\TH*u?J=A`ԄPy/*T;b,_CeއrgRI)QVZP(FbFJ-(|}$YcTbAe=di)r2KJP*UߟBCrf k 6`1p*J-SCPqCe,,1TKiV;G k!Tj)3;r$BPDP8JݚT/FT**Y2 ڷ-B6,HT,YPAeb1S.n)u=i>e)nʧʣR7zʒЖxJy8Ń'R9yZ8ExNNNk (l);9SA NQؑNy(M,锥At t^) )NS@|7 APN,y)^i_ :XtO:]h# )3 Nxf߅SS)S!)XISDz SjVF9M9\P[Sn)c#LT9vxwE3N9eANZ2g)xxm =!wrJ%^APP Q)2)ptJ+)OfW)NqG;y)vJW??vJm~vuIOYhj2I)CO9h:rK}Ҝ)Sr,;Sj v q&z[lS.rS O/r*{6N-bex )CCOPs0 X=\|7RNCO|)'OSAN"9DNO9d+t[RYU i <OaO9Oy+S,OIGP * ЧDrVT TO~*B9dk`Ԑ*׏Den>o;߈T,97*_Q$h'"@/RL2mB*HEHe&!*@TV=2B*/~9߀TRPr|VH*ߥT.Rt]JeRy*¤4|7!<R*+,R*CP?TT GRk)TN%F?%TjST$$J\T9@tH*I I'nJŽG $P#BJ1qTjšGP+^0*!EFe,`T% 3WaTɨ38ʎ @*|tL}&C8*O~NR Q_|HJmOQxRپ89@*Vk*hᬱT,h 2_R)-ߖʒ X*YRIB4уGQ) $ L:*C9|tTJ`T>;FŎ2*_N (QaQ.`'aTj0.ͨ 3"è-&R;UTDyDQYDQY虫bI MFQ@S (*l LFu0*Z2*,q`T^"Fs^ =eTrFi8ʟ#4]?$J H+ͨ)RuTHEG)J~MK'b EŖq**MM (*p**T&/Ew b r B #tEBPKPq+ȫH_;Bp -*4TQq BJ5t|epQy? *$SQ7OrilDQ-GQ+ ETT fNZuāAQYb5JDQyH+[^EfHEQ+d 5TCVWECiir~uIL)wiBbx(!T'gRgu *l(ضlDPOA<T,EP19^Ael*Tfш2.r`(g9T׎B*׎BjŇP}oԕ*C|M0T65T@Pqف2 *fЋ:JE4^a`<6 *[ C^c$q WDX 2GFQ!ڭRWo>M Een2*_0* Bz܋QÌQ ? 9XՍb_ϗrÄQ|2*l r*aT -P()7eT&88.2FeEI} {r%ZEe\)zg;(*FՌFnJ*I6 BCaTnaT8PgQ9N¨<-Ԝ̋l:qTilGW2b*+rz,{9*AT=S7IUo*ے寄ʕ@ L * C`HKB*P*CBe#Ї*I<<+&" 2gB rb,AT|'ӽE ˣ}ﭨ>򉋎0DʾDQy6y#2 y6Κ *5h Ĥ9rt CW*P/ԬB*2KNBPBJ*6Pyk(ݢL T~R|*J ìХC~J9 UQDQQfE[ */KQ@F7r+!BQY0G DP6{9"* 㨐:ֈ[#:*aGeRYܠTD-R1P0TlD5 @Z*tZ*+ܿA/,O(ْ1D,TTJT*HE~IHE^HDB*OfO ٘Ru &R9XAԠyUP*a$cP >?J+/H=Q '.QRִR$1I.+Jtϕ$A%: X&2vTC*/HeO@*dx@* Hb`+T> #+DGRy^ʗ/&X*X*KنRV KeT;J,K8A^"讥2eZ* aTc,JR[ T 2(HP* 2R*IiJ ҤTR96IĆ)Kd(1cԄ*  TEԔʌP*FʱR (mIJM}$'8%qRa3B<(χ)6k/JJeP*)HLJe@\!TcP*P*O y )*#,CH*C|.(.TT*L ʭITTiEl,|-QGbFX*t S{z~;;{^"r$46j*,KT,S1uh<bXdsgSb~`<ۂ t^x*OT*ΗwsHUjߦB5I *Axc+hELwTY. 
LjMkUAUrSZ'bu J^sHAUU|*'5MTgAx*]BUhoP@LRSBpU<ZV"ha{uB- _J{—ڨ*% ]UU꓄݉R5ʨ*돪2քJR|VPRUIZ-Pċ;5ʩBAUR2V#v`SsL/N%T/Re͘R<5'T7'RQ~ie *DL3]U9UG6SNܕnsQ=c7DPFUlQTuI4Rw- c(eLS!I"2(J;ZT˜*ۊ'1y &yL:8wLy24*f \l+HPS${I/UF+ԙskBQU*T)5]MU1H)r}}$ )B b" CyKUa*aU9TU-R3*[E"JGl`4H#T!EgLʯL"|M*5UZK*Ty#<9Al*3GU1[UeTUܔPU9PU*8J;ɪ,`U_*XPX:{U1IVeUqCPVe'.2pH*pDUymT*}U%]xU10~؂UʓyNLk/Tq$0)H,AVBP8 /RQlH#Ir4UMJ~r$SʺqcXNb-LT1 BAU ̬TŶ\;V&Ɓrs/CUy}YiUe"*OhsTLo*#2VVAJ;Ur *Y7R%*5ȪB+aUV.;2̪*ΕUٺAUTbEUj̧ bfMhMzQLctTQTj3M1UaNb!9xTQT`?R^RCRܕ@@xALWHwTӼE8UFTDdTc 0T*LS} 0oBUn$ P+}!g$gTAUU!+2*lNrg|Pʔ,*ԇL iU bwWYʪ'*4ZWUIa? 2*ʽebU4q`UDTejJFUPUqUżkTg2*F)UU4iGUdRUUeN*9*$\ !U b@L(.2*Z*/xSK=3J =aTFTZ3RU_!#WEXWex]qUbo]B|UsUV*)>`7I*˕['X&nêXrK.Vy*up$EXe */qXUP8*X1a nL񯮊+EHUYY^X妕 ڄU;y*5GqurU0(*/%ʚc]!RWźg\w<~ӰJrVyocoKWeUl>%aTHXο= 9=UNF`*9XVj}`p,ɢg/~`"XZiXeƷU*tDV!a)) e.*t|*6CEXSUI[UnaU>*5S ,'NMbwpRgWV*B* #ʃmU> HkcRoqë\>r<$zKUxUE)[MeQpL_K^6ë^^囦~ëcxDGWp*Og!0t"u]ɠ/6ஸsI\F?ҿSp쉫܀~*V+ B*z*P*#Tauq=FWr|3FO襫 «3&J| xɫJ ^E6h*~~ U~| ૸2Jn30|dI^ <΢Y\U~,z}xBCy\g ]jUmW9EU>3ʊL"*bp\^]\YwȽJ^e9+«3!Uff RyS-Ky@WUvW*⢴b*U^?> Eë WMUWqt]H,ws>??JWU#BGUf[EhWh2=J'pMO g|pM+|6> FGV7WkU6]pt*@_e U ꫌_ºUUv_EZ^EEW^Eɥx†yë|oCQ[B-UvU,GWy~**B*LC]:*WU?j`?jLx'rʎ-↡őZ|2US_JwWc  qFW7/ʺb૨z, Uz^ y "I֖W$Sd]e 3; G]e!2+tfAW*B*T۽̒\}Owmʬ<ζlGX&]ƠE8~d{W'j{sЦA3͊ȌZB_tTӈ>y,*׊b^e9W}@ b%^?*cRx rfW c^[IG*ӡ8vUhUB㣫d]e蔇ʜItl.R8m*U,TZW96,|UFYuO*̫0*cƫUW3+*ސUʵ~=grFcoV[̾ ;UQ2^R7 s_Xh@׷(h[*KN~L߀Va=- {VU֎z Z%tDXT`MBpmI_w:hGowdbxI7⿍r03UJW14ë,`U/XI]A\!VbE+K[؇ҾCtD<7+?%bw+Ey{XYwXs5ŽAVYN Y!!{BVgYYd ǁjbd@m2 d,|LƊEY1V,$cx4+'dހ@V d%c++6ڬ-9efś]Yy AìTp g%~ b}d%&O5r/<#+=wdqkAVYa{ +|`¯qTJ6@Y ++#elEY9QV*^P {,JRV*UGT v ruG/#Xygظ0V^_;[Ju+^r_Ud\"0VQ6V򊵱(1+jl\hG2V\nc%6J<`3+FʘY"f%fV6̬Y0+lKUige TŬ]M !1C̊bV`V¬ܢ2ì,;x .3+yY 7f%¬do`0+GYN(+Cx)NrȊ?AV[0DmYg>tBVBYy ;3+v7mAͬSX:eྂY-^8;fV*̊YlQGhY1 ef*J"kiMQZZ S`ܞVhhe# V֏H+jeUVZA2EK֊mZix:k>ee8'֊V='j%|VȂZ_xV+gjedoԊ|[+՟"id)b;֊Z) Vj%Y+cLn8UԊ/޻VAJmVH&jLԊ'S+0ʦVk1Q+§VK#ieKW$0iŻ^"ZaheЂWhUVwYyZYYٷΊçNt0+7fV/ VJ08+ vVX3boΊ?0jXK+cJ~KC+D@+!GVdh`BYZiIVV*lůK+C AZqT &ZԦV܋Z1T 
29d@ZZ1dV2D2B!JF)Q+IZ@TԊYVcQ+碮9J2VnQ+aZmjj:NGD*< 5Vg[e_kkˍ0/l%[ISz#84Vb0)glajVa{V׵F[9@&x> VPZghWePIr?+&++5-w֦7߁CRV#Fʊ"\'70ʊ=+3+aV27\I`V1e2^22YO} rV`xHegLo"^#M0+MX$(dge_⬐fJ. 2\8 Ff 2fN=8}g&1̊1+#OBJͦꈒCZ)\SGvZghe}9#h%@+c,C++nV22fzie캛$٠MSW gV<+2~.Ďagd-H+JݲDH+YZRqoJŦVZa3S+MIIDu[9}V+lw'ZaSZQb{!hieV֊[+Ǫ'Ja[+-ZauVNӆ Z eZq;t [%ZؗX+&ʽ[Y,.hODx[a+sX.beڊ XʣF8VHPYJLK9ZZJS9ѰbRDҊ3('ieGAZ 1MWahR0$Z JmՄ3C+N, Qe֊.M le7lX+׮;Gu=ZV|jlbbkeeRA_/ AV luӗ'AZ-`?Z9/Z{,k Z]RV&IҊ'VViBbVjgfVƛ_O7lǭoYUfV0R}ΰP塱b6fVhbVY\Q3+'t8+pqV҇BLrЊVjDji%;VsimX$,V jE>B*Dʭw\ʘHZ!Jv$@ZfH+!Ve 무,U)-hv+5MOդR̀Դ2G$XF"ҤxӲT*q ^[. I-47R2Z]T rYj^G,FK-h1dTwflGf j?2&Ѧ jYƛ㱿C-P2h]pC-~j R?SP2:*Zd?W@ ˣ}S-hŁYbBDE,#Чed8қjYFȜZ]u ղާMV2^ؕŶZgCN>*[[sj)zGu42޸,,ta^#K57RQ\;MS-xvĶ1[jnn'm,ǢeROGun[JG,]NJqVR:ҸղdL]V2zt-fԲZѣпy;V2R,/50Q_TK=RAjYO#OO@v]VKRWjYFZ`KneolLmeLN?Hkղ\ZYAgSS)~U7ZC2`kYF7^jt)͵,C\22UײG. %,#;]7j-t0Ė:"Ŗd[~ϑoxX-`]L"/Ŗj=[cAR[M-AJ"gkYkfzzuzq|!}980ʜn[k=i-JEZK#C"zme=s-ӻop- Y\K&}G{-UKjNlY`ZO/(5/(z~;.X`2lT_z<\.t$V ͵}kWPDs-ӋP\YR7hkw[ZN**aԛs~)zuT^R9$kYٱȆUdk#ulhf m[T ,c0[j&Y4R1-6g^),c jsׇqR6-" uieCp@ TIzlChI-}5R̩#G0Rebw72u/wJ-7iT:gR+hS`.A`K5/2nvnXk-oyjŤ\pZM]mԓZjeSgl~[jrq|Ra?Zl.Z[Mj>^*2j9=-~UJ=OC+] 'sZz^K=BSk^p/A&R\bPkZqhey-uDD{-c9w)7Zz|.vq- D2RkTA42ꅵLMt kFԻZRk-;X\Kʭ/ ^uzrWV^bCZkVRFk-6=]ՋZo:"ORoF,].Ik{MZK}JZKZkМZO(|j@T]eRZ4 eHkѫ[krN-_kkr,:gSoZ2O >4iVKj\jAHZ vkV)e^Ka-hXK 2cZz\ER*m,5qs.|ZKbi-u Th%TwmS*Mh-ֲ 6.\KNŵLjq-J\2^ DkT\KOES.q-wG\KU͵uRͨ4RC[4suk2~_YUB`P5Rol L ԓO+W@[m끊Ė1EU%Kl#,TŖĖүje[ꂻRɢ# }<]RE,(,ꦐbKݢ=[ٮoU)%,cyX?ŖjeSTVߪ_{Ŗj&nAO=Usȡ̏x-t[&^p['eѭ%e9dYK=wm-&J~4j9BZl$Le- Klއ8-yk鑨VU覉l1-oz 2['Iij\L-!->g-IIi:"˳ ЖϨ-ӜԖ--e*F[K-JU&;oёVU>@[BoF>F[S(іmbthKˎm;[F-U፶KhK=덶\]ۉfK &j^p:cJV$ԋg-5Ssb֖eoŖjF%j\,UZiΙkŵLC q-=\KEײ5STVZ- RC|Z185wO;Z!R݌TUUX4lRlUTa-Ӭ[c-ѳXKu`UT5jeiR?Zi$a-=WO@nLka XK}"Se2ZrhFr ϞhYRZzBXK[-8diyeLIh&eg2 j?Y-|ie9jMM[-5@VK"%nROXZgʎi}ɭ0Kljgp&~:G{]ZKO^蜚cL˦Z1 ُՎ /~9ˡ wa-\32^-̷mU/Za-앰b>uj=ԔZP4R =4pQLs-,HkYF꼃U5TJeLtNRAz"jPoJLOtTQCc-ՃXŮ]2 MSI-[@s9eu-TRi:YRKYtΦi[ 8 4-LC*Q-UBj7苶Z)LK50xk`ru7bI[-[g%#*zuihe޳˥2A-EiIQpӒ"LK`Z2i1;-U.3iYE;-=3;-56vZ2 
/%8->G>p4n⤙jU%ʹ.ҽq-(RZRVZju %ږsfZzI,* L2BAY6LFiŴdӒ^ LK6,*ŴZ4R7ʬҒ LKf%aZO=UA[ˮ:n%=&eU̩p`P< jɪPKċZW1 >m/xvZj%4ZgONK]dvZj|NK&ZEBjJOYzEE@-j] jIhPKͤnE=# ,"WOjЕKmUC-#i2_%h*@7`;-GRwvZ9g>JD7H= yi%0-YiI凎l]`bZôLMjɖ,vZ2(%S8-8hB;-50NK <ԙiZ<2Ddƞq.K$ք{KݯwQ-YjLZm$bԒ/RKrS):zN ,:{C2JK-%^<R/㤥%vvZRKհėZRmHjD RKZGxj0zZȭs:~M9-;ХR~R;-ut-%S8-YjyP|g)G (ejO;JK&YPZ:f+-9w;*soJKw [iVIFfV|F3+-h^[iID2LKu4t ILK}(32^խ~ҳRۣ5-5nzBѲ+Mm}jg{OB,gL>Kw]fvUdVK>KYOR=-hj- ?gTQ%O-uwGB42y-5DKצs.-"Z0q+],$BT3+'jTmԛun iIGiHFK=boFKOПpv"Z#|z@ٕuAhh&U~{ݰ--'P6-Y2h>pC! s($__JKpQZ~6=)z0,{.q/zc 9jkR:FH-x\R4"%H-UŢ.{GRK=XhnџeZj@PK-5”.RK[a;Z5-dEejv$$FRKC7cPR]ˡF扃P v/e@jEG$-TKI-PyRKM3ܘd6!( Ȫe fU B$X%A7@-%$DDRK~J"ԃoٻ̟ʰR-(r4 L4_%53-^!EiIJKְPZj3*%I(-^JK]2G[$ti<6ڛKj2vvoeϙiI%LKb$iNL4}-#`6TN((K2Z)R/ђz$meƐђ6d2?'WÍ,oUɹD1,RߣV+1L/j1-[L4R/efZz@ ubZӒE)-t*CPX2ZR=D`d$v%W-S\FK&nhIP-5re:+D] y}b<Z 4{D*eZ2x-NhI1DKvJ:%8 %yn-rlB+O7LoU-6S@Khe|Og@?㉴2$,Ah\[A*7]`-U'So}$VeHh26BrtP܇z.jfeje5ђ1{;*"Zz)HEIW1hnZܿRGZВt<̿$e~o\րh2V]5ѲT&!~kn g9^LKm9$ʹtRlZbQGLK7sm4ӒH%O6#rZHR5,噶Eڴz*3iY`V\(|ޏPt_Kt֎*m+-SAJKНX.{IJKEQJieZR?sՑKkK iI.#JKu/-nvnm)-DiIJJRkN֭\|Z CZYXYRM{LMC7VXp"lV7_ & aiH+vƢV $Њ# &,V7.>ĨhZ1VKzVlR1qҊ"( +VjY9HY90&7$9geFfML #@SvWQ+)UQ+Q!j%qV |S+ #ݧhK+ԖVi塧i͓V.L4 I9@+'#@+w BVZ!!}q7XZ"VOfWt0{~ZqBJ  l22;LkezFVr9VvƎVU ~MkGZɃ6I+)Viiet[Z9H VC{AZaie_鋴R4VbixK+U Igjř`V,giŝS+~Z93 RYԊMiZp< KVMS+;*DZ95Q+4V&Mig$$sA K+"iumuHZI򳥕 Ҋ;dZmgg-㬜x*xxwTte9+c(˜LbaXvV l&DpVXbf$A>̊) 0D0+4(1+I+S hidrS+&l8kXw~iko-TD aQ+5RSoM~>%jظVRP+vHެ>ibʓMX3B1^Om:b0VHD[ !m% NE[BQVh̭lZ802VέCrVv>V'eBrf)c+/F[akk+!mř>V[qgRJ7VNP͌⎭gx֊S mZa%E 9V *LHVjMCj%S"VsDl73Q+dQ+KLJDdkjEsZV;E Њ{aVK%> hcZI"!J=VXh iecwH+PVoFZ]UV" ZY1*h>ΙZIwjXLh& j@72ڵEG8#rq-H+&$T]ׂH+$EҊ-87SǠZq&h`d 8ietIZ-gҊSN-\^MVvS挟@&iډ,4iX%/xV?VahŃmC+ZѲwl;+$DM 'fV!1+%3+YĬSri[(+LGY ʊhaVHYgȁˋ*XZq 1V*VKJC+s"Њ@+ B4 Y?gQVYhhŁ߆V2'h%觡m`h(8+YɄ vV<ඳbrvrVX+YXa\/"Ȋ[Xåd fe1+Sfů1+pV=kS@+6UVhV<Zɂc5j!I&BZ|H+\ZZ)h3iiiJ&pt 7P+՚Z1fje_ P+sZc[GDxkX+f^d>W IMYU^X+')B `0dk>bdLlkE]VpjkE IVH VZJqVLAēZJ9*Za@\JV*eT-'|]Y^@Ws䌐ރIؠ+2 $$`6⊆Bt+鋸6bq!y2z#e<W|ˆ+bY\cSGWkq`oiĕ=rP@:&Wnq$U 
~eĕA)#G\qn+f+u|+L$8"S#W+C?2"puI\9k*Ut1WqJ+mĕyÈ+cը WgA]"+Ι_!r%/+N2rj1<RR+D0?rH\!`z\}0fap-l+G\q@{mq[ĕ "q%ސ+E"Wؿ&WI\ WFa"qY+`o+iÈ+5}W/ʽ5+..+nW$o;[6rE} box+_a{+ہ"p%WY/ip%yJWT\6WDH>XK/Io^{+|)Jފ$ފ33 LI\YAc$ -pe$HE,o~VY"VF[I [!8)`+ EՏ[ LnVr?|V rfQV3bpnESh+|GhmŋVm%凶Ggm4_cmEۣ[@ {+jpxVY-"r,H[8FԠM[I/ڊ#Ppt, + ko%TV . VhR>"୰u^V҉[?)K奮:{+aV)ފsV#x+x+d6 XV@+x >#D {\Q`p#3+$,AG\\ ĕ7H\4,ĕ9α8OE$ĕћQ XC\9Kys[[ٴgn%g­)ne0.STN}/9YF̭ C( 6r1n%M 8s+bVmi+Aݬ7Ίy7;+l0mgv?ґR@TaC [Y1b9 fei`VXmufVxefer?`Vtnf=¬űZY!csRVv*VJ~wYZpVOt0+D 9@atgf!+l>;tB¤(x,+eEFVcX2:;<+J1Vd OXRlXBFVFd+."/!+jBYx[YqʊB++nNxdge)ʊ{6AV󀬬+5%d%oI#+FV Y biDR:df4bƊq+mmc1BuDֹDŽY_e%es!(+yO P(+>QV2%=++mEY!X3ʊ٭Bd@(0+멶f8n5\0+QV2`eeX ##+uK?_ۛrtڌ8 - ;YqʤY{sx37#+k8e0ʊ:hVVea++7]VVce%nʊk-QV4 ,X!+7œq12ސ 0+LMYOBͬʊVV`(+D|[YZYItIY9͈2?.RV[YqFIb a:oJGYY!>J!!+% cDͰFXI`Xq%hwc匟Z-6V`b%q+C`"VD2vhh}nMx'@+(BX|[Ċ%$XL2*"䫌W|URmuU쫨Wg^%;WqRɾTsЛM ,U|RUs_1*&=Y4Y+~ ء0VFRu\V4`幃X1m`%X!D&7X櫨Uv?Ujy4H8U'xx*Bu7B*#VIDm:URQU i}׻t,/*EW]ɲYݭ8- )8O ]tZWquӠ'UTVt{7IaNؙB_taWOU윅WU/0WRo*c,vh_Ż3WqD|1w+_AVxh+Fb~8JUPR"\ef&UF~U Y?aUb^TJYuo7h`A*Jv@U;UW@WqEtwf] U|â䎐r,*FW9OS]m-[eeV} #[e ,UU< f[%llcAipV9VHlV1ή*a`U/V%)aUn$*ά̪5rhͬª,U kVJ3or<=PUAUjkUeOs0t̪BWWe"z䪰a x gU+We<p\gU]*֝ I`XNa 8ٰJ*N~5i;*U*U [4_$[e2[D0gZetVqޑi_VlZxhlU..Uƶr.(upVr[VW!@-2*,_W2Rīxi^*U,x*!NN}Un>f^*7Twx62U[o*"86ҊӲ-PĻZZ!uЊw6r121%VdkeF Y+mα|X+Q\4Ewc+`[clũVŴ?&ll؊x#r UV.`+. 
#`+)wa+cX lt-؊c+3l3Vؠ% [q'ǠLJm1p+N\Ra:ny!y+:p{3@U0J$oڊ7E[Q67X+LZg]>gNw:rJSγ8+YqoP;+̽Y $ge ସ!B'Ya1;+ r] *8+ 0+USJsp O0+S1+.fen)p3+:ĬxѩdaV fn¬XpY9g<ZaJVmimv5 @S@+tlxhe㬌.­b g%bhimC+NZ#6ЊSVҽZqdog #ޫMQO M8+i%@B8{J?0+U|:"fĬ71+&lQVnp6q:cVVbeͯQV\QVȊ~% {Y!(ʊPV*4j@VK6 +fvJFVld{Ȋ5#+YaderBPVH'hex`h%IZEGZ rÖVRH+EIx<ߥZLI6@+Ӭb"hB8t + Њ~ 嘡;XJrU&a@Vhx +jdfxc }r؂ = QV H-V,;Xas+ @VlFx* v#+ƚm#+z7 rGcd FVb4de[QLeUa2FVȌY~'ΊSpV'wJ2U Vqd<l(lj[e*~Wa2ys+\zq6WqR Jɤ"O-e9OCe8NUEhUv3GUbn.hPHU2Vɰڴph""٧T"JX*AoU߰mEUCVF=*욊2%*7EW+"l$L;=*iӍUx*^U6*yUڴ;U BVUb'i4*`l4 J[msUؐ/ DW _HWBUw\e`7b\%OJ2y7UHVY&ZDtGhQ*wn*622VNUغֶcb%)* )mV!L3Uj=ݲAYgȴ e筦̲Lc!A%y-0X1*.#dY3㠢UGVٹM,5WsLdV6׵:|7_WAx}`y|=ׯWI|=KYh[Gg~Wo~_z]| kkWQ=?ݟï?|W6bG{exrkI^wora?u5%׌q_jF k\gWbkzōsߍRAF__zWƘTIow_6b-4f#?_?"RIJՏ_1b3{r}kj 7QP3XS9 _Ƭ:~JW_}']Z#^D?|xe¼Z+>d,r|]Y6x,js֫skm]6OF?O^}bט|+G#YTQ3+mx^ߦҶuTGTi#U4#Y,@Urd:KU4#Y,Y?1ߤF 6w@8J?*Y>2E,*Y92E,΢r䬟Yi#su@Q{4|t]STtGTEY@U|d>KU4ő,UtG~^w:"ȓt䋕6r͟sGd(gtU|dgtU|d:*Y>~fX7cGTgdzSCSg󧨡,g82_j(g|jh:#Y,Lg:ThL}up䋕6Ϲ2U|d:*Y>2]Urd:*Y>2E,Y?Fo>?`xKZy9#q๿ÿCЖCx-g쿊9ծ%t9l{}:~^|.d>z/gq_j>~ Ղ,=1UJ=?ǫ5`ߦ<Ɯ[/Wϖ?鬭bFYC,>Vtߋ)h>kDT~Ylytqo?u9\oԉ>sIsd.}oeȑz,Jp\n%dY7iz:Z]]x#&\#V_.$^%Q9 5?ηY#ފuT|֡*5?_=b V#y?RЯϼn{.\ېַ#slvZ6yYWeg#_(h|8ܦPGLѫ mr9^*V:vH8޾HGVZ}K8o?]Z wŽh3>\?{5Uv?M<;~}:2\Q[)Y͚gOg]s%GڗGPW^<< 9W߭ߦoLF?.?rc'wso#o40Gɥ4q]:yMgvOOא>ym Dd] rG⣯8=[\jkY#rvVy;kȱoo u=L|ZyVfozBFIYouYw5\8 z~0DL6>~VUY}{i}tD(^oՑ踺~ϟԑ3O.#y`KwTa[X)#l䏜u9_}9?t|=0wF.Alߌ^˧?|/Fc{~Z7<_}3Z>/Oܯ>??}3r^קw|xo>>8m 7}|͘xWO雽mO|O>x.?u\} (>}*߾¯MM[.#un_Rr_鷿]f=.v_h~p楰:ب|"?Nendstream endobj 482 0 obj << /Filter /FlateDecode /Length 8175 >> stream x]ɲ\qݿ_wk<(B/(/` cв">'Oj"߃ C޺5dp2SS_ ~ =_>4 f<=͝^O9xh8l/<}~?%cwӟ󕼿%x'Kg]4|~qm884Ͽvܫ/_u1}Ym>oK>?ߚ}z7ۯS!ksOz=ףxBO9zc~z7wҰz͆?]^!Z1@ >/i1_Z!RgAK7FO`HƠ/Ѻ33@ K;c^b%g )Db2k1llSfӑӼCbh~))fLb+: R!3_njNk%|l7KRӥ`/uO3 b%hn#9 lDy-f3BxjiNO{*2M/%H5V4˥[o*'8aU{ Nndk9[6`"Æݚo5=SzS&Rl ڑ>lalZ4%\/eh8AMӘi \D074&&~vX.F .;""$R]2GF<$j  f[So]I]\]9cdgn#7p뚫pZ(|!q\LF7 5b .S=R܉BڰU`xUäD\0bjَ*-Dĸp0zeER\ftIq,AB\ s+5; KMBdJkT33J+$ه`$l+4D`vp5~`20S@n6ŘcK j)nTa8 ;&Ěm;dfCdFn 
՜⎎*DaN[8M@pK L\RN(.%WP׉^pK HNvLIm)l,UKUJ O jK`ۮlRAƋЃODbX%Զw iqkY+i!u7>O1Гv1 Djߧ*@*ΚN"jgM9YU PђpWZ\" 7BBDY:Bۅ>2@͆BKdrpF}D &$6#rRv&˾]k"҇Kswҍ"R򮤈]GuuDL}Hd]g쎔vJ7@];sƮ;@V\7'j-N!U3}[aömۿ qiH2fM&a҃g3Dl׀>i߶VpSĺ2\\1#"DKBbhYseED#XY>7G] w/rjܥLVogJ6ppM6@L%oi6sq*QRIzy'$o fT p0@$o*άX%@|M8Cĺsp#wMI gV8c (UߔVi̔TMLG|'NM৹7aP8<.'v7"TfbY0N<>ET Pq|Pٿ3)[nf6LJ!cl_!hɈأKROŐL7UCLw&.jLb̏$RU~HZeh9ꐢ~5/K &O5HIo{n!6ͣmǃ!iά 9#6 7NmiMwҠjV֤ v4CaA:lNT2=3H䡙y2QmM65cUE`h)fRQ ZYnFQQN@+P4'@05i⊩Oag/EWQ1=@d+D z bMB,@M7rt}`M&iD9izOt N 9}i"{?JԺrEJG]wrEnZy6M0@Р'Q%إ_b?;k#96| z'~I('iaJ%1^Ie@V0S3)3d~xO_xg}l/j<1|;(DI Q~nON?46GfvBE|"TDOwwWG>Y%SYz+` ?&GʌE>7(~CGMxJ&`1c`/nkVQVyL+9myT+ nFGRbBJ;#rӭ!;I6~)bQ][6p'Xl,, g,!7/g1I&{˫v/ny}.ݢǚi5ws g(_&2Ϙ\j?fJ'oFSZ-N,cA 9|8 C 7Sdwȃǜr $EheVRˑQ. S:B>'T=/֗G(th2K*J}!jl{+ !ꋿ-QC :TVH3HN=*w}m/ؒzp͜9|//}03p:|zp /.3vNb!Vk9ؽGJ+yoőGy޽|t:-S|C1ǒv=}u|xQןojO+Paz~{H+g8>[.pNS土|-VT.;bw+~'4T`2cp|Wj ^+ D3ܣ'@<`@E a'(G>L*eG HX)֘zv끀 F&Ef,8Y 1&D9b-R} TI,iBP$zIjTD/K,Ũ|+nj+;,W<sT#y+ nWVf2|kARَboEo'jVg(fOéD!Xjo+ d!4L*gH rezKg ( !J$CZA KuZ!*nXRg(Y>T řPbR*QH>V4X*ɠtsbM,~#[[L40 *V5{5"1"LTMT1S6Q:G wy&yHֲdRWWȒZ<"_^ y̷սɗ-k(o]|gFϲ; }-R'4R)@"vF31QTvHˆ8@ &9ژX41@YU`zٶUX8B"TRȰBb11TySz&b7fYD 4`ow z񧈡)YgIa6 Y}2ړ*]/3hd*L,K!U%&U`U,0ѣ1CU2\|^`cM ,ՔU lN(岤wUYyڌt#~րUj=3DeȪm~j }jLRI_ C>b̲jR!w0o` ɘ!ˡ!^-kj |.l{~ӷMdjgbuN Ї|r *Wh&0D_rmaHڥu65Xʋȭ~#r ]IvRDKz/Ld(M;gu"YÁ6dC)Gn7ifoC4Z}7ז%Rn,;}8 !q <"[zks(8懫C@Ee"RV,]/eUO-MZ Uz}xQ)^[\Luky8<ܾ|(K&'m94!Tre{8* uPGx@$C zVWt/>[@ӇAz\D +RЩoGxEDxW K/n r`X"q>,fq$/᎝e6Qlś@ڪmV`HTt3%4,}o]kĄYf8rn inEV_* ZNd!ëkn!^=g9x,l'8:J9P8ͭvP*N xZduu0-Vb٬9۸ŁBi\@Q}\"uM^D=ܓi3:.#"i~l.NT<L6ZvOUUiZ94wx9U4dYxtlX"eVqgvĊ\U¯EA5R051اl9;,Y򀶪b3e@NV_ƺJ1)d襑}T]j*V9O8iX `nj23@6G6ڠj<"RNSwD06*8ohᇙ9ʤg7AdyrJ,8TF &wBq^]Ub׸ fj_:5KsVVԘρ -y(P<)ƴ΅V2ETnT p==D c̒\?҅p= _]A{4)푩lm6B8‹1)sژzZ'SfJ~. 
Q_b4{Z2BBR\JevſmliÐ64rD;\fΡ; n4:#?.MN]IImy^I몰^LbHW@Š!J<ܘ6 5V-^g/_.Ts,//QIpo1q#P&IN' 2S90'k;@k-?َn]x0\~pkykTҦ'Exπ(Hp+F'z˃ȲH+-<u,c#' u銱9ÀdMaݏ|sn˼ӘCg*J߹9Mu}@Ыk[foy \cJM5/QXwkDh=^O/EaUZ"7|I@V M%m%[,o'sG+xUS?P x{rc6k'Jw+u=+=jE)ߟ•Jt@+d']1TW:YҏO]Nw6A3O/nC/u^?{9 ɷ9߾ٟzx>{6xqC6usW|[*7O9ovwu߾zv!O^|Y7Ǐ9w7|km߾2F4 A}Go6Ÿ_/WG[T^ް[xȿ=/ycڱW߾ߧ湛y+7~>:}Uo^ja'nDervis9,NI鋻XJMendstream endobj 483 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 345 >> stream xcd`ab`dd74 JM/I, f!CL Ni[DyyX|?$=W{tF:ʢ#c]] iTध_^竧_TSHJHISOSIP v Vp Cw _X_ZWRl} ߏ?}Utc ߏ)ڹr^I7GWUUg n)^}=i9[ ?]X@ߡ z&L0-ýq0"ϺzL=f*/oendstream endobj 484 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 780 >> stream x]kHSawg[:j<@I7\RQTaѲB7ԖS6/[59s͜:cavӥd]F})}GNP"<FjOX#XLKS3n4 KUR2N)>NW&Q N"Eɶ$b( KW le}V~AZT.wt=eS,T*6ҽ|>[o*JL%P 659|FF9!TPGȈZVQ^hyM6_aPn:N?~k%ZչUre)%1LOItްx B}ǝ|;\wF n5 wARП"M3e>xgnثdї@ QeW#.Cǃu~=N~LoRsN+#f߽o TT(ed (R1;e E&wJB*vjc㞌gǩ X`~Q7 H3b@lF&b^cYendstream endobj 485 0 obj << /Filter /FlateDecode /Length 5765 >> stream x\IGv 8Jg54c[Gj`>dwSY rȬ"9MH+3c}-fM-f [_5wW?w7WW7WE̤6z挫2ի?e[7ʫ;^7ucV?<Уm]?zƻzh-tiH:[[eaU7J뛟 ,RmZ\ts荬iSQV{roixano0u#Ew-LyW7nV 衫ͥMZzޮV[M%ԠvD%XZ$@uH&ǽIܜnw}F>vZ]p>műH۰/뽫aK~\n-,8k80΀ [\nUZ  gMYD$A{W-qn%o.W[ /3rVeXg> "_Ls`x?_"1y|P Fպ3ۺ: _#,N &uLJyHeEO݇F^WGe]A PJȟ~k4IZc @[ _,Sڄ:ÄRKg+@ J%᫵:N3"G= Ƌ)Zu>ȟTSK-N9}XFF_c6K4$t .ܯ ӄK5JVO#S "Oj80 SlRhTpE {`d' 9ǰ& acP 'pw#J_4˪>cm3`ڇ!q(Ň02얘Kd?Hk+HOUal>LB3XL!/.-Uy\ [&}[Kюif`#ׯE"% &5N-#o/8pNVѻvIbZ:058Uh6w|5%GDi=9wd[?F"zPe85*RM t\Zۈ=RBujN0Qd$hMT?]n 7r}7f>%hI5Ƒ o 88p n3;;=DMqu;(cQ!@R2yN͸xP*M)S!N/jUA{޻L4\>HN"IޕyPՠ/G'BZRt\{&D## K>vC`ߨRP1H?gmՑ ,U*}$IQώ x`*Tc &8H]\#Q(vwXNO;no/hA,dB'x+&^WUG`HFPgxh Z|{J HqL7sd')Cp4wi@rY%é{-c4̖)1kpԖ]2\sPt{-D [0R f΂O{Ѥ@r`uB 0H փ80гWC2#h.,1 F *| UϡӢI&%ʏ8쀮cTStAu#` ZI[՜ZKRzA O)ܚ#2q&هՇCR Ve}{ 8xy<HI8~ե4=w.<5yӭ9pك>. 
V0"`o#t^cg6}Q!U@<[:I) ol CF<"2(jT#<4{<)mt}xٜ(uNd"D^q1N->a4"D3"KMHu a0`>j#f8CyW!HOɂqzl Oc;x X?On"ɫL\K&A_ģ/PNQVq3RS\S"'/u >8YLJARC J34X< >y莃ɳ0n4Y4C:*s%XMA\~؇\38uM^fC419X)e) (AQ9Kt꟞_\؂X"_mx2_KXko̳Jl:'XI4 8o7ťbĬ FC%Շ +f4- qC9m`a._@ ݾ~2k xq6VT7 0+ Fx͔hnc:/(.ד>.&{K"Ѧt8~\ 6kSiXME *s8rK*7z3rhp,Kc.CȂMqH} 0( ޢ+~Џu+d[:+JjSb$؇ F?N9gڨ!J3Tڹ!¸ I8Xzfkt^L'0s'ۘ'HhN'}v.OG|[OƓW3a8.yXas?M ~PΑ06Ջcnsx12DK!ىU'Td'"TEV;Wݦ#4)) 47JJ!M(BKYI@* 3y#P4T5||7ԨPZ.0֊*3MI=/ K7Hd&E)+moJ. >aLibu-,m%7+*Dm)7[e`;,hl,iI R]GDd7}/3]"?Et9Dnbosع4HEΊ%5aB. bY]g"8ԩB a?#֛JոpT @eԔk/tmb/VY%j3v>CTX=Dv0g`=TN9<Ew~.ns_Mj(S;i?~4Xih?MP*py#O =/#_[T[ҩ2 ِE q=%èΑFz0Q Tr[":/\;-0̰4!䑌mهGUS5fC"?2蘩Oq% J/o2Q35G3" T)<9X!s>PFL8{Vq4 / `I eZؓeT"Bīڽ=-][=K|C%YgQ:m-lYO˹(re"W8)DC,jHj2TtpRDIV--;wsX>U"jr%S~eo2Y )YܦKgqk`qT̀ |4S+)kӝF;ƍT'Qݾ1:'Oc轵E,;}y4hՌaaj9Ҷu:)CaU\?*jc+ "2d5 ~>)>}0\I u mTkqP.WZm!Y?(o~[>+Κz EyJlpcf qnCpYPO-Wa#VzӚ`1\f&M'N%좖6EXO=~9mU$~&83 LT,hjOWiN"A)FluݢLTR^A$ 裀5XwTXC AaX0GCmK" +HG'-;-X&2BЏ*#&(Q:as9Aendstream endobj 486 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 919 >> stream x]mL[u Mn]R4 \NpB"+ 8JO(eBq6DE]]]/<Nw$(2I$y\ޚTXY`?vH")4 ?e hkNvq!RPDP%~u+>ɑ*nW稯i)){9$z7װzka}zlq+ha{6hwm!YjڞgEH((R8$gBݔ4I|T6^俰KsI*Մ7sDSGt5@ \ς3&0?ŒƯ*wOEw8Ȃf x>_6?8[Eޔ;& e+'}#y]9YbUbuAwvq:,Q͵uz<8K .>14vX&)Mr.,H)s$ .v$[ZPW di8O_/])PW|+ U\j>{v\ǝ0hozdy> R3ゎcNd&nFv[&m3q'dtbnټZ^A&̽,<+v|;J G՘L/Fm8A,XSl$t77ʜZȇҀG?YO!Xv|UOTѭ5,:ȅfl_T>6|abz0] &e_p,8$ `HG-FQŜPDy/(c@{RlA9gendstream endobj 487 0 obj << /Filter /FlateDecode /Length 2609 >> stream xY[~_> ۱*^%nI[lɮ6ư\RXU6BIUDpc_2K,~FE9S[ǻ{xP,mθ*&VD3AϷ?Xpʩ2+Xa %P}"i} 쟂QzD\$V?M 7=&$:.X_јs&Kz֋#kI6mEjMn5͠ >)7 LٛowU'!ʏgm=S[{و11i䊢TdWsѵB2$AVP+]>)mJ29!hk3E jM o>K=tbxn&Q6)_'KP&Pa)2v]P1l )6&.i%Qhbʥ16iƺ%ZiAu_M+[H1Zcn 饮$՛^9dR4%x.b0}.- YۻfpT#EHiI)BIabQySK/36Su+KL6~ >B'VЕᴯ*܃+Q}=S3X.yY@㜮VGMX<~@)@\hhjofkHpleV*g2ʃ;Od/mFK-up?Nu"O o#40~&. EBx"y8~0\I ܊(Q"ps4]{j-"- u/YFFџ`90ȌOZJUHK&yV,SE̊@g\;hRܓJ4Q*!=I89Mh΋k|^#87P{Ȗ:Ҙ4/ ap҅,aP1Zx١)jcuAc Qz #IY(rωJ A37@hIWbXwۮzXu@q%ʱ#]=ondWm ҄o$XX&p æ=>]}mRɘ[7Z^)%q=Bm?rKv$gPx) NZ ? 
)aP,28swz{2:7ׂRq/zќtz O=cAote &8-'UV! lJX87|%'Px}?Mr}5sHԌGeK&GANZޙT=@5"pO<q.Z^Œ.5Ph9YO8>ZTb݄1./މ B%,(IS+AQyݿ¨[rβJ=/k@+ 8J782y PMn|hp&`'3M2ʪ}Rp)Jqcnr{d`u#pYX&hw/!98^n*w'Bpf?JRw:^`^FjH EQ]$^$_x$H/8{8xH$.t?^&0_endstream endobj 488 0 obj << /Filter /FlateDecode /Length 257635 >> stream xK/K7 |Lhd x`x Ȳ>Հ6=pk-a]w?2 rq9q?Nyӯ篿/_s?=~x?w<_?_w软_??\߬O4}\Zoi__W^_}Οk}~O{~~s?_˯ϳ.x vƯϟk{{]9O8/}jEy`-p?חɺNȻB d# zNΟ_]1<$>V\wy=J`L?3~ r=}C' d ڵ& \%c8oH^Z(^N ('`893H~uc׉T'?`#.i_^|~s4G1L|0&ޟo{5ĺ@$+:y[ O#?_&ܶOJ{᯼88@ur-4 EokI :kk: :Վ%^ņY{E\ޛGij> G5l7_Ɗ(~<"Cէ/;N>NtZVma{2:~N'+yֲ:4O_jwsYjs'rY]낓eך:(t o'"c$EB`ukXc=lXqzad4 6]Nn!s]au<0jx 18ySNmNn,rzE_]\z '^g-D<>u:8k}WY[&rqokKЁs?Bc4F\l_Sad[-r#~QYU-o,ŎȎZͣb~Exy"Xwm/{+wm9k+ɲKة&2Rd3ֱ/ ,7^zD<8QikC q@ qlmDb1ݒh'<nV ~!lݺUd q\&ZU q\&ZVq䞴ȅԼyZ <"U9|ZAbvm_oU"թU"_QW!4U ũuގ"o#nW Ύ޵ą:"ku6P~e>^LyXF0zC Ž2~!.N /S(5; 7j5KVś5Ax85qojgol$c VqO ZӵXS'N khNyE叚*cmj˾V3PnZRfg%@UJſEn NJ*vaNAia[f'M.rqE~^XƷ6gGUxR!l3UҷCJOU[c-2bdnV]*A˯c'9m:ohYetI+[PZmPcą\ 5 aúFu}6+ۙN $jWҶ|BJq_u-t PPI i" n /*|(my"mӐR*߄"-*B(T2 Ez C7>}Br߄BJ I>d~ I*C7,C7*>d|PB[%mhM(B{Wy-6dn!`!Vҿ EH+߄B3RVr}3dBf%c ?H#ZIn5d|sЬB]k%} c+[hHg[oFJ7mp!W2E V(B\u@Y]'$ͮdnQ!w!TWҿ)E+[TH^oJB(mQ!}%c:$/J7J\spM)bmHJ79J1Q1/RFJ*h[Ph0-op)C0o>4-PN!ܩo*۞1B8jT ,%XS%m GU2|BcVpF*[>ѰB8>VI U2|Blp䭒+u-64WbC~p"m,dl& c%} BV2|B#p첒=q߬dn @ h%c55rZR+i[BJ-mHn%s55ڛD㿕-64F\bCȅpbumo).Oz\ *BJkZS'- h?Ώ{.Hc#O]uM7~slW=>{FΏ{ه|s|u?32Zu_{9Z`#=I;ykNPJ[ BԿByk:|YW8ke. 
|6#cӠ0E49Zo;H1M*'e5r}Z#A5<(=6ixF,č.ٺ_y3z#xΔ7|>9F:`3C9\ w;$9FhYCZn8:wCL`٬1BgP-Buctgak#ld~tM;(9|֦ LTb3mB4SVI2WUTdXl03Sݰ]rmSmѥx?6܄ k`b1U'":y㨆ښ+΂i[$L XR5eǬ9"(lXcwvmQƂo}ر@l"\##/@MBcBwmmlt{IgB|{ϥb} [<Mbѿ w`J/t|sZh>)䨗Y]w`h'b"ZoϦ J?$)vУ Mhcc~}"FDhYަjؒrއm+,ΖxӊCi-]nx+vںe8ʛ>ճ=Np AۄNvӰ'S6!ԋSi{Iwt3MĎg_+V'uEm>?9n8Kk2aC;;®+kl{;tX6ye؟;e>m:[vN!'GnI [4u8@?OJ`pIyrt#*I*, { #,%2RQSRZ/K5h ņe=֤a({@Ck@@bRD&q7(FCigqڐe`eYT1U'1[)c3bPJ#}}% #.9P UeQUC*&#'l]t|*j^*>x/b,wx-o rI9nL3_ ,$^͌PfYj)CfzW}($!C@7"]s;&IG찏Ҥ`i(+`Ĕ=93쀱ڥ݃[#^V091d>k|wNYρ&֬VSmQډ7[hJ6,h8Ag#|Y6ijUˬTy>+_oD3V+a접8/F?AXYAYF#ἣ+D}\sLH!V6~i3bFm8i; 6izze25e_3^!IDAʊd̞Zi'S j:ˏpi(بjDjH4Eksj5DFhjx5u6qbe!c#:DnpT:8l@x&{kx4miilhޚ62hs{ EL[⡱#N ur]͔t~ >бF,p>97@w0PL/Iof9`+N1(^68>Z~z $U W2vݐ qϒ;d>O$$U$ \$ѳDWx9 r=I&u#֖ $DXXh9 KfF.AbyN%< $i[l䎒DNLAbJҷ]0v$Eu~7$ŮzmA"=H" $Q$2$z(3 SeXALfsA"+oc!. aD)j!}["MlfIU̻fI,? Px(:2sO )H [PY-sWȱE iS Y6λLDBqN#z!<'IET P+۾EMRfQHIZKm{ʢdS:DաBNDmQJV!}K'2VȵmQaK*\!cK'DBQ9,dlDT DE: [:$ҷt"*-PV+[:jr!8WҶPẒ*"y%mK'Tldn鄊_IbC Jc6"*[>~F!yTҶCJ>M%m'dlFTI߶*[>aOVI"CmJj%Q 7rlڅ-P۱ . >}hs 5Y aV2P_+[>t!SWҨ~w%c;өoDJږOG_6 T24*[>!D!TJTҾAEOPQU-ϔ:mRB%J%P4BS3oB*J75O!TT2 E( (U2 EH PUU2 E M(BV&!'K"Y%Pr*im#DroBbB(ȫm!]_%s  M(BX&4@c%QzQ *5+[hHDJwui%kHX+[hH[BCB3PV2E( Z(Bt\ɕoFJ74(.2 >%v%s 滒M)B;^ܢBBSSлBM|%m2#$I$/}S(d~u9VDoJ# mQ 1.QH9uQ|;; t"CpE̡2-Yh楐'rp1DC:-aBgPM2|"S!m *dlD T%U!m *dnD %ѠX!}'bଐ1Dm-$+mx-6b/f iۦ#-Q$W,oc-$,oO\j2"F>h,9^$FP-Q$c SIum9+I90dnAb89ֺ7 t%9Pdl[Gۚ9}ַ9?$f̓mY$sysIP|}kIƖP'@\@w.^IPgBEEz/ {$}K(! 
|ZV 7HNJ$mK3"2twGpH2];_gEadnǩ-HW ᜒmQ,IƖQKzTҶWN%s(G$r WNyBB³O^9ʙv+GM$??H+WC ;3‡;CD·xt}G`%RV"C<+!T'CB+S!WC|r_*l+w`߱J[ V%N0*UC|w e9u- 渹 }`H?fw񜌺+hۂ8idNzI>^'p3a䮉 2rP&Lp3*ȸ`<98x?cy~;@:QsH'p+b鄞BELgArN |;iLi UwrU3'4fʿAu,p&yrP1IŘn <-rC&' (ytD;Zr Ƞ- N^\kbH/ZIҙV<|䥻΅NZj3MW$eiL۠:c oPF*ܿ_Ƞ-K6Np-PՙVl"PrrW+i<cy۴$*4@g캀PVاۏE}ܴ `ewn.".V}rGUiQӠҼH{ӻ"DQU{=={H:GUAld} ",ObaAɢ >2܌}6yԃ6 "'wB6*=t~{"t%cdZ׈WiYtib٣Y}i*8ZXgP,Ch+HLoZd}Vixvۚ _Ӻp7=v%&Ѧ^4 7-v7ǗAn_ mEh"2a{pl!No@dq܈G ەZp5dKs]k";[VN#&aqrbvi| ;ӤX ^39^D+-ˋ"4tEi7F +tGl'\0_DOh/pek~ì~:lOظ(pBS H')F,PPr'$Kp# Ӯvq@$9P&hͦbڕ@I|H6ЀҎEJǴSJDd$ӆRpdJ2>" (YY/ M( ^MژLDR?3-pYɚ:s50 Vg=L*z@!'xaȋkAԔ)9yhREI>C/]2^MN Qux{?`s~y5!Hn9o7jTc&X4܆NSu7is"*CWs$~. c|i&qx;q8GT+)V1~Хq FhrƏ|e :~agڜ"C?~2 9AsQ84mVTJADiʪ /Jk}gHeA%_y7ȤNCmj:^^F`+-X1/N\9s+4/`EneICr^`v7y) x^ gl>'t쟗 v0B蠑\JIDރӊN:}vDQ0'#X98rI:^ 9 G |:P^ٹ+..a0΋>cq0ftƠ;?=m`WDnV5rM:/*0 6 5ᠱ:t<^NWvЪ^zx:c~:·ư;Qh s0 ryI9:n ~N&{<Sjr촜= 4YL8&, f%'H*<W:u Н֖ Ui=?Y)S&ʒ!;m6)l"]]u*lʎ"Ʈl-+l#r`Yٖ G%f<*LJ E!$+d );d$D2|/ZT?$ˇLE!/" ,c>}.I C #ZV$_X ޚ ?@||u")R$HrDޒ/1"qDRbM$Q$cNZ%E2I!ϐH>g$Y畤<"܋haG-0Ab Ju*ֲ %њ$$sXh.x!\h?(mJ!{ ȟDch-7`w/ @% 0(F%$PgP#rB%QU$ aVs ҷ0 MT-t&QV\3$J +9B{FD6n>ЉQ@~4Z [P'NHIEؒPW~[PID ah[H'΅-(DBzT_܂"ITY(ՇBmP&T)`|$a=eQ)dBU +JD-RS!s2bDUBNDuk2ʖDBƖND=/ k~-,T;dljIToazg%sK'T6-JND[ z a9*+s%m +[:Bx!,WҶtBEJNv_-6'd~8 `;jkG%m:BdlVL!lTҶ|BmJƖO}T[LmPjy¶X%} *[dMDJ +i[>c%a}d-] j†l%}7խdnAp! W2|BBجQXMJvS<앴-PmjBQ@%}'$.dnA!B!1Tҷ|BzB}C‹ 7v;*i[)H%c4$7)zJ7]K%i<Jh*߄"8oBB(d|BQ%Sd|;BMT%d~hBW%PM(BWDڳJ7 [!ԹUҾF*߄"twPWIbCJR B5a%P&M($m,J7e%71f!lV2А3ġJ7 j!ԳVҷА.P[Ifd~3Bp%QJ+iߌ"ΕohP/]|KJvBw%REP^I|/JFƶuH_oJ. 
B%R0C%s:4DoJ\cƣI'bP̒TжBƶhKҿD2|"qhd1S J)BƖOİRh*mAQ-$*mA\-$+oD̟2|"ؒh֭12Dcu-6b [B\[T~O2" }b4$m8s=Hҷ"-* I1j[BQ DwۇIW M(yHj!2i)I1ۮqIHߢ-%$DdlE5H!)&*"mj"2Eu>dnuZXRiH} Iqgk+ڭ_ZZ|##իWt݅Jp*4q)1a஡U ¯L)+AyPO}*wRYV%\: R8ka *51rVg@8&N 7fk0o`*v9'Щ˖)^ ̌Wxx;)+0]M%k/6Z aJyL*aVSUw^rrbvs\yX1B{rls= P*}{X32ՕN@ɶ2<6ƈJX…]\'꛿; iX.;qTO;i7ܬWS]l'=HT;lTJ6WѰNx%Zv67MuI]vl[Z6mB~svQ NC5RQe=m/:iivaBuyXm[ 'Q$|Nق| 申ئ:Bta1ܤ +O~gQ͡ynIpaOcj; pއ|}*8zsmsrpylsؖ8s u`R{Cm`P+ŒIJWlXt|tua}圪>YZGF%ӿAkإ2  uALkC75L4A_+zoOsz^;ך  qwQ]G.z3;,rؽc6[%)̏,즚R u\kgS! /lܚjkE4j鶰㫏g#vI2NX 6j7;TXoPa8x(Y4ی&ipR#h6[6߰֨0U͜P8`ߚ9͸)(XB}~:CU%YAU^Bΐrهh&^0-=G 5cF +KE JrUN[|t6PfRZ"},t,^LPفXX»@ ?s@h hlc*FT%IAHm5䏶rI"i#?eZ@ji0՘vӕMVTujgW6Զ\P=j;%qi2Wk%!*XmKRj;,mPZ݄WZSv2UVmD]BUmPle*Ba>0+1q  ?9:@EIm ]w(R:j'rJzl@m FuNU%Ґ[ڈ)~-Đۍ3uIy?m$,V*G.z˗qE%0d}IRӲm8Po 'x6<тJ-vv&K!b3f5`VMCQ栥&&ïũsJvl¦S1ǯ#mw8 b-MQA[T0~s\r;d9hQ+8aԄ QUf9I;"P4msU;ȱu`j[ z._b-UA錄sjc|ADUsXMVPF' ʜ:\/4=e ɚI7 a6@ҠUEcu|`N{Y;lGƬ`Ce`납3 iVzb .I#3pVW֜U& flyLU`nA3Y ϻlf1B)FO#ZIEK)9hLh1 1iEQF(`ӘeT[c3*1i#QٍO#ccccs|sIUϸ{9Q,cwEV'G[#5uxpxײA{ ؇3V 焍Lz`v-[?fC7ܲuhf#c|BH[JK"QSYkj>i^[8mc'.K̐e֜c9r8n>7i3{7 L+y#4}44o) RFnMr#@45 4uʠ r`b`'辅ac54^089C &F2B0@I ='tFR ++1172;+V//hF4MÓ#Zn!蛇HÎ$:aY]5 @X QB*g ‰%Rd`P/q1L ue*?oEMPdbS?@BCBGJ׿"&'_ A"yHʕɫ-w5$d!!@K`LɠNBCG=?I0 <'3DBX;h} kPSI. l&[l$$ZƓhAFn+I$$$} hL+m1]6'QPBk$Q>Rs$k aS$ʡ aDX! aN$B?Iҷ.r$ʋ aDu!L aD|!-$<3C?JR܂"@ItH* q*dnA$:_!q,NJuI*[Ii' ϕ-(t/gJzhQϙjP J۾JFMRfKm{TׇW%a6U2-CUBX٪o d\ۖB[!U2tBBXmaa%cK'TL*e%} U;+[:i!VҷtBJN[-PUV+i[l]  a*W2tBBX௤oFA%P-P_>*i֡J%c b aT2|BBbo[ZU-P˫*[dV"C]$U2~F-Pװ>V2fG#`nANk!Vҷ}CMJjr%c'Ԉ.J%Adlg:uΓ^I u+ơn!TTҷ|BʂJR(BC%}'(zJ74(`| 7 噒T2MCJBFPT2Fc M(BS&!)J7P!UҾq*J7S!TDUҿqªJ7}V!pUҿ Eh*߄"4eI;d|ЯB[%mRM(BnW%y-6$dn!`!TVҿ E+߄B{3OVr}3aBf%c )>HZIn.d|sPB%k%} )b+[hHY[ŷoFJ7)p! W2E 2(B\U@G]'$ĮdnQ!Aw!}Wҿ)E+[TH^oJB(mQ!m}%c:/:J7qJ\cpM)bmHqJ7J1Q1R&H*h[Ph-og)3/o>3- N!өo}*۞B8XT (%S%m BU2|B#Up쪒*[>)B8)VI MU2|Bkp+t-64WbCC}p𯒶m1?XCLQB1XLBŔf!sK(b3&B [I4}ZHb-dlðI40[H߶-dnf !B1,\b#h0umoH~~'.wuHk`N,kd{s O'w',`,%ޜ՜gf܂%`ΝS|FW2N}VpEsVsJ|axr[\+hwu1 oQjS v&'esRN ŝU6S){2Sm'oU9"!p)[NFq/,N? 
qBGcW 儎=HȱS1'u?b _=t|潙2.rӢi,RL=`IbzdV'Xn:RTx, Li=NmpXeaճHc̳T1Xe9c]<ɌZ5YDv, +Ujֳp` L~Ue)gXEKٮbyH`iĊ>rQE1W EPӢQ ja4Z^#0.2n\AmnG: Ӎ!`oX:"^3b !˒dlT.p\Caj,rra%u FuKz0Uz@Qw؟bAwNTxYx uaM/$Ԗ9e0 \.X^Dua, 3н,r{e}D%yO *`NE&6[ a T~9h: SUbecbyD&Ȱ ɯF0M'no 4BeME3x6nPZ\?$Zqò.qaAi%C- t脠f0A,Nlx p`SlߚE^k EcIȄx Bo '#=E:\W\=.rۓ *0Il6A+t?NBtn:,h04|ؚ] ڷ10!5|E&<]*^d<\ E/?y~c0]7v=Ƈ#Ey"Z)AY2S+C2l [Ip)YAٌ<ؔ mk7 ėB x-Q:8bENn 9=IX| q'T~)K/Ą W~/yۀP0D̃:!T&Tfk#7 /mPr [䒻I4x҃:=qu /}\ v1Mi/K ҅UJJc.R:5QԐ޴qŤ܁rYPykC[Wg>I\ȉ5[*OnX@ { jEaBEEdbǘ<&P[!]]-"rʁbړ+RU=|PW e#=c煀;:%gV MWt!ԓ{JLaHx"cj9hE}'xP_N~9BAJr'v3]{< fY4P?l O{rϿ| `W3XElh0G<;i61uA ;'' X ոsqR~hH&S6x17ipO'psi?VqHeF>/S.g]`yaNS>8WEX`&Zp\ ^C>cvds0*E ` gAoe)P4E>Ǜ@y0ǤCgRyч8rr,K]ؖh^~//K#bNZqZ|8h6E|Z-kdgs@xJC9tHrplP:. L^z!:cǡADbS`Q{~Ŭqj1tZ!ɬj/^:9P8)8*!Ϭk T8,5i~w : N`1`**0Y[ԮX/dLhp+x3`;ك4dwCShg J8^ S8 3>Bvΐ=›ch:;LFޕ!hi]{qi<{v;NFӳclMj=^F'qhj>Gl=D 9@Pȃ! @>D= 0H H( ʇtiR W>dR eTˇH!~$CfĆAC-4Dgg~|kFN2",LI@5A~8 5NOrz?rDޒ/1"qDRbM$Q$cvV7')χH>C"gQ$WLs/kIY?Dr!)HU"?y".?"suZ$r-Ab7HҶ]%v$ڝo;aI]7I>ou'@H(HL#Hd#I&2$ʎDDYVĒ(YKN$s$} (L4HIe$R$JDjDwA"OT?N x^H7D -Iy $~hs Y a+jV2Pk+[>6t!lUW(–w%c;өsDJږOK_65 T2>j*[>!-D!KTҾAEOPQ-ϔDmB9J%P4BS4oB:J7=O!T2 EH P)U2 E "PVU2 E( M(B V&*K"Y%P*im#roBB(̫m!_%s  M(BX&6@c%QQ*6+[hHDJwi%kZ*Z+[hH[BCB(­3V2Eh n(B\5ʕoFZJ74(. >%Mv%s i M)BG^ܢ"IY/SԾ'>EE z$Ҿ)E21H1B7iBuPD(S(~3O:"I8KR@ۂ"FR -I4RH9ESIBOO!s3b( [>JAbEE2|"hELr2|"&’hj1}VbKIBO\MҶ؈B1DmӈBƖOdbM/ҷ؈)BO4eM\ҷ'.&7 [BI4%Zr͘6MBږPdk!cM)Bu4n9+I9AdnAbZ9ֺ7dt%9adl[Gۚ9ʝ}ַ9Q$Γm$sIP}kIƖPI@\Aw.IPBEEZ1 $}K(!s0H0~8 EZX$i[VIƖQF\9}Gq.N!Ii8dnE s$c($%H$i[T!Kei$_-*D&24!)5AF$o9N0!qO7 zп=㿏qzj8q>! 
ONjrԭIj^O\s'g5TrX;z`1d8qbƒ̻3$a߮oV}m9|QY'>cNGx˃.!4aqJ\++qOZY{ƗNՁ q}]?ruv d;N-0IvqNFsr]er\eVĬ$wA'.;8p7ureurW;='ν[ 8'-bl8E 'x8Ep&?N0 hD9-'9l:rn=E>L$Bb2BW'f ~9ȇyGԏg>̹_CM'xVw`~N18,Z7,hp&lմSM{encKqTG'4gWGnP'EN.:76}tQ>͜M5aaP=amXfCj?c*Q0U ދ}c>%2OTOdMĶq]1;6bF]6WTˇC﮿ic/F]KOCu2lb/̣uaǫY 3˟f,djYF?=9σީh2Fy||,~wways<-mf?K.a5CM偕.m|%vs#LS铤a g/D/UV+a(u-@k9ǸB{,Oﯢ->%cȞJ:uCls>0-V4@yʯڞb|O鳭htiGKmi?71ݥ)?NyMIwn2nG8NHn!Hog&H(Jaj{;NQo?1 ,l00e,*R~sG^ fK^CJ'aW0Y)'d\eOsYNh-T: ]{`᜘%b0mM9Z5/9P[VM9U|mN9(QbK%1*)<'t|%Ny-NAs7PZ`p0K.3* 3蹃9FgsNݓUpLLJ%jxY g:άkɨ= аh44O ͜qoݲjtM4ޚz;d9l=Ƙr (M:i_om-1/ ;a}5 \l18Ì s,t65/+#g  l6z$7$S&3wBlGyZ5Xͮ;%=gWsN r>ÚG3U~v9/iM}-t7j -# ^d 4Ivy'uௐ]zy0|ȅPo_^Cj Q |#RY oTxTfA>kŇL̨RUJE.d }(A`7 CENO$sK>B>(ґ@r#Hϕ?F^F|ȍ" |b>d26 \v ?o$^$~CR!\Dr=#)kH$v&[l$$Z˓h{BF-I$$$}hL]5HIy;7(O(D!7(')yK60)9RQ0J|l0 _!} ! aYs$W Byoƅ0}N0UOlfIx((dž3G8$ETQAU܂"lIt+GB-(YIt-gBxM3o!<'ѹE^OIt/dnAU$,C!ǶoQ@f*0DTSR@۞N!$QQk*dn[FԬUH҉rm[FْgK'\WȱmQ+maBƖND2HT* [XDųQ9Mj!}K'J[҉&QEQYNs!mb2t"IT0/mzgir+>]}/5`6X@HmÃ@NC2݀z{_Ō`<ږNDὐQO!} 2 ВXH 6*Ac!m:CY"JЯYH }2|JcZH߶2|=J[H"-2ئ+}#ǖOk>.df-h$]܂JЏ]H u2`2|JЯ^Ht}23煰Ǿl/dn[`,`!}'8^*(BOp$M,ً5FXHL,dlGN*XBPhe!s44&S FiׄB#9 kBўJ0&#F ik\h^i!cM(4T f׸ЌBPhVs- 2ׄBepm!cM(4V TmCss kBJ0G2`%*\H_ M'.d +BQhr!ךQh.n.dlB8'n7]XsMͭVօ-48"Gm+8BQhw!s(4\ F2֌Bȕ`\y!m(4f+,-*8] &הB [Tp2L/)+B_ضW5F`!s=IkP  kJ!]BuPQ5 kJ!-B]ҌTⲒ-(NYؖ7\*f!}'Y *s*zg!}'ZP%⨐\ B!*dlDHUHۂ"]-X [>bBO-o-] i[lTFHHXH62|"4Ic,orBO2 g!sK(B DjBƖk4-k!c&m!-dnfh{F [l94˅mk{9х-u!c:|5wmM(Z9Iy B!O"|!}K(Br_ȵEE(H_p Hm  q}t?(o E(27$rl(o E8?2 % ?GkB.- 7BƖQFIdQvc8YH!}0)dnE$I!c((%H$i[T'Ke$_-*G&2!Y#c,`5-ŕ`.Kz%X+P׹mlU`;+[^%+֙DkCmpv {#pQJi(Zy⑟?Ĭp @DJBBtH#AJH15#Jеi&ɽ_ Z#eܫJQ)ddPLMdWV%$~+ 2ˎɯbZ} ,i E7}Z-,PZ/V"ĥpmkSjuGJjJl8[F0J,kX6BmnMIb*Ib#BU r }pO&0Hd&,I¼FI?+B|.H|-0"u,e!LA(M-o ]I¬92B} =B'a XF8$ЙE}|TPAU܂"kIt+B-(XIt -GBᑷ) <:W2+Q+[PlP+ Pɱ,bTl ƚMR ..Sݦ$'GYFTI ՚*ۖU!,kUҷtBJmPV*[:^!UҶPB&Q*-PW+[:"m%sK'T-JƖN\kϕ-6T®dl*ᅰZ^I +[:}!,WҷPp6~D%}'(JڶuR"CBجm>-P*֡>U%s'*=JU2P.x}#ǖOgXI +khYz˲Y:-(h-Jo[܂BB?dlЅW]Il}+ۙN$WҶ|B}Jq_g*[>тJQ(S -4D! 
ƚOptNwTҶɵEE?@#H2 IPH2H/ אo E>$qnoF ǚPE鄑dlE:j;ř#Hw$ۮqoQ#IQsIZT2"=QЪEj*[F!BhUSIۢBV5-UM)V5IdUSj ?ivUcU/OM^Jp@@ \ 2+ B<DBr3*YJ)T6࡯g=f#5f|,,|55WPHYiOKCi!0}`ȼUՍ*R2jVYu3b>zLD貥Au#.S} y͚.0t`#mbAq'th-PuجC=~b'}ηWk ~5s6+߿@bڱz/'ZXM/3'_Qm7GzxqgoE4b!gXq,h>jֈ#ƛw;D:}{lZ|:l 4%m)GbB$~IxYߒm _G|rSn2i#Ngyg @ Y?wx|(0j/_j?]hԬ{l;M>O?tiȃK:xW#G.4cmȃZ+wٚ:>~ޝ.9h5ezA4+髚~x.Nf;-q_t6 ]>G~Γw~=Q >~E۴GQ#//;[ }l~'g#]~7h7W@e9":xhds Mr6-r6d4:NcnGh%ȆknДo6qU[PG(Gn.pB" j áf}u 6|Np@@ h0D ->"ccIb` v~u|Dp*C 4:C |䑅Q|LJUZ`SЋ|i+O?8A'؏9:NhsV;\V, WRu ); HL9䟝8Հ9 9W*5^9gEric#^ ^<ȡF<舃q3 o#i4ؚUuo~gA9 ߳rd"*U\B4 Ϡَ}kF:>|+8BKm0f % d0J>`Z[hN9Yu|% &#?2Qf7_œS$AjAۜy9EȡP zbq%|Yd'tga6[60=0%2 yFp#L0)qj#,Ch#r=G.~)"8i 5˞A̩fO9G,q0Kp#Ԕ2iH-LjZ#7ÏݞBoͩoφ<8qZe.= sϳj8?;&?2xiSzw~.8f04g`O7qp@O.|ga?D7 A o,Dia6_#n_j|kLJ9P'SN]%H.>"A=e~j:~Bun#;VDDX/%I%6)^@4/2;–#)Hޯ$^$~h PUI5/ t&[l $V$Zɓh;BF,I$$v$} hL-5HlIzޝ{%A"IHD" $Q$$J~(A 9TYA"Kt-R:L-02}L3 &B)oH(uuIT>IF̐`} x* $s <CRYEْ\W~[P3,Z@DBz ~\܂"ItP/ǰ*-(T8(JP 1*hk6RHc&TO)% vrSjAX%o鄪MmPѪ*[:Y%׶eVq-PM*i[X|X !TYIB%JNrZ-P*‚p%cK'TW.J*aW2tBBX.m-P+[lUP\o8 `GlG%m:EdlnL!TҶ|BJƖOTLmPzX%} u*[dSDݼJ u+i[>d%c v1 XoYvA @#[I uv+[PC\ȕ-P7v+iw%c;өDMJږOY_65 `@%}'4`P܂B p(548|QX pJږgjTm9)c)5JiĘL! ET2ׄ"F{ O%cM(bUָJƚPS!qcV5qB8UI_ d E%Z%cM(bUm#F*kB#xpLņ*[lhjNVׄ"+kB)8YI_3Z3,Û-44DuyJƚkj!moJ-㸕5JQtp! 
dEL"iJښQs%c N pv%s yAJR@y%s JR|!mQJƶuhnW֔"4\RB(G)E*!yDI(*kJRJߡ`,DHF )myƥ`*k>rJOHS;-RBQ%c'ZJ"eS%m *[>!U!cUҶ҆BX%}'BdnlPVI ņDz-6$+JڶiHWX  ņ-.+'f%sK($-JJƖk4T-uk!cP&m!-dnf{F [l9ʅmk{9х-u!c:|-wmM(Z9HIy'֑$s |%)OrmQ !O2"m}b7$, m8s?Hҷ"=-*ҋ!H5$[BIwۛDs{ı&amifza$[FAsIk{ǹX$[THeպ؛-6)$JEmQQ-YDƖQTkb"Ҷ62"s(诓D:_TRLlHэ?i_` _#>￝g4e0^*#4ݡ.~-FV#k.8 #ta C[lFFf ~wA*MToJp3 ~-tyw'P>%r%Ĕe\Q.Uyյ7%=ʪ_0͌&zui6r.˷[x\u`Iۉ-Y%#cq1ҟ9;6.YwR#'kuR7gml =)DOkGQ RBT=05S!L*AVOqxWr FDı0 4%eM*íYp2 #ejʸ3g*sϰVvksbmSBB:Imӆߘx iI8ڬM:*$x=h6/mb?h"lY`WMꠢa8&=莴yV7}e?ĴKxִ4vP]ۮ}ju[G [Kdݲd{2T6|\nu&| y'xY9†Z_n큮0r69-yl&4O$vx;%iҡ&&l7*,ŻiӃ LgXw-=6k)ir!OpZ~y~1(WWn00dJ%/mz01_9$^26U]4F9qrl&w, ;tO@6LK N YʿInŸdžrj"?CCSK'&#[M?ٸ NHK{LQYR &+M,M>`ÉNY`\5e7OՋ`vF{uq@$RIh:pm_nq;@'Мt `8O5|g| {C|(s~C1 ho>Ǚ%)p`pxȤIc8Y^Cw-g8eM#xaWOTg]NDH?ѩQRq!Uw &N2wY*Dj팇/Mk>AۜᯠY`My)r =f9_;*eƹUv~[iу]H>c6xiXp[N0hуQr-xq>;IG[Fbl_!h5ǔ'm[mė1{[I|ii}ۣ9É~SVu( lOx{ zm5mu ȠZ{2\CIE\&,1Z{m°@ +H=| zJaiZLJRIA" ň*+K?32ղDzOh%`~X-'!}eO{|zˎa8C BWr XN{h{_Rc=F!+/v@Gd:Pkdg Id l,Y7M>f.=m~L|Yٹj@>)Zv:{IEo_ 3͊ "_ 3[ 3PfK W lΎr^ ३0.twvچҼ8S|*$8VbmQ)Ԓډ!WPhկNJ %o?:%^d^ߢҊ V";$(،%۴R EI;O+ $m}iHL47,-mWH'Wz67iZ$RW#)2b.k)b_(5Pk1"%吕H-lwŶkQtl>;&[Q FCA7TV}GԖAjSlEL*VĎ%Ѷ,v[R(@}w("+q(47(`>DFE[i'|Lo4z 60KoI{Eſ|w89AFyGx0E@â] Tow􍻌h0L@*D!#3Do" &!#蜄YDtWP»4$/ZMfA4OWD( .&ѡ bFp;f4aX]0[{D< @4n!hHÏ$zY5Q{;O)(ꛆJ_ek98 ӭvg9A8ʼn9A=8[j @"JC`Sh[O%-r@ N?@o.~~q :kD; %w$$$ kII"IAH;( q'&͚D7t! 
XI-pIE.Tp1K.Ip2؈8B>6BͧnPIҷ؈npKMmoڻ ^$J a"Q$JH aҒDM!L~ aDIT!L(+ [!LDWH#Bc<4BE›DIq!L(.w!ѓ(/~ rl5PfQKIrKmˢjS+;DBNDmQJV!}K':VȵmQeKJ\!cK'DEBQ:,dlDT DE; [:i!VҷtBJN[k-PM+i[l|] U a*W2tBuB{K'TOA%sh(JOQmPE0USI |*[>Q!.UҷC]JOU;b-2Ydn]u*?F-PǰAB8,QI[C5F쨤myC*ۦ9B{K(4$fZ*1p~&1S\)3?5١B8_TI["*kBNp&EVU2ׄ"f W%}M(b&1WDg5B8VI[d EѼJdnIB8MXI_Jd f c%}(bk(blV2gV#L $j!ǚQDk!} -dnI4[H_3-dEN'p!c(r9f ikF΅5440#,3م-*b; kJs-*b=f kJI4_Hۢ"f I4_H[S2s] HPH_S22#$AB6QH_S_rl?NGcI'R%:!G)dl[ZHRH_2|"8I$)oD~ ۞$2|"JABTHۂ"dQ-yUI i[PK"X!}'B|V%Э!K"Q]!m2_ iۦbBƖO(1-6BYR&زq!,dn E?H Zr&%!j-dl$ҷ#t-L=Hj~-6R;dn SN2碉NҶ"IƶumM(Z9H)y'֑$s9-HHJ\[Td?H-Hs {߹x  $c;ν\,-H$s`6 IPCf)Q=X(mifX`2"4H"ێBk{9 B [TH!s(°$;WR/.f&Tx':A43cs8dV\f~Ngb餑ܸ55Db"YUMW$N Lz?rҥz yvB$N$#0dAn;3y'l Lx"HT'7^' /epppG&fuqBOtN CNp`-; yx8[8'wu9m _NzX[JU~š Kgvxu"k a1y9 N7yH2saɍuQp>e9 p,N/Wʇ(:򑡷 GOXA6>Nzx9#td`/5 62GS Tz@PM6@}dBJZ +Y)Y:mųPo+jm*HV>"&V߼xṃ"(P#lbP b#7"K\8{GoQSL,*+޵ w\YY?m9+ YHĎG†]``g ۓN7}ywqڠ. fitA A4UN uGE _|Oss/vN=ƝTAt2qD ih:}B3[bxUMF󭡧eķ*XF?6xcyhy߇gІHX֠Uml}g@#ʶ<͗ח'i}?ڢ٦V'W6;̦H8ݠmꏅ7 [W_QhOGN6G:7vz?61hI<=9G55yӱ}}4[ [XNmhd|$S;]Wܿ7.gAO*oufldu0*5&G >"7&ϑc ~!iц4:Oq#x #Nh3ش9f_GF1`%G.n8N}>4ѐSr|7}uq*W͊CCI4X'Çud{JN,!xsp"P5/W1T%CW~աXS;>෼FdGABC07>rȺt~sWT9+48K%>9|'`I@A_:q|4 [prՠ"nfWF7>S0@rxM5)0}{'Őir)<)20QO߈9}4=6,[ ä7Ŷ#RZC+s{-Wߏ_ d7k]}>.V8j)<8ԛi=ODI\4Ǚ!- {)"{NB,=R%2?rE8OSA@49] u0YՋ`J#7#M?B0M{JL{F Nfrse|}P?Mhs{8xuc崼Dz?6x~+n^y1}?`~i|s *Gn/ iA\L*,ɫ\KE N^\BI=`pQ35,U9]Mz`y_(@U"//)a% ̏pP u~8(U?/I9z@U; LުnRh%C3=<椨b PUND 0&!zA66*:ArB=#^Za [QijD J'(.9t^:A-dX{:HMԍ:!xS6>uȁ>04z'$[ j9;u jmQbΨd׫wqBgHsHMH Z@f:A I 9;&F[ȧ#uvp$vB7H"bHo/=$-ZR:&UqulvI0)ٽeFGD hh>{q;t1}wҪ%-( A`j0f I٢qA1en#BPtc$ 1)dTf pց!:7N^|\H ?Dxe!@7!!9ZQy\prcPYل5Bj]bc!j:V  $5e + @h/B-yٔT+yA`9Dد䈀,Z22 K\u#v_\ P->$_X 4A9x 5DRH^oNH&-IE2FD2HJd8QILT2MC'p*&1R\7)45JP`O!d E AJ1TX{*Q5.bƪ&1Uǹ*kB3a5ٲ$?d E̱YJںm\%cM(bUҶМ_%s ™JPlb%sM(4Xg +kF\kF3pn?hFn1kZXs͘Y-s-44[BCspf1[\3 .Õ5)B8\I[3xd0X]\̮dnQ B8^I_S&dnQB8^I_S/C-*4l_ض JښR>BgP5MC%s:H"D%}M)BQɵ\;%HHJJ h[P2-opI"L!}'RMS$p!*dn{FHlT- aS!m H2|"tVI*mABO0,c- Z!s'B˖DzBOn.u-6BWb#~I$,mF 
[>$R0ҷ%d!s'BQDBvDžz%M"h!c5CqDBږPFdHH[H߶P2\3TAB[Hb#ą-6BDbBƶֵ\IږP:ض5Sȝ E?'$}:RdnQ !Oҷ"I-*R$dIƖP=@\lAw.IPyBEE0 $}K(s{3*!0~n8քW-ͬ&"c(q^L9Hqvc8/&!"}j6"2ce -*2 ZTK-*2 Z$%N%)8bMO-ql8-qyKs?'O_K ͉] o;Ya {ުyU{??{۷9X}aaӿÿs/V2N:o߳oe_/_Jy+6߉Uپڿ;7?M|77/Ͽ.?[4١wͿ5+o߉8\y3&-?}3{Ě݁ej'iJ6f\N3۲? 7Vry&;wyt>G$u^W}T|T;WY˃ҷxP(KG}~!_:14k}w_wL~i// :Wњ(_o~V0sQ[WLIʳA{xTi%A1eU$vAiyՉ"c^/o_dCII9.惂ăU8$^n#Ž{pi~?rbj6m]?:#CD8yqlKz^c!Ӧ|P( jVx|Px_xql[8o[}]?Y^v.~[~=IW,1@A+%3x@\_>_^kWiClX u^傊n10o$Y.((V(ZAʣ‹j;wZg\U_-@ \_%/xNlx+:\?mh$X߳>*H>;SW>*Iy^4/ Ku+ WW[]X ݋8}5^S^ռlBYmyAfޗ䃒GM( ˣGPGG˃ql7._sY_"߿IWk>E^䷃`gy滕ˣG=OߋGG5<*Iyw$fB>׭r4 Y֮=Fǹ,rG\w=TN{ĄiLs&=z7g׺ x2_ {WchGխgi k}Lm uK\|d@o\2zTm0u/7]n[3>MnC釓d=/zt6zי$nJX| ɷK1Nk| O}/^xݕ_wh6Wos'CN߫aÉ73:VăZ6ݎ'DxN| )~eS}@LWCUx -eɕ/fU>Ƈ8/xtE S]:IK]Oޠy['.US׍aG}>5!nw/}ǿޜ~d*`[n<4k(nX]vPArx\r/? qЫwn\^ 7X}ھ;{o|A_۰ō~גF26\5¦!/?_&[s8|XqQ # t˸z'Am_uSc^|~mx[,n_Æ|;n13f$7z/#/gSʣGw9ZLJ:;_h|dˇw|o,{Ӂ[pNN-k7G/]_o5 kbvSaֲ5l̀_Du뺠j=,^.VI:~ x˫5.5ƺ<*y<9!*`_d-XJ>09Dd"ۜ^^Yx|AX۬3UW2|k^yL Ǽ.׃~ WBgox#$O>SCܜ5VgMwy\eʲLu'G&Vk;Cr=o>-ߊuptu01+BXߺ5Mb}?6ka KbQ$Vk~:݆{=/b ,F\ou:Ć51݀ēՆoaM?l 8'!&i>A:_b\)Uw i_Nش/ǣI͎g!S#'u|m:mv\i&je獅mXo5l[AP^kX`n2S͑߭pAr2ym:νfK73]_Xe]/+Iܒ')I{_R|u6/VIL~C0Ʉu`>P8Y\V\@ZYoElvEW?UB3~0+:HIwß?Jxh),[ 0OT9m&yPx/,MΑ͛fLiAAѹTozoY<;黹Nc~C|yr-/~ݖlQUʋގsy{\L\7! 
nS=AB?obeVE:ɼx_dpsせ ėL4&FC"_t!"'҈!*5t}g]XY#QJlnJ;}َcT <ǚfn@{wݴU_S-"/7**amXMYwx~ODa5r?Lz?͇R\>9{PbE#طB-rݳ$w Q{VVeOXiVnTw}b}qXn$#XMz@>jgV} - ^*]7ûNݬ}IzH[*>}"/owèhyjI&*͑bVC 閻YЊ5zXw4TګE{p71Z}3R%|fs/xJ(z֪q/\9Kpk`x3\ޛA׍Fd17B.e|ײ`.7 9܂ٗ5,bלMXql@LVxD҆Q|S8j8:H+= ]ou6Qly/`k&>.: W~XlVAy6kY~d)Ϙ،l=/=XY~1TdxyREqCXe9utp1[G6e4|IqlLJg6~NQV><[cz/w;sǞ?wTF)゗_PtlƣzWy6x~)ޢ?5)zvOeYtEnwqԳ| A^Én@Qݩs7m]z`UEe7%5aϪd\vc` lAh~!!Lg ē _\,Sb%I.F.mfz^{*$mο;^ȱ2p2Z//bc@/ͯo?#O8Gr6 Τ3/_'8;Z*V!i\`Ǜz:/:PHĵ "& .D}r,ٚO0/_<:G3'>@lQz`Q6;wD<;E9ڢŠ%ҭ7㇭ hajZ\OO>EdGߌbr~Ow(iM_FX譊[D9VY|uG~!3Pmڧi DQ,r>:BY6nɂnm&t|KŲjWh_ e)ǭʽ܄Ɓ^Iq:9U9pufr^`X͔{ 3aχӁctEkr @LpA lcZaVdӝ G^6l90'l, lJ>=Njså1kÒAWXm*񑎰~Xvk2BXt>rڬ8[a#g ›5bG&]pc2f5 ¿_㎡WW.rQYun,IB1fw˱2Yu#G}am5^^ JH y7]7}a qgaqe S7 b6޹5>OQ^mVO {ʿ8[Y:|vSs߶[ar0oD6f -\XA a,YpIS=ÌO&2|;oggy0e/0Gn}NxT510'U Nf}d t:SY ^Nq WOpʳ?⿗S<h™~u,2ĭɲ0͛OJg k5`3Ddsp# +S8̘i%faqO=ɨ0ӗc?֎s,/ j` ɩӏ`UyoFAiG'C|n{ vO.E??њhI-U4xP'A4^|14dm#_&v\ȱ"5XÜo890̋+oI󼇠N0K?Ԝ,4kOifOn†`1,DǏ,c`EvMO&B`78~lyzda \ q?ɪōh)V=)o}ҪKfYJ ᜜ ĵTÔ,G[bKYtޔ,x[A>`ږ)Y,>,_8jM~AjRP5TJB1әtc&0ocq&ݑetEf|}N~ fa~tTP똅hGszÖqgih#ݔvx{몑LFvY[1mOMԊ,4s(c YPhVJ}OiYNv$m*&oy|U}5jjbrGgI_bo\$&hzcqR i/ixm8gvq2ˋd/4O!%[qr]h|U*hsBt bzi6Qy0GJYMuuFqs,?Qaw9NB o\z.irJȅ.I{ﷴp]iYH?;4)KWW'kV5YME-}ט{0?xD%P=XzZ^&>.rf"H7(SVDb=.m¦b( kyx+_V o F9B9V ag(Ƙ`ia;39.0 05yKfkƹް%U|n<\:G5pf?x~ ea?z&RQڏtT-M|F8md#TeVfms<Y< xg0 UnD.RBӰpBEi  Ӱ*Hc+O9#a cϓȺIA~d <ŵ$-#\MNjlV^8,>LR@|mǒe/NI/b~0/]~ t*zR9{QTPQAXzE;8:LraG>6x`6b7[a?Z8XsXqKol)=fߤJaؽL>c(R#wLO(0kyɸXdMR$.&iA3Rm NEEI^IX< h 6p0Ûqb c)"e\x8ʇ`Z)Z^{Vz <~ &_9+u.R9wLZMrR'|iX4U^xz} j25 WN>¸Bh7B)WN[PgÁnre|rƣ#V&Mm[N*r[99cn+yp[ P$Vbv+T k]يRdRIz\MV ZQ@^+!Dي$kh+ablmV8a.+tK,ru:[F @aATQu<|1#厛+t77[6_!DZa%\IPZIY~a&J˃V^ʬK1# h Y!4w2~;gcPI Hq= qH1QVQ]>kvީRe+'6K1Vˬ|W?v|dZ/B8ʽp `V>&aVȬ.r]G0+꬘-~apV,Yi4]R'X}Q r`VO(X VJ@4j}feSU/jC^jliŲ3#J%* ʸ*]OMS#kˑwNXf!XhEGhY@+ hEjK |Z Zhŵ>*|V HhJ䅼ĥV|"|?JH+DI\:v2٬rWKisZ5_BVZ='祉Z(ZZ>ke\x,%cqZ+cw~eϞ$pg왿 Bʶɯ|[- %%b+ YlZb+3`+rr[(̀d]WkQP+ jťR+>ˠVZ!Gj ]PZXi4MEZyebҊpҊ@J+V*Wie_ ꞿRiJcV"ҊSKqj,HZ"ҁJKV` hD}n]he\,&9{B׏s j-H+*o1@@0,]{ 
bf&VXШE)9K2IsZb5JK[*`~ Hgi\7 2~6[fZheξ AYQPY$MemMwnʗf" eV0<\r cf;k^̊;2+VX¬#҂*zPV,TY  _KeEMeevy{)+璧5ˋiRMee?x [¬LŬXbVQ2+ ʉ$rkTYz5AV,w9lYS%2қsde0Q Q5V^HM1$יMcXy1VX9nr_^ X1^0VFt=ʶftRc+NX$Fces.0Y1^dOƚ(ȥ VdAVD0V\>$FcƊ' cO_"?Q%ʬ2~B+#Hy++|b[cEPcE@c8s`\WcX9XY`i `02bX.Xi\*Ɗ+dϔX9|+h8Xygy +/cŹ ˉJ5V܄X),B܋X9PK V!VXyRnVYxVY͠XXeEe P\gʸEY*Y/gń.FUJ>:+VtVz Yy]VJzZFhЊQB+(Њ_ ~.JQ`4V]&w|^ʶ$T 23VZVZi96IVhe?V-eAT V %qXZ;ҥB+ $B+[h܄h%0Њ6LqNRH܆VrEY)29AV4DVY1TeEyet/F4aV57 %0+Oj͋Y&GU YeerSe 'ʳl:k<]QV^ HJrɡYaiFcdRZc (QYYXqj,AQV:Y&ʦXg_QVSG&+klX_ + YaE"+O^2\cT)FY:SeE]$J*+WQVHeeh-(+*,ࠬX_e1ee0y+|ޡফ$Wd.@FAV^~Ox!+F0Vscf!c XY5VĬ0V:X1V΅*vr`p[}ʝ,zF^b3cvNds✀|+DYܫʊUVnsNHe垵UVYWeeە?㓜Sg—7 DYSJ3eVV]0+Sh~2n3yͬvUfeVY֚̊A1?`Vbb+&k0?3+GdV0E%V6sIb0Ċ$7ee b881vPVDYkJkQVZb ʊ1ndAeaV\y1+gˬ7!b Qyif2+k~ʙ2+VY9qVk.9:+PRfʸ.Y¬\W¬X*WfA@ qV^ JkVPZI$Pg=Ίi&/g gY!YgK1TpV8+#hnYsˬ9Mfeܷf JkMuVNqV.z YTkF^ʊ,ʊʊV1o `J)+X1J2>+xFXgVƽV7bI&yTX1e_?ʓB;?`r"CPcm4VʥlY>эX6ܘ +k5ʃ_-rOAV4*¿#@V+!\dd>3,r&f+b<38TA5V Yyemһce}Udn*AV,Xi~{/Xq$i[+c4cE(Kcc=͌+-Xپ4XyN)Ɗsjq-Ã<\<X2._cї\YR*XiƊ+JAVzYBdƁ"+ujI0VjX1tVc+NVEV/@V:$-r!YiE _EV?@V(?" 
/BYYkCY%}fE(_fzf3; i5Vn-+-YA)(FVȊY)l#|C6Xyצckc5VMc%IL%V*X*_6bw{!V:X)b3+`@WkR+|#I.2+c`10+#Z$w]Q3`VYyVAbi@)<+pYvZ5X% h݁Z1l!֊JVE鹏*[[G{qb4"rߘ8X+h+&UC["x@R m[?W[!CmfLmTj+V^_'J#qܷ0 =5V:F[gAX}2P??ڊP#JQib #ȅފ0J/`9I^|+EHWƊT^UcgWm)?k+r)T\ҪM%U*ۈ+6Jr\9lJDWs̕Y%Rպ zyq EȕU$WV(rj+f/\ARq&NJ+5# Wg |qWFHm UPt+W^C4kP\9ygLmĕF+pWF\iCreSI}crO"|+:JWAW,]1Rtz?ѕkS,z]qMt%n+pqn]\~\{}̕\+Tq ņ<~(Wb\p+-\i(⮂+t+V ⋮zI1WZ2+_+MBK+0Wثrnx*1W^g@̕\\Y\y毌Ҟ1W\b>抬 ]tEmpDWd"JO Еk]JS]ivЕS}2MV+Hԕo'<݌$Sԕ碮ꊫTUW< TWTWD]q9n+-G]y)"+eB}\e=Yu%.Fm zAH<S /rŜb䊽!W\1rE Pr %W r%WlW_s{J+ ݞW.A\J+,s+TWP+|D\%da+V\9+ce<E{t̕ETٳw2 rQWſ"6Ǫ+ǝ!vuf eW*xdWYwLwBljbB]IHx ԩP3 2BlQM`W+-]H]{-U+dtz鮜ׯ2QV⮐'ٔ+qWz ʗCB]ɱR5tW<{Ċy,T)㮌ËwweW.®PvZns|kD]\+}BOuݪ+D®v r}9,qWNJQMٕs~+<$zK41Mveĺ 2FY+sfW`WzOv7Jb+g@מ8 S+^;+ 61W4$Wn++ }%W%W %WHZ\2\yȝliO䖋_y$WVr9]rCwɕIrYkB8\InJ-ʊ+iW7D\q2V #8U\aQPqqDOY+a4Wsi͕K\]B|ik/B4Br5jɕqOr;/r3ʨV 2 O+lCreC\ \Xrzxɕ5rJW\"qe s#R} q%+'YV9VڐmEڊ XVjs8B[imeTV[N1ڊl其D[ڊIlj+:U[iImFV:ڊv+mJVVT[14 [[)8rK\P/Bkt>W\p52ʚLEW^IQVr~DYp"XKbg8q˙ 2}u\MJ @WNw Wʽ\5Rʖ@9+LpRRQ+4y\DʾqG\q%hMW })\؈+>pW*D#ҍ*Qŕ+vXT\[r9F\qRq$ŕq)7kx["y+Q0^ RJeU,ˆ+cP"u)WhW_ Kkv#AW]a띿Wԕ<uQ-JׅTWHR]`艨+0*R. ؊\g+[L+4\yr@", \S͕sAψ2teAB@W' E;UW W++dWs?ꊥUW4D](wKEse">/C#"bJEW2ԁ]9QWXTtEt䄢+׬Y9AW <% 80Wl?һ芕+ tDRMuiJ+7l&슐+슝_ 뼲+BTW AWzv]hAW^"DЕj0W>͙4dg vƖT V\-TIq boQ@[/dĘ>)RmRnI 7r+.V,-nr+?ɭܩ/VX# 9j+Jb br|U*ZZ19w=֊e]ɞ =bZZipk%Jɏ"tR4Nke?TS>ޥl"srn9_skEYH LkIPZ)kŪ:0X+MZhkEbRkEk۰VƠdfaEke\SPZ6@`KTkZWj^ԊiP+ Be3oV%rRs M2؟@+LVZ 7QWTxZ!{WY'bYh|UB+<묘CbQ]J BX+/kuV0=VƝe2t?ZiZ+77֊5P+-ZZA>(B+LR+R+/Gj6~T)xK5J+;g#.] 2!i%zVnH-wRiROk.V@4!ҶVN(P+4Z3VQF@dg^ClT."$"Db[P+/aR+MZy2H+eK"XNiGYN19c@e%jŲR+G%pZq+b jmHV"ӭ'JV\UZ1V6ԏH+*XbkEX+fZ1BkB[犭|S>bFk1X+VެׅON aVuld|Z+V@ZnZiOnCjFdJ+-zK+9H+DZa%xEZi)&{˞#bESZ$Y:ᬼ8+^ʕ#{A+[gMheL=³ZngdVYaJdVYLY8#l.Jl>qVzYYi J%/1_8+c.7/$ΊjuV)8+wuVY٩YY@+ۚhEYhQH+B? 
J+eA"쫴BieIRҊ!8AVieW)V%JV+^i{pGZi Ҋ~nJ;_‰1YoH+-VAZyiuZysCՑVArީVZIUiEiJ'sJ+gOj>TR+j~R+J+[2V}.FRI 5H+hWZqBDVsVDieLI+K}Ƀi呥|؍ЩBmEEiTYieEܿ~eZy+VhKZQ2AZIDZi2JǧJ+9V^NdJVy91DX+jjTuj]ԡVOR+~LBjJ8 ajq‚ZSBZiY'<-X"x4b1ȑg}^*+ +ϚQXVf+ r YWdRu +F0V::XUKocedJV `pXJ+m1V^(H+,!U&JxD<%Sfr(+6Ĭ2J' fΰ2+aVlbVx.Ȭ|7ԍ0+-.YYͬWYeVYceeΉʱ"PDYeEc ^Xѫr DYɊKY!Jl GKfj0ʳ ' feO\V fcwg%+:+ů^%qV7ZhhVVHLVGfH+- xZq.Wh h DH+MXiAkVpnrRi֕ `J+/p$5Cj+ dBZhe|Ebvo 'J0+¬Rg5E8+v {9+:+XpV:?Y1?Jg+yf7?+ʬ܌`VlB 2n?[xQiɬ2ìxX.ҀJ1B0V tޚ1V^HgQ%uT'JM+ObblQU61oݮaDߧuȚ‘ƊbZ>Xپ71VXʘ}r;7Ɗk83YlY!4$[dUaq.ALeEd|BkǺAYLe (+yJYI̊dVFmkn=?P¬ԭr4TRKytDgeO:+tY gb:+_gQ̊%meVq6$C[i j+ͩP/”Glb+4Cxa+ [1؊ħk`+7ΕAɵڊeh+V>h+ptU[y!h+JKj+ҙh+gA[ kȺ֊E_Z+ugbZ|ju%ZWJZiiF23JV&UXAV^KI`hEY%8 D3Br( Bi21 +<φ +2JI +DVX~#-Ɗs4 A2EVږeeTj䌲 l$Xz@(T@Pbz'Jì\\IfrYQZi * b(J@+'[gECg%7eVYaਲ਼BtМG r*/fe~̊"Yg Vs:+V:+Yq‚ m tVƾgwبTgl:+'l ΊlJْ=tVUUgE'yP"8+:+P:+-Z9?nY.~&B+ϥNh{jT4 )B+9nt'r(+ h3ZVjTZCZ9.ĞH+zRi%DZ8JaQJ=y6ZgBbG/){rh"Z%)BQ-JJAJ4$BJ?[jyJxAXTjAoFR+Z+(Z+0R+̔V:" 2n&2毂Z9Nje<fLjzR+4zY+ܐVbԈX+URV^ HVV^<cha< Wܴh0ҢX6Z@5Vn\X+d-C#B\7J=R+ckgye3 l ଼Y׾IpVT4uVY15 g '?n´HggeY96nQ|}J%;k$ʺ$/XffQ/f%rQV蹃!K]&_dFYٶ\"+_s +5V AAaE,aWS@XaVa V$V> \!VXyYVE~+\+FgVAXiXXQXy5+!a"%VI+&i< /b%*By" WO%ŠK +=+"߬uHOjŞR+$aHfVb񕓚T뗵Bʭ2kKc4ykucr~X+a, [ [ivh[@lb+fЕXk&Z98VB[IjR+k VL\*|Aj' KbԊ Vl B8 º@X.bQԊafIRMkZ+OZ⬵R'ʳnok%gWX+-Z3#VŨ=WNriBBZg =tR+XJw V8jtVVNg5X+ջVZq5TkesVhVk<Vj7`.ZlYl5_m4VjezxO?FkC\kR+jR+zHZ9<P+^,iUJ+rX!c!ʋ6AZYiE'"Њ> (y {N|Vi 10Z%HLiYr9< p X&rY(YΊ&{7묔BJ7w)B+~@+Y+ҡ~ ZTVlSee?^PV8ZqVVh%ڎҊVZ1k{b+bKlL;$OlŦ5/mhrV7b+WF pfx@[aJRo'̍oꭸƇʓH??*ފi[iٵފ(Jz+$Vc";&-z+z+o ފd#w[to"p yV|#܊=ʭ_n[/܊|r+Oqp+x˭0[yW­< AԖp+wyuJ­Vn8Po\_y+܁Vjڈ2V"9?í4RCnee' V|Nɭ,K Gm 3VkܳŦJ`+VFpߎrMx؊43o{‚ /og[v=0ފB?ފYV< 󝧷2mN+@\A\_y T6WFG$%W +c_ K+^+v\1[ +q!Ẁa7?⊡!Wsu+/peP\H X"J +u>"t q~W,p+<Ŏ \Vp+sW*\yyH+)Ŝ9+~xŕq!ݩWqyኸR 4+{+3cc+H8)F\yb+sC>ZWaFWn+ Xx+τc~JXp+{ʭ8[1Hne !Xo?Ӻ'b+'48p`+l%{VTYV+[iJlJ-8JJ+\bJ+K+VVF+>G?JE7mM! 
?W}?v?_}hʹ .~+-=gV"-c7-ۇd}7w̸ˆ0q\ѲCWe7j4ZFehyc;@ZFd 2'0](-O  +LFia;5 (he^6302(#AZƏeAZ#3_AZF EqfJLC 2vi35 ҲCiH.QZF%Gc (-O/[242YW6Le(:Ps qZQ ze\'AY&2K,0-cGLXeô8Rq u&2>ô=2 㮇c Ӳ&L˶0-4˦aZ0-f2q42tZ:iɝN˶i{8->Qϋ(N˸g3Nx((qZ-i 4TѲͼI 2e#Z2We|μ)'@8ZF5 s?h 2;dZg-Fe"yMqBvh _GLY߲(-??JʖWX ecAZI 2 cy'u(QZF=NNe`Z<F>aZX4~F1j 2@se<ɴôvZƆ qZuZoeFVwQVei3|VXis|P-yy*2g{nT8R9.Za|P-㱚#ej-3C 㱺X-w"̘¶3Zjxe1U&z÷l9Z!|`-S>4 \NGxy Yee͎1qeJqx}1KZwD?&2Nky2Le ;sk@.< 7kyV63|Ww^wa!\xV'\n 2T'ZX-kKõnZ:kïZư2[:ly0Ⳝ8" ke-".#bo`I$CȖmǓ2.3P[` @&b;[X.JĖ1^WGB |-ctr%Sl;pV >pIĖ#bxE-|e-oM薬ndˤ[]| 21bLe< e9ny[23y-#f-Ow;g6#'F5yq e9[nydC<#lt˸s Xw2"+l-K-2[FnoD)o!2Q[-cX 2іJA[\m14&2'u-#=%/i+25[Al12&@@bKe\w-0hK-)hxiykdD[mi,I-l1Oe۹Ulъ( zl7;[,}c<9y;KD[2[F{[M:8L[^-s#X,ZƁȾ2Dx-pᵘ2.Le^}+!͗|l橀ªLebldKk2[dly`s7r^|fA^z-!]e;7؞x-X̤Ҡ:`K㓀-]+l3>W%[:DleCP]62nUpdYK-`Kۀ-.k<ҹs%2'\Z|ԝ`31̉b ^˘p-<5DM)ޒ`1XgceFWeT;mQ'VKX-&SjxX-e.ْªK2_ťn'$h4X-FLe5=;&I Zj-]EkyXRWEZ8P!e-㮙Nw3^Xo5+-]lieϙU;#{̖'r-\2nllռbNҥ̖gbIx7 +@^9iNv[^9/%S^YX"Sye ++l栝قrp)Gt+j*\(%+bmy iprbWj|A Rzer ^yK+Lz~8B^9 rWU^^ye'oyYy%+/ ZxCJ)+# Xn-J  WvPx[_hrx2+7~+e5W6+W^39KWV mW}e=Z0䬾BTP}e~+Z&+o}e_$Ukf`YbN]vhc+ĎbV):^1+`{ҤW_yE%b)&>".W/ RbGjdu4$X^ إWՐ^1i eI+cL5 X1镑6e+b<&ٓìʂTBLXrT"pg@_@hW|n +"+ï4_~eр_PKT~%+U>^@W_I[KWqWe2[H,^;6^a}"XwGܷ]b}E+<쿰+ 0]Y]qy&]MMvU+֬ɮl+,fXz3ٕ+0dW(T]iuea濗ӽVuE[MuE+!n-+7uEʌ]sÚ抉5W><4WEjd! 
!+ht\Yw,̕O5W4WhT\qbb̕qF0͕5E'm%/WFbPt+#*Eu [h|VQmeY^V>U[IYVFf|`+䋭[!؊c!:3`+ROaGVRX!{\as)+ʧ V Xyx^`E]`H&'bvBVSk `mVXX1-r PVdQөxUWW܎*cz5U[U+=_Ÿ«|5ҥ6y*uUl4W +OV_ M~>z#PBa(ŠyyV WFa*Uae@,` y+>c ,)V\PX1a幞VxUU.*TWWΫRd_EU_H*PૌU *_f*cK*"W?%*~F_eW!I_*zW*%}UDWWWpq,[{5nО Am{`ihlR_:Hlz*^[絴 EΥUHUpU_ʼ U]e5.3Z!cuUo%'J)l *_BCu*UJAL9hI'gThJXZ`S[EI[E^[ ɒJ &lA/9ThdFbřϋX8!BL\be"M#Vh-BE(;@^b%I5V?ZYceSͻƊ+:+-X^GbQ1V,XaYNcDC5VskH2V᮱BV\*"++z FEVAVڊ\_L SB>n2+Oa/}rrì =aV^" ʺ%Yfe^_f2+Dʬ-^f6N2+' ̊ :JH"BNj Lhh}ZVhh!}by VvZE#z#`NVZ񉠴OVMOheZЊB+[ XNJr+d )VV_JVZ2M_hIVheK5DB+,uVOY YA/ɼp8+(uVίKBY9w0 :+ipVXY1Uf1'=bʬX'rФ]fӸ̊VLo(*+SY1HYeeQVVw;ʊ7>78Yn/dEdJi +3DV6*EVzȃYvY9.|QJV"+mAVNڐd m Y0V& Y,+X$EcŸʋ8!=Ɗ dV})RIf%32+M;ì^&Rg8+EGqV^ G篳cYgu:+s=4YaRg /GZ/gV4V)VF\'JJ:s͙B3:+ XvUiEzeB+Z%ʳ87l?J EV$DVhPPd:Kz8(+UVw\RVxkUYѓSYYy8G;X ˬfe}M*mfeС쀿Re]I) hUV(bI^T*+V,0ZJ//T7ЊB+\Њ`4p-&RhDVZNh[c /KBB+JJ+ 1F*F!t2fҊJ+"bie翗@Eh&*B+B̡Z)"ѮRi,J+#hni]2P%ǡ+c9XV~Za*$b_hEM qVsgXh !J (WZүAcZұ(ˋ:*Z+2qZ+ Z+NAVB֊/ly؊mcBu5?J-4?p+p+{q+VX/B;gr+xij+ƌVVNSj+VVb ڊ&nEYn3sp+<ƚن܊܊!#뭸.sP\oz+>x+gimEMmE\mA[K[!lBsj+/% yC[g^#=h?Ole&:j@ZʙKs寂JUlŞ`+c?֊,֊hhњٓwXkeAVy k|VȠVZGi%sB+6ZɳQgRpD:+#/f2+ dV ТJs uVlp_gEgVLqDb[qDbIh`MlT!2] ,?RY/eŖ*+U`TVFPV}WY@ee} I}MwUXf *V,UQ,8EtgPeV`]O\Au!\eQEIP kUuNWŜ];eꪴW4}]N2*]uU*%HUsQʘI;B"<_!XZa+ڀU:RVKV^YɆ Uƌ(IhXCZer *_6VaT*l**{U(FVҨ8UV*^u*C DWW0tU]WHx\Ur|Ax+&UdU+UOyQ;ɫWӞJC*Otduu=K*/] AW9xgWI duUD] UQ]*'J]H]u'5UUUwWt'=7 )+нH\7*e*^Uf(|ΔQИ iU4UPd,r2sXXSX9[c6+O#f-/ ,DeeUe=Ȋ "+6J)r\AVLaY21+4ܩROEavsq +X?JA7 Ca* +4/b+FR%V<)ЦGat-Š#R*,Š ++$V\)˧XX{]b%]J8eXMMXC!V|kiiʇx3cw5VDe5V BX9ԥĊÁ+Hg+؇@aE kR=+ܟFV]dEV,B"+˖ Rd𕃬Υ +E2Lof06-b=̊dVF5D8 *+. eŚBY)re?eEzVeEh D J,mj,Li"+t*rHȞRj dE'dEնʊ>̊̊Ɯ̊s#$Bk2+EY1& bN'2+¬Qfŕ!'aVYw0+.Y9o><:K~@e(ʒ$B+@+,Y!∳e?gn8Ļ$)ݛ8+]rVΊL Y1(wwB[J+))LVZ9BVl$^2傘AZ1r~9ՑVv!ԊҲ'J|h+諭ځ`+Yp=V|d8[~*h֊ זB`VڵV%K;ٞ9X+V6j$Mj#FR+KZQRZ WiJ+fo)]tVAqZB!BLсVsX@+)bDÅV"#bs_h%JB+Z DZ1ShV|P+2*VSZEj4fxZMs#;*llNiŲ$KސVjLje/BtV'[j%K{M2#mQ t˄VkX+} ʖb+滊@`+-2sV,Q[؊M|VC[)P jj+,?3ԊR+{V^jNZyM;MvZiAMóeORZ+KlVrZ*Ij%V^TZy*B+N Z"B7QV]hB+,zZa! 
ꗴr+ X菴Z9h#@+Vg :Z4ZZ,1$V;VN_shQlhvB+šZ!!bOpR hEa hEAYhe\9VB+ Z)}dUx)-YGx:+cbbgŞ8+pZѫ y:+>YYcb)%(#gilbi rL3ʊ*+C+&gAY!EJS\X9XHb8\BK2K$V^[v]%bQ^;X'vIXSBB +~jlUWRB' L+޿XX!ݨƊj+Gf+u&EVd$ Mq%V/",z%+dUX! j{@X!fTaŖ +X+&XpnXX9Q" /a% +6ԖX(Tb<'+/b&J8*X-qq+dTXY(CX6+:+bf"hV (+D+#U"XBXPb%_X8CU'ߋW!ū|U[^W, '"rݩӑWO|mGWUcJBUSP om*#;3X["*6lCEV)Ё¢Mm ǥU*VYWV9$QR| Qfj*o_e@TYe-dU_+7U[W)*=[eMVq"#ꪶ8׼G*VVaބ])FBEFmkK!Jl*<^JNZq.7k @ 3a `P` p\ކ)\kU`uU,iU1Ṯ ٶuUhPWt]qUhYWJ1\X;H+qUUU]V}UhUQXEnYX6U#TV![Y%|RX*JdU,]U"*TUYUUT7UU|hh=VU!\RU5g*F`U{*P*P0 V6eU*.FVr(`U_W;g )8gDJWK`** ؂rm*itUX~e9eMd?+܄XU@\PDjTPY3lfkN*cSsU!zR_SEXS~њ*ęI*$'TN L*ITL#jjPQSG u5Un"n*ʌ*[*ʸT1.:jIjͭ2rS~ʗsSŵ M>M*6TT/B@M|)}TR:*4#TysLt /!NSR%%D*v-! ْTJTp[R"/B *X1UֹWS˶*v4U>0U }itRbZS`ն RM9|M*fjTq~b(jxi,jx- UUb UyoTJtIRRA#Gd*?ʳU K TS'˯'\Rf$ULwTn^2Isr*J-VReT+W/S*U)!BuY **PBU4fU;5UUY3PUyꪪbIh1%ʕ7bq4Jkj,*₪<,G=UGz|*ƔLTQN PUܣUY~2TϕV(T91*;zA{**~;ANoa " is*dK@Y'Ao@#PUtUpTBQ=tK9ZɩtPN O)\u݂݊[j .슻'\\8X9!QXHY며Sa='k* UHcjPŵmE@* - -0RT{TaZO =z*>T"J{ނ*BUF R-@#"Ѡʎ,*GTY.De~D+.rȹ ߊ C`I?/R]HUU2hQ%@D7FTaAHTq3AU*:. /ZPe bEJv@hWMV"VP[XeEhX`+U>tR* FXv]Uɪl|ڬ4%NͪʚUBVeBVG fͪ 3U~HWgef/Y eSYeϊeB6T*Εʹ#-UFkI*UV hMeAV9U0ʚUqeMUe[VYYUrU' ne*tVA}V*GU2iYݲUnXڒU(x*AzU.~"J߯*(,"r2|i[ŏWQ.WѴWDWq-U;*QU\\EQ\Zqq.q*֓«t풼ʞ=^ CrR}M*JW鍦Uv=*J*mU6X1a m`R!TVq-V>HX{)¶+D_ɑ3g1V^_Yc%+ XjUa titcd&Vn+֌CY/~+TImbd&VSsX#BXɯXXq2s~^ʊ(+TbF뉲„̊C(YlYJQo~謜ggPΊ8+:B;8+d_r/KX r}謰jΊ1ěY&Y]gtYIª Os{̓eE!=ʕ|Wf|MKuVFZƷB}$Ya/Y::+kRCYm\4V>VVv#=+G`%OۃBL*>Uj_eUػ}WylQ+QV> I&bů޸ jI5բbs{(JJoɀ2 /EV򬑕uAVwȊk&"+^RVI1+DPV(0~!+: +lE!$DEYQod_dÜd +4 (,dY6 +#jȊw#+4K+n2VW+kyN1V5V~"Zc>+Xui4\V_ͬQife?jXmzc#o}{]C?m?-G82Z{i9F=A%$ҙ# 򄂴| -@.(2 kPZmr\ #u2݄i9FʹM1-H +-ZJ˼,aOJic$m2?J1*?8JQoUe~ʹ#W;`,rWYMRP|㜝 7; #]?>II-(w{RR\TJj9FqC.gI-U?J-(]H#(eދqDJjs[jצ &"!RQ-Jo5]m7cyoM>Ev&?yFcT1r\U#2*wHQ-cm/I%a}K-c69#xK-(aʩ(ޘ*jKZ~?Jâco9'k*G f|a~cð̦_yUB2J*jܣٮ*6VIO5=T˼y-K5:P- RTXE̟ys70/xS-YRlU%T(P-HLQ-(wMFj? 
'c}I-ǨH-5{5G( rB`.30_SH-(~-R1kçr=˞6}Kj:EZQ>6QRǔ2A,%0OI-ƌSR&b2+%1PEI-uW2̜)2cTUqA%6~ZZfk*evhJj9)e~VLZf/evJjg]ղoZ(BS"R1e2#bPxe@E:P-A[0,307"j#-GflavJQ)j9FhlZWlfZfgS %Qb kc ^W`-5 2C }$Z#/ؼc\Kŵ̶]\j!&Jl/,[^Oވ-'ՐM2Ukmy2sLZӊzGkyph-5خ[5KjےZc@,Z˫2o@!52OT]ay kZY z{y[-y(帖μ)72_~va-sԕ9١P)eEkk/\Zk ˍ2 /?Z'qpDky_^6MkcOkϨO5W̐ kÄ4Z)RMb[kJ wvq-qX9S^ė)M\BkZf/y,>̓~^cLZ:E;ωrb=TZ^!h-sDb ցFOZf)/,QV>զLj]e6SX-sy[-3Ti룋jyS-wP-9ȭPT 9rO2,Z~{42'Z!oR-5Tב#]JC/e6Z٩NV!V>iGZfT5lZJ -5Pno0;uW¡x(̲Wޯ b̓s寮j٧EY-z˓[ZmսedR-sQjyqB̏ /3c|ϜZ4:h-]kkܕk0kW编_bHSY5ӽ9zX˜TTa2(jHa-1Z|2lr}9R9sgO(X-3`P-sV}d3T˼RkfR 28;L+\PL˻R#,{5aZk>yZ,FҤ9LˌQ2[잿:*r"-y]HeޕU>U9Fբ'%'%i9F_[1H Dy(e>̯S8-2_SNˌp-[ TrZF4 4N]0=l]aZ核NTZ^0-sU'S]oܻQZ3_qiyAZf/d9DiykEiy-i R,ܚՉiX.{ g:-;irZfKQN˼t9TÞ©~!JK G]lFi&qEi=f$i3߯r*Xa <⸜0e61''w])-r%Ruu ++: -5M#VRTF1-l+ :e>IG2wR"J<)e: W$|e~VZ^(-G(r J|P})-`J|]flkyL˼_L+|ۉ2cGi&rex@-3PҢ`Y>wE."_Z^g1g{V ۀUr~Z^C@-'킍ej`NNI-Q]W 0TlMcjp.w,*eތpC?9=js}S-FT)eVE7WevKl )ֳV|lOeP+h@Q-uJʽxU k׹v4İB2pZ^QP-[j׆KB:P-W3'Ø-kve^;AtPTky'TkFeٴZ^P-\ﶜZ)P䖥p i e jO\%GA-\Y*uo>Mt1GA-š ?5WhfR@-sG=)^7R:78шme)gDԘ>Z V ;N˼+#1N1 Gi_zC-LjySNUzl9̌2Nm9Fe9lGbU{N]9-X7)Vc[rZףNi6NK/ϊ!r&\rZf˪B8-&Ri+{E|htNKrZfϘ_u%8'L1fPi7P`bZfLʥ"Xcy1-se;M1-=$i镬0-rVf 2'כiO1ŴVhpFK0Zz~2o7<&%c(gA-evH[: &F WtZp*V "C/'F˜EXz>,G6z9k^CAc-yWٲ hW. 
-Rui<>zaZf9*LKǗ`Z:~Rh*Or1-sΑbq$Tl;_ո]-3dc:- 2'yVm80LeU {{ i\BZf^AZj;Gj،JAZ3%MٺỶF0BZ*ߦ/i\WHLK !W֬XԚjy ŵ})e0W8g(uFt~i 8-8-_e@&0NK/fP*GL\DST~%Q 9T=^M)-g5(*(-5}+-eN@Yt'g^i9_RL˸?hOX%et2!F28yցbq{G242Z:1Zz ђ%(Jh1be^Z: 2PNZ*\QoS@ˌ~F)evih @kneqZ:hQ h9 _WeZ?t *@<5h 2J{jOhh9Bq6)>f|,GcYSR(WSYfqGlIN?>|>Oogցǃ(e;g.*Kt "%ҩ,~l9ʇ23xM)KY^,sVR6;2'Kv`Y3b[Y^,پG!Y: Ru hbY^#,e>Rre&T,eު52Ė#)bc, ,aWR=ur򚜄f$/OQ>Ff@ kFEfA`kHA2Kgf YmYIԇ,{#8ken8Y>r7hN)hyZ@*nʞ)+t)BX&eX-x=wXIZgeY2Yej8i,] 2(wyR=E8|$2G-O'S>ߣY:uFgt%:˼b2rYzI߅G,0Z?bIԞtXYD5G<"889pR%8ˌ1'm27jo}f}f,.h*Y4 _9f=ݯh9,g2#I0O=ȤYd dI,jh^fRhfyEfy=BԀ^S4zfyjfm;͙*YȕŏJ>YI͏R3Bj[Vj嶾ɢe>@i%̱Hܜ>jr$uTѝ: :BK t:B\HFnSG|B˘?bVųXbmmpdgA3\Kʨ$򡳼t::Kı萣t:AY8KWZb=CYa"YfftctB:6K-ՁTQm'T4˼H90er'ǟ .eU.*}*N9 }3TVɫr|*0[x:UDVyNRYS Vi4X%c lTXӰMKVX\XX9*MGVN*M0!h4"2:5U^J_h\ULVVN e9*d⵬xVY{evUdtd"~idJ-eUZV9} .]*6BּeV*DVOV&Z[2Jp N\UnrpG;*y* MUv #VF'KG nrVڔX.<+ H4“TauV|F+ &V( UXYbώb K X/?3ʰU.*׎4Bw*L/_%?_E~Wq$U`eR XYn'{V*kbe_;BWcQMtҷĊY+$VlĊ=6r@obED bae Kp"VΛ=1V2E + +k|bRldˆ, +M388&WAV>/dex!T ϖgd++AV%AVV*TVXkeQV' bRV>lέ%8Jq)+ ,4KA+@,-" +` X 2>iCC+D-Z9Q>ߩJ+㊃r%H+*Kn3U̖V(ZSZqLrө+Jc1H+%i%VVhE(Ί:+xn⬌*2+DdV|ʬȬx>QVȊJL+VV"ʽy$Š +-z*+Iha>x '9:-ĊM%6|VWm_ʘN`e7Sց~'+|U#Mv*̻y׀Wi^~^ʮ<^d$," |K^E0@_E\`gVf X!0TX9VVVXh`<6V b84lceNc2j@2nȴ-cCcy0V6\w+ 4ɞX"!bM +g 9tPV]琼MͫD b*7M^]*'&y*V}@@Wy *Z/*t%*E*fXX]ER]E M]B y<<WŀWi& >`U:#*XAXwĕ*>UUW}*>UWQkWYyJUxg*{g>}3VHl`ť-c M7ŸLJ_+5QxV 8VX鯇BfR +-P台@Ċa* XYn-++c8YQBodKX]Z<V*na1  !lUWB_倩W1_7YW1WC_e}2W!S}h\_eˬJ^eCۖW5ʫ8vW!y-M/^%>3!*TĴbQ**UxAh;MJ Jm[N* ȪdݳY$Y&}ͪlV SfU50 o**}aUlVŒHYKUi*b*'7>V8Ysê4@[VE V䂣q@Te{`DUEU;*;&k(i#*'Bn*AUTA@UHUPT;HTE TH7M1MMJDUƣTyp*D_TٹyUi%ʝlFUh@UskTELT U*L<7 A*8zU9?pU ]h @!88UAnWQV~*:*UxiK [V~>*s*hhX&* VbZi[VYeDӲUؓNYent _ZEmCťl*UDմUt=Wa/]% /]eE@WJ*gj\ER\J6l[]UVsV'$dDI)iuUAi HN[ Y./[mćVSv\[)mlV>/ZEZ!@txMZEIZIZe*HSZE[ZŖU*vU*J*'΢ íqU,W7WW\%e/m$gisB Vѩ(Ze;U6@jiUJ4m(*UqhZEi_F=UV̥U֍[E[يJK:**ĩ2\5UNjWDi[eAFVm[QwʹdP"RB[bNi߶q4mcV!׾mvh[EK[e5杻r\q=k *f*#PGp*FU5U , tBWyMY-doU\oBX rtۮNGmUV0Fm* mV12 y*UUԵUH'n[M[ES[O[UmUVkj[*G_? 
b%bpRZ(}'H`RIJ*bTZiL*UQV!UXvUVq۴J6soZtU"Uؿq1US8= <{N FkB1h*זHJOU(WŔWq6%+&O}*=gX!Wl`\`VVI+z eTp3 XaB7%:}*_ۆ;{̖7Ĭ7>+ #rɃXaN`j`4!zV;Xb6BE*H=C$Hse'V``oVI`e?Vx~r N +)ū4;b%*~6i_唫WuQU܌}W\7*V⫄k^tg;0PWb0uwSW1N]KWA]u*᪫Wv+6%JBlr^!BJ"C 6߰ *ڶAсN:KVq?mmK탶*;e۶}b ;6J*}bDr)`T0% J.?) *NJ*ӣ@D\Q|UtT6 UHפS4UXjS4M4ULTqOWMf*T+IiCTq;4Eq,bEWEw[WTT)lQNQUJ̔TTaI{ 2W2t? UTMAlSqk0UdA U,T Jђ*VQhŻAhe< rZY/]hZd+O^gCZidis)-D*,;iYȌVHjhhE@hE_he|ZY ZV|2 l'F -B+( V o#kM$2@I+EAZyQuX / 0*VsJ Ҋ*"䨴Б*eWZPJie B* 2#uhZQlhQʨh共NhEXhmZ! blGh%Z>p(@+lULtVꬰYfe_h2+,ʬAZoK+dJ+n !rᬜ0 8+>B+ hewșP+ojP+΀^ ⦳X+ZYlUB獭䂷hX+Z9Zq#VR?UVMjV-FZjiPpK+#o~!J V&ʟ8*V35%Z+DZ+7bԊR+R+c:R{YEgE CgܸvVZ?):+,`tXΊ8+Ya_vVjUV^JmIkC+OgegM!YGkgs/gr+-0kgIfnYCgz I0y9+ @+m (H)H++*yvlifFZZ9USVnH+DVišoK+q@h VFcȔVܙ+ZT\p@+,_ 8.b1Њ)B+  VZ1ZhV\h峤lh,&%-DāYYSY Y1felΐffV bA ͬtCYyNQ0+ì(JE8+ΊY\fͻY"9~3+9fLב8+mH,'Sn#LVXmi%2 \V65&Mr@P BZGK+RgH+eJ+ I XB9!S0%P+JP%E$$2O5P+7+(VZ!ˣ* , oK+-8dhiEiłWZZa稖VvU>9H+b MBJR,6$>)Z!֊cZ+ OVNVEkJXҘΊ-@J9+>tVB1+*FF̊F-Ί3\{ tV0Z"jEsBj>Hks0jlsc+LqVѺu垎2ZE> melI% )iBAc+2~`+V(hlŢf"[!ٿxa++wʅBr [aVȭi{+@p+x {4lxRhSoeTUox+c/3Z୴Poj^ފfފVX8no@^ୌ& ɩ 4<bV^K)򰇤 ʨS9WmZ+޾P [Y~؊I%P6`+D^JƭZ+VfaSIX+im-.ZnZZjԊR+Lxj\qTROkh}ky(VZ}@[YMp+\wyi+O3Ԋ0`+js ʨJӚV';bOH+*"'޾B+*>B+ȁV_^h>9B+P5Jh|wSgJfvpVXamg@W;+΁Ң pyC+@ 8+RVɏ124Jb謨Vf"7^B+2Њ;# Њ!V&r)$P_plYH%lk\M[Cl>`Vn*#V3FPlp kř֊V֊OhlET׉\VB: rV[Fc+l}r 0[Zaa' 8 29P+ y*M/}Q+ld V0VS~rV_VX[iBlbVN- *eE85VV_lMb+SV$VX7[RR֍V\=ileV|[qumb==vkmŮp?FzVB7"=%BNLS+K /VAZpUZRZu ;5\Kh JV[Z> 2AaPVlʊMDeʮB0^PY1>2H,QV(SYFY!Pd>IdeO@VdEdELTd%K+-Vj i{m(j炱&Vbey72XUceCXY(XXa_6V(&Xa- aCHa _ tV5CHt+J + Lu+ `eMG_elWiX_-'W9rV5X9I+l vb=,}+ Bb+l*(U\+Bl*g<[W@\^dMYUNegec**C*.(PU,Ul}*kUf]qbUpQ*r(Z5UfU "tU\.mW%WܩvUtUޮ J\,r&IYGY%:aUT`UȪnV߈Bz*O?*?U@SbXZEh NZEIZDQUU'O|m7`W^\-ttUb\,;FvX<&*zr@mU"5ªv*Jɫ}&*JU5*ȫXѼʮ^m=W9|^u~WxOUN^B *䎴B)J/*׍j"B=M*7*nrx"TDG`eL,c\y `J+B嫐b ;bWWqx|u}}|}V2oaOBXv6 "`{n &J_MV(Xq\ 8_jt}rSr\9|wWk_fWۂ8WY |BǾp΋Ff*?qg**n,b7+bٗuո * R`<7}>J+*,*bJ4*m`Ѹ m6f**ZC*آJ*Yo]NVx*ϕY]łluUjUXn\e=S\+r(a(.g\[ŧ*Vk 
-[9Vabmd:hzU4dUiYݍU6v>oYe}UِU@dq]g<ઌ:K2᪰7K*VV袬Ⱥ󂪢5%$-J{*l|ҬVeF]П˪Hl* UUԩժJ3*mU]A[U9֔U7TUE%TUpv*ǁ?Ue 8+QU,VUaWVU.Ԫ /Ueʢג0AN ( U!QvkTIS6U44Un0UT5UDaTYuWR9e4V 6J7° VMdUayZVŵfUdLV.ŪX*07BU"sUeUvAoUecfYJO*EUU2_ʊbXHUhg\4#TƣAU60**NU䏕U؈eQ]R9w"#׮_XŲ~eQejURheXB0DV9 _UU`([jY ʒ쭖Uʶey0eRsZV9MUiYFQVQjSVI$J+*d6",N ߲  -ܟTUJy*-V ܈*-DVpd̅@VZYZV!e5U|(*h8cRY=ZVy.@d75@Vh%r&+KV!*^ndϒ ޶]XEVq@}w(V骰H*nlb W|vU\U1߰  e] \kha Ǿa|Wa5ac*栲/Y%J*gVٶh*qUi-M]lUN**U ]ŝ+U>;XIxVGU}C*#~͑*NUv4BN*#׾J,*NU J W9;+z +!XU>wzy6{*H*ۚ^e,9«Q_k[S~|e<[:JHk_~۫h16X/eKC 2-* eHfYL޴YF,Q,cӠYO E3&2* e2֗B0f18YF(t R@"4˺zeXDe,cu&tB`G岌 岌 RqMeaZe` RzGXr !.X{Ɋ˲TjO,cIk f_g2~BbYƺUՏJe?!F,|~,G&,8{a'"XYbhNo@W?eId=enYYȵ2V72G}R@ع~,2w$,/Y2#dֱVN[tx,{ UeXMXF/},㣢LeJ!dԥDq9u׈"Y,r@e-2.Dz\@ˑ'c=T1u +]'r/p,7ͱX"XFt"K8qcX8q$'3h$Xv±~&(XƮ 8(~cX|@]zD(yP,Tih/YJb=t`H, Q X_e9<+`Xƨ$>N$]hHٍ2#XFKTH,B!v$ %jXr$\X'*0Lw29Y/o22\Jfs(Yf,YFR:HY}':)MDgKgM. :\^_:xtCGtq2Y 2`'Yog+)eL>!\f³^ >ˈ[og|Rxd\Jg!Gt/2'J,{x1X<8qj³X&xj<ZFO,7,=gegg0?|u0=ⳌmN`1fS@ˬo24h#.iZIs"̱X>1?ܿ -~ZFz~fh"Z_h=C̦"Z:2n-d_DQi!Z:SeN-#eCEв'M -)=Wqy"C- W!ZFgr~)-|BҲ~p@aZP*`ZFqVRZ:2|T)-#\[)eh AZi̼M!- >H˺Bv{*fL]!Y iwY=d@ZF7#Hh'2Z2 Wq$"HhH 2zX)QZՌe-Oao쥔YToe<@XJiO@-_[FPKGZF?UⴌB8N˜ׁ-NG1+e>ybZF7e4iW@-nbozilEj6RhQZzbe @-K2>le9Zor%Qe֤EeiF9{Z: ԲWPˈaY j~e|#JI-FZF[P˸5j PKyZfm`}@- j}` ˲fPxԜeRy6StE qZzTeP,+CHYV"-cfKe9mq2"h2Fϗ21X2zE^RJ2-d면v299QZƴsRZƢ_.\qJsiU Ӳ 5E=K-+;/a?Mh@ WȖE ZF:3B-#:)D-ȵ b\SŅsV*-svVUJoeo68WiY# 2~h -@Z8%Hk"- iY)LY2-=i1 EetDWxbZ\1ibZ* "nL0-ˑHLXIgg_´ɥeiil0- 2z/(-EbRe\(-dZL}iYAYMc2-|5w N(*N˜i1!2i7i+LdZi%N2d?ʹB2EuZƕYIe.*uiZf*sŕLZ\jܴP-=jT*2~y}TiZ^-0Vղ<6X-VhBV6k3Ji-[Cb9bNZc-Dk1ZeX ZQkәLe·W ZXk1(.2n Z_"XX@˯ 2:ֲ.Wr` RXX-[h~%TwVCZ#[&V`-YI-ժ`-#Le2C2O*M'ZxֆbDew=.Zg\=%!+kj*kvZk9ҶAZ#,NWZ µv)ڡ^<u`OA{ZioZFƁI@ eϓ#ŵp-#[-SՁ#Medqy㪩ZᵌyC%EU{PpᵌPãepd[ާ-Kztѿ2*bGeY lqЉb^e 4lx-} k[; 90̑UnaZfjؗYR#ZdnQU*,;EKZfYX21^kC-8Kq-cLR\EԄe -ⵌ1嵘b61^ Yjr-˵hdp-õpd5Qn$\KGZ5pr-3 1GRWk1 ҃M_'^˜aHbZt eL{s'TIeIbõΦ)C!2ZZ5UY5h-6Z EcY 5G%~(7Z蟂Dk7j>μki-=CkYe:"b\xlE2,Bq%^z-c{UIu+=Z3D+O=WkY 
^K!p-F2/KH}Rh-t\#5RGkq8ZkCe\k*H*FZC>Dh2W@UW>4?*Xˈ2>ZzWSZLk8LGbZP9?k4BzZ)kpW^:p˿Lkv X졺b- + X4"BK+Z^9@^釔 9+ySu% y+<_! bILѬ銼U+/z +sM\Z@+/(Cwn+F^IWxemìTT`v7u A% ܕpW ~p㮌,5< 2N4"ϭ+{v승ͮ(T;ٕK{'J?eWOn+3}5J+sZ ɔ+:+ +זˀri`D]YݳXv,ٕ7&^܋]m`,uuPW,G]*QWn+-"5|Fu~A]Vʑ x++'[ +%1W=+뇀 tɕ]B$z+v+c:}Ä́\b]ʶ @H\12é|rŕ\10rW,;\}{+V[qVEX4+P. 2V-[Ѽ 2h+KS[Y;1"Q'H+̷ 2FzAVVLnnA;J蟗砶rDʽLmYV}Z[!2u Xw "Z+RZ+z#x]Q\bsb`tI,ՁP+P+?{VZTqV[i[lι/7b+Ǔu[!ojtOk^k#$V*yY+ʹFAZZr8+IYTPYqYga2G5,BQ~O@gyS⬌1Xvq&&ʽA@0 ( S52+aVF#iD2+ɃYw'ʒ(YydpVaV}GfIY13{f`Ffb{3+!@`VnkfEg/Ί+1C?_lC+V%VZ "VV9o [hEVhEGiEVZZy V(WjiG+4J+N[ZCZ+@Z(4H+H+*Ԉ#[ZYЀV++ie=xZIԊPcLa$N J%ǢB@k j&x Q<^V,}Z?l êԊaAHjg MojETjEUjx5,jE?jL?2zsVAhkpLkŰIFk`ke>Z19E`l.N7Z+Z[bQlpVZ[Z[Y6ine[ (܊zV#?op+ :rMVfq@[1­Ҙފ'x+z+B8Yq2YK*[!YoF\oŢ_.Qmo%V ineT5Ij+ȭh"ȭ̱Ok+]D[kѕ#@[f*VEm;懣< 5rP<)bֱ YWK 1,+V`D\y + +c9Wq \y@BtJ +n QZ\yH}D\WHE\QWv ⊉ˈ+Z-P%b\W:TpA\p張SX 2:W^ * [vĕ)JW*4[ nEZnebu"Vvp+k-42Y ( 4܊ep+V<;GGڠ˃2Vxooev2߃bF[+*\kpʺ!X)+.6J:]1W\z[̕Yc]gse\!.ɚ+At\i8As6t ҤDWryt+ǚlؕ]}Έ0j+cB7Hn*[;l+-T7bO%BxCze3dJ^^y )+ W^P^ 蕙2R?v/9ȵgg~ B :B@olk1 FUi""#thz%z+pW^a#+)Lٵb,򊫑WsS ⁆#\FT$:MLBPۉ 3+F^7]+/CNf,W. 
vEOWGKX~ғIP j%=1*ۅND􏲴hi% JI+cۣ}`VhK+XZP+\RTe$3$gVl1Zqc,jőP+L`B0O4F;V4 Vj%sS+w/X+4 x[[C+^:@+V<Z5BZuggeul?geiT8+xvVxYrOcY^60+aޔ̊jX9u?X9X!W a1VpT+ŗƊ6| B("V\b:b1M%+L,P+&xMx%+6r>"V(b.X@Ċ/5Ozº|v2.8Uddad4:AV(s `Y{!d xFV4BXq"oj #HXI ea1 +wBSXX7jkd;gTJEKĥlފcՑ6V뷱bc+6VUrmT"A+ `/YqPdYȊ0+aVؐJD(6t?¬0P5b f%d3+^YqƟXi0+@Yz*ͬd fF&aVQAkiѭ%f%B xMN)+XYeEO+Fb;.@ʈ!2zW| Ċ +[V|&b=C8+3!g%;-HkC+̗-+EҊ…VniVRchiJ6ւZIzױ@y b_GJ2V&.FJ }`@9@c/M1 2֕ Z 8 X䶴21VYʾ@Z  22tVHP+rU3H1`rVjx( 9DvV,gg%g&]ge!}ghBT7wvVR`g䬐M dPC+ he=ZB,V\jh: t?`4 pKVAQ,VF8m 4e28ЊH+MI+V֝V*ABKZ)Ur3(JVjH`+.|a+^*3bV\[el_3؊w^bcDJ6 B8؊3j𰵶nVl$ T/[q:JWQ$tCୄio'S6ئHWx \Y..gkr+x V&J;VRdnCp+TG Nٜ[\JV*A"V\ZRI"-[@[q፵Dxck+,E[1S(nkV\ n%J7lne;(m?V|nEVHȠ86gm; Lone<Y㷷V[+X+flBaA{Z["k‘}@ ;V.ݠVLZy7LoLj~Ԋ+aV8jeYBLxSOS+VEZqVFT/6J?p?VƃV le[i`+;cUc+8H`+ 2-}@rJHVز3~p V"@}YZK=5J V GD3lVI=7Md i%K+Ҋ"$0VZ0i,VLZ99x`i>K+/!iAH+"hԵLJq+V0ʋ'Rյ}$Tb`r ~ÛZIᘩoڌ`FJP+IZC܇Q+7 嗋Z:cj%cimDoC[I`#܊Mq+㶗r>oE od[1X$oś[vdX''ioeY*zGVRjoťx+5J\; A+\r+KnH Xm"o5s+^V\ onŕuV&sp+!/܊i+m%5k+462*|#n V对,VN5V#Vu[)TG?[aSx+Zr+fV'a x+k䭌ܖW|pJ{:"pE{N[Idoq"qϐ+)r+91W + t%| ]U8]1^`teÜ]E+0(4Bѕ1FW tl̕Tb. 
;cT +;C/rŬoȕ& +@XR; =ɕ+M2.4+!# ŽW(_5B'+k L4ĕ*\O&WmM@\H%r+ꃯH^q W<28J*捯^1j{ qi!\TcuA?WbU۠2GJS⨌綵y E3AW2oiCw4։@`"X62>rE`qejOD*s|]~u"(,`^W~EiWF'ʪ_}!|eɀn7 W,$Zi|e^WV~%s|e[uW D^yVpC06x)zpW.++Bnq r쵾@_a 5 ;Z_L@KW#=X" 4Wµ^[|n+ŮGvEQWԕJTmmeVk#AWJ1'BW*Q0W.Е *bO AWl]]yyd]9@jx)9ʋ $+1Pht%\beѕ`F+6W՞J66~$詘+kW~ftѝѕk2+ w;ܕtvWL]fvW]*AEU]!NvPW]TavŹɓB_byŁ+O$RC7v%+l46+aW*Z)dtJee+ʶH]3aFIG]"Օ[JQW_IA]C ue}N+x]kt.{;o2iseI5ϯOa?]Hn+g]5]q`tpt%; ]'hNG̕1Δ2, 46WE1W [\!wJmu+1J*x+VW\Ez+6RW<{ʸ5PWMI+,[]t +{Y]t] !tŻ\jftZ +/++I\q^Pt\[M1W\I=J!\5⍆&r&ȕqtV~dRx7 +5&W@HM\IR'J"s.oM\ +k'®xXv%ؕ]YpnĮd{s+ގ 1ؕb ߊ8i@SaiteĢE42š>tC+Ά]!s-!bxK5̕ \v1WCqe\: JF <W%J)s%O Z!W"@ß+!We\(reOȕR CZ'"W\q;J6N7]4"hpE_+cV(3v[z[ xps+%sn9'`[Aw7ͭxVs+uV۟]'oKeSbKeKVW${+~V|LCCPkmr!j(ir% Wr% ȕBkqЇŕJ$W\/ u,W6X2ĕ_<8+z qZ\q.WB"xJq=-̺u SWmr6Fڥ<+%B-ĕW|!W 2bBM W+Khî4uue[4S@WI]^VWtD+f!WrDwɕwvfY W ؘ\1BD\01"WHL1M&rj+*!WRb{2W\Ec5 $_ʮnrx&W\&s%+++1BR芷!2+1N]fW [vJ]a3+xb8I"ؕLŮb)wJvW\jw+D]q?fw%|e_`.ݕ*$e3'OI+vWR+,ye<4ʾsWƠஜYThoL+%IbmW ]weͷ11b)x0bx|H+プ/wۛ]a#+]a}/<+_P˶®8,'fWO6̮ٕBg} ,+c:`t]ATrl4}Wє>v*te6 t]&t%xЕ;f’HЕq<0Ҡ+,\qk̕BJn3+O\+W($H>]q\aWٕH@fW c]]U=]!S5ʍhwFJaW,\~W\tJVl⭸ +VBNr]V[o%V<Xo%qy+ާ8YW+7##V%[Y5{fV"Y[=r3\bF ;|q+QVp[kjp+ė8&2r6TonEVi?Q+YVFR~~V^@ ԊCDMxLbj 9V|BmLjŅV Z[N8+eg1qV'GX5@+Tv7i}#rVYaNC+cb}he uVX6%C+!Z7šZ9/.1WZVXq[qSSB2Mli+û8n۠D̓V&mŃk+렭8) VڵDVbMY[im`{ h+*Njm`+Vj7F؊ȍNck#zRZvķgVEG/V?Z֊#VnV>mGuO>MmlBSc+#E ;[[Ih fc+BCl[J}V K[9[!C؊ˆ<(&[>^ :V"SIc+^ ble}-hO:VFCo#kevDV¦O¸#֊3r|S+&jV*sUV1P+&[Jt v22Edjez;R Je0눜egB;+UwYq ;ZˁVL YنV^ZY_V۟:ǂ -Zq:tpV9Z KaX+Za1JzX+NB≭_m`nkwVFa#/AdK7k+;8h+=Ǎxlc+ZMmkœX+##^m1CJf2ZY+a‰Y+[?l?V}&k墘֊mx#l[+ZJRJ.֊Bm+y,e jke `Y+(Z_Pk\mVŶDX+.S?JnhY+Z(겵fkE_T[ V?XZqM[K+£%Ҋ+-lyV~"WEBZS H+l>R_ ̖V̊Z!/ZhЊiTZy^Њ@+/VZ)-QYyؘge\:=A+thV%HZȌ.ie_FҊ#P%ie+V\202V&RCN--Aڠ/$5 䒥[Z232IVGZ:J'H+{ C8ljVF!&eUCV-Ԋ5JI+}=9VmjiP+^q r?-vFZڣ$V*ZA7^r'hJЊ㝆V"(Ί=܇U9+Ll)rVƻaY䬸X roolheciQA $p#fT?VhF8+!g qVY1+ۥfe0e5ͬU'LY YA0Sh@+VF^9GZ b6C+qZ7Z1R{g83 k#V-S*-88XZBZ퀴U+V촴1lZ؁V6A+#h%=VxZ@+m ZIBCA+M qC+œR) <ҊOF,x2fi0!"BSHZ H+H+liP BZq昡TZ1bЊg_@+r"mrb!xc#K+$DZ91V[?SJPFT㬤Ί+Rg%Ԅ[hg8+ 
8E^NfVr֛Y  BL:̬x 2"bV11+cpV<2Bl}rq]U-8Y u ATYY 0+apVlYዳ.9+ Qͬy̙Ya0+?1+ `VV\4 8bfVWzbYa)fbVvfVnaV$s3+#fe 0IXeIo@Yq# e%* ʊcofe%h.ʊQV([@Y1vqv#O<% wh~;+Yq!h%ǙxZid<ʊVV7"FRVV✠̳1 "e 6(+iYYǯܨEYjH3+DY!YX1+ޫ(ʥ-'g HY qV0K[ hıiiewVj- &^)EZqnR6n bj%2Wa1 @+7mlgI;+ YdgňFK+hH@+. bjRs-lLodh.8+~Dxfd5̊Ⱅ(0+Ӊ#ځZN~G*h Y V[+nV5m%#(qe6FUr^VQ n%1Xo5q+Zm2Fmi>8V[oԐNob k p+Ž9)[WV3!ջZ:u[+vmxk|E֊0} ZagBbje @ xg'g"3+l`V(< b+̊۴Gtͬ<7엮AKge>58+>Ί qV؆ʳZDʰJ*P+~ԊvBLb϶V<)ҦVBԊ)P+yZ9YD nZ;TMeNFJQ0+.ňb% goh)ʶЊ׿_[Փ煮^~/?~Wylk'娪{RZ f)-()-u'vjva*NdӑRZfv)lRZaHKAkrحmc? BKu̖$U*Pc_1H8m}hΝZ\gn:Zqmz2Tol:CdK-;ŵc h,V*S7U(- T,c=ZJc)RSgGN,u.-tW}HZ+öyHB}?xAQXy3Zg9F Yj>Y>W7 @81*v)$>F>,ǨL,՝Ct^Zg|JR:K][5YLMLuz"k6όE}~MY:K}a8msTy`N,<?Yꡠ{v7B,§qjRgG Y*:8K=.u4RoY^)5TzI?qUn#6־+7zYw=Z]:K[nimR&}JQMR8KbRi j++ב:K]kYu:7\gzMUK=ni,lgggͪwUXgY'}0gIbSgVYjKϮ:Qz76gAlY3>14sYs>K}0?L8L,tjܞg,ϒ@[KK/W_z={~,[h_HVYY&eW% Q< -̯?Z0vEód%e&TKZg_ Y|TG"tl81faRCkPq7NP,X nfؔk,>Yk9l h7RKMT QQ,խ=RL,]YɁh:4K C)泺1fMY9ķR"q?'Y4 iz(ΟȍYaq"_nԢY9 Nh⽷^u_}hjI}vDTc9jd",5|}7E,ne$Kf>z(rUb9!Nת#o큿iROXDT'&iVLa2mԃw#6jdLY) kYYx0-H&elb냟X,'mi,:t}YzjsbV gϩԩ g9} qĈU)ft38KW:puX61~ʥ}>2ŒdLl Rv,nu5R?YpۑXy-Cg0t݋H,x4rK:K}?5(1Riq 8ie>]Lx,$dkJf%we 4K췙fǢY",L]d4^XiiqK4CfQRǢY؆hw6͒I,-RJE[QY*sKY4ui,F}[f'"X;;,t fn,uoHhڴvYj&ԣ]):69MLs;,,H4K=%fa1-UX_+5T8Kuc"]gX]f7YtIpWN;o%,u-;*:ER}1!ͳLD,ճ4RQ'yiz*V/RFK,uFROkYQR:KI@ʭH~q]JgY tc jևK!Kc,5oߏu^fi :rTþvZgBYKie C2Yj ՠuK+'ӻΎ2Bt Ih]vD-CUnכa&ZjmRD ˞-Te,w7vӾ hg!~eF[v-DP4R9 4of\nsELSV)-5:Et0+A+-RSHJK䢔2uRZ,PJKE?]RmՑmw>6Nr3-ǘ^3ӒКddRo8MJK} J)4 "+3i9~ѧR=9#-#T\a;$e ?HhNR-}]*7ZE -X,O-[w B6jUOe"QJ_鋝|Yha6 -fjď"Yp-uCfj:isIM-c8Qi43nԖ6:}-~˞ՍBK=e]4R7t?/DL}-sIc^WGZ5/ -GZhL-j^ޫHl9Qn(,u"z!eL YG|hYj)%}Cs;~D#|i!Dj%h3-5f`'OY|^5I,e ggY*Kl6iutc[g3!Xu_2Ig%y݈g/,by~ׁY:kez Jg 9U/>#;,{(e 鼥:KOĴҷFY2oRq8gj$G62RnYzO癧|)+j]YztY=fZի#kYw7iGų$Ҁm5YjnwyMgN>K54űX3g;Y+KhN@K%lf-3|g[Wgq5,u1u}LY:v(ޗ^ruR&t8>p<8gKqlrxդ3楳"u4ϒ2wf)U2K(Y^t>m5Β98g,I&f6K}6,uϭR{mpW:5Y;^1l4hVnG^rwv RxͳTVeUxyhWZheBų}ի;DgtYͳwi~8Y2R9sʉxNUknEYg饻>ͳTVe}*ϒ .W-rRLj\m wef$RhL3YI[i 4͒Y/6K̅T#eN-mfjݜmP1RY3Β|It[:jEpi.qΒp)<jM:K=W6o%tz 
˥HR5Yǽt::K&, TT۷t釶3VMt<ghFi0gƬLWA:KZgQ0L,Տ7Rm_&N,ųTBh,Y" <dQijTLMLH,%$( R_XG,YWgW?y[_K% HYi_ߦy[IUԲtYEy9i MgY@k Hi:kYb/nY:'rYx{ Tw^,YJgɢ>tU˥N֍rei+e$ed~A,zj,Yfɢ6Kזxq)ed$BM8XntB92K=QtaZfo, G2K}蝖YIdwYR6 $ΰ YIh$fI"&4KE/U}h,B:9,]X꩎zfyM,44\R>-mezHfrE v ̦YjXY(`:KfI2KΑY70K$,2KMD7̒`*?Y*ػ0KBI`DKfߓBOeF$Y$RSŲ&K-1Yɒ(:&K}&m&KM.r&K}ie 0d Ptr> K:pPGţ]KX@kv}NQk B}|U K2^̖ 5^FYDBL*,SD&Kjȭu@\2YR(RžBY+8ZefYiXò#CUVE,}4˒d"\~QbR,ʨnhrzfQe(w]:99cNY`}𫅓խL>,N*K .ЪɁ<eI (KPTU3z(Ky:NQ: ,]#J Ie=RYL,5,Չ2 Z!%%. eeۭ*wbX;mY(pT"LX2=IJLAK]oEeT$s`{yUEt\ݭ{:8-g>\{EA= /,:sS;6Ŵ$Lh,+cQ},S-NcKR'Kf$t#9V$LqN"pH:4KꂡYBTctDT<5I I"2Kr(C,SNr "h:,9hCtN^U/yU_<^ c%U1V^*2V d!&@l$AcŁ+wXI Vʋ2Ӊ++ζXX!a1+(oh}gU432ʃ4yQ*)2RX +jaeQg"8$J.All2@b/ {Xcb%"+7'c%9Y +ɽYq'J mc3+YdTa4Xq1x^+zx6RrSȊΔ1_07YXq-[XaaE+9wVX =^E+⫘mi^euU6n |qE$UW7Wq)̫bZīti'ʨ üJfͫ,&«U4 ^%*/ ʍݺ!!*.BWI9JHWqy8*-y! Wo^f^|<*#^4+* ^\(, *$"W^%*!0"UXE32.׈Wqy$*WI=9?WyU W)[P\`s#^%*cRC-ɾ *;U* _*㫐l_yB*XJͫ3*U<܈BfLW35r<>*.Bx)2M*.rIW983*?yksxgͫ/ yUjZWYcM``yX:udu{ *Db]/xkr)fe^ЌxĊ^e1{!^e;vWqBe.8^ߘXX7MZXL1Vu6V&< @VCBV0Ycde&:BVUr˄YaRlf;3+Y̊fV̅YcYqIo&2RVdgeEC+cl\6V\ ndt++ EYY&$`eS)+aDY`eQ=++eGY 3+d&febV2vY(lFO)bVEYj{3+OAS8+-EEJǯVm8b`&hE-h%ʫgC+VZ9NnN2<&i~@Ҋ+ЍZSJLihe2VпqV2|bV!g%;+DdΊ@+ŪZ']%b@ʸPL#@V&d_ J#+ ,`BV@AV4Y!+^42YaQ*+alL6V =a\DT`)6V2XI_&+eXzJVXW m@*Wv r܈WI7k^4*WYm,^kUͫWNz*v'̫2>Ex,FXY"l`$+Q X- DXq`e zHi`^*#m[H |̫ū5\xϦ̫lcU̦YW^RG UNWT W9_zApU2tWq&ip!2Jp{DW*cFoⵛ*LU\o^e'(e^ŋU6V|(;Jd|D[UҸ8g `㫌W_5WY 'JZx=İU2OW!0 YUm[/[ElֱUc8s4ʊk]%UN(ݘUDW8ں-tYryMQWj^JWae^pU)*޾UW$]e̬9"]śvW@ ]%*.qt7rXm\C#pTg**AeD3rn@.ҏ0*/2Oi]eߑ $ ]śXWIt*O{*.EWf^V>%Nx.x_ *UR]R:x*ͫX華T{| ]VRn`=  +&`E+t89hq/X٩X5zja%+IXIr6Vb Yq^FVLxY|#d $a%+DXYo^'J4kUCxT2n^%9*㾔^ͫ\J-쫬6*~UH1\*vU^ڌx%WLd^v~xZ<-FWBWquc*t ]D[@/r4VtҬJͪxhVE3*JxTq&(#RUpD6CUT>.ݨo)Pу **R8с*,Tq@R*PeF$x8*ϑMKT`*)*SUF"PeoPR@ >U5&"OJ=&,ߖ=va gbU[1Ċ6ܿAŠ"haeR1VVX +lVbŘtMH "TXXAUDXIWmašiWcaej+TXXфNV\Vaہ;Š,xFX^< LpUtdc%6+++i_+炇ccvŚ7N YaWW#+6XYdŻvYL3(+x iAeTݐ)+@YĬ{7rzW `pVr>¬<`-c2(+\CЕBսJC*B‡җ3 +M"x#0-3;_BXqCi`%+W!l+J"V[X(rbHXg-KXOVƋ2s=VaV~fT?Xѳ{Vbw>muSXa_EEYU*_x|WTjdV&A]+r2 
IVa"zi^ūXȆW1Sd^ĶW&WWq*Ux+WŜ42-eHXq,Dzӭ{Xq!Ċs!VI䎈Z]â+1V&iDG!Xqj2V0VIaLWXƊl8cV鸀 lXXg:+"?DQ2BocOv2V:Ɗ -0V)Jjo1VJDzQVJ#eŽq(+GYnI郬x/j++$ԪO xbe%'YKzQV#!bbV:wf% TĬ-L3+s) HfVJ VdeԜ1+Y2ĬPyhfe]9_bVc0J++AYqMT3+"f~|+@fVBYA3ꛕodef}ІuH@VWڃd3m1V,u#+αY!de2t@VXC$`FXXX$ma%Š3M;MĊ/ Ċ6DXϟ~ZXّVRۀ(V\#a%VX4&Vkb%3+-ebFM+Xqh- &VRebIDbk;+ĊL+LB;F1"beUX? ;M %6VU`x`d| $/FV xJv1&4VVFcFa/߰I@Yy-wHY nL^~@8raeэ(+7ʊ„QVTPVY u<6r?~dYA2|mdEe[Xry *^5=UIAdPWAWݼ sU(WI q*:*2*??EXt;UU*%VaWU*b*"|&[*}\ڶJc*[eWN Wqpt,*v2~)eu('*@x"-ZePblcMUP:*L\'p\zp/UlO ]\=YL&lԎTVɄYVɯVl[IV[[ʢ 'YŒUYDJ*񎬲 "ϲUFHV ą2Nh 2΅TiVZiV&ޢU5xHAVGI#*1 \,X[>DV9EV!^YY+UjEXEaAZVO!*I/Jq}@ nUxVUVqYtYزXn"Yet" 2 U;eσUM؊h6 I @YZ}LT2*r eSK*Z436r_HUO'P ilUWxۘWW^mW&l^1Jūh3*3]%u?x;*bj(ʊN*NU ԰EӸ YvUF߸~*\e W%l*. GJz3*iLU _%\%q?*ߍPEc\Ł@*JuA/>rW,^2Yī7Tx Oj*.^52YU*ATyM3 Vآ0ĊĊ7 uX/Xς51X[TG7&+.GraKЉmXYfVyìDY->3+NYq֗fVnFbVHFNqVH.Eh*Ut$-vBy?XV?V&$`ie| I+#0ZI j%iS+!|V^329V Z )$k%kvVl)֊C`+!5V=P;0VƟT$m}KJ_LaBsVf}V!ZYdd Vldnell`+s\ ?4B: k2)ʘtal4`+]Ha+_A[9(<{+&Vj )-WЩ\3ko%b|x+8V<=JNJ{+ߤz<Wފ7r]AVH_3@RÄBh&o \qx.\J Qʄ eqefZ7k0"W6r0BrŹ!W@WE\yM+ƹ W&CJ-\'| Ã+R@\v%#^-% D#)y"<췆,!p+d[!S'ފw!Rjo ho5x+D2 &oV[7y+xd0{+N'[01)V |VLVRcnalfnX`;ĭ$nb̭n%$Wښ[q&e3|%₱+ 8+6jEJ'dGsxFW1yѕGb0BW]!ltņЕe: QFWmuŻG]9|DQWjG *S9N QW ]qk+E]_*P tEK?AW]PJ-AWWhx;^#^͉2"+ϒ2GTW&ye2n ^JΠ8ojxE+xšY+BW/ye$y%#dz +Y^*bzРLG`"HJ-+ +{e+aax & .++Wȴ9udW=b{ PVLxg{5tW.J+Wңx++,W+R$ kJ+ˌF}OR BQ+ci}yJWI9+rUW(;b{%¯2^wL0a|e\G:+[>}WdQ }eWE_l~Zʸ+O52H+WL_}|9WF&GEJLgKʱw60n~eDZ ӊ6k3WLF_JP +eUZ]+~Rʤ& Whʢ]{GR JzCN^r+!W-dyc#x;+l-xW+}o!x҇W&D򊋢,b++!2W<@^< yenSVW^6Wb x%wɛT^!w`M䮜˝JqW̴y]8aW,A®8Db4ICF]I}T^E v%.JJ(aW&= v|'vMv ?j+6 Į$vedHH]!"1`(xϸ+UTowUvWw䮸*Jzͫ:ܕ|w'J8]g]Cwl]1iw++⮗4r3+; bx슳P&v%4 n3-5D]xfJ \$Wjs> w+K]yU}WY>Į$ q] gw% ܕ t]QGaWrj3B蒁WV*WNG+7DW&T x2HB}+ dBl?]&vWFJP^Nn/+th+xx+Ϩ+aQWrPluZ $n-rW*_/yftb3Jt+5WRxP72κ+W\1x%ʄ4+? 
x%1+ dlx^Jf%X] cᮜvW+MWHU+ܕQlrs_,*]A5ٕst]Yveamvefiv*y ®\,]~]1 Sve?i7+]9;lLu̗ؕaW!t?FaWߛ]R}ՕJןBauENBW*gZ|Еq} b,   W+> ]+[\aKqh2⊷r<W"tZ\j#xreE̕1kTaet ;rfW=̮bvWgI%OQo`+qWHC4:+.p&+o<+lVfteKa 2"E +,b9 IW6WQ=J(G+^\ag/r(̕FW6%]ϯa?g ]9 LЕT.7RYrHc+AWW >VW>X]L ]ٌaWؗSaW<.bW< @!W)6 0B?+U@:"-EJn>W<_6+; A+J}=!J5W.Wo R'QB_!JW\*}"T*`aXGxDW _aJmw_+ _T7W"H_ɵ6Wp)A`ʡ+W&H\9*]LmBDr̊WR/}ި"0xW\6+5WRg}eB' z+/ye JryeWk X'+L"xŕ+^jrQgw=+]]qU6Jzoՠ ^yC /gx\Jg^qlfWvNWxݕxrW]Y;rWNW^-+fMFin+^SWƻxM+~N^q,JL ^y܎$xţh+r >I\ fWFcs^a$+ޮ~B|Vya`W1;t{t>wE,we%]~vWsw?:@ؕ,Z?r"a%+6։+ʪl5 +)B/`[dKWB+}UX `4}}VBXh3XaM2v-| ._U UUh^%yMU\stLxpa\zlĸ rbpW9!UpkpuR*dXWU*.qEWU\@j]UHR-Zl@0b, *g\% sUاlڃaV[!Xt VK2b~ XeC*5Bh`gWq`\*5;tX쟬Z~*U`Y%%f*c5vA\U\ȑURR%xYв$h`BE)0*^*Wer"`J7L䗋VQgU1*6EUVeXW`]%cQ*A+xVx8=ѾK*^ouk_e[䫸p _{WG.*aZ䫌`r#rWل,֧_%"XOM ,#;2&V #`ed=_X}?V/ ]$`Xq' b8 DXXɨaez~h}&axHVbRF+,'Ċ VQU*WI#Jn*|baœt +~-ma%+g}&aE)[VH; +ԟ1i:+Vƣґ 'L ,(V>GhzsUjy:_%Oz*ۣ3Jb*1 xh`>+%԰Jx*] LoE,lqV !ZX(QŠ&a`3+_B90VmLKn!VbAecoMJ?@>Ì)]YaBR DlRcD %*F&aeki#!VlAxhebzk&VAʘCKX!ehPVOBrowhe%m VVx eEIYJBS(+1שּׁxBne$ RV\Te@++FBY3!de2.•;i!+#74o"8dƞAVl LH̊J p3+̬tfaV·OY٥HY>VVf]i(bV9ֵ:uvVSqYYqPfeMbY>0+ͬ3ͬ8jbV`ͬ 2EYA$YJ m$++TFYqt̊HfV72bgeY$Sf ͬ8L g%7$RJ!qYg%$Ί>pV$.BMiΊw> gggrVjf0 0+NO=̬xef2CuTo$a.>1SjfM̊9V(rqV>Ί[ 9qVldފ}y쬄5$ԻZB?׿}꣟Rg/_oto_#̴|k_~ȟ>fe#\Iik2Rn² 2b;e:Jȯ8tʍ5UU3; 2,(Ce\g2\:0V;Ee5`t|a]ҲRZ*JH5сFZh+2aqѲ NqB%hYPx-.h{2ZR-u}Lh)Iie$UhJH-#SD'GH8 BZHie_lT:0e|´,ˋ!eq?42? 
V):$Y%BZTHH'iWAZ 2]G i2gBZFFLSJ˲J2Rڈi.e$/rYꂟ4fZ*MZ('ehɴ2242R}LĴL_reٕ,\lj -REצ 1-ۡE ?Jl.Tg:γԣ iY뀐(Gl^Hvcieb$>;2o94ZEJB˼:p=ZY;!k P2o ^ΜgQ"/>˼y42]7,yD$xyųlj Y*_ e6[JGfo)D66N,?WY°s68.Ԟ_~Ys珄V@t##D8<-i-m'I\clA2o{Y7>,'8YHg5,z β\lɥR~[,3TWB82w28a_*:5IUB%ucT`LHgNOt{,'!e&~+23Zgɣe ~ϲ]~YbRct`a},T6Bʦf1ij̡gQ/ZS]W*>vQI2"Rg$%m-wZ$thɬe땖_ٍ hZ%"C_Gi.J,MTH2/HR[BZj@I#-$REj{3I2ZfH#-*i~i%SV9e=DYԿ(-b,^}J˶ y)-ǖ1$e#)-R2nz)-"}RRZf;Ң:8Hi KfQZ<iTjׇE;We-emz+%"fZ{ô̓ e{˕߿(-1~QZ_#IiQ=/Jx2E^Hix%1-K/2g"ey>-cս!)DDRK:Zs԰$tя98SԲ/PJ2;]҆ZBXj2%/PK4ljدbPZ?VlevjKZf9I-)mY jJ + )~`A-#jZfE[2[G^ IjaH-#We}R52'n>}iZN2o2pԒ*~ZM-GR(,9tz{Zke^-]Oi9wxeֲ֒0Z|֒)Zˬ\K]~ci-Ik񔾠 zmJkCCJk-Ak`I"̆ZKMh-U6״֒2>+fY(i- yNZKc-5O:nZӔ2)Zfoie^rqkkh cb4֒XK.a-/#% e5k^[Ga-հFf"e0bW:ߚb.JX-36+%V˶cwG72Q-M9iR_ ZT32'C/iEuP-5ReBWjU}Bd 2[$Q-Zy|d$Ւ.EUX-@Y-& eVp.aޥLelX-rbl7տX-s0ZjnE螡Z݇jؾP-D,ex/CQKc-QDc-63XKlk&ל 5kk]Ok.]/y-eY(R!W6Ka-ZHV,#E?[-5oVK ,o%2rYkcIs-sepõU5]aq-s%%5^l7=kn6ytAh- IkYZn\ˬ'k*u,͵d5 e>?\~2_ci-umZk%Aeg2< q- Z|j7$e~t;i-sPtZK %HkVZKeHkT"L?VRU^>8^D7֒U'[˂u乏2gk} *Gߙ^Rq;Zd[mRS~0HlgL[sJ܄Ė,!d!eHl#"S6C`t ԠC/ȝjx-ZR^sX-s[jDie.k]T ؒu` -q l/3[f"\l.̞V l1he(t1d R WvխZfגP<^hi enkIrZKZ9ֲu/ZGglfĆU!dlrD,w-@ #AT˼xʍ-j_IY-s$HY-s@xjqV P-Yk}U9vdHlriMD2-sXز|G,,lEbs,TځIkCcZqZf-Z TlƆN@`[GJ{-kk= 4[-jI?[-kZf;x oȡEv.ZF^CZfCpi7y{x `K%*W8k;SL4TOkRŖ-`my3n2߸+ũ;[܋l`l[e j[<$l 52xa,[<6ز܀[<6I*`K].lq"OQ F%eR_. 
bţw-=VK.( [a8-12FA-`  !r [E-Nْ -vdVlٺŹ6[HV9b{2[tbT- ud02ٲ x&[בnd#&[ sDdC&[jEGl_Obl:M-~^[lqHdK|uTA⩵͖v -/ EfO1[7ЖdP-5 ߌ^8a(oS`hKd4ڲ]j -sx.Fjl[mIՖ:sՖ2{̡wPԖn?-;t\ًۤՖAXmq~m!bODA[Ux!ebdDVGnɈDrK[pamsw[nwGn.bQ_-W%$rK}e8+¬y҉芖[CnY]c-M"rC[oO,&9tW-[n(gD´ܒ{L-^طq-Wcnp"M6R+RY6[i|[LI[y`L}[4En|AnY 5m%(2W58]p /|5OnF- 5R3R #[fCy-˥ܲ-,Q?r †[ p nNC [ܔp[rT[T}B"n˜t/Rˣ"b>oi%b["ݰ-ElKhgYnap|«l.Jn˜e< ܖkcK7ےylv[f|n˼|BFmv-mkG%|-T4J*J-p ŹV[ 2{> eΗ2{wU[ !ecZmIjK6R[΅~L-m|EAՖ=^mnC3ͶTO;*IbH#XTW9~rbmN"=-Ϊmjwϰ-5d#b[Hm>ؖQ>f[Ab[ܱԖ6"_j|ꬤ-=eT-xQhA[ Br'YmIDm-k^@^ V[j ՖԈh~SڊEт޳͖9 e6f"[f.Yew=Ȗ'u W=^.'zJN-lR[/h%m"Z(t"[\`%c$Ȗz-sBl'rlYJd L1QǍ [|D- A8&oe^ <kuOR>\K\p-s#e>Skr jr)Ls-2SV\KuakYs7uZkYns-!D\K}p-ۀ6kqMk#C*C_P[y)bL⺑[#`e^O5O-^DlOO[Eb9ߑز\--y#de_$ڭ#slI eΖ;l26^kWD'kgz y-zX2uZfJڈ1׼^=a!iZkaa)[zIx-ky-뷑Ev{-NR7tZ_O^٬x-."ג.^K ^K%gkll~kwyrAR:y-5NQ\KH*颪*G0ZfNwTUٺiڥ^K]@ .gwͭe{%Z\+aeJ65ҹ[[`RVH*s+zVݼN-ڊk+nme! tZo #Ӏ+4+s-Op%G+uE\ yuCR dc\9(A4qERpe96*4;+pd\ykp\1pPnpe 鯨NJ6Bj'2mVjl f)V Uoe$ne5Ch+ b(ڊVW[^SJ־trJ`mbOgk+;7WVF[9.`+$[ Y+q?V\kkEd*Z2,2ʡ^WϛZ BL, I P+T \~bW|$ hIJ@+,d$Њ֞g?;+Fogbb9+/gvVl{Y qV6vVXY9܊Yy \ eeדҩ+)+~[Yqݵ?Pfe%VV\مb}mĬ\-u8@ eVVBxgec++;1 *BYqYpduFVsKY䇲R-)O\jdY!fe̬ÖbVvAV0FVAY!iAV !+CcFV\gd";FVC4BVdS{#+_FVƃ18< +Y9 Q=QTYY&3FVG\ʊ7VV>vR +!!+KSYfd%& ȊGtBVȊP!+53Y\ZJ( VV=ri(+%B Q(+3_=u> dey P9Jv+KymOX+obEzwAX(aZZI +P +vT'>`@+GX)%MC +@ZXqOka%LG +$! lf`ŚG nFjV.*$*WQXxIU]s*s=W!^K*}쫄h_jP૜X죍f&`C=?RJUJdV}XYu5"a犰BgaLEŭWQJ3JrDëͫ4⥵Wrh^(*'-*Լʥ]UjJs^-YU.W9wA8*cb]łuG%, y_ͫ@(ZxarJ[_IXa`ūVr5p Pai`ŁM+3VՎUvͫxu$ =y̫a^3ϼ+8b_|U\TBa|F1U{wW9^V6!V^ma8*:7+H, nӓs a% ,TBiaeWxXX1aXXe,ʧ}BZX9UXAXajXX_+cS2,KXbbd$9l,\P^S" +Ɗs挬0VֵCFVT[G02^P?K+7o"Ei:VHZ~VBt!|6 [Gp{ Zxnԗ :(A1-h Pbh*JuV[a3 V TIV\Bp6ҊK-YZqi[<=%.P8 qW>=gKY)bʊEVV:c?eeEYnJ`/ʎnJ QLVV:.ʊRV荲 (+|FV+YʊRVGsA-eQe`VVs++Z V"VG@\2C?XI) e(b%B+ V=+= V.JX WXa+1jVBXX8W1a+>..+= Xڭ |?Xqj`emj+1:XYBVNXYl6+u%tmV\7tK+1 (K+ $Xp)+ۨXa$ VHXY* kW8X46RQɤ*nU i "]eWxdUXUf_R $Uh6 pU.-WqO'_eX. 
}@+ uoV؎`[Wz3*/;WU_4;6*'}g쫰_հU_ӢP*^exG|3mMF*QxWGP1Uj@H*5Rob\.KHٲpeN(\%UƕAW9afW4pnӞ Vd0VlaZU*;iO*lQZeTGe״UL)Vag*3BjYV!hcbK~qh[U? **@lRjKUlU [EUb2 X*~}ξ?U܎}t Bjwlw*VVڶ ,cVɢIhV ˚V!- U<2QB4`ȲLW#*'au 2ıj-zA v`x(B:ElRUո {fW]U<4*Bg*M*W;"EqqE0B3JE6SF5,*'Ul\ ULBUb SAd\=LJ~hƴUZ*@*[, I BȶFжyK`!+[%UOU/ø qúWBh**^JU ZVٞ*V"0g2x,M+DMxaUt*dV :usU V*.EUH45UBBlr c *U,CW^*~ R=U؍0!.*,\Ŗ1~6,1*7VյYmQcURg\`8a\ p%$*:"HQX*qetZl4SlHl+ *^@†UL[WDɺb -3R,Kj*f+UH4b& mtT*2B~exҌKJBPīxw *bļ ,^e VWa0*^rz*!̫ W_Ogռ252v A|;PU5WIW=!,]Q൅k&Vb 6QZY1e; y"fVXEʿiA?(+]*+ۡA++.RvUVDY9|YYllgjCaV)C+NRMZ!Ί7}TyZq"G#@+޳ @+bhBЊ5푀Vy 0f d ܻ Њ7,yV8J:B+.6$^Zd/7Wܟv-ZA^3*3>tMV1$Њ xQC+/ Њ[)ʳΊ`he`}@!47BJԃty;m"Њ8L`vVY!Z23FZnVE{K+9VZ7FZP [Zskiem-8F | 8s |pKʂHVƭ }{rM$,$VjiyܖVU#t@+_- E+rm^Ϩ6"FJZY*%,N]I*׾[Z! i}v/kǒRcYV$[+lfrV [ak{A7B*oV[JgPZ>(CX+&b|֊m eRMJV\s.k4d`蕥4b ga W_v+Tm[_aϞ++5'J}wWbP+Dcw2RuD 앱P0mP_DBRr{j=/G+U tWSz+TE_Y6ŒZ< BZwK&8#ʜ/+vL̴9W+nW?^sa bCW<2"+W\nx+^RG}WjJL+$-ʍnxe4Wꬻ޺TRJVqWSUiw%#]95❰+oⱜٕՁ]d8/vWN+\Y+0+.W<32^..= WO7 Wp/\YӍp% -!q9+^17 \1Mfp\+9bWƊH\!qe4X\1q1n~qţ>+r[\1nqe>wWt@l/XK,G+HP!WR^.i+_G/+uI;!|p+ o|E_1\у8r4m+ "|DzB2Jjcxʾ+!m9+n+i+!a>3ͮ<9*I&5Baoî@ +.74쮸k` +x4++ε_~vW>q.wE\]9k+'fW(7ď3UkTx]LP]9)>t "1BT~ 2}@Jؕ_v*r]슋Į,*fW0®ePw۳ҹ+仄\qݮ"Wl6-re)0rf1Wr`Zrf<+W]Ij͕H7+6Wcꋹ+۩sA+Kz;P6Wqt`s`+nBWF&6rT+;})ԕ,TH]Yr\6ʭG՛]qVٕӑﯻ+ɗ 2t T]IMG^cSʊ"Ų+1BݕA,+3*E 3jܕvqWRLqWI-+֘+R)ъCF6WAteh2 ]QN[]~QWp +J3/FM+j׼Z9O ߢ梮 VW yŏ7+HW\5my߫%ߦedySW{BK^,kyWr㵼 W<yeijW+D^ ҩ !2nSʒ.xeԄ^a#x+ vW*fLvm"&J#GWD ^# E/WJkiw<+l+>c`r>]<Z yWAܕ\t]9\G+:T/ w堻}M8_2zm_,5 \+ mWX X+^~vW<ԏC`wt+WpY]yDʠ"bTʠP t]!3͠+te[t)> C++7\+Y 5ͮd5 K Bؕ'6IR 2 &Wve{J鍟bW T!]2PĮtmfT\J2ʣ &)+椬X:jVWb"ꊖG|'ЈBW{+;Zt\+vt>Aj@WN-]URffteqY\tWt/ctR+_s;{?C1Wp=u I\Y'ĕ6+l&mp46ⰋUr\9oc*{Dn [J1ފB(2Jy+QQVV3–p+'#ܮ K ؂[a e~,pe5 0bspҒ^"ql W# {n\9l/r7\@eU\ѧҤEV+=Hjq}"8 bqL9+lLLE\p0,`LY\r_`_{`'̕(KFWN#+xuŚ JVWn{.+.mEJpԕmY>Wi"]iwEaWvK+#ؕJ!ܕֳ=+|| 4^I `xŊM+IÉVU$hye<>rjmwŖݕ+ηbJ&|W:^B.xF'.^6b*PJņxʮ+~^ 2ixa+'Bj,vW+^qCWnD<*M5GW< {UbX fi.wVwڙݕS'kwQvWl]YHCD+I^Y`W&׈]CJ9;^_G oaWTumӣQRUZ]YFbW:ue( uGNpԕYAWy 
R1,抡i+5?~̕a&F+']i෢+A]YЕz+ՕwitdA]qJOȹ`6W\ފ2c|6W0bIǔFWѕ+T˭⍫l V02r+_+BA@W]9|DJw6FW {=d35ԕ%Rʲ`uk+Ia۴!BE +zw6BWt%7 ;]q@FWB/ vjV/PWl[]!,Yv+ Brs ~+N~2a&W5Klhz{BB8'SJzmVzrIE+Cx@W+6^FWZ 9WO59L P0-]M8COJ q hzz+ YjʠM?W]kwʼnrWJR>vW\iw幸+pW\bw^q_4B7!z%+iM ݡW6v6E,WHh vW\:ez%cW+!+.0r5?2YKO+̷x@+su/΢X_9W\H/!+y,]_a> }ҭx_SJdNz8ߢ_WHX_-g92'VTqG_as+.}e)WfG_!XJ`_ jC_nWRc]Wz AJI;1b|ŏWjI4pB6U+Li\_a_d+g'}j_3:+A_qJW`lSWN r~&T,_rH+=_q4v+++Z^"W{8=ʂT^aV+$4^/ #|϶W/^92E앺u-WvwVWFަ+~ ^Gt(%- +c)Z^I% W u _ɪRmk|%C+Kd|C+; V%\b_A.)c6=?4EVϚ/|ŽʂnJ }%,I+կ_qMf ,_[ > [|eWQx||ŮO+e|!_1d|%C+ɕk2aL+_)RQ AWg|@ UW/ xAW^Np^G^>Ω핥W$ԲWv-$dYkWn}-<.x+l+`5W<+Д]J׻,}ţj+,PE_ac}CWk1cWR |}W0,[+x_WZon|dz+npW|{_ᓰW\{X#tW ^]o{B+Z+~X^a^l{d&{z+҇Cݬʰ"}6=JKpJ5օ4l\bY?uT+XU n+FL2^y;7+ xet+^Omx|{7W^YQ^PJRW*r|Jǀ%x#RcwW*sWE yL+qG+z+z=L{eQ4&W_qWL_䲿NWP௰D? Xe52ū+ Xj:%/ŎZBocS]K6 2#o:"eUTX+iFU 5W. +LI+c q+8_e~"$g~% Q+92(V̯x j~ega~+1Q_IB .hc+.6bKPj_^!Jn2$ͯh?a}]}e(Nn}E_񭀾bZ I+dO@W85 |şm|eNc'{{Rt ~n^_9;5cڶWؼ7 {// W^Q8򇽒s.{LWz i+ +}W|^Z]}Wbye(jex% &+;GW|^yO +oʜ@F$xr xB[)Wރwr\cx0ˆW(}∝ՒWj+Xw^ٕC+RWؕ®' E3x]AMrrePWӖuȨ+D.$uk.vVWȞC]"u%>ԕbiku X]F8ouA+2XbE,k*s$+ ]єx)w%[]1row+]]~hve1a5] 2aWȾfW]|+lvc+v]̮X@3¶w?r><χc{1y;_u){We>P}υlc2ǨjP3Re"@9(geFi8- h>vZ H9d|:o:wuZֿӒaNLNK}Cm\ãMK-uM%*:͛jOd h.ZjӉP-Y2԰MT _Zj/(Qe]؋uPYA>p]WFrëA42Z\) B3-BM*-5lݪIiaC,a!-ݪŭ}p3-})WGGzG"mhS!TpH9h(iQ"-uq&=i__DuWSi+K>0uej^'m:R3ĊRO@} [mM3R2-(K$ 1xӍڃB)-}HStc+ۜ|FZcURJ#-ub[iɠ@NKuRein]?rZi鞪s;-u' Rs3]φZgZfTK]Ny/M#\NS-u=Cg.6ղ4MOJuujBPY-ձ} jA5hRVKu/_7q7 kos Y9Y+R]~Z.RXK?uw^c-napXK$\竗2?2`h{>hKVK=Rݧ>s\.5mcsmheB kpXKϊ, a-b`֮?+XqIjYXKMMV+*̛QXKݴ/ 2a-=޹!u-$Rm@.L׽eWrzcI6SAq k~ͪtG,2a-CY5Rȭ#gÜV[Z.ic- ~ZjVGZ,^9ݴJU\`KuU ul^`K#&^^z|+ز~[jbK=㺻r΅_p#q3VGZl &Ėٍ;7~-yJle~S-5ҙrBuL-q-sն{v-<2]FA4LbK^RC9\*vCl9[IZ9q:ZlEV-l/(ݡz>e(>z% ,3|-5Z\+R' P`KHqfzeSӾT-NF[yD`K?Wh,O7mlIXJ^RB%:(e^kZjZu^K1k9kiγ2b{ײ,3Zx!ua4ײLklǔ(AO{-u7ز6[^un RVc:r*L+ڭ[J:$EG} R& ԔTQ`-[wshezlY~[# bK"Cb ŖRcz%TV22u1lY"[* ɖzΟZklbreٲ~&[bR"[Ȗr飚l9/t2[̖jr^F1"[z#;["K9:^9 "[jȮShDlYf82[O1̖C-[c3-}[lЊݮݑJl1액}[l&lbK Zl!Cl?ҵiGeËl!!e4l%ɖ ʹ 
mBgelY6[ҁCj'ɖԷؖjf[̶ԤcU[zȼ-K?/N[@-ҷ{zx$(ʶTXCmb[ J-}z}(mGؖ 4E6۲öܖ 2Wz|'޹-5ԑGUm%~v[f7ܫUjK̮?j\!%+R[vlVdKCGn6l?dK""[gȖeq@dK* [潲lSlK42o꯿:"jTHTy-{kR] 8Jis%s-sn\ˌJEs-}+Y;"&%e(|@TiVaTB}x-ZfCЉ5e&'#m kEG׋j[(G(餵UkѺXpͯ^0͢P֥كRZjn/ ˅XcVOR |zͫwn7ٵZj2 4k|[^KE.{C9}L{-3 Brt(TkIY5l:,TK2 \#Z}ϕk( tZk#mZk1Jg@8RkZj&jcW*ͣ pEKkIZKQ8QT5ֲ^ZjMg[|ݢ IKC-$9hqp4RZjlޑRԒ3RWH-=(БpGZZ}=p(Z2Bj\^VR9+RfTAZz!zZ@TKZ_?n٧喷Wv rаU\O:ip7-^q߿Jd*JFx%ݪ'Щ 8M'NQDuZ|RBCJhIe![hqG@6h%WSDK5) M#WC(4ҩZB[^Ǖhd  DT8&Zf)"F-إB"Z2RqSoP }he]dTWF)>*0Z h%I-usѢ Z215Ѳ|x-Kђ"FKr1Zz-=בC ?Yh^M-Ylh _-B,?-uhIB-D2:RTJ+-5+⯞}W :=Sn۟2PZke>Ui楯, J(-$%(-i`hKX] FYԶa[Ziɓ*@-2jY4Z깰 a+PKYa= iџgzജUB8-ޢe)l˟dU( qZ:3نvZ}xRѢɎN#0-'U1-ɤiq:르tk^5m ,])-DžiY/V~LKR"̴TF#ʹ԰\Åi q)=ն ô8"ҽExRxe-%0e,%h1-5葟LK/]19?3-,WSj%e+60-kj<ʹT:KTQT#s+E^PKW%9ZjY&2Zj /Z/\S-Kg!Uj:6O>>7GTcFd)@Ca餹*Qirfe/55k6oWU\KRZE RRD<UEkvxJqZ\ee %$9ܕQfZpj2+]Z2R!n}uluOՍ&MI-iҝW/kPIi `-CI|heiZ{KJk S,rvFӵZjL - kݩ#T8P˫DdEUC,MOTK]r5*R*iH=⋢Y/ kй Z iW%EM`-YVk|[U;.jFnTQoՒ'\Xn|{.jRFQ[-U6tyqAjr;5Z*RO>:Q_ѣe~mHR fmȒZ*6RVKʷZ@`-ը4R БVFXKVZGMzTm;VYQ8: ѵ$ĴA\9Q?Ŵ@LM5U7pPM;9-iARQ\"ʪA+odi[!rZ 2_LK ЅܔUO>*PZLu>)ArQU![OH21Ӳs xk^e3j!),[yyO`I1!ru)Zj|.tUUZfw&RSU)=" ad(<j$\-O  +OenesBl*>*v䴤sʪ҅`+RAC1-wnZNwbZj"1-T`ZJWTWu3ӒK3-YӲLVĴT,^s{y'FI!zy7ti^stv+R_TJ;-^=m9 -)iu{EZ.;KieYRgvP1cr+yZ94 dJ&S|]2Ogt0# ZRЗuZy:>@+I6¸'Њj,Z@Zg!r'ZZ$EZZmj|D!iV*V:ԊB`L7V+f^2^ny+,w+%p9 ty/WyE^y(Fp}"V iԕ!+߭>ʧ}dKC]Q(1Kll}ns\q=Jb6WX\sM9lg̕RFd+bC͕Wۅ]1v`tdЕ{vG݂ѕ]3t+{#%]dtź0J*AWD GԠ+7?fX]qPЕO 2(+cЕB]D]a`ue;VW;2 :+|J>u+.Q2 +-芇\FW]]IɪW]qGet2b"~A] 22VܴSԕ끛A]ay9ꊛՕ.E]EG]Y#Z=s2>> uŏW+]2Gr'PW\ERԕ QW:E+ bvٕv]q-+î\wĮdxivK+]`WΛocu+.Av@+WfWN:S+^ 0>eW>U]q®6r\mîovaV+]avanv|ˮvmy]y_aW$6ՌFWLj\Gcsr+ʌ^~͕o`a$ls. 
?+Q\C̥"W2rBϙL\-rTyPGl1 RCFWyBWRYtⲃ(uI+b2#M0W( r>:RWҭ[]qu%2Օ YQWP/H]Im+̍FWoiʠ+sfq.dcteM|ℨ7rS<7&鹖+qOd芧IVWHюny+vPWBZ]iY;+/>+^5> WnW+L-Py+p WnhC+rUb+\1nrhr~\&抃6W.ؕ@\-=+ס-+ͮxPy_Wjydy%J 0+$K^A$W([R}Y_!f}QfW}+<ͯx +WX_PW}e2)D{h~a}WHqmYxYW|\zl88b|&~Ԥx9 B/ _4R5z}5\q0+w/+޹rF^:w~Y+`!NJeȥpid{&nMV|e@+!̎pMßy E"G\}_qҧЩ+tWORL_?]뭯 }% m.~= O +)_)UPy(?~2Wf6銕ʂx[xW5+U: E,G7PXl%P V@X 96XH,a AQ8+X h8,.`yطB'\B.(`y7`1< X ! K XϏ+X\bu &XGB8E;`I KY>!XC%]!X,,`!;-~K^#Ž ?LRRBs^MZJ#~{b_/yQ.EUڧ7CW D_a_2+_a +T W^{%w?ʜSJC^au,=0#T[KiD+yW) WvxW\mï|$~M:򴌯_oW\2 +dn_9z т oS"JW|W~_qo _6*WWS _+V2R;WDa Hx2t!xe~_ ) $^!exۥ^60+]6[˝W>eB+]qgj]Q]ɻH]1auփ]O)lu\a_\\q-bB1Wx \9+7mAW1GoBVWrԶJUPWYRʭQL-7r/RW!A T]yv# r^ +IQ`wemஈ~]^FW\>}AWj/Q-H"芓~ܨ!+ߦW+.\s{\y} %+\&+e!+2Q "᷐+G\:`q[+0-+ ;\1wslSgi6W\qYab\͕m 46W\js&N+o'se:+q|CR0Z xz̕ٶU\lsx7\aC-Vx\- eg诨+tlu~0jPWcm+QWu:S+v;W̐Y\q}ϟ#jqa ĕT1\!W- Ѭ+,4\&WX!\ȕ`.m+aEw\mY:̕tYm8hsvO¢E0τ>+͋!NpĘ+ +{cs DcX2/2W(+s0WF RBʧkrҵ&Wd$W!Jbs? rF (v!P`I 2֧T7 cBux+l2"J⭌cVH[qBʌח[1 vފ bVo|o2[rV\L[\yY\@\\Xf9W T{+o[joE]p\NŕnU"⊊|Wqe9@ biS+-KB&GUr\O\qU}͕>1WF/ \q6WRvlU++Ņ\al䊕+$Wz.\y>-1WUsbcW֫\1KseƎX)e-+76䊻cI0RireI+ ,Dĕq@+!W6\~022 ph뒟r\6lsŽ5WT&0Wz7͕ϓ42W\]q'h]^]]t}@WMW !x!$M?Jo\.BI+f]AWxl/w{;$ KЕX2+b96 .ϑv]tήdDWXltvWSW̚T\lw\ɕ @MlnrE reW@\F1Wzjqx͕N\q]EȕXI1p\!vE+`+J+ǝ' s\I8J-,-xWqݹ.+#鿊+}W(\a'b ~n} -"bʛ\VOez+'ފz+RHS+c^pG,,ʾ2B8RAC1+#*˛VA=/T+w.' f{+A/oV [!wgV[HTpe;j"\ ĕA)qeyTJ\Y ;\y>Ҁ+xKVpi+lQ(;= X2p2@+: |Y++9+7^+bV+WDrŭ\a&W͕\!FE䊏Rɕʓ[qr.b +3=K}Wn#Pŕ1a޸WFw2R|r+Ԩ^+QO W@%W\'\ 0J\O)tJoB|?vQ\s+&/ w+-Uqd+!*(g+N WH )M@6bϥ_r 5W\#isE͕OF+#3]IvѕDWB `ڂ0]agFW&偹%2duȕ1ּD]!ٕv$d 2rCpXx b"i+[]<t=]Vt^+.b 5+#I1f Ԛ+B+\a6WbCHmMBsM'MIȕ®0M8+5W30W.\qu\)]aFW(htE]iG&H/+#s|+.]tnvNm!J|+iBs|5W|0Lc$W|(Bn[*[aW^#ފCx+o뭰XފފVV{bq{+Gv-[/ W>+ī[\9$W%hEh͕ԢI`)IdIuKqũʹ+pP\aMBĕ_W\U\ݴBGqwqWWWt$W Wrܱ\ѹ]y+j8W\ql EӘ+h)sb\kb 2@+v-+++G2|m8\YZRq%-c K͕1A ۋ6bPVrEIse#͕1~WserIHŽ2 pȕqIE:(m>ͮ(BJu+$rA]dBY4bԕqB0,K+Ϫ+TWYԕ'I $#xjW6o+N̕Wެᶼr2aP^! 
yE^x9+B8Ł^Y+[^;u zîrv+,7²e+%ȮIt[" +I^E.q/+3jFPImcx+A^!xQJ&w<w3Jӎ+졻YwŨy†ஐfʞ]y}*SDD]:M+ة+/}ԕ"PFJ$ԕcКJQ^YBٕ!ICve#b[+`WNVdW^@0`WF|1)haWTbYò+9a#(+Ov Vo`WsovĶvW~+݋]qrCr]Q^iOxb"B%h+.S 8<^qMxE8PxP ܕԧ,[h'he^B۲)LMX} Bv$Y}X՛b8eL23WXXfb H,S:Q% XKXbv@Lkt |.@,4KJ!~qX܊^2vX>!``X !/!X'Ž(ͰWJ#uXttXDa2 ˧^w^7ayB[R"iENO<_avX,a!y* >uaXH@hfX'!# "l*2bQX^vwEa1 )ue6(,3OR 4+ ,/u",v",EXTDXGb0aS@#,Wh b"Xv,xhEm& ,[*0X%2u ,\X4 " +kF2m:~uXѦCP,``|LyM. G d)=ˉb$H(ɻ" ҏ L+}WtW_k]0௸|+pJ+* MWxl|W_92+l;} WHUD WW WW-W؂ˎr70k|W|b/r1_!],J{^y^Q^{ "nBvE+V,b+lʝWI`A+PWH k|BLB_ِF} JvY W.+c@|]_!P"BYʩ.rq`4ק%b X^q殽r"ig[5+y+\o5ᕱ %@2+m+cr X**J'^1ӳl I Hlm^DWxWxExefԁ#qW dWgî̩F Lue D] CQWp-WuE uI+s0$pĕG\1yŕ +[bq(rZ\9A WP+R+IR+;yB& 8^jyj%je9VگZy(V.DC 0R+n!tWj}TZ!jE⣩YrZa뺦V-ie3@ZbIi`+BE'iř!J"\W@V:ien(ׄZVX)Z^V+jmV:je\P+C էZZHiZÖVN-Yg)YIZ4* qK+xb)h$Lhz@hs'Ws%.@+[ZqrZRh8vV,l| p8+Y/gvVOg`l6Hig Jbl68+$tVޤ묘8+KY9H2:*8+Y⸝FuVqYgBLgytVzSvtUtE謼8+7Y+O2Kl' VVP+jfS+W›Zij,tKidH+`K+Ҋ5"J+7ҊuJ+ 遴XԊ)MP+b JPB bix;+:+=Age 2iUJ+*X6J+n窴RU?b¯ AVZa /r ZWkVNJ%9[aB(➄b+N"[ bNʨle}Jl  dc+1܊kkeTlԁP+*VʛYyTgjtV'U.:+ꬰ fq;+8q2+Z2+ߛ4⚏ՄYeqVX*YFg*57Y% (/JzYqWrs"VH5SZh]F6V4Vg1Ίd~2@g㬌iNGC%Y9>a`VNofEJfr dV JOJ+vH+,>J+'J+oʖV|n*7Ҋ^-!sgvҊ* ˋ6J+ ۦ(42&>BWzS+'jv[ ZQjjV6Zyv.J$P+n jȽSHm722V4ݤV niV,AZ9g1+c[P0+H9̊OveVνYf hEyJCH+"~J+G0miZZJjVIHZ+TVHZ%9'\C YY8VΊpЊJ+H+-?AK+*IFV zJi,VG+gJ9VF?RJf-IP>ԊNԊsJjeHq[ZqE-d[S9j͵VƢ:Z+ӋBNkcҳ3}KkEMkEujZ5Zq7EZaͪZalk%ۉ JSS+_n4+JhX+idGEP+J8XZ1&W{~Z Z#GB\{~ `M^S+@OAWQ?lEMUl&p4~leNJ_lybV 9X-29%}:4[=O؏;;Q5r\le3lEXl[y B4؊6h  ċpfVVVX_m欭ikFS+gmςb4Ai1/BcI4 ~Vp\VXihEwB8J+t[Z)1bHg=uVR8+rV(i:+FKtV.LppVLYqGvVȽogVZa繆V6"!B+ PAh% #8+}<٪+o b(dEWdYɊl#+2"+:"+FV\ +=YA QȊa +-. 
|_N7hd)ce7G:b\dE g:+ģdVXnfv g7c}\> & 2o:';X_Xa%X1ʊBAҮ (G/e|h ĂdbޒJÒ*+msc++>VdLZoJeŕ *+b(+\TVpZY3Yq/`Xd`JYd$2$Y1r-¶"+㞿#YKdŕ& Pd`EVHY74 SX"+&Ȋ3K ,SdE@rYf7\y$ZtYfK Q.rfo V>XYfe~'ʜGe,˭e^YkUf'0Je̦T`mf]Ҫ/U`#Gr;St+Sb8˼,ρ,ۻ_n*e,3?Q\l#FGNS/e)wS>\=ʌ.l#}M-mXYmVmoQ|H˲r\bYbYQ;FTȤRIijﬔERk(5o"YCŲ̋§XٗbU~qT|pn躯.l#=- Iȑ3՘AYZQx1U(<peFDND,J(e`R{d(ee 2g)+2|Lߟy| (gR-pˌ.toe^Qn4(QU>}NlRY#D,շՁ[]NBYYe,st 2P"eN5^2OfT5AQYefeץ&(aYX9ò .,˼ʲ{|ϻ >@3E?}ȤY9h9{We6B̙*l -H,G"o2c -eUf]0˸{4%H7GebYf3+2Y9,2 2iWŲ,㒰,O̸X2JoYOZQ:޿+2?8F,l3JӦ=:|6b2ev-5h|Zfd?-K&@|uj$m>B;?I-r)eyhY!Z9-eȅhϷ#sW}~hY 1Z$4?e\FKe62Zj7]m;2G'[8Y22٪qog1ZVYF|F)eF>;[ԨՌgPHKŢ*H2 ҲRrdr|#ҲL #OiZQH˼B=28xv*!# ie婶}Jy?RS:e0EZpeNl, g/4M!-K3H72[dM~,(-y3宆0/&o5싉W,[:m RZf ciHQZfK^{fkAZ6F!-oe/e T4Le֗2o4:˧~Y~@Zf~+HKB2ކFKOkB,-3ցfT!2Gaf&2I"Z]&EPd*$D˼lyQa^BiDEy~yI__p-QWFKEc\ E2Qf* 傴;F˼0c1ܐ+ y´1L,(eJ,8-KH,N|<z8-UQN6J#y2DnM*eu^j0-ˊOSL|"݁[DhZaZjPG1-eSOEFи3Ӂioc2-ՠ2LKXZs 1wuZe&jYBρZ9eRL 2ebZu SL 2O(˖5aZIKU gT8οJK-D`Y+ߔR+Rϟ۝eּ`1(-:JKwQZW@JAZu -Kge?}K)-so}μ^u2#*}lr޾2'5 2n[+':,,H{pYY#VZ橫e&2/'˧ZxLoe_RRM+M*+eyh|Ӛ7\YJ~R,qP|:51\,9gY®!ZّW֙wSD˼UԩaѲhY.e-K5D2 2{Ol=-O).Co@-?S>be/ʷM>,hYb1Zj龮(2D2Zjz[e7F˲nnp+{-~%F˲(9?B)t/ZɝشX<\̓1D%e% GզwL-#𤄖%V> &2e|Zg/M.ZֆVBKQ_L#̑Q~f -3)e7,˷"DZz!sB"te2CVD<hO,2Z1 -D[9Ue3G꪿ThFQ*[1H63&Y AZ:.2PJS!8y@Zfk˛ isHw]ղ_ỳVBZGBiM"[c9Ș,GⲔwEid$_ߥFi`3J|ϔ{^Gi1Ie6#ozYީғK-M].evI :sWO_m<Ԋh=B^t^ho`oC-9w{h'DK}C-DK%!Z:]+D2hբ(.:/yF6pU"$e(JhYL -*e9oQ2?H3jCmq]$U M- QDMѲ-QDrBcNBҭFh"lY9[QB *)eQyeZj8&1}@Z3eBZ(M!- ҫJ -r9#HY3-s M~U)re~|HIA\I=˰iti=d0BZ֟THC) iYAZ:[esiOBZ#9jǖrG+vbyR9TN H b Fy`qJ2Bh\2Zf[ -ˈ.i}!w)>Z*dހg iTN*ԫUHKt9HK㡴t=L\q=ITTô,Ӛ0-5k_~Nȉ%/8- MWNKTDtrZef2RSD *eYKju)tJ7TnE]) rZf+K;GjÉDjYoZRB7T r/H-?b%RKB ̑T7Z(gqfg޲50JL.f9rTIٓJSP~,e,# -XH=Uet6:HKW@,cAZ:6$VJ+oe!.[R }R;+JuVduFVXYiade#G)>Yy䑀̅2FYjX7[YeSAVd" VAV'2ʕx++kR2CeEd奢p3 +EVzDV=YGAV.%AF@VH XyȻkcE#JWiXbъĊOMxFC?5U(,D|?!V;B+/R E6-|<"GBX)bB +ϙ&V(XiJcwbX;XYuf$VV[+,ĊCEJ},C &z+ZXyjG`KjV5rd\}q}klU2vW1|&ⲱ9*~W>|Jr1Wj߬UU$*- eZLm^Z^xqKW>S^Ut(UHU-쪾JBW'O*ϗMU, 2}1XRbG+g2 )b7rb+UV!Wi:hPDYJ1*rW 
b&d*rACUJF4לRcUDiULj[eE[e$P%^hwR9uI!W"dM-x׻UXmYSY4MVٟ\ZT hQUHYUeO UmW^Iู*/_VAV潲*ªzȪ\'I\V*3[H5*NC2!U\({! jYeB",l J Vq]UZ**SHѴm(3U.m)KhI8Vq5~hJ*oV\Y#媌Rt,ʈE<.Ҳׁ*ϛURU!OVaULl[X9YKdU7UNVV*JYp)Ve|.J>GReMRʻh"wu T銘6UR 2>HH*nQ*cU.WQbb( g!Jj*ۜ BIRk*-gM#B4تi0gS%CS2z4.VS&b0JZSIۘ 5荩0lS1L &*`*[b* cKRIKeRar|,Z*X* G"e2j+c8uRSRiJbq(#rw@@R)SVTJxNT/E(QyTiJLTTSTZNj)Z*c,"QSy<TSqRNbp:Z*26X*{6mKE,JLE* L9ʡ*S$`*lT&J?!TFM 4cr*MҜʢ\T|7rp*,A4Rq؟TA+BJ**&*2[idH%U$)<_UT\QLP#%9IȨIMiRxSSIl*.%XFQWQ~*= 2f_5TItr*L~bpF LMsL4Ufs& !UƄᎱ&OSeLSI2.T_Ih*IUQU+j*ǀ4TFUxP,֨vAsUFUu@U @Q%PEUޝOg۬BQXQ,o a ;2Ȫ) VEVݲ*BV7*nqU᪐>2]>lϸRVevW`UšUKlW@WE0TXe.Z .8l9daªɪ4VŕL]+!Y7 U5+Yb[VRҺ*o,ʱ9W2UNm/b@^ $BDfUƸ>.V#J3N*vw*ⵯ4J_e"TM-2JW([c>[ V`鍆Ulm*j*%U*,G* VxUX\\2eUHTUqVU~D*j K*Ӧʖ^M UUUK`UF Q>tUUi"Y-dJ8X\9X~UqJYzYŠU9[ª̀f}2 fUdUU*_Vªڰ*n **êJh*zZ*KyAUZUM[ZUٿmT,a듑ʌD'#RVa ғ%hG*)%jCdg LUIаJ*##ȀU܋a-,5XUyqUVQaV VW UN]F[*\B\SVyUiX^XŻ Xe,yS`[ʝS*Vyհq Rq O*=hX,aVUc̠rT`\pcRB"Gh{ܶJfJ*swm pzpUOUVMlA[v*Pͦbd$m}Zg,ʢV+hKh6V[0*360il*7{p5UR -yͫ}UƁW6XJXqul]E]eV[IO\ů AYq75j\٢5*6B dtdPWQ`W:W=qV3UUzTӸK\Un r|n4poW3WiD\鷸Mݳ MtH/U^=yXWπ&*\*wW4-xYx6yU^pt򭫴삮*FUޘB***ƫUUlY*1^J7XO 2ܯUҮr`J:mV9 a2wh!UZ[VqhӴ h1JU\@HPOٴ+N*+RCuBU DV MGroS*'-d ʇAt`^]EG]ŧq*W*UL#Jt2U,\n]%;6Bʤ{T9W2:J<rKW^*yULn^X7y6WaUk_8z*B]^ŘH*۸ȫ ?|?GVֽ*1^wV")ֿʞDVRG@VFB>7َ20'C)_C`Ɗc6Ba9b=L#+݈0Y]@V6bfdE8Wee #u2"|_e8 _`Vx50+̊e00+渢TYUVMw%ʱsJYyޮe ʊY*+T催EXY~?%Ȋ"+-`XȊ)"+'"+4F ZȊDȊ "+&ډ*+n *+mDYqVV?6DY1ʊ5(+b7ʊɕ(+oS,̊j 'cXheIJ)hVVZЊ{ xtڜ @+]VbwTZQZasEZ!K[jtVNwZtVXR(?m@.~Z+B Vދ?YP b+&_c+bb+Rh+y2Jor ׯTB,Ub+l؊b+n/_c+}VE>%*{58b+A_leV b y)b+AKle$@_9R"W^͛X+ P+]b _BԊ(I,#VZO?N+ݿ(x((!>qBA+]U+5'YE:ĕ!R\G\mq)m+WW+㞬ren̕sZD $0WwbXһ@kEʓϰ+#鯻r]i+Rw^\?MG2{$G⮐ +]\+鮌]cV:^#B5莿!U#xϘ+a\aBrK$W,\roȕQO%,\yK/S\qqen}rR5!WɕS\91Wa\i2VWiQWKԕ1ue|+ QWzeՕȽ+.+nڌ J+q be+=]qG]ɸMuU]&B&\͏I0W>.+~D^؝#O(^aöW`WX2T^qCc^q,B@+_f#4¶m+oe42d xbJ-+'qz Γqj ;鯌'^s`_N<sOZ,Wvѝ!b/ <UKG,fOʑu+*%++ O z+ 4{+_2zMQvWW\|%+ G+*JZ^I@M|09m& 3+d]b2iwE# [D6b0_xIWExEx%†WWExᕃxeuW2^y7́WLw^iXx HWbFyM!IGyҬW"HMdCmǞWf<8KmyeiWy?y((>"Dy+sG:xe /p , @T@^a_^1 
L+rW"iyœb1u+X+6GUzLze^-^aWW+^W(lyl% lbNyiLWR^QlW^:y[^!yep-(7WTWZ}2WbD^W\ny;Z^y9Wf(hLz+$r|<'#OV^a{e8+k`2[^mW\ht;W)}+痆Rv5=\+b{+ 6G¯TIO+T,ʭxqEUZ_q,"'¦+Nm{)z+WF3|9 Z>ËbJghj^!bE6z"u镶UWA J;0+)Bt#a| a%6|s$J{WWƯu&b@Fs+2Y.wX# =+u+k`P쩽2Ni W Wwآ^a6~`^IBAA讈 lnF^c:HV{4qW #+4 +MUW-GnW^D]YX+SU+BudVW2W]KL{gʋB)8J'\Gʼu%Eԕ ]N]Alw\w|ݕ8c]9MwexqWF3h+H +艸+Gw\_ܕ9͛⮸TX#bn4s+@m{e>9w7^Y ʜ5WFo>1l=B^ɼX{EyxWлW%J +T.C(IEz ɤW 䮎4+WD';rK,G,l\N ]W!{W4lWŰW^yi{=WuJ}W7ȯX2t-{K$X66&`-/IJ-ҝVX^E++={~u\|(ʶAdf -~<@N%JK+iT4X>Y`s7 XN,\,6I^v`ae4%X$XR˝-4XƑ {Un);K/Ak`aK {0X: "r7R, }K ``4XT2:BAYB WpX~O_e u"u&wS`yS>ڕ6j,_q(``92#X̭jE#L { Ih&X E@cS^X-6RQH[J=f㷃_qOZ8_0(W2JWOR*W0WzyYW:uSe+nU2opt,#WA``[KM)r:,DZ`qK>7%:`y0۹X,r",Wz'6s;_~7W_iC~~MWܶ~(M+gZ_O?&с 24|^+B NWu7B5ʇ{W+%yҗ^{_^Ԁ^mW̬^cWW2 \:ܕtWwkw] Ĺ0"]N +7 [+/+ ʍ"Bi ʸt`ٕ^+  2~PUtqw>3H ˠ+c#J. {Ћ \ 2ggR7J\.N X.Lq8Lr7ɕPB̤:PC72\ao`IM)X@+ +iwx+,v ފx+M[&' ųAn[Y p+tríPb=?JצX 0dk+ț['[qOK lV^l8L nKVn0[YYjn w `Y.܊zENnnEe$܊ʭ=rSo[nENmeZCVබvj+ʇ9NSR[')b+$6BCc+Kt ǍtS һX⎣b+ Ia+s# |wdQdF Q/2 vc WX\\P[)ggVFPelE[V1Vk^Ukm۵VHYF fŠ+:O+.֊L(c +_̘+7 {i;yg͕p8 bzJY\q)r h P8Mpecf $JW&K+W:/B JK+#G. ⊺- [\a?DK-[\qb!t+!W 2LM+k1WF3fsCҚ+0i9ʝ=$Wk\P̒\e+Ey3ɕ~d%P͕l puB)vZ@+oo,K&5Bp~iz+[yʸ[W*gWCAq؁ + V+\TFWu EW-uV]W]wueS] uTWTW,8]? DWH1JשGJdWF aW ˮ+]Ϥ® zvWvefk®A+Ǜ\bWȮg+ v]$Ĕ5Uo&{#)a$W6b9JדCtU?ʌ$l rdm^2˓Z$MWL^nzN^|WFU@^8@(]W`D7啥눼Re/++w- L D^#[+]&ba"ϨW2h܃rfE J' ҅W6M" 4hxY ^Nxe.C2u5㮌` 2KexxNx LUW^qV^YdYݕ5)w!q+$ͮT#îL+)v1UWN QWoJS]acWf+(t:1ѕ+1WF˪_$W/HȕJ"*KSqE6Qq7F\ͰWzGTWܣZq}X*Jc +cSs ɕ_ W܁+lb%z+W|rRn B⊩ۊ+WDWWWRa4#%ΑW#Je (o%2b+*r0ile[[a,jPV^*"t*Ҋ;",M.5ZZOrmP&P+}V.*>V2Z%GZ9?+ELNgZA%oj2tfij If+V$Vy$b2Znhj\b k+WVPZy7IoL<Z|Ik.VB]7B8yR+]'RX+O6LZ-˵VPa+7o4dLM#J#h+ͭ܊nVnECne s­B+ ? 
6jh*xhTheWh@+ hEaYhԁVNV 2Ɯ9ǁV,kiEie׏8>hũ " ~aB+i]@+ρ3Uk~ênB+yZ5UH5cu1c 哚 q8+., .Hh"3V1͈r0V#Fjh0qVN![) Wb@gD$Yq2ڼH,KfUe&򆲲p(+Gb*+]_ʊld;W煗Y`KX}7J*;cc6V#Gf8 _ͬ.sR+vPo8+ì4+&6 2+Vʬt2vgTjGDVDVR8)dAȊABRDVu'e:R;㥔e I4|(&~M[+'H4ber %ߐX2X4+K}+ǙG 4r~DWRs"+B[+X12VL+DV0Ba2H߾Ɲ V >\+X+sZ=ehRJ{+}jVF`դ}|Yo}*dët)2jۃUTW_<V7n]&PWTQNؿz=X$cP2&nWޕ0qg0+Xq'c+21VfjcEbe=G̢Iber VxĊ#1ȕXҩ| +c8{!V5B(r|PD V V:TғK+<1VLcX陭 e +G< R-Y1&Z7ȍCYSMʶ|m5Y!V]beMbYI<@#+"\cŠ kmh̓LE' BǾiF~|Aǿm!?/=_}ԙ -c{eq*r 2&t9P-3GhٿlѲa4ZF D0Z~FDqDWhHZF'^GJ)!@fT@@BTda%μ" {,,UF:ˈT:L2~G|,3e,{D,3#e ge.R1q5#Pgrp8g26YͬY 2o傂5.ie)f,DU βg'1aV*0䔜,#ȳY9/2~oԗ,ډβ᫳DtJvCgٷ_pJ-5gOӨ?8 HMgr"2~ה2a~St\,x<;6e#S<ˮ2m~xyEc~ٙQ YƏs۞g)te,qg]DU>˸xW,M"#>hAtN*7bџ2ZEHeJh*K2IU*0h2SP-cTda9E-xˤZr^Zbpj ZF"Tx҂TdaoUC;{C10KQ-}| ҽ3Th6ՐZ\TiA`&2Z\ \JpP-cCP-c~H~j q%+Ys\X-c$V˘)TǏ2/!i~SjkZvKZC27gqT˸)1-?TXqj>kRX/kى)2aVJkEhqEs ehZzeO^Qӫb#eN2Qn5gk.X|$ eyIt2f1%b_v-jS+3j?Zz2:zz`Vs)e%bjY.MEɻe&5eNXZ '݁Xj7oVս2˝vM'2gea4e4+KY-2fP-s#wnM61neT)ekjMsZk_j|n~SK6T)&jYod2[E>9B;6wZQ-e߿P-CD aeq jd2U:X-=jtKUe|?+j=AP-3=*/)e

\ja9Th{>o2N8X-3fuqN1Vj@ZƻBjg'KQ-cBwȤZF>6TrnB0_e(eIZ!eA*Zv]H`&2:[Zƕ])e㻔ҳH-UoRAeeQZF^7GJiƻBDiY´|0-3[)LՁRZZiet&Qn)q7hqp앯@˜lXLˇ\8R>xl)e>YN]g,Xk93<˼,uxԑLeo=$Re|A4#'`Zz ҽ?L,L0-@iڷB$iLIHg 2ަ~]ea(#Qf66i9IiyyW Nje.j,P,[:V)NK8Yi!qZƕDʏH e_C4i'? yRHE:2.8iqJ\i9/i9?Bhq+"e%F2+E̵Ex$Z1hwRD6[D˘R 2g`"Zƒ SDo 2۹J!Zfi-̑"ZƍW9{Te.ԁ;i!Z\GhSo>-G2S/rHwszZf2U#ZeG -fZw j -se'y*<{G%`ShXBˈXGhq"BXY=`D,$Pe|t1ZfP^r&<{$M=F-=yK'AbphY ->1EZ?he-!H2qԌ-8Z\nhŨ|Z\VhmTZF|"D7hs-jY-cg!@˸=,c`x%Df gqU5@ x-#oZ3 R@-3a'@˸e1Q8+ٌZ1hY.gqDhq2k FhabJUYs^>Y8aWѻ|l Gg#2^S-Ji2G^hÉꃂ!2b5iQZ2´8i1U@eִrfEK3 2.z! Ť t'2L2-`Zf9LMɑrZFg<8-M6N˘N#B-\iefi9OiM9&P_q jW'4CemBNPKB-gt(-&tʴ~#RpJ-cZƍɑZ* Qj|H-gej1HVK)Z朢2y' 7ZK+TQja1TqՁ;LI-3|\I;\#:-ݷഘSҳ3btZHiôe r^ PZ .>;J)(-.ԁ)QZ2J@-i7\NOb(XeV`i++iAzzi%"u`2-WцôT2Gi9*M秕UZRő#oֹ̿RJp[v[Ez(-#e)U9q̓k jqCeِ Z+ P6B-ݼZ < &ҳ_Gj#RIOH-s60˓5nP-c"QTxzIWjZX8VjYdu Q[j&R8Bh8TR-Uj)=5Z΋Y*Te%R-s%T7u$aDVnҴDVn)AVNCDVLJY9^_2ieB BʙD;+WC;+;+aVHPd 2&\B8HV@[!J㍭2#9 [SK!b1AʐAgc+b`+PlWkZiBkVuTX;ЊgFhEhCh&1B+7HB+f [Vsi:p5V&V^vZIOʻ+tdE%cTZcVUVZ+SZFhe{&{en,jbp9+&XE* Ί5w:+Yٺgel8+ĜuVaHgRdVN  0+ͅ 2 QaVLjf̊KJ(_p0+B;YnVfZ`V%Y^YitCg%Y;+XSb~o wd@+PV2idFV/G@V.%=;b#+,O:iWa/R*&lڮb=]J8*󨮲w@W4*URJWm W|ͫ_.ʳY*f˫4ӼI(BRC*P|lŬryQ)␥}o”~c|9}Uv} 2џsbN`pʸ ce91Vƽ(e5V4VƊ"+7YzV#+adYd#+֍E*37RW!}M"ʬWIC*,ʫAXxwW.ԓ*, yJPWiB^eWW^Ţ~yD^ER^QJ/|,G,"G|8Uf;U׼ʹWY btɫʫFˍUDZ"<.ʕ7J8Y!*W/q'*5i\*3?\巇(\EI/JD4Ҕ;VcW9W!&V*mUBEV 6Nu-rG|;(QmVH*:6aUz=Wei*ݥ᪌K qU/oW|vUr[WhLTU*mQUnCUqVUUoUUTUpX==Xy9UDUFEe<*3>W?`HXx$j5f3uKe*3u.I*](r_k* TAUTe:'嬇+@JPvhUIUe$)UEvQUUӪJgMQUfQUPU,RUQUP-2b uL2/TTaFUUaUaRQ'j{M*PLml9*b$Vkӵ*SLKTɎ(mUc6PRlHG(k*<U!QVEUoUꪠ,ꪨ/XvbJ*{F ` l_X9J*UcamV>*Vm iP l,}rrUcXdžU̢V9VWW堒UWeQTpUTW$]EpUHVm2obJ* hXltx*,v6`a#+ d~ޮ Q]dTUUYr\=tUW5vU (U~pUZ*@ VzuYldU|ȪO˪۬!#4 ddQQKHT%y@*ª*T٨/ ;顪_u!/CTvÚUS\'BdYHerYXJaU\$UdUwdUjeUsU4,ʘŸlqUkWAu*AV@V9;UMPV"|iK*7m[eCVV(o[asV(VVϳ?J?(UfET}epM\/ʱ‫lX5*Yn\Ї x;*HڸsG\;*J*r1 V1:2oUfy1ZX-7\E[E!O\0q* [> [g6*#YWiZzCh2\-O- TA tY$GeŚcOʊ ۭr.-ԵҖ JeoZdaVЧB4JY9NxϛP#ҎJ#/0+ଠ3RǟY_SM(,寋D9|­D-'!iY!$:+Y^8 _g vVhg6^Kq?8Z#|Vb&|S+i :jA 61V#:סoK+5bя؊j+` d 7 
\ipKWzvE=J7+CWKW+QbJ¥ͭRs+֩R[9u^ڊ{o<_ $[9dV|ȭJp+s Vc2xr"ZifZ+l*X+IV,|hkKVN51 ZqR{OZ!֊uiZ+VIkX?bmʖ4V2ZikE[kD[+Vcia Yr+ةr+} VV܎Vm*h+Ga?L 4J|oD EP[a䤶j+s/ b+bh`+-N[Q[[!xV.6bA(JW$Ʒb+C-Z ؊؊OVB(@lkV B[qV h+̧x$ꊿ>V]٥VjD(r_+V.P+d-ԊT ʮIh J+ͳ(4)\`JH+ҊJO+֊^ rC9Zi kňJ?b12!+b7O@H%X'ʌ׉BJVkx֊Ԋ#j4V cQ-A|ҥH+=DZ9wH+m7!|G)'x~ZZ? Xh𐗔r>LVHk;d\kő֊EPP+](JV˂Za7rBJ*mBTgV.) 4J;!H+Q({[Zq')"6%ByC+0H+CB*fK+)PZ!6rnibԋb 2|+IXܕ[neZwRwqW(wtWSwKvܕP)tIWƙHB0"@FE+^!0ȦWX"` bʬze!^B~W^]4M2^G"2Jk,%B\9{G$^Rz;A^1z 镇vڥW ^?^Ԑ^j̱WF;<( ]{3W(^ٲ_/ʸ&W̎D^S'޵YR&ۄ^z+̥WXo^WN+;UI+It:P+-a+4++4] "5>MP^ʍF+n* >NХW}^~+nZ*kMX:,b] {Wo nb+&CXGkrFCtm"JWKX@"2 uc+]B|Ixk1Wzr1l*_ŮP  ]q ٕdW|,%;?-Iw+]1^we<<[qWܥwb+bh+< W#X8bu򊠇 P"+L /)x=|WF]+8H]!%́+<2*W+rSD} 2L)@ܛK+I8cmz{WJ] tWtW]a+vWZ9pWguWuW][wŪL]IutWȈmw]O]iw2^a^7]]qʁ/EpW؇@v}=]]1ʮ]ve>u ⹄]afWv[1zHivJ$Nf5ͮx ڡ yͮP b\ٕ`W U]pXuqE+'tEJteg.+*vRVDQWƙs" VꊃZ+nQWڐjuwb+J+aQWuŌvՕDȮl[n+*]+%ˮ+%aW vlٕy@T ՕQWF9ԕU]^\uGՕ d@ve2xQWHґ]W]!-+l/ʨ%kعDh^+VTWmuE`KudVW6\]}F}1WMseV `suT+׈悺Jp+Hc%䫺R}JӨ+TWXiuEgAWr灮툮\'+ ”ѕ K+w+;ҪI̕w5W6+3T_%xxyO?x\_s޸͕(j.K9%\I3x,BrEbGr hj| xsŊ6WJhr_r$iK\\I V\aGW#+# >?+mvI. 0MsB\aCq%W>Jĕ}=;D\)Lȕ h\Q9?M G\OIEՑW]ŕ=%۫_QYJ\vqVЕK*XbVZ+YksU#͕r+c F!z+I kre"WLJ+ P+[QhoHB X&Rc\1Sp,Wp%p,MpR#\; p2u\P>[qZ9 ~p+6e+Ԋ+W.J'y"-qŝ>#J*lTz"Qww+ T $p˧Wo[ %6[v!r+ip+ͭ܊Gs+dcʭWڕ܊r+[y5[Y ܊uCr+Vf<ʇ) 32\^WX\l"ފox+wWފ{ioe"ͭ،V8>n{SnV,.[yNVгI2$j+tr+=("mZ-TmZ[[yPYl+݉܇> 7JqesVDZTjV]Ri9V- J+H+){ Lik$&rhK+V{(%hi+IniVVieܧ)v aC~@Zk3㬌!qVzf"Q? r4rgdžV# +8+8+リH ge čr_8++N1dZ9TfL8YYqDqV,UY8+{8+g$@+mc\ H+.´gV)h\H+$2`H+= GZq?EZy0V>rIgieQ*F^H+IrT9OK+MMGZV B2s⬸uY;iEU:ᬰ8+P:+j8+rG0+SJǠpVY1A_f8+WB+ B#,[ ЊeA+hE@h%WeE[eE0PeE\YeEc_eŞWess|._eŰJ׫r=PQVPUKSCYWUY!ExQVLZYat $Fϑp++4j; VX9XRb倱"a6Vxhhi'X1?d dE& wr]1VھXh߿ʴ ~2W75V(RmdŇʊ " oUVQYq򾴤(+FcCY(+R \gYg 2+ }ˬޔY!cQfEIfEIfe%J eETeeO颬lQYi'ʊͬ-ʬl/ J dVȴc|O*Rc؈TV,JFFDV+l%#n@B`ei\9CQ|R;\7 X$Uxm_Hr+b M+R~V`e X9T`XE4Xeut"ا‰PV9UKeKYҶe. if-؏r|jY@cSV!ײ{+Xe@V1QeU(1VVm[ iiUVZ%nJhg7 r*w-U>*^:d=-в*>UF/ku}ȑo$RV^VaUZq 2ӡUZV ZꬦU;a,mK[Z@ljki6>*>U䈫Wa;U7'*$#`6RU? 
J7/u*>uO8,@CmXB8YV!߶ !U qI-qmEBVq>pDl]\$*N1dU $\VEjVdY1}\5 nTU9UiPs DUf_U:| {U\7t VTKw&E*>CUUDUUDDU{P@ Bh*ΩUq28TU%2(VU!WU5NU;-TsUUjUUkU>G*+"TUe_UELVŧJ,X@BTrI`U6mVٌ]v\&S& eFoe,(nm&>Jdgj',HoJ:rS2r~g,}+{xkm\|]}>+ey}T0e߮6s̲Ż̲:4Y~O螴, R2>B`}G̲ܽf3S0ʿfM V>7ՂYf8G3QuGP2˼{CZ̲OYx5wFoy]lu<E^4D,=e^݈%ET^>ijy"YQdjudgKjedGfG]Y0Yg=y~ց\?n, dQ0ôYf#⮌=,fYQț|]?Y/2c=@1DM,B̲~߂Y.|e.9SGMe|YfK:e>;>[=]z`506C f][aܛ@,YC¼Zj'}f\y%ͲҢYFRV~YF].9τh0&e,A?+|$2+j1#ʲz9__N,u5o5Va}BlkeŽAeGi~t,kT.KV?XT,} !,<(ex[f\a^'P˕|;2#$2*|+eցJe#{-i,sTWeQYH>ThFedbYYYVS,\,:gE(e>?)[c\y)e74 sS.l~5Ў2oa<|rYN`,sDw.Me`Ջ26 Y]kje fG-ua3RMH1 ˧E?eU 2w؝YjtQߧ`qY0х fOœЂYf;e0.|Tje 2# =cʲYԪsUʲ -SɲjuRaYg˲.3p~P3Zz/O,snSO,&es#T~< ,ן_pYl`Y~?*Y(ef=4hFμ栗u2H,Y `-W5뛙BheeufeQ}o?R#՝h2m9}^46l3U{oe6|G |DYfYf Q6lfo62ʅw NY~?)efm‘OE,~f[h9 rT efYfY >i9Js'u0^2ǕiOEW /m|w2dYsS wVþg߇'A|9c#YYY<˼ʳ~sR+gx=Ձˮ1Dgм[8\pyêժlx2; gYGxp9t[6>:zf]@ E,#,3pbņfY&>YkVfwT0Y&h}<aW4Ce,ф,n2X%*"ThY漱YpGd-ցOpPYLIqy,5_eee}.2o-I|~c, fA)0p ̲&!o<զkuܕ8bB\Cl˲}Dbp2{x$e],3@ W0! f?-e,g2-,+AY:02_ ̲ 1ԍe&aeޚoft]y*\ފ]G2MnYf#ը?ْe f=I(qilSj',s5myl*AZ,K<.6ˌo˟\,c 6$2coD;kY*M@(eRg?;920s,suxWų8>Kó,YYx&ߕg7~媦} .OV:|62Uo?,s Y}kt%e#JZY/X:mY5xZϪ9 ,eϾ,BϲL@gY'gYfSídRXZf\ee/@K+su%O-aR6Zf@=RRx/- el=s.r@i8wZhYV" PBzKhgb_;ܔ2;Jۋ2JC2S3q *B3Rjkhi$Z!)bW[w.h'˷.#P^hWDE)e&JJ-*e>9*7D|͞#gk&-WDKw-=hL^6h#UhPD>+AIѲ"Z;XN-EؒB-= Ѳ|Z:2yoݶUB>k*Y^i:;*8DK@x6JX޻!ZPi!Z:0DKF 0_I RShg D˼,!Z:W ;l^)1ZUQFK-NN٪a$Q {ç2W 1NhF??YBP-s\_ZzVвZ0BKͅ!9R3h1 2p%a ˓ $D&)e^As7ˑ3jJK!~}ծKi^e*- yFCKivL)--my]M ґfC2?+?yaZj%,]S8"/0-_:0t*"N˼9̞ rGP={q% j7gxZL^j9eLEH-5dmJ-Zl2#be/#V ,C@-P*P Yj7Zi &ᴌّxNR˹X~H ) -t(e^ۼh\-$25'hFK20ZU-0/H2ꋃ{.I!-E*AZW9QHThyͷqW"d -*eVi$u'Dҩ= -c3wAZKu iOgEZ),]dVi/#TirhY4Z: eKjVmPHl i)J'rfNeeyZi6bZzNid3R82tۨIVЧrZ_Yd~e4k(e6AC@-7H-+VTifΩi8-݀RT2o8TбH!RQH3xYNi-ʸô{&禘Ni"5 ooS'_t (y5#YiÁkuZP9$BTqȖ9ЉmSdK篮jJ-C+Ȗeenٲ2ߔU]''㛲* [ l|d,DSd˼~'l(1d2 2bu lÇX+C>J-U(#UYuDky-k{9>-ٲƛs*Vm&etfKa#?PȖC,wLȖe_d˼v6l1S[fSd|_&uUۗ3Zds?6kIQ9e>=j1‹ײGS^Kg]Z/`\[t2r! 
lZ(9,\`2 7`_Ѭ!nk>dM9< \M=Շ0Y}d[SOu|3FkZiRgHNjYOB{:P-URBY-rȝ$+eY-Kt:V\6RV˼+{ &2W){ʩjĞr#-VҭjYwZ:Zf# \_ZFN$k}cXZ*W9GRYŒ8^]yM*>0DZft=kߧ'<,Gռ`ĖUb̍2[^Cqզ5Y-D>b#Iq?-U)b()3-OԖ%]jK䢶,-K(jGmܑoʫMaר- ڲ?2MH}ٲLcN~[͖eKb`ID2[yL̖%S=1[#G%=tETWNٲ=S-s1(O-G-emO`O"_e^΅̧C}xЖR`mgk@[NesB[: emIՇE,ԖuGmO#GRbu>1GjveQ[YV#ʛ~E_pdC,-S,flg &5ViekGȍeVigC,ҹs-K>d? _d%Ol?1kSs1JWhX҉+Yore'V=Bܟd\y=%W6h\9Vs\q͕"P* u+ ]O@i9+\Rѕ1b+Ί0ZotLzTWhuHGJ"+huTBAeW6kb`+75+m]a승+7ٕJR n+V讘'r0W)\+G ft`,[O PSq"BWؗVJ,rS$Rkͯ+޽+ $d,6XUJx,{yXnG D _G_rVЛ_>~ES~%ͯ\JW$+ہYB8J WFMl9sc)}y@@WFEn>;,-EiO+%15ʞXŏ 2Ը|e U#X2̎( W|ť_`P  WWV'瓶2֦9|::앱qma,{e|+FuWWN$+_׻|+sW^} +dhrҿ;٪89W_QD_1G}eD_i+U˯Hɯ0<_IJ-WA_OMk勯-XA`9}XВXTZ`!\1߀`yyIG,P,vmP H z2OS,,:4_`!eh XH|m?j!Xv`1 O u$ *J`y. <=X `E`C ks XJQr`a9v!XIp  * FXQ@X52u`,,Y!X捜!Xske`0` !XFR˸@/LV,B},K7z2buqXhEIJbXXN`"A8`! V}&X~,51Xz|e r,z\ r+y8a ,.Ӊ@XD`i6ˍ'BK ,*,8@V,kyE egÃ&Xu,lj^'BK+,W*,OhJZaqaH ˤ%@ZaUa9^@(,oU, 2,>`Xai euׁað\[@ CTay1 ,6r~K²g k> ,7fFXF8 K`;IHo@T˨l, ,+ Y3-8Q`qk V,k}P.bKr`qYH!`#ps, dqkօ`dWX>'_5*&XnKegG 7 hEe wXRkEK,~N˂z C`kS`9ˆ(Ⲁs*` `iCecg\``mIE ׄ`q&n,E``lU; `>|x1lɹ` fsso_q)M ݵzIN$Oŵ'CtΩ;^ЈW{2f[ܔ+P+ +!.`1[+ rS%B%b3 GC1@m,A۔"Eb-,ˈ+}`󂩯W_aE__|kW; |'¯07b7̀_ќ_LE~a_y~ekr靄_v.ʶcÄ_Y3WVrQϣb͖${[{%̦W^Aoz˶+V=Coz W| WL^ym^13[{W,,^9b8^_0{LFHa+&,=L dW^&J E_S}w+m4+b@+%mwEKwESwܸvWX]! ["YCreWaW2eWna]Q>]y+z+n+l#ڴٕotvYؕM)J?;dWFY5ƔͮȮ|/+7ˮ,aW˔]٫ܨuOՕD[]CqXRU1W]kugʙ3ՕfXUWl++l DTW(hu0KlrŠX\ 4b]ɾ;m报9mP M, vW+\+vAeihB߁zJ +l !7is1\is6W|jh,Jc#AWL(mt]q 2sP]]Q ]9oܙreLrg2Sc`v%i͕`4rB@Bɕ& mlD5W(\ø\O\++R++)iuE uc+b(7 ꊱW^ dlxE Qx2^^^91WRػhl__6 Ȯ\6Qmv ,Z]ٶ4?Օ-tū+9à+ bl ![=B͕ Is%ᑘ+f϶r}mbl4W:k\1|m1M碹@JS@++ KT+B+}yW U4[p%Y4,pŜW4WӇ"S|g?JPNq>El#ڂ!ZLkX+Z+JVvVȔnkŇ g1JH+oB+ hNYC+*B+ [U. 
<!cR8Gh% PgEUg}Cy s7ⲦԊ"J*R+V^nB`)I=@8rijYjerjɖh+؊C[Kc髸>ح<$ E: `+/; OX1\aёAXa =TX(0RX b9 ͫ\ ɗͫ\~x Ob遼T*օɫWiw_WxWA WN^#ūX -Wğ x@*▓*W7BcWquxvDAWWW9*''^?xˣU"WyTnUHn^O %*'*6i\I> pBy Dba#1yUZWyiIP y6o]gy` 1RW]疺JU,*8t2jXJV_e U|W~}p*o =Uë,^Ey**.S|V%Wqf}D}'*"7+s C8XD`}rVF(tU19BCDXi ae>ewVj`m`VzvCYFwVF[.l#aЊE+f6"E2MX!5Or+b~ J`qJcVX|-teJpϋ82+ՁSF&z*Xx **/ ]ԿEWfT]ݟZWnTWNtBBRs*=]e{ U(h]ym*7c%tsZWGW1D*\UT6m \Ux0VVAU}(h41VmM*/*lyMt4¢p*6ZqU <̇J0Lm]UZi޶*H[GW)\E\\, PJUDUd"U𡫴 2Y|Ҧ⣼J*=ʃb.+JXG{X 9 `msVQߪi] 6%3 MI+{n`eDԊX9cWagWAW9u}.U'`be<8G%׿BR@a͕+-HZ - +?E +ۦ#^ +PnW}oWAzWqԼ Sy6w«dc]Fn*r*WZlLUUFuM~V*@U;'**c _eW1b=4%VƠׄXi&x/+%V>[+nFQĊa B_bp YbMK+$8/B "+ 㩬P(+(+LJeEYaSeo{D c_ T%VܝDbŇby[]+HJ+{ VƐ8BsIWXyO LÆ+7žM~b K$4.JVhaJFWa%V Š@ q-?n + V4QXaBN +Z+ -!+Š9+<&+$Bz +Ņ, 2V*B+hTBIVPZqoKVFMoX":+MԥXff%̑Ɗwny22VƊ%+V-kYhc80V#XH<نTeegwm0UV0WYqkD T ŭG7b1~2+ʬH%2Z( , eV,“YafVʊ5(+8f'@VLYa9 b J=dJ4(+*+k|N 3fELfŚ0eVޛFf dVYI|++ʊUYɕAY̰VVFNEʷYd-nEVvooj6LU2EYll \= +"+ |vh"{2YqB7YёYHQf !+R=2+DnYYZ!"Њ߁V((KhC 5Zhʩlb~H+4">m(i4kV^lEMl% UMX#"[iFJ+OZ!MV*7.VJeeF%T~#+S/p2FV7rW>Jd%FVLY!  
+d¶:+O:+LjYa:e"˥rP bBY醇b)ʊYʊc*+,7B# 6{ @Vkr|TeCYeha "rZS"i#A"NX VTؔVoVVhΊu\)%WrVأjAYqʢbճ 8Y YiۿydVdV,gYq-bI>J0+'50+V׶ƒ_e_ZI06YigpC+۝ [52GC+3VcȠ鬜_&SgFV.O:Њ P VޗvBnbC+l'b B+m RC+vB+Vw6†, {fUB+O 9a@+*pB+`VGiZI0 VH5hiV>hJ+J-ThhUh 5VhMiqH+ՑVOFZVaVzF, [4"\(LVJhiEBh6!)gؐΊ}J8+:+DY/Yْ vV58+R:+T ʡvw=r)έRX|g.H셱0v-e;qaǚ1ru7bɪs~G "rV` pVzs-_ΊJqV1rVV]JmHfEvV8+ᳳB3 DP+\Vb56icVjUVTv$MZNk:X+GD`$2ȝ[!le`[iB3 ;`+[9/[1#lS6 "ڊ2me5BȗAJђUY_xi+ҧ%l嘍"Bb+J%؊V_*/$ RJX[AD[V[m뉶k+hm m.miImdebz+9R/{+x+ୀ\{+ey1&Vx[J{+(gx+Z2oiƈ[[Q=s+Kp+SQRG m rp+ M5 [-D[QiҗAڊvxZmrU3 V*¤ p+t'FV[M\Q+̉\2Е7Hj6&@WTb40Ac-tԕNϊ+20%X])YJ +F]a uE%tK5+# u[5+0VW2ʒ*ͮFE+x]Qٕ2=ؕor)auEEPs+OR]av[P u˙]O]VWTJ]a tEq_{al6Еziѕu0$Еi̘ԕmɰ`nQWH=F]iB:ؕNQ&+c/]!6슊 ® ]ɢ+W@4"hB$1 M+g˙\!ur"W WY2*C@\L$ Q{+!WHA\m=\h +Q5Bƅ̕e+zҷB~Rc0W\q[ЕUAWQ5RB| ` ^!$h ^!xnxE+Ԉk^c,x*Wiه-+ɀ]]GnweV EqWDD`w׸+A +L+]e _ʓ!J;@^1#yLWb~%w2C^њ> r+}}^7mC\C9+ 5C^11"y8f-NJB+UBBԱW:%be|_Al"z e Ww JH_I| {mR̪w"7-MT^,,E2X9w@ ,DxX4eopSeᯐWz++_!BEɍ / ?9+Rį6 + Y¯pBF3+hV++Ԓ_ $}9}c+38+0)WbnM5,:4PjQAXbEX$9\+ `Ri ,)BX}.B? -pA`=I`RI(8ڧRpb$ȏNB+26(NJ}II_7+2GBcmY_"JOJz+9VفȚ]SV\ͤ}f-Z;W&{YWN{!y"{EΦWB H+qЊft^^!{Ee{E7.+e _3_RJ^!{ z+|o 1zWcS_YPD|uރ$by#̺@!hTʼRs^8a,9+ {iYHWEU2w+s=vW&$rW{ர,B(xem^QʨܕN,M^Al+1?GXJ. 
HxwEx+eȋTd]Q4ٕ^8ݜ]kwe~]:}ܕ"wKKJLJ]Bn&a`W( sрB*ԕ#$ڠ+$уpY])m@W,jtț1TNЕs!2B1W8]+dPpBʾ +a+LjU]q!VDAW  'u%\bB ++\^PWMDigs%0\BJ|rcd\ᢀ%\IsE%lEc(s"s_muژ++H+ q+L=`ijsg̕~}.:2W1WF+z&\ 2yK\Ip<3ʜ( Yx+|ĭtv(Uoe '`&VNLy+s[Y{V/[)b pK 0V +Zm4R ŠbW O+epRVF+-#ԥ& B2R[RJnnJjv'rEU^LE)r%0Mp\%ʼa$b/rE!`&W\JN![ qW++zR h p8EK+[\ѵCފWJVzd[N*-D mEu^cθ⡭(m%C}V[)jEPVM9 V+\RZiGE,,ZQk+*nmEVG[)lm +c ` k(`+V<ile lWYD>V VU(lR`+ !L>fTJj+<3±VFMPDmEu92jm3YZVEDGj+k2jm,Oj+D&[[mi\`+=ؐE(MY[(VŸ\ь/P}V(+n"nq+qC^/p+[QMq+1.%!nLZo܊q+ií(JfV$U[B=s+] 0hmEVJeLAHrVF[!ڊV" YKh+h0m_2k #qIS$}Jj+J*d:Wk+H[i)meVTo%f[%oE+S?<͕x2Õ 84抅 +ds\ATR3y-sXf+@P!W!Y+3h⊪Z\+\Pp\WWȘ\4J$%Xj DB!Ua`Ip*+iWWjB$ !o%gX̭B$JsVTqV&pVd[.p+ >WX[!ڊ9WA[Y‰B9n!3 P/ķ7іVT.lV" ) \^؊nnVyX[Q0؊vVńuDž{ u݌"l%#U[фą -V[2kr- ZL]V%ScVeVG%QV ZiT$vkEiX+H4X+l䓚 ֊ʻCzHZni iEVpqV2.*)~$0.P+~X+Bc ʔAV" h+FF4V( 4{!peB0O+"N!|t+sFX\+"P0q$qpWHE\!qeuR\A@\#$qV\Yʨ݈+^5=* M+߰iԕKUW\lu%R4K+|]1#vE fWN*aW|H]!ʢJư+˪GԕJ]VRWtآ+ ]A{]<+T7ԙkՕn͟9u׆]1w vVE]1!w,yY#+|R+|o+}3R|ށ]M!vePB*T xm+(veD£ e R+]'~DJîdEW4슾 +xĮp:avE fWq+ZhRYF&~]Q…e1hfW/抲l𔇹"1Wdbh5*\Qb̕X[SF9Ueː'ݑ7yT-gsE0WLbPpsE6W%5E:)$tJ׷Ir%s6K\Q ]Y\bɠL DK'J'{ȕ^"W",Re$Rd\ Y +hDe\1!rE3s\DWQЕ^+=13 +̛8+$( Z-~i A+ʕbsJF N+"+%u+aL.++އFW!&BWp @W]tEK FWw^,dK+Y]Fwx ԕ)9+5 $rEOU\qE%ʋ \^\GȣA & J;ʾ}Z*J5 Vx+ 5J'C 7\92+ x pES}W@5£#ʠҟVFqH(s+=í("nn tq+ՖVLY[X[WnPnEh+gWE9IՅ[VL42o:^ɭ2y9q+7V#i+4VРV%E2os+UneYkne4Vx[aq"ɕ)L& W?&T5ڐ+ӤSYJ'rEv&WcrȕK`e% N k+hp{-3:L|*, }R d0{+L[1"VF1VK[\[Y ooa\AJGK+ |7ʷ,dyD bj\Ld^ռҒyU@Z \aGJ|+<"po2-Bl$0Bĕ|\A\a~^J\SH\Fq@re˼S+}쿟ϳd{icܾ/9_P_͇~sJRCxT><}w{վ6w%6DŎƌ_b>ݛoݾyݧ)bV|"NgGrl_|o?rS؄gS"hySOoʱ^O>bM)l!m\M< %7ooɍ*Z~Iry[7w|XJ/L7u 5s<|1q}.7u]6(rcD$7 S8.7&Q]ou75:,ߪƣ@K\cF5;𬡮̔#-CW^4^4[pzF+ŽفG/5hүeˣvs.5b܋Ƚhi@CD/44DEKKC^-/A?8L8豆X'a9I&h:Zmuih-/O-Ǯ*FKSqk^ӾzEjt-^52XmZ"/+>U?{I?!-.V[Wz)q?#r:F78$[Nm_sgp~ȭc΁S6:/{-=<@-rWm9:BNk>^s.5tk˹ױbt5r}oi)Ȑ XqS|[]>?UvG&y ' 4s3פO<ymyd4y)kw:"ʚs|ǿ I[GL\=Wܝ}17 6r{\,Ï_0^N~e {pyt ޴~6n[SPt9+}u|}7w3XIr}23+ dxPN M3cڰendstream endobj 489 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4550 >> stream xiTSO s8GU[Vm#Έ2 2C$$$$ ) 3 
A"8kEA{իջoܵڵ]뾟޵!9g??&a,;*.,i ({ =E?V=u8UHO Ô~o'>.]A|zbTDd+W>;]’"1_RbqadPau{Ec&G0l6`GKbdޣBӏw w;/>Yi -bǘ¼O0ێ}c;ۉvaUl/ۏܰtl+6scN,fcs0qyd̄M6acvXeDMRL~gl7f;Fȉҗº?ʆ!j$"^Xϐ)jz55o:2x7X NtOJPXߎ]l:"tB9B3%rO")*D/xL|Is26DĦ~ߵ.7<ŪKe X'U +:Iz Ғz]L#~PRl W=h:PJ dEYO(tmɕuuU5y|MOmd`p-=H0U%\u*=2k^ .i`nO)H%)t8߉x˦WYPr s@&yh3+0϶'zPNCQx9;=t~ '-/yDrٝ>gUs-5o iڼ?J_b4O[ʳD9e5"P唁"8i,1{  9 #sIV/71%eĊS$ @Yݜ} \xxg:=Ǧ}#~M/+I\_}[O hwaAxeUY6U;BVgS׺B44C!fݠ,3?@?hSQtZIOؠ?g_AK+Lp [ Ƣ?-G'8ȿޡo:}[ޘhk;\ APzXTPh H"u p*ii:AZϚY16y&OV6uԔ' % 87tWo\lLب$ 1}4\՝b;[(XlC/Y[LNUahPWhcuQw5Kupt7Yk^WS6\(SA λwDJ?U~ެѧ,}(6t4LL>@XP$Om.+\FRRNP)@X,;7!C^=i8hp#k[N8 Tj5VutMVp4j%)R-%@>;8_Sk0vԠ!#[Y ^BoiU/nEFǠ*|K@]G«J)MqmՁ.>!;Tٞ bG~v8T;y(PdL2lk:-f!yؒ(KU%yc6˜Wz:M6K=tϺ\T1ʮ eFU/+K"G' 0+ %Zf-EL 4>ɁnU2pcBW'&q?XIV k+ZSɤ3P{e^6(f6h궜<ȫ'7PvW*7\zpTQAM7NhH@"M Mz%ٳW# 7bHg-Rm{keyشu+pGv?mvG8@sZӚnI7 gMIULp)3bpUBi ;L45>/K*#jAϬ]E7Ģ}u\X_Yngڐ5ӑ=00|hsnwuo\ ꁈ^Oy;d[±\ơ{!#Bv(~\:rq[@\ba'`վ䧅] ¾r/o ⇒_ҧMmsfO% h'5t[8Y׼ =Pk"[o̍nbwx 3zAQJ6ҺZOii2P .;<}C8a*/#eendstream endobj 490 0 obj << /Filter /FlateDecode /Length 160 >> stream x]O10 @E'B 8QC_Ct;,ֳ ":ր#MEY' ݕ> stream x\IsHv/CAOFnH cfC(Q $Ȫߒ KTu([$~Ju]~Jѯn/_ʶjwQ׍Ηq7Bի(mu-t\X|^UeWU[vϫwb.ږJ?ڲLnmxY~S[qF5uS<~6&m)n{췡km_VVZ0rj m*eMIc"'OaQ/|Ĵ^+ kʦ$N`*2!]+ j]ҼO#u7id#(=΀Eba8%׳3.JXj_hs+v{+mSSho n,dO{iqGhXd9_3oG/iLHpoƓyD_"yEFh %kBJG"!]x꼖u>2F^m⸾I+uHUJsBn>رn 7tUzG5sbO3 )8]}.h0iOǢCTD8j1E3hMߕG=,{D}l O{68c䛦 ^kн@51^NdB"V .9 =)2Ӵ)Hz*3hhp$֠P:Ekjۃ-t)HmlQpHZ*] Zͪ1}}n|v]kKW뺵e |@Fm~CݨjCҞ4f;I8ͶIR_'eFNˍ)B0/] ̅.NB >F\CuqZI9y`Î OSKyA/Ys #j*{m,y#X  >ES8k #(vwgg]trKMgj!e#^"ʭ  wu]PN+j!a <f;#n=&| X9Š`-?ͭ"nm27(;*\dllćF_HD{V^GH,:>Wa/K\7wȞ%6 gͤ(@dr]dhZպqU]|fi i-E hhPԉP&Ro ޏ$¨rڻ#OmaGPJt3rnP3+ԚiU>.]jrIʎjύH$^ڍq#@N> G uˎp~q!$xi5h ӱW]_DWD?-[p.p $R/=1V [n8*DE&jm|'HhJn5g VqW JO?| :ázB(hriՎrV!!ñ]-x%+`0d/W ƶݦ>ߑ] L޿~?1uvOsd0!>69Lp/uBX;7DbO3v#we8 4.>%Q9w"l[4ƽ ׀ɳ&\C!Z,,0a%St(^$=V {BCa R>s[N;8 y0ۥy;.$Zс39L؄v*IC5rII q:$AnY/ IfGHMUx# 1aŢ49!BdS:nۢH%b z߅GCi'gSQYqE &E8 8:Z6Py^x3ƌPfJIF|cI !5Uڹ @%,+N#/`.a9rG.Ȏ>P0PA(~{\#sPKPF >>gُp\ QhA:s 
m(s($78W4zb͸̍0Ͷym3䇐tA܁Ө4z2ـK/{DQ;T $̵EA0bLTuA6>E ӣa,5_G,]ed8 3ݒpA/OG7yi1deQ$c)`#F{u ЗԺg^^'6O! a3ytiii 3ꌓ3K=BFWbԓ (O"wIղo N6)$h,6*dv/,<0ltSSRg)9sr.->񏋡(yhPqm;[GpYMZβDLD,gXHtnZyr_fw\y杒,K1t&T#܅m I0Y7G뵈!h)qsÈ] &al7QP$pTMS%Kt IN]Mn> cФd1OBOߗ 짘2XRjЉ(KZ<@J-p:cZ=vhtkTJbOsEoP3KȰc\ ,́nMqE9T.`B ?^8 ot&RxK3ꌣ$*ox*OqO3ŋi|0t {t3:ÁBp>&ŨB+xsc).$y" ,2rY1K S#QB/UEw-ZWK]\JJ7)Sy[uۣ)5vacO3sw25V?w;4r1AOUi3V9% kPemmc VIceh H_Dg݅lS6Iޅa1I! v?/dl)5M%m` |Va- ]ZHC̥9MS™sؼmg^ %4b&]*%O s0WY\X(ͥk1A?dz}4[/81Fw|gD;ϡeUyytR9ݲ5yPytbp5"Ș<0O <}KV#MQ&g\1%C0 ;!f|-kCRXCGQOև$΅'8#XPEG:ʞ'Y0h߄C)RCEء)M'Rϰ- M4]ߡAi D:b1k֬RCcCcopeG m}su-%O ɨ<|; [0i%&І͔[LZ 2G;s.ˢiը-0e"g&.]tȚ"pv``a2#,5-r>TESrVcx.d06bEޝҌؿj9 Å(o|E.>̭セqKb㳌Hryi;a,TD.0L\yodS(AWp9B Uق\bn5\T T2-Ve݅^^ (Tv?̈́pLnXs5"=R( QJl"1{ڧ^>T$FXx9VyѢAtG lg^"\&dt=߾Lė!BYt *ǁ=pFj\.en nfCVw#p%fZN_* sh̸k:t%( &ʞ1zϖFFQkFFpyw !oIť#g" Yڏ4f?[/ccz0n'V@`M+0, -$b&@V4${\(:Yhs1Kn˿9-rVoy8 = E6剙gFg-v61Ӕ&u;"Xh~HC0ۘM[Op.PiG3N>:-2ʹK*G{u+YivRo| " ewYC֘ iyA7DIXm_ sIx:rγGթj|#gn=1 u/^"bY7()Ts4}/dim+W:3}|-9V5N⋌%hw+x3MNicfDb&X.+*yT9 b% _N/2@(ІoJ4%($dn,pk>+/h%sy÷L(>4E> "$pwF$.Y2s{~ϡUݾZuſxx:i@sq]#QF$! 
_e^p,(bpaMد,%v!U`3."yͿ,!|sx~KILFX%=YpZ*T ..D@ >$13KnrɥׇT뎇^Ɍqt?k NI]^b9;Xi9儞 _𢪤.s2{;gOZEN b$CTUbo!l!̾dndq`x\'ϏPY߈,k >^bvw+L::BЏיUaÀ՛6\GڜB}pxA!BrM"(#w•Xz^5(XQ#swf4o"_*}0j7~$ DZ\]xpbngŨ!Wb4fj8$דfL,B5STija?yK/ٲi-q,\ \# {TPy%h3emvV>Z2bZZ זb\-csU4o`XqP8X>2R+CэTlyC @U6P7ը\:KH;Mj\{tҰ_M܏?j\w<WbTCС{_xpWOഄz1 k$gSAKsA+]tb*R 7 vO>zjן endstream endobj 492 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 379 >> stream xcd`ab`dd N+64uIf!CO/VY~'ٴۭ|<<,.={3#cxJs~AeQfzF.THTpSJL//THKQS/ f*h)$f$))F(+k8a``` b`0f`bdd?Ӿ ʾг*URmf]@{8-l_bOVAn7Ʃ33u/<6NJ?DjC^];ǂXOy҅?|\v s=g7y'Ly7}:Me`endstream endobj 493 0 obj << /Filter /FlateDecode /Length 2529 >> stream xZK*2 =(RZɖ6QUV9`Z!cqȿ=t04P˵\L k_&z~"ib5}x:ˠ EӷfJmzAjr~I +og/rl9_&hY]}Vz6WJ\'\^0qӿDIgU:0f1cR2OV0目>Z ˖+wZz*mt\dq^VˣPO>簡qDCf9y53* q}j-ʯ"&xnzh mm dP&l/` zmn: v&-|}S E!sgYݨ۝h(4S#w{4/܁:h`E7IQ8"@ Q%."J"A%%kXHcX4GA8ph0T!5fUo`% ʇ~q.uN}2^$\}ov{bYnGr\03; $0&9bC\P C TnWɶX}X`#BYq؛)+i1aLȳdr K #vA6˿ԑAcdo@= C[;t\tA*.-X:F ȿ)͜*2 t̹lD.bA*j,.!g|h$#5>&#Kw]._d^BkV^Ȝ$Z-ja) 2* K+kby7DT]tuo&iD$3?4J4,h/;h@Xk$6 ( (aO|VZ#%f]h7eގ5!Y0(FUM0~a|?PEͳNW'bNӳ>HJS,!Ȉv7d5цٶM!4:]7U@baYq:&aDQ&X6딊5Df[h}t3뼻waRęA#~d Q]Zn !rMiojI:vu}9}C& f[<۔O ,Awx0.g+?ゆǯr=,g䶈K+U{@fǯ $$'%PeJ(/ #R?. 
#d{]$e^Q ,{O" >n2-*l\q\s#jI;Rq=Fr/1qy+//'mS%dWdlLsbw1;7$a>'a9/t;Z\ǜ }?s-Ԧt3oE`wiAghU> stream xZs翐fo96&[It,fv ɖP Bvտ@嶣oigt_٧uOfb5;M(xosCZr9_mf꟰3Yeaz\.,\ܒz_cg[(狥"sN^ 2krC34(Ce],9̒uSZi1V5zTj59\J'/o7)ɛI^u!Tf|)E&W29@ "XZ^ Eqf:\ii9 ;7=P")?f80.(<%Yf曙*t`={sYPcʠpt͝OH %+RM 0);J6Ɇuդqh ;$tuN;d r[7E2h.@X,DGGVh$hd2Ŕb 3),V]K`P=Z(D"S #rĝ"'s`B>Ui2ҀC`꼆eY=_Wn+\T)x/RTXG\rcA_Np^vSUb6Np͏3 ]j#0EPƕqwNHyM=bM#eȃ%{qKn])W"vti)_*r}8 IA)Y?]b] {,`R bm[cNTr l ^Uˇ!)iQI:.7e3h  c] yہ㈈%AɘXxL3a~5bf P{Lmn5@[ծ;\!dê[,pa*.oHc?Lm=RgJ[G$& 9,ٱeJ~D7>;IH302ڈ{Mb$t} @D @4Ŝ]w>XhFdZn #w.}׌cL`ePL$i*ߛ6T6sMϝcUyǐ*F@L$LR t46uDR2hE}&I1Ul-4fe}>T8Ay2N8OH3ZwRUVP9i1h.d TImޫpM4;s~dt]!pD .wMeȫ>U4P3yFh'Vfh.p!qI AN\RPE9'OFNT NL0r|8ACLBh]M:-w \ү*xN% \i->OZp($9[JƵDe۪gwaRnDZaZO Hϡ@ti0K/.xK~F(X@ 1!\C Eņ|X#!fv?G`CGrlX!YF29N2LfR2V|ŠpedV&S A4C|q,F%z|9IGDre#^v6} qS]HvK;}AQW~L ,S4qL+׀׈:@^Ɔ˴oo N])Ʋ >?z8J_<^G [c$z(@}|rzجEf09nu>0!ȗm&>g'tNŮ&_.rh'Ov$&n˨@Svo&|Lχ&m> C6#P9N['SyFY]q$gT#*$&y5ؖ`6l0~Ge^colwIfSI<F7PR,/$^;PoeΟ5߂,͹Y2V} sr^G|(6eo5f ]0q+1xڐQ)wHAR?B}G8J-@FxN#ϛNq'nvL:2k@GPhIx9-K(^ >+̭XXFV6E'U2N@AOgyP8f{|D8Mvu1ƕtPk^twBi\~Sӄ^"WzIᕣ<+ TwPDGˏ^KE]6٠y[.4}|k K y[La-î`GelC y&]FCF0:SBivjRʽNMB6g{(vK@#|>T// rjr-rN!E ӯendstream endobj 495 0 obj << /Filter /FlateDecode /Length 1924 >> stream xYKFr/6#B(D`~z~Zʑ Ðb﫮.<%N W n&?O|W6OS&$JLSbtANƄcB0<ĠrFh.qaB~׻m썦Z +`UQ06Q"0!>{dMvz 8Jc%4L!D_'ac : BTSެ"WE&i>ҽ|_O6v% |n'X@ڍg6Ea<@JRkq1:1"TnsLaC8JMzݦ-*%OT`Fimޠ:_{Xd-QpI9j>Xw@Oeʚ|F%=m-ʛ gW"+AYmA5ə&|H9u1r;4jAv`Udz!&,&TJPbҫ*Y(J*#wW97PRU-*jU@LKt3dW @0[ϟ?hiq.p_իϾW/_>ڜ~W/kcrZpvU.>CrׇUYTPj,`ϙۑt GRq.Np>׆v f9=وz|&4N3KFCsaKSXCüu@XVОvг!cX.8Knq龫h@,%[ hIT~w,og Ee5Pmq\2}܆9xu;XZ'tn qsUݴ%N^AyXlӐ&'3;'6sAKK$o^L柿/6NBm(w픨U۴NeC\('9DP/z]7}]AQQܜ'H{8׵4uiǿDZ+e3 C~ͱ|%]Oᶧg PP @ }:U_ezd7a2 ⮻vO*Ԭh:*]Uoy<j15St 0RJpKP7b}bLa*\H&Q8j``aB-؄36k?C 9pNFɔŐE7 n~.endstream endobj 496 0 obj << /Type /XRef /Length 312 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 497 /ID [<23c60c9398c2c0e5b0120aec90481cb4>] >> stream xcb&F~0 $8JP?j -^|M#Ϡqd50%ܘ<F6NM 
#M #{r=- qC R@ DrHD*E.HM@Qv|F DJ7HV=)zD,b"s0 "9A$؄ {>d "ق@v\ RBDJ ֻ f{ 6yXZ6NH}`M; 6?H:Cr3صi`LpkH_֥K9h]s > endstream endobj startxref 770342 %%EOF brms/inst/doc/brms_threading.R0000644000176200001440000003670114504270201016054 0ustar liggesusersparams <- list(EVAL = TRUE) ## ---- SETTINGS-knitr, include=FALSE----------------------------------------------------- stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(ggplot2) library(brms) theme_set(theme_default()) ## ---- fake-data-sim, include=FALSE, eval=TRUE------------------------------------------- set.seed(54647) # number of observations N <- 1E4 # number of group levels G <- round(N / 10) # number of predictors P <- 3 # regression coefficients beta <- rnorm(P) # sampled covariates, group means and fake data fake <- matrix(rnorm(N * P), ncol = P) dimnames(fake) <- list(NULL, paste0("x", 1:P)) # fixed effect part and sampled group membership fake <- transform( as.data.frame(fake), theta = fake %*% beta, g = sample.int(G, N, replace=TRUE) ) # add random intercept by group fake <- merge(fake, data.frame(g = 1:G, eta = rnorm(G)), by = "g") # linear predictor fake <- transform(fake, mu = theta + eta) # sample Poisson data fake <- transform(fake, y = rpois(N, exp(mu))) # shuffle order of data rows to ensure even distribution of computational effort fake <- fake[sample.int(N, N),] # drop not needed row names rownames(fake) <- NULL ## ---- model-poisson, include=FALSE------------------------------------------------------ model_poisson <- brm( y ~ 1 + x1 + x2 + (1 | g), data = fake, family = poisson(), iter = 500, # short sampling to speedup example chains = 2, prior = prior(normal(0,1), class = b) + prior(constant(1), class = sd, group = g), backend = "cmdstanr", threads = threading(4), save_pars = 
save_pars(all = TRUE) ) ## ---- benchmark, include=FALSE---------------------------------------------------------- # Benchmarks given model with cross-product of tuning parameters CPU # cores, grainsize and iterations. Models are run with either static # or non-static scheduler and initial values are set by default to 0 on the # unconstrained scale. Function returns a data-frame with the # cross-product of the tuning parameters and as result column the # respective runtime. benchmark_threading <- function(model, cores = 1, grainsize = 1, iter = 100, static = FALSE) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- list(extract_draw(sims, 1)) scaling_model <- update( model, refresh = 0, threads = threading(1, grainsize = grainsize[1], static = static), chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark <- function(cores, size, iter) { bench_fit <- update( scaling_model, warmup=0, iter = iter, chains = 1, seed = 1234, init = init, refresh = 0, save_warmup=TRUE, threads = threading(cores, grainsize = size, static = static), inv_metric=winfo$inv_metric[[1]], step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } cases <- expand.grid(cores = cores, grainsize = grainsize, iter = iter) res <- with(cases, mapply(run_benchmark, cores, grainsize, iter)) cbind(cases, as.data.frame(t(res))) } benchmark_reference <- function(model, iter=100, init=0) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- list(extract_draw(sims, 1)) ref_model <- update( model, refresh = 0, threads = NULL, chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark_ref <- function(iter_bench) { bench_fit <- update( ref_model, warmup=0, iter = iter_bench, chains = 1, seed = 1234, init = init, refresh = 0, inv_metric=winfo$inv_metric[[1]], 
step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } ref <- sapply(iter, run_benchmark_ref) ref <- cbind(as.data.frame(t(ref)), iter=iter) ref } extract_warmup_info <- function(bfit) { adapt <- lapply(rstan::get_adaptation_info(bfit$fit), strsplit, split="\\n") step_size <- lapply(adapt, function(a) as.numeric(strsplit(a[[1]][[1]], " = ")[[1]][2])) inv_metric <- lapply(adapt, function(a) as.numeric(strsplit(sub("^# ", "", a[[1]][[3]]), ", ")[[1]])) list(step_size=step_size, inv_metric=inv_metric) } extract_draw <- function(sims, draw) { lapply(sims, brms:::slice, dim = 1, i = draw, drop = TRUE) } ## ---- eval=FALSE------------------------------------------------------------------------ # fit_serial <- brm( # count ~ zAge + zBase * Trt + (1|patient), # data = epilepsy, family = poisson(), # chains = 4, cores = 4, backend = "cmdstanr" # ) ## ---- eval=FALSE------------------------------------------------------------------------ # fit_parallel <- update( # fit_serial, chains = 2, cores = 2, # backend = "cmdstanr", threads = threading(2) # ) ## --------------------------------------------------------------------------------------- kable(head(fake, 10), digits = 3) ## ---- eval=FALSE------------------------------------------------------------------------ # model_poisson <- brm( # y ~ 1 + x1 + x2 + (1 | g), # data = fake, # family = poisson(), # iter = 500, # short sampling to speedup example # chains = 2, # prior = prior(normal(0,1), class = b) + # prior(constant(1), class = sd, group = g), # backend = "cmdstanr", # threads = threading(4), # save_pars = save_pars(all = TRUE) # ) ## ---- chunking-scale, message=FALSE, warning=FALSE, results='hide'---------------------- chunking_bench <- transform( data.frame(chunks = 4^(0:3)), grainsize = ceiling(N / chunks) ) iter_test <- c(10, 
20, 40) # very short test runs scaling_chunking <- benchmark_threading( model_poisson, cores = 1, grainsize = chunking_bench$grainsize, # test various grainsizes iter = iter_test, static = TRUE # with static partitioner ) # run as reference the model *without* reduce_sum ref <- benchmark_reference(model_poisson, iter_test) # for additional data munging please refer to the appendix ## ---- munge-chunking-scaling, include=FALSE--------------------------------------------- scaling_chunking <- merge(scaling_chunking, chunking_bench, by = "grainsize") single_chunk <- transform( subset(scaling_chunking, chunks == 1), num_leapfrog_single = num_leapfrog, num_leapfrog = NULL, runtime_single = runtime, runtime = NULL, grainsize = NULL, chunks=NULL ) scaling_chunking <- transform( merge(scaling_chunking, single_chunk), slowdown = runtime/runtime_single, iter = factor(iter), runtime_single = NULL ) ref <- transform(ref, iter=factor(iter)) ## --------------------------------------------------------------------------------------- ggplot(scaling_chunking) + aes(chunks, slowdown, colour = iter, shape = iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.8, 2.5, by=0.1)) + ggtitle("Slowdown with increasing number of chunks") ggplot(scaling_chunking) + aes(chunks, 1E3 * runtime/num_leapfrog, colour = iter, shape=iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.1, 2.0, by=0.1)) + geom_hline(data=ref, aes(yintercept=1E3 * runtime/num_leapfrog, colour=iter), linetype=I(2)) + ggtitle("Time per leapfrog step vs number of chunks", "Dashed line is reference model without reduce_sum") + ylab("Time per leapfrog step [ms]") ## ---- speedup-scale, message=FALSE, warning=FALSE, results='hide'----------------------- num_cpu <- parallel::detectCores(logical = FALSE) num_cpu_logical <- parallel::detectCores(logical = TRUE) grainsize_default <- ceiling(N / (2 * num_cpu)) 
cores <- c(2^seq(0, floor(log2(num_cpu_logical))), num_cpu, num_cpu_logical) cores <- sort(unique(cores)) grainsize <- c(grainsize_default, grainsize_default/2, grainsize_default/4) grainsize <- round(grainsize) iter_scaling <- 20 scaling_cores <- benchmark_threading( model_poisson, cores = cores, grainsize = grainsize, iter = iter_scaling, static = FALSE ) single_core <- transform( subset(scaling_cores, cores == 1), runtime_single = runtime, num_leapfrog=NULL, runtime=NULL, cores = NULL ) scaling_cores <- transform( merge(scaling_cores, single_core), speedup = runtime_single/runtime, grainsize = factor(grainsize) ) ## --------------------------------------------------------------------------------------- ggplot(scaling_cores) + aes(cores, runtime, shape = grainsize, color = grainsize) + geom_vline(xintercept = num_cpu, linetype = 3) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_cores$cores) + scale_y_log10(breaks=seq(0.1, 1.4, by=0.1)) + theme(legend.position = c(0.85, 0.8)) + geom_hline(data=subset(ref, iter==iter_scaling), aes(yintercept=runtime), linetype=I(2)) + ggtitle("Runtime with varying number of cores", "Dashed line is reference model without reduce_sum") ggplot(scaling_cores) + aes(cores, speedup, shape = grainsize, color = grainsize) + geom_abline(slope = 1, intercept = 0, linetype = 2) + geom_vline(xintercept = num_cpu, linetype = 3) + geom_line() + geom_point() + scale_x_log10(breaks=scaling_cores$cores) + scale_y_log10(breaks=scaling_cores$cores) + theme(aspect.ratio = 1) + coord_fixed(xlim = c(1, num_cpu_logical), ylim = c(1, num_cpu_logical)) + ggtitle("Relative speedup vs 1 core") ## --------------------------------------------------------------------------------------- kable(scaling_cores, digits = 2) ## ---- eval=FALSE------------------------------------------------------------------------ # set.seed(54647) # # number of observations # N <- 1E4 # # number of group levels # G <- round(N / 10) # # number of predictors # P <- 3 # # 
regression coefficients # beta <- rnorm(P) # # # sampled covariates, group means and fake data # fake <- matrix(rnorm(N * P), ncol = P) # dimnames(fake) <- list(NULL, paste0("x", 1:P)) # # # fixed effect part and sampled group membership # fake <- transform( # as.data.frame(fake), # theta = fake %*% beta, # g = sample.int(G, N, replace=TRUE) # ) # # # add random intercept by group # fake <- merge(fake, data.frame(g = 1:G, eta = rnorm(G)), by = "g") # # # linear predictor # fake <- transform(fake, mu = theta + eta) # # # sample Poisson data # fake <- transform(fake, y = rpois(N, exp(mu))) # # # shuffle order of data rows to ensure even distribution of computational effort # fake <- fake[sample.int(N, N),] # # # drop not needed row names # rownames(fake) <- NULL ## ---- eval=FALSE------------------------------------------------------------------------ # model_poisson <- brm( # y ~ 1 + x1 + x2 + (1 | g), # data = fake, # family = poisson(), # iter = 500, # short sampling to speedup example # chains = 2, # prior = prior(normal(0,1), class = b) + # prior(constant(1), class = sd, group = g), # backend = "cmdstanr", # threads = threading(4), # save_pars = save_pars(all = TRUE) # ) ## ---- eval=FALSE------------------------------------------------------------------------ # # Benchmarks given model with cross-product of tuning parameters CPU # # cores, grainsize and iterations. Models are run with either static # # or non-static scheduler and initial values are set by default to 0 on the # # unconstrained scale. Function returns a data-frame with the # # cross-product of the tuning parameters and as result column the # # respective runtime. 
# benchmark_threading <- function(model, cores = 1, grainsize = 1, iter = 100, # static = FALSE) { # # winfo <- extract_warmup_info(model) # sims <- rstan::extract(model$fit) # init <- list(extract_draw(sims, 1)) # # scaling_model <- update( # model, refresh = 0, # threads = threading(1, grainsize = grainsize[1], static = static), # chains = 1, iter = 2, backend = "cmdstanr" # ) # # run_benchmark <- function(cores, size, iter) { # bench_fit <- update( # scaling_model, warmup=0, iter = iter, # chains = 1, seed = 1234, init = init, refresh = 0, save_warmup=TRUE, # threads = threading(cores, grainsize = size, static = static), # inv_metric=winfo$inv_metric[[1]], # step_size=winfo$step_size[[1]], # adapt_engaged=FALSE # ) # lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) # elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) # # c(num_leapfrog=lf, runtime=elapsed) # } # # cases <- expand.grid(cores = cores, grainsize = grainsize, iter = iter) # res <- with(cases, mapply(run_benchmark, cores, grainsize, iter)) # cbind(cases, as.data.frame(t(res))) # } # # benchmark_reference <- function(model, iter=100, init=0) { # winfo <- extract_warmup_info(model) # sims <- rstan::extract(model$fit) # init <- list(extract_draw(sims, 1)) # # ref_model <- update( # model, refresh = 0, threads = NULL, # chains = 1, iter = 2, backend = "cmdstanr" # ) # # run_benchmark_ref <- function(iter_bench) { # bench_fit <- update( # ref_model, warmup=0, iter = iter_bench, # chains = 1, seed = 1234, init = init, refresh = 0, # inv_metric=winfo$inv_metric[[1]], # step_size=winfo$step_size[[1]], # adapt_engaged=FALSE # ) # # lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) # elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) # # c(num_leapfrog=lf, runtime=elapsed) # } # # ref <- sapply(iter, run_benchmark_ref) # ref <- cbind(as.data.frame(t(ref)), iter=iter) # ref # } # # extract_warmup_info 
<- function(bfit) { # adapt <- lapply(rstan::get_adaptation_info(bfit$fit), strsplit, split="\\n") # step_size <- lapply(adapt, function(a) as.numeric(strsplit(a[[1]][[1]], " = ")[[1]][2])) # inv_metric <- lapply(adapt, function(a) as.numeric(strsplit(sub("^# ", "", a[[1]][[3]]), ", ")[[1]])) # list(step_size=step_size, inv_metric=inv_metric) # } # # extract_draw <- function(sims, draw) { # lapply(sims, brms:::slice, dim = 1, i = draw, drop = TRUE) # } ## ---- eval=FALSE------------------------------------------------------------------------ # scaling_chunking <- merge(scaling_chunking, chunking_bench, by = "grainsize") # # single_chunk <- transform( # subset(scaling_chunking, chunks == 1), # num_leapfrog_single = num_leapfrog, num_leapfrog = NULL, # runtime_single = runtime, runtime = NULL, # grainsize = NULL, chunks=NULL # ) # # scaling_chunking <- transform( # merge(scaling_chunking, single_chunk), # slowdown = runtime/runtime_single, # iter = factor(iter), # runtime_single = NULL # ) # # ref <- transform(ref, iter=factor(iter)) brms/inst/doc/brms_families.html0000644000176200001440000006443614504266211016457 0ustar liggesusers Parameterization of Response Distributions in brms

Parameterization of Response Distributions in brms

Paul Bürkner

2023-09-25

The purpose of this vignette is to discuss the parameterizations of the families (i.e., response distributions) used in brms. For a more general overview of the package see vignette("brms_overview").

Notation

Throughout this vignette, we denote values of the response variable as \(y\), a density function as \(f\), and use \(\mu\) to refer to the main model parameter, which is usually the mean of the response distribution or some closely related quantity. In a regression framework, \(\mu\) is not estimated directly but computed as \(\mu = g(\eta)\), where \(\eta\) is a predictor term (see help(brmsformula) for details) and \(g\) is the response function (i.e., inverse of the link function).

Location shift models

The density of the gaussian family is given by \[ f(y) = \frac{1}{\sqrt{2\pi}\sigma} \exp\left(-\frac{1}{2}\left(\frac{y - \mu}{\sigma}\right)^2\right) \]

where \(\sigma\) is the residual standard deviation. The density of the student family is given by \[ f(y) = \frac{\Gamma((\nu + 1)/2)}{\Gamma(\nu/2)} \frac{1}{\sqrt{\nu\pi}\sigma}\left(1 + \frac{1}{\nu} \left(\frac{y - \mu}{\sigma}\right)^2\right)^{-(\nu+1)/2} \]

\(\Gamma\) denotes the gamma function and \(\nu > 1\) are the degrees of freedom. As \(\nu \rightarrow \infty\), the student distribution becomes the gaussian distribution. The density of the skew_normal family is given by \[ f(y) = \frac{1}{\sqrt{2\pi}\omega} \exp\left(-\frac{1}{2} \left(\frac{y - \xi}{\omega}\right)^2 \right) \left(1 + \text{erf} \left( \alpha \left(\frac{y - \xi}{\omega \sqrt{2}} \right) \right) \right) \]

where \(\xi\) is the location parameter, \(\omega\) is the positive scale parameter, \(\alpha\) the skewness parameter, and \(\text{erf}\) denotes the error function of the gaussian distribution. To parameterize the skew-normal distribution in terms of the mean \(\mu\) and standard deviation \(\sigma\), \(\omega\) and \(\xi\) are computed as \[ \omega = \frac{\sigma}{\sqrt{1 - \frac{2}{\pi} \frac{\alpha^2}{1 + \alpha^2}}} \]

\[ \xi = \mu - \omega \frac{\alpha}{\sqrt{1 + \alpha^2}} \sqrt{\frac{2}{\pi}} \]

If \(\alpha = 0\), the skew-normal distribution becomes the gaussian distribution. For location shift models, \(y\) can be any real value.

Binary and count data models

The density of the binomial family is given by \[ f(y) = {N \choose y} \mu^{y} (1-\mu)^{N - y} \] where \(N\) is the number of trials and \(y \in \{0, ... , N\}\). When all \(N\) are \(1\) (i.e., \(y \in \{0,1\}\)), the bernoulli distribution for binary data arises.

For \(y \in \mathbb{N}_0\), the density of the poisson family is given by \[ f(y) = \frac{\mu^{y}}{y!} \exp(-\mu) \] The density of the negbinomial (negative binomial) family is \[ f(y) = {y + \phi - 1 \choose y} \left(\frac{\mu}{\mu + \phi}\right)^{y} \left(\frac{\phi}{\mu + \phi}\right)^\phi \] where \(\phi\) is a positive precision parameter. For \(\phi \rightarrow \infty\), the negative binomial distribution becomes the poisson distribution. The density of the geometric family arises if \(\phi\) is set to \(1\).

Time-to-event models

With time-to-event models we mean all models that are defined on the positive reals only, that is \(y \in \mathbb{R}^+\). The density of the lognormal family is given by \[ f(y) = \frac{1}{\sqrt{2\pi}\sigma y} \exp\left(-\frac{1}{2}\left(\frac{\log(y) - \mu}{\sigma}\right)^2\right) \] where \(\sigma\) is the residual standard deviation on the log-scale. The density of the Gamma family is given by \[ f(y) = \frac{(\alpha / \mu)^\alpha}{\Gamma(\alpha)} y^{\alpha-1} \exp\left(-\frac{\alpha y}{\mu}\right) \] where \(\alpha\) is a positive shape parameter. The density of the weibull family is given by \[ f(y) = \frac{\alpha}{s} \left(\frac{y}{s}\right)^{\alpha-1} \exp\left(-\left(\frac{y}{s}\right)^\alpha\right) \] where \(\alpha\) is again a positive shape parameter and \(s = \mu / \Gamma(1 + 1 / \alpha)\) is the scale parameter to that \(\mu\) is the mean of the distribution. The exponential family arises if \(\alpha\) is set to \(1\) for either the gamma or Weibull distribution. The density of the inverse.gaussian family is given by \[ f(y) = \left(\frac{\alpha}{2 \pi y^3}\right)^{1/2} \exp \left(\frac{-\alpha (y - \mu)^2}{2 \mu^2 y} \right) \] where \(\alpha\) is a positive shape parameter. The cox family implements Cox proportional hazards model which assumes a hazard function of the form \(h(y) = h_0(y) \mu\) with baseline hazard \(h_0(y)\) expressed via M-splines (which integrate to I-splines) in order to ensure monotonicity. The density of the cox model is then given by \[ f(y) = h(y) S(y) \] where \(S(y)\) is the survival function implied by \(h(y)\).

Extreme value models

Modeling extremes requires special distributions. One may use the weibull distribution (see above) or the frechet distribution with density \[ f(y) = \frac{\nu}{s} \left(\frac{y}{s}\right)^{-1-\nu} \exp\left(-\left(\frac{y}{s}\right)^{-\nu}\right) \] where \(s = \mu / \Gamma(1 - 1 / \nu)\) is a positive scale parameter and \(\nu > 1\) is a shape parameter so that \(\mu\) predicts the mean of the Frechet distribution. A generalization of both distributions is the generalized extreme value distribution (family gen_extreme_value) with density \[ f(y) = \frac{1}{\sigma} t(y)^{\xi + 1} \exp(-t(y)) \] where \[ t(y) = \left(1 + \xi \left(\frac{y - \mu}{\sigma} \right)\right)^{-1 / \xi} \] with positive scale parameter \(\sigma\) and shape parameter \(\xi\).

Response time models

One family that is especially suited to model reaction times is the exgaussian (‘exponentially modified Gaussian’) family. Its density is given by

\[ f(y) = \frac{1}{2 \beta} \exp\left(\frac{1}{2 \beta} \left(2\xi + \sigma^2 / \beta - 2 y \right) \right) \text{erfc}\left(\frac{\xi + \sigma^2 / \beta - y}{\sqrt{2} \sigma} \right) \] where \(\beta\) is the scale (inverse rate) of the exponential component, \(\xi\) is the mean of the Gaussian component, \(\sigma\) is the standard deviation of the Gaussian component, and \(\text{erfc}\) is the complementary error function. We parameterize \(\mu = \xi + \beta\) so that the main predictor term equals the mean of the distribution.

Another family well suited for modeling response times is the shifted_lognormal distribution. Its density equals that of the lognormal distribution except that the whole distribution is shifted to the right by a positive parameter called ndt (for consistency with the wiener diffusion model explained below).

A family concerned with the combined modeling of reaction times and corresponding binary responses is the wiener diffusion model. It has four model parameters each with a natural interpretation. The parameter \(\alpha > 0\) describes the separation between two boundaries of the diffusion process, \(\tau > 0\) describes the non-decision time (e.g., due to image or motor processing), \(\beta \in [0, 1]\) describes the initial bias in favor of the upper alternative, and \(\delta \in \mathbb{R}\) describes the drift rate to the boundaries (a positive value indicates a drift towards the upper boundary). The density for the reaction time at the upper boundary is given by

\[ f(y) = \frac{\alpha}{(y-\tau)^{3/2}} \exp \! \left(- \delta \alpha \beta - \frac{\delta^2(y-\tau)}{2}\right) \sum_{k = - \infty}^{\infty} (2k + \beta) \phi \! \left(\frac{2k + \alpha \beta}{\sqrt{y - \tau}}\right) \]

where \(\phi(x)\) denotes the standard normal density function. The density at the lower boundary can be obtained by substituting \(1 - \beta\) for \(\beta\) and \(-\delta\) for \(\delta\) in the above equation. In brms the parameters \(\alpha\), \(\tau\), and \(\beta\) are modeled as auxiliary parameters named bs (‘boundary separation’), ndt (‘non-decision time’), and bias respectively, whereas the drift rate \(\delta\) is modeled via the ordinary model formula that is as \(\delta = \mu\).

Quantile regression

Quantile regression is implemented via family asym_laplace (asymmetric Laplace distribution) with density

\[ f(y) = \frac{p (1 - p)}{\sigma} \exp\left(-\rho_p\left(\frac{y - \mu}{\sigma}\right)\right) \] where \(\rho_p\) is given by \(\rho_p(x) = x (p - I_{x < 0})\) and \(I_A\) is the indicator function of set \(A\). The parameter \(\sigma\) is a positive scale parameter and \(p\) is the quantile parameter taking on values in \((0, 1)\). For this distribution, we have \(P(Y < g(\eta)) = p\). Thus, quantile regression can be performed by fixing \(p\) to the quantile of interest.

Probability models

The density of the Beta family for \(y \in (0,1)\) is given by \[ f(y) = \frac{y^{\mu \phi - 1} (1-y)^{(1-\mu) \phi-1}}{B(\mu \phi, (1-\mu) \phi)} \] where \(B\) is the beta function and \(\phi\) is a positive precision parameter. A multivariate generalization of the Beta family is the dirichlet family with density \[ f(y) = \frac{1}{B((\mu_{1}, \ldots, \mu_{K}) \phi)} \prod_{k=1}^K y_{k}^{\mu_{k} \phi - 1}. \] The dirichlet family is implemented with the multivariate logit link function so that \[ \mu_{j} = \frac{\exp(\eta_{j})}{\sum_{k = 1}^{K} \exp(\eta_{k})} \] For reasons of identifiability, \(\eta_{\rm ref}\) is set to \(0\), where \({\rm ref}\) is one of the response categories chosen as reference.

An alternative to the dirichlet family is the logistic_normal family with density \[ f(y) = \frac{1}{\prod_{k=1}^K y_k} \times \text{multivariate_normal}(\tilde{y} \, | \, \mu, \sigma, \Omega) \] where \(\tilde{y}\) is the multivariate logit transformed response \[ \tilde{y} = (\log(y_1 / y_{\rm ref}), \ldots, \log(y_{\rm ref-1} / y_{\rm ref}), \log(y_{\rm ref+1} / y_{\rm ref}), \ldots, \log(y_K / y_{\rm ref})) \] of dimension \(K-1\) (excluding the reference category), which is modeled as multivariate normally distributed with latent mean and standard deviation vectors \(\mu\) and \(\sigma\), as well as correlation matrix \(\Omega\).

Circular models

The density of the von_mises family for \(y \in (-\pi,\pi)\) is given by \[ f(y) = \frac{\exp(\kappa \cos(y - \mu))}{2\pi I_0(\kappa)} \] where \(I_0\) is the modified Bessel function of order 0 and \(\kappa\) is a positive precision parameter.

Ordinal and categorical models

For ordinal and categorical models, \(y\) is one of the categories \(1, ..., K\). The intercepts of ordinal models are called thresholds and are denoted as \(\tau_k\), with \(k \in \{1, ..., K-1\}\), whereas \(\eta\) does not contain a fixed effects intercept. Note that the applied link functions \(h\) are technically distribution functions \(\mathbb{R} \rightarrow [0,1]\). The density of the cumulative family (implementing the most basic ordinal model) is given by \[ f(y) = g(\tau_{y + 1} - \eta) - g(\tau_{y} - \eta) \]

The densities of the sratio (stopping ratio) and cratio (continuation ratio) families are given by \[ f(y) = g(\tau_{y + 1} - \eta) \prod_{k = 1}^{y} (1 - g(\tau_{k} - \eta)) \] and \[ f(y) = (1 - g(\eta - \tau_{y + 1})) \prod_{k = 1}^{y} g(\eta - \tau_{k}) \]

respectively. Note that both families are equivalent for symmetric link functions such as logit or probit. The density of the acat (adjacent category) family is given by \[ f(y) = \frac{\prod_{k=1}^{y} g(\eta - \tau_{k}) \prod_{k=y+1}^K(1-g(\eta - \tau_{k}))}{\sum_{k=0}^K\prod_{j=1}^k g(\eta-\tau_{j}) \prod_{j=k+1}^K(1-g(\eta - \tau_{j}))} \] For the logit link, this can be simplified to \[ f(y) = \frac{\exp \left(\sum_{k=1}^{y} (\eta - \tau_{k}) \right)} {\sum_{k=0}^K \exp\left(\sum_{j=1}^k (\eta - \tau_{j}) \right)} \] The linear predictor \(\eta\) can be generalized to also depend on the category \(k\) for a subset of predictors. This leads to category specific effects (for details on how to specify them see help(brm)). Note that cumulative and sratio models use \(\tau - \eta\), whereas cratio and acat use \(\eta - \tau\). This is done to ensure that larger values of \(\eta\) increase the probability of higher response categories.

The categorical family is currently only implemented with the multivariate logit link function and has density \[ f(y) = \mu_{y} = \frac{\exp(\eta_{y})}{\sum_{k = 1}^{K} \exp(\eta_{k})} \] Note that \(\eta\) does also depend on the category \(k\). For reasons of identifiability, \(\eta_{1}\) is set to \(0\). A generalization of the categorical family to more than one trial is the multinomial family with density \[ f(y) = {N \choose y_{1}, y_{2}, \ldots, y_{K}} \prod_{k=1}^K \mu_{k}^{y_{k}} \] where, for each category, \(\mu_{k}\) is estimated via the multivariate logit link function shown above.

Zero-inflated and hurdle models

Zero-inflated and hurdle families extend existing families by adding special processes for responses that are zero. The density of a zero-inflated family is given by \[ f_z(y) = z + (1 - z) f(0) \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) \quad \text{if } y > 0 \] where \(z\) denotes the zero-inflation probability. Currently implemented families are zero_inflated_poisson, zero_inflated_binomial, zero_inflated_negbinomial, and zero_inflated_beta.

The density of a hurdle family is given by \[ f_z(y) = z \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) / (1 - f(0)) \quad \text{if } y > 0 \] Currently implemented families are hurdle_poisson, hurdle_negbinomial, hurdle_gamma, and hurdle_lognormal.

The density of a zero-one-inflated family is given by \[ f_{\alpha, \gamma}(y) = \alpha (1 - \gamma) \quad \text{if } y = 0 \\ f_{\alpha, \gamma}(y) = \alpha \gamma \quad \text{if } y = 1 \\ f_{\alpha, \gamma}(y) = (1 - \alpha) f(y) \quad \text{if } y \notin \{0, 1\} \] where \(\alpha\) is the zero-one-inflation probability (i.e. the probability that zero or one occurs) and \(\gamma\) is the conditional one-inflation probability (i.e. the probability that one occurs rather than zero). Currently implemented families are zero_one_inflated_beta.

brms/inst/doc/brms_monotonic.R0000644000176200001440000000621314504266473016127 0ustar liggesusersparams <- list(EVAL = TRUE) ## ---- SETTINGS-knitr, include=FALSE----------------------------------------------------- stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ## --------------------------------------------------------------------------------------- income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100") income <- factor(sample(income_options, 100, TRUE), levels = income_options, ordered = TRUE) mean_ls <- c(30, 60, 70, 75) ls <- mean_ls[income] + rnorm(100, sd = 7) dat <- data.frame(income, ls) ## ---- results='hide'-------------------------------------------------------------------- fit1 <- brm(ls ~ mo(income), data = dat) ## --------------------------------------------------------------------------------------- summary(fit1) plot(fit1, variable = "simo", regex = TRUE) plot(conditional_effects(fit1)) ## ---- results='hide'-------------------------------------------------------------------- dat$income_num <- as.numeric(dat$income) fit2 <- brm(ls ~ income_num, data = dat) ## --------------------------------------------------------------------------------------- summary(fit2) ## ---- results='hide'-------------------------------------------------------------------- contrasts(dat$income) <- contr.treatment(4) fit3 <- brm(ls ~ income, data = dat) ## --------------------------------------------------------------------------------------- summary(fit3) ## --------------------------------------------------------------------------------------- loo(fit1, fit2, fit3) ## ---- results='hide'-------------------------------------------------------------------- prior4 <- prior(dirichlet(c(2, 1, 
1)), class = "simo", coef = "moincome1") fit4 <- brm(ls ~ mo(income), data = dat, prior = prior4, sample_prior = TRUE) ## --------------------------------------------------------------------------------------- summary(fit4) ## --------------------------------------------------------------------------------------- plot(fit4, variable = "prior_simo", regex = TRUE, N = 3) ## --------------------------------------------------------------------------------------- dat$age <- rnorm(100, mean = 40, sd = 10) ## ---- results='hide'-------------------------------------------------------------------- fit5 <- brm(ls ~ mo(income)*age, data = dat) ## --------------------------------------------------------------------------------------- summary(fit5) conditional_effects(fit5, "income:age") ## --------------------------------------------------------------------------------------- dat$city <- rep(1:10, each = 10) var_city <- rnorm(10, sd = 10) dat$ls <- dat$ls + var_city[dat$city] ## ---- results='hide'-------------------------------------------------------------------- fit6 <- brm(ls ~ mo(income)*age + (mo(income) | city), data = dat) ## --------------------------------------------------------------------------------------- summary(fit6) brms/inst/doc/brms_multivariate.Rmd0000644000176200001440000002007514224753362017150 0ustar liggesusers--- title: "Estimating Multivariate Models with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Estimating Multivariate Models with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) 
library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction In the present vignette, we want to discuss how to specify multivariate multilevel models using **brms**. We call a model *multivariate* if it contains multiple response variables, each being predicted by its own set of predictors. Consider an example from biology. Hadfield, Nutall, Osorio, and Owens (2007) analyzed data of the Eurasian blue tit (https://en.wikipedia.org/wiki/Eurasian_blue_tit). They predicted the `tarsus` length as well as the `back` color of chicks. Half of the brood were put into another `fosternest`, while the other half stayed in the fosternest of their own `dam`. This allows to separate genetic from environmental factors. Additionally, we have information about the `hatchdate` and `sex` of the chicks (the latter being known for 94\% of the animals). ```{r data} data("BTdata", package = "MCMCglmm") head(BTdata) ``` ## Basic Multivariate Models We begin with a relatively simple multivariate normal model. ```{r fit1, message=FALSE, warning=FALSE, results='hide'} bform1 <- bf(mvbind(tarsus, back) ~ sex + hatchdate + (1|p|fosternest) + (1|q|dam)) + set_rescor(TRUE) fit1 <- brm(bform1, data = BTdata, chains = 2, cores = 2) ``` As can be seen in the model code, we have used `mvbind` notation to tell **brms** that both `tarsus` and `back` are separate response variables. The term `(1|p|fosternest)` indicates a varying intercept over `fosternest`. By writing `|p|` in between we indicate that all varying effects of `fosternest` should be modeled as correlated. This makes sense since we actually have two model parts, one for `tarsus` and one for `back`. The indicator `p` is arbitrary and can be replaced by other symbols that comes into your mind (for details about the multilevel syntax of **brms**, see `help("brmsformula")` and `vignette("brms_multilevel")`). Similarly, the term `(1|q|dam)` indicates correlated varying effects of the genetic mother of the chicks. 
Alternatively, we could have also modeled the genetic similarities through pedigrees and corresponding relatedness matrices, but this is not the focus of this vignette (please see `vignette("brms_phylogenetics")`). The model results are readily summarized via ```{r summary1, warning=FALSE} fit1 <- add_criterion(fit1, "loo") summary(fit1) ``` The summary output of multivariate models closely resembles those of univariate models, except that the parameters now have the corresponding response variable as prefix. Within dams, tarsus length and back color seem to be negatively correlated, while within fosternests the opposite is true. This indicates differential effects of genetic and environmental factors on these two characteristics. Further, the small residual correlation `rescor(tarsus, back)` on the bottom of the output indicates that there is little unmodeled dependency between tarsus length and back color. Although not necessary at this point, we have already computed and stored the LOO information criterion of `fit1`, which we will use for model comparisons. Next, let's take a look at some posterior-predictive checks, which give us a first impression of the model fit. ```{r pp_check1, message=FALSE} pp_check(fit1, resp = "tarsus") pp_check(fit1, resp = "back") ``` This looks pretty solid, but we notice a slight unmodeled left skewness in the distribution of `tarsus`. We will come back to this later on. Next, we want to investigate how much variation in the response variables can be explained by our model and we use a Bayesian generalization of the $R^2$ coefficient. ```{r R2_1} bayes_R2(fit1) ``` Clearly, there is much variation in both animal characteristics that we can not explain, but apparently we can explain more of the variation in tarsus length than in back color. ## More Complex Multivariate Models Now, suppose we only want to control for `sex` in `tarsus` but not in `back` and vice versa for `hatchdate`. 
Not that this is particular reasonable for the present example, but it allows us to illustrate how to specify different formulas for different response variables. We can no longer use `mvbind` syntax and so we have to use a more verbose approach: ```{r fit2, message=FALSE, warning=FALSE, results='hide'} bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) bf_back <- bf(back ~ hatchdate + (1|p|fosternest) + (1|q|dam)) fit2 <- brm(bf_tarsus + bf_back + set_rescor(TRUE), data = BTdata, chains = 2, cores = 2) ``` Note that we have literally *added* the two model parts via the `+` operator, which is in this case equivalent to writing `mvbf(bf_tarsus, bf_back)`. See `help("brmsformula")` and `help("mvbrmsformula")` for more details about this syntax. Again, we summarize the model first. ```{r summary2, warning=FALSE} fit2 <- add_criterion(fit2, "loo") summary(fit2) ``` Let's find out, how model fit changed due to excluding certain effects from the initial model: ```{r loo12} loo(fit1, fit2) ``` Apparently, there is no noteworthy difference in the model fit. Accordingly, we do not really need to model `sex` and `hatchdate` for both response variables, but there is also no harm in including them (so I would probably just include them). To give you a glimpse of the capabilities of **brms**' multivariate syntax, we change our model in various directions at the same time. Remember the slight left skewness of `tarsus`, which we will now model by using the `skew_normal` family instead of the `gaussian` family. Since we do not have a multivariate normal (or student-t) model, anymore, estimating residual correlations is no longer possible. We make this explicit using the `set_rescor` function. Further, we investigate if the relationship of `back` and `hatchdate` is really linear as previously assumed by fitting a non-linear spline of `hatchdate`. On top of it, we model separate residual variances of `tarsus` for male and female chicks. 
```{r fit3, message=FALSE, warning=FALSE, results='hide'} bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) + lf(sigma ~ 0 + sex) + skew_normal() bf_back <- bf(back ~ s(hatchdate) + (1|p|fosternest) + (1|q|dam)) + gaussian() fit3 <- brm( bf_tarsus + bf_back + set_rescor(FALSE), data = BTdata, chains = 2, cores = 2, control = list(adapt_delta = 0.95) ) ``` Again, we summarize the model and look at some posterior-predictive checks. ```{r summary3, warning=FALSE} fit3 <- add_criterion(fit3, "loo") summary(fit3) ``` We see that the (log) residual standard deviation of `tarsus` is somewhat larger for chicks whose sex could not be identified as compared to male or female chicks. Further, we see from the negative `alpha` (skewness) parameter of `tarsus` that the residuals are indeed slightly left-skewed. Lastly, running ```{r me3} conditional_effects(fit3, "hatchdate", resp = "back") ``` reveals a non-linear relationship of `hatchdate` on the `back` color, which seems to change in waves over the course of the hatch dates. There are many more modeling options for multivariate models, which are not discussed in this vignette. Examples include autocorrelation structures, Gaussian processes, or explicit non-linear predictors (e.g., see `help("brmsformula")` or `vignette("brms_multilevel")`). In fact, nearly all the flexibility of univariate models is retained in multivariate models. ## References Hadfield JD, Nutall A, Osorio D, Owens IPF (2007). Testing the phenotypic gambit: phenotypic, genetic and environmental correlations of colour. *Journal of Evolutionary Biology*, 20(2), 549-557. 
brms/inst/doc/brms_phylogenetics.Rmd0000644000176200001440000002723214224753376017326 0ustar liggesusers--- title: "Estimating Phylogenetic Multilevel Models with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Estimating Phylogenetic Multilevel Models with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "jpeg", dpi = 100, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction In the present vignette, we want to discuss how to specify phylogenetic multilevel models using **brms**. These models are relevant in evolutionary biology when data of many species are analyzed at the same time. The usual approach would be to model species as a grouping factor in a multilevel model and estimate varying intercepts (and possibly also varying slopes) over species. However, species are not independent as they come from the same phylogenetic tree and we thus have to adjust our model to incorporate this dependency. The examples discussed here are from chapter 11 of the book *Modern Phylogenetic Comparative Methods and the application in Evolutionary Biology* (de Villemeruil & Nakagawa, 2014). The necessary data can be downloaded from the corresponding website (https://www.mpcm-evolution.com/). Some of these models may take a few minutes to fit. ## A Simple Phylogenetic Model Assume we have measurements of a phenotype, `phen` (say the body size), and a `cofactor` variable (say the temperature of the environment). We prepare the data using the following code. 
```{r} phylo <- ape::read.nexus("https://paul-buerkner.github.io/data/phylo.nex") data_simple <- read.table( "https://paul-buerkner.github.io/data/data_simple.txt", header = TRUE ) head(data_simple) ``` The `phylo` object contains information on the relationship between species. Using this information, we can construct a covariance matrix of species (Hadfield & Nakagawa, 2010). ```{r} A <- ape::vcv.phylo(phylo) ``` Now we are ready to fit our first phylogenetic multilevel model: ```{r, results='hide'} model_simple <- brm( phen ~ cofactor + (1|gr(phylo, cov = A)), data = data_simple, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0, 10), "b"), prior(normal(0, 50), "Intercept"), prior(student_t(3, 0, 20), "sd"), prior(student_t(3, 0, 20), "sigma") ) ) ``` With the exception of `(1|gr(phylo, cov = A))` instead of `(1|phylo)` this is a basic multilevel model with a varying intercept over species (`phylo` is an indicator of species in this data set). However, by using `cov = A` in the `gr` function, we make sure that species are correlated as specified by the covariance matrix `A`. We pass `A` itself via the `data2` argument which can be used for any kinds of data that does not fit into the regular structure of the `data` argument. Setting priors is not required for achieving good convergence for this model, but it improves sampling speed a bit. After fitting, the results can be investigated in detail. ```{r} summary(model_simple) plot(model_simple, N = 2, ask = FALSE) plot(conditional_effects(model_simple), points = TRUE) ``` The so called phylogenetic signal (often symbolize by $\lambda$) can be computed with the `hypothesis` method and is roughly $\lambda = 0.7$ for this example. 
```{r} hyp <- "sd_phylo__Intercept^2 / (sd_phylo__Intercept^2 + sigma^2) = 0" (hyp <- hypothesis(model_simple, hyp, class = NULL)) plot(hyp) ``` Note that the phylogenetic signal is just a synonym of the intra-class correlation (ICC) used in the context phylogenetic analysis. ## A Phylogenetic Model with Repeated Measurements Often, we have multiple observations per species and this allows to fit more complicated phylogenetic models. ```{r} data_repeat <- read.table( "https://paul-buerkner.github.io/data/data_repeat.txt", header = TRUE ) data_repeat$spec_mean_cf <- with(data_repeat, sapply(split(cofactor, phylo), mean)[phylo]) head(data_repeat) ``` The variable `spec_mean_cf` just contains the mean of the cofactor for each species. The code for the repeated measurement phylogenetic model looks as follows: ```{r, results='hide'} model_repeat1 <- brm( phen ~ spec_mean_cf + (1|gr(phylo, cov = A)) + (1|species), data = data_repeat, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0,10), "b"), prior(normal(0,50), "Intercept"), prior(student_t(3,0,20), "sd"), prior(student_t(3,0,20), "sigma") ), sample_prior = TRUE, chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` The variables `phylo` and `species` are identical as they are both identifiers of the species. However, we model the phylogenetic covariance only for `phylo` and thus the `species` variable accounts for any specific effect that would be independent of the phylogenetic relationship between species (e.g., environmental or niche effects). Again we can obtain model summaries as well as estimates of the phylogenetic signal. ```{r} summary(model_repeat1) ``` ```{r} hyp <- paste( "sd_phylo__Intercept^2 /", "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0" ) (hyp <- hypothesis(model_repeat1, hyp, class = NULL)) plot(hyp) ``` So far, we have completely ignored the variability of the cofactor within species. 
To incorporate this into the model, we define ```{r} data_repeat$within_spec_cf <- data_repeat$cofactor - data_repeat$spec_mean_cf ``` and then fit it again using `within_spec_cf` as an additional predictor. ```{r, results='hide'} model_repeat2 <- update( model_repeat1, formula = ~ . + within_spec_cf, newdata = data_repeat, chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` The results are almost unchanged, with apparently no relationship between the phenotype and the within species variance of `cofactor`. ```{r} summary(model_repeat2) ``` Also, the phylogenetic signal remains more or less the same. ```{r} hyp <- paste( "sd_phylo__Intercept^2 /", "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0" ) (hyp <- hypothesis(model_repeat2, hyp, class = NULL)) ``` ## A Phylogenetic Meta-Analysis Let's say we have Fisher's z-transformed correlation coefficients $Zr$ per species along with corresponding sample sizes (e.g., correlations between male coloration and reproductive success): ```{r} data_fisher <- read.table( "https://paul-buerkner.github.io/data/data_effect.txt", header = TRUE ) data_fisher$obs <- 1:nrow(data_fisher) head(data_fisher) ``` We assume the sampling variance to be known and as $V(Zr) = \frac{1}{N - 3}$ for Fisher's values, where $N$ is the sample size per species. Incorporating the known sampling variance into the model is straight forward. One has to keep in mind though, that **brms** requires the sampling standard deviation (square root of the variance) as input instead of the variance itself. The group-level effect of `obs` represents the residual variance, which we have to model explicitly in a meta-analytic model. 
```{r, results='hide'} model_fisher <- brm( Zr | se(sqrt(1 / (N - 3))) ~ 1 + (1|gr(phylo, cov = A)) + (1|obs), data = data_fisher, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0, 10), "Intercept"), prior(student_t(3, 0, 10), "sd") ), control = list(adapt_delta = 0.95), chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` A summary of the fitted model is obtained via ```{r} summary(model_fisher) plot(model_fisher) ``` The meta-analytic mean (i.e., the model intercept) is $0.16$ with a credible interval of $[0.08, 0.25]$. Thus the mean correlation across species is positive according to the model. ## A phylogenetic count-data model Suppose that we analyze a phenotype that consists of counts instead of being a continuous variable. In such a case, the normality assumption will likely not be justified and it is recommended to use a distribution explicitly suited for count data, for instance the Poisson distribution. The following data set (again retrieved from mpcm-evolution.org) provides an example. ```{r} data_pois <- read.table( "https://paul-buerkner.github.io/data/data_pois.txt", header = TRUE ) data_pois$obs <- 1:nrow(data_pois) head(data_pois) ``` As the Poisson distribution does not have a natural overdispersion parameter, we model the residual variance via the group-level effects of `obs` (e.g., see Lawless, 1987). ```{r, results='hide'} model_pois <- brm( phen_pois ~ cofactor + (1|gr(phylo, cov = A)) + (1|obs), data = data_pois, family = poisson("log"), data2 = list(A = A), chains = 2, cores = 2, iter = 4000, control = list(adapt_delta = 0.95) ) ``` Again, we obtain a summary of the fitted model via ```{r} summary(model_pois) plot(conditional_effects(model_pois), points = TRUE) ``` Now, assume we ignore the fact that the phenotype is count data and fit a linear normal model instead. 
```{r, results='hide'} model_normal <- brm( phen_pois ~ cofactor + (1|gr(phylo, cov = A)), data = data_pois, family = gaussian(), data2 = list(A = A), chains = 2, cores = 2, iter = 4000, control = list(adapt_delta = 0.95) ) ``` ```{r} summary(model_normal) ``` We see that `cofactor` has a positive relationship with the phenotype in both models. One should keep in mind, though, that the estimates of the Poisson model are on the log-scale, as we applied the canonical log-link function in this example. Therefore, estimates are not comparable to a linear normal model even if applied to the same data. What we can compare, however, is the model fit, for instance graphically via posterior predictive checks. ```{r} pp_check(model_pois) pp_check(model_normal) ``` Apparently, the distribution of the phenotype predicted by the Poisson model resembles the original distribution of the phenotype pretty closely, while the normal models fails to do so. We can also apply leave-one-out cross-validation for direct numerical comparison of model fit. ```{r} loo(model_pois, model_normal) ``` Since smaller values of loo indicate better fit, it is again evident that the Poisson model fits the data better than the normal model. Of course, the Poisson model is not the only reasonable option here. For instance, you could use a negative binomial model (via family `negative_binomial`), which already contains an overdispersion parameter so that modeling a varying intercept of `obs` becomes obsolete. ## Phylogenetic models with multiple group-level effects In the above examples, we have only used a single group-level effect (i.e., a varying intercept) for the phylogenetic grouping factors. In **brms**, it is also possible to estimate multiple group-level effects (e.g., a varying intercept and a varying slope) for these grouping factors. However, it requires repeatedly computing Kronecker products of covariance matrices while fitting the model. 
This will be very slow especially when the grouping factors have many levels and matrices are thus large. ## References de Villemeruil P. & Nakagawa, S. (2014) General quantitative genetic methods for comparative biology. In: *Modern phylogenetic comparative methods and their application in evolutionary biology: concepts and practice* (ed. Garamszegi L.) Springer, New York. pp. 287-303. Hadfield, J. D. & Nakagawa, S. (2010) General quantitative genetic methods for comparative biology: phylogenies, taxonomies, and multi-trait models for continuous and categorical characters. *Journal of Evolutionary Biology*. 23. 494-508. Lawless, J. F. (1987). Negative binomial and mixed Poisson regression. *Canadian Journal of Statistics*, 15(3), 209-225. brms/inst/doc/brms_threading.html0000644000176200001440000072044314504270202016623 0ustar liggesusers Running brms models with within-chain parallelization

Running brms models with within-chain parallelization

Sebastian Weber & Paul Bürkner

2023-09-25

Introduction

Full Bayesian inference is a computationally very demanding task and often we wish to run our models faster in shorter walltime. With modern computers we nowadays have multiple processors available on a given machine such that the use of running the inference in parallel will shorten the overall walltime. While between-chain parallelization is straightforward by merely launching multiple chains at the same time, the use of within-chain parallelization is more complicated in various ways. This vignette aims to introduce the user to within-chain parallelization with brms, since its efficient use depends on various aspects specific to the users model.

Quick summary

Assuming you have a brms model which you wish to evaluate faster by using more cores per chain, for example:

fit_serial <- brm(
  count ~ zAge + zBase * Trt + (1|patient),
  data = epilepsy, family = poisson(),
  chains = 4, cores = 4, backend = "cmdstanr"
)

Then running this model with threading requires cmdstanr as backend and you can simply add threading support to an existing model with the update mechanism as:

fit_parallel <- update(
  fit_serial, chains = 2, cores = 2,
  backend = "cmdstanr", threads = threading(2)
)

The example above assumes that 4 cores are available which are best used without within-chain parallelization by running 4 chains in parallel. When using within chain parallelization it is still advisable to use just as many threads in total as you have CPU cores. It’s thus sensible in this case to reduce the number of chains running in parallel to just 2, but allow each chain to use 2 threads. Obviously this will reduce the number of iterations in the posterior here as we assumed a fixed amount of 4 cores.

  • Only apply within-chain parallelization to large problems which take more than a few minutes at least to calculate. The epilepsy example above is actually too small to gain in speed (just a few seconds per chain on this machine).
  • Within-chain parallelization is less efficient than between-chain parallelization. So only use within-chain parallelism if more CPUs can be used to run the entire analysis.
  • Due to details of the model and data-set, speedups with more cores can be very limited. Not every model is amenable to within-chain parallelization, and an empirical evaluation is in some cases advisable.
  • Enabling threading usually slows down any model to some extent and this slowdown must be offset by sufficient cores per chain in order to really gain in execution speed.
  • Doubling the execution speed with few cores is a lot easier than obtaining larger speedups with even more cores.
  • Models with computationally expensive likelihoods are easier to parallelize than less expensive likelihoods. For example, the Poisson distribution involves expensive \(\log\Gamma\) functions whereas the normal likelihood is very cheap to calculate in comparison.
  • Models with many parameters (e.g., multilevel models) carry a large overhead when running in parallel.
  • With a larger overhead of the model, the likelihood must be sufficiently expensive such that the relative computational cost of likelihood to parallelization overhead is favorable.
  • Avoid using hyper-threading, that is, only use as many threads as you have physical cores available.
  • Ensure that the data is randomly sorted such that consecutive subsets of the data are roughly of the same computational effort.

Within-chain parallelization

The within-chain parallelization implemented in brms is based on the reduce_sum facility in Stan. The basic principle that reduce_sum uses is to split a large summation into arbitrary smaller partial sums. Due to the commutativity and associativity of the sum operation these smaller partial sums can be evaluated in any order and in parallel from one another. brms leverages reduce_sum to evaluate the log-likelihood of the model in parallel as for example

\[ \begin{aligned} l(y|\theta) &= \sum_{i=1}^N l_i(y_i| \theta) \\ &= \sum_{i=1}^{S_1} l_i(y_i| \theta) + \sum_{i=S_1+1}^N l_i(y_i| \theta). \end{aligned} \]

As a consequence, the within-chain parallelization requires mutually independent log-likelihood terms which restricts its applicability to some degree.

Furthermore, the within-chain parallelization is only applicable to the evaluation of the data likelihood while all other parts of the model, for example priors, will remain running serially. Thus, only a partial fraction of the entire Stan model will run in parallel which limits the potential speedup one may obtain. The theoretical speedup for a partially in parallel running program is described by Amdahl’s law. For example, with 90% of the computational load running in parallel one can essentially double the execution speed with 2 cores while 8 cores may only speedup the program by at most 5x. How large the computational cost of the log-likelihood is in relation to the entire model is very dependent on the model of the user.

In practice, the speedups are even smaller than the theoretical speedups. This is caused by the additional overhead implied by forming multiple smaller sums than just one large one. For example, for each partial sum formed the entire parameter vector \(\theta\) has to be copied in memory for Stan to be able to calculate the gradient of the log-likelihood. Hence, with more partial sums, more copying is necessary as opposed to evaluating just one large sum. Whether the additional copying is indeed relevant depends on the computational cost of the log-likelihood of each term and the number of parameters. For a model with a computationally cheap normal log-likelihood, this effect is more important than for a model with a Poisson log-likelihood, and for multilevel models with many parameters more copying is needed than for simpler regression models. It may therefore be necessary to form sufficiently large partial sums to warrant an efficient parallel execution. The size of the partial sums is referred to as the grainsize, which is set to a reasonable default value. However, for some models this tuning parameter requires some attention from the user for optimal performance.

Finally, it is important to note that by default the exact size and order of the partial sums is not stable as it is adjusted to the load of the system. As a result, exact numerical reproducibility is not guaranteed by default. In order to warrant the same size and order of the partial sums, the static option must be used and set to TRUE, which uses a deterministic scheduler for the parallel work.

Example model

As a toy demonstration, we use here a multilevel Poisson model. The model is a varying intercept model with \(10^{4}\) data observations which are grouped into \(1000\) groups. Each data item has \(3\) continuous covariates. The simulation code for the fake data can be found in the appendix and its first \(10\) rows are:

kable(head(fake, 10), digits = 3)
g x1 x2 x3 theta eta mu y
382 0.496 0.623 0.069 -0.262 0.510 0.248 0
578 -0.748 -0.300 -0.768 -0.903 -0.032 -0.934 0
772 -1.124 -0.161 -0.882 -1.047 -0.551 -1.598 1
774 0.992 -0.593 1.007 1.578 -0.045 1.533 2
729 0.641 -1.563 -0.491 -0.291 -1.460 -1.751 0
897 -0.085 -0.531 -0.978 -1.296 -0.929 -2.226 0
110 -0.772 1.364 -0.629 -1.351 0.124 -1.227 0
248 -1.441 0.699 1.284 2.072 -1.020 1.053 1
754 -1.320 0.837 -0.137 -0.237 1.452 1.215 3
682 -1.345 -2.673 -1.628 -1.146 -0.388 -1.534 0

The brms model fitting this data is:

model_poisson <- brm(
  y ~ 1 + x1 + x2 + (1 | g),
  data = fake,
  family = poisson(),
  iter = 500, # short sampling to speedup example
  chains = 2,
  prior = prior(normal(0,1), class = b) +
    prior(constant(1), class = sd, group = g),
  backend = "cmdstanr",
  threads = threading(4),
  save_pars = save_pars(all = TRUE)
)

Here we have fixed the standard deviation of the between-group variation for the intercept to the true value of \(1\) as used in the simulation. This is to avoid unfavorable geometry of the problem allowing us to concentrate on computational aspects alone.

The Poisson likelihood is a relatively expensive likelihood due to the use of the \(\log\Gamma\) function as opposed to, for example, a normal likelihood, which involves far less expensive operations. Moreover, this example is chosen in order to demonstrate parallelization overhead implied by a large number of parameters.

Managing parallelization overhead

As discussed above, the key mechanism to run Stan programs with parallelization is to split the large sum over independent log likelihood terms into arbitrary smaller partial sums. Creating more partial sums allows to increase simultaneous parallel computations in a granular way, but at the same time additional overhead is introduced through the requirement to copy the entire parameter vector for each partial sum formed along with further overhead due to splitting up a single large task into multiple smaller ones.

By default, brms will choose a sensible grainsize which defines how large a given partial sum will roughly be. The actual chunk size is automatically tuned whenever the default non-static scheduler is used, which is the recommended choice to start with. As noted before, only the static scheduler is giving fully deterministic results since the chunk size and order of partial sums will be the same during sampling.

While we expect that the default grainsize in brms is reasonably good for many models, it can improve performance if one tunes the grainsize specifically to a given model and data-set. We suggest to increase successively the number of chunks a given data set is split into with the static scheduler and run this on a single core. This way one can control the number of partial sum accurately and monitor the execution time as it increases. These experiments are run with only a single chain and very short iteration numbers as we are not interested in the statistical results, but rather aim to be able to explore the tuning parameter space of the chunk size as quickly as possible. The number of iterations needed to get reliable runtime estimates for a given chunk size will depend on many details and the easiest way to determine this is to run this benchmark with multiple number of iterations. Whenever their results match approximately, then the iteration numbers are sufficient. In order to decrease the variation between runs, we also fix the random seed, initial value and the tuning parameters of the sampler (step size and mass matrix).

Below is an example R code demonstrating such a benchmark. The utility function benchmark_threading is shown and explained in the appendix.

chunking_bench <- transform(
    data.frame(chunks = 4^(0:3)),
    grainsize = ceiling(N / chunks)
)

iter_test <- c(10, 20, 40)  # very short test runs
scaling_chunking <- benchmark_threading(
  model_poisson,
  cores = 1,
  grainsize = chunking_bench$grainsize,  # test various grainsizes
  iter = iter_test,
  static = TRUE  # with static partitioner
)

# run as reference the model *without* reduce_sum
ref <- benchmark_reference(model_poisson, iter_test)

# for additional data munging please refer to the appendix

Graphically summarizing the results shows that with more than 8 chunks the overhead is about 10% and increasing further with more chunks. For models without many parameters, no such overhead should be observed. Furthermore, one can see that 20 and 40 iterations give similar results implying that 20 iterations suffice for stable runtime estimates for these (and the following) benchmarks. The overhead of up to 20% in this example with 16 chunks may seem large due to the scaling of the plot. One must not forget that when we start to use more CPU cores, the overhead is easily offset, but it limits the maximal speedup we can get. For example, some 2 units of computation become 2.4 units due to the overhead such that on 2 cores we don’t quite double the execution speed, but rather get a 1.6x increase in speed instead of a 2x speedup.

Considering in addition the time per leapfrog step of the NUTS sampler shows on an absolute scale similar information as before. The upside of this representation is that we can visualize the slowdown in relation to the program without reduce_sum. As we can see, the additional overhead due to merely enabling reduce_sum is substantial in this example. This is attributed in the specific example to the large number of random effects.

ggplot(scaling_chunking) +
    aes(chunks, slowdown, colour = iter, shape = iter) +
    geom_line() + geom_point() +
    scale_x_log10(breaks = scaling_chunking$chunks) +
    scale_y_log10(breaks=seq(0.8, 2.5, by=0.1)) +
    ggtitle("Slowdown with increasing number of chunks")

ggplot(scaling_chunking) +
    aes(chunks, 1E3 * runtime/num_leapfrog, colour = iter, shape=iter) +
    geom_line() + geom_point() +
    scale_x_log10(breaks = scaling_chunking$chunks) +
    scale_y_log10(breaks=seq(0.1, 2.0, by=0.1)) +
    geom_hline(data=ref, aes(yintercept=1E3 * runtime/num_leapfrog, colour=iter), linetype=I(2)) +
    ggtitle("Time per leapfrog step vs number of chunks",
            "Dashed line is reference model without reduce_sum") +
    ylab("Time per leapfrog step [ms]")

Parallelization speedup

In practice, we are often interested in so-called “hard-scaling” properties of the parallelization system. That is, for a fixed problem size we would like to know how much faster we can execute the Stan program with increasing number of threads. As nowadays CPUs usually run with so-called hyper-threading, it is also of interest if this technique is beneficial for Stan programs as well (spoiler alert: it’s not useful). As we have seen before, the grainsize can have an impact on the performance and is as such a tuning parameter. Below we demonstrate some exemplary R code which runs a benchmark with varying number of CPU cores and varying number of grainsizes.

num_cpu <- parallel::detectCores(logical = FALSE)
num_cpu_logical <- parallel::detectCores(logical = TRUE)
grainsize_default <- ceiling(N / (2 * num_cpu))
cores <- c(2^seq(0, floor(log2(num_cpu_logical))), num_cpu, num_cpu_logical)
cores <- sort(unique(cores))
grainsize <- c(grainsize_default, grainsize_default/2, grainsize_default/4)
grainsize <- round(grainsize)

iter_scaling <- 20
scaling_cores <- benchmark_threading(
  model_poisson,
  cores = cores,
  grainsize = grainsize,
  iter = iter_scaling,
  static = FALSE
)

single_core  <- transform(
    subset(scaling_cores, cores == 1),
    runtime_single = runtime,
    num_leapfrog=NULL, runtime=NULL, cores = NULL
)

scaling_cores <- transform(
  merge(scaling_cores, single_core),
  speedup = runtime_single/runtime,
  grainsize = factor(grainsize)
)

It is important to consider the absolute runtime and the relative speedup vs. running on a single core. The relative speedup can be misleading if the single core runtime is very slow in which case speed gains on more CPUs may look overly good. Considering instead the absolute runtime avoids this problem. After all, we are interested in the shortest walltime we can get rather than any relative speedups.

ggplot(scaling_cores) +
    aes(cores, runtime, shape = grainsize, color = grainsize) +
    geom_vline(xintercept = num_cpu, linetype = 3) +
    geom_line() + geom_point() +
    scale_x_log10(breaks = scaling_cores$cores) +
    scale_y_log10(breaks=seq(0.1, 1.4, by=0.1)) +
    theme(legend.position = c(0.85, 0.8)) +
    geom_hline(data=subset(ref, iter==iter_scaling), aes(yintercept=runtime), linetype=I(2)) +
    ggtitle("Runtime with varying number of cores",
            "Dashed line is reference model without reduce_sum")

ggplot(scaling_cores) +
  aes(cores, speedup, shape = grainsize, color = grainsize) +
  geom_abline(slope = 1, intercept = 0, linetype = 2) +
  geom_vline(xintercept = num_cpu, linetype = 3) +
  geom_line() + geom_point() +
  scale_x_log10(breaks=scaling_cores$cores) +
  scale_y_log10(breaks=scaling_cores$cores) +
  theme(aspect.ratio = 1) +
  coord_fixed(xlim = c(1, num_cpu_logical), ylim = c(1, num_cpu_logical)) +
  ggtitle("Relative speedup vs 1 core")

The vertical dotted line marks the physical number of CPU cores on the machine this was run. The horizontal dashed line in the plot with absolute runtime marks the respective runtime of the model without reduce_sum and the dashed unity line in the plot with the relative speedup marks the theoretical maximal speedup. We can see that there is no further reduction in execution time when increasing the thread count to be greater than the number of physical CPUs. Hence, the use of hyper-threading is not helpful when aiming to maximize the speed of a Stan program. Moreover, the use of threading outperforms the single core runtime only when using more than 4 cores in this example.

For this example, the shown grainsizes matter on some machines but not on others, so your results may look quite different from what is shown here. The overall speedups may not seem impressive in this case, which is attributed in this case to the large number of parameters relative to the number of observations. However, we can still outperform the single core runtime when using many cores. Though the most important advantage of threading is that with an increasing data set size, the user has the option to use a brute-force approach to balance the increase in walltime needed.

kable(scaling_cores, digits = 2)
grainsize iter cores num_leapfrog runtime runtime_single speedup
125 20 1 620 0.50 0.50 1.00
125 20 2 620 0.28 0.50 1.80
125 20 4 620 0.20 0.50 2.57
125 20 8 620 0.16 0.50 3.23
125 20 10 620 0.16 0.50 3.19
250 20 1 620 0.42 0.42 1.00
250 20 2 620 0.23 0.42 1.80
250 20 4 620 0.16 0.42 2.69
250 20 8 620 0.15 0.42 2.81
250 20 10 620 0.16 0.42 2.67
500 20 1 620 0.37 0.37 1.00
500 20 2 620 0.21 0.37 1.76
500 20 4 620 0.15 0.37 2.52
500 20 8 620 0.13 0.37 2.85
500 20 10 620 0.13 0.37 2.83

For a given Stan model one should usually choose the number of chains and the number of threads per chain to be equal to the number of (physical) cores one wishes to use. Only if different chains of the model have relatively different execution times (which they should not have, but it occurs sometimes in practice), then one may consider the use of hyper-threading. Doing so will share the resources evenly across all chains and whenever the fastest chain finishes, the freed resources can be given to the still running chains.

Appendix

Fake data simulation

set.seed(54647)
# number of observations
N <- 1E4
# number of group levels
G <- round(N / 10)
# number of predictors
P <- 3
# regression coefficients
beta <- rnorm(P)

# sampled covariates, group means and fake data
fake <- matrix(rnorm(N * P), ncol = P)
dimnames(fake) <- list(NULL, paste0("x", 1:P))

# fixed effect part and sampled group membership
fake <- transform(
  as.data.frame(fake),
  theta = fake %*% beta,
  g = sample.int(G, N, replace=TRUE)
)

# add random intercept by group
fake  <- merge(fake, data.frame(g = 1:G, eta = rnorm(G)), by = "g")

# linear predictor
fake  <- transform(fake, mu = theta + eta)

# sample Poisson data
fake  <- transform(fake, y = rpois(N, exp(mu)))

# shuffle order of data rows to ensure even distribution of computational effort
fake <- fake[sample.int(N, N),]

# drop not needed row names
rownames(fake) <- NULL

Poisson example model

model_poisson <- brm(
  y ~ 1 + x1 + x2 + (1 | g),
  data = fake,
  family = poisson(),
  iter = 500, # short sampling to speedup example
  chains = 2,
  prior = prior(normal(0,1), class = b) +
    prior(constant(1), class = sd, group = g),
  backend = "cmdstanr",
  threads = threading(4),
  save_pars = save_pars(all = TRUE)
)

Threading benchmark function

# Benchmarks given model with cross-product of tuning parameters CPU
# cores, grainsize and iterations. Models are run with either static
# or non-static scheduler and initial values are set by default to 0 on the
# unconstrained scale. Function returns a data-frame with the
# cross-product of the tuning parameters and as result column the
# respective runtime.
benchmark_threading <- function(model, cores = 1, grainsize = 1, iter = 100,
                                static = FALSE) {

    winfo <- extract_warmup_info(model)
    sims  <- rstan::extract(model$fit)
    init <- list(extract_draw(sims, 1))

    scaling_model <- update(
        model, refresh = 0,
        threads = threading(1, grainsize = grainsize[1], static = static),
        chains = 1, iter = 2, backend = "cmdstanr"
    )

    run_benchmark <- function(cores, size, iter) {
        bench_fit <- update(
            scaling_model, warmup=0, iter = iter,
            chains = 1, seed = 1234, init = init, refresh = 0, save_warmup=TRUE,
            threads = threading(cores, grainsize = size, static = static),
            inv_metric=winfo$inv_metric[[1]],
            step_size=winfo$step_size[[1]],
            adapt_engaged=FALSE
        )
        lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value)
        elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit)))

        c(num_leapfrog=lf, runtime=elapsed)
    }

    cases <- expand.grid(cores = cores, grainsize = grainsize, iter = iter)
    res <- with(cases, mapply(run_benchmark, cores, grainsize, iter))
    cbind(cases, as.data.frame(t(res)))
}

benchmark_reference <- function(model, iter=100, init=0) {
    winfo <- extract_warmup_info(model)
    sims  <- rstan::extract(model$fit)
    init <- list(extract_draw(sims, 1))

    ref_model <- update(
        model, refresh = 0, threads = NULL,
        chains = 1, iter = 2, backend = "cmdstanr"
    )

    run_benchmark_ref <- function(iter_bench) {
        bench_fit <- update(
            ref_model, warmup=0, iter = iter_bench,
            chains = 1, seed = 1234, init = init, refresh = 0,
            inv_metric=winfo$inv_metric[[1]],
            step_size=winfo$step_size[[1]],
            adapt_engaged=FALSE
        )

        lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value)
        elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit)))

        c(num_leapfrog=lf, runtime=elapsed)
    }

    ref <- sapply(iter, run_benchmark_ref)
    ref <- cbind(as.data.frame(t(ref)), iter=iter)
    ref
}

extract_warmup_info <- function(bfit) {
    adapt  <- lapply(rstan::get_adaptation_info(bfit$fit), strsplit, split="\\n")
    step_size  <- lapply(adapt, function(a) as.numeric(strsplit(a[[1]][[1]], " = ")[[1]][2]))
    inv_metric <- lapply(adapt, function(a) as.numeric(strsplit(sub("^# ", "", a[[1]][[3]]), ", ")[[1]]))
    list(step_size=step_size, inv_metric=inv_metric)
}

extract_draw <- function(sims, draw) {
  lapply(sims, brms:::slice, dim = 1, i = draw, drop = TRUE)
}

Munging of slowdown with chunking data

scaling_chunking <- merge(scaling_chunking, chunking_bench, by = "grainsize")

single_chunk  <- transform(
    subset(scaling_chunking, chunks == 1),
    num_leapfrog_single = num_leapfrog, num_leapfrog = NULL,
    runtime_single = runtime, runtime = NULL,
    grainsize = NULL, chunks=NULL
)

scaling_chunking <- transform(
    merge(scaling_chunking, single_chunk),
    slowdown = runtime/runtime_single,
    iter = factor(iter),
    runtime_single = NULL
)

ref <- transform(ref, iter=factor(iter))
brms/inst/doc/brms_missings.html0000644000176200001440000046261614504266264016534 0ustar liggesusers Handle Missing Values with brms

Handle Missing Values with brms

Paul Bürkner

2023-09-25

Introduction

Many real world data sets contain missing values for various reasons. Generally, we have quite a few options to handle those missing values. The easiest solution is to remove all rows from the data set, where one or more variables are missing. However, if values are not missing completely at random, this will likely lead to bias in our analysis. Accordingly, we usually want to impute missing values in one way or the other. Here, we will consider two very general approaches using brms: (1) Impute missing values before the model fitting with multiple imputation, and (2) impute missing values on the fly during model fitting1. As a simple example, we will use the nhanes data set, which contains information on participants’ age, bmi (body mass index), hyp (hypertensive), and chl (total serum cholesterol). For the purpose of the present vignette, we are primarily interested in predicting bmi by age and chl.

data("nhanes", package = "mice")
head(nhanes)
  age  bmi hyp chl
1   1   NA  NA  NA
2   2 22.7   1 187
3   1   NA   1 187
4   3   NA  NA  NA
5   1 20.4   1 113
6   3   NA  NA 184

Imputation before model fitting

There are many approaches allowing us to impute missing data before the actual model fitting takes place. From a statistical perspective, multiple imputation is one of the best solutions. Each missing value is not imputed once but m times leading to a total of m fully imputed data sets. The model can then be fitted to each of those data sets separately and results are pooled across models, afterwards. One widely applied package for multiple imputation is mice (Buuren & Groothuis-Oudshoorn, 2010) and we will use it in the following in combination with brms. Here, we apply the default settings of mice, which means that all variables will be used to impute missing values in all other variables and imputation functions automatically chosen based on the variables’ characteristics.

library(mice)
imp <- mice(nhanes, m = 5, print = FALSE)

Now, we have m = 5 imputed data sets stored within the imp object. In practice, we will likely need more than 5 of those to accurately account for the uncertainty induced by the missingness, perhaps even in the area of 100 imputed data sets (Zhou & Reiter, 2010). Of course, this increases the computational burden by a lot and so we stick to m = 5 for the purpose of this vignette. Regardless of the value of m, we can either extract those data sets and then pass them to the actual model fitting function as a list of data frames, or pass imp directly. The latter works because brms offers special support for data imputed by mice. We will go with the latter approach, since it is less typing. Fitting our model of interest with brms to the multiple imputed data sets is straightforward.

fit_imp1 <- brm_multiple(bmi ~ age*chl, data = imp, chains = 2)

The returned fitted model is an ordinary brmsfit object containing the posterior draws of all m submodels. While pooling across models is not necessarily straightforward in classical statistics, it is trivial in a Bayesian framework. Here, pooling results of multiple imputed data sets is simply achieved by combining the posterior draws of the submodels. Accordingly, all post-processing methods can be used out of the box without having to worry about pooling at all.

summary(fit_imp1)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: bmi ~ age * chl 
   Data: imp (Number of observations: 25) 
  Draws: 10 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 10000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    15.74      8.55    -1.36    32.24 1.11       62      440
age           0.83      5.45   -10.15    11.33 1.18       38      233
chl           0.08      0.04    -0.00     0.17 1.08       78      501
age:chl      -0.02      0.02    -0.07     0.03 1.10       66      530

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     3.39      0.63     2.42     4.87 1.13       49      171

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

In the summary output, we notice that some Rhat values are higher than \(1.1\) indicating possible convergence problems. For models based on multiple imputed data sets, this is often a false positive: Chains of different submodels may not overlay each other exactly, since they were fitted to different data. We can see the chains on the right-hand side of

plot(fit_imp1, variable = "^b", regex = TRUE)

Such non-overlaying chains imply high Rhat values without there actually being any convergence issue. Accordingly, we have to investigate the convergence of the submodels separately, which we can do by looking at

round(fit_imp1$rhats, 2)
  b_Intercept b_age b_chl b_age.chl sigma lprior lp__
1           1  1.00     1         1     1      1    1
2           1  1.00     1         1     1      1    1
3           1  1.01     1         1     1      1    1
4           1  1.00     1         1     1      1    1
5           1  1.00     1         1     1      1    1

The convergence of each of the submodels looks good. Accordingly, we can proceed with further post-processing and interpretation of the results. For instance, we could investigate the combined effect of age and chl.

conditional_effects(fit_imp1, "age:chl")

To summarize, the advantages of multiple imputation are obvious: One can apply it to all kinds of models, since model fitting functions do not need to know that the data sets were imputed, beforehand. Also, we do not need to worry about pooling across submodels when using fully Bayesian methods. The only drawback is the amount of time required for model fitting. Estimating Bayesian models is already quite slow with just a single data set and it only gets worse when working with multiple imputation.

Compatibility with other multiple imputation packages

brms offers built-in support for mice mainly because I use the latter in some of my own research projects. Nevertheless, brm_multiple supports all kinds of multiple imputation packages as it also accepts a list of data frames as input for its data argument. Thus, you just need to extract the imputed data frames in the form of a list, which can then be passed to brm_multiple. Most multiple imputation packages have some built-in functionality for this task. When using the mi package, for instance, you simply need to call the mi::complete function to get the desired output.

Imputation during model fitting

Imputation during model fitting is generally thought to be more complex than imputation before model fitting, because one has to take care of everything within one step. This remains true when imputing missing values with brms, but possibly to a somewhat smaller degree. Consider again the nhanes data with the goal to predict bmi by age, and chl. Since age contains no missing values, we only have to take special care of bmi and chl. We need to tell the model two things. (1) Which variables contain missing values and how they should be predicted, as well as (2) which of these imputed variables should be used as predictors. In brms we can do this as follows:

bform <- bf(bmi | mi() ~ age * mi(chl)) +
  bf(chl | mi() ~ age) + set_rescor(FALSE)
fit_imp2 <- brm(bform, data = nhanes)

The model has become multivariate, as we no longer only predict bmi but also chl (see vignette("brms_multivariate") for details about the multivariate syntax of brms). We ensure that missings in both variables will be modeled rather than excluded by adding | mi() on the left-hand side of the formulas2. We write mi(chl) on the right-hand side of the formula for bmi to ensure that the estimated missing values of chl will be used in the prediction of bmi. The summary is a bit more cluttered as we get coefficients for both response variables, but apart from that we can interpret coefficients in the usual way.

summary(fit_imp2)
 Family: MV(gaussian, gaussian) 
  Links: mu = identity; sigma = identity
         mu = identity; sigma = identity 
Formula: bmi | mi() ~ age * mi(chl) 
         chl | mi() ~ age 
   Data: nhanes (Number of observations: 25) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
bmi_Intercept    14.24      8.60    -2.26    31.61 1.00     1275     2409
chl_Intercept   141.78     24.52    93.54   191.04 1.00     2773     2672
bmi_age           2.51      5.39    -7.89    13.64 1.00     1258     1960
chl_age          28.60     13.16     2.81    54.21 1.00     2663     2322
bmi_michl         0.10      0.04     0.01     0.18 1.00     1468     2347
bmi_michl:age    -0.03      0.02    -0.08     0.02 1.00     1224     1868

Family Specific Parameters: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma_bmi     3.39      0.79     2.22     5.24 1.01     1068     2501
sigma_chl    40.32      7.80    28.64    59.17 1.00     2191     2556

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
conditional_effects(fit_imp2, "age:chl", resp = "bmi")

The results look pretty similar to those obtained from multiple imputation, but be aware that this may not be generally the case. In multiple imputation, the default is to impute all variables based on all other variables, while in the ‘one-step’ approach, we have to explicitly specify the variables used in the imputation. Thus, arguably, multiple imputation is easier to apply. An obvious advantage of the ‘one-step’ approach is that the model needs to be fitted only once instead of m times. Also, within the brms framework, we can use multilevel structure and complex non-linear relationships for the imputation of missing values, which is not achieved as easily in standard multiple imputation software. On the downside, it is currently not possible to impute discrete variables, because Stan (the engine behind brms) does not allow estimating discrete parameters.

Combining measurement error and missing values

Missing value terms in brms can not only handle missing values but also measurement error, or arbitrary combinations of the two. In fact, we can think of a missing value as a value with infinite measurement error. Thus, mi terms are a natural (and somewhat more verbose) generalization of the now soft-deprecated me terms. Suppose we had measured the variable chl with some known error:

nhanes$se <- rexp(nrow(nhanes), 2)

Then we can go ahead and include this information in the model as follows:

bform <- bf(bmi | mi() ~ age * mi(chl)) +
  bf(chl | mi(se) ~ age) + set_rescor(FALSE)
fit_imp3 <- brm(bform, data = nhanes)

Summarizing and post-processing the model continues to work as usual.

References

Buuren, S. V. & Groothuis-Oudshoorn, K. (2010). mice: Multivariate imputation by chained equations in R. Journal of Statistical Software, 1-68. doi.org/10.18637/jss.v045.i03

Zhou, X. & Reiter, J. P. (2010). A Note on Bayesian Inference After Multiple Imputation. The American Statistician, 64(2), 159-163. doi.org/10.1198/tast.2010.09109


  1. Actually, there is a third approach that only applies to missings in response variables. If we want to impute missing responses, we just fit the model using the observed responses and then impute the missings after fitting the model by means of posterior prediction. That is, we supply the predictor values corresponding to missing responses to the predict method.↩︎

  2. We don’t really need this for bmi, since bmi is not used as a predictor for another variable. Accordingly, we could also – and equivalently – impute missing values of bmi after model fitting by means of posterior prediction.↩︎

brms/inst/doc/brms_multivariate.html0000644000176200001440000033406314504267211017371 0ustar liggesusers Estimating Multivariate Models with brms

Estimating Multivariate Models with brms

Paul Bürkner

2023-09-25

Introduction

In the present vignette, we want to discuss how to specify multivariate multilevel models using brms. We call a model multivariate if it contains multiple response variables, each being predicted by its own set of predictors. Consider an example from biology. Hadfield, Nutall, Osorio, and Owens (2007) analyzed data of the Eurasian blue tit (https://en.wikipedia.org/wiki/Eurasian_blue_tit). They predicted the tarsus length as well as the back color of chicks. Half of the brood were put into another fosternest, while the other half stayed in the fosternest of their own dam. This allows us to separate genetic from environmental factors. Additionally, we have information about the hatchdate and sex of the chicks (the latter being known for 94% of the animals).

data("BTdata", package = "MCMCglmm")
head(BTdata)
       tarsus       back  animal     dam fosternest  hatchdate  sex
1 -1.89229718  1.1464212 R187142 R187557      F2102 -0.6874021  Fem
2  1.13610981 -0.7596521 R187154 R187559      F1902 -0.6874021 Male
3  0.98468946  0.1449373 R187341 R187568       A602 -0.4279814 Male
4  0.37900806  0.2555847 R046169 R187518      A1302 -1.4656641 Male
5 -0.07525299 -0.3006992 R046161 R187528      A2602 -1.4656641  Fem
6 -1.13519543  1.5577219 R187409 R187945      C2302  0.3502805  Fem

Basic Multivariate Models

We begin with a relatively simple multivariate normal model.

bform1 <- 
  bf(mvbind(tarsus, back) ~ sex + hatchdate + (1|p|fosternest) + (1|q|dam)) +
  set_rescor(TRUE)

fit1 <- brm(bform1, data = BTdata, chains = 2, cores = 2)

As can be seen in the model code, we have used mvbind notation to tell brms that both tarsus and back are separate response variables. The term (1|p|fosternest) indicates a varying intercept over fosternest. By writing |p| in between we indicate that all varying effects of fosternest should be modeled as correlated. This makes sense since we actually have two model parts, one for tarsus and one for back. The indicator p is arbitrary and can be replaced by other symbols that come to mind (for details about the multilevel syntax of brms, see help("brmsformula") and vignette("brms_multilevel")). Similarly, the term (1|q|dam) indicates correlated varying effects of the genetic mother of the chicks. Alternatively, we could have also modeled the genetic similarities through pedigrees and corresponding relatedness matrices, but this is not the focus of this vignette (please see vignette("brms_phylogenetics")). The model results are readily summarized via

fit1 <- add_criterion(fit1, "loo")
summary(fit1)
 Family: MV(gaussian, gaussian) 
  Links: mu = identity; sigma = identity
         mu = identity; sigma = identity 
Formula: tarsus ~ sex + hatchdate + (1 | p | fosternest) + (1 | q | dam) 
         back ~ sex + hatchdate + (1 | p | fosternest) + (1 | q | dam) 
   Data: BTdata (Number of observations: 828) 
  Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 2000

Group-Level Effects: 
~dam (Number of levels: 106) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.48      0.05     0.39     0.58 1.00      908
sd(back_Intercept)                       0.24      0.07     0.10     0.38 1.01      333
cor(tarsus_Intercept,back_Intercept)    -0.52      0.23    -0.92    -0.06 1.00      499
                                     Tail_ESS
sd(tarsus_Intercept)                     1662
sd(back_Intercept)                        571
cor(tarsus_Intercept,back_Intercept)      566

~fosternest (Number of levels: 104) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.27      0.05     0.17     0.38 1.00      753
sd(back_Intercept)                       0.35      0.06     0.24     0.46 1.00      601
cor(tarsus_Intercept,back_Intercept)     0.69      0.20     0.22     0.98 1.03      257
                                     Tail_ESS
sd(tarsus_Intercept)                     1286
sd(back_Intercept)                       1212
cor(tarsus_Intercept,back_Intercept)      582

Population-Level Effects: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
tarsus_Intercept    -0.41      0.07    -0.54    -0.27 1.00     1646     1420
back_Intercept      -0.01      0.07    -0.15     0.11 1.00     3194     1546
tarsus_sexMale       0.77      0.06     0.66     0.88 1.00     3994     1428
tarsus_sexUNK        0.23      0.13    -0.03     0.47 1.00     4522     1762
tarsus_hatchdate    -0.04      0.06    -0.16     0.07 1.00     1756     1582
back_sexMale         0.01      0.07    -0.12     0.14 1.01     4563     1487
back_sexUNK          0.15      0.15    -0.16     0.44 1.00     3878     1375
back_hatchdate      -0.09      0.05    -0.19     0.01 1.00     2719     1631

Family Specific Parameters: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma_tarsus     0.76      0.02     0.72     0.80 1.00     3043     1382
sigma_back       0.90      0.02     0.86     0.95 1.00     2481     1515

Residual Correlations: 
                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
rescor(tarsus,back)    -0.05      0.04    -0.13     0.02 1.00     3473     1379

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

The summary output of multivariate models closely resembles those of univariate models, except that the parameters now have the corresponding response variable as prefix. Within dams, tarsus length and back color seem to be negatively correlated, while within fosternests the opposite is true. This indicates differential effects of genetic and environmental factors on these two characteristics. Further, the small residual correlation rescor(tarsus, back) on the bottom of the output indicates that there is little unmodeled dependency between tarsus length and back color. Although not necessary at this point, we have already computed and stored the LOO information criterion of fit1, which we will use for model comparisons. Next, let’s take a look at some posterior-predictive checks, which give us a first impression of the model fit.

pp_check(fit1, resp = "tarsus")

pp_check(fit1, resp = "back")

This looks pretty solid, but we notice a slight unmodeled left skewness in the distribution of tarsus. We will come back to this later on. Next, we want to investigate how much variation in the response variables can be explained by our model and we use a Bayesian generalization of the \(R^2\) coefficient.

bayes_R2(fit1)
          Estimate  Est.Error      Q2.5     Q97.5
R2tarsus 0.4349094 0.02250483 0.3884970 0.4760042
R2back   0.1981263 0.02842267 0.1431563 0.2548206

Clearly, there is much variation in both animal characteristics that we can not explain, but apparently we can explain more of the variation in tarsus length than in back color.

More Complex Multivariate Models

Now, suppose we only want to control for sex in tarsus but not in back and vice versa for hatchdate. Not that this is particularly reasonable for the present example, but it allows us to illustrate how to specify different formulas for different response variables. We can no longer use mvbind syntax and so we have to use a more verbose approach:

bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam))
bf_back <- bf(back ~ hatchdate + (1|p|fosternest) + (1|q|dam))
fit2 <- brm(bf_tarsus + bf_back + set_rescor(TRUE), 
            data = BTdata, chains = 2, cores = 2)

Note that we have literally added the two model parts via the + operator, which is in this case equivalent to writing mvbf(bf_tarsus, bf_back). See help("brmsformula") and help("mvbrmsformula") for more details about this syntax. Again, we summarize the model first.

fit2 <- add_criterion(fit2, "loo")
summary(fit2)
 Family: MV(gaussian, gaussian) 
  Links: mu = identity; sigma = identity
         mu = identity; sigma = identity 
Formula: tarsus ~ sex + (1 | p | fosternest) + (1 | q | dam) 
         back ~ hatchdate + (1 | p | fosternest) + (1 | q | dam) 
   Data: BTdata (Number of observations: 828) 
  Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 2000

Group-Level Effects: 
~dam (Number of levels: 106) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.48      0.05     0.39     0.59 1.00      827
sd(back_Intercept)                       0.25      0.07     0.10     0.38 1.01      307
cor(tarsus_Intercept,back_Intercept)    -0.50      0.22    -0.92    -0.08 1.00      554
                                     Tail_ESS
sd(tarsus_Intercept)                     1196
sd(back_Intercept)                        536
cor(tarsus_Intercept,back_Intercept)      695

~fosternest (Number of levels: 104) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.26      0.05     0.16     0.37 1.00      603
sd(back_Intercept)                       0.35      0.06     0.23     0.47 1.00      349
cor(tarsus_Intercept,back_Intercept)     0.66      0.21     0.19     0.97 1.00      228
                                     Tail_ESS
sd(tarsus_Intercept)                      812
sd(back_Intercept)                        954
cor(tarsus_Intercept,back_Intercept)      581

Population-Level Effects: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
tarsus_Intercept    -0.41      0.07    -0.55    -0.27 1.00     1334     1317
back_Intercept       0.00      0.06    -0.11     0.11 1.00     1769     1450
tarsus_sexMale       0.77      0.06     0.65     0.89 1.00     2775     1434
tarsus_sexUNK        0.22      0.13    -0.03     0.47 1.00     3137     1668
back_hatchdate      -0.08      0.05    -0.18     0.02 1.00     1816     1315

Family Specific Parameters: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma_tarsus     0.76      0.02     0.72     0.80 1.00     1812      848
sigma_back       0.90      0.02     0.86     0.95 1.00     1938     1509

Residual Correlations: 
                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
rescor(tarsus,back)    -0.05      0.04    -0.13     0.02 1.00     2589     1656

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

Let’s find out, how model fit changed due to excluding certain effects from the initial model:

loo(fit1, fit2)
Output of model 'fit1':

Computed from 2000 by 828 log-likelihood matrix

         Estimate   SE
elpd_loo  -2126.6 33.6
p_loo       176.5  7.4
looic      4253.2 67.2
------
Monte Carlo SE of elpd_loo is NA.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     810   97.8%   206       
 (0.5, 0.7]   (ok)        17    2.1%   78        
   (0.7, 1]   (bad)        1    0.1%   65        
   (1, Inf)   (very bad)   0    0.0%   <NA>      
See help('pareto-k-diagnostic') for details.

Output of model 'fit2':

Computed from 2000 by 828 log-likelihood matrix

         Estimate   SE
elpd_loo  -2125.4 33.6
p_loo       174.6  7.4
looic      4250.7 67.2
------
Monte Carlo SE of elpd_loo is NA.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     804   97.1%   180       
 (0.5, 0.7]   (ok)        22    2.7%   98        
   (0.7, 1]   (bad)        2    0.2%   45        
   (1, Inf)   (very bad)   0    0.0%   <NA>      
See help('pareto-k-diagnostic') for details.

Model comparisons:
     elpd_diff se_diff
fit2  0.0       0.0   
fit1 -1.3       1.3   

Apparently, there is no noteworthy difference in the model fit. Accordingly, we do not really need to model sex and hatchdate for both response variables, but there is also no harm in including them (so I would probably just include them).

To give you a glimpse of the capabilities of brms’ multivariate syntax, we change our model in various directions at the same time. Remember the slight left skewness of tarsus, which we will now model by using the skew_normal family instead of the gaussian family. Since we no longer have a multivariate normal (or student-t) model, estimating residual correlations is no longer possible. We make this explicit using the set_rescor function. Further, we investigate if the relationship of back and hatchdate is really linear as previously assumed by fitting a non-linear spline of hatchdate. On top of it, we model separate residual variances of tarsus for male and female chicks.

bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) +
  lf(sigma ~ 0 + sex) + skew_normal()
bf_back <- bf(back ~ s(hatchdate) + (1|p|fosternest) + (1|q|dam)) +
  gaussian()

fit3 <- brm(
  bf_tarsus + bf_back + set_rescor(FALSE),
  data = BTdata, chains = 2, cores = 2,
  control = list(adapt_delta = 0.95)
)

Again, we summarize the model and look at some posterior-predictive checks.

fit3 <- add_criterion(fit3, "loo")
summary(fit3)
 Family: MV(skew_normal, gaussian) 
  Links: mu = identity; sigma = log; alpha = identity
         mu = identity; sigma = identity 
Formula: tarsus ~ sex + (1 | p | fosternest) + (1 | q | dam) 
         sigma ~ 0 + sex
         back ~ s(hatchdate) + (1 | p | fosternest) + (1 | q | dam) 
   Data: BTdata (Number of observations: 828) 
  Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 2000

Smooth Terms: 
                       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sds(back_shatchdate_1)     1.98      1.03     0.36     4.31 1.00      553      496

Group-Level Effects: 
~dam (Number of levels: 106) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.47      0.05     0.38     0.58 1.00      774
sd(back_Intercept)                       0.23      0.07     0.10     0.37 1.01      256
cor(tarsus_Intercept,back_Intercept)    -0.54      0.23    -0.96    -0.08 1.01      256
                                     Tail_ESS
sd(tarsus_Intercept)                     1167
sd(back_Intercept)                        591
cor(tarsus_Intercept,back_Intercept)      218

~fosternest (Number of levels: 104) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.26      0.06     0.14     0.37 1.01      374
sd(back_Intercept)                       0.31      0.06     0.20     0.43 1.00      500
cor(tarsus_Intercept,back_Intercept)     0.65      0.22     0.17     0.97 1.01      271
                                     Tail_ESS
sd(tarsus_Intercept)                      717
sd(back_Intercept)                        901
cor(tarsus_Intercept,back_Intercept)      486

Population-Level Effects: 
                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
tarsus_Intercept        -0.41      0.07    -0.54    -0.28 1.00      842     1422
back_Intercept           0.00      0.05    -0.10     0.10 1.00     1270     1572
tarsus_sexMale           0.77      0.05     0.66     0.87 1.00     3045     1141
tarsus_sexUNK            0.21      0.12    -0.02     0.44 1.00     2731     1746
sigma_tarsus_sexFem     -0.30      0.04    -0.38    -0.22 1.00     2929     1561
sigma_tarsus_sexMale    -0.24      0.04    -0.32    -0.17 1.00     2338     1622
sigma_tarsus_sexUNK     -0.39      0.13    -0.64    -0.14 1.00     2202     1560
back_shatchdate_1       -0.16      3.18    -5.64     6.81 1.00      897     1036

Family Specific Parameters: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma_back       0.90      0.02     0.86     0.95 1.00     1674     1801
alpha_tarsus    -1.22      0.43    -1.87     0.05 1.00     1148      481

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

We see that the (log) residual standard deviation of tarsus is somewhat larger for chicks whose sex could not be identified as compared to male or female chicks. Further, we see from the negative alpha (skewness) parameter of tarsus that the residuals are indeed slightly left-skewed. Lastly, running

conditional_effects(fit3, "hatchdate", resp = "back")

reveals a non-linear relationship of hatchdate on the back color, which seems to change in waves over the course of the hatch dates.

There are many more modeling options for multivariate models, which are not discussed in this vignette. Examples include autocorrelation structures, Gaussian processes, or explicit non-linear predictors (e.g., see help("brmsformula") or vignette("brms_multilevel")). In fact, nearly all the flexibility of univariate models is retained in multivariate models.

References

Hadfield JD, Nutall A, Osorio D, Owens IPF (2007). Testing the phenotypic gambit: phenotypic, genetic and environmental correlations of colour. Journal of Evolutionary Biology, 20(2), 549-557.

brms/inst/doc/brms_overview.ltx0000644000176200001440000017473514213413565016406 0ustar liggesusers\documentclass[article, nojss]{jss} %\VignetteIndexEntry{Overview of the brms Package} %\VignetteEngine{R.rsp::tex} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% almost as usual \author{Paul-Christian B\"urkner} \title{\pkg{brms}: An \proglang{R} Package for Bayesian Multilevel Models using \pkg{Stan}} %% for pretty printing and a nice hypersummary also set: \Plainauthor{Paul-Christian B\"urkner} %% comma-separated \Plaintitle{brms: An R Package for Bayesian Multilevel Models using Stan} %% without formatting \Shorttitle{\pkg{brms}: Bayesian Multilevel Models using Stan} %% a short title (if necessary) %% an abstract and keywords \Abstract{ The \pkg{brms} package implements Bayesian multilevel models in \proglang{R} using the probabilistic programming language \pkg{Stan}. A wide range of distributions and link functions are supported, allowing users to fit -- among others -- linear, robust linear, binomial, Poisson, survival, response times, ordinal, quantile, zero-inflated, hurdle, and even non-linear models all in a multilevel context. Further modeling options include autocorrelation of the response variable, user defined covariance structures, censored data, as well as meta-analytic standard errors. Prior specifications are flexible and explicitly encourage users to apply prior distributions that actually reflect their beliefs. In addition, model fit can easily be assessed and compared using posterior-predictive checks and leave-one-out cross-validation. If you use \pkg{brms}, please cite this article as published in the Journal of Statistical Software \citep{brms1}. 
} \Keywords{Bayesian inference, multilevel model, ordinal data, MCMC, \proglang{Stan}, \proglang{R}} \Plainkeywords{Bayesian inference, multilevel model, ordinal data, MCMC, Stan, R} %% without formatting %% at least one keyword must be supplied %% publication information %% NOTE: Typically, this can be left commented and will be filled out by the technical editor %% \Volume{50} %% \Issue{9} %% \Month{June} %% \Year{2012} %% \Submitdate{2012-06-04} %% \Acceptdate{2012-06-04} %% The address of (at least) one author should be given %% in the following format: \Address{ Paul-Christian B\"urkner\\ E-mail: \email{paul.buerkner@gmail.com}\\ URL: \url{https://paul-buerkner.github.io} } %% It is also possible to add a telephone and fax number %% before the e-mail in the following format: %% Telephone: +43/512/507-7103 %% Fax: +43/512/507-2851 %% for those who use Sweave please include the following line (with % symbols): %% need no \usepackage{Sweave.sty} %% end of declarations %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} %% include your article here, just as usual %% Note that you should use the \pkg{}, \proglang{} and \code{} commands. \section{Introduction} Multilevel models (MLMs) offer a great flexibility for researchers across sciences \citep{brown2015, demidenko2013, gelmanMLM2006, pinheiro2006}. They allow the modeling of data measured on different levels at the same time -- for instance data of students nested within classes and schools -- thus taking complex dependency structures into account. It is not surprising that many packages for \proglang{R} \citep{Rcore2015} have been developed to fit MLMs. Possibly the most widely known package in this area is \pkg{lme4} \citep{bates2015}, which uses maximum likelihood or restricted maximum likelihood methods for model fitting. 
Although alternative Bayesian methods have several advantages over frequentist approaches (e.g., the possibility of explicitly incorporating prior knowledge about parameters into the model), their practical use was limited for a long time because the posterior distributions of more complex models (such as MLMs) could not be found analytically. Markov chain Monte Carlo (MCMC) algorithms allowing to draw random samples from the posterior were not available or too time-consuming. In the last few decades, however, this has changed with the development of new algorithms and the rapid increase of general computing power. Today, several software packages implement these techniques, for instance \pkg{WinBugs} \citep{lunn2000, spiegelhalter2003}, \pkg{OpenBugs} \citep{spiegelhalter2007}, \pkg{JAGS} \citep{plummer2013}, \pkg{MCMCglmm} \citep{hadfield2010} and \pkg{Stan} \citep{stan2017, carpenter2017} to mention only a few. With the exception of the latter, all of these programs are primarily using combinations of Metropolis-Hastings updates \citep{metropolis1953,hastings1970} and Gibbs-sampling \citep{geman1984,gelfand1990}, sometimes also coupled with slice-sampling \citep{damien1999,neal2003}. One of the main problems of these algorithms is their rather slow convergence for high-dimensional models with correlated parameters \citep{neal2011,hoffman2014,gelman2014}. Furthermore, Gibbs-sampling requires priors to be conjugate to the likelihood of parameters in order to work efficiently \citep{gelman2014}, thus reducing the freedom of the researcher in choosing a prior that reflects his or her beliefs. In contrast, \pkg{Stan} implements Hamiltonian Monte Carlo \citep{duane1987, neal2011} and its extension, the No-U-Turn Sampler (NUTS) \citep{hoffman2014}. These algorithms converge much more quickly especially for high-dimensional models regardless of whether the priors are conjugate or not \citep{hoffman2014}. 
Similar to software packages like \pkg{WinBugs}, \pkg{Stan} comes with its own programming language, allowing for great modeling flexibility (cf., \citeauthor{stanM2017} \citeyear{stanM2017}; \citeauthor{carpenter2017} \citeyear{carpenter2017}). Many researchers may still hesitate to use \pkg{Stan} directly, as every model has to be written, debugged and possibly also optimized. This may be a time-consuming and error prone process even for researchers familiar with Bayesian inference. The package \pkg{brms}, presented in this paper, aims at closing this gap (at least for MLMs) allowing the user to benefit from the merits of \pkg{Stan} only by using simple, \pkg{lme4}-like formula syntax. \pkg{brms} supports a wide range of distributions and link functions, allows for multiple grouping factors each with multiple group-level effects, autocorrelation of the response variable, user defined covariance structures, as well as flexible and explicit prior specifications. The purpose of the present article is to provide a general overview of the \pkg{brms} package (version 0.10.0). We begin by explaining the underlying structure of MLMs. Next, the software is introduced in detail using recurrence times of infection in kidney patients \citep{mcgilchrist1991} and ratings of inhaler instructions \citep{ezzet1991} as examples. We end by comparing \pkg{brms} to other \proglang{R} packages implementing MLMs and describe future plans for extending the package. \section{Model description} \label{model} The core of every MLM is the prediction of the response $y$ through the linear combination $\eta$ of predictors transformed by the inverse link function $f$ assuming a certain distribution $D$ for $y$. We write $$y_i \sim D(f(\eta_i), \theta)$$ to stress the dependency on the $i\textsuperscript{th}$ data point. In many \proglang{R} packages, $D$ is also called the `family' and we will use this term in the following. 
The parameter $\theta$ describes additional family specific parameters that typically do not vary across data points, such as the standard deviation $\sigma$ in normal models or the shape $\alpha$ in Gamma or negative binomial models. The linear predictor can generally be written as $$\eta = \mathbf{X} \beta + \mathbf{Z} u$$ In this equation, $\beta$ and $u$ are the coefficients at population-level and group-level respectively and $\mathbf{X}, \mathbf{Z}$ are the corresponding design matrices. The response $y$ as well as $\mathbf{X}$ and $\mathbf{Z}$ make up the data, whereas $\beta$, $u$, and $\theta$ are the model parameters being estimated. The coefficients $\beta$ and $u$ may be more commonly known as fixed and random effects. However, we avoid these terms in the present paper following the recommendations of \cite{gelmanMLM2006}, as they are not used unambiguously in the literature. Also, we want to make explicit that $u$ is a model parameter in the same manner as $\beta$ so that uncertainty in its estimates can be naturally evaluated. In fact, this is an important advantage of Bayesian MCMC methods as compared to maximum likelihood approaches, which do not treat $u$ as a parameter, but assume that it is part of the error term instead (cf., \citeauthor{fox2011}, \citeyear{fox2011}). Except for linear models, we do not incorporate an additional error term for every observation by default. If desired, such an error term can always be modeled using a grouping factor with as many levels as observations in the data. \subsection{Prior distributions} \subsubsection{Regression parameters at population-level} In \pkg{brms}, population-level parameters are not restricted to have normal priors. Instead, every parameter can have every one-dimensional prior implemented in \pkg{Stan}, for instance uniform, Cauchy or even Gamma priors. As a negative side effect of this flexibility, correlations between them cannot be modeled as parameters. 
If desired, point estimates of the correlations can be obtained after sampling has been done. By default, population level parameters have an improper flat prior over the reals. \subsubsection{Regression parameters at group-level} The group-level parameters $u$ are assumed to come from a multivariate normal distribution with mean zero and unknown covariance matrix $\mathbf{\Sigma}$: $$u \sim N(0, \mathbf{\Sigma})$$ As is generally the case, covariances between group-level parameters of different grouping factors are assumed to be zero. This implies that $\mathbf{Z}$ and $u$ can be split up into several matrices $\mathbf{Z_k}$ and parameter vectors $u_k$, where $k$ indexes grouping factors, so that the model can be simplified to $$u_k \sim N(0, \mathbf{\Sigma_k})$$ Usually, but not always, we can also assume group-level parameters associated with different levels (indexed by $j$) of the same grouping factor to be independent leading to $$u_{kj} \sim N(0, \mathbf{V_k})$$ The covariance matrices $\mathbf{V_k}$ are modeled as parameters. In most packages, an Inverse-Wishart distribution is used as a prior for $\mathbf{V_k}$. This is mostly because its conjugacy leads to good properties of Gibbs-Samplers \citep{gelman2014}. However, there are good arguments against the Inverse-Wishart prior \citep{natarajan2000, kass2006}. The NUTS-Sampler implemented in \pkg{Stan} does not require priors to be conjugate. This advantage is utilized in \pkg{brms}: $\mathbf{V_k}$ is parameterized in terms of a correlation matrix $\mathbf{\Omega_k}$ and a vector of standard deviations $\sigma_k$ through $$\mathbf{V_k} = \mathbf{D}(\sigma_k) \mathbf{\Omega_k} \mathbf{D}(\sigma_k)$$ where $\mathbf{D}(\sigma_k)$ denotes the diagonal matrix with diagonal elements $\sigma_k$. Priors are then specified for the parameters on the right hand side of the equation. 
For $\mathbf{\Omega_k}$, we use the LKJ-Correlation prior with parameter $\zeta > 0$ by \cite{lewandowski2009}\footnote{Internally, the Cholesky factor of the correlation matrix is used, as it is more efficient and numerically stable.}: $$\mathbf{\Omega_k} \sim \mathrm{LKJ}(\zeta)$$ The expected value of the LKJ-prior is the identity matrix (implying correlations of zero) for any positive value of $\zeta$, which can be interpreted like the shape parameter of a symmetric beta distribution \citep{stanM2017}. If $\zeta = 1$ (the default in \pkg{brms}) the density is uniform over correlation matrices of the respective dimension. If $\zeta > 1$, the identity matrix is the mode of the prior, with a sharper peak in the density for larger values of $\zeta$. If $0 < \zeta < 1$ the prior is U-shaped having a trough at the identity matrix, which leads to higher probabilities for non-zero correlations. For every element of $\sigma_k$, any prior can be applied that is defined on the non-negative reals only. As default in \pkg{brms}, we use a half Student-t prior with 3 degrees of freedom. This prior often leads to better convergence of the models than a half Cauchy prior, while still being relatively weakly informative. Sometimes -- for instance when modeling pedigrees -- different levels of the same grouping factor cannot be assumed to be independent. In this case, the covariance matrix of $u_k$ becomes $$\mathbf{\Sigma_k} = \mathbf{V_k} \otimes \mathbf{A_k}$$ where $\mathbf{A_k}$ is the known covariance matrix between levels and $\otimes$ is the Kronecker product. \subsubsection{Family specific parameters} For some families, additional parameters need to be estimated. In the current section, we only name the most important ones. Normal and Student's distributions need the parameter $\sigma$ to account for residual error variance. 
By default, $\sigma$ has a half Cauchy prior with a scale parameter that depends on the standard deviation of the response variable to remain only weakly informative regardless of the response variable's scaling. Furthermore, Student's distribution needs the parameter $\nu$ representing the degrees of freedom. By default, $\nu$ has a wide gamma prior as proposed by \cite{juarez2010}. Gamma, Weibull, and negative binomial distributions need the shape parameter $\alpha$ that also has a wide gamma prior by default. \section{Parameter estimation} The \pkg{brms} package does not fit models itself but uses \pkg{Stan} on the back-end. Accordingly, all samplers implemented in \pkg{Stan} can be used to fit \pkg{brms} models. Currently, these are the static Hamiltonian Monte-Carlo (HMC) Sampler sometimes also referred to as Hybrid Monte-Carlo \citep{neal2011, neal2003, duane1987} and its extension the No-U-Turn Sampler (NUTS) by \cite{hoffman2014}. HMC-like algorithms produce samples that are much less autocorrelated than those of other samplers such as the random-walk Metropolis algorithm \citep{hoffman2014, Creutz1988}. The main drawback of this increased efficiency is the need to calculate the gradient of the log-posterior, which can be automated using algorithmic differentiation \citep{griewank2008} but is still a time-consuming process for more complex models. Thus, using HMC leads to higher quality samples but takes more time per sample than other algorithms typically applied. Another drawback of HMC is the need to pre-specify at least two parameters, which are both critical for the performance of HMC. The NUTS Sampler allows setting these parameters automatically thus eliminating the need for any hand-tuning, while still being at least as efficient as a well tuned HMC \citep{hoffman2014}. For more details on the sampling algorithms applied in \pkg{Stan}, see the \pkg{Stan} user's manual \citep{stanM2017} as well as \cite{hoffman2014}. 
In addition to the estimation of model parameters, \pkg{brms} allows drawing samples from the posterior predictive distribution as well as from the pointwise log-likelihood. Both can be used to assess model fit. The former allows a comparison between the actual response $y$ and the response $\hat{y}$ predicted by the model. The pointwise log-likelihood can be used, among others, to calculate the widely applicable information criterion (WAIC) proposed by \cite{watanabe2010} and leave-one-out cross-validation (LOO; \citealp{gelfand1992}; \citealp{vehtari2015}; see also \citealp{ionides2008}) both allowing comparison of different models applied to the same data (lower WAICs and LOOs indicate better model fit). The WAIC can be viewed as an improvement of the popular deviance information criterion (DIC), which has been criticized by several authors (\citealp{vehtari2015}; \citealp{plummer2008}; \citealp{vanderlinde2005}; see also the discussion at the end of the original DIC paper by \citealp{spiegelhalter2002}) in part because of problems arising from the fact that the DIC is only a point estimate. In \pkg{brms}, WAIC and LOO are implemented using the \pkg{loo} package \citep{loo2016} also following the recommendations of \cite{vehtari2015}. \section{Software} \label{software} The \pkg{brms} package provides functions for fitting MLMs using \pkg{Stan} for full Bayesian inference. To install the latest release version of \pkg{brms} from CRAN, type \code{install.packages("brms")} within \proglang{R}. The current developmental version can be downloaded from GitHub via \begin{Sinput} devtools::install_github("paul-buerkner/brms") \end{Sinput} Additionally, a \proglang{C++} compiler is required. This is because \pkg{brms} internally creates \pkg{Stan} code, which is translated to \proglang{C++} and compiled afterwards. 
The program \pkg{Rtools} \citep{Rtools2015} comes with a \proglang{C++} compiler for Windows\footnote{During the installation process, there is an option to change the system \code{PATH}. Please make sure to check this option, because otherwise \pkg{Rtools} will not be available within \proglang{R}.}. On OS X, one should use \pkg{Xcode} \citep{Xcode2015} from the App Store. To check whether the compiler can be called within \proglang{R}, run \code{system("g++ -v")} when using \pkg{Rtools} or \code{system("clang++ -v")} when using \pkg{Xcode}. If no warning occurs and a few lines of difficult to read system code are printed out, the compiler should work correctly. For more detailed instructions on how to get the compilers running, see the prerequisites section on \url{https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started}. Models are fitted in \pkg{brms} using the following procedure, which is also summarized in Figure~\ref{flowchart}. First, the user specifies the model using the \code{brm} function in a way typical for most model fitting \proglang{R} functions, that is by defining \code{formula}, \code{data}, and \code{family}, as well as some other optional arguments. Second, this information is processed and the \code{make_stancode} and \code{make_standata} functions are called. The former generates the model code in \pkg{Stan} language and the latter prepares the data for use in \pkg{Stan}. These two are the mandatory parts of every \pkg{Stan} model and without \pkg{brms}, users would have to specify them themselves. Third, \pkg{Stan} code and data as well as additional arguments (such as the number of iterations and chains) are passed to functions of the \pkg{rstan} package (the \proglang{R} interface of \pkg{Stan}; \citeauthor{stan2017}, \citeyear{stan2017}). Fourth, the model is fitted by \pkg{Stan} after translating and compiling it in \proglang{C++}. 
Fifth, after the model has been fitted and returned by \pkg{rstan}, the fitted model object is post-processed in \pkg{brms} among others by renaming the model parameters to be understood by the user. Sixth, the results can be investigated in \proglang{R} using various methods such as \code{summary}, \code{plot}, or \code{predict} (for a complete list of methods type \code{methods(class = "brmsfit")}). \begin{figure}[ht] \centering \includegraphics[height = 0.4\textheight, keepaspectratio]{flowchart.pdf} \caption{High level description of the model fitting procedure used in \pkg{brms}.} \label{flowchart} \end{figure} \subsection{A worked example} In the following, we use an example about the recurrence time of an infection in kidney patients initially published by \cite{mcgilchrist1991}. The data set consists of 76 entries of 7 variables: \begin{Sinput} R> library("brms") R> data("kidney") R> head(kidney, n = 3) \end{Sinput} \begin{Soutput} time censored patient recur age sex disease 1 8 0 1 1 28 male other 2 23 0 2 1 48 female GN 3 22 0 3 1 32 male other \end{Soutput} Variable \code{time} represents the recurrence time of the infection, \code{censored} indicates if \code{time} is right censored (\code{1}) or not censored (\code{0}), variable \code{patient} is the patient id, and \code{recur} indicates if it is the first or second recurrence in that patient. Finally, variables \code{age}, \code{sex}, and \code{disease} make up the predictors. \subsection[Fitting models with brms]{Fitting models with \pkg{brms}} The core of the \pkg{brms} package is the \code{brm} function and we will explain its argument structure using the example above. Suppose we want to predict the (possibly censored) recurrence time using a log-normal model, in which the intercept as well as the effect of \code{age} is nested within patients. 
Then, we may use the following code: \begin{Sinput} fit1 <- brm(formula = time | cens(censored) ~ age * sex + disease + (1 + age|patient), data = kidney, family = lognormal(), prior = c(set_prior("normal(0,5)", class = "b"), set_prior("cauchy(0,2)", class = "sd"), set_prior("lkj(2)", class = "cor")), warmup = 1000, iter = 2000, chains = 4, control = list(adapt_delta = 0.95)) \end{Sinput} \subsection[formula: Information on the response and predictors]{\code{formula}: Information on the response and predictors} Without doubt, \code{formula} is the most complicated argument, as it contains information on the response variable as well as on predictors at different levels of the model. Everything before the $\sim$ sign relates to the response part of \code{formula}. In the usual and most simple case, this is just one variable name (e.g., \code{time}). However, to incorporate additional information about the response, one can add one or more terms of the form \code{| fun(variable)}. \code{fun} may be one of a few functions defined internally in \pkg{brms} and \code{variable} corresponds to a variable in the data set supplied by the user. In this example, \code{cens} makes up the internal function that handles censored data, and \code{censored} is the variable that contains information on the censoring. Other available functions in this context are \code{weights} and \code{disp} to allow different sorts of weighting, \code{se} to specify known standard errors primarily for meta-analysis, \code{trunc} to define truncation boundaries, \code{trials} for binomial models\footnote{In functions such as \code{glm} or \code{glmer}, the binomial response is typically passed as \code{cbind(success, failure)}. In \pkg{brms}, the equivalent syntax is \code{success | trials(success + failure)}.}, and \code{cat} to specify the number of categories for ordinal models. Everything on the right side of $\sim$ specifies predictors. Here, the syntax exactly matches that of \pkg{lme4}. 
For both, population-level and group-level terms, the \code{+} is used to separate different effects from each other. Group-level terms are of the form \code{(coefs | group)}, where \code{coefs} contains one or more variables whose effects are assumed to vary with the levels of the grouping factor given in \code{group}. Multiple grouping factors each with multiple group-level coefficients are possible. In the present example, only one group-level term is specified in which \code{1 + age} are the coefficients varying with the grouping factor \code{patient}. This implies that the intercept of the model as well as the effect of age is supposed to vary between patients. By default, group-level coefficients within a grouping factor are assumed to be correlated. Correlations can be set to zero by using the \code{(coefs || group)} syntax\footnote{In contrast to \pkg{lme4}, the \code{||} operator in \pkg{brms} splits up the design matrix computed from \code{coefs} instead of decomposing \code{coefs} in its terms. This implies that columns of the design matrix originating from the same factor are also assumed to be uncorrelated, whereas \pkg{lme4} estimates the correlations in this case. For a way to achieve \pkg{brms}-like behavior with \pkg{lme4}, see the \code{mixed} function of the \pkg{afex} package by \cite{afex2015}.}. Everything on the right side of \code{formula} that is not recognized as part of a group-level term is treated as a population-level effect. In this example, the population-level effects are \code{age}, \code{sex}, and \code{disease}. \subsection[family: Distribution of the response variable]{\code{family}: Distribution of the response variable} Argument \code{family} should usually be a family function, a call to a family function or a character string naming the family. If not otherwise specified, default link functions are applied. \pkg{brms} comes with a large variety of families. 
Linear and robust linear regression can be performed using the \code{gaussian} or \code{student} family combined with the \code{identity} link. For dichotomous and categorical data, families \code{bernoulli}, \code{binomial}, and \code{categorical} combined with the \code{logit} link, by default, are perfectly suited. Families \code{poisson}, \code{negbinomial}, and \code{geometric} allow for modeling count data. Families \code{lognormal}, \code{Gamma}, \code{exponential}, and \code{weibull} can be used (among others) for survival regression. Ordinal regression can be performed using the families \code{cumulative}, \code{cratio}, \code{sratio}, and \code{acat}. Finally, families \code{zero_inflated_poisson}, \code{zero_inflated_negbinomial}, \code{zero_inflated_binomial}, \code{zero_inflated_beta}, \code{hurdle_poisson}, \code{hurdle_negbinomial}, and \code{hurdle_gamma} can be used to adequately model excess zeros in the response. In our example, we use \code{family = lognormal()} implying a log-normal ``survival'' model for the response variable \code{time}. \subsection[prior: Prior distributions of model parameters]{\code{prior}: Prior distributions of model parameters} Every population-level effect has its corresponding regression parameter. These parameters are named as \code{b\_<name>}, where \code{<name>} represents the name of the corresponding population-level effect. The default prior is an improper flat prior over the reals. Suppose, for instance, that we want to set a normal prior with mean \code{0} and standard deviation \code{10} on the effect of \code{age} and a Cauchy prior with location \code{1} and scale \code{2} on \code{sexfemale}\footnote{When factors are used as predictors, parameter names will depend on the factor levels. To get an overview of all parameters and parameter classes for which priors can be specified, use function \code{get\_prior}. 
For the present example, \code{get\_prior(time | cens(censored) $\sim$ age * sex + disease + (1 + age|patient), data = kidney, family = lognormal())} does the desired.}. Then, we may write \begin{Sinput} prior <- c(set_prior("normal(0,10)", class = "b", coef = "age"), set_prior("cauchy(1,2)", class = "b", coef = "sexfemale")) \end{Sinput} To put the same prior (e.g., a normal prior) on all population-level effects at once, we may write as a shortcut \code{set_prior("normal(0,10)", class = "b")}. This also leads to faster sampling, because priors can be vectorized in this case. Note that we could also omit the \code{class} argument for population-level effects, as it is the default class in \code{set_prior}. A special shrinkage prior to be applied on population-level effects is the horseshoe prior \citep{carvalho2009, carvalho2010}. It is symmetric around zero with fat tails and an infinitely large spike at zero. This makes it ideal for sparse models that have many regression coefficients, although only a minority of them is non-zero. The horseshoe prior can be applied on all population-level effects at once (excluding the intercept) by using \code{set_prior("horseshoe(1)")}. The $1$ implies that the Student-$t$ prior of the local shrinkage parameters has 1 degree of freedom. In \pkg{brms} it is possible to increase the degrees of freedom (which will often improve convergence), although the prior no longer resembles a horseshoe in this case\footnote{This class of priors is often referred to as hierarchical shrinkage family, which contains the original horseshoe prior as a special case.}. For more details see \cite{carvalho2009, carvalho2010}. Each group-level effect of each grouping factor has a standard deviation parameter, which is restricted to be non-negative and, by default, has a half Student-$t$ prior with $3$ degrees of freedom and a scale parameter that is minimally $10$. 
For non-ordinal models, \pkg{brms} tries to evaluate if the scale is large enough to be considered only weakly informative for the model at hand by comparing it with the standard deviation of the response after applying the link function. If this is not the case, it will increase the scale based on the aforementioned standard deviation\footnote{Changing priors based on the data is not truly Bayesian and might rightly be criticized. However, it helps to avoid the problem of too informative default priors without always forcing users to define their own priors. The latter would also be problematic as not all users can be expected to be well educated Bayesians and reasonable default priors will help them a lot in using Bayesian methods.}. \pkg{Stan} implicitly defines a half Student-$t$ prior by using a Student-$t$ prior on a restricted parameter \citep{stanM2017}. For other reasonable priors on standard deviations see \cite{gelman2006}. In \pkg{brms}, standard deviation parameters are named as \code{sd\_<group>\_<coef>} so that \code{sd\_patient\_Intercept} and \code{sd\_patient\_age} are the parameter names in the example. If desired, it is possible to set a different prior on each parameter, but statements such as \code{set_prior("student_t(3,0,5)", class = "sd", group = "patient")} or even \code{set_prior("student_t(3,0,5)", class = "sd")} may also be used and are again faster because of vectorization. If there is more than one group-level effect per grouping factor, correlations between group-level effects are estimated. As mentioned in Section~\ref{model}, the LKJ-Correlation prior with parameter $\zeta > 0$ \citep{lewandowski2009} is used for this purpose. In \pkg{brms}, this prior is abbreviated as \code{"lkj(zeta)"} and correlation matrix parameters are named as \code{cor\_<group>} (e.g., \code{cor_patient}), so that \code{set_prior("lkj(2)", class = "cor", group = "patient")} is a valid statement. 
To set the same prior on every correlation matrix in the model, \code{set_prior("lkj(2)", class = "cor")} is also allowed, but does not come with any efficiency increases. Other model parameters such as the residual standard deviation \code{sigma} in normal models or the \code{shape} in Gamma models have their priors defined in the same way, where each of them is treated as having its own parameter class. A complete overview of possible prior distributions is given in the \pkg{Stan} user's manual \citep{stanM2017}. Note that \pkg{brms} does not thoroughly check if the priors are written in correct \pkg{Stan} language. Instead, \pkg{Stan} will check their syntactical correctness when the model is parsed to \proglang{C++} and return an error if they are not. This, however, does not imply that priors are always meaningful if they are accepted by \pkg{Stan}. Although \pkg{brms} tries to find common problems (e.g., setting bounded priors on unbounded parameters), there is no guarantee that the defined priors are reasonable for the model. \subsection[control: Adjusting the sampling behavior of Stan]{\code{control}: Adjusting the sampling behavior of \pkg{Stan}} In addition to choosing the number of iterations, warmup samples, and chains, users can control the behavior of the NUTS sampler by using the \code{control} argument. The most important reason to use \code{control} is to decrease (or eliminate at best) the number of divergent transitions that cause a bias in the obtained posterior samples. Whenever you see the warning \code{"There were x divergent transitions after warmup."}, you should really think about increasing \code{adapt_delta}. To do this, write \code{control = list(adapt_delta = <x>)}, where \code{<x>} should usually be a value between \code{0.8} (current default) and \code{1}. Increasing \code{adapt_delta} will slow down the sampler but will decrease the number of divergent transitions threatening the validity of your posterior samples. 
Another problem arises when the depth of the tree being evaluated in each iteration is exceeded. This is less common than having divergent transitions, but may also bias the posterior samples. When it happens, \pkg{Stan} will throw out a warning suggesting to increase \code{max_treedepth}, which can be accomplished by writing \code{control = list(max_treedepth = <x>)} with a positive integer \code{<x>} that should usually be larger than the current default of \code{10}. \subsection{Analyzing the results} The example model \code{fit1} is fitted using 4 chains, each with 2000 iterations of which the first 1000 are warmup to calibrate the sampler, leading to a total of 4000 posterior samples\footnote{To save time, chains may also run in parallel when using argument \code{cluster}.}. For researchers familiar with Gibbs or Metropolis-Hastings sampling, this number may seem far too small to achieve good convergence and reasonable results, especially for multilevel models. However, as \pkg{brms} utilizes the NUTS sampler \citep{hoffman2014} implemented in \pkg{Stan}, even complex models can often be fitted with not more than a few thousand samples. Of course, every iteration is more computationally intensive and time-consuming than the iterations of other algorithms, but the quality of the samples (i.e., the effective sample size per iteration) is usually higher. After the posterior samples have been computed, the \code{brm} function returns an \proglang{R} object, containing (among others) the fully commented model code in \pkg{Stan} language, the data to fit the model, and the posterior samples themselves. The model code and data for the present example can be extracted through \code{stancode(fit1)} and \code{standata(fit1)} respectively\footnote{Both model code and data may be amended and used to fit new models. That way, \pkg{brms} can also serve as a good starting point in building more complicated models in \pkg{Stan}, directly.}. 
A model summary is readily available using \begin{Sinput} R> summary(fit1, waic = TRUE) \end{Sinput} \begin{Soutput} Family: lognormal (identity) Formula: time | cens(censored) ~ age * sex + disease + (1 + age | patient) Data: kidney (Number of observations: 76) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: 673.51 Group-Level Effects: ~patient (Number of levels: 38) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 0.40 0.28 0.01 1.01 1731 1 sd(age) 0.01 0.01 0.00 0.02 1137 1 cor(Intercept,age) -0.13 0.46 -0.88 0.76 3159 1 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept 2.73 0.96 0.82 4.68 2139 1 age 0.01 0.02 -0.03 0.06 1614 1 sexfemale 2.42 1.13 0.15 4.64 2065 1 diseaseGN -0.40 0.53 -1.45 0.64 2664 1 diseaseAN -0.52 0.50 -1.48 0.48 2713 1 diseasePKD 0.60 0.74 -0.86 2.02 2968 1 age:sexfemale -0.02 0.03 -0.07 0.03 1956 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 1.15 0.13 0.91 1.44 4000 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Soutput} On the top of the output, some general information on the model is given, such as family, formula, number of iterations and chains, as well as the WAIC. Next, group-level effects are displayed separately for each grouping factor in terms of standard deviations and correlations between group-level effects. On the bottom of the output, population-level effects are displayed. If incorporated, autocorrelation and family specific parameters (e.g., the residual standard deviation \code{sigma}) are also given. 
In general, every parameter is summarized using the mean (\code{Estimate}) and the standard deviation (\code{Est.Error}) of the posterior distribution as well as two-sided 95\% Credible intervals (\code{l-95\% CI} and \code{u-95\% CI}) based on quantiles. The \code{Eff.Sample} value is an estimation of the effective sample size; that is the number of independent samples from the posterior distribution that would be expected to yield the same standard error of the posterior mean as is obtained from the dependent samples returned by the MCMC algorithm. The \code{Rhat} value provides information on the convergence of the algorithm (cf., \citeauthor{gelman1992}, \citeyear{gelman1992}). If \code{Rhat} is considerably greater than 1 (i.e., $> 1.1$), the chains have not yet converged and it is necessary to run more iterations and/or set stronger priors. To visually investigate the chains as well as the posterior distribution, the \code{plot} method can be used (see Figure~\ref{kidney_plot}). An even more detailed investigation can be achieved by applying the \pkg{shinystan} package \citep{gabry2015} through method \code{launch_shiny}. With respect to the above summary, \code{sexfemale} seems to be the only population-level effect with considerable influence on the response. Because the mean of \code{sexfemale} is positive, the model predicts longer periods without an infection for females than for males. Effects of population-level predictors can also be visualized with the \code{conditional_effects} method (see Figure~\ref{kidney_conditional_effects}). 
\begin{figure}[ht] \centering \includegraphics[width=0.95\textwidth]{kidney_plot.pdf} \caption{Trace and Density plots of all relevant parameters of the kidney model discussed in Section~\ref{software}.} \label{kidney_plot} \end{figure} \begin{figure}[ht] \centering \includegraphics[height=0.90\textheight]{kidney_conditional_effects.pdf} \caption{Conditional effects plots of all population-level predictors of the kidney model discussed in Section~\ref{software}.} \label{kidney_conditional_effects} \end{figure} Looking at the group-level effects, the standard deviation parameter of \code{age} is suspiciously small. To test whether it is smaller than the standard deviation parameter of \code{Intercept}, we apply the \code{hypothesis} method: \begin{Sinput} R> hypothesis(fit1, "Intercept - age > 0", class = "sd", group = "patient") \end{Sinput} \begin{Soutput} Hypothesis Tests for class sd_patient: Estimate Est.Error l-95% CI u-95% CI Evid.Ratio Intercept-age > 0 0.39 0.27 0.03 Inf 67.97 * --- '*': The expected value under the hypothesis lies outside the 95% CI. \end{Soutput} The one-sided 95\% credibility interval does not contain zero, thus indicating that the standard deviations differ from each other in the expected direction. In accordance with this finding, the \code{Evid.Ratio} shows that the hypothesis being tested (i.e., \code{Intercept - age > 0}) is about $68$ times more likely than the alternative hypothesis \code{Intercept - age < 0}. It is important to note that this kind of comparison is not easily possible when applying frequentist methods, because in this case only point estimates are available for group-level standard deviations and correlations. When looking at the correlation between both group-level effects, its distribution displayed in Figure~\ref{kidney_plot} and the 95\% credibility interval in the summary output appear to be rather wide. This indicates that there is not enough evidence in the data to reasonably estimate the correlation. 
Together, the small standard deviation of \code{age} and the uncertainty in the correlation raise the question if \code{age} should be modeled as a group specific term at all. To answer this question, we fit another model without this term: \begin{Sinput} R> fit2 <- update(fit1, formula. = ~ . - (1 + age|patient) + (1|patient)) \end{Sinput} A good way to compare both models is leave-one-out cross-validation (LOO)\footnote{The WAIC is an approximation of LOO that is faster and easier to compute. However, according to \cite{vehtari2015}, LOO may be the preferred method to perform model comparisons.}, which can be called in \pkg{brms} using \begin{Sinput} R> LOO(fit1, fit2) \end{Sinput} \begin{Soutput} LOOIC SE fit1 675.45 45.18 fit2 674.17 45.06 fit1 - fit2 1.28 0.99 \end{Soutput} In the output, the LOO information criterion for each model as well as the difference of the LOOs each with its corresponding standard error is shown. Both LOO and WAIC are approximately normal if the number of observations is large so that the standard errors can be very helpful in evaluating differences in the information criteria. However, for small sample sizes, standard errors should be interpreted with care \citep{vehtari2015}. For the present example, it is immediately evident that both models have very similar fit, indicating that there is little benefit in adding group specific coefficients for \code{age}. \subsection{Modeling ordinal data} In the following, we want to briefly discuss a second example to demonstrate the capabilities of \pkg{brms} in handling ordinal data. \cite{ezzet1991} analyze data from a two-treatment, two-period crossover trial to compare 2 inhalation devices for delivering the drug salbutamol in 286 asthma patients. Patients were asked to rate the clarity of leaflet instructions accompanying each device, using a four-point ordinal scale. 
Ratings are predicted by \code{treat} to indicate which of the two inhaler devices was used, \code{period} to indicate the time of administration, and \code{carry} to model possible carry-over effects. \begin{Sinput} R> data("inhaler") R> head(inhaler, n = 1) \end{Sinput} \begin{Soutput} subject rating treat period carry 1 1 1 0.5 0.5 0 \end{Soutput} Typically, the ordinal response is assumed to originate from the categorization of a latent continuous variable. That is, there are $K$ latent thresholds (model intercepts), which partition the continuous scale into the $K + 1$ observable, ordered categories. Following this approach leads to the cumulative or graded-response model \citep{samejima1969} for ordinal data implemented in many \proglang{R} packages. In \pkg{brms}, it is available via family \code{cumulative}. Fitting the cumulative model to the inhaler data, also incorporating an intercept varying by subjects, may look like this: \begin{Sinput} fit3 <- brm(formula = rating ~ treat + period + carry + (1|subject), data = inhaler, family = cumulative) \end{Sinput} While the support for ordinal data in most \proglang{R} packages ends here\footnote{Exceptions known to us are the packages \pkg{ordinal} \citep{christensen2015} and \pkg{VGAM} \citep{yee2010}. The former supports only cumulative models but with different modeling options for the thresholds. The latter supports all four ordinal families also implemented in \pkg{brms} as well as category specific effects but no group-specific effects.}, \pkg{brms} allows changes to this basic model in at least three ways. First of all, three additional ordinal families are implemented. Families \code{sratio} (stopping ratio) and \code{cratio} (continuation ratio) are so called sequential models \citep{tutz1990}. Both are equivalent to each other for symmetric link functions such as \code{logit} but will differ for asymmetric ones such as \code{cloglog}. 
The fourth ordinal family is \code{acat} (adjacent category) also known as partial credits model \citep{masters1982, andrich1978a}. Second, restrictions to the thresholds can be applied. By default, thresholds are ordered for family \code{cumulative} or are completely free to vary for the other families. This is indicated by argument \code{threshold = "flexible"} (default) in \code{brm}. Using \code{threshold = "equidistant"} forces the distance between two adjacent thresholds to be the same, that is $$\tau_k = \tau_1 + (k-1)\delta$$ for thresholds $\tau_k$ and distance $\delta$ (see also \citealp{andrich1978b}; \citealp{andrich1978a}; \citealp{andersen1977}). Third, the assumption that predictors have constant effects across categories may be relaxed for non-cumulative ordinal models \citep{vanderark2001, tutz2000} leading to category specific effects. For instance, variable \code{treat} may only have an impact on the decision between category 3 and 4, but not on the lower categories. Without using category specific effects, such a pattern would remain invisible. To illustrate all three modeling options at once, we fit a (hardly theoretically justified) stopping ratio model with equidistant thresholds and category specific effects for variable \code{treat} on which we apply an informative prior. \begin{Sinput} fit4 <- brm(formula = rating ~ period + carry + cs(treat) + (1|subject), data = inhaler, family = sratio, threshold = "equidistant", prior = set_prior("normal(-1,2)", coef = "treat")) \end{Sinput} Note that priors are defined on category specific effects in the same way as for other population-level effects. 
A model summary can be obtained in the same way as before: \begin{Sinput} R> summary(fit4, waic = TRUE) \end{Sinput} \begin{Soutput} Family: sratio (logit) Formula: rating ~ period + carry + cs(treat) + (1 | subject) Data: inhaler (Number of observations: 572) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: 911.9 Group-Level Effects: ~subject (Number of levels: 286) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 1.05 0.23 0.56 1.5 648 1 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept[1] 0.72 0.13 0.48 0.99 2048 1 Intercept[2] 2.67 0.35 2.00 3.39 969 1 Intercept[3] 4.62 0.66 3.36 5.95 1037 1 period 0.25 0.18 -0.09 0.61 4000 1 carry -0.26 0.22 -0.70 0.17 1874 1 treat[1] -0.96 0.30 -1.56 -0.40 1385 1 treat[2] -0.65 0.49 -1.60 0.27 4000 1 treat[3] -2.65 1.21 -5.00 -0.29 4000 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat delta 1.95 0.32 1.33 2.6 1181 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Soutput} Trace and density plots of the model parameters as produced by \code{plot(fit4)} can be found in Figure~\ref{inhaler_plot}. We see that three intercepts (thresholds) and three effects of \code{treat} have been estimated, because a four-point scale was used for the ratings. The treatment effect seems to be strongest between category 3 and 4. At the same time, however, the credible interval is also much larger. In fact, the intervals of all three effects of \code{treat} are highly overlapping, which indicates that there is not enough evidence in the data to support category specific effects. 
On the bottom of the output, parameter \code{delta} specifies the distance between two adjacent thresholds and indeed the intercepts differ from each other by the magnitude of \code{delta}. \begin{figure}[ht] \centering \includegraphics[width=0.95\textwidth]{inhaler_plot.pdf} \caption{Trace and Density plots of all relevant parameters of the inhaler model discussed in Section~\ref{software}.} \label{inhaler_plot} \end{figure} \section[Comparison]{Comparison between packages} Over the years, many \proglang{R} packages have been developed that implement MLMs, each being more or less general in their supported models. Comparing all of them to \pkg{brms} would be too extensive and barely helpful for the purpose of the present paper. Accordingly, we concentrate on a comparison with four packages. These are \pkg{lme4} \citep{bates2015} and \pkg{MCMCglmm} \citep{hadfield2010}, which are possibly the most general and widely applied \proglang{R} packages for MLMs, as well as \pkg{rstanarm} \citep{rstanarm2016} and \pkg{rethinking} \citep{mcelreath2016}, which are both based on \pkg{Stan}. As opposed to the other packages, \pkg{rethinking} was primarily written for teaching purposes and requires the user to specify the full model explicitly using its own simplified \pkg{BUGS}-like syntax thus helping users to better understand the models that are fitted to their data. Regarding model families, all five packages support the most common types such as linear and binomial models as well as Poisson models for count data. Currently, \pkg{brms} and \pkg{MCMCglmm} provide more flexibility when modeling categorical and ordinal data. In addition, \pkg{brms} supports robust linear regression using Student's distribution, which is also implemented on a GitHub branch of \pkg{rstanarm}. \pkg{MCMCglmm} allows fitting multinomial models that are currently not available in the other packages. 
Generalizing classical MLMs, \pkg{brms} and \pkg{MCMCglmm} allow fitting zero-inflated and hurdle models dealing with excess zeros in the response. Furthermore, \pkg{brms} supports non-linear models similar to the \pkg{nlme} package \citep{nlme2016} providing great flexibility but also requiring more care to produce reasonable results. Another flexible model class are generalized additive mixed models \citep{hastie1990,wood2011,zuur2014}, which can be fitted with \pkg{brms} and \pkg{rstanarm}. In all five packages, there are quite a few additional modeling options. Variable link functions can be specified in all packages except for \pkg{MCMCglmm}, in which only one link is available per family. \pkg{MCMCglmm} generally supports multivariate responses using data in wide format, whereas \pkg{brms} currently only offers this option for families \code{gaussian} and \code{student}. It should be noted that it is always possible to transform data from wide to long format for compatibility with the other packages. Autocorrelation of the response can only be fitted in \pkg{brms}, which supports auto-regressive as well as moving-average effects. For ordinal models in \pkg{brms}, effects of predictors may vary across different levels of the response as explained in the inhaler example. A feature currently exclusive to \pkg{rethinking} is the possibility to impute missing values in the predictor variables. Information criteria are available in all five packages. The advantage of WAIC and LOO implemented in \pkg{brms}, \pkg{rstanarm}, and \pkg{rethinking} is that their standard errors can be easily estimated to get a better sense of the uncertainty in the criteria. Comparing the prior options of the Bayesian packages, \pkg{brms} and \pkg{rethinking} offer a little more flexibility than \pkg{MCMCglmm} and \pkg{rstanarm}, as virtually any prior distribution can be applied on population-level effects as well as on the standard deviations of group-level effects. 
In addition, we believe that the way priors are specified in \pkg{brms} and \pkg{rethinking} is more intuitive as it is directly evident what prior is actually applied. A more detailed comparison of the packages can be found in Table~\ref{comparison1} and Table~\ref{comparison2}. To facilitate the understanding of the model formulation in \pkg{brms}, Table~\ref{syntax} shows \pkg{lme4} function calls to fit sample models along with the equivalent \pkg{brms} syntax. So far the focus was only on capabilities. Another important topic is speed, especially for more complex models. Of course, \pkg{lme4} is usually much faster than the other packages as it uses maximum likelihood methods instead of MCMC algorithms, which are slower by design. To compare the efficiency of the four Bayesian packages, we fitted multilevel models on real data sets using the minimum effective sample size divided by sampling time as a measure of sampling efficiency. One should always aim at running multiple chains as one cannot be sure that a single chain really explores the whole posterior distribution. However, as \pkg{MCMCglmm} does not come with a built-in option to run multiple chains, we used only a single chain to fit the models after making sure that it leads to the same results as multiple chains. The \proglang{R} code allowing to replicate the results is available as supplemental material. The first thing that becomes obvious when fitting the models is that \pkg{brms} and \pkg{rethinking} need to compile the \proglang{C++} model before actually fitting it, because the \pkg{Stan} code being parsed to \proglang{C++} is generated on the fly based on the user's input. Compilation takes about a half to one minute depending on the model complexity and computing power of the machine. This is not required by \pkg{rstanarm} and \pkg{MCMCglmm}, although the former is also based on \pkg{Stan}, as compilation takes place only once at installation time. 
While the latter approach saves the compilation time, the former is more flexible when it comes to model specification. For small and simple models, compilation time dominates the overall computation time, but for larger and more complex models, sampling will take several minutes or hours so that one minute more or less will not really matter, anymore. Accordingly, the following comparisons do not include the compilation time. In models containing only group-specific intercepts, \pkg{MCMCglmm} is usually more efficient than the \pkg{Stan} packages. However, when also estimating group-specific slopes, \pkg{MCMCglmm} falls behind the other packages and quite often refuses to sample at all unless one carefully specifies informative priors. Note that these results are obtained by running only a single chain. For all three \pkg{Stan} packages, sampling efficiency can easily be increased by running multiple chains in parallel. Comparing the \pkg{Stan} packages to each other, \pkg{brms} is usually most efficient for models with group-specific terms, whereas \pkg{rstanarm} tends to be roughly $50\%$ to $75\%$ as efficient at least for the analyzed data sets. The efficiency of \pkg{rethinking} is more variable depending on the model formulation and data, sometimes being slightly ahead of the other two packages, but usually being considerably less efficient. Generally, \pkg{rethinking} loses efficiency for models with many population-level effects presumably because one cannot use design matrices and vectorized prior specifications for population-level parameters. Note that it was not possible to specify the exact same priors across packages due to varying parameterizations. Of course, efficiency depends heavily on the model, chosen priors, and data at hand so that the present results should not be over-interpreted. 
\begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{lme4}} & \parbox{2cm}{\pkg{MCMCglmm}} \\ \hline \\ [-1.5ex] \parbox{6cm}{Supported model types:} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & no & no \\ Binomial models & yes & yes & yes \\ Categorical models & yes & no & yes \\ Multinomial models & no & no & yes \\ Count data models & yes & yes & yes \\ Survival models & yes$^1$ & yes & yes \\ Ordinal models & various & no & cumulative \\ Zero-inflated and hurdle models & yes & no & yes \\ Generalized additive models & yes & no & no \\ Non-linear models & yes & no & no \\ \hline \\ [-1.5ex] \parbox{5cm}{Additional modeling options:} & & & \\ [1ex] Variable link functions & various & various & no \\ Weights & yes & yes & no \\ Offset & yes & yes & using priors \\ Multivariate responses & limited & no & yes \\ Autocorrelation effects & yes & no & no \\ Category specific effects & yes & no & no \\ Standard errors for meta-analysis & yes & no & yes \\ Censored data & yes & no & yes \\ Truncated data & yes & no & no \\ Customized covariances & yes & no & yes \\ Missing value imputation & no & no & no \\ \hline \\ [-1.5ex] Bayesian specifics: & & & \\ [1ex] parallelization & yes & -- & no \\ population-level priors & flexible & --$^3$ & normal \\ group-level priors & normal & --$^3$ & normal \\ covariance priors & flexible & --$^3$ & restricted$^4$ \\ \hline \\ [-1.5ex] Other: & & & \\ [1ex] Estimator & HMC, NUTS & ML, REML & MH, Gibbs$^2$ \\ Information criterion & WAIC, LOO & AIC, BIC & DIC \\ \proglang{C++} compiler required & yes & no & no \\ Modularized & no & yes & no \\ \hline \end{tabular} \caption{Comparison of the capabilities of the \pkg{brms}, \pkg{lme4} and \pkg{MCMCglmm} package. Notes: (1) Weibull family only available in \pkg{brms}. (2) Estimator consists of a combination of both algorithms. (3) Priors may be imposed using the \pkg{blme} package \citep{chung2013}. 
(4) For details see \cite{hadfield2010}.} \label{comparison1} \end{table} \begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{rstanarm}} & \parbox{2cm}{\pkg{rethinking}} \\ \hline \\ [-1.5ex] \parbox{6cm}{Supported model types:} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & yes$^1$ & no \\ Binomial models & yes & yes & yes \\ Categorical models & yes & no & no \\ Multinomial models & no & no & no \\ Count data models & yes & yes & yes \\ Survival models & yes$^2$ & yes & yes \\ Ordinal models & various & cumulative$^3$ & no \\ Zero-inflated and hurdle models & yes & no & no \\ Generalized additive models & yes & yes & no \\ Non-linear models & yes & no & limited$^4$ \\ \hline \\ [-1.5ex] \parbox{5cm}{Additional modeling options:} & & & \\ [1ex] Variable link functions & various & various & various \\ Weights & yes & yes & no \\ Offset & yes & yes & yes \\ Multivariate responses & limited & no & no \\ Autocorrelation effects & yes & no & no \\ Category specific effects & yes & no & no \\ Standard errors for meta-analysis & yes & no & no \\ Censored data & yes & no & no \\ Truncated data & yes & no & yes \\ Customized covariances & yes & no & no \\ Missing value imputation & no & no & yes \\ \hline \\ [-1.5ex] Bayesian specifics: & & & \\ [1ex] parallelization & yes & yes & yes \\ population-level priors & flexible & normal, Student-t & flexible \\ group-level priors & normal & normal & normal \\ covariance priors & flexible & restricted$^5$ & flexible \\ \hline \\ [-1.5ex] Other: & & & \\ [1ex] Estimator & HMC, NUTS & HMC, NUTS & HMC, NUTS \\ Information criterion & WAIC, LOO & AIC, LOO & AIC, LOO \\ \proglang{C++} compiler required & yes & no & yes \\ Modularized & no & no & no \\ \hline \end{tabular} \caption{Comparison of the capabilities of the \pkg{brms}, \pkg{rstanarm} and \pkg{rethinking} package. Notes: (1) Currently only implemented on a branch on GitHub. 
(2) Weibull family only available in \pkg{brms}. (3) No group-level terms allowed. (4) The parser is mainly written for linear models but also accepts some non-linear model specifications. (5) For details see \url{https://github.com/stan-dev/rstanarm/wiki/Prior-distributions}.} \label{comparison2} \end{table} \begin{table}[hbtp] \centering %\renewcommand{\arraystretch}{2} \begin{tabular}{ll} Dataset & \parbox{10cm}{Function call} \\ \hline \\ [-1.5ex] \parbox{2cm}{cake} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{lmer(angle $\sim$ recipe * temperature + (1|recipe:replicate), \\ \hspace*{5ex} data = cake)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(angle $\sim$ recipe * temperature + (1|recipe:replicate), \\ \hspace*{4ex} data = cake)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{sleepstudy} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{lmer(Reaction $\sim$ Days + (Days|Subject), data = sleepstudy)}} \\ [1.5ex] \pkg{brms} & \parbox{13cm}{\code{brm(Reaction $\sim$ Days + (Days|Subject), data = sleepstudy)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{cbpp$^1$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(cbind(incidence, size - incidence) $\sim$ period + (1 | herd), \\ \hspace*{6ex} family = binomial("logit"), data = cbpp)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(incidence | trials(size) $\sim$ period + (1 | herd), \\ \hspace*{4ex} family = binomial("logit"), data = cbpp)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{grouseticks$^1$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(TICKS $\sim$ YEAR + HEIGHT + (1|BROOD) + (1|LOCATION), \\ \hspace*{6ex} family = poisson("log"), data = grouseticks)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(TICKS $\sim$ YEAR + HEIGHT + (1|BROOD) + (1|LOCATION), \\ \hspace*{4ex} family = poisson("log"), data = grouseticks)}} \\ [2ex] \hline \\ [-1ex] \parbox{2cm}{VerbAgg$^2$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(r2 $\sim$ (Anger + Gender + btype + situ)\^{}2 + (1|id) \\ \hspace*{6ex} + (1|item), family = 
binomial, data = VerbAgg)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(r2 $\sim$ (Anger + Gender + btype + situ)\^{}2 + (1|id) \\ \hspace*{4ex} + (1|item), family = bernoulli, data = VerbAgg)}} \\ [2ex] \hline \\ [-1.5ex] \end{tabular} \caption{Comparison of the model syntax of \pkg{lme4} and \pkg{brms} using data sets included in \pkg{lme4}. Notes: (1) Default links are used so that the link argument may be omitted. (2) Fitting this model takes some time. A proper prior on the population-level effects (e.g., \code{prior = set\_prior("normal(0,5)")}) may help in increasing sampling speed.} \label{syntax} \end{table} \section{Conclusion} The present paper is meant to provide a general overview on the \proglang{R} package \pkg{brms} implementing MLMs using the probabilistic programming language \pkg{Stan} for full Bayesian inference. Although only a small selection of the modeling options available in \pkg{brms} are discussed in detail, I hope that this article can serve as a good starting point to further explore the capabilities of the package. For the future, I have several plans on how to improve the functionality of \pkg{brms}. I want to include multivariate models that can handle multiple response variables coming from different distributions as well as new correlation structures for instance for spatial data. Similarly, distributional regression models as well as mixture response distributions appear to be valuable extensions of the package. I am always grateful for any suggestions and ideas regarding new features. \section*{Acknowledgments} First of all, I would like to thank the Stan Development Team for creating the probabilistic programming language \pkg{Stan}, which is an incredibly powerful and flexible tool for performing full Bayesian inference. Without it, \pkg{brms} could not fit a single model. Two anonymous reviewers provided very detailed and thoughtful suggestions to substantially improve both the package and the paper. Furthermore, Prof. 
Philipp Doebler and Prof. Heinz Holling have given valuable feedback on earlier versions of the paper. Lastly, I want to thank the many users who reported bugs or had ideas for new features, thus helping to continuously improve \pkg{brms}. \bibliography{citations_overview} \end{document} brms/inst/CITATION0000644000176200001440000000330214213413565013331 0ustar liggesusersbibentry( bibtype = "Article", title = "{brms}: An {R} Package for {Bayesian} Multilevel Models Using {Stan}", author = person(given = "Paul-Christian", family = "Bürkner"), journal = "Journal of Statistical Software", year = "2017", volume = "80", number = "1", pages = "1--28", doi = "10.18637/jss.v080.i01", header = "To cite brms in publications use:", textVersion = paste( "Paul-Christian Bürkner (2017).", "brms: An R Package for Bayesian Multilevel Models Using Stan.", "Journal of Statistical Software, 80(1), 1-28.", "doi:10.18637/jss.v080.i01" ), encoding = "UTF-8" ) bibentry( bibtype = "Article", title = "Advanced {Bayesian} Multilevel Modeling with the {R} Package {brms}", author = person(given = "Paul-Christian", family = "Bürkner"), journal = "The R Journal", year = "2018", volume = "10", number = "1", pages = "395--411", doi = "10.32614/RJ-2018-017", textVersion = paste( "Paul-Christian Bürkner (2018).", "Advanced Bayesian Multilevel Modeling with the R Package brms.", "The R Journal, 10(1), 395-411.", "doi:10.32614/RJ-2018-017" ), encoding = "UTF-8" ) bibentry( bibtype = "Article", title = "Bayesian Item Response Modeling in {R} with {brms} and {Stan}", author = person(given = "Paul-Christian", family = "Bürkner"), journal = "Journal of Statistical Software", year = "2021", volume = "100", number = "5", pages = "1--54", doi = "10.18637/jss.v100.i05", textVersion = paste( "Paul-Christian Bürkner (2021).", "Bayesian Item Response Modeling in R with brms and Stan.", "Journal of Statistical Software, 100(5), 1-54.", "doi:10.18637/jss.v100.i05" ), encoding = "UTF-8" ) 
brms/inst/chunks/0000755000176200001440000000000014504270214013464 5ustar liggesusersbrms/inst/chunks/fun_cloglog.stan0000644000176200001440000000060014361545260016654 0ustar liggesusers /* compute the cloglog link * Args: * p: a scalar in (0, 1) * Returns: * a scalar in (-Inf, Inf) */ real cloglog(real p) { return log(-log1m(p)); } /* compute the cloglog link (vectorized) * Args: * p: a vector in (0, 1) * Returns: * a vector in (-Inf, Inf) */ vector cloglog_vector(vector p) { return log(-log1m(p)); } brms/inst/chunks/fun_cholesky_cor_ma1.stan0000644000176200001440000000111114213413565020444 0ustar liggesusers /* compute the cholesky factor of a MA1 correlation matrix * Args: * ma: MA1 autocorrelation * nrows: number of rows of the covariance matrix * Returns: * A nrows x nrows MA1 covariance matrix */ matrix cholesky_cor_ma1(real ma, int nrows) { matrix[nrows, nrows] mat; mat = diag_matrix(rep_vector(1 + ma^2, nrows)); if (nrows > 1) { mat[1, 2] = ma; for (i in 2:(nrows - 1)) { mat[i, i - 1] = ma; mat[i, i + 1] = ma; } mat[nrows, nrows - 1] = ma; } return cholesky_decompose(mat); } brms/inst/chunks/fun_r2d2.stan0000644000176200001440000000042714424715563016013 0ustar liggesusers /* compute scale parameters of the R2D2 prior * Args: * phi: local weight parameters * tau2: global scale parameter * Returns: * scale parameter vector of the R2D2 prior */ vector scales_R2D2(vector phi, real tau2) { return sqrt(phi * tau2); } brms/inst/chunks/fun_scale_r_cor_by.stan0000644000176200001440000000134514477014334020203 0ustar liggesusers /* compute correlated group-level effects with 'by' variables * Args: * z: matrix of unscaled group-level effects * SD: matrix of standard deviation parameters * L: an array of cholesky factor correlation matrices * Jby: index which grouping level belongs to which by level * Returns: * matrix of scaled group-level effects */ matrix scale_r_cor_by(matrix z, matrix SD, matrix[] L, int[] Jby) { // r is stored in another dimension order than z 
matrix[cols(z), rows(z)] r; array[size(L)] matrix[rows(L[1]), cols(L[1])] LC; for (i in 1:size(LC)) { LC[i] = diag_pre_multiply(SD[, i], L[i]); } for (j in 1:rows(r)) { r[j] = transpose(LC[Jby[j]] * z[, j]); } return r; } brms/inst/chunks/fun_cholesky_cor_ar1.stan0000644000176200001440000000113014213413565020452 0ustar liggesusers /* compute the cholesky factor of an AR1 correlation matrix * Args: * ar: AR1 autocorrelation * nrows: number of rows of the covariance matrix * Returns: * A nrows x nrows matrix */ matrix cholesky_cor_ar1(real ar, int nrows) { matrix[nrows, nrows] mat; vector[nrows - 1] gamma; mat = diag_matrix(rep_vector(1, nrows)); for (i in 2:nrows) { gamma[i - 1] = pow(ar, i - 1); for (j in 1:(i - 1)) { mat[i, j] = gamma[i - j]; mat[j, i] = gamma[i - j]; } } return cholesky_decompose(mat ./ (1 - ar^2)); } brms/inst/chunks/fun_scale_r_cor_by_cov.stan0000644000176200001440000000330714477014270021051 0ustar liggesusers /* compute correlated group-level effects with 'by' variables * in the presence of a within-group covariance matrix * Args: * z: matrix of unscaled group-level effects * SD: matrix of standard deviation parameters * L: an array of cholesky factor correlation matrices * Jby: index which grouping level belongs to which by level * Lcov: cholesky factor of within-group correlation matrix * Returns: * matrix of scaled group-level effects */ matrix scale_r_cor_by_cov(matrix z, matrix SD, matrix[] L, int[] Jby, matrix Lcov) { vector[num_elements(z)] z_flat = to_vector(z); vector[num_elements(z)] r = rep_vector(0, num_elements(z)); array[size(L)] matrix[rows(L[1]), cols(L[1])] LC; int rows_z = rows(z); int rows_L = rows(L[1]); for (i in 1:size(LC)) { LC[i] = diag_pre_multiply(SD[, i], L[i]); } // kronecker product of cholesky factors times a vector for (icov in 1:rows(Lcov)) { for (jcov in 1:icov) { if (Lcov[icov, jcov] > 1e-10) { // avoid calculating products between unrelated individuals for (i in 1:rows_L) { for (j in 1:i) { // incremented 
element of the output vector int k = (rows_L * (icov - 1)) + i; // applied element of the input vector int l = (rows_L * (jcov - 1)) + j; // column number of z to which z_flat[l] belongs int m = (l - 1) / rows_z + 1; r[k] = r[k] + Lcov[icov, jcov] * LC[Jby[m]][i, j] * z_flat[l]; } } } } } // r is returned in another dimension order than z return to_matrix(r, cols(z), rows(z), 0); } brms/inst/chunks/fun_scale_time_err.stan0000644000176200001440000000447314477014450020217 0ustar liggesusers /* scale and correlate time-series residuals * using the Cholesky factor of the correlation matrix * Args: * zerr: standardized and independent residuals * sderr: standard deviation of the residuals * chol_cor: cholesky factor of the correlation matrix * nobs: number of observations in each group * begin: the first observation in each group * end: the last observation in each group * Returns: * vector of scaled and correlated residuals */ vector scale_time_err(vector zerr, real sderr, matrix chol_cor, int[] nobs, int[] begin, int[] end) { vector[rows(zerr)] err; for (i in 1:size(nobs)) { matrix[nobs[i], nobs[i]] L_i; L_i = sderr * chol_cor[1:nobs[i], 1:nobs[i]]; err[begin[i]:end[i]] = L_i * zerr[begin[i]:end[i]]; } return err; } /* scale and correlate time-series residuals * allows for flexible correlation matrix subsets * Deviating Args: * Jtime: array of time indices per group * Returns: * vector of scaled and correlated residuals */ vector scale_time_err_flex(vector zerr, real sderr, matrix chol_cor, int[] nobs, int[] begin, int[] end, int[,] Jtime) { vector[rows(zerr)] err; int I = size(nobs); array[I] int has_err = rep_array(0, I); int i = 1; matrix[rows(chol_cor), cols(chol_cor)] L; matrix[rows(chol_cor), cols(chol_cor)] Cov; L = sderr * chol_cor; Cov = multiply_lower_tri_self_transpose(L); while (i <= I) { array[nobs[i]] int iobs = Jtime[i, 1:nobs[i]]; matrix[nobs[i], nobs[i]] L_i; if (is_equal(iobs, sequence(1, rows(L)))) { // all timepoints are present in this group L_i = 
L; } else { // arbitrary subsets cannot be taken on L directly L_i = cholesky_decompose(Cov[iobs, iobs]); } err[begin[i]:end[i]] = L_i * zerr[begin[i]:end[i]]; has_err[i] = 1; // find all additional groups where we have the same timepoints for (j in (i+1):I) { if (has_err[j] == 0 && is_equal(Jtime[j], Jtime[i]) == 1) { err[begin[j]:end[j]] = L_i * zerr[begin[j]:end[j]]; has_err[j] = 1; } } while (i <= I && has_err[i] == 1) { i += 1; } } return err; } brms/inst/chunks/fun_com_poisson.stan0000644000176200001440000000755014213413565017567 0ustar liggesusers // log approximate normalizing constant of the COM poisson distribuion // approximation based on doi:10.1007/s10463-017-0629-6 // Args: see log_Z_com_poisson() real log_Z_com_poisson_approx(real log_mu, real nu) { real nu_mu = nu * exp(log_mu); real nu2 = nu^2; // first 4 terms of the residual series real log_sum_resid = log1p( nu_mu^(-1) * (nu2 - 1) / 24 + nu_mu^(-2) * (nu2 - 1) / 1152 * (nu2 + 23) + nu_mu^(-3) * (nu2 - 1) / 414720 * (5 * nu2^2 - 298 * nu2 + 11237) ); return nu_mu + log_sum_resid - ((log(2 * pi()) + log_mu) * (nu - 1) / 2 + log(nu) / 2); } // log normalizing constant of the COM Poisson distribution // implementation inspired by code of Ben Goodrich // improved following suggestions of Sebastian Weber (#892) // Args: // log_mu: log location parameter // shape: positive shape parameter real log_Z_com_poisson(real log_mu, real nu) { real log_Z; int k = 2; int M = 10000; int converged = 0; int num_terms = 50; if (nu == 1) { return exp(log_mu); } // nu == 0 or Inf will fail in this parameterization if (nu <= 0) { reject("nu must be positive"); } if (nu == positive_infinity()) { reject("nu must be finite"); } if (log_mu * nu >= log(1.5) && log_mu >= log(1.5)) { return log_Z_com_poisson_approx(log_mu, nu); } // direct computation of the truncated series // check if the Mth term of the series is small enough if (nu * (M * log_mu - lgamma(M + 1)) > -36.0) { reject("nu is too close to zero."); } // first 2 
terms of the series log_Z = log1p_exp(nu * log_mu); while (converged == 0) { // adding terms in batches simplifies the AD tape vector[num_terms + 1] log_Z_terms; int i = 1; log_Z_terms[1] = log_Z; while (i <= num_terms) { log_Z_terms[i + 1] = nu * (k * log_mu - lgamma(k + 1)); k += 1; if (log_Z_terms[i + 1] <= -36.0) { converged = 1; break; } i += 1; } log_Z = log_sum_exp(log_Z_terms[1:(i + 1)]); } return log_Z; } // COM Poisson log-PMF for a single response (log parameterization) // Args: // y: the response value // log_mu: log location parameter // shape: positive shape parameter real com_poisson_log_lpmf(int y, real log_mu, real nu) { if (nu == 1) return poisson_log_lpmf(y | log_mu); return nu * (y * log_mu - lgamma(y + 1)) - log_Z_com_poisson(log_mu, nu); } // COM Poisson log-PMF for a single response real com_poisson_lpmf(int y, real mu, real nu) { if (nu == 1) return poisson_lpmf(y | mu); return com_poisson_log_lpmf(y | log(mu), nu); } // COM Poisson log-CDF for a single response real com_poisson_lcdf(int y, real mu, real nu) { real log_mu; real log_Z; // log denominator vector[y] log_num_terms; // terms of the log numerator if (nu == 1) { return poisson_lcdf(y | mu); } // nu == 0 or Inf will fail in this parameterization if (nu <= 0) { reject("nu must be positive"); } if (nu == positive_infinity()) { reject("nu must be finite"); } if (y > 10000) { reject("cannot handle y > 10000"); } log_mu = log(mu); if (nu * (y * log_mu - lgamma(y + 1)) <= -36.0) { // y is large enough for the CDF to be very close to 1; return 0; } log_Z = log_Z_com_poisson(log_mu, nu); if (y == 0) { return -log_Z; } // first 2 terms of the series log_num_terms[1] = log1p_exp(nu * log_mu); // remaining terms of the series until y for (k in 2:y) { log_num_terms[k] = nu * (k * log_mu - lgamma(k + 1)); } return log_sum_exp(log_num_terms) - log_Z; } // COM Poisson log-CCDF for a single response real com_poisson_lccdf(int y, real mu, real nu) { return log1m_exp(com_poisson_lcdf(y | mu, nu)); } 
brms/inst/chunks/fun_student_t_time.stan0000644000176200001440000000753614361545260020274 0ustar liggesusers /* multi-student-t log-PDF for time-series covariance structures * in Cholesky parameterization and assuming homogoneous variances * Args: * y: response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: scale parameter * chol_cor: cholesky factor of the correlation matrix * nobs: number of observations in each group * begin: the first observation in each group * end: the last observation in each group * Returns: * sum of the log-PDF values of all observations */ real student_t_time_hom_lpdf(vector y, real nu, vector mu, real sigma, matrix chol_cor, int[] nobs, int[] begin, int[] end) { int I = size(nobs); vector[I] lp; for (i in 1:I) { matrix[nobs[i], nobs[i]] Cov_i; Cov_i = sigma * chol_cor[1:nobs[i], 1:nobs[i]]; Cov_i = multiply_lower_tri_self_transpose(Cov_i); lp[i] = multi_student_t_lpdf( y[begin[i]:end[i]] | nu, mu[begin[i]:end[i]], Cov_i ); } return sum(lp); } /* multi-student-t log-PDF for time-series covariance structures * in Cholesky parameterization and assuming heterogenous variances * Deviating Args: * sigma: residual scale vector * Returns: * sum of the log-PDF values of all observations */ real student_t_time_het_lpdf(vector y, real nu, vector mu, vector sigma, matrix chol_cor, int[] nobs, int[] begin, int[] end) { int I = size(nobs); vector[I] lp; for (i in 1:I) { matrix[nobs[i], nobs[i]] Cov_i; Cov_i = diag_pre_multiply(sigma[begin[i]:end[i]], chol_cor[1:nobs[i], 1:nobs[i]]); Cov_i = multiply_lower_tri_self_transpose(Cov_i); lp[i] = multi_student_t_lpdf( y[begin[i]:end[i]] | nu, mu[begin[i]:end[i]], Cov_i ); } return sum(lp); } /* multi-student-t log-PDF for time-series covariance structures * in Cholesky parameterization and assuming homogoneous variances * allows for flexible correlation matrix subsets * Deviating Args: * Jtime: array of time indices per group * Returns: * sum of the log-PDF values of all 
observations */ real student_t_time_hom_flex_lpdf(vector y, real nu, vector mu, real sigma, matrix chol_cor, int[] nobs, int[] begin, int[] end, int[,] Jtime) { int I = size(nobs); vector[I] lp; matrix[rows(chol_cor), cols(chol_cor)] Cov; Cov = multiply_lower_tri_self_transpose(sigma * chol_cor); for (i in 1:I) { int iobs[nobs[i]] = Jtime[i, 1:nobs[i]]; matrix[nobs[i], nobs[i]] Cov_i = Cov[iobs, iobs]; lp[i] = multi_student_t_lpdf( y[begin[i]:end[i]] | nu, mu[begin[i]:end[i]], Cov_i ); } return sum(lp); } /* multi-student-t log-PDF for time-series covariance structures * in Cholesky parameterization and assuming heterogenous variances * allows for flexible correlation matrix subsets * Deviating Args: * sigma: scale parameter vector * Jtime: array of time indices per group * Returns: * sum of the log-PDF values of all observations */ real student_t_time_het_flex_lpdf(vector y, real nu, vector mu, vector sigma, matrix chol_cor, int[] nobs, int[] begin, int[] end, int[,] Jtime) { int I = size(nobs); vector[I] lp; matrix[rows(chol_cor), cols(chol_cor)] Cor; Cor = multiply_lower_tri_self_transpose(chol_cor); for (i in 1:I) { int iobs[nobs[i]] = Jtime[i, 1:nobs[i]]; matrix[nobs[i], nobs[i]] Cov_i; Cov_i = quad_form_diag(Cor[iobs, iobs], sigma[begin[i]:end[i]]); lp[i] = multi_student_t_lpdf( y[begin[i]:end[i]] | nu, mu[begin[i]:end[i]], Cov_i ); } return sum(lp); } brms/inst/chunks/fun_horseshoe.stan0000644000176200001440000000110214424715563017230 0ustar liggesusers /* Efficient computation of the horseshoe scale parameters * see Appendix C.1 in https://projecteuclid.org/euclid.ejs/1513306866 * Args: * lambda: local shrinkage parameters * tau: global shrinkage parameter * c2: slap regularization parameter * Returns: * scale parameter vector of the horseshoe prior */ vector scales_horseshoe(vector lambda, real tau, real c2) { int K = rows(lambda); vector[K] lambda2 = square(lambda); vector[K] lambda_tilde = sqrt(c2 * lambda2 ./ (c2 + tau^2 * lambda2)); return lambda_tilde 
* tau; } brms/inst/chunks/fun_zero_inflated_beta.stan0000644000176200001440000000323014213413565021046 0ustar liggesusers /* zero-inflated beta log-PDF of a single response * Args: * y: the response value * mu: mean parameter of the beta distribution * phi: precision parameter of the beta distribution * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_beta_lpdf(real y, real mu, real phi, real zi) { row_vector[2] shape = [mu * phi, (1 - mu) * phi]; if (y == 0) { return bernoulli_lpmf(1 | zi); } else { return bernoulli_lpmf(0 | zi) + beta_lpdf(y | shape[1], shape[2]); } } /* zero-inflated beta log-PDF of a single response * logit parameterization of the zero-inflation part * Args: * y: the response value * mu: mean parameter of the beta distribution * phi: precision parameter of the beta distribution * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_beta_logit_lpdf(real y, real mu, real phi, real zi) { row_vector[2] shape = [mu * phi, (1 - mu) * phi]; if (y == 0) { return bernoulli_logit_lpmf(1 | zi); } else { return bernoulli_logit_lpmf(0 | zi) + beta_lpdf(y | shape[1], shape[2]); } } // zero-inflated beta log-CCDF and log-CDF functions real zero_inflated_beta_lccdf(real y, real mu, real phi, real zi) { row_vector[2] shape = [mu * phi, (1 - mu) * phi]; return bernoulli_lpmf(0 | zi) + beta_lccdf(y | shape[1], shape[2]); } real zero_inflated_beta_lcdf(real y, real mu, real phi, real zi) { return log1m_exp(zero_inflated_beta_lccdf(y | mu, phi, zi)); } brms/inst/chunks/fun_which_range.stan0000644000176200001440000000224614477015026017514 0ustar liggesusers /* how many elements are in a range of integers? 
* Args: * x: an integer array * start: start of the range (inclusive) * end: end of the range (inclusive) * Returns: * a scalar integer */ int size_range(int[] x, int start, int end) { int out = 0; for (i in 1:size(x)) { out += (x[i] >= start && x[i] <= end); } return out; } /* which elements are in a range of integers? * Args: * x: an integer array * start: start of the range (inclusive) * end: end of the range (inclusive) * Returns: * an integer array */ int[] which_range(int[] x, int start, int end) { array[size_range(x, start, end)] int out; int j = 1; for (i in 1:size(x)) { if (x[i] >= start && x[i] <= end) { out[j] = i; j += 1; } } return out; } /* adjust array values to x - start + 1 * Args: * x: an integer array * start: start of the range of values in x (inclusive) * Returns: * an integer array */ int[] start_at_one(int[] x, int start) { array[size(x)] int out; for (i in 1:size(x)) { out[i] = x[i] - start + 1; } return out; } brms/inst/chunks/fun_normal_fcor.stan0000644000176200001440000000176214213413565017537 0ustar liggesusers /* multi-normal log-PDF for fixed correlation matrices * assuming homogoneous variances * Args: * y: response vector * mu: mean parameter vector * sigma: residual standard deviation * chol_cor: cholesky factor of the correlation matrix * Returns: * sum of the log-PDF values of all observations */ real normal_fcor_hom_lpdf(vector y, vector mu, real sigma, data matrix chol_cor) { return multi_normal_cholesky_lpdf(y | mu, sigma * chol_cor); } /* multi-normal log-PDF for fixed correlation matrices * assuming heterogenous variances * Args: * y: response vector * mu: mean parameter vector * sigma: residual standard deviation vector * chol_cor: cholesky factor of the correlation matrix * Returns: * sum of the log-PDF values of all observations */ real normal_fcor_het_lpdf(vector y, vector mu, vector sigma, data matrix chol_cor) { return multi_normal_cholesky_lpdf(y | mu, diag_pre_multiply(sigma, chol_cor)); } 
brms/inst/chunks/fun_softplus.stan0000644000176200001440000000065214361545260017114 0ustar liggesusers /* softplus link function inverse to 'log1p_exp' * Args: * x: a positive scalar * Returns: * a scalar in (-Inf, Inf) */ real log_expm1(real x) { return log(expm1(x)); } /* softplus link function inverse to 'log1p_exp' (vectorized) * Args: * x: a positive vector * Returns: * a vector in (-Inf, Inf) */ vector log_expm1_vector(vector x) { return log(expm1(x)); } brms/inst/chunks/fun_zero_inflated_poisson.stan0000644000176200001440000000565114213413565021636 0ustar liggesusers /* zero-inflated poisson log-PDF of a single response * Args: * y: the response value * lambda: mean parameter of the poisson distribution * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_poisson_lpmf(int y, real lambda, real zi) { if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + poisson_lpmf(0 | lambda)); } else { return bernoulli_lpmf(0 | zi) + poisson_lpmf(y | lambda); } } /* zero-inflated poisson log-PDF of a single response * logit parameterization of the zero-inflation part * Args: * y: the response value * lambda: mean parameter of the poisson distribution * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_poisson_logit_lpmf(int y, real lambda, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + poisson_lpmf(0 | lambda)); } else { return bernoulli_logit_lpmf(0 | zi) + poisson_lpmf(y | lambda); } } /* zero-inflated poisson log-PDF of a single response * log parameterization for the poisson part * Args: * y: the response value * eta: linear predictor for poisson distribution * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_poisson_log_lpmf(int y, real eta, real zi) { if (y == 0) { return 
log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + poisson_log_lpmf(0 | eta)); } else { return bernoulli_lpmf(0 | zi) + poisson_log_lpmf(y | eta); } } /* zero-inflated poisson log-PDF of a single response * log parameterization for the poisson part * logit parameterization of the zero-inflation part * Args: * y: the response value * eta: linear predictor for poisson distribution * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_poisson_log_logit_lpmf(int y, real eta, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + poisson_log_lpmf(0 | eta)); } else { return bernoulli_logit_lpmf(0 | zi) + poisson_log_lpmf(y | eta); } } // zero-inflated poisson log-CCDF and log-CDF functions real zero_inflated_poisson_lccdf(int y, real lambda, real zi) { return bernoulli_lpmf(0 | zi) + poisson_lccdf(y | lambda); } real zero_inflated_poisson_lcdf(int y, real lambda, real zi) { return log1m_exp(zero_inflated_poisson_lccdf(y | lambda, zi)); } brms/inst/chunks/fun_zero_inflated_beta_binomial.stan0000644000176200001440000000500114224021424022705 0ustar liggesusers /* zero-inflated beta-binomial log-PDF of a single response * Args: * y: the response value * trials: number of trials of the binomial part * mu: mean parameter of the beta distribution * phi: precision parameter of the beta distribution * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_beta_binomial_lpmf(int y, int trials, real mu, real phi, real zi) { if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + beta_binomial_lpmf(0 | trials, mu * phi, (1 - mu) * phi)); } else { return bernoulli_lpmf(0 | zi) + beta_binomial_lpmf(y | trials, mu * phi, (1 - mu) * phi); } } /* zero-inflated beta-binomial log-PDF of a single response * logit parameterization of the zero-inflation part * Args: * y: the response 
value * trials: number of trials of the binomial part * mu: mean parameter of the beta distribution * phi: precision parameter of the beta distribution * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_beta_binomial_logit_lpmf(int y, int trials, real mu, real phi, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + beta_binomial_lpmf(0 | trials, mu * phi, (1 - mu) * phi)); } else { return bernoulli_logit_lpmf(0 | zi) + beta_binomial_lpmf(y | trials, mu * phi, (1 - mu) * phi); } } // zero-inflated beta-binomial log-CCDF and log-CDF functions real zero_inflated_beta_binomial_lccdf(int y, int trials, real mu, real phi, real zi) { return bernoulli_lpmf(0 | zi) + beta_binomial_lccdf(y | trials, mu * phi, (1 - mu) * phi); } real zero_inflated_beta_binomial_lcdf(int y, int trials, real mu, real phi, real zi) { return log1m_exp(zero_inflated_beta_binomial_lccdf(y | trials, mu, phi, zi)); } brms/inst/chunks/fun_squareplus.stan0000644000176200001440000000163214361545260017440 0ustar liggesusers /* squareplus inverse link function (squareplus itself) * Args: * x: a scalar in (-Inf, Inf) * Returns: * a positive scalar */ real squareplus(real x) { return (x + sqrt(square(x) + 4)) / 2; } /* squareplus inverse link function (squareplus itself; vectorized) * Args: * x: a vector in (-Inf, Inf) * Returns: * a positive vector */ vector squareplus_vector(vector x) { return (x + sqrt(square(x) + 4)) / 2; } /* squareplus link function (inverse squareplus) * Args: * x: a positive scalar * Returns: * a scalar in (-Inf, Inf) */ real inv_squareplus(real x) { return (square(x) - 1) ./ x; } /* squareplus link function (inverse squareplus; vectorized) * Args: * x: a positive vector * Returns: * a vector in (-Inf, Inf) */ vector inv_squareplus_vector(vector x) { return (square(x) - 1) ./ x; } 
brms/inst/chunks/fun_gaussian_process_approx.stan0000644000176200001440000000211614477017337022202 0ustar liggesusers /* Spectral density function of a Gaussian process * with squared exponential covariance kernel * Args: * x: array of numeric values of dimension NB x D * sdgp: marginal SD parameter * lscale: vector of length-scale parameters * Returns: * numeric values of the function evaluated at 'x' */ vector spd_cov_exp_quad(data vector[] x, real sdgp, vector lscale) { int NB = dims(x)[1]; int D = dims(x)[2]; int Dls = rows(lscale); vector[NB] out; if (Dls == 1) { // one dimensional or isotropic GP real constant = square(sdgp) * (sqrt(2 * pi()) * lscale[1])^D; real neg_half_lscale2 = -0.5 * square(lscale[1]); for (m in 1:NB) { out[m] = constant * exp(neg_half_lscale2 * dot_self(x[m])); } } else { // multi-dimensional non-isotropic GP real constant = square(sdgp) * sqrt(2 * pi())^D * prod(lscale); vector[Dls] neg_half_lscale2 = -0.5 * square(lscale); for (m in 1:NB) { out[m] = constant * exp(dot_product(neg_half_lscale2, square(x[m]))); } } return out; } brms/inst/chunks/fun_zero_inflated_asym_laplace.stan0000644000176200001440000000440514213413565022572 0ustar liggesusers /* zero-inflated asymmetric laplace log-PDF for a single response * Args: * y: the response value * mu: location parameter * sigma: positive scale parameter * quantile: quantile parameter in (0, 1) * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_asym_laplace_lpdf(real y, real mu, real sigma, real quantile, real zi) { if (y == 0) { return bernoulli_lpmf(1 | zi); } else { return bernoulli_lpmf(0 | zi) + asym_laplace_lpdf(y | mu, sigma, quantile); } } /* zero-inflated asymmetric laplace log-PDF for a single response * Args: * y: the response value * mu: location parameter * sigma: positive scale parameter * quantile: quantile parameter in (0, 1) * zi: linear predictor of the zero-inflation probability * Returns: * a scalar to be added 
to the log posterior */ real zero_inflated_asym_laplace_logit_lpdf(real y, real mu, real sigma, real quantile, real zi) { if (y == 0) { return bernoulli_logit_lpmf(1 | zi); } else { return bernoulli_logit_lpmf(0 | zi) + asym_laplace_lpdf(y | mu, sigma, quantile); } } // zero-inflated asymmetric laplace log-CDF function real zero_inflated_asym_laplace_lcdf(real y, real mu, real sigma, real quantile, real zi) { if (y < 0) { return bernoulli_lpmf(0 | zi) + asym_laplace_lcdf(y | mu, sigma, quantile); } else { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + asym_laplace_lcdf(y | mu, sigma, quantile)); } } // zero-inflated asymmetric laplace log-CCDF function real zero_inflated_asym_laplace_lccdf(real y, real mu, real sigma, real quantile, real zi) { if (y > 0) { return bernoulli_lpmf(0 | zi) + asym_laplace_lccdf(y | mu, sigma, quantile); } else { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + asym_laplace_lccdf(y | mu, sigma, quantile)); } } brms/inst/chunks/fun_wiener_diffusion.stan0000644000176200001440000000122214213413565020564 0ustar liggesusers /* Wiener diffusion log-PDF for a single response * Args: * y: reaction time data * dec: decision data (0 or 1) * alpha: boundary separation parameter > 0 * tau: non-decision time parameter > 0 * beta: initial bias parameter in [0, 1] * delta: drift rate parameter * Returns: * a scalar to be added to the log posterior */ real wiener_diffusion_lpdf(real y, int dec, real alpha, real tau, real beta, real delta) { if (dec == 1) { return wiener_lpdf(y | alpha, tau, beta, delta); } else { return wiener_lpdf(y | alpha, tau, 1 - beta, - delta); } } brms/inst/chunks/fun_cholesky_cor_arma1.stan0000644000176200001440000000130714213413565020776 0ustar liggesusers /* compute the cholesky factor of an ARMA1 correlation matrix * Args: * ar: AR1 autocorrelation * ma: MA1 autocorrelation * nrows: number of rows of the covariance matrix * Returns: * A nrows x nrows matrix */ matrix 
cholesky_cor_arma1(real ar, real ma, int nrows) { matrix[nrows, nrows] mat; vector[nrows] gamma; mat = diag_matrix(rep_vector(1 + ma^2 + 2 * ar * ma, nrows)); gamma[1] = (1 + ar * ma) * (ar + ma); for (i in 2:nrows) { gamma[i] = gamma[1] * pow(ar, i - 1); for (j in 1:(i - 1)) { mat[i, j] = gamma[i - j]; mat[j, i] = gamma[i - j]; } } return cholesky_decompose(mat ./ (1 - ar^2)); } brms/inst/chunks/fun_normal_time.stan0000644000176200001440000001244314477014163017545 0ustar liggesusers /* multi-normal log-PDF for time-series covariance structures * in Cholesky parameterization and assuming homogoneous variances * Args: * y: response vector * mu: mean parameter vector * sigma: residual standard deviation * chol_cor: cholesky factor of the correlation matrix * nobs: number of observations in each group * begin: the first observation in each group * end: the last observation in each group * Returns: * sum of the log-PDF values of all observations */ real normal_time_hom_lpdf(vector y, vector mu, real sigma, matrix chol_cor, int[] nobs, int[] begin, int[] end) { int I = size(nobs); vector[I] lp; matrix[rows(chol_cor), cols(chol_cor)] L = sigma * chol_cor; for (i in 1:I) { matrix[nobs[i], nobs[i]] L_i = L[1:nobs[i], 1:nobs[i]]; lp[i] = multi_normal_cholesky_lpdf( y[begin[i]:end[i]] | mu[begin[i]:end[i]], L_i ); } return sum(lp); } /* multi-normal log-PDF for time-series covariance structures * in Cholesky parameterization and assuming heterogenous variances * Deviating Args: * sigma: residual standard deviation vector * Returns: * sum of the log-PDF values of all observations */ real normal_time_het_lpdf(vector y, vector mu, vector sigma, matrix chol_cor, int[] nobs, int[] begin, int[] end) { int I = size(nobs); vector[I] lp; for (i in 1:I) { matrix[nobs[i], nobs[i]] L_i; L_i = diag_pre_multiply(sigma[begin[i]:end[i]], chol_cor[1:nobs[i], 1:nobs[i]]); lp[i] = multi_normal_cholesky_lpdf( y[begin[i]:end[i]] | mu[begin[i]:end[i]], L_i ); } return sum(lp); } /* multi-normal 
log-PDF for time-series covariance structures * in Cholesky parameterization and assuming homogoneous variances * allows for flexible correlation matrix subsets * Deviating Args: * Jtime: array of time indices per group * Returns: * sum of the log-PDF values of all observations */ real normal_time_hom_flex_lpdf(vector y, vector mu, real sigma, matrix chol_cor, int[] nobs, int[] begin, int[] end, int[,] Jtime) { real lp = 0.0; int I = size(nobs); array[I] int has_lp = rep_array(0, I); int i = 1; matrix[rows(chol_cor), cols(chol_cor)] L; matrix[rows(chol_cor), cols(chol_cor)] Cov; L = sigma * chol_cor; Cov = multiply_lower_tri_self_transpose(L); while (i <= I) { array[nobs[i]] int iobs = Jtime[i, 1:nobs[i]]; array[I-i+1] int lp_terms = rep_array(0, I-i+1); matrix[nobs[i], nobs[i]] L_i; if (is_equal(iobs, sequence(1, rows(L)))) { // all timepoints are present in this group L_i = L; } else { // arbitrary subsets cannot be taken on L directly L_i = cholesky_decompose(Cov[iobs, iobs]); } has_lp[i] = 1; lp_terms[1] = 1; // find all additional groups where we have the same timepoints for (j in (i+1):I) { if (has_lp[j] == 0 && is_equal(Jtime[j], Jtime[i]) == 1) { has_lp[j] = 1; lp_terms[j-i+1] = 1; } } // vectorize the log likelihood by stacking the vectors lp += multi_normal_cholesky_lpdf( stack_vectors(y, nobs[i], lp_terms, begin[i:I], end[i:I]) | stack_vectors(mu, nobs[i], lp_terms, begin[i:I], end[i:I]), L_i ); while (i <= I && has_lp[i] == 1) { i += 1; } } return lp; } /* multi-normal log-PDF for time-series covariance structures * in Cholesky parameterization and assuming heterogenous variances * allows for flexible correlation matrix subsets * Deviating Args: * sigma: residual standard deviation vectors * Jtime: array of time indices per group * Returns: * sum of the log-PDF values of all observations */ real normal_time_het_flex_lpdf(vector y, vector mu, vector sigma, matrix chol_cor, int[] nobs, int[] begin, int[] end, int[,] Jtime) { int I = size(nobs); vector[I] 
lp; array[I] int has_lp = rep_array(0, I); int i = 1; matrix[rows(chol_cor), cols(chol_cor)] Cor; Cor = multiply_lower_tri_self_transpose(chol_cor); while (i <= I) { array[nobs[i]] int iobs = Jtime[i, 1:nobs[i]]; matrix[nobs[i], nobs[i]] Lcor_i; matrix[nobs[i], nobs[i]] L_i; if (is_equal(iobs, sequence(1, rows(chol_cor)))) { // all timepoints are present in this group Lcor_i = chol_cor; } else { // arbitrary subsets cannot be taken on chol_cor directly Lcor_i = cholesky_decompose(Cor[iobs, iobs]); } L_i = diag_pre_multiply(sigma[begin[i]:end[i]], Lcor_i); lp[i] = multi_normal_cholesky_lpdf(y[begin[i]:end[i]] | mu[begin[i]:end[i]], L_i); has_lp[i] = 1; // find all additional groups where we have the same timepoints for (j in (i+1):I) { if (has_lp[j] == 0 && is_equal(Jtime[j], Jtime[i]) == 1) { // group j may have different sigmas that group i L_i = diag_pre_multiply(sigma[begin[j]:end[j]], Lcor_i); lp[j] = multi_normal_cholesky_lpdf(y[begin[j]:end[j]] | mu[begin[j]:end[j]], L_i); has_lp[j] = 1; } } while (i <= I && has_lp[i] == 1) { i += 1; } } return sum(lp); } brms/inst/chunks/fun_inv_gaussian.stan0000644000176200001440000000323214300667214017715 0ustar liggesusers /* inverse Gaussian log-PDF for a single response * Args: * y: the response value * mu: positive mean parameter * shape: positive shape parameter * Returns: * a scalar to be added to the log posterior */ real inv_gaussian_lpdf(real y, real mu, real shape) { return 0.5 * log(shape / (2 * pi())) - 1.5 * log(y) - 0.5 * shape * square((y - mu) / (mu * sqrt(y))); } /* vectorized inverse Gaussian log-PDF * Args: * y: response vector * mu: positive mean parameter vector * shape: positive shape parameter * Returns: * a scalar to be added to the log posterior */ real inv_gaussian_vector_lpdf(vector y, vector mu, real shape) { return 0.5 * rows(y) * log(shape / (2 * pi())) - 1.5 * sum(log(y)) - 0.5 * shape * dot_self((y - mu) ./ (mu .* sqrt(y))); } /* inverse Gaussian log-CDF for a single quantile * Args: * y: a 
quantile * mu: positive mean parameter * shape: positive shape parameter * Returns: * log(P(Y <= y)) */ real inv_gaussian_lcdf(real y, real mu, real shape) { return log(Phi(sqrt(shape) / sqrt(y) * (y / mu - 1)) + exp(2 * shape / mu) * Phi(-sqrt(shape) / sqrt(y) * (y / mu + 1))); } /* inverse Gaussian log-CCDF for a single quantile * Args: * y: a quantile * mu: positive mean parameter * shape: positive shape parameter * Returns: * log(P(Y > y)) */ real inv_gaussian_lccdf(real y, real mu, real shape) { return log1m(Phi(sqrt(shape) / sqrt(y) * (y / mu - 1)) - exp(2 * shape / mu) * Phi(-sqrt(shape) / sqrt(y) * (y / mu + 1))); } brms/inst/chunks/fun_stack_vectors.stan0000644000176200001440000000076014477014603020107 0ustar liggesusers /* grouped data stored linearly in "data" as indexed by begin and end * is repacked to be stacked into an array of vectors. */ vector[] stack_vectors(vector long_data, int n, int[] stack, int[] begin, int[] end) { int S = sum(stack); int G = size(stack); array[S] vector[n] stacked; int j = 1; for (i in 1:G) { if (stack[i] == 1) { stacked[j] = long_data[begin[i]:end[i]]; j += 1; } } return stacked; } brms/inst/chunks/fun_is_equal.stan0000644000176200001440000000040714361545260017035 0ustar liggesusers // are two 1D integer arrays equal? 
int is_equal(int[] a, int[] b) { int n_a = size(a); int n_b = size(b); if (n_a != n_b) { return 0; } for (i in 1:n_a) { if (a[i] != b[i]) { return 0; } } return 1; } brms/inst/chunks/fun_zero_one_inflated_beta.stan0000644000176200001440000000143714213413565021716 0ustar liggesusers /* zero-one-inflated beta log-PDF of a single response * Args: * y: response value * mu: mean parameter of the beta part * phi: precision parameter of the beta part * zoi: zero-one-inflation probability * coi: conditional one-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_one_inflated_beta_lpdf(real y, real mu, real phi, real zoi, real coi) { row_vector[2] shape = [mu * phi, (1 - mu) * phi]; if (y == 0) { return bernoulli_lpmf(1 | zoi) + bernoulli_lpmf(0 | coi); } else if (y == 1) { return bernoulli_lpmf(1 | zoi) + bernoulli_lpmf(1 | coi); } else { return bernoulli_lpmf(0 | zoi) + beta_lpdf(y | shape[1], shape[2]); } } brms/inst/chunks/fun_hurdle_gamma.stan0000644000176200001440000000264714213413565017666 0ustar liggesusers /* hurdle gamma log-PDF of a single response * Args: * y: the response value * alpha: shape parameter of the gamma distribution * beta: rate parameter of the gamma distribution * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_gamma_lpdf(real y, real alpha, real beta, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + gamma_lpdf(y | alpha, beta); } } /* hurdle gamma log-PDF of a single response * logit parameterization of the hurdle part * Args: * y: the response value * alpha: shape parameter of the gamma distribution * beta: rate parameter of the gamma distribution * hu: linear predictor for the hurdle part * Returns: * a scalar to be added to the log posterior */ real hurdle_gamma_logit_lpdf(real y, real alpha, real beta, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + 
gamma_lpdf(y | alpha, beta); } } // hurdle gamma log-CCDF and log-CDF functions real hurdle_gamma_lccdf(real y, real alpha, real beta, real hu) { return bernoulli_lpmf(0 | hu) + gamma_lccdf(y | alpha, beta); } real hurdle_gamma_lcdf(real y, real alpha, real beta, real hu) { return log1m_exp(hurdle_gamma_lccdf(y | alpha, beta, hu)); } brms/inst/chunks/fun_asym_laplace.stan0000644000176200001440000000342514213413565017666 0ustar liggesusers /* helper function for asym_laplace_lpdf * Args: * y: the response value * quantile: quantile parameter in (0, 1) */ real rho_quantile(real y, real quantile) { if (y < 0) { return y * (quantile - 1); } else { return y * quantile; } } /* asymmetric laplace log-PDF for a single response * Args: * y: the response value * mu: location parameter * sigma: positive scale parameter * quantile: quantile parameter in (0, 1) * Returns: * a scalar to be added to the log posterior */ real asym_laplace_lpdf(real y, real mu, real sigma, real quantile) { return log(quantile * (1 - quantile)) - log(sigma) - rho_quantile((y - mu) / sigma, quantile); } /* asymmetric laplace log-CDF for a single quantile * Args: * y: a quantile * mu: location parameter * sigma: positive scale parameter * quantile: quantile parameter in (0, 1) * Returns: * a scalar to be added to the log posterior */ real asym_laplace_lcdf(real y, real mu, real sigma, real quantile) { if (y < mu) { return log(quantile) + (1 - quantile) * (y - mu) / sigma; } else { return log1m((1 - quantile) * exp(-quantile * (y - mu) / sigma)); } } /* asymmetric laplace log-CCDF for a single quantile * Args: * y: a quantile * mu: location parameter * sigma: positive scale parameter * quantile: quantile parameter in (0, 1) * Returns: * a scalar to be added to the log posterior */ real asym_laplace_lccdf(real y, real mu, real sigma, real quantile) { if (y < mu) { return log1m(quantile * exp((1 - quantile) * (y - mu) / sigma)); } else { return log1m(quantile) - quantile * (y - mu) / sigma; } } 
brms/inst/chunks/fun_sparse_icar_lpdf.stan0000644000176200001440000000254414213413565020535 0ustar liggesusers /* Return the log probability of an intrinsic conditional autoregressive * (ICAR) prior with a sparse representation for the adjacency matrix * Full credit to Max Joseph (https://github.com/mbjoseph/CARstan) * Args: * phi: Vector containing the CAR parameters for each location * sdcar: Standard deviation parameter for the CAR prior * Nloc: Number of locations * Nedges: Number of edges (adjacency pairs) * Nneigh: Number of neighbors for each location * eigenW: Eigenvalues of D^(-1/2) * W * D^(-1/2) * edges1, edges2: Sparse representation of adjacency matrix * Details: * D = Diag(Nneigh) * Returns: * Log probability density of CAR prior up to additive constant */ real sparse_icar_lpdf(vector phi, real sdcar, int Nloc, int Nedges, data vector Nneigh, data vector eigenW, int[] edges1, int[] edges2) { real tau; // precision parameter row_vector[Nloc] phit_D; // phi' * D row_vector[Nloc] phit_W; // phi' * W tau = inv_square(sdcar); phit_D = (phi .* Nneigh)'; phit_W = rep_row_vector(0, Nloc); for (i in 1:Nedges) { phit_W[edges1[i]] = phit_W[edges1[i]] + phi[edges2[i]]; phit_W[edges2[i]] = phit_W[edges2[i]] + phi[edges1[i]]; } return 0.5 * ((Nloc - 1) * log(tau) - tau * (phit_D * phi - (phit_W * phi))); } brms/inst/chunks/fun_hurdle_poisson.stan0000644000176200001440000000475314213413565020276 0ustar liggesusers /* hurdle poisson log-PDF of a single response * Args: * y: the response value * lambda: mean parameter of the poisson distribution * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_poisson_lpmf(int y, real lambda, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + poisson_lpmf(y | lambda) - log1m_exp(-lambda); } } /* hurdle poisson log-PDF of a single response * logit parameterization of the hurdle part * Args: * y: the response value * lambda: mean parameter of the 
poisson distribution * hu: linear predictor for hurdle part * Returns: * a scalar to be added to the log posterior */ real hurdle_poisson_logit_lpmf(int y, real lambda, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + poisson_lpmf(y | lambda) - log1m_exp(-lambda); } } /* hurdle poisson log-PDF of a single response * log parameterization for the poisson part * Args: * y: the response value * eta: linear predictor for poisson part * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_poisson_log_lpmf(int y, real eta, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + poisson_log_lpmf(y | eta) - log1m_exp(-exp(eta)); } } /* hurdle poisson log-PDF of a single response * log parameterization for the poisson part * logit parameterization of the hurdle part * Args: * y: the response value * eta: linear predictor for poisson part * hu: linear predictor for hurdle part * Returns: * a scalar to be added to the log posterior */ real hurdle_poisson_log_logit_lpmf(int y, real eta, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + poisson_log_lpmf(y | eta) - log1m_exp(-exp(eta)); } } // hurdle poisson log-CCDF and log-CDF functions real hurdle_poisson_lccdf(int y, real lambda, real hu) { return bernoulli_lpmf(0 | hu) + poisson_lccdf(y | lambda) - log1m_exp(-lambda); } real hurdle_poisson_lcdf(int y, real lambda, real hu) { return log1m_exp(hurdle_poisson_lccdf(y | lambda, hu)); } brms/inst/chunks/fun_zero_inflated_negbinomial.stan0000644000176200001440000000712714213413565022430 0ustar liggesusers /* zero-inflated negative binomial log-PDF of a single response * Args: * y: the response value * mu: mean parameter of negative binomial distribution * phi: shape parameter of negative binomial distribution * zi: zero-inflation probability * Returns: * a scalar to be added to the 
log posterior */ real zero_inflated_neg_binomial_lpmf(int y, real mu, real phi, real zi) { if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + neg_binomial_2_lpmf(0 | mu, phi)); } else { return bernoulli_lpmf(0 | zi) + neg_binomial_2_lpmf(y | mu, phi); } } /* zero-inflated negative binomial log-PDF of a single response * logit parameterization of the zero-inflation part * Args: * y: the response value * mu: mean parameter of negative binomial distribution * phi: shape parameter of negative binomial distribution * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_neg_binomial_logit_lpmf(int y, real mu, real phi, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + neg_binomial_2_lpmf(0 | mu, phi)); } else { return bernoulli_logit_lpmf(0 | zi) + neg_binomial_2_lpmf(y | mu, phi); } } /* zero-inflated negative binomial log-PDF of a single response * log parameterization for the negative binomial part * Args: * y: the response value * eta: linear predictor for negative binomial distribution * phi: shape parameter of negative binomial distribution * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_neg_binomial_log_lpmf(int y, real eta, real phi, real zi) { if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + neg_binomial_2_log_lpmf(0 | eta, phi)); } else { return bernoulli_lpmf(0 | zi) + neg_binomial_2_log_lpmf(y | eta, phi); } } /* zero-inflated negative binomial log-PDF of a single response * log parameterization for the negative binomial part * logit parameterization of the zero-inflation part * Args: * y: the response value * eta: linear predictor for negative binomial distribution * phi: shape parameter of negative binomial distribution * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log 
posterior */ real zero_inflated_neg_binomial_log_logit_lpmf(int y, real eta, real phi, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + neg_binomial_2_log_lpmf(0 | eta, phi)); } else { return bernoulli_logit_lpmf(0 | zi) + neg_binomial_2_log_lpmf(y | eta, phi); } } // zero_inflated negative binomial log-CCDF and log-CDF functions real zero_inflated_neg_binomial_lccdf(int y, real mu, real phi, real hu) { return bernoulli_lpmf(0 | hu) + neg_binomial_2_lccdf(y | mu, phi); } real zero_inflated_neg_binomial_lcdf(int y, real mu, real phi, real hu) { return log1m_exp(zero_inflated_neg_binomial_lccdf(y | mu, phi, hu)); } brms/inst/chunks/fun_normal_time_se.stan0000644000176200001440000001033514361545260020231 0ustar liggesusers /* multi-normal log-PDF for time-series covariance structures * in Cholesky parameterization and assuming homogoneous variances * and known standard errors * Args: * y: response vector * mu: mean parameter vector * sigma: residual standard deviation * se2: square of user defined standard errors * chol_cor: cholesky factor of the correlation matrix * nobs: number of observations in each group * begin: the first observation in each group * end: the last observation in each group * Returns: * sum of the log-PDF values of all observations */ real normal_time_hom_se_lpdf(vector y, vector mu, real sigma, data vector se2, matrix chol_cor, int[] nobs, int[] begin, int[] end) { int I = size(nobs); vector[I] lp; matrix[rows(chol_cor), cols(chol_cor)] Cov; Cov = multiply_lower_tri_self_transpose(sigma * chol_cor); for (i in 1:I) { matrix[nobs[i], nobs[i]] Cov_i = Cov[1:nobs[i], 1:nobs[i]]; // need to add 'se' to the covariance matrix itself Cov_i += diag_matrix(se2[begin[i]:end[i]]); lp[i] = multi_normal_lpdf(y[begin[i]:end[i]] | mu[begin[i]:end[i]], Cov_i); } return sum(lp); } /* multi-normal log-PDF for time-series covariance structures * in Cholesky parameterization and assuming heterogenous variances 
* and known standard errors * Deviating Args: * sigma: residual standard deviation vector * Returns: * sum of the log-PDF values of all observations */ real normal_time_het_se_lpdf(vector y, vector mu, vector sigma, data vector se2, matrix chol_cor, int[] nobs, int[] begin, int[] end) { int I = size(nobs); vector[I] lp; for (i in 1:I) { matrix[nobs[i], nobs[i]] Cov_i; Cov_i = diag_pre_multiply(sigma[begin[i]:end[i]], chol_cor[1:nobs[i], 1:nobs[i]]); // need to add 'se' to the covariance matrix itself Cov_i = multiply_lower_tri_self_transpose(Cov_i); Cov_i += diag_matrix(se2[begin[i]:end[i]]); lp[i] = multi_normal_lpdf(y[begin[i]:end[i]] | mu[begin[i]:end[i]], Cov_i); } return sum(lp); } /* multi-normal log-PDF for time-series covariance structures * in Cholesky parameterization and assuming homogoneous variances * and known standard errors * allows for flexible correlation matrix subsets * Deviating Args: * Jtime: array of time indices per group * Returns: * sum of the log-PDF values of all observations */ real normal_time_hom_se_flex_lpdf(vector y, vector mu, real sigma, data vector se2, matrix chol_cor, int[] nobs, int[] begin, int[] end, int[,] Jtime) { int I = size(nobs); vector[I] lp; matrix[rows(chol_cor), cols(chol_cor)] Cov; Cov = multiply_lower_tri_self_transpose(sigma * chol_cor); for (i in 1:I) { int iobs[nobs[i]] = Jtime[i, 1:nobs[i]]; matrix[nobs[i], nobs[i]] Cov_i = Cov[iobs, iobs]; Cov_i += diag_matrix(se2[begin[i]:end[i]]); lp[i] = multi_normal_lpdf(y[begin[i]:end[i]] | mu[begin[i]:end[i]], Cov_i); } return sum(lp); } /* multi-normal log-PDF for time-series covariance structures * in Cholesky parameterization and assuming heterogenous variances * and known standard errors * allows for flexible correlation matrix subsets * Deviating Args: * sigma: residual standard deviation vector * Jtime: array of time indices per group * Returns: * sum of the log-PDF values of all observations */ real normal_time_het_se_flex_lpdf(vector y, vector mu, vector sigma, 
data vector se2, matrix chol_cor, int[] nobs, int[] begin, int[] end, int[,] Jtime) { int I = size(nobs); vector[I] lp; matrix[rows(chol_cor), cols(chol_cor)] Cor; Cor = multiply_lower_tri_self_transpose(chol_cor); for (i in 1:I) { int iobs[nobs[i]] = Jtime[i, 1:nobs[i]]; matrix[nobs[i], nobs[i]] Cov_i; Cov_i = quad_form_diag(Cor[iobs, iobs], sigma[begin[i]:end[i]]); Cov_i += diag_matrix(se2[begin[i]:end[i]]); lp[i] = multi_normal_lpdf(y[begin[i]:end[i]] | mu[begin[i]:end[i]], Cov_i); } return sum(lp); } brms/inst/chunks/fun_tan_half.stan0000644000176200001440000000145514361545260017013 0ustar liggesusers /* compute the tan_half link * Args: * x: a scalar in (-pi, pi) * Returns: * a scalar in (-Inf, Inf) */ real tan_half(real x) { return tan(x / 2); } /* compute the tan_half link (vectorized) * Args: * x: a vector in (-pi, pi) * Returns: * a vector in (-Inf, Inf) */ vector tan_half_vector(vector x) { return tan(x / 2); } /* compute the inverse of the tan_half link * Args: * y: a scalar in (-Inf, Inf) * Returns: * a scalar in (-pi, pi) */ real inv_tan_half(real y) { return 2 * atan(y); } /* compute the inverse of the tan_half link (vectorized) * Args: * y: a vector in (-Inf, Inf) * Returns: * a vector in (-pi, pi) */ vector inv_tan_half_vector(vector y) { return 2 * atan(y); } brms/inst/chunks/fun_normal_errorsar.stan0000644000176200001440000000147114213413565020442 0ustar liggesusers /* normal log-pdf for spatially lagged residuals * Args: * y: the response vector * mu: mean parameter vector * sigma: residual standard deviation * rho: positive autoregressive parameter * W: spatial weight matrix * eigenW: precomputed eigenvalues of W * Returns: * a scalar to be added to the log posterior */ real normal_errorsar_lpdf(vector y, vector mu, real sigma, real rho, data matrix W, data vector eigenW) { int N = rows(y); real inv_sigma2 = inv_square(sigma); matrix[N, N] W_tilde = add_diag(-rho * W, 1); vector[N] half_pred; real log_det; half_pred = W_tilde * (y - mu); log_det 
= sum(log1m(rho * eigenW)); return 0.5 * N * log(inv_sigma2) + log_det - 0.5 * dot_self(half_pred) * inv_sigma2; } brms/inst/chunks/fun_discrete_weibull.stan0000644000176200001440000000122414213413565020554 0ustar liggesusers /* discrete Weibull log-PMF for a single response * Args: * y: the response value * mu: location parameter on the unit interval * shape: positive shape parameter * Returns: * a scalar to be added to the log posterior */ real discrete_weibull_lpmf(int y, real mu, real shape) { return log(mu^y^shape - mu^(y+1)^shape); } // discrete Weibull log-CDF for a single response real discrete_weibull_lcdf(int y, real mu, real shape) { return log1m(mu^(y + 1)^shape); } // discrete Weibull log-CCDF for a single response real discrete_weibull_lccdf(int y, real mu, real shape) { return lmultiply((y + 1)^shape, mu); } brms/inst/chunks/fun_student_t_time_se.stan0000644000176200001440000001052514361545260020753 0ustar liggesusers /* multi-student-t log-PDF for time-series covariance structures * in Cholesky parameterization and assuming homogoneous variances * and known standard errors * Args: * y: response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: scale parameter * se2: square of user defined standard errors * chol_cor: cholesky factor of the correlation matrix * nobs: number of observations in each group * begin: the first observation in each group * end: the last observation in each group * Returns: * sum of the log-PDF values of all observations */ real student_t_time_hom_se_lpdf(vector y, real nu, vector mu, real sigma, data vector se2, matrix chol_cor, int[] nobs, int[] begin, int[] end) { int I = size(nobs); vector[I] lp; matrix[rows(chol_cor), cols(chol_cor)] Cov; Cov = multiply_lower_tri_self_transpose(sigma * chol_cor); for (i in 1:I) { matrix[nobs[i], nobs[i]] Cov_i = Cov[1:nobs[i], 1:nobs[i]]; // need to add 'se' to the covariance matrix itself Cov_i += diag_matrix(se2[begin[i]:end[i]]); lp[i] = 
multi_student_t_lpdf(y[begin[i]:end[i]] | nu, mu[begin[i]:end[i]], Cov_i); } return sum(lp); } /* multi-student-t log-PDF for time-series covariance structures * in Cholesky parameterization and assuming heterogenous variances * and known standard errors * Deviating Args: * sigma: scale parameter vector * Returns: * sum of the log-PDF values of all observations */ real student_t_time_het_se_lpdf(vector y, real nu, vector mu, vector sigma, data vector se2, matrix chol_cor, int[] nobs, int[] begin, int[] end) { int I = size(nobs); vector[I] lp; for (i in 1:I) { matrix[nobs[i], nobs[i]] Cov_i; Cov_i = diag_pre_multiply(sigma[begin[i]:end[i]], chol_cor[1:nobs[i], 1:nobs[i]]); Cov_i = multiply_lower_tri_self_transpose(Cov_i); Cov_i += diag_matrix(se2[begin[i]:end[i]]); lp[i] = multi_student_t_lpdf(y[begin[i]:end[i]] | nu, mu[begin[i]:end[i]], Cov_i); } return sum(lp); } /* multi-student-t log-PDF for time-series covariance structures * in Cholesky parameterization and assuming homogoneous variances * and known standard errors * allows for flexible correlation matrix subsets * Deviating Args: * Jtime: array of time indices per group * Returns: * sum of the log-PDF values of all observations */ real student_t_time_hom_se_flex_lpdf(vector y, real nu, vector mu, real sigma, data vector se2, matrix chol_cor, int[] nobs, int[] begin, int[] end, int[,] Jtime) { int I = size(nobs); vector[I] lp; matrix[rows(chol_cor), cols(chol_cor)] Cov; Cov = multiply_lower_tri_self_transpose(sigma * chol_cor); for (i in 1:I) { int iobs[nobs[i]] = Jtime[i, 1:nobs[i]]; matrix[nobs[i], nobs[i]] Cov_i = Cov[iobs, iobs]; Cov_i += diag_matrix(se2[begin[i]:end[i]]); lp[i] = multi_student_t_lpdf(y[begin[i]:end[i]] | nu, mu[begin[i]:end[i]], Cov_i); } return sum(lp); } /* multi-student-t log-PDF for time-series covariance structures * in Cholesky parameterization and assuming heterogenous variances * and known standard errors * allows for flexible correlation matrix subsets * Deviating Args: * sigma: 
scale parameter vector * Jtime: array of time indices per group * Returns: * sum of the log-PDF values of all observations */ real student_t_time_het_se_flex_lpdf(vector y, real nu, vector mu, vector sigma, data vector se2, matrix chol_cor, int[] nobs, int[] begin, int[] end, int[,] Jtime) { int I = size(nobs); vector[I] lp; matrix[rows(chol_cor), cols(chol_cor)] Cor; Cor = multiply_lower_tri_self_transpose(chol_cor); for (i in 1:I) { int iobs[nobs[i]] = Jtime[i, 1:nobs[i]]; matrix[nobs[i], nobs[i]] Cov_i; Cov_i = quad_form_diag(Cor[iobs, iobs], sigma[begin[i]:end[i]]); Cov_i += diag_matrix(se2[begin[i]:end[i]]); lp[i] = multi_student_t_lpdf(y[begin[i]:end[i]] | nu, mu[begin[i]:end[i]], Cov_i); } return sum(lp); } brms/inst/chunks/fun_student_t_fcor.stan0000644000176200001440000000250714213413565020256 0ustar liggesusers /* multi-student-t log-PDF for fixed correlation matrices * assuming homogoneous variances * Args: * y: response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: scale parameter * chol_cor: cholesky factor of the correlation matrix * Returns: * sum of the log-PDF values of all observations */ real student_t_fcor_hom_lpdf(vector y, real nu, vector mu, real sigma, data matrix chol_cor) { int N = rows(chol_cor); matrix[N, N] Cov = multiply_lower_tri_self_transpose(sigma * chol_cor); return multi_student_t_lpdf(y | nu, mu, Cov); } /* multi-student-t log-PDF for fixed correlation matrices * assuming heterogenous variances * Args: * y: response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: scale parameter vector * chol_cor: cholesky factor of the correlation matrix * Returns: * sum of the log-PDF values of all observations */ real student_t_fcor_het_lpdf(vector y, real nu, vector mu, vector sigma, data matrix chol_cor) { int N = rows(chol_cor); matrix[N, N] Cov = diag_pre_multiply(sigma, chol_cor); Cov = multiply_lower_tri_self_transpose(Cov); return multi_student_t_lpdf(y | nu, mu, Cov); } 
brms/inst/chunks/fun_dirichlet_logit.stan0000644000176200001440000000053614213413565020401 0ustar liggesusers /* dirichlet-logit log-PDF * Args: * y: vector of real response values * mu: vector of category logit probabilities * phi: precision parameter * Returns: * a scalar to be added to the log posterior */ real dirichlet_logit_lpdf(vector y, vector mu, real phi) { return dirichlet_lpdf(y | softmax(mu) * phi); } brms/inst/chunks/fun_logm1.stan0000644000176200001440000000140214361545260016246 0ustar liggesusers /* compute the logm1 link * Args: * p: a positive scalar * Returns: * a scalar in (-Inf, Inf) */ real logm1(real y) { return log(y - 1.0); } /* compute the logm1 link (vectorized) * Args: * p: a positive vector * Returns: * a vector in (-Inf, Inf) */ vector logm1_vector(vector y) { return log(y - 1.0); } /* compute the inverse of the logm1 link * Args: * y: a scalar in (-Inf, Inf) * Returns: * a positive scalar */ real expp1(real y) { return exp(y) + 1.0; } /* compute the inverse of the logm1 link (vectorized) * Args: * y: a vector in (-Inf, Inf) * Returns: * a positive vector */ vector expp1_vector(vector y) { return exp(y) + 1.0; } brms/inst/chunks/fun_scale_r_cor.stan0000644000176200001440000000064314213413565017506 0ustar liggesusers /* compute correlated group-level effects * Args: * z: matrix of unscaled group-level effects * SD: vector of standard deviation parameters * L: cholesky factor correlation matrix * Returns: * matrix of scaled group-level effects */ matrix scale_r_cor(matrix z, vector SD, matrix L) { // r is stored in another dimension order than z return transpose(diag_pre_multiply(SD, L) * z); } brms/inst/chunks/fun_normal_lagsar.stan0000644000176200001440000000146314213413565020055 0ustar liggesusers /* normal log-pdf for spatially lagged responses * Args: * y: the response vector * mu: mean parameter vector * sigma: residual standard deviation * rho: positive autoregressive parameter * W: spatial weight matrix * eigenW: precomputed 
eigenvalues of W * Returns: * a scalar to be added to the log posterior */ real normal_lagsar_lpdf(vector y, vector mu, real sigma, real rho, data matrix W, data vector eigenW) { int N = rows(y); real inv_sigma2 = inv_square(sigma); matrix[N, N] W_tilde = add_diag(-rho * W, 1); vector[N] half_pred; real log_det; half_pred = W_tilde * y - mu; log_det = sum(log1m(rho * eigenW)); return 0.5 * N * log(inv_sigma2) + log_det - 0.5 * dot_self(half_pred) * inv_sigma2; } brms/inst/chunks/fun_student_t_errorsar.stan0000644000176200001440000000177714213413565021174 0ustar liggesusers /* student-t log-pdf for spatially lagged residuals * Args: * y: the response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: residual scale parameter * rho: positive autoregressive parameter * W: spatial weight matrix * eigenW: precomputed eigenvalues of W * Returns: * a scalar to be added to the log posterior */ real student_t_errorsar_lpdf(vector y, real nu, vector mu, real sigma, real rho, data matrix W, data vector eigenW) { int N = rows(y); real K = rows(y); // avoid integer division warning real inv_sigma2 = inv_square(sigma); matrix[N, N] W_tilde = add_diag(-rho * W, 1); vector[N] half_pred; real log_det; half_pred = W_tilde * (y - mu); log_det = sum(log1m(rho * eigenW)); return - K / 2 * log(nu) + lgamma((nu + K) / 2) - lgamma(nu / 2) + 0.5 * K * log(inv_sigma2) + log_det - (nu + K) / 2 * log1p(dot_self(half_pred) * inv_sigma2 / nu); } brms/inst/chunks/fun_scale_r_cor_cov.stan0000644000176200001440000000256714213413565020364 0ustar liggesusers /* compute correlated group-level effects * in the presence of a within-group covariance matrix * Args: * z: matrix of unscaled group-level effects * SD: vector of standard deviation parameters * L: cholesky factor correlation matrix * Lcov: cholesky factor of within-group correlation matrix * Returns: * matrix of scaled group-level effects */ matrix scale_r_cor_cov(matrix z, vector SD, matrix L, matrix Lcov) { 
vector[num_elements(z)] z_flat = to_vector(z); vector[num_elements(z)] r = rep_vector(0, num_elements(z)); matrix[rows(L), cols(L)] LC = diag_pre_multiply(SD, L); int rows_z = rows(z); int rows_L = rows(L); // kronecker product of cholesky factors times a vector for (icov in 1:rows(Lcov)) { for (jcov in 1:icov) { if (Lcov[icov, jcov] > 1e-10) { // avoid calculating products between unrelated individuals for (i in 1:rows_L) { for (j in 1:i) { // incremented element of the output vector int k = (rows_L * (icov - 1)) + i; // applied element of the input vector int l = (rows_L * (jcov - 1)) + j; r[k] = r[k] + Lcov[icov, jcov] * LC[i, j] * z_flat[l]; } } } } } // r is returned in another dimension order than z return to_matrix(r, cols(z), rows(z), 0); } brms/inst/chunks/fun_cauchit.stan0000644000176200001440000000150014361545260016646 0ustar liggesusers /* compute the cauchit link * Args: * p: a scalar in (0, 1) * Returns: * a scalar in (-Inf, Inf) */ real cauchit(real p) { return tan(pi() * (p - 0.5)); } /* compute the cauchit link (vectorized) * Args: * p: a vector in (0, 1) * Returns: * a vector in (-Inf, Inf) */ vector cauchit_vector(vector p) { return tan(pi() * (p - 0.5)); } /* compute the inverse of the cauchit link * Args: * y: a scalar in (-Inf, Inf) * Returns: * a scalar in (0, 1) */ real inv_cauchit(real y) { return atan(y) / pi() + 0.5; } /* compute the inverse of the cauchit link (vectorized) * Args: * y: a vector in (-Inf, Inf) * Returns: * a vector in (0, 1) */ vector inv_cauchit_vector(vector y) { return atan(y) / pi() + 0.5; } brms/inst/chunks/fun_sparse_car_lpdf.stan0000644000176200001440000000302314213413565020355 0ustar liggesusers /* Return the log probability of a proper conditional autoregressive (CAR) * prior with a sparse representation for the adjacency matrix * Full credit to Max Joseph (https://github.com/mbjoseph/CARstan) * Args: * phi: Vector containing the CAR parameters for each location * car: Dependence (usually spatial) parameter for 
the CAR prior * sdcar: Standard deviation parameter for the CAR prior * Nloc: Number of locations * Nedges: Number of edges (adjacency pairs) * Nneigh: Number of neighbors for each location * eigenW: Eigenvalues of D^(-1/2) * W * D^(-1/2) * edges1, edges2: Sparse representation of adjacency matrix * Details: * D = Diag(Nneigh) * Returns: * Log probability density of CAR prior up to additive constant */ real sparse_car_lpdf(vector phi, real car, real sdcar, int Nloc, int Nedges, data vector Nneigh, data vector eigenW, int[] edges1, int[] edges2) { real tau; // precision parameter row_vector[Nloc] phit_D; // phi' * D row_vector[Nloc] phit_W; // phi' * W vector[Nloc] ldet; tau = inv_square(sdcar); phit_D = (phi .* Nneigh)'; phit_W = rep_row_vector(0, Nloc); for (i in 1:Nedges) { phit_W[edges1[i]] = phit_W[edges1[i]] + phi[edges2[i]]; phit_W[edges2[i]] = phit_W[edges2[i]] + phi[edges1[i]]; } for (i in 1:Nloc) { ldet[i] = log1m(car * eigenW[i]); } return 0.5 * (Nloc * log(tau) + sum(ldet) - tau * (phit_D * phi - car * (phit_W * phi))); } brms/inst/chunks/fun_logistic_normal.stan0000644000176200001440000000246314213413565020422 0ustar liggesusers /* multi-logit transform * Args: * y: simplex vector of length D * ref: a single integer in 1:D indicating the reference category * Returns: * an unbounded real vector of length D - 1 */ vector multi_logit(vector y, int ref) { vector[rows(y) - 1] x; for (i in 1:(ref - 1)) { x[i] = log(y[i]) - log(y[ref]); } for (i in (ref+1):rows(y)) { x[i - 1] = log(y[i]) - log(y[ref]); } return(x); } /* logistic-normal log-PDF * Args: * y: simplex vector of response values (length D) * mu: vector of means on the logit scale (length D-1) * sigma: vector for standard deviations on the logit scale (length D-1) * Lcor: Cholesky correlation matrix on the logit scale (dim D-1) * ref: a single integer in 1:D indicating the reference category * Returns: * a scalar to be added to the log posterior */ real logistic_normal_cholesky_cor_lpdf(vector y, 
vector mu, vector sigma, matrix Lcor, int ref) { int D = rows(y); vector[D - 1] x = multi_logit(y, ref); matrix[D - 1, D - 1] Lcov = diag_pre_multiply(sigma, Lcor); // multi-normal plus Jacobian adjustment of multivariate logit transform return multi_normal_cholesky_lpdf(x | mu, Lcov) - sum(log(y)); } brms/inst/chunks/fun_zero_inflated_binomial.stan0000644000176200001440000000671014213413565021733 0ustar liggesusers /* zero-inflated binomial log-PDF of a single response * Args: * y: the response value * trials: number of trials of the binomial part * theta: probability parameter of the binomial part * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_binomial_lpmf(int y, int trials, real theta, real zi) { if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + binomial_lpmf(0 | trials, theta)); } else { return bernoulli_lpmf(0 | zi) + binomial_lpmf(y | trials, theta); } } /* zero-inflated binomial log-PDF of a single response * logit parameterization of the zero-inflation part * Args: * y: the response value * trials: number of trials of the binomial part * theta: probability parameter of the binomial part * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_binomial_logit_lpmf(int y, int trials, real theta, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + binomial_lpmf(0 | trials, theta)); } else { return bernoulli_logit_lpmf(0 | zi) + binomial_lpmf(y | trials, theta); } } /* zero-inflated binomial log-PDF of a single response * logit parameterization of the binomial part * Args: * y: the response value * trials: number of trials of the binomial part * eta: linear predictor for binomial part * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_binomial_blogit_lpmf(int y, int trials, real eta, real zi) 
{ if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + binomial_logit_lpmf(0 | trials, eta)); } else { return bernoulli_lpmf(0 | zi) + binomial_logit_lpmf(y | trials, eta); } } /* zero-inflated binomial log-PDF of a single response * logit parameterization of the binomial part * logit parameterization of the zero-inflation part * Args: * y: the response value * trials: number of trials of the binomial part * eta: linear predictor for binomial part * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_binomial_blogit_logit_lpmf(int y, int trials, real eta, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + binomial_logit_lpmf(0 | trials, eta)); } else { return bernoulli_logit_lpmf(0 | zi) + binomial_logit_lpmf(y | trials, eta); } } // zero-inflated binomial log-CCDF and log-CDF functions real zero_inflated_binomial_lccdf(int y, int trials, real theta, real zi) { return bernoulli_lpmf(0 | zi) + binomial_lccdf(y | trials, theta); } real zero_inflated_binomial_lcdf(int y, int trials, real theta, real zi) { return log1m_exp(zero_inflated_binomial_lccdf(y | trials, theta, zi)); } brms/inst/chunks/fun_cox.stan0000644000176200001440000000270614213413565016026 0ustar liggesusers /* distribution functions of the Cox proportional hazards model * parameterize hazard(t) = baseline(t) * mu * so that higher values of 'mu' imply lower survival times * Args: * y: the response value; currently ignored as the relevant * information is passed via 'bhaz' and 'cbhaz' * mu: positive location parameter * bhaz: baseline hazard * cbhaz: cumulative baseline hazard */ real cox_lhaz(real y, real mu, real bhaz, real cbhaz) { return log(bhaz) + log(mu); } real cox_lccdf(real y, real mu, real bhaz, real cbhaz) { // equivalent to the log survival function return - cbhaz * mu; } real cox_lcdf(real y, real mu, real bhaz, real cbhaz) { return 
log1m_exp(cox_lccdf(y | mu, bhaz, cbhaz)); } real cox_lpdf(real y, real mu, real bhaz, real cbhaz) { return cox_lhaz(y, mu, bhaz, cbhaz) + cox_lccdf(y | mu, bhaz, cbhaz); } // Distribution functions of the Cox model in log parameterization real cox_log_lhaz(real y, real log_mu, real bhaz, real cbhaz) { return log(bhaz) + log_mu; } real cox_log_lccdf(real y, real log_mu, real bhaz, real cbhaz) { return - cbhaz * exp(log_mu); } real cox_log_lcdf(real y, real log_mu, real bhaz, real cbhaz) { return log1m_exp(cox_log_lccdf(y | log_mu, bhaz, cbhaz)); } real cox_log_lpdf(real y, real log_mu, real bhaz, real cbhaz) { return cox_log_lhaz(y, log_mu, bhaz, cbhaz) + cox_log_lccdf(y | log_mu, bhaz, cbhaz); } brms/inst/chunks/fun_cholesky_cor_cosy.stan0000644000176200001440000000104214213413565020746 0ustar liggesusers /* compute the cholesky factor of a compound symmetry correlation matrix * Args: * cosy: compound symmetry correlation * nrows: number of rows of the covariance matrix * Returns: * A nrows x nrows covariance matrix */ matrix cholesky_cor_cosy(real cosy, int nrows) { matrix[nrows, nrows] mat; mat = diag_matrix(rep_vector(1, nrows)); for (i in 2:nrows) { for (j in 1:(i - 1)) { mat[i, j] = cosy; mat[j, i] = mat[i, j]; } } return cholesky_decompose(mat); } brms/inst/chunks/fun_von_mises.stan0000644000176200001440000000210414213413565017227 0ustar liggesusers /* von Mises log-PDF of a single response * for kappa > 100 the normal approximation is used * for reasons of numerial stability * Args: * y: the response vector between -pi and pi * mu: location parameter vector * kappa: precision parameter * Returns: * a scalar to be added to the log posterior */ real von_mises_real_lpdf(real y, real mu, real kappa) { if (kappa < 100) { return von_mises_lpdf(y | mu, kappa); } else { return normal_lpdf(y | mu, sqrt(1 / kappa)); } } /* von Mises log-PDF of a response vector * for kappa > 100 the normal approximation is used * for reasons of numerial stability * Args: * y: the 
response vector between -pi and pi * mu: location parameter vector * kappa: precision parameter * Returns: * a scalar to be added to the log posterior */ real von_mises_vector_lpdf(vector y, vector mu, real kappa) { if (kappa < 100) { return von_mises_lpdf(y | mu, kappa); } else { return normal_lpdf(y | mu, sqrt(1 / kappa)); } } brms/inst/chunks/fun_scale_xi.stan0000644000176200001440000000206614160105076017017 0ustar liggesusers /* scale auxiliary parameter xi to a suitable region * expecting sigma to be a scalar * Args: * xi: unscaled shape parameter * y: response values * mu: location parameter * sigma: scale parameter * Returns: * scaled shape parameter xi */ real scale_xi(real xi, vector y, vector mu, real sigma) { vector[rows(y)] x = (y - mu) / sigma; vector[2] bounds = [-inv(min(x)), -inv(max(x))]'; real lb = min(bounds); real ub = max(bounds); return inv_logit(xi) * (ub - lb) + lb; } /* scale auxiliary parameter xi to a suitable region * expecting sigma to be a vector * Args: * xi: unscaled shape parameter * y: response values * mu: location parameter * sigma: scale parameter * Returns: * scaled shape parameter xi */ real scale_xi_vector(real xi, vector y, vector mu, vector sigma) { vector[rows(y)] x = (y - mu) ./ sigma; vector[2] bounds = [-inv(min(x)), -inv(max(x))]'; real lb = min(bounds); real ub = max(bounds); return inv_logit(xi) * (ub - lb) + lb; } brms/inst/chunks/fun_monotonic.stan0000644000176200001440000000047014361545260017240 0ustar liggesusers /* compute monotonic effects * Args: * scale: a simplex parameter * i: index to sum over the simplex * Returns: * a scalar between 0 and rows(scale) */ real mo(vector scale, int i) { if (i == 0) { return 0; } else { return rows(scale) * sum(scale[1:i]); } } brms/inst/chunks/fun_gen_extreme_value.stan0000644000176200001440000000254314213413565020732 0ustar liggesusers /* generalized extreme value log-PDF for a single response * Args: * y: the response value * mu: location parameter * sigma: scale 
parameter * xi: shape parameter * Returns: * a scalar to be added to the log posterior */ real gen_extreme_value_lpdf(real y, real mu, real sigma, real xi) { real x = (y - mu) / sigma; if (xi == 0) { return - log(sigma) - x - exp(-x); } else { real t = 1 + xi * x; real inv_xi = 1 / xi; return - log(sigma) - (1 + inv_xi) * log(t) - pow(t, -inv_xi); } } /* generalized extreme value log-CDF for a single response * Args: * y: a quantile * mu: location parameter * sigma: scale parameter * xi: shape parameter * Returns: * log(P(Y <= y)) */ real gen_extreme_value_lcdf(real y, real mu, real sigma, real xi) { real x = (y - mu) / sigma; if (xi == 0) { return - exp(-x); } else { return - pow(1 + xi * x, - 1 / xi); } } /* generalized extreme value log-CCDF for a single response * Args: * y: a quantile * mu: location parameter * sigma: scale parameter * xi: shape parameter * Returns: * log(P(Y > y)) */ real gen_extreme_value_lccdf(real y, real mu, real sigma, real xi) { return log1m_exp(gen_extreme_value_lcdf(y | mu, sigma, xi)); } brms/inst/chunks/fun_softit.stan0000644000176200001440000000152614361545260016546 0ustar liggesusers /* compute the softit link * Args: * p: a scalar in (0, 1) * Returns: * a scalar in (-Inf, Inf) */ real softit(real p) { return log(expm1(-p / (p - 1))); } /* compute the softit link (vectorized) * Args: * p: a vector in (0, 1) * Returns: * a vector in (-Inf, Inf) */ vector softit_vector(vector p) { return log(expm1(-p / (p - 1))); } /* compute the inverse of the sofit link * Args: * y: a scalar in (-Inf, Inf) * Returns: * a scalar in (0, 1) */ real inv_softit(real y) { return log1p_exp(y) / (1 + log1p_exp(y)); } /* compute the inverse of the sofit link (vectorized) * Args: * y: a vector in (-Inf, Inf) * Returns: * a vector in (0, 1) */ vector inv_softit_vector(vector y) { return log1p_exp(y) / (1 + log1p_exp(y)); } brms/inst/chunks/fun_hurdle_negbinomial.stan0000644000176200001440000000600114213413565021054 0ustar liggesusers /* hurdle negative 
binomial log-PDF of a single response * Args: * y: the response value * mu: mean parameter of negative binomial distribution * phi: shape parameter of negative binomial distribution * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_neg_binomial_lpmf(int y, real mu, real phi, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + neg_binomial_2_lpmf(y | mu, phi) - log1m((phi / (mu + phi))^phi); } } /* hurdle negative binomial log-PDF of a single response * logit parameterization for the hurdle part * Args: * y: the response value * mu: mean parameter of negative binomial distribution * phi: phi parameter of negative binomial distribution * hu: linear predictor of hurdle part * Returns: * a scalar to be added to the log posterior */ real hurdle_neg_binomial_logit_lpmf(int y, real mu, real phi, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + neg_binomial_2_lpmf(y | mu, phi) - log1m((phi / (mu + phi))^phi); } } /* hurdle negative binomial log-PDF of a single response * log parameterization for the negative binomial part * Args: * y: the response value * eta: linear predictor for negative binomial distribution * phi phi parameter of negative binomial distribution * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_neg_binomial_log_lpmf(int y, real eta, real phi, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + neg_binomial_2_log_lpmf(y | eta, phi) - log1m((phi / (exp(eta) + phi))^phi); } } /* hurdle negative binomial log-PDF of a single response * log parameterization for the negative binomial part * logit parameterization for the hurdle part * Args: * y: the response value * eta: linear predictor for negative binomial distribution * phi: phi parameter of negative binomial distribution * hu: linear predictor of hurdle part * Returns: * 
a scalar to be added to the log posterior */ real hurdle_neg_binomial_log_logit_lpmf(int y, real eta, real phi, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + neg_binomial_2_log_lpmf(y | eta, phi) - log1m((phi / (exp(eta) + phi))^phi); } } // hurdle negative binomial log-CCDF and log-CDF functions real hurdle_neg_binomial_lccdf(int y, real mu, real phi, real hu) { return bernoulli_lpmf(0 | hu) + neg_binomial_2_lccdf(y | mu, phi) - log1m((phi / (mu + phi))^phi); } real hurdle_neg_binomial_lcdf(int y, real mu, real phi, real hu) { return log1m_exp(hurdle_neg_binomial_lccdf(y | mu, phi, hu)); } brms/inst/chunks/fun_sequence.stan0000644000176200001440000000051614477014516017047 0ustar liggesusers /* integer sequence of values * Args: * start: starting integer * end: ending integer * Returns: * an integer sequence from start to end */ int[] sequence(int start, int end) { array[end - start + 1] int seq; for (n in 1:num_elements(seq)) { seq[n] = n + start - 1; } return seq; } brms/inst/chunks/fun_hurdle_lognormal.stan0000644000176200001440000000271014213413565020565 0ustar liggesusers /* hurdle lognormal log-PDF of a single response * Args: * y: the response value * mu: mean parameter of the lognormal distribution * sigma: sd parameter of the lognormal distribution * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_lognormal_lpdf(real y, real mu, real sigma, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + lognormal_lpdf(y | mu, sigma); } } /* hurdle lognormal log-PDF of a single response * logit parameterization of the hurdle part * Args: * y: the response value * mu: mean parameter of the lognormal distribution * sigma: sd parameter of the lognormal distribution * hu: linear predictor for the hurdle part * Returns: * a scalar to be added to the log posterior */ real hurdle_lognormal_logit_lpdf(real y, real mu, real 
sigma, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + lognormal_lpdf(y | mu, sigma); } } // hurdle lognormal log-CCDF and log-CDF functions real hurdle_lognormal_lccdf(real y, real mu, real sigma, real hu) { return bernoulli_lpmf(0 | hu) + lognormal_lccdf(y | mu, sigma); } real hurdle_lognormal_lcdf(real y, real mu, real sigma, real hu) { return log1m_exp(hurdle_lognormal_lccdf(y | mu, sigma, hu)); } brms/inst/chunks/fun_multinomial_logit.stan0000644000176200001440000000046614213413565020766 0ustar liggesusers /* multinomial-logit log-PMF * Args: * y: array of integer response values * mu: vector of category logit probabilities * Returns: * a scalar to be added to the log posterior */ real multinomial_logit2_lpmf(int[] y, vector mu) { return multinomial_lpmf(y | softmax(mu)); } brms/inst/chunks/fun_student_t_lagsar.stan0000644000176200001440000000177114213413565020600 0ustar liggesusers /* student-t log-pdf for spatially lagged responses * Args: * y: the response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: residual scale parameter * rho: positive autoregressive parameter * W: spatial weight matrix * eigenW: precomputed eigenvalues of W * Returns: * a scalar to be added to the log posterior */ real student_t_lagsar_lpdf(vector y, real nu, vector mu, real sigma, real rho, data matrix W, data vector eigenW) { int N = rows(y); real K = rows(y); // avoid integer division warning real inv_sigma2 = inv_square(sigma); matrix[N, N] W_tilde = add_diag(-rho * W, 1); vector[N] half_pred; real log_det; half_pred = W_tilde * y - mu; log_det = sum(log1m(rho * eigenW)); return - K / 2 * log(nu) + lgamma((nu + K) / 2) - lgamma(nu / 2) + 0.5 * K * log(inv_sigma2) + log_det - (nu + K) / 2 * log1p(dot_self(half_pred) * inv_sigma2 / nu); } brms/inst/chunks/fun_gaussian_process.stan0000644000176200001440000000162414213413565020603 0ustar liggesusers /* compute a latent Gaussian process * 
Args: * x: array of continuous predictor values * sdgp: marginal SD parameter * lscale: length-scale parameter * zgp: vector of independent standard normal variables * Returns: * a vector to be added to the linear predictor */ vector gp(data vector[] x, real sdgp, vector lscale, vector zgp) { int Dls = rows(lscale); int N = size(x); matrix[N, N] cov; if (Dls == 1) { // one dimensional or isotropic GP cov = gp_exp_quad_cov(x, sdgp, lscale[1]); } else { // multi-dimensional non-isotropic GP cov = gp_exp_quad_cov(x[, 1], sdgp, lscale[1]); for (d in 2:Dls) { cov = cov .* gp_exp_quad_cov(x[, d], 1, lscale[d]); } } for (n in 1:N) { // deal with numerical non-positive-definiteness cov[n, n] += 1e-12; } return cholesky_decompose(cov) * zgp; }